author		Harry Wentland <harry.wentland@amd.com>	2015-07-31 21:53:34 -0400
committer	Alex Deucher <alexander.deucher@amd.com>	2015-09-21 17:45:16 -0400
commit		622b586fbab151ccaaddf3ac9d321ff5bfbbc7e8 (patch)
tree		63b34c206b13aeb4f05d2d4c438f47c764042ad8
parent		3fc35e593d6e761cfce8efb2f3a20161a64e6a1b (diff)
amd/dal: TopologyManager
The topology manager is responsible for creating and managing graphics
objects and display paths. It also encapsulates the display detection
logic.

            SW Layer
/===============================================================\
| Timing      Mode                                  Asic        |
| Service     Manager                               Capability  |
|                                                               |
| Display     Topology     Display     Link        Adapter      |
| Path        Manager      Capability  Service     Service      |
| Service                                                       |
|---------------------------------------------------------------|
| GPIO        IRQ          I2cAux      HW          BIOS         |
| Service     Manager                  Sequencer   Parser       |
|                                                               |
| Connector   Encoder      Audio       GPU         Controller   |
\===============================================================/
            HW Layer

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
-rw-r--r--  drivers/gpu/drm/amd/dal/Makefile  2
-rw-r--r--  drivers/gpu/drm/amd/dal/include/display_path_set_interface.h  64
-rw-r--r--  drivers/gpu/drm/amd/dal/topology/Makefile  12
-rw-r--r--  drivers/gpu/drm/amd/dal/topology/tm_detection_mgr.c  2425
-rw-r--r--  drivers/gpu/drm/amd/dal/topology/tm_detection_mgr.h  160
-rw-r--r--  drivers/gpu/drm/amd/dal/topology/tm_display_path_set.c  158
-rw-r--r--  drivers/gpu/drm/amd/dal/topology/tm_internal_types.h  255
-rw-r--r--  drivers/gpu/drm/amd/dal/topology/tm_resource.c  381
-rw-r--r--  drivers/gpu/drm/amd/dal/topology/tm_resource.h  162
-rw-r--r--  drivers/gpu/drm/amd/dal/topology/tm_resource_builder.c  1871
-rw-r--r--  drivers/gpu/drm/amd/dal/topology/tm_resource_builder.h  85
-rw-r--r--  drivers/gpu/drm/amd/dal/topology/tm_resource_mgr.c  3178
-rw-r--r--  drivers/gpu/drm/amd/dal/topology/tm_resource_mgr.h  274
-rw-r--r--  drivers/gpu/drm/amd/dal/topology/tm_subsets_cache.c  877
-rw-r--r--  drivers/gpu/drm/amd/dal/topology/tm_subsets_cache.h  104
-rw-r--r--  drivers/gpu/drm/amd/dal/topology/tm_utils.c  1230
-rw-r--r--  drivers/gpu/drm/amd/dal/topology/tm_utils.h  142
-rw-r--r--  drivers/gpu/drm/amd/dal/topology/topology.c  5313
-rw-r--r--  drivers/gpu/drm/amd/dal/topology/topology.h  39
19 files changed, 16731 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/dal/Makefile b/drivers/gpu/drm/amd/dal/Makefile
index 92f0cee9bba7..b697398d7e4f 100644
--- a/drivers/gpu/drm/amd/dal/Makefile
+++ b/drivers/gpu/drm/amd/dal/Makefile
@@ -9,7 +9,7 @@ subdir-ccflags-y += -I$(AMDDALPATH)/ -I$(AMDDALPATH)/include -DDAL_CZ_BRINGUP
DAL_LIBS = adapter amdgpu_dm audio asic_capability basics bios connector \
controller dcs display_path encoder gpio gpu hw_sequencer i2caux irq \
- link_service mode_manager timing_service
+ link_service mode_manager timing_service topology
AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DAL_PATH)/,$(DAL_LIBS)))
diff --git a/drivers/gpu/drm/amd/dal/include/display_path_set_interface.h b/drivers/gpu/drm/amd/dal/include/display_path_set_interface.h
new file mode 100644
index 000000000000..f626a4dfc5bb
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/include/display_path_set_interface.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DISPLAY_PATH_SET_INTERFACE_H__
+#define __DAL_DISPLAY_PATH_SET_INTERFACE_H__
+
+#include "include/display_path_interface.h"
+
+/**
+ *
+ * Display Path Set is the container to store a set of co-functional
+ * Display Paths with temporarily allocated resources.
+ */
+
+struct display_path_set;
+
+struct display_path_set_init_data {
+ struct dal_context *dal_context;
+ uint32_t display_path_num;
+};
+
+struct display_path_set *dal_display_path_set_create(
+ struct display_path_set_init_data *init_data);
+
+void dal_display_path_set_destroy(
+ struct display_path_set **dps);
+
+bool dal_display_path_set_add_path(
+ struct display_path_set *dps,
+ struct display_path *display_path);
+
+/* Returns Path at [display_index] */
+struct display_path *dal_display_path_set_path_at_index(
+ struct display_path_set *dps,
+ uint32_t display_index);
+
+/* Returns Path which has "index" property equal to "display_index". */
+struct display_path *dal_display_path_set_index_to_path(
+ struct display_path_set *dps,
+ uint32_t display_index);
+
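+/* Illustrative usage (editor's sketch, not part of the original patch;
+ * 'dal_ctx' and 'path' below are hypothetical). A caller sizes the set
+ * for the number of co-functional paths, adds paths, looks them up, then
+ * destroys the set:
+ *
+ *	struct display_path_set_init_data init_data = {
+ *		.dal_context = dal_ctx,
+ *		.display_path_num = 2,
+ *	};
+ *	struct display_path_set *dps =
+ *		dal_display_path_set_create(&init_data);
+ *
+ *	if (dps != NULL && dal_display_path_set_add_path(dps, path)) {
+ *		struct display_path *first =
+ *			dal_display_path_set_path_at_index(dps, 0);
+ *	}
+ *
+ *	dal_display_path_set_destroy(&dps);
+ */
+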
+#endif /* __DAL_DISPLAY_PATH_SET_INTERFACE_H__ */
diff --git a/drivers/gpu/drm/amd/dal/topology/Makefile b/drivers/gpu/drm/amd/dal/topology/Makefile
new file mode 100644
index 000000000000..f183142f768b
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/topology/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for the 'topology' sub-component of DAL.
+# It facilitates control of the sharable HW blocks and resources
+# of the adapter.
+
+TOPOLOGY = tm_resource_mgr.o tm_detection_mgr.o tm_resource_builder.o \
+ tm_utils.o tm_display_path_set.o tm_subsets_cache.o tm_resource.o \
+ topology.o
+
+AMD_DAL_TOPOLOGY = $(addprefix $(AMDDALPATH)/topology/,$(TOPOLOGY))
+
+AMD_DAL_FILES += $(AMD_DAL_TOPOLOGY)
diff --git a/drivers/gpu/drm/amd/dal/topology/tm_detection_mgr.c b/drivers/gpu/drm/amd/dal/topology/tm_detection_mgr.c
new file mode 100644
index 000000000000..e96776a7b8c3
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/topology/tm_detection_mgr.c
@@ -0,0 +1,2425 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dal_services.h"
+#include "tm_utils.h"
+#include "tm_detection_mgr.h"
+#include "tm_internal_types.h"
+#include "topology.h"
+#include "include/i2caux_interface.h"
+#include "include/connector_interface.h"
+#include "include/dcs_interface.h"
+#include "include/encoder_interface.h"
+#include "include/flat_set.h"
+
+
+/*****************************************************************************
+ * private data structures
+ ***************************************************************************/
+
+struct display_state {
+ struct display_sink_capability sink_cap;
+ uint32_t edid_len;
+ uint8_t edid[512];
+ bool audio_cap;
+};
+
+/*************************************
+ * Structure for registration for IRQ.
+ *************************************/
+#define MAX_NUM_OF_PATHS_PER_CONNECTOR 2
+
+enum tmdm_timer_irq_state {
+ TMDM_TMR_STATE_NOT_REGISTERED,
+ TMDM_TMR_STATE_REGISTERED,/* registered, currently in timer queue */
+ TMDM_TMR_STATE_CANCELLED /* registered, but we don't want it anymore */
+};
+
+#define TMDM_DECODE_TIMER_STATE(state) \
+ ((state) == TMDM_TMR_STATE_NOT_REGISTERED ? \
+ "TMDM_TMR_STATE_NOT_REGISTERED" : \
+ (state) == TMDM_TMR_STATE_REGISTERED ? \
+ "TMDM_TMR_STATE_REGISTERED" : \
+ (state) == TMDM_TMR_STATE_CANCELLED ? \
+ "TMDM_TMR_STATE_CANCELLED" : "Invalid")
+
+struct tmdm_irq_entry {
+ enum dal_irq_source irq_src;
+ irq_handler_idx handler_index_ref;
+ /* This flag protects against nesting of timers for the same
+ * connector.
+ * If we allow nesting of timers we have no way of knowing which timer
+ * was fired when the handler runs.
+ * All access to 'timer_state' must be done *only* via
+ * tmdm_set_timer_state() and tmdm_get_timer_state()!*/
+ enum tmdm_timer_irq_state timer_state;
+};
+
+struct tmdm_hpd_flags {
+ /* Controls use of SW Timer for de-bounce of HPD interrupts. */
+ bool SW_HPD_FILTERING:1;
+ /* HW Capability to filter (de-bounce) HPD interrupts. */
+ bool HW_HPD_FILTERING:1;
+};
+
+/* This structure is allocated dynamically for each Connector. */
+struct tmdm_irq_registration {
+ struct graphics_object_id connector_id;
+ struct connector *connector;
+
+ /* Access to 'hpd_flags' only via sw_hpd_filter_??? functions! */
+ struct tmdm_hpd_flags hpd_flags;
+
+ /* Number of times detection was continuously rescheduled */
+ uint32_t resched_count;
+
+ struct display_path *connected_display;
+ /* A DP connector can have a passive dongle plugged, so it can be
+ * registered for more than one Display Path. One registration is for
+ * DP and another is for HDMI. */
+ struct display_path *displays[MAX_NUM_OF_PATHS_PER_CONNECTOR];
+ uint32_t displays_num;
+
+ /* A connector may register for both HPD and Timer interrupt.
+ * 'entries' stores information related to each type of interrupt. */
+ struct tmdm_irq_entry entries[TM_INTERRUPT_TYPE_COUNT];
+
+ /* Pointer back to Detection Manager. We will use this pointer in
+ * interrupt handlers, because 'struct tmdm_irq_registration'
+ * will be passed in as interrupt context. */
+ struct tm_detection_mgr *detection_mgr;
+};
+
+/* irq entry accessor macro definitions */
+#define IRQ_ENTRY(irq_reg, type) (&(irq_reg)->entries[type])
+#define IRQ_ENTRY_HPD(irq_reg) IRQ_ENTRY(irq_reg, TM_INTERRUPT_TYPE_HOTPLUG)
+#define IRQ_ENTRY_TIMER(irq_reg) IRQ_ENTRY(irq_reg, TM_INTERRUPT_TYPE_TIMER)
+
+
+struct tm_detection_mgr {
+ struct dal_context *dal_context;
+ struct adapter_service *as;
+ struct hw_sequencer *hwss;
+ struct tm_resource_mgr *resource_mgr;
+ struct topology_mgr *tm_hpd_callback;
+
+ /*detection options, irq source, irq handler of connectors*/
+ struct tmdm_irq_registration *connector_irq_registrations;
+ /* One irq registration for each connector, so it is the same
+ * as number of connectors. */
+ uint8_t irq_registrations_num;
+
+ bool is_blocking_detection;
+ bool is_blocking_interrupts;
+};
+
+enum {
+ DELAY_ON_CONNECT_IN_MS = 500,
+ DELAY_ON_DISCONNECT_IN_MS = 100,
+ DP_PASSIVE_DONGLE_INTERVAL_IN_MS = 500,
+ RESCHED_TIMER_INTERVAL_IN_MS = 3000,
+ NUM_OF_DETECTION_RETRIES = 1,
+ NUM_OF_LOCK_RETRIES = 50,
+ LOCK_RETRY_INTERVAL_IN_MS = 1,
+ MICROSECONDS_IN_MILLISECOND = 1000
+};
+
+/*****************************************************************************
+ * prototypes of static functions
+ *****************************************************************************/
+
+static void tmdm_handle_hpd_interrupt(
+ void *interrupt_params);
+
+static void tmdm_handle_timer_interrupt(
+ void *interrupt_params);
+
+static void tmdm_set_timer_state(struct tm_detection_mgr *tm_dm,
+ struct tmdm_irq_entry *irq_entry,
+ enum tmdm_timer_irq_state new_timer_state);
+
+/* HPD Filter - related */
+static void sw_hpd_filter_set(struct tmdm_irq_registration *connector_irq,
+ bool new_val);
+static void hw_hpd_filter_set(struct tmdm_irq_registration *connector_irq,
+ bool new_val);
+
+static bool sw_hpd_filter_get(struct tmdm_irq_registration *irq_reg);
+static bool hw_hpd_filter_get(struct tmdm_irq_registration *irq_reg);
+
+/*****************************************************************************
+ * static functions
+ *****************************************************************************/
+static bool construct(
+ struct tm_detection_mgr *tm_dm,
+ struct tm_detection_mgr_init_data *init_data)
+{
+ uint8_t i;
+ uint8_t j;
+ struct tmdm_irq_registration *irq_registration;
+ struct tmdm_irq_entry *irq_entry;
+
+ tm_dm->dal_context = init_data->dal_context;
+ tm_dm->as = init_data->as;
+ tm_dm->hwss = init_data->hwss;
+ tm_dm->resource_mgr = init_data->resource_mgr;
+ tm_dm->tm_hpd_callback = init_data->tm;
+ tm_dm->is_blocking_detection = false;
+ tm_dm->is_blocking_interrupts = false;
+ tm_dm->irq_registrations_num =
+ dal_adapter_service_get_connectors_num(tm_dm->as);
+
+ if (tm_dm->irq_registrations_num == 0)
+ return false;
+
+ tm_dm->connector_irq_registrations = dal_alloc(
+ tm_dm->irq_registrations_num *
+ sizeof(struct tmdm_irq_registration));
+
+ if (tm_dm->connector_irq_registrations == NULL)
+ return false;
+
+ /** Reset all entries. Please note, at this point it is impossible
+ * to initialize entries properly since connector OBJECTS (and their
+ * properties) do not exist yet.
+ *
+ * During topology creation, each display is registered for detection
+ * via register_display(). 'connector' will be filled with the
+ * connector from the display path. After this,
+ * display_path->connector->go_base.id ==
+ * tm_dm->connector_irq_registrations[i].connector_id.
+ */
+ for (i = 0; i < tm_dm->irq_registrations_num; ++i) {
+
+ irq_registration = &tm_dm->connector_irq_registrations[i];
+
+ irq_registration->detection_mgr = tm_dm;
+
+ irq_registration->connector_id =
+ dal_adapter_service_get_connector_obj_id(
+ tm_dm->as, i);
+ irq_registration->displays_num = 0;
+ sw_hpd_filter_set(irq_registration, false);
+ hw_hpd_filter_set(irq_registration, false);
+ irq_registration->resched_count = 0;
+
+ for (j = 0; j < TM_INTERRUPT_TYPE_COUNT; ++j) {
+ irq_entry = IRQ_ENTRY(irq_registration, j);
+
+ irq_entry->irq_src = DAL_IRQ_SOURCE_INVALID;
+ irq_entry->handler_index_ref =
+ DAL_INVALID_IRQ_HANDLER_IDX;
+ tmdm_set_timer_state(tm_dm, irq_entry,
+ TMDM_TMR_STATE_NOT_REGISTERED);
+ }
+ }
+
+ return true;
+}
+
+static void destruct(
+ struct tm_detection_mgr *tm_dm)
+{
+ if (tm_dm->connector_irq_registrations != NULL)
+ dal_free(tm_dm->connector_irq_registrations);
+}
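+
+/* Editor's sketch (hedged): in the DAL idiom, construct()/destruct() above
+ * are typically wrapped by create/destroy helpers along these lines. The
+ * wrapper name is an assumption - the real wrappers are not shown in this
+ * hunk:
+ *
+ *	struct tm_detection_mgr *dal_tm_detection_mgr_create(
+ *		struct tm_detection_mgr_init_data *init_data)
+ *	{
+ *		struct tm_detection_mgr *tm_dm = dal_alloc(sizeof(*tm_dm));
+ *
+ *		if (tm_dm != NULL && !construct(tm_dm, init_data)) {
+ *			dal_free(tm_dm);
+ *			tm_dm = NULL;
+ *		}
+ *		return tm_dm;
+ *	}
+ */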
+
+static struct graphics_object_id get_connector_obj_id(
+ struct display_path *display_path)
+{
+ return dal_connector_get_graphics_object_id(
+ dal_display_path_get_connector(display_path));
+}
+
+static bool handle_skipping_detection(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path,
+ enum tm_detection_method method,
+ struct tm_detection_status *detection_status)
+{
+ union display_path_properties properties;
+ struct dal_context *dal_context = tm_dm->dal_context;
+
+ /* Validate inputs before they are dereferenced. */
+ TM_ASSERT(display_path != NULL);
+ TM_ASSERT(detection_status != NULL);
+
+ if (method == DETECTION_METHOD_CACHED)
+ return true;
+
+ if (method == DETECTION_METHOD_CONNECTED_ONLY &&
+ !dal_display_path_is_target_connected(display_path))
+ return true;
+
+ properties = dal_display_path_get_properties(display_path);
+
+ /*For MST branch display paths detection is done by MST Manager*/
+ if (properties.bits.IS_BRANCH_DP_MST_PATH) {
+ detection_status->dp_mst_detection = true;
+ return true;
+ }
+
+ /**For embedded display (LVDS/eDP) we skip the HW detection
+ * (unless explicitly requested to do detection)
+ */
+ if (method != DETECTION_METHOD_DESTRUCTIVE_AND_EMBEDDED &&
+ dal_is_embedded_signal(
+ dal_display_path_get_query_signal(display_path,
+ SINK_LINK_INDEX)))
+ return true;
+
+ /**If the display path is faked (for Gemini Slave and Stream computing)
+ * it's not connected
+ */
+ if (properties.bits.FAKED_PATH) {
+ detection_status->connected = false;
+ return true;
+ }
+
+ /**If "force connected" state has been set for the display path
+ * then no detection is needed
+ */
+ if (properties.bits.ALWAYS_CONNECTED) {
+ detection_status->connected = true;
+ return true;
+ }
+
+ /**If we reached here, detection was not handled as
+ * "skipped" so we probably need to do physical detection
+ */
+ return false;
+}
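+
+/* Editor's note with a hedged caller sketch: the return value means
+ * "detection was fully handled (skipped)". A typical caller (hypothetical
+ * shape; the real call sites live elsewhere in this file) would be:
+ *
+ *	struct tm_detection_status status = { 0 };
+ *
+ *	if (handle_skipping_detection(tm_dm, path, method, &status))
+ *		return;	(status already filled in - no HW access needed)
+ *	do_target_detection(tm_dm, path, destructive, &status);
+ */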
+
+/**
+ *****************************************************************************
+ * Obtains pointer to IRQ registration.
+ *
+ * \param [in] connector: connector ID which identifies IRQ registration entry
+ *
+ * \return Pointer to entry if such found, NULL otherwise
+ *****************************************************************************
+ */
+static struct tmdm_irq_registration *get_irq_entry(
+ struct tm_detection_mgr *tm_dm,
+ struct graphics_object_id connector)
+{
+ uint8_t i;
+
+ for (i = 0; i < tm_dm->irq_registrations_num; ++i) {
+ if (dal_graphics_object_id_is_equal(
+ tm_dm->connector_irq_registrations[i].connector_id,
+ connector))
+ return &tm_dm->connector_irq_registrations[i];
+ }
+ return NULL;
+}
+
+/**
+ *****************************************************************************
+ * Function: get_irq_source
+ *
+ * @brief
+ * Returns the IRQ source matching the given connector + interrupt type:
+ * - Timer interrupt: not associated with a connector
+ * - Hotplug interrupt: associated with the connector HPD line
+ * - DDC polling interrupt: associated with the connector DDC line
+ * - Sink status (short pulse) interrupt: associated with the connector
+ * DDC line (though the interrupt occurs on the HPD line)
+ *
+ * @param [in] connector: connector ID with HPD and DDC lines
+ * @param [in] type: TM interrupt type
+ *
+ * @return
+ * IRQ source matching the given connector + interrupt type
+ *****************************************************************************
+ */
+static enum dal_irq_source get_irq_source(
+ struct tm_detection_mgr *tm_dm,
+ struct graphics_object_id connector,
+ enum tm_interrupt_type type)
+{
+ enum dal_irq_source irq_src = DAL_IRQ_SOURCE_INVALID;
+
+ switch (type) {
+ case TM_INTERRUPT_TYPE_TIMER:
+ irq_src = DAL_IRQ_SOURCE_TIMER;
+ break;
+
+ case TM_INTERRUPT_TYPE_HOTPLUG: {
+ struct irq *hpd_gpio;
+
+ hpd_gpio = dal_adapter_service_obtain_hpd_irq(tm_dm->as,
+ connector);
+
+ if (hpd_gpio != NULL) {
+ irq_src = dal_irq_get_source(hpd_gpio);
+ dal_adapter_service_release_irq(tm_dm->as, hpd_gpio);
+ }
+ break;
+ } /* case TM_INTERRUPT_TYPE_HOTPLUG */
+
+ default:
+ break;
+ }
+ return irq_src;
+}
+
+static void tmdm_set_timer_state(struct tm_detection_mgr *tm_dm,
+ struct tmdm_irq_entry *irq_entry,
+ enum tmdm_timer_irq_state new_timer_state)
+{
+ struct dal_context *dal_context = tm_dm->dal_context;
+
+ TM_HPD_IRQ("%s:from: %s --> to: %s\n", __func__,
+ TMDM_DECODE_TIMER_STATE(irq_entry->timer_state),
+ TMDM_DECODE_TIMER_STATE(new_timer_state));
+
+ irq_entry->timer_state = new_timer_state;
+}
+
+static enum tmdm_timer_irq_state tmdm_get_timer_state(
+ struct tmdm_irq_entry *irq_entry)
+{
+ return irq_entry->timer_state;
+}
+
+static void sw_hpd_filter_set(struct tmdm_irq_registration *connector_irq,
+ bool new_val)
+{
+ struct tm_detection_mgr *tm_dm = connector_irq->detection_mgr;
+ struct dal_context *dal_context = tm_dm->dal_context;
+
+ TM_HPD_IRQ("irq_src=%d: SW_HPD_FILTERING: %d --> to: %d\n",
+ IRQ_ENTRY_HPD(connector_irq)->irq_src,
+ connector_irq->hpd_flags.SW_HPD_FILTERING,
+ new_val);
+
+ connector_irq->hpd_flags.SW_HPD_FILTERING = new_val;
+}
+
+static bool sw_hpd_filter_get(struct tmdm_irq_registration *irq_reg)
+{
+ return irq_reg->hpd_flags.SW_HPD_FILTERING;
+}
+
+static void hw_hpd_filter_set(struct tmdm_irq_registration *connector_irq,
+ bool new_val)
+{
+ struct tm_detection_mgr *tm_dm = connector_irq->detection_mgr;
+ struct dal_context *dal_context = tm_dm->dal_context;
+
+ TM_HPD_IRQ("irq_src=%d: HW_HPD_FILTERING: %d --> to: %d\n",
+ IRQ_ENTRY_HPD(connector_irq)->irq_src,
+ connector_irq->hpd_flags.HW_HPD_FILTERING,
+ new_val);
+
+ connector_irq->hpd_flags.HW_HPD_FILTERING = new_val;
+}
+
+static bool hw_hpd_filter_get(struct tmdm_irq_registration *irq_reg)
+{
+ return irq_reg->hpd_flags.HW_HPD_FILTERING;
+}
+
+/**
+ *****************************************************************************
+ * Function: allow_aux_while_hpd_low
+ *
+ * @brief
+ * Configures aux to allow transactions while HPD is low
+ *
+ * @param [in] display_path: Display path on which to perform operation
+ * @param [in] allow: if true allow aux transactions while HPD is low
+ *****************************************************************************
+ */
+static void allow_aux_while_hpd_low(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path,
+ bool allow)
+{
+ union aux_config aux_flags;
+ struct ddc *ddc;
+
+ aux_flags.raw = 0;
+ aux_flags.bits.ALLOW_AUX_WHEN_HPD_LOW = allow;
+
+ ddc = dal_adapter_service_obtain_ddc(tm_dm->as,
+ get_connector_obj_id(display_path));
+ if (ddc != NULL) {
+ dal_i2caux_configure_aux(dal_adapter_service_get_i2caux(
+ tm_dm->as),
+ ddc, aux_flags);
+
+ dal_adapter_service_release_ddc(tm_dm->as, ddc);
+ }
+}
+
+/**
+ *****************************************************************************
+ * Function: need_handle_connection_status_based_on_sink_count
+ *
+ * @brief : check whether we need to handle dongle sink count info. Only
+ * check sink_count == 0 for some known dongles, or if the runtime
+ * parameter exists.
+ *
+ * @return
+ *	true if connection status should be derived from the sink count,
+ *	false otherwise
+ *****************************************************************************
+ */
+bool need_handle_connection_status_based_on_sink_count(
+ struct tm_detection_mgr *tm_dm,
+ struct tm_detection_status *detection_status)
+{
+ enum display_dongle_type dongle_type;
+
+ if (detection_status == NULL)
+ return false;
+
+ /* Check detection_status before dereferencing it; limit this sink
+ * count check to active dongles only. */
+ dongle_type = detection_status->sink_capabilities.dongle_type;
+
+ if (!((detection_status->
+ sink_capabilities.downstrm_sink_count_valid == true) ||
+ dal_adapter_service_is_feature_supported(
+ FEATURE_DONGLE_SINK_COUNT_CHECK)))
+ return false;
+
+ if (((dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) ||
+ (dongle_type == DISPLAY_DONGLE_DP_DVI_CONVERTER) ||
+ (dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)) &&
+ (detection_status->sink_capabilities.downstrm_sink_count == 0))
+ return true;
+
+ return false;
+}
+
+/**
+ *****************************************************************************
+ * Function: apply_load_detection_based_edid_patch
+ *
+ * @brief
+ * Applies the load-detection-based EDID patch if needed.
+ *
+ * @param [in] display_path: Display path on which to perform
+ * detection
+ * @param [in] detection_status: Structure which holds detection-related
+ * info from the current detection procedure.
+ *
+ * @return
+ * true if patch was applied, false otherwise
+ *****************************************************************************
+ */
+static bool apply_load_detection_based_edid_patch(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path,
+ const struct tm_detection_status *detection_status)
+{
+ bool apply_patch = false;
+ union dcs_monitor_patch_flags patch_flags;
+ struct monitor_patch_info patch_info;
+ struct dal_context *dal_context = tm_dm->dal_context;
+
+ patch_info.type = MONITOR_PATCH_TYPE_DUAL_EDID_PANEL;
+ patch_info.param = 1;
+
+ patch_flags = dal_dcs_get_monitor_patch_flags(
+ dal_display_path_get_dcs(display_path));
+
+ /*Dual EDID display support*/
+ if (patch_flags.flags.DUAL_EDID_PANEL) {
+ struct display_path *temp_path;
+ struct graphics_object_id temp_id;
+ struct graphics_object_id path_id;
+ enum signal_type signal;
+
+ if (SIGNAL_TYPE_RGB == detection_status->detected_signal) {
+ signal = dal_hw_sequencer_detect_load(
+ tm_dm->hwss, display_path);
+ if (SIGNAL_TYPE_RGB == signal)
+ apply_patch = true;
+ /** If it is a DP_VGA active dongle, force the edid to
+ * analog, without the detection check
+ */
+ } else if (detection_status->sink_capabilities.dongle_type
+ == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
+ enum dcs_edid_connector_type conn_type =
+ dal_dcs_get_connector_type(
+ dal_display_path_get_dcs(display_path));
+
+ if (conn_type != EDID_CONNECTOR_ANALOG)
+ apply_patch = true;
+ /**if we're currently detecting DVI, we force analog
+ * edid only if there is load detected on the CRT on
+ * the same connector as current DVI
+ */
+ } else if (dal_is_dvi_signal(
+ detection_status->detected_signal)) {
+ uint8_t i;
+ struct tmdm_irq_registration *connector_irq;
+
+ connector_irq = get_irq_entry(tm_dm,
+ get_connector_obj_id(display_path));
+
+ TM_ASSERT(connector_irq != NULL);
+
+ for (i = 0; i < connector_irq->displays_num; ++i) {
+ enum tm_result tm_ret;
+
+ temp_path = connector_irq->displays[i];
+
+ /*if connectors are the same, and the temp
+ * display path's signal type is RGB (CRT).
+ * TODO : what does temp_path !=
+ * display_path mean? debug
+ * if (temp_path != display_path &&
+ * dal_display_path_get_query_signal
+ * (temp_path, SINK_LINK_INDEX) ==
+ * SIGNAL_TYPE_RGB)
+ */
+ temp_id = get_connector_obj_id(temp_path);
+ path_id = get_connector_obj_id(display_path);
+ signal = dal_display_path_get_query_signal(
+ temp_path,
+ SINK_LINK_INDEX);
+
+ if (dal_graphics_object_id_is_equal(
+ temp_id, path_id) ||
+ signal != SIGNAL_TYPE_RGB)
+ continue;
+
+ tm_ret =
+ tm_resource_mgr_acquire_resources(
+ tm_dm->resource_mgr,
+ temp_path,
+ TM_ACQUIRE_METHOD_SW);
+ if (TM_RESULT_SUCCESS == tm_ret) {
+ signal =
+ dal_hw_sequencer_detect_load(
+ tm_dm->hwss,
+ temp_path);
+ apply_patch =
+ (signal == SIGNAL_TYPE_RGB);
+
+ tm_resource_mgr_release_resources(
+ tm_dm->resource_mgr,
+ temp_path,
+ TM_ACQUIRE_METHOD_SW);
+ } else
+ BREAK_TO_DEBUGGER();
+ }
+ }
+ }
+
+ /* apply the patch if needed. In this context "apply" actually refers
+ * to "setup patch value"
+ */
+ if (apply_patch)
+ if (!dal_dcs_set_monitor_patch_info(
+ dal_display_path_get_dcs(display_path), &patch_info)) {
+ BREAK_TO_DEBUGGER();
+ apply_patch = false;
+ }
+
+ return apply_patch;
+}
+
+/**
+ *****************************************************************************
+ * Function: apply_detection_status_patches
+ *
+ * @brief
+ * Apply EDID-based workarounds to sink capabilities.
+ *
+ * @param [in] display_path: Display path on which to perform detection
+ * @param [in] detection_status: Structure which holds detection-related info
+ * from current detection procedure.
+ *****************************************************************************
+ */
+static void apply_detection_status_patches(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path,
+ struct tm_detection_status *detection_status)
+{
+ union dcs_monitor_patch_flags patch_flags =
+ dal_dcs_get_monitor_patch_flags(
+ dal_display_path_get_dcs(display_path));
+
+ /*allow aux transactions when hpd low for
+ * AllowAuxWhenHpdLow monitor patch*/
+ if ((patch_flags.flags.ALLOW_AUX_WHEN_HPD_LOW)
+ && (detection_status->detected_signal ==
+ SIGNAL_TYPE_DISPLAY_PORT))
+ allow_aux_while_hpd_low(tm_dm, display_path, true);
+
+ /* Add other patches here */
+}
+
+/**
+ *****************************************************************************
+ * Function: read_edid
+ *
+ * @brief
+ * Atomic Edid retrieval.
+ * Reads Edid data, sets up monitor patches and updates Edid
+ * internals if needed. After we read the EDID we know whether
+ * load detection additionally needs to
+ * be performed
+ *
+ * @param [in] display_path: Display path on which to perform
+ * detection
+ * @param [in] destructive: true if detection method is
+ * destructive,
+ * false if non-destructive
+ * @param [in/out] detection_status: Structure to hold detection-
+ * related info
+ * from current detection procedure.
+ *
+ * @return
+ * True if load detection needed to complete display detection,
+ * false otherwise
+ *****************************************************************************
+ */
+static bool read_edid(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path,
+ bool destructive,
+ struct tm_detection_status *detection_status)
+{
+ bool load_detect_need = false;
+ bool connected = false;
+ struct vendor_product_id_info old_vendor_info;
+ bool old_vendor_info_retrieved;
+ enum edid_retrieve_status status;
+
+ /**Get old vendor product info - we need this for later on
+ * to figure out if the monitor changed or not...
+ */
+ old_vendor_info_retrieved = dal_dcs_get_vendor_product_id_info(
+ dal_display_path_get_dcs(display_path), &old_vendor_info);
+
+ /*TODO: Android does not support 2 encoders for now,
+ * so preDDC and postDDC are not needed yet.
+ * configure display path before I2C over AUX case
+ * m_pHWSS->PreDDC(display_path);
+ *
+ * Read Edid data from monitor into local buffer.
+ * It will also create list of applicable monitor patches
+ */
+ status = dal_dcs_retrieve_raw_edid(
+ dal_display_path_get_dcs(display_path));
+
+ /**TODO
+ * restore configure display path after I2C over AUX case
+ * m_pHWSS->PostDDC(display_path);
+ */
+
+ /**Only apply Dual-EDID patch and update return "status"
+ * when new EDID is retrieved
+ */
+ if (status == EDID_RETRIEVE_SUCCESS)
+ /**Handle any load detection based edid patching. In case
+ * it was successfully applied - treat Edid as new
+ */
+ apply_load_detection_based_edid_patch(tm_dm, display_path,
+ detection_status);
+
+ /**Update Edid internals if new Edid was retrieved. In case
+ * of update failure we consider no Edid detected
+ */
+ if (status == EDID_RETRIEVE_SUCCESS)
+ status = dal_dcs_update_edid_from_last_retrieved(
+ dal_display_path_get_dcs(display_path));
+
+ /*On successful Edid read we need to update detection status*/
+ if (status == EDID_RETRIEVE_SUCCESS ||
+ status == EDID_RETRIEVE_SAME_EDID) {
+ enum dcs_edid_connector_type connector_type;
+ enum display_dongle_type dongle;
+ enum signal_type signal;
+ /*apply EDID-based sink capability patches*/
+ apply_detection_status_patches(
+ tm_dm, display_path, detection_status);
+
+ connector_type = dal_dcs_get_connector_type(
+ dal_display_path_get_dcs(display_path));
+
+ signal = detection_status->detected_signal;
+
+ dongle = detection_status->sink_capabilities.dongle_type;
+
+ /*check whether EDID connector is right for the signal*/
+ if (tm_utils_is_edid_connector_type_valid_with_signal_type(
+ dongle,
+ connector_type, signal)) {
+ /*downgrade signal in case it has
+ * changed but EDID hasn't*/
+ detection_status->detected_signal =
+ tm_utils_get_downgraded_signal_type(
+ signal,
+ connector_type);
+
+ /* report connected since edid successfully detected*/
+ connected = true;
+ }
+
+ /* TODO: m_isPersistenceEmulationIsOn is not implemented in
+ * AS yet; always return false for now:
+ * m_pAdapterService->GetEdidPersistenceMode() = false
+ */
+
+ /**Check whether monitor ID changed - this flag only
+ * matters if the connected flag is set
+ */
+ if (connected && old_vendor_info_retrieved) {
+ /**Get new vendor product info - we only need to
+ * test this if we were able to retrieve the old
+ * vendor information
+ */
+ struct vendor_product_id_info new_vendor_info;
+ bool new_vendor_info_retrieved;
+
+ new_vendor_info_retrieved =
+ dal_dcs_get_vendor_product_id_info(
+ dal_display_path_get_dcs(
+ display_path),
+ &new_vendor_info);
+
+ if (new_vendor_info_retrieved) {
+ if (old_vendor_info.manufacturer_id !=
+ new_vendor_info.manufacturer_id ||
+ old_vendor_info.product_id !=
+ new_vendor_info.product_id ||
+ old_vendor_info.serial_id !=
+ new_vendor_info.serial_id)
+ detection_status->monitor_changed =
+ true;
+ /*else if(m_pAdapterService->
+ * GetEdidPersistenceMode())
+ * {
+ * status = EDID_RETRIEVE_SAME_EDID;
+ * }
+ */
+ }
+ }
+ }
+
+ /**By DisplayPort spec, if sink is present and Edid is not available
+ * DP display should support 640x480, therefore we can report
+ * DisplayPort
+ * as connected if HPD high or Edid present (one indication enough)
+ */
+ if (dal_is_dp_signal(detection_status->detected_signal))
+ connected = (detection_status->connected || connected);
+
+ /* Handling the case of unplugging of a VGA monitor with
+ * DDC polling. In this case we keep track of the previous EDID read
+ * status so we can detect a change and detect the unplug. Return so
+ * that we do not try to detect load (as this is non-destructive),
+ * and update connected to false.
+ */
+ if (SIGNAL_TYPE_RGB == detection_status->detected_signal &&
+ !destructive
+ && status == EDID_RETRIEVE_FAIL_WITH_PREVIOUS_SUCCESS)
+ connected = false;
+ else if (!connected && dal_is_analog_signal(
+ detection_status->detected_signal))
+ load_detect_need = true;
+
+ /**For embedded display its connection state just depends on lid state.
+ * Edid retrieving state should not affect it.
+ */
+ if (!dal_is_embedded_signal(detection_status->detected_signal))
+ detection_status->connected = connected;
+
+ return load_detect_need;
+}
+
+/**
+ *****************************************************************************
+ * Function: is_sink_present
+ *
+ * @brief
+ * Checks whether the sink is present
+ *
+ * @param [in] display_path: Display path on which to perform detection
+ * @param [out] detection_status: Structure to hold detection-related info
+ * from current detection procedure.
+ *****************************************************************************
+ */
+static bool is_sink_present(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path)
+{
+ bool connected = dal_hw_sequencer_is_sink_present(tm_dm->hwss,
+ display_path);
+ uint32_t display_index = dal_display_path_get_display_index(
+ display_path);
+ struct dal_context *dal_context = tm_dm->dal_context;
+
+ TM_DISPLAY_DETECT("HPD of DisplayIndex:%d is: %s\n",
+ display_index, (connected == true ? "High" : "Low"));
+
+ if (!connected && dal_dcs_get_monitor_patch_flags(
+ dal_display_path_get_dcs(display_path)).
+ flags.ALLOW_AUX_WHEN_HPD_LOW) {
+ uint8_t value = 0;
+ enum ddc_result result = dal_dcs_dpcd_read(
+ dal_display_path_get_dcs(display_path),
+ DPCD_ADDRESS_DPCD_REV, &value, 1);
+
+ if ((result == DDC_RESULT_SUCESSFULL) && (value > 0))
+ connected = true;
+ else
+ /*reset the aux control to disallow aux
+ * transactions when hpd low*/
+ allow_aux_while_hpd_low(tm_dm, display_path, false);
+ }
+
+ return connected;
+}
+
+/**
+ *****************************************************************************
+ * Function: detect_sink_caps
+ *
+ * @brief
+ * Actually does sink detection, retrieving all related capabilities.
+ * XXX: Besides updating the output structure, DCS state is updated as well.
+ * Needs to be called before the first Edid read to set up transaction mode.
+ *
+ * @param [in] display_path: Display path on which to perform detection
+ * @param [out] detection_status: Structure to hold detection-related info
+ * from current detection procedure.
+ *****************************************************************************
+ */
+static void detect_sink_caps(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path,
+ struct tm_detection_status *detection_status)
+{
+ enum signal_type detected_signal;
+ struct dcs *dcs;
+ struct display_sink_capability cur_sink_cap;
+ struct dal_context *dal_context = tm_dm->dal_context;
+
+ TM_ASSERT(detection_status != NULL);
+ TM_ASSERT(dal_display_path_get_dcs(display_path) != NULL);
+
+ dcs = dal_display_path_get_dcs(display_path);
+
+ /*Obtain signal and connectivity state*/
+ detection_status->connected = is_sink_present(tm_dm, display_path);
+ detection_status->detected_signal = dal_hw_sequencer_detect_sink(
+ tm_dm->hwss, display_path);
+
+ /*Detect MST signal (This function should be only called
+ * for MST Master path)*/
+ if (detection_status->connected &&
+ detection_status->detected_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ struct link_service *link_service;
+
+ link_service = tm_resource_mgr_find_link_service(
+ tm_dm->resource_mgr,
+ display_path,
+ SIGNAL_TYPE_DISPLAY_PORT_MST);
+
+ if (link_service != NULL &&
+ dal_ls_is_mst_network_present(link_service))
+ detection_status->detected_signal =
+ SIGNAL_TYPE_DISPLAY_PORT_MST;
+ }
+
+ /*update DCS with corresponding DDC type*/
+ detected_signal = detection_status->detected_signal;
+
+ /**If we have a passive DP-to-HDMI dongle and an EDID emulator present,
+ * we cannot detect the sink. We force it connected and do not allow
+ * it to change.
+ */
+
+ dal_dcs_set_transaction_type(dcs,
+ tm_utils_get_ddc_transaction_type(detected_signal,
+ dal_display_path_sink_signal_to_asic_signal(
+ display_path,
+ detected_signal)));
+
+ /**MST Display Path detection will be done by MST Manager.
+ * Setting this flag will skip further detection steps
+ */
+ if ((detection_status->connected &&
+ (detection_status->detected_signal
+ == SIGNAL_TYPE_DISPLAY_PORT_MST)) ||
+ (!detection_status->connected &&
+ dal_display_path_get_query_signal(
+ display_path,
+ SINK_LINK_INDEX) ==
+ SIGNAL_TYPE_DISPLAY_PORT_MST)) {
+
+ detection_status->dp_mst_detection = true;
+ if (!detection_status->connected)
+ dal_dcs_reset_sink_capability(dcs);
+ return;
+ }
+
+ /**query output sink capability when connected
+ * or if the signal is embedded
+ */
+ dal_memset(&cur_sink_cap, 0, sizeof(cur_sink_cap));
+ if (detection_status->connected
+ || dal_is_embedded_signal(detection_status->detected_signal))
+ /**update DCS with the latest sink capability
+ * this (DCS) should go directly to the real DDC
+ * to retrieve the current sink capabilities
+ */
+ dal_dcs_query_sink_capability(
+ dcs,
+ &detection_status->sink_capabilities,
+ detection_status->connected);
+
+ /**we are here only when passive dongle and edid emulator
+ * TODO: emulator is not implemented yet
+ *
+ * else if (dcs->QueryEdidEmulatorCapability(&cur_sink_cap))
+ * {
+ * detection_status->sink_capabilities = cur_sink_cap;
+ * }
+ */
+ else
+ /*clear (reported) capabilities*/
+ dal_dcs_reset_sink_capability(dcs);
+}
+
+/**
+ *****************************************************************************
+ * Function: do_target_detection
+ *
+ * @brief
+ * Does physical detection of the display on the given display path.
+ * Detection is composed of 3 steps:
+ * 1. Sink detection (not so relevant for analog displays)
+ * 2. Edid read
+ * 3. Load detection (relevant only for analog displays)
+ * The cached method is not handled in this function.
+ *
+ * @param [in] display_path: Display path on which to perform detection
+ * @param [in] destructive: true if detection method is destructive,
+ * false if non-destructive.
+ * @param [out] detection_status: Structure to hold detection-related info
+ * from current detection procedure.
+ *****************************************************************************
+ */
+static void do_target_detection(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path,
+ bool destructive,
+ struct tm_detection_status *detection_status)
+{
+ struct tm_resource *connector_rsrc;
+ struct dal_context *dal_context = tm_dm->dal_context;
+
+ TM_ASSERT(detection_status != NULL);
+
+ /** Step 1. Set appropriate DDC. Set the DDC for the real device
+ * as we're doing real detection at this point
+ */
+ connector_rsrc = tm_resource_mgr_find_resource(
+ tm_dm->resource_mgr,
+ get_connector_obj_id(display_path));
+
+ if ((connector_rsrc != NULL) &&
+ (TO_CONNECTOR_INFO(connector_rsrc)->ddc_service != NULL)) {
+
+ /** if the obtained Ddc service is different than the real and
+ * the emulated DDCs that we have stored with resource manager
+ * then the old DDC might be from MST in which case we want to
+ * leave things alone as the MST DDC comes from a different
+ * place and needs to be cleaned up differently...
+ */
+ /**TODO: ddcServiceWithEmulation is not implemented yet.
+ struct ddc_service *ddc_old_dev;
+ ddc_old_dev = dal_dcs_update_ddc(
+ dal_display_path_get_dcs(display_path),
+ TO_CONNECTOR_INFO(connector_rsrc)->ddc_service);
+ if ((ddc_old_dev != connector_rsrc->connector.ddc_service) &&
+ (ddc_old_dev != connector_rsrc->connector.
+ ddcServiceWithEmulation))
+ {
+ display_path->GetDCS()->UpdateDdcService(ddc_old_dev);
+ }
+ */
+ }
+
+ /** Step 2. Sink detection. Update all sink capabilities
+ * - signal, dongle, sink presence, etc.
+ */
+ detect_sink_caps(tm_dm, display_path, detection_status);
+
+ /** Step 3. Handle MST detection. MST discovery done by MST Manager
+ * which will eventually notify TM about connectivity change.
+ */
+ if (detection_status->dp_mst_detection)
+ return;
+
+ /** Step 4. Handle the case where sink presence not detected
+ * yet (HPD line is low)
+ */
+ if (!detection_status->connected) {
+ switch (detection_status->detected_signal) {
+ /**Display Port requires HPD high, so we force skip
+ * further steps. However, for eDP we always want to read
+ * the EDID (eDP not connected =
+ * lid closed, not related to HPD)
+ */
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ return;
+
+ /** DVI/HDMI does not require HPD high, but in hotplug
+ * context we might want to reschedule detection
+ */
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK1:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ detection_status->hpd_pin_failure = true;
+ break;
+
+ /*All the rest- continue to EDID read/load detection*/
+ default:
+ break;
+ }
+ }
+
+ /** Step 5. Edid read - updates detection status.
+ * Only read the edid if connected (HPD sense bit check), or if the
+ * runtime parameter is not set, or is set but its value is 0.
+ */
+ if (detection_status->connected
+ || !dal_adapter_service_is_feature_supported(
+ FEATURE_DETECT_REQUIRE_HPD_HIGH)) {
+ bool load_detect_need;
+ /* Do not read Edid if the HPD sense bit is low. */
+ load_detect_need = read_edid(tm_dm, display_path, destructive,
+ detection_status);
+
+ /**Step 6. Load detection (relevant only for non-DDC analog
+ * displays if analog display marked as connected, it means
+ * it has EDID)
+ */
+ if (load_detect_need) {
+ if (destructive) {
+ enum signal_type signal;
+ /*Update signal if detected. Update connection
+ * status always*/
+ signal = dal_hw_sequencer_detect_load(
+ tm_dm->hwss,
+ display_path);
+
+ if (signal != SIGNAL_TYPE_NONE)
+ detection_status->detected_signal =
+ signal;
+
+ detection_status->connected =
+ (signal != SIGNAL_TYPE_NONE);
+ } else {
+ /**In the non-destructive case, connectivity
+ * does not change, so we can return the
+ * cached value
+ */
+ detection_status->connected =
+ dal_display_path_is_target_connected(
+ display_path);
+ }
+ }
+ }
+}
+
+/**
+ *****************************************************************************
+ * Function: do_target_pre_processing
+ *
+ * @brief
+ * Before detection and emulation (if required), retrieve the current
+ * display information which we will need to compare against in
+ * do_target_post_processing
+ *
+ * @param [in] display_path: Display path on which to perform
+ * detection
+ * @param [in] detection_status: Structure to hold detection-related
+ * info from current detection procedure.
+ * @param [in/out] display_state: the previous display information
+ * before target detection and emulation.
+ *****************************************************************************
+ */
+static void do_target_pre_processing(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path,
+ struct display_state *display_state)
+{
+ const uint8_t *orig_edid;
+
+ dal_memset(display_state, 0, sizeof(struct display_state));
+
+ /*retrieve the sink capabilities*/
+ dal_dcs_get_sink_capability(dal_display_path_get_dcs(display_path),
+ &display_state->sink_cap);
+
+ /*retrieve the old Edid information*/
+ orig_edid = dal_dcs_get_edid_raw_data(
+ dal_display_path_get_dcs(display_path),
+ &display_state->edid_len);
+
+ dal_memmove(display_state->edid, orig_edid, display_state->edid_len);
+
+ /*Get old audio capabilities*/
+ display_state->audio_cap = dal_dcs_is_audio_supported(
+ dal_display_path_get_dcs(display_path));
+}
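+
+/* Editor's sketch of the intended pre/detect/post pairing (hedged; the
+ * actual orchestration lives in the detection entry points elsewhere in
+ * this file):
+ *
+ *	struct display_state prev_state;
+ *	struct tm_detection_status status = { 0 };
+ *
+ *	do_target_pre_processing(tm_dm, path, &prev_state);
+ *	do_target_detection(tm_dm, path, destructive, &status);
+ *	do_target_post_processing(tm_dm, path, &status, &prev_state);
+ */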
+
+/**
+ *****************************************************************************
+ * Function: do_target_post_processing
+ *
+ * @brief
+ * After detection and emulation (if required), updates DCS and the
+ * detection status
+ * Expects do_target_detection and doTargetEmulation to be called first
+ *
+ * @param [in] display_path: Display path on which to perform detection
+ * @param [out] detection_status: Structure to hold detection-related info
+ * from current detection procedure.
+ * @param [in] display_state: the previous display information before
+ * target detection and emulation.
+ *****************************************************************************
+ */
+static void do_target_post_processing(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path,
+ struct tm_detection_status *detection_status,
+ const struct display_state *display_state)
+{
+ uint32_t cur_edid_len;
+ uint32_t i;
+ const uint8_t *cur_edid;
+
+ /*NOTE: do we need to check if the signal type has changed?*/
+
+ /*Report capability change if sink capabilities are different*/
+ if (display_state->sink_cap.dp_link_lane_count
+ != detection_status->sink_capabilities.dp_link_lane_count
+ || display_state->sink_cap.dp_link_rate
+ != detection_status->sink_capabilities.dp_link_rate)
+ detection_status->capability_changed = true;
+
+ /**report capability change if Edid has changed
+ * NOTE: if we already noticed the capability changed due
+ * to link settings modified, no need to waste CPU
+ * clocks to see if the Edid changed just so we can
+ * set the same flag...
+ */
+ if (!detection_status->capability_changed) {
+ /**retrieve the current EDID information that DCS has and see
+ * if it's been modified during the detect/emulate process
+ */
+ cur_edid_len = 0;
+ cur_edid = dal_dcs_get_edid_raw_data(
+ dal_display_path_get_dcs(display_path), &cur_edid_len);
+
+ /**NOTE: the comparison of the Edid should be placed
+ * in a separate function that's available to everyone,
+ * instead of just having this all over the code -
+ * TO BE DONE LATER
+ * NOTE: since the EDID contains a check-sum, it might
+ * be faster to just compare the checksum instead of
+ * going through each byte...
+ * NOTE: we need to compare the Edid if the starting
+ * pointers are different (obviously if they start at
+ * the same address, it's the same buffer) or the lengths
+ * are different...
+ */
+ if (cur_edid && (cur_edid_len == display_state->edid_len)) {
+ for (i = 0; i < cur_edid_len; ++i) {
+ if (cur_edid[i] != display_state->edid[i]) {
+ detection_status->capability_changed =
+ true;
+ break;
+ }
+ }
+ } else if (cur_edid_len != display_state->edid_len) {
+ detection_status->capability_changed = true;
+ }
+ }
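+
+	/* Editor's sketch of the separate comparison helper suggested in the
+	 * NOTE above (hypothetical; assumes a dal_memcmp() service exists
+	 * alongside dal_memset()/dal_memmove()):
+	 *
+	 *	static bool edid_changed(const uint8_t *a, uint32_t a_len,
+	 *		const uint8_t *b, uint32_t b_len)
+	 *	{
+	 *		if (a_len != b_len)
+	 *			return true;
+	 *		return a != b && dal_memcmp(a, b, a_len) != 0;
+	 *	}
+	 */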
+
+ /*report if audio capabilities changed*/
+ detection_status->audio_cap_changed =
+ dal_dcs_is_audio_supported(
+ dal_display_path_get_dcs(display_path))
+ != display_state->audio_cap;
+
+ /*Update signal based on dongle*/
+ switch (detection_status->sink_capabilities.dongle_type) {
+ /**Upgrade DVI --> HDMI signal if HDMI dongle present and
+ * hdmi audio is supported on display path
+ */
+ case DISPLAY_DONGLE_DP_HDMI_DONGLE:
+ if (dal_display_path_get_properties(
+ display_path).bits.IS_HDMI_AUDIO_SUPPORTED &&
+ dal_is_dvi_signal(detection_status->detected_signal) &&
+ dal_dcs_get_connector_type(
+ dal_display_path_get_dcs(display_path)) ==
+ EDID_CONNECTOR_HDMIA)
+ detection_status->detected_signal =
+ SIGNAL_TYPE_HDMI_TYPE_A;
+ break;
+
+ case DISPLAY_DONGLE_DP_DVI_DONGLE:
+ detection_status->detected_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ *****************************************************************************
+ * Detects signal which goes out of ASIC.
+ *
+ * @param [in] display_path: Display path on which to perform detection
+ *
+ * @return
+ * Detected signal if sink connected, SIGNAL_TYPE_NONE otherwise
+ *****************************************************************************
+ */
+static enum signal_type detect_asic_signal(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path)
+{
+ struct encoder *encoder;
+ struct graphics_object_id down_stream_id;
+ enum signal_type signal = SIGNAL_TYPE_NONE;
+ struct dal_context *dal_context = tm_dm->dal_context;
+
+ TM_ASSERT(display_path != NULL);
+
+ encoder = dal_display_path_get_upstream_object(display_path,
+ ASIC_LINK_INDEX);
+
+ down_stream_id = get_connector_obj_id(display_path);
+
+ /*TODO: connector?*/
+ if (dal_display_path_get_downstream_object(display_path,
+ ASIC_LINK_INDEX) != NULL)
+ down_stream_id = dal_encoder_get_graphics_object_id(
+ dal_display_path_get_downstream_object(display_path,
+ ASIC_LINK_INDEX));
+ if (encoder != NULL)
+ if (dal_encoder_is_sink_present(encoder, down_stream_id))
+ signal = dal_encoder_detect_sink(encoder,
+ down_stream_id);
+
+ return signal;
+}
+
+/**
+ *****************************************************************************
+ * Function: reconnect_link_services
+ *
+ * @brief
+ * Connect/Disconnect LinkServices associated with the given display path
+ * based on detection status
+ *
+ * @param [in] display_path: connect/disconnect link services associated
+ * with this Display path
+ * @param [in] detection_status: connect/disconnect based on detection status
+ *****************************************************************************
+ */
+static void reconnect_link_services(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path,
+ struct tm_detection_status *detection_status)
+{
+ uint8_t i;
+ uint8_t link_count;
+ enum signal_type connect_signal;
+ enum signal_type dis_connect_signal;
+ struct link_service *link_service;
+ struct dal_context *dal_context = tm_dm->dal_context;
+
+ link_count = dal_display_path_get_number_of_links(display_path);
+ connect_signal = SIGNAL_TYPE_NONE;
+ dis_connect_signal = SIGNAL_TYPE_NONE;
+
+ if (dal_is_embedded_signal(detection_status->detected_signal)) {
+ /**For embedded displays we need to call Link Service to
+ * retrieve link capabilities regardless of lid state
+ */
+ connect_signal = detection_status->detected_signal;
+ dis_connect_signal = SIGNAL_TYPE_NONE;
+ } else {
+ /**Disconnect Link service on signal change
+ * (and of course if not connected)
+ */
+ if (!detection_status->connected
+ || dal_display_path_get_query_signal(display_path,
+ SINK_LINK_INDEX) != detection_status->detected_signal)
+ dis_connect_signal = dal_display_path_get_query_signal(
+ display_path,
+ SINK_LINK_INDEX);
+
+ if (detection_status->connected)
+ connect_signal = detection_status->detected_signal;
+ }
+
+ /*only disconnect the last link, and connect all others*/
+ if (dis_connect_signal != SIGNAL_TYPE_NONE) {
+ /*disconnect link service*/
+ /* TODO: check for a potential infinite loop */
+ for (i = link_count; i > 0; i--) {
+ link_service = tm_resource_mgr_get_link_service(
+ tm_dm->resource_mgr,
+ display_path,
+ i - 1,
+ dis_connect_signal);
+
+ TM_ASSERT(link_service != NULL);
+
+ if (i < link_count) {
+ dal_ls_connect_link(link_service, display_path,
+ tm_dm->is_blocking_detection);
+
+ if (need_handle_connection_status_based_on_sink_count(
+ tm_dm,
+ detection_status))
+ /*we need this to remove the optimization in
+ * the connect-link logic*/
+ dal_ls_invalidate_down_stream_devices(
+ link_service);
+ } else
+ dal_ls_disconnect_link(link_service);
+
+ dis_connect_signal =
+ dal_display_path_downstream_to_upstream_signal(
+ display_path,
+ dis_connect_signal,
+ i - 1);
+ }
+ }
+
+ if (connect_signal != SIGNAL_TYPE_NONE) {
+ /*connect link service*/
+ for (i = 0; i < link_count; ++i) {
+ enum signal_type current_signal;
+
+ current_signal =
+ dal_display_path_sink_signal_to_link_signal(
+ display_path, connect_signal, i);
+
+ link_service = tm_resource_mgr_get_link_service(
+ tm_dm->resource_mgr,
+ display_path,
+ i,
+ current_signal);
+
+ TM_ASSERT(link_service != NULL);
+
+ dal_ls_connect_link(link_service, display_path,
+ tm_dm->is_blocking_detection);
+ }
+ }
+}
+
+/* Interrupt related handlers */
+
+/**
+ *****************************************************************************
+ * Function: register_irq_source
+ *
+ * @brief
+ * Registers the irq source for the requested interrupt type within the
+ * given irq registration entry.
+ * Does nothing if such an irq source is already registered.
+ *
+ * @param [in] type: TM interrupt type
+ * @param [in] connector_irq: Entry within which to register the irq source
+ *****************************************************************************
+ */
+static void register_irq_source(
+ struct tm_detection_mgr *tm_dm,
+ enum tm_interrupt_type type,
+ struct tmdm_irq_registration *connector_irq)
+{
+ struct dal_context *dal_context = tm_dm->dal_context;
+ struct tmdm_irq_entry *irq_entry;
+
+ TM_ASSERT(type < TM_INTERRUPT_TYPE_COUNT);
+ TM_ASSERT(connector_irq != NULL);
+
+ irq_entry = IRQ_ENTRY(connector_irq, type);
+
+ if (irq_entry->irq_src == DAL_IRQ_SOURCE_INVALID)
+ irq_entry->irq_src = get_irq_source(tm_dm,
+ connector_irq->connector_id, type);
+
+ /**This TM_ASSERT may hit for connectors which do not have ddc/hpd line
+ * associated. Though in these cases it is valid, we still want to
+ * catch such configuration (very rare config)
+ */
+ TM_ASSERT(irq_entry->irq_src != DAL_IRQ_SOURCE_INVALID);
+}
+
+/**
+ *****************************************************************************
+ * Function: init_irq_entry
+ *
+ * @brief
+ * Initializes the irq entry. It includes a few things:
+ * 1. Caching the connector interface (till now we had only the
+ * connector object ID)
+ * 2. Registering irq sources for the relevant interrupts
+ * 3. Initializing interrupt features (like hpd filtering and hw
+ * ddc polling)
+ * This function is typically called when the first display is registered
+ * within the irq registration entry
+ *
+ * @param [in] display_path: Registered display path
+ * @param [in] connector_irq: IRQ registration entry corresponding to
+ * this display path
+ *
+ * @return
+ * true if entry successfully initialized, false otherwise
+ *****************************************************************************
+ */
+static bool init_irq_entry(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path,
+ struct tmdm_irq_registration *connector_irq)
+{
+ struct connector_feature_support features = {0};
+ struct dal_context *dal_context = tm_dm->dal_context;
+
+ TM_ASSERT(display_path != NULL);
+ TM_ASSERT(connector_irq != NULL);
+ TM_ASSERT(connector_irq->connector == NULL);
+
+ /*Obtain and cache connector*/
+ connector_irq->connector = dal_display_path_get_connector(
+ display_path);
+
+ if (connector_irq->connector == NULL) {
+ TM_ERROR("%s: no Connector on Display Path!\n", __func__);
+ return false;
+ }
+
+ dal_connector_get_features(connector_irq->connector, &features);
+
+ if (features.hpd_line != HPD_SOURCEID_UNKNOWN) {
+ /* Since connector supports HPD, register HPD IRQ source. */
+ register_irq_source(tm_dm, TM_INTERRUPT_TYPE_HOTPLUG,
+ connector_irq);
+ }
+
+ /* Setup Delayed HPD mode: On regular display path we need
+ * either HW HPD filtering or SW timer. But NOT both. */
+ if (IRQ_ENTRY_HPD(connector_irq)->irq_src != DAL_IRQ_SOURCE_INVALID) {
+ hw_hpd_filter_set(connector_irq, features.HPD_FILTERING);
+ sw_hpd_filter_set(connector_irq, !features.HPD_FILTERING);
+ }
+
+ register_irq_source(tm_dm, TM_INTERRUPT_TYPE_TIMER, connector_irq);
+
+ return true;
+}
+
+/**
+ *****************************************************************************
+ * Function: update_irq_on_connect
+ *
+ * @brief
+ * Updates interrupt state when display path becomes connected.
+ * This includes reprogramming HW for special interrupt-related features
+ * (like hpd filtering)
+ *
+ * @param [in] connector_irq: IRQ registration entry corresponding to this
+ * display path
+ *****************************************************************************
+ */
+static void update_irq_on_connect(
+ struct tm_detection_mgr *tm_dm,
+ struct tmdm_irq_registration *connector_irq)
+{
+ struct display_path *display_path = connector_irq->connected_display;
+ struct dal_context *dal_context = tm_dm->dal_context;
+
+ if (NULL == display_path) {
+ TM_ERROR("%s: 'connected_display' is NULL!\n", __func__);
+ return;
+ }
+
+ /* Setup HPD Filtering flags for next DISCONNECT interrupt. */
+ if (tm_utils_is_dp_connector(connector_irq->connector)) {
+ if (tm_utils_is_dp_asic_signal(display_path))
+ sw_hpd_filter_set(connector_irq, false);
+ else
+ sw_hpd_filter_set(connector_irq, true);
+ }
+}
+
+static bool is_active_converter(enum display_dongle_type type)
+{
+ switch (type) {
+ case DISPLAY_DONGLE_DP_VGA_CONVERTER:
+ case DISPLAY_DONGLE_DP_DVI_CONVERTER:
+ case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ *****************************************************************************
+ * Function: register_interrupt
+ *
+ * @brief
+ *	Registers the requested interrupt with the IRQ Manager.
+ *	Does nothing if such an interrupt is already registered;
+ *	therefore this function cannot be used as-is to re-register
+ *	Timer interrupts.
+ *
+ * @param [in] tm_dm:         Detection manager instance
+ * @param [in] display_path:  Display path this interrupt relates to
+ * @param [in] type:          TM interrupt type
+ * @param [in] connector_irq: Entry to register for this interrupt
+ * @param [in] timeout_ms:    Timer interval, used only for Timer interrupts
+ *****************************************************************************
+ */
+static void register_interrupt(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path,
+ enum tm_interrupt_type type,
+ struct tmdm_irq_registration *connector_irq,
+ uint32_t timeout_ms)
+{
+ enum dal_irq_source irq_src;
+ struct dal_context *dal_context = tm_dm->dal_context;
+ struct dal_interrupt_params int_params = {0};
+ struct dal_timer_interrupt_params timer_int_params = {0};
+ struct tmdm_irq_entry *irq_entry;
+
+ TM_ASSERT(type < TM_INTERRUPT_TYPE_COUNT);
+ TM_ASSERT(connector_irq != NULL);
+
+ irq_entry = IRQ_ENTRY(connector_irq, type);
+
+ irq_src = irq_entry->irq_src;
+
+ if (irq_src == DAL_IRQ_SOURCE_INVALID)
+ return;
+
+ if (DAL_INVALID_IRQ_HANDLER_IDX != irq_entry->handler_index_ref) {
+ /* We don't want to overwrite somebody's IRQ handler!
+ * If we get here, we have inconsistent state - somebody
+ * is still registered for this IRQ and we register again. */
+ TM_ERROR("%s: can not overwrite IRQ handler!\n", __func__);
+ return;
+ }
+
+ switch (type) {
+ case TM_INTERRUPT_TYPE_TIMER:
+
+ if (tmdm_get_timer_state(irq_entry) !=
+ TMDM_TMR_STATE_NOT_REGISTERED) {
+ /* We don't want multiple timers for a connector being
+ * queued at the same time. */
+ TM_HPD_IRQ("%s:Can not register timer - another already registered!\n",
+ __func__);
+ break;
+ }
+
+ timer_int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ timer_int_params.micro_sec_interval =
+ timeout_ms * MICROSECONDS_IN_MILLISECOND;
+ int_params.no_mutex_wait = false;
+ int_params.one_shot = true;/* We only have 'one shot' timers,
+ but let's make it explicit for readability.*/
+
+ /* dal_register_timer_interrupt has no return code - we assume
+ * the call is always successful. */
+ dal_register_timer_interrupt(dal_context,
+ &timer_int_params,
+ tmdm_handle_timer_interrupt,
+ connector_irq);
+
+ tmdm_set_timer_state(tm_dm, irq_entry,
+ TMDM_TMR_STATE_REGISTERED);
+
+ TM_HPD_IRQ("%s: type=%s timeout_ms=%d\n", __func__,
+ TM_DECODE_INTERRUPT_TYPE(type),
+ timeout_ms);
+ break;
+
+ case TM_INTERRUPT_TYPE_HOTPLUG: {
+ struct display_sink_capability sink_cap;
+
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source = irq_src;
+
+ /* Trigger an interrupt for both - Connect and Disconnect
+ * cases. */
+ int_params.requested_polarity = INTERRUPT_POLARITY_BOTH;
+
+ dal_dcs_get_sink_capability(
+ dal_display_path_get_dcs(display_path),
+ &sink_cap);
+
+ /* The display path target state is initialised by
+ * dal_tm_do_initial_detection() and updated by HPD Interrupt
+ * handler. */
+ if (dal_display_path_is_target_connected(display_path) ||
+ is_active_converter(sink_cap.dongle_type))
+ int_params.current_polarity = INTERRUPT_POLARITY_HIGH;
+ else
+ int_params.current_polarity = INTERRUPT_POLARITY_LOW;
+
+ int_params.no_mutex_wait = false;
+ int_params.one_shot = false;/* we'll stop it during DAL
+ unload. */
+
+ irq_entry->handler_index_ref =
+ dal_register_interrupt(dal_context,
+ &int_params,
+ tmdm_handle_hpd_interrupt,
+ connector_irq);
+
+ if (DAL_INVALID_IRQ_HANDLER_IDX !=
+ irq_entry->handler_index_ref) {
+ TM_HPD_IRQ(
+ "%s: type=%s irq_src=%d current_polarity=%s\n",
+ __func__, TM_DECODE_INTERRUPT_TYPE(type),
+ irq_src,
+ DAL_DECODE_INTERRUPT_POLARITY(
+ int_params.current_polarity));
+ } else {
+ TM_ERROR("%s: failed to register: type=%s irq_src=%d\n",
+ __func__, TM_DECODE_INTERRUPT_TYPE(type),
+ irq_src);
+ }
+ break;
+ }
+ default:
+ TM_WARNING("%s: request for unknown type of IRQ handler: %d!\n",
+ __func__, type);
+ break;
+ }
+}
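+
+/* Illustrative usage (a sketch based on the actual callers later in this
+ * file): a Timer registration supplies a timeout, e.g. to de-bounce a
+ * passive DP dongle:
+ *
+ *	register_interrupt(tm_dm, display_path, TM_INTERRUPT_TYPE_TIMER,
+ *		connector_irq, DP_PASSIVE_DONGLE_INTERVAL_IN_MS);
+ *
+ * while an HPD registration ignores the timeout parameter:
+ *
+ *	register_interrupt(tm_dm, display_path, TM_INTERRUPT_TYPE_HOTPLUG,
+ *		connector_irq, 0);
+ */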
+
+/**
+ *****************************************************************************
+ * Function: unregister_interrupt
+ *
+ * @brief
+ *	Unregisters the requested interrupt with the IRQ Manager.
+ *	Does nothing if such an interrupt is not registered.
+ *
+ * @param [in] tm_dm:         Detection manager instance
+ * @param [in] type:          TM interrupt type
+ * @param [in] connector_irq: Entry to unregister for this interrupt
+ *****************************************************************************
+ */
+static void unregister_interrupt(
+ struct tm_detection_mgr *tm_dm,
+ enum tm_interrupt_type type,
+ struct tmdm_irq_registration *connector_irq)
+{
+ struct dal_context *dal_context = tm_dm->dal_context;
+ struct tmdm_irq_entry *irq_entry = IRQ_ENTRY(connector_irq, type);
+ enum dal_irq_source irq_src = irq_entry->irq_src;
+
+ TM_ASSERT(type < TM_INTERRUPT_TYPE_COUNT);
+
+ switch (type) {
+ case TM_INTERRUPT_TYPE_TIMER:
+ /* Timer interrupt *can not* be unregistered (no API to
+ * do that).
+ * We can only change internal timer state, which will indicate
+ * to tmdm_handle_timer_interrupt() that timer was cancelled
+ * and no work should be done in timer interrupt handler. */
+ switch (irq_entry->timer_state) {
+ case TMDM_TMR_STATE_REGISTERED:
+ tmdm_set_timer_state(tm_dm, irq_entry,
+ TMDM_TMR_STATE_CANCELLED);
+ break;
+ case TMDM_TMR_STATE_NOT_REGISTERED:
+ /* do nothing */
+ break;
+ case TMDM_TMR_STATE_CANCELLED:
+ tmdm_set_timer_state(tm_dm, irq_entry,
+ TMDM_TMR_STATE_NOT_REGISTERED);
+ break;
+ default:
+ TM_WARNING("%s: invalid timer state!\n", __func__);
+ break;
+ }
+ break;
+
+ case TM_INTERRUPT_TYPE_HOTPLUG:
+ if (DAL_INVALID_IRQ_HANDLER_IDX ==
+ irq_entry->handler_index_ref) {
+ /* we can not unregister if no handler */
+ TM_WARNING("%s: HPD Interrupt has no handler!\n",
+ __func__);
+ break;
+ }
+
+ TM_HPD_IRQ("%s: type=%s irq_src=%d\n", __func__,
+ TM_DECODE_INTERRUPT_TYPE(type), irq_src);
+
+ dal_unregister_interrupt(dal_context, irq_src,
+ irq_entry->handler_index_ref);
+
+ irq_entry->handler_index_ref = DAL_INVALID_IRQ_HANDLER_IDX;
+ break;
+
+ default:
+ TM_WARNING("%s: unknown type of IRQ: %d!\n", __func__, type);
+ break;
+ }
+}
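+
+/* Timer state transitions implied by the code above (a summary only,
+ * no new behavior):
+ *
+ *	NOT_REGISTERED --register_interrupt()----> REGISTERED
+ *	REGISTERED -----unregister_interrupt()---> CANCELLED
+ *	CANCELLED ------unregister_interrupt()---> NOT_REGISTERED
+ *
+ * tmdm_handle_timer_interrupt() performs work only in the REGISTERED
+ * state; a CANCELLED timer is ignored when it fires.
+ */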
+
+static void hpd_notify(struct tmdm_irq_registration *connector_irq)
+{
+ struct tm_detection_mgr *tm_dm = connector_irq->detection_mgr;
+ struct dal_context *dal_context = tm_dm->dal_context;
+
+ if (connector_irq->connected_display != NULL) {
+ TM_HPD_IRQ("%s: connected_display != NULL\n", __func__);
+
+ /* should be a Disconnect event on a connected path. */
+ tm_handle_hpd_event(tm_dm->tm_hpd_callback,
+ connector_irq->connected_display);
+ } else {
+ uint8_t i;
+
+ TM_HPD_IRQ("%s: connected_display == NULL\n", __func__);
+
+ /* should be a Connect event on disconnected path. */
+ for (i = 0; i < connector_irq->displays_num; ++i) {
+ if (TM_RESULT_DISPLAY_CONNECTED == tm_handle_hpd_event(
+ tm_dm->tm_hpd_callback,
+ connector_irq->displays[i])) {
+ /* A path got connected. We can stop, since it
+ * means connectivity changed. */
+ break;
+ }
+ } /*for () */
+ }
+}
+
+/*****************************************************************************
+ * Handles the Timer interrupt - notifies that connectivity changed.
+ *
+ * If this is a connect event on a disconnected path,
+ * a notification will be sent for all registered display paths until
+ * a connected path is found.
+ *
+ * If this is a disconnect event on a connected path,
+ * a notification will be sent only for the connected path.
+ *****************************************************************************/
+static void tmdm_handle_timer_interrupt(void *interrupt_params)
+{
+ struct tmdm_irq_registration *connector_irq = interrupt_params;
+ struct tmdm_irq_entry *irq_entry = IRQ_ENTRY_TIMER(connector_irq);
+ struct tm_detection_mgr *tm_dm = connector_irq->detection_mgr;
+ struct dal_context *dal_context = tm_dm->dal_context;
+
+ TM_HPD_IRQ("%s: timer state: %s\n", __func__,
+ TMDM_DECODE_TIMER_STATE(tmdm_get_timer_state(irq_entry)));
+
+ if (TMDM_TMR_STATE_REGISTERED != tmdm_get_timer_state(irq_entry)) {
+ /* Timer was cancelled while it was in timer queue.
+ * No work should be done by the timer. */
+ return;
+ }
+
+ hpd_notify(connector_irq);
+
+ /* Note that we allow for next timer only at the very end of
+ * handling of this timer. */
+ unregister_interrupt(tm_dm, TM_INTERRUPT_TYPE_TIMER, connector_irq);
+}
+
+/* Passive DP-to-HDMI or DP-to-DVI dongles may introduce instability on
+ * HPD Pin. */
+static bool dp_passive_dongle_hpd_workaround(
+ struct tmdm_irq_registration *connector_irq)
+{
+ struct tm_detection_mgr *tm_dm = connector_irq->detection_mgr;
+ struct dal_context *dal_context = tm_dm->dal_context;
+ struct display_path *display_path = connector_irq->displays[0];
+ bool schedule_timer = sw_hpd_filter_get(connector_irq);
+
+ if (connector_irq->connected_display == NULL
+ && tm_utils_is_dp_connector(connector_irq->connector)) {
+ enum signal_type asic_signal;
+ /* DP *connector*. Sleep 50ms before detection because of noise
+ * on DDC pin.
+ * Note that MST Long/Short pulse interrupt is handled by
+ * DP Link Service, not here. */
+ dal_sleep_in_milliseconds(50);
+
+ /* We are disconnected (because connected_display == NULL).
+ * That means we are handling a connect event.
+ * DP mutable path cannot rely on sw_hpd_filter_get().
+ * Instead we need to detect ASIC signal and decide
+ * based on it. */
+ asic_signal = detect_asic_signal(tm_dm, display_path);
+
+ TM_HPD_IRQ("%s: Connector=DP, ASIC signal=%s\n", __func__,
+ tm_utils_signal_type_to_str(asic_signal));
+
+ if (dal_is_dp_signal(asic_signal) == false) {
+ /* DP Connector but non-DP Signal - Passive dongle
+ * case. */
+ schedule_timer = true;
+ } else {
+ /* Both Connector and Signal are DP. */
+ schedule_timer = false;
+ }
+ }
+
+ TM_HPD_IRQ("%s: schedule de-bouncing timer: %s\n", __func__,
+ (schedule_timer == true ? "true" : "false"));
+
+ return schedule_timer;
+}
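+
+/* Decision summary for the workaround above (derived from the code, not
+ * a separate specification):
+ *
+ *	Connector  ASIC signal  Path state    schedule de-bounce timer
+ *	---------  -----------  ------------  ------------------------
+ *	DP         non-DP       disconnected  true (passive dongle)
+ *	DP         DP           disconnected  false
+ *	any other combination                 sw_hpd_filter_get()
+ */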
+
+/**
+ *****************************************************************************
+ * Handles the HotPlug interrupt - either reschedules the de-bounce timer
+ * or notifies that connectivity changed.
+ *
+ * @param [in] interrupt_params: the tmdm_irq_registration entry
+ *             corresponding to the interrupt that occurred
+ *****************************************************************************
+ */
+static void tmdm_handle_hpd_interrupt(void *interrupt_params)
+{
+ struct tmdm_irq_registration *connector_irq = interrupt_params;
+ struct tm_detection_mgr *tm_dm = connector_irq->detection_mgr;
+ struct dal_context *dal_context = tm_dm->dal_context;
+ struct display_path *display_path = connector_irq->displays[0];
+
+ TM_INFO("HPD interrupt: irq_src=%d\n",
+ IRQ_ENTRY_HPD(connector_irq)->irq_src);
+
+ if (dp_passive_dongle_hpd_workaround(connector_irq)) {
+ register_interrupt(tm_dm, display_path,
+ TM_INTERRUPT_TYPE_TIMER,
+ connector_irq,
+ DP_PASSIVE_DONGLE_INTERVAL_IN_MS);
+ } else {
+ /* notify without delay */
+ hpd_notify(connector_irq);
+ }
+}
+
+/*****************************************************************************
+ * Public functions
+ *****************************************************************************/
+
+struct tm_detection_mgr *dal_tm_detection_mgr_create(
+ struct tm_detection_mgr_init_data *init_data)
+{
+ struct tm_detection_mgr *tm_dm;
+
+ if (init_data->as == NULL || init_data->hwss == NULL
+ || init_data->resource_mgr == NULL
+ || init_data->tm == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ tm_dm = dal_alloc(sizeof(*tm_dm));
+
+ if (tm_dm == NULL)
+ return NULL;
+
+ if (construct(tm_dm, init_data))
+ return tm_dm;
+ dal_free(tm_dm);
+ return NULL;
+}
+
+void dal_tm_detection_mgr_destroy(
+ struct tm_detection_mgr **tm_dm)
+{
+ if (!tm_dm || !*tm_dm) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ destruct(*tm_dm);
+
+ dal_free(*tm_dm);
+ *tm_dm = NULL;
+}
+
+void dal_tm_detection_mgr_init_hw(
+ struct tm_detection_mgr *tm_dm)
+{
+ uint8_t i;
+ struct tmdm_irq_registration *connector_irq;
+ struct dal_context *dal_context = tm_dm->dal_context;
+
+ for (i = 0; i < tm_dm->irq_registrations_num; ++i) {
+ connector_irq = &tm_dm->connector_irq_regsitrations[i];
+ if (connector_irq->connector == NULL) {
+			/* This situation is possible if the number of
+			 * connectors is greater than the number of display
+			 * paths. In other words, creation of a display path
+			 * failed for a connector. */
+ TM_WARNING("%s: 'connector_irq->connector' is NULL!\n",
+ __func__);
+ continue;
+ }
+
+		/* Set up the initial state of HW. */
+ if (hw_hpd_filter_get(connector_irq)) {
+ dal_connector_program_hpd_filter(
+ connector_irq->connector,
+ DELAY_ON_CONNECT_IN_MS,
+ DELAY_ON_DISCONNECT_IN_MS);
+ } else {
+ dal_connector_program_hpd_filter(
+ connector_irq->connector, 0, 0);
+ }
+
+		/* Update HW state based on the current connectivity state. */
+ if (connector_irq->connected_display != NULL)
+ update_irq_on_connect(tm_dm, connector_irq);
+ }
+}
+
+void dal_tm_detection_mgr_release_hw(
+ struct tm_detection_mgr *tm_dm)
+{
+ uint8_t i;
+
+ if (tm_dm->connector_irq_regsitrations == NULL)
+ return;
+
+	for (i = 0; i < tm_dm->irq_registrations_num; ++i) {
+		enum tm_interrupt_type irq_type;
+
+		for (irq_type = TM_INTERRUPT_TYPE_TIMER;
+			irq_type < TM_INTERRUPT_TYPE_COUNT;
+			++irq_type) {
+			unregister_interrupt(tm_dm, irq_type,
+				&tm_dm->connector_irq_regsitrations[i]);
+		}
+	}
+}
+
+bool dal_tm_detection_mgr_detect_display(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path,
+ enum tm_detection_method method,
+ struct tm_detection_status *detection_status)
+{
+ bool destructive;
+ bool detect_performed = false;
+ struct display_state original_info;
+
+ destructive = tm_utils_is_destructive_method(method);
+
+ if (display_path == NULL || detection_status == NULL)
+ return detect_performed;
+
+ detection_status->detected_signal = dal_display_path_get_query_signal(
+ display_path, SINK_LINK_INDEX);
+
+ detection_status->connected = dal_display_path_is_target_connected(
+ display_path);
+
+	/**Do physical detection, or handle the case when no physical
+	 * detection should be done. In both cases detection_status will
+	 * have valid info.
+	 * The detection procedure does not change display path state
+	 * (it can only update the DCS).
+	 */
+ if (handle_skipping_detection(tm_dm, display_path, method,
+ detection_status)) {
+ /* no need to do physical detection */
+ return detect_performed;
+ }
+
+ if (!tm_resource_mgr_acquire_resources(tm_dm->resource_mgr,
+ display_path, TM_ACQUIRE_METHOD_SW))
+ return detect_performed;
+
+	/**Step 1: retrieve the current sink capabilities and EDID.
+	 * We need them later on to see if the information changed
+	 * after detection.
+	 */
+ do_target_pre_processing(tm_dm, display_path, &original_info);
+
+	/* Step 2: perform real detection. */
+ do_target_detection(tm_dm, display_path, destructive, detection_status);
+
+ /**Step 3: perform emulation if needed
+ * do_target_emulation(display_path, destructive,
+ * detection_status);
+ */
+
+ /**Step 4: compare to previous information (sink/Edid)
+ * and finalize DCS and detection information
+ */
+ do_target_post_processing(tm_dm, display_path, detection_status,
+ &original_info);
+
+	/* Final step: connect/disconnect link services. */
+ reconnect_link_services(tm_dm, display_path, detection_status);
+
+ detect_performed = true;
+
+	/* After reconnect_link_services() we need to consider the case
+	 * where the downstream sink count is 0, i.e. not report
+	 * 'connected' to the OS and not update the internal mode list
+	 * in post-target detection.
+	 */
+ if (need_handle_connection_status_based_on_sink_count(tm_dm,
+ detection_status))
+ detection_status->connected = false;
+
+	/* Revert to cached state if this is MST detection. */
+ if (detection_status->dp_mst_detection) {
+ detection_status->detected_signal =
+ dal_display_path_get_query_signal(display_path,
+ SINK_LINK_INDEX);
+ detection_status->connected =
+ dal_display_path_is_target_connected(
+ display_path);
+ }
+
+ tm_resource_mgr_release_resources(tm_dm->resource_mgr, display_path,
+ TM_ACQUIRE_METHOD_SW);
+
+ return detect_performed;
+}
+
+bool dal_tm_detection_mgr_retreive_sink_info(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path,
+ enum tm_detection_method method,
+ struct tm_detection_status *detection_status)
+{
+ struct display_state original_info;
+ bool destructive = tm_utils_is_destructive_method(method);
+ bool detect_performed = false;
+
+ if (display_path == NULL || detection_status == NULL)
+ return false;
+
+ if (method != DETECTION_METHOD_CACHED) {
+		/**Retrieve the current sink capabilities and EDID.
+		 * We need the EDID information to compare it with the
+		 * EDID after it is read, to see if anything changed.
+		 * NOTE: the extra work with
+		 * do_target_pre_processing/do_target_post_processing
+		 * is needed because we currently don't have an EDID
+		 * class that can provide comparison capability.
+		 * NOTE: do_target_pre_processing/do_target_post_processing
+		 * do a bit more checking than just comparing the EDID,
+		 * but the other information should not change, so it
+		 * shouldn't come into play.
+		 */
+
+ do_target_pre_processing(tm_dm, display_path, &original_info);
+
+ dal_dcs_query_sink_capability(
+ dal_display_path_get_dcs(display_path),
+ &detection_status->sink_capabilities, 0);
+
+ read_edid(tm_dm, display_path, destructive, detection_status);
+
+		/**Compare to the previous information (sink/EDID) - in
+		 * this case we're only interested in the EDID.
+		 */
+ do_target_post_processing(
+ tm_dm,
+ display_path,
+ detection_status,
+ &original_info);
+
+ detect_performed = true;
+ } else
+ dal_dcs_get_sink_capability(
+ dal_display_path_get_dcs(display_path),
+ &detection_status->sink_capabilities);
+
+ return detect_performed;
+}
+
+void dal_tm_detection_mgr_reschedule_detection(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path,
+ bool reschedule)
+{
+ struct tmdm_irq_registration *connector_irq;
+
+ if (display_path == NULL)
+ return;
+
+ connector_irq = get_irq_entry(tm_dm,
+ get_connector_obj_id(display_path));
+
+ if (connector_irq == NULL || connector_irq->displays_num == 0)
+ return;
+
+ unregister_interrupt(tm_dm, TM_INTERRUPT_TYPE_TIMER, connector_irq);
+
+ if (!reschedule) {
+ connector_irq->resched_count = 0;
+ return;
+ }
+
+ if (connector_irq->resched_count >= NUM_OF_DETECTION_RETRIES) {
+ connector_irq->resched_count = 0;
+ return;
+ }
+
+ register_interrupt(tm_dm, display_path, TM_INTERRUPT_TYPE_TIMER,
+ connector_irq, RESCHED_TIMER_INTERVAL_IN_MS);
+
+ connector_irq->resched_count++;
+}
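+
+/* Net effect of the function above: each call cancels any pending timer;
+ * with reschedule == true a new one-shot timer is queued, up to
+ * NUM_OF_DETECTION_RETRIES times in a row, RESCHED_TIMER_INTERVAL_IN_MS
+ * apart. */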
+
+/* IRQ Source registration and state update methods.
+ *
+ * This function will *not* register for HPD Interrupt.
+ * To register for HPD Interrupt call dal_tm_detection_mgr_register_hpd_irq().
+ */
+bool dal_tm_detection_mgr_register_display(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path)
+{
+ struct tmdm_irq_registration *connector_irq;
+ struct dal_context *dal_context = tm_dm->dal_context;
+
+ if (display_path == NULL) {
+ TM_ERROR("%s: 'display_path' is NULL!\n", __func__);
+ return false;
+ }
+
+ /* Obtain irq registration entry*/
+ connector_irq = get_irq_entry(tm_dm,
+ get_connector_obj_id(display_path));
+
+ if (connector_irq == NULL) {
+ TM_ERROR("%s: 'connector_irq' was not found!\n", __func__);
+ return false;
+ }
+
+	/* On the first registration the connector object is not cached -
+	 * we need to initialize the entry. */
+ if (connector_irq->connector == NULL)
+ if (!init_irq_entry(tm_dm, display_path, connector_irq))
+ return false;
+
+	/* We exceeded the maximum allowed display registrations per entry. */
+ if (connector_irq->displays_num >= MAX_NUM_OF_PATHS_PER_CONNECTOR) {
+ TM_WARNING("%s: exceeded maximum registrations!\n", __func__);
+ return false;
+ }
+	/* Update SW delay request used in processing of the HPD interrupt.
+	 *
+	 * Order of evaluation is important: evaluate the DP *connector*
+	 * first, as the one with a mutable DisplayPort signal. */
+ if (IRQ_ENTRY_HPD(connector_irq)->irq_src != DAL_IRQ_SOURCE_INVALID) {
+
+ if (tm_utils_is_dp_connector(connector_irq->connector)) {
+			/* Case 1: mutable Display Port path.
+			 *
+			 * By mutable we mean the possibility of having a
+			 * passive DP->HDMI dongle.
+			 * Start with no delay. */
+
+ sw_hpd_filter_set(connector_irq, false);
+			/* Note: we START with a disconnected path, so
+			 * schedule_hpd_timer can't be valid for this option */
+
+ } else if (tm_utils_is_dp_asic_signal(display_path)) {
+			/* Case 2: immutable Display Port path.
+			 *
+			 * Make sure it is not overriding existing settings
+			 * with higher priority.
+			 * By immutable we mean that the ASIC signal will
+			 * *always* be DP because there is an Active Converter
+			 * on the board, which delivers the correct signal
+			 * to the non-DP Connector. */
+ sw_hpd_filter_set(connector_irq, false);
+ hw_hpd_filter_set(connector_irq, false);
+ }
+ }
+
+ /* Add display to notification list */
+ connector_irq->displays[connector_irq->displays_num] = display_path;
+ connector_irq->displays_num++;
+
+ return true;
+}
+
+bool dal_tm_detection_mgr_register_hpd_irq(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path)
+{
+ struct tmdm_irq_registration *connector_irq;
+ struct dal_context *dal_context = tm_dm->dal_context;
+
+ if (display_path == NULL) {
+ TM_ERROR("%s: 'display_path' is NULL!\n", __func__);
+ return false;
+ }
+
+ connector_irq = get_irq_entry(tm_dm,
+ get_connector_obj_id(display_path));
+
+ if (connector_irq == NULL) {
+ TM_ERROR("%s: 'connector_irq' was not found!\n", __func__);
+ return false;
+ }
+
+ if (connector_irq->connector == NULL) {
+ TM_ERROR("%s: 'connector' is NULL!\n", __func__);
+ return false;
+ }
+
+ register_interrupt(tm_dm, display_path, TM_INTERRUPT_TYPE_HOTPLUG,
+ connector_irq, 0);
+
+ return true;
+}
+
+void dal_tm_detection_mgr_update_active_state(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path)
+{
+ struct tmdm_irq_registration *connector_irq;
+ bool display_registered = false;
+ uint8_t i;
+
+ if (display_path == NULL)
+ return;
+
+ connector_irq = get_irq_entry(tm_dm,
+ get_connector_obj_id(display_path));
+
+ if (connector_irq == NULL || connector_irq->displays_num == 0)
+ return;
+
+	/* Only handle a registered display path here. */
+	for (i = 0; i < connector_irq->displays_num; ++i) {
+		/* TODO: check connector id only. */
+		if (dal_graphics_object_id_is_equal(
+				get_connector_obj_id(
+						connector_irq->displays[i]),
+				get_connector_obj_id(display_path))) {
+			display_registered = true;
+			break;
+		}
+	}
+
+	if (!display_registered)
+		/* Skip handling because the display path is not registered */
+		return;
+
+ if (dal_display_path_is_target_connected(display_path)) {
+ connector_irq->connected_display = display_path;
+ update_irq_on_connect(tm_dm, connector_irq);
+ } else {
+ connector_irq->connected_display = NULL;
+ }
+}
+
+void dal_tm_detection_mgr_set_blocking_detection(
+ struct tm_detection_mgr *tm_dm,
+ bool blocking)
+{
+ tm_dm->is_blocking_detection = blocking;
+}
+
+bool dal_tm_detection_mgr_is_blocking_detection(
+ struct tm_detection_mgr *tm_dm)
+{
+ return tm_dm->is_blocking_detection;
+}
+
+void dal_tm_detection_mgr_set_blocking_interrupts(
+ struct tm_detection_mgr *tm_dm,
+ bool blocking)
+{
+ tm_dm->is_blocking_interrupts = blocking;
+}
+
+bool dal_tm_detection_mgr_is_blocking_interrupts(
+ struct tm_detection_mgr *tm_dm)
+{
+ return tm_dm->is_blocking_interrupts;
+}
+
+/******************************************************************************
+ * Check if the monitor patch flag is set.
+ * If yes, then we program HPD filter with the delay that is defined in
+ * the patch.
+ * If no, then we program HPD filter to default value.
+ *
+ * TODO: check whether we should handle active converter sinkcount info?
+ *****************************************************************************/
+void dal_tm_detection_mgr_program_hpd_filter(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path)
+{
+ struct connector *connector;
+ struct dcs *dcs;
+ uint32_t delay_on_disconnect_in_ms = DELAY_ON_DISCONNECT_IN_MS;
+ const struct monitor_patch_info *mon_patch_info;
+ struct dal_context *dal_context = tm_dm->dal_context;
+ uint32_t display_ind;
+
+ connector = dal_display_path_get_connector(display_path);
+ dcs = dal_display_path_get_dcs(display_path);
+ display_ind = dal_display_path_get_display_index(display_path);
+
+ /* Get HPD disconnect delay value from the monitor patch. */
+ mon_patch_info = dal_dcs_get_monitor_patch_info(dcs,
+ MONITOR_PATCH_TYPE_EXTRA_DELAY_ON_DISCONNECT);
+
+ if (NULL != mon_patch_info) {
+ delay_on_disconnect_in_ms = mon_patch_info->param;
+
+		TM_HPD_IRQ(
+			"%s: Path[%d]: 'patch delay' on disconnect: %d ms\n",
+			__func__, display_ind, delay_on_disconnect_in_ms);
+	} else {
+		TM_HPD_IRQ(
+			"%s: Path[%d]: 'default delay' on disconnect: %d ms\n",
+			__func__, display_ind, delay_on_disconnect_in_ms);
+ }
+
+ dal_connector_program_hpd_filter(connector, DELAY_ON_CONNECT_IN_MS,
+ delay_on_disconnect_in_ms);
+}
diff --git a/drivers/gpu/drm/amd/dal/topology/tm_detection_mgr.h b/drivers/gpu/drm/amd/dal/topology/tm_detection_mgr.h
new file mode 100644
index 000000000000..4e4d9e86960d
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/topology/tm_detection_mgr.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_TM_DETECTION_MGR_H__
+#define __DAL_TM_DETECTION_MGR_H__
+
+/* External includes */
+
+#include "include/irq_types.h"
+#include "include/topology_mgr_interface.h"
+#include "include/display_path_interface.h"
+
+/* Internal includes */
+#include "tm_internal_types.h"
+#include "tm_resource_mgr.h"
+
+/**
+ *****************************************************************************
+ * TMDetectionMgr
+ *
+ * @brief
+ *	TMDetectionMgr is responsible for performing detection of displays
+ *	and handling display interrupts.
+ *	Detection includes detecting sink presence, detecting the sink
+ *	signal, reading the EDID and reading sink capabilities.
+ *	Detection does NOT include updating display path state.
+ *
+ *****************************************************************************
+ */
+
+/* forward declarations */
+struct dal_context;
+struct tm_detection_mgr;
+
+struct tm_detection_mgr_init_data {
+ struct dal_context *dal_context;
+ struct adapter_service *as;
+ struct hw_sequencer *hwss;
+ struct tm_resource_mgr *resource_mgr;
+ struct topology_mgr *tm;
+};
+
+struct tm_detection_status {
+ /* currently detected signal */
+ enum signal_type detected_signal;
+
+ struct display_sink_capability sink_capabilities;
+
+ /* set when no connectivity change, but capabilities changed
+ * (EDID, SinkCap, etc.) */
+ bool capability_changed;
+ /* status indicating if the monitor ID changed -
+ * only valid when monitor connected */
+ bool monitor_changed;
+ /* currently detected connectivity state */
+ bool connected;
+ /* DpMst signal was detected (including
+ * transitions DpMst-->Dp and Dp-->DpMst) */
+ bool dp_mst_detection;
+ /* HPD line is low though EDID might be detected */
+ bool hpd_pin_failure;
+ /* audio capabilities changed */
+ bool audio_cap_changed;
+};
+
+
+/**tm_detection will be exposed to Topology only.
+ * Functions exposed to Topology are declared in tm_detection_mgr.h.
+ * Functions used only within tm_detection are static and are not
+ * listed in tm_detection_mgr.h.
+ */
+
+/*TODO: how to handle this
+ * void tm_detection_handle_interrupt(
+ * struct tm_detection_mgr *tm_dm,
+ * InterruptInfo* pInterruptInfo);
+ */
+
+struct tm_detection_mgr *dal_tm_detection_mgr_create(
+ struct tm_detection_mgr_init_data *init_data);
+
+void dal_tm_detection_mgr_destroy(struct tm_detection_mgr **tm_dm);
+
+void dal_tm_detection_mgr_release_hw(struct tm_detection_mgr *tm_dm);
+
+/* Detection methods */
+bool dal_tm_detection_mgr_detect_display(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path,
+ enum tm_detection_method method,
+ struct tm_detection_status *detection_status);
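+
+/* A minimal caller sketch (illustrative only; the real callers live in
+ * the Topology code). 'method' stands for any enum tm_detection_method
+ * value, e.g. DETECTION_METHOD_CACHED:
+ *
+ *	struct tm_detection_status status = {0};
+ *
+ *	if (dal_tm_detection_mgr_detect_display(tm_dm, path, method,
+ *			&status) && status.connected) {
+ *		... status.detected_signal and status.sink_capabilities
+ *		    describe the newly detected sink ...
+ *	}
+ */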
+
+bool dal_tm_detection_mgr_retreive_sink_info(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path,
+ enum tm_detection_method method,
+ struct tm_detection_status *detection_status);
+
+void dal_tm_detection_mgr_reschedule_detection(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path,
+ bool reschedule);
+
+/* IRQ registration and state update methods */
+bool dal_tm_detection_mgr_register_display(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path);
+
+bool dal_tm_detection_mgr_register_hpd_irq(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path);
+
+void dal_tm_detection_mgr_update_active_state(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path);
+
+void dal_tm_detection_mgr_init_hw(struct tm_detection_mgr *tm_dm);
+
+void dal_tm_detection_mgr_set_blocking_detection(
+ struct tm_detection_mgr *tm_dm,
+ bool blocking);
+
+bool dal_tm_detection_mgr_is_blocking_detection(
+ struct tm_detection_mgr *tm_dm);
+
+void dal_tm_detection_mgr_set_blocking_interrupts(
+ struct tm_detection_mgr *tm_dm,
+ bool blocking);
+
+bool dal_tm_detection_mgr_is_blocking_interrupts(
+ struct tm_detection_mgr *tm_dm);
+
+void dal_tm_detection_mgr_program_hpd_filter(
+ struct tm_detection_mgr *tm_dm,
+ struct display_path *display_path);
+
+#endif /* __DAL_TM_DETECTION_MGR_H__ */
diff --git a/drivers/gpu/drm/amd/dal/topology/tm_display_path_set.c b/drivers/gpu/drm/amd/dal/topology/tm_display_path_set.c
new file mode 100644
index 000000000000..841912d58c7d
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/topology/tm_display_path_set.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dal_services.h"
+
+#include "include/vector.h"
+
+#include "include/display_path_set_interface.h"
+#include "tm_utils.h"
+#include "tm_internal_types.h"
+
+struct display_path_set {
+ struct dal_context *dal_context;
+ struct vector *display_paths;
+};
+
+DAL_VECTOR_APPEND(display_path_set, struct display_path **);
+DAL_VECTOR_AT_INDEX(display_path_set, struct display_path **);
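+
+/* The two macros above are assumed to expand into type-safe wrappers
+ * around the generic dal_vector operations, roughly:
+ *
+ *	bool display_path_set_vector_append(struct vector *v,
+ *		struct display_path **item);
+ *	struct display_path **display_path_set_vector_at_index(
+ *		struct vector *v, uint32_t index);
+ *
+ * The signatures are a sketch inferred from the call sites below, not
+ * taken from the vector header. */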
+
+static bool dps_construct(struct display_path_set *dps,
+ struct display_path_set_init_data *init_data)
+{
+ struct dal_context *dal_context = init_data->dal_context;
+ uint32_t display_path_num = init_data->display_path_num;
+
+ dps->display_paths = dal_vector_create(display_path_num,
+ sizeof(struct display_path *));
+
+ if (NULL == dps->display_paths) {
+ TM_ERROR("%s: failed to allocate storage for %d paths!\n",
+ __func__, display_path_num);
+ return false;
+ }
+
+ return true;
+}
+
+struct display_path_set *dal_display_path_set_create(
+ struct display_path_set_init_data *init_data)
+{
+ struct display_path_set *dps;
+ struct dal_context *dal_context = init_data->dal_context;
+
+ dps = dal_alloc(sizeof(*dps));
+
+ if (NULL == dps) {
+		TM_ERROR("%s: failed to allocate Display Path Set!\n",
+				__func__);
+ return NULL;
+ }
+
+ if (false == dps_construct(dps, init_data)) {
+ dal_free(dps);
+ return NULL;
+ }
+
+ return dps;
+}
+
+static void destruct(struct display_path_set *dps)
+{
+ uint32_t count;
+ uint32_t i;
+
+ count = dal_vector_get_count(dps->display_paths);
+
+ for (i = 0; i < count; ++i) {
+ struct display_path **dp = dal_vector_at_index(
+ dps->display_paths,
+ i);
+ dal_display_path_destroy(dp);
+ }
+ dal_vector_destroy(&dps->display_paths);
+}
+
+void dal_display_path_set_destroy(
+ struct display_path_set **ptr)
+{
+ if (!ptr || !*ptr)
+ return;
+
+ destruct(*ptr);
+
+ dal_free(*ptr);
+ *ptr = NULL;
+}
+
+bool dal_display_path_set_add_path(
+ struct display_path_set *dps,
+ struct display_path *display_path)
+{
+	/* Clone, so the set owns its own copy of the path. */
+	display_path = dal_display_path_clone(display_path, false);
+
+	if (display_path == NULL)
+		return false;
+
+	return display_path_set_vector_append(
+		dps->display_paths,
+		&display_path);
+}
+
+struct display_path *dal_display_path_set_path_at_index(
+ struct display_path_set *dps,
+ uint32_t index)
+{
+ struct display_path **display_path =
+ display_path_set_vector_at_index(dps->display_paths, index);
+
+ if (display_path)
+ return *display_path;
+
+ return NULL;
+}
+
+struct display_path *dal_display_path_set_index_to_path(
+ struct display_path_set *dps,
+ uint32_t display_index)
+{
+ uint32_t capacity;
+ uint32_t i;
+ struct display_path *display_path;
+
+ capacity = dal_vector_get_count(dps->display_paths);
+
+ for (i = 0; i < capacity; i++) {
+
+ display_path = *display_path_set_vector_at_index(
+ dps->display_paths, i);
+
+ if (NULL == display_path)
+ continue;
+
+ if (display_index == dal_display_path_get_display_index(
+ display_path)) {
+ /* found it */
+ return display_path;
+ }
+ }
+
+ return NULL;
+}
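+
+/* Typical life cycle of a display_path_set (a sketch; the init_data
+ * field names are taken from dps_construct() above):
+ *
+ *	struct display_path_set_init_data init_data = {
+ *		.dal_context = dal_context,
+ *		.display_path_num = num_of_paths,
+ *	};
+ *	struct display_path_set *dps =
+ *		dal_display_path_set_create(&init_data);
+ *
+ *	if (dps != NULL) {
+ *		dal_display_path_set_add_path(dps, path);
+ *		dal_display_path_set_destroy(&dps);
+ *	}
+ */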
diff --git a/drivers/gpu/drm/amd/dal/topology/tm_internal_types.h b/drivers/gpu/drm/amd/dal/topology/tm_internal_types.h
new file mode 100644
index 000000000000..3c46cafaf587
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/topology/tm_internal_types.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/**
+ * \file tm_internal_types.h
+ *
+ * \brief Internal types for use inside of Topology Manager.
+ */
+
+#ifndef __DAL_TM_INTERNAL_TYPES_H__
+#define __DAL_TM_INTERNAL_TYPES_H__
+
+#include "include/display_path_types.h"
+#include "include/grph_object_ctrl_defs.h"
+#include "include/timing_service_interface.h"
+#include "include/logger_interface.h"
+#include "include/ddc_service_interface.h"
+
+
+/*****************
+ Debug facilities
+******************/
+
+#define TM_BREAK_TO_DEBUGGER() /*ASSERT_CRITICAL(0)*/
+
+/* debugging macro definitions */
+#define TM_IFACE_TRACE() \
+ dal_logger_write(dal_context->logger, \
+ LOG_MAJOR_TM, LOG_MINOR_TM_IFACE_TRACE, \
+ "%s():line:%d\n", __func__, __LINE__)
+
+#define TM_RESOURCES(...) \
+ dal_logger_write(dal_context->logger, \
+ LOG_MAJOR_TM, LOG_MINOR_TM_RESOURCES, __VA_ARGS__)
+
+#define TM_ENCODER_CTL(...) \
+ dal_logger_write(dal_context->logger, \
+ LOG_MAJOR_TM, LOG_MINOR_TM_ENCODER_CTL, __VA_ARGS__)
+
+#define TM_ENG_ASN(...) \
+ dal_logger_write(dal_context->logger, \
+ LOG_MAJOR_TM, LOG_MINOR_TM_ENG_ASN, __VA_ARGS__)
+
+#define TM_CONTROLLER_ASN(...) \
+ dal_logger_write(dal_context->logger, \
+ LOG_MAJOR_TM, LOG_MINOR_TM_CONTROLLER_ASN, __VA_ARGS__)
+
+#define TM_PWR_GATING(...) \
+ dal_logger_write(dal_context->logger, \
+ LOG_MAJOR_TM, LOG_MINOR_TM_PWR_GATING, __VA_ARGS__)
+
+#define TM_BUILD_DSP_PATH(...) \
+ dal_logger_write(dal_context->logger, \
+ LOG_MAJOR_TM, LOG_MINOR_TM_BUILD_DSP_PATH, __VA_ARGS__)
+
+#define TM_INFO(...) \
+ dal_logger_write(dal_context->logger, \
+ LOG_MAJOR_TM, LOG_MINOR_TM_INFO, __VA_ARGS__)
+
+#define TM_DISPLAY_DETECT(...) \
+ dal_logger_write(dal_context->logger, \
+ LOG_MAJOR_TM, LOG_MINOR_TM_DISPLAY_DETECT, __VA_ARGS__)
+
+#define TM_LINK_SRV(...) \
+ dal_logger_write(dal_context->logger, \
+ LOG_MAJOR_TM, LOG_MINOR_TM_LINK_SRV, __VA_ARGS__)
+
+#define TM_COFUNC_PATH(...) \
+ dal_logger_write(dal_context->logger, \
+ LOG_MAJOR_TM, LOG_MINOR_TM_COFUNC_PATH, __VA_ARGS__)
+
+#define TM_HPD_IRQ(...) \
+ dal_logger_write(dal_context->logger, \
+ LOG_MAJOR_HW_TRACE, LOG_MINOR_HW_TRACE_INTERRUPT, __VA_ARGS__)
+
+#define TM_MPO(...) \
+ dal_logger_write(dal_context->logger, \
+ LOG_MAJOR_HW_TRACE, LOG_MINOR_HW_TRACE_MPO, __VA_ARGS__)
+
+#define TM_NOT_IMPLEMENTED() \
+ dal_logger_write(dal_context->logger, \
+ LOG_MAJOR_TM, LOG_MINOR_TM_NOT_IMPLEMENTED, \
+ "%s()\n", __func__)
+
+#define TM_ASSERT(condition) \
+ do { \
+ if (!(condition)) { \
+ dal_logger_write(dal_context->logger, \
+ LOG_MAJOR_TM, LOG_MINOR_TM_INFO, \
+ "TM_ASSERT: '%s'\n", #condition); \
+ TM_BREAK_TO_DEBUGGER(); \
+ } \
+ } while (0)
+
+#define TM_ERROR(...) \
+ do { \
+ dal_logger_write(dal_context->logger, LOG_MAJOR_ERROR, \
+ LOG_MINOR_COMPONENT_TOPOLOGY_MANAGER, \
+ __VA_ARGS__); \
+ TM_BREAK_TO_DEBUGGER(); \
+ } while (0)
+
+#define TM_WARNING(...) \
+ dal_logger_write(dal_context->logger, LOG_MAJOR_WARNING, \
+ LOG_MINOR_COMPONENT_TOPOLOGY_MANAGER, \
+ __VA_ARGS__)
+
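+/* Example usage of the macros above (illustrative; each expects a
+ * 'dal_context' variable to be in scope, since the definitions
+ * reference dal_context->logger):
+ *
+ *	struct dal_context *dal_context = tm_dm->dal_context;
+ *
+ *	TM_ASSERT(display_path != NULL);
+ *	TM_ERROR("%s: no Connector on Display Path!\n", __func__);
+ *	TM_HPD_IRQ("%s: irq_src=%d\n", __func__, irq_src);
+ */
+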
+/*******
+ Enums
+********/
+
+enum tm_display_type {
+ TM_DISPLAY_TYPE_UNK = 0x00000000,
+ TM_DISPLAY_TYPE_CRT = 0x00000001,
+ TM_DISPLAY_TYPE_CRT_DAC2 = 0x00000002,
+ TM_DISPLAY_TYPE_LCD = 0x00000004,
+ TM_DISPLAY_TYPE_TV = 0x00000008,
+ TM_DISPLAY_TYPE_CV = 0x00000010,
+ TM_DISPLAY_TYPE_DFP = 0x00000020,
+ TM_DISPLAY_TYPE_WIRELESS = 0x00000040
+};
+
+enum tm_stereo_priority {
+ /* Lowest priority */
+ TM_STEREO_PRIORITY_UNDEFINED = 0,
+ /* DVO which can be used as display path resource for their display */
+ TM_STEREO_PRIORITY_DISPLAYPATH_RESOURCE_DVO,
+ /* DAC which can be used as display path resource for their display */
+ TM_STEREO_PRIORITY_DISPLAYPATH_RESOURCE_DAC,
+ /* DAC */
+ TM_STEREO_PRIORITY_DAC,
+ /* DVO */
+ TM_STEREO_PRIORITY_DVO,
+ /* Highest priority, stereo resource located on display path itself */
+ TM_STEREO_PRIORITY_ON_PATH
+};
+
+/* We always prefer a path that outputs VGA */
+enum tm_path_stereo_priority {
+ /* Invalid priority */
+ TM_PATH_STEREO_PRIORITY_UNDEFINED = 0,
+ /* Lowest priority */
+ TM_PATH_STEREO_PRIORITY_DEFAULT,
+ TM_PATH_STEREO_PRIORITY_DISPLAYPORT,
+ TM_PATH_STEREO_PRIORITY_HDMI,
+ TM_PATH_STEREO_PRIORITY_DVI,
+ /* Active dongle that converts to VGA signal */
+ TM_PATH_STEREO_PRIORITY_VGA_CONVERTER,
+ /* ASIC signal is not VGA, but there is external encoder
+ * which converts to VGA */
+ TM_PATH_STEREO_PRIORITY_VGA_EXT_ENCODER,
+ /* ASIC and Sink signal are VGA */
+ TM_PATH_STEREO_PRIORITY_VGA_NATIVE
+};
+
+/******************************************************************************
+  Stream Engine resources can be used by Display Paths.
+  The priority of Stream Engine resource assignment is defined as follows:
+  for a DP MST Display Path, the Stream Engine resource with the highest
+  priority should be assigned, to keep lower-priority resources available
+  for non-MST DP display paths.
+  A resource with priority TM_ENGINE_PRIORITY_NON_MST_CAPABLE will not be
+  used by MST DP.
+  TM_ENGINE_PRIORITY_MST_DP_MST_ONLY has the highest priority when
+  assigning an engine to an MST path.
+******************************************************************************/
+enum tm_engine_priority {
+	/* DP_MST_ONLY is not driving any connector and can only be
+	 * used as an MST stream Engine. */
+ TM_ENGINE_PRIORITY_MST_DP_MST_ONLY = 0,
+ TM_ENGINE_PRIORITY_MST_DP_CONNECTED,
+ TM_ENGINE_PRIORITY_MST_DVI,
+ TM_ENGINE_PRIORITY_MST_HDMI,
+ TM_ENGINE_PRIORITY_MST_DVI_CONNECTED,
+ TM_ENGINE_PRIORITY_MST_HDMI_CONNECTED,
+ TM_ENGINE_PRIORITY_NON_MST_CAPABLE,
+ TM_ENGINE_PRIORITY_UNKNOWN
+};
+
+enum tm_encoder_ctx_priority {
+ TM_ENCODER_CTX_PRIORITY_INVALID = 0,
+ TM_ENCODER_CTX_PRIORITY_DEFAULT,
+ TM_ENCODER_CTX_PRIORITY_CONNECTED,
+ TM_ENCODER_CTX_PRIORITY_ACQUIRED,
+ TM_ENCODER_CTX_PRIORITY_ACQUIRED_CONNECTED,
+ /* should be equal to highest */
+ TM_ENCODER_CTX_PRIORITY_HIGHEST =
+ TM_ENCODER_CTX_PRIORITY_ACQUIRED_CONNECTED
+};
+
+enum tm_interrupt_type {
+ TM_INTERRUPT_TYPE_TIMER = 0,
+ TM_INTERRUPT_TYPE_HOTPLUG,
+ TM_INTERRUPT_TYPE_COUNT
+};
+
+#define TM_DECODE_INTERRUPT_TYPE(type) \
+	(((type) == TM_INTERRUPT_TYPE_TIMER) ? "TIMER" : \
+	((type) == TM_INTERRUPT_TYPE_HOTPLUG) ? "HOTPLUG" : \
+	"Invalid")
+
+enum tm_power_gate_state {
+ TM_POWER_GATE_STATE_NONE = 0,
+ TM_POWER_GATE_STATE_OFF,
+ TM_POWER_GATE_STATE_ON
+};
+
+enum tm_acquire_method {
+ /* Activates all resources, checks for co-functionality and
+ * updates HW and Display path context if needed */
+ TM_ACQUIRE_METHOD_HW = 0,
+ /* Checks for co-functionality only. Will NOT change HW state. */
+ TM_ACQUIRE_METHOD_SW
+};
+
+/************
+ Structures
+*************/
+
+/* Used when building display paths */
+struct tm_display_path_init_data {
+ struct connector *connector;
+ uint32_t num_of_encoders;
+ /* Encoders in reverse order, starting with connector */
+ struct encoder *encoders[MAX_NUM_OF_LINKS_PER_PATH];
+
+ struct ddc_service *ddc_service;
+	/* DeviceType_Unknown if this is a real display path */
+ struct device_id faked_path_device_id;
+ enum signal_type sink_signal;
+};
+
+
+#endif /* __DAL_TM_INTERNAL_TYPES_H__ */
diff --git a/drivers/gpu/drm/amd/dal/topology/tm_resource.c b/drivers/gpu/drm/amd/dal/topology/tm_resource.c
new file mode 100644
index 000000000000..d268405e71c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/topology/tm_resource.c
@@ -0,0 +1,381 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dal_services.h"
+
+#include "include/encoder_interface.h"
+#include "include/connector_interface.h"
+#include "include/controller_interface.h"
+#include "include/clock_source_interface.h"
+#include "include/audio_interface.h"
+
+#include "tm_resource_mgr.h"
+#include "tm_resource.h"
+
+struct tm_resource_private {
+ uint32_t ref_counter;
+ bool cloned;
+};
+
+void tm_res_ref_counter_increment(struct tm_resource *resource)
+{
+ ++resource->res_private->ref_counter;
+}
+
+void tm_res_ref_counter_decrement(struct tm_resource *resource)
+{
+ --resource->res_private->ref_counter;
+}
+
+uint32_t tm_res_ref_counter_get(const struct tm_resource *resource)
+{
+ return resource->res_private->ref_counter;
+}
+
+void tm_res_ref_counter_reset(struct tm_resource *resource)
+{
+ resource->res_private->ref_counter = 0;
+}
+
+static bool allocate_tm_resource_private(struct tm_resource_private **output)
+{
+ struct tm_resource_private *priv;
+
+ priv = dal_alloc(sizeof(*priv));
+ if (NULL == priv)
+ return false;
+
+ *output = priv;
+
+ return true;
+}
+
+static uint32_t clock_source_get_priority(const struct tm_resource *resource)
+{
+ return dal_clock_source_is_clk_src_with_fixed_freq(
+ TO_CLOCK_SOURCE(resource)) ? 0 : 1;
+}
+
+static uint32_t encoder_get_priority(const struct tm_resource *resource)
+{
+ struct encoder_feature_support feature;
+
+ /* Encoder has priority based on internal/external property.
+ * Internal has higher priority - lower value. */
+
+ feature = dal_encoder_get_supported_features(
+ TO_ENCODER(resource));
+
+ return feature.flags.bits.EXTERNAL_ENCODER ? 1 : 0;
+}
+
+static void connector_destroy(struct tm_resource **resource)
+{
+ struct tm_resource_connector_info *info;
+ struct tm_resource_private *priv;
+
+ info = TO_CONNECTOR_INFO(*resource);
+ priv = info->resource.res_private;
+
+ if (!priv->cloned) {
+ dal_connector_destroy(&info->connector);
+ dal_ddc_service_destroy(&info->ddc_service);
+ }
+
+ dal_free(priv);
+ dal_free(info);
+ *resource = NULL;
+}
+
+static void engine_destroy(struct tm_resource **resource)
+{
+ struct tm_resource_engine_info *info;
+
+ info = TO_ENGINE_INFO(*resource);
+ dal_free((*resource)->res_private);
+ dal_free(info);
+ *resource = NULL;
+}
+
+#define TM_RES_DESTROY(type, TYPE)\
+static void type##_destroy(struct tm_resource **resource)\
+{\
+ struct tm_resource_##type##_info *info;\
+ struct tm_resource_private *priv;\
+ \
+ if (!resource || !*resource)\
+ return;\
+ \
+ info = TO_##TYPE##_INFO(*resource);\
+ priv = info->resource.res_private; \
+ \
+ if (!priv->cloned)\
+ dal_##type##_destroy(&info->type);\
+ \
+ dal_free(priv);\
+ dal_free(info);\
+ *resource = NULL;\
+}
+
+#define TM_RES_CLONE(type, TYPE)\
+static struct tm_resource *type ## _clone(\
+ struct tm_resource *resource)\
+{\
+ struct tm_resource_ ## type ## _info *info = dal_alloc(sizeof(*info));\
+ struct tm_resource_private *priv;\
+ \
+ if (NULL == info) \
+ return NULL; \
+ \
+ *info = *TO_ ## TYPE ## _INFO(resource);\
+ \
+ if (!allocate_tm_resource_private(&info->resource.res_private)) { \
+ dal_free(info); \
+ return NULL; \
+ } \
+ \
+ priv = info->resource.res_private; \
+ \
+ *priv = *(resource->res_private);\
+ priv->ref_counter = 0;\
+ priv->cloned = true;\
+ return &info->resource;\
+}
+
+#define GET_GRPH_ID(type, TYPE)\
+static const struct graphics_object_id type##_get_grph_id(\
+ const struct tm_resource *resource)\
+{\
+ return dal_##type##_get_graphics_object_id(TO_##TYPE(resource));\
+}
+
+TM_RES_CLONE(connector, CONNECTOR)
+TM_RES_CLONE(controller, CONTROLLER)
+TM_RES_CLONE(encoder, ENCODER)
+TM_RES_CLONE(audio, AUDIO)
+TM_RES_CLONE(clock_source, CLOCK_SOURCE)
+TM_RES_CLONE(engine, ENGINE)
+TM_RES_DESTROY(controller, CONTROLLER)
+TM_RES_DESTROY(encoder, ENCODER)
+TM_RES_DESTROY(audio, AUDIO)
+TM_RES_DESTROY(clock_source, CLOCK_SOURCE)
+GET_GRPH_ID(connector, CONNECTOR)
+GET_GRPH_ID(controller, CONTROLLER)
+GET_GRPH_ID(encoder, ENCODER)
+GET_GRPH_ID(audio, AUDIO)
+GET_GRPH_ID(clock_source, CLOCK_SOURCE)
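+
+/* For reference: TM_RES_CLONE(encoder, ENCODER) above expands into an
+ * encoder_clone() function that shallow-copies tm_resource_encoder_info,
+ * allocates a fresh private block, zeroes its ref_counter and marks it
+ * 'cloned', so that the matching destroy() will not free the underlying
+ * HW object. The other invocations follow the same pattern for their
+ * respective types. */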
+
+static const struct graphics_object_id engine_get_grph_id(
+ const struct tm_resource *resource)
+{
+ return TO_ENGINE_INFO(resource)->id;
+}
+
+static void empty_release_hw(struct tm_resource *resource)
+{
+}
+
+static void encoder_release_hw(struct tm_resource *resource)
+{
+ dal_encoder_release_hw(TO_ENCODER(resource));
+}
+
+static uint32_t empty_get_priority(const struct tm_resource *resource)
+{
+ return 0;
+}
+
+static void encoder_set_multi_path(
+ struct tm_resource *resource,
+ bool is_multi_path)
+{
+ dal_encoder_set_multi_path(TO_ENCODER(resource), is_multi_path);
+}
+
+static void empty_set_multi_path(
+ struct tm_resource *resource,
+ bool is_multi_path)
+{
+
+}
+
+static const struct tm_resource_funcs connector_funcs = {
+ .destroy = connector_destroy,
+ .get_grph_id = connector_get_grph_id,
+ .release_hw = empty_release_hw,
+ .clone = connector_clone,
+ .get_priority = empty_get_priority,
+ .set_multi_path = empty_set_multi_path
+};
+
+static const struct tm_resource_funcs controller_funcs = {
+ .destroy = controller_destroy,
+ .get_grph_id = controller_get_grph_id,
+ .release_hw = empty_release_hw,
+ .clone = controller_clone,
+ .get_priority = empty_get_priority,
+ .set_multi_path = empty_set_multi_path
+};
+
+static const struct tm_resource_funcs encoder_funcs = {
+ .destroy = encoder_destroy,
+ .get_grph_id = encoder_get_grph_id,
+ .release_hw = encoder_release_hw,
+ .clone = encoder_clone,
+ .get_priority = encoder_get_priority,
+ .set_multi_path = encoder_set_multi_path
+};
+
+static const struct tm_resource_funcs clock_source_funcs = {
+ .destroy = clock_source_destroy,
+ .get_grph_id = clock_source_get_grph_id,
+ .release_hw = empty_release_hw,
+ .clone = clock_source_clone,
+ .get_priority = clock_source_get_priority,
+ .set_multi_path = empty_set_multi_path
+};
+
+static const struct tm_resource_funcs audio_funcs = {
+ .destroy = audio_destroy,
+ .get_grph_id = audio_get_grph_id,
+ .release_hw = empty_release_hw,
+ .clone = audio_clone,
+ .get_priority = empty_get_priority,
+ .set_multi_path = empty_set_multi_path
+};
+
+static const struct tm_resource_funcs engine_funcs = {
+ .destroy = engine_destroy,
+ .get_grph_id = engine_get_grph_id,
+ .release_hw = empty_release_hw,
+ .clone = engine_clone,
+ .get_priority = empty_get_priority,
+ .set_multi_path = empty_set_multi_path
+};
+
+struct tm_resource *dal_tm_resource_encoder_create(struct encoder *enc)
+{
+ struct tm_resource_encoder_info *info = dal_alloc(sizeof(*info));
+
+ if (NULL == info)
+ return NULL;
+
+ if (!allocate_tm_resource_private(&info->resource.res_private)) {
+ dal_free(info);
+ return NULL;
+ }
+
+ info->paired_encoder_index = RESOURCE_INVALID_INDEX;
+ info->encoder = enc;
+ info->resource.funcs = &encoder_funcs;
+ return &info->resource;
+}
+
+struct tm_resource *dal_tm_resource_controller_create(struct controller *cntl)
+{
+ struct tm_resource_controller_info *info = dal_alloc(sizeof(*info));
+
+ if (NULL == info)
+ return NULL;
+
+ if (!allocate_tm_resource_private(&info->resource.res_private)) {
+ dal_free(info);
+ return NULL;
+ }
+
+ info->power_gating_state = TM_POWER_GATE_STATE_NONE;
+ info->controller = cntl;
+ info->resource.funcs = &controller_funcs;
+ return &info->resource;
+}
+
+struct tm_resource *dal_tm_resource_connector_create(struct connector *conn)
+{
+ struct tm_resource_connector_info *info = dal_alloc(sizeof(*info));
+
+ if (NULL == info)
+ return NULL;
+
+ if (!allocate_tm_resource_private(&info->resource.res_private)) {
+ dal_free(info);
+ return NULL;
+ }
+
+ info->connector = conn;
+ info->resource.funcs = &connector_funcs;
+ return &info->resource;
+}
+
+struct tm_resource *dal_tm_resource_clock_source_create(struct clock_source *cs)
+{
+ struct tm_resource_clock_source_info *info = dal_alloc(sizeof(*info));
+
+ if (NULL == info)
+ return NULL;
+
+ if (!allocate_tm_resource_private(&info->resource.res_private)) {
+ dal_free(info);
+ return NULL;
+ }
+
+ info->clk_sharing_group = CLOCK_SHARING_GROUP_EXCLUSIVE;
+ info->clock_source = cs;
+ info->resource.funcs = &clock_source_funcs;
+ return &info->resource;
+}
+
+struct tm_resource *dal_tm_resource_engine_create(struct graphics_object_id id)
+{
+ struct tm_resource_engine_info *info = dal_alloc(sizeof(*info));
+
+ if (NULL == info)
+ return NULL;
+
+ if (!allocate_tm_resource_private(&info->resource.res_private)) {
+ dal_free(info);
+ return NULL;
+ }
+
+ info->id = id;
+ info->resource.funcs = &engine_funcs;
+ return &info->resource;
+}
+
+struct tm_resource *dal_tm_resource_audio_create(struct audio *audio)
+{
+ struct tm_resource_audio_info *info = dal_alloc(sizeof(*info));
+
+ if (NULL == info)
+ return NULL;
+
+ if (!allocate_tm_resource_private(&info->resource.res_private)) {
+ dal_free(info);
+ return NULL;
+ }
+
+ info->audio = audio;
+ info->resource.funcs = &audio_funcs;
+ return &info->resource;
+}
diff --git a/drivers/gpu/drm/amd/dal/topology/tm_resource.h b/drivers/gpu/drm/amd/dal/topology/tm_resource.h
new file mode 100644
index 000000000000..d7384e3fd8b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/topology/tm_resource.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_TM_RESOURCE_H__
+#define __DAL_TM_RESOURCE_H__
+
+/* Generic structures holding the interface to a HW resource,
+ * TM proprietary info, and resource-specific info. */
+struct tm_resource_flags {
+ uint8_t resource_active:1;
+ uint8_t display_path_resource:1;
+ uint8_t mst_resource:1;
+ uint8_t multi_path:1;
+};
+
+struct tm_resource;
+
+/* These functions perform operations SPECIFIC to a type of resource. */
+struct tm_resource_funcs {
+ void (*destroy)(struct tm_resource **resource);
+ struct tm_resource * (*clone)(struct tm_resource *resource);
+ void (*release_hw)(struct tm_resource *resource);
+ const struct graphics_object_id (*get_grph_id)(
+ const struct tm_resource *resource);
+ uint32_t (*get_priority)(const struct tm_resource *resource);
+ void (*set_multi_path)(
+ struct tm_resource *resource,
+ bool is_multi_path);
+};
+
+/* Reference count functions - these operations are IDENTICAL for all
+ * types of resources. */
+void tm_res_ref_counter_increment(struct tm_resource *resource);
+void tm_res_ref_counter_decrement(struct tm_resource *resource);
+uint32_t tm_res_ref_counter_get(const struct tm_resource *resource);
+void tm_res_ref_counter_reset(struct tm_resource *resource);
+/**** end of reference count functions ****/
+
+
+struct tm_resource_private;
+
+struct tm_resource {
+ const struct tm_resource_funcs *funcs;
+
+ /* private data of tm_resource */
+ struct tm_resource_private *res_private;
+
+ struct tm_resource_flags flags;
+};
+
+
+struct tm_resource_connector_info {
+ struct tm_resource resource;
+ struct ddc_service *ddc_service;
+ struct connector *connector;
+};
+
+struct tm_resource_encoder_info {
+ struct tm_resource resource;
+ struct encoder *encoder;
+ uint32_t paired_encoder_index;
+};
+
+struct tm_resource_controller_info {
+ struct tm_resource resource;
+ struct controller *controller;
+ enum tm_power_gate_state power_gating_state;
+};
+
+struct tm_resource_clock_source_info {
+ struct tm_resource resource;
+ struct clock_source *clock_source;
+ enum clock_sharing_group clk_sharing_group;
+};
+
+struct tm_resource_engine_info {
+ struct tm_resource resource;
+ enum tm_engine_priority priority;
+ struct graphics_object_id id;
+};
+
+struct tm_resource_audio_info {
+ struct tm_resource resource;
+ struct audio *audio;
+};
+
+struct tm_resource *dal_tm_resource_encoder_create(struct encoder *enc);
+struct tm_resource *dal_tm_resource_audio_create(struct audio *audio);
+struct tm_resource *dal_tm_resource_engine_create(struct graphics_object_id id);
+struct tm_resource *dal_tm_resource_clock_source_create(
+ struct clock_source *cs);
+struct tm_resource *dal_tm_resource_connector_create(struct connector *conn);
+struct tm_resource *dal_tm_resource_controller_create(struct controller *cntl);
+
+#define GRPH_ID(resource) ((resource)->funcs->get_grph_id((resource)))
+
+#define TO_CONNECTOR_INFO(tm_resource)\
+ (container_of((tm_resource),\
+ struct tm_resource_connector_info, resource))
+
+#define TO_CONNECTOR(tm_resource) (TO_CONNECTOR_INFO(tm_resource)->connector)
+
+#define TO_CONTROLLER_INFO(tm_resource)\
+ (container_of((tm_resource),\
+ struct tm_resource_controller_info, resource))
+
+#define TO_CONTROLLER(tm_resource) (TO_CONTROLLER_INFO(tm_resource)->controller)
+
+#define TO_ENCODER_INFO(tm_resource)\
+ (container_of((tm_resource),\
+ struct tm_resource_encoder_info, resource))
+
+#define TO_ENCODER(tm_resource) (TO_ENCODER_INFO(tm_resource)->encoder)
+
+#define TO_CLOCK_SOURCE_INFO(tm_resource)\
+ (container_of((tm_resource),\
+ struct tm_resource_clock_source_info, resource))
+#define TO_CLOCK_SOURCE(tm_resource)\
+ (TO_CLOCK_SOURCE_INFO(tm_resource)->clock_source)
+
+#define TO_ENGINE_INFO(tm_resource)\
+ (container_of((tm_resource),\
+ struct tm_resource_engine_info, resource))
+
+#define TO_AUDIO_INFO(tm_resource)\
+ (container_of((tm_resource),\
+ struct tm_resource_audio_info, resource))
+#define TO_AUDIO(tm_resource) (TO_AUDIO_INFO(tm_resource)->audio)
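+
+/* Usage sketch (illustrative only): given a tm_resource looked up through
+ * the resource manager, the TO_*() macros recover the enclosing
+ * type-specific info structure via container_of(), e.g.:
+ *
+ *	struct tm_resource *res =
+ *		tm_resource_mgr_find_resource(tm_rm, enc_id);
+ *	struct encoder *enc = (res != NULL) ? TO_ENCODER(res) : NULL;
+ *
+ * 'tm_rm' and 'enc_id' are hypothetical locals; the caller must know the
+ * resource really wraps an encoder before using TO_ENCODER(). */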
+
+
+#define TM_RES_REF_CNT_INCREMENT(resource) \
+ (tm_res_ref_counter_increment(resource))
+#define TM_RES_REF_CNT_DECREMENT(resource) \
+ (tm_res_ref_counter_decrement(resource))
+#define TM_RES_REF_CNT_GET(resource) \
+ (tm_res_ref_counter_get(resource))
+#define TM_RES_REF_CNT_RESET(resource) \
+ (tm_res_ref_counter_reset(resource))
+
+#endif
diff --git a/drivers/gpu/drm/amd/dal/topology/tm_resource_builder.c b/drivers/gpu/drm/amd/dal/topology/tm_resource_builder.c
new file mode 100644
index 000000000000..51342ed0dcb4
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/topology/tm_resource_builder.c
@@ -0,0 +1,1871 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dal_services.h"
+
+/* External includes */
+#include "include/topology_mgr_interface.h"
+#include "include/adapter_service_interface.h"
+#include "include/gpu_interface.h"
+#include "include/connector_interface.h"
+#include "include/encoder_types.h"
+#include "include/encoder_interface.h"
+#include "include/dcs_interface.h"
+#include "include/link_service_interface.h"
+#include "include/ddc_service_interface.h"
+#include "include/controller_interface.h"
+#include "include/audio_interface.h"
+
+/* Internal includes */
+#include "tm_resource_builder.h"
+#include "tm_resource_mgr.h"
+#include "tm_internal_types.h"
+#include "tm_utils.h"
+
+static const enum dal_device_type tmrb_device_enumeration_order[] = {
+ DEVICE_TYPE_LCD,
+ DEVICE_TYPE_CRT,
+ DEVICE_TYPE_DFP,
+ DEVICE_TYPE_CV,
+ DEVICE_TYPE_TV,
+ DEVICE_TYPE_WIRELESS,
+ DEVICE_TYPE_CF };
+
+static const uint32_t tmrb_num_of_devices_in_order_enumeration =
+ sizeof(tmrb_device_enumeration_order)
+ / sizeof(tmrb_device_enumeration_order[0]);
+
+/* local macro definitions */
+#define TM_RB_MAX_NUM_OF_DISPLAY_PATHS 20
+
+
+/*****************************************************************************
+ * private data structures
+ ***************************************************************************/
+
+struct tm_resource_builder {
+ struct dal_context *dal_context;
+ struct adapter_service *as;
+ struct timing_service *timing_srvc;
+ struct hw_sequencer *hwss;
+ struct tm_resource_mgr *tm_rm;
+ struct irq_manager *irq_manager;
+ struct topology_mgr *tm;
+
+ struct display_path *display_paths[TM_RB_MAX_NUM_OF_DISPLAY_PATHS];
+ struct display_path *root_display_paths[TM_RB_MAX_NUM_OF_DISPLAY_PATHS];
+ uint32_t num_of_display_paths;
+ uint32_t num_of_cf_paths;
+};
+
+/*****************************************************************************
+ * private functions
+ ***************************************************************************/
+
+static bool tm_resource_builder_construct(
+ struct tm_resource_builder *tm_rb,
+ const struct tm_resource_builder_init_data *init_data)
+{
+ tm_rb->dal_context = init_data->dal_context;
+ tm_rb->as = init_data->adapter_service;
+ tm_rb->hwss = init_data->hwss;
+ tm_rb->timing_srvc = init_data->timing_service;
+ tm_rb->tm_rm = init_data->resource_mgr;
+ tm_rb->irq_manager = init_data->irq_manager;
+ tm_rb->tm = init_data->tm;
+ return true;
+}
+
+/** create the TM Resource Builder */
+struct tm_resource_builder*
+tm_resource_builder_create(
+ const struct tm_resource_builder_init_data *init_data)
+{
+ struct tm_resource_builder *tm_rb;
+
+ tm_rb = dal_alloc(sizeof(*tm_rb));
+
+ if (!tm_rb) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ if (tm_resource_builder_construct(tm_rb, init_data))
+ return tm_rb;
+
+ dal_free(tm_rb);
+ return NULL;
+}
+
+/** destroy the TM Resource Builder */
+void tm_resource_builder_destroy(struct tm_resource_builder **tm_rb)
+{
+ if (!tm_rb || !*tm_rb) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ /* TODO: deallocate everything here */
+
+ dal_free(*tm_rb);
+ *tm_rb = NULL;
+}
+
+static void init_gpu_static_clocks(struct tm_resource_builder *tm_rb,
+ struct gpu *gpu)
+{
+ struct dal_system_clock_range dal_sys_clks = {};
+
+ if (false == dal_adapter_service_is_feature_supported(
+ FEATURE_USE_PPLIB))
+ return;
+
+ if (false == dal_get_system_clocks_range(tm_rb->dal_context,
+ &dal_sys_clks)) {
+ /* GPU can continue with default clocks.
+ * Do NOT fail this call - only log a warning. */
+ dal_logger_write(tm_rb->dal_context->logger,
+ LOG_MAJOR_WARNING,
+ LOG_MINOR_COMPONENT_GPU,
+ "Failed to get BM Static Clock Ranges! Will be using default values.\n");
+ } else {
+ struct gpu_clock_info gpu_clk_info;
+
+ dal_memset(&gpu_clk_info, 0, sizeof(gpu_clk_info));
+
+ gpu_clk_info.max_dclk_khz = dal_sys_clks.max_dclk;
+ gpu_clk_info.min_dclk_khz = dal_sys_clks.min_dclk;
+
+ gpu_clk_info.max_mclk_khz = dal_sys_clks.max_mclk;
+ gpu_clk_info.min_mclk_khz = dal_sys_clks.min_mclk;
+
+ gpu_clk_info.max_sclk_khz = dal_sys_clks.max_sclk;
+ gpu_clk_info.min_sclk_khz = dal_sys_clks.min_sclk;
+
+ dal_gpu_init_static_clock_info(gpu, &gpu_clk_info);
+ }
+}
+
+static struct gpu *tm_resource_builder_create_gpu(
+ struct tm_resource_builder *tm_rb)
+{
+ struct dal_context *dal_context = tm_rb->dal_context;
+ struct gpu_init_data gpu_init_data;
+ struct gpu *gpu;
+
+ /* initialise GPU init data */
+ dal_memset(&gpu_init_data, 0, sizeof(gpu_init_data));
+
+ gpu_init_data.dal_context = tm_rb->dal_context;
+ gpu_init_data.adapter_service = tm_rb->as;
+ gpu_init_data.irq_manager = tm_rb->irq_manager;
+
+ /* create GPU object */
+ gpu = dal_gpu_create(&gpu_init_data);
+ if (!gpu) {
+ TM_ERROR("%s: failed to instantiate GPU object!\n", __func__);
+ return NULL;
+ }
+
+ tm_resource_mgr_set_gpu_interface(tm_rb->tm_rm, gpu);
+
+ init_gpu_static_clocks(tm_rb, gpu);
+
+ return gpu;
+}
+
+static enum tm_result tm_resource_builder_add_engines(
+ struct tm_resource_builder *tm_rb)
+{
+ enum tm_result rc = TM_RESULT_SUCCESS;
+ uint32_t eng_id;
+ struct tm_resource *tm_resource;
+ struct dal_context *dal_context = tm_rb->dal_context;
+
+ for (eng_id = 0; eng_id < ENGINE_ID_COUNT; eng_id++) {
+
+ tm_resource = tm_resource_mgr_add_engine(tm_rb->tm_rm,
+ eng_id);
+
+ if (NULL == tm_resource) {
+ rc = TM_RESULT_FAILURE;
+ TM_ERROR("%s: Failed to add engine",
+ __func__);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static enum tm_result tm_resource_builder_add_clock_sources(
+ struct tm_resource_builder *tm_rb,
+ struct gpu *gpu)
+{
+ enum tm_result rc = TM_RESULT_SUCCESS;
+ uint32_t clock_sources_num;
+ uint32_t clk_src_ind;
+ struct tm_resource *tm_resource;
+ struct clock_source *clock_source;
+ struct dal_context *dal_context = tm_rb->dal_context;
+
+ clock_sources_num = dal_gpu_get_num_of_clock_sources(gpu);
+
+ TM_RESOURCES("TM_RB: number of Clock Sources: %d\n",
+ clock_sources_num);
+
+ for (clk_src_ind = 0;
+ clk_src_ind < clock_sources_num;
+ clk_src_ind++) {
+
+ clock_source = dal_gpu_create_clock_source(gpu,
+ clk_src_ind);
+ if (NULL == clock_source) {
+ rc = TM_RESULT_FAILURE;
+ TM_ERROR("%s: Failed to create ClockSource\n",
+ __func__);
+ break;
+ }
+
+ tm_resource =
+ dal_tm_resource_mgr_add_resource(tm_rb->tm_rm,
+ dal_tm_resource_clock_source_create(
+ clock_source));
+
+ if (NULL == tm_resource) {
+ rc = TM_RESULT_FAILURE;
+ TM_ERROR("%s: Failed to add ClockSource number:%d\n",
+ __func__, clk_src_ind);
+ dal_clock_source_destroy(&clock_source);
+ break;
+ }
+
+ } /* for() */
+
+ return rc;
+}
+
+static enum tm_result tm_resource_builder_add_controllers(
+ struct tm_resource_builder *tm_rb,
+ struct gpu *gpu)
+
+{
+ enum tm_result rc = TM_RESULT_SUCCESS;
+ uint32_t i;
+ uint32_t controllers_num;
+ struct controller *controller;
+ struct tm_resource *tm_resource;
+ struct dal_context *dal_context = tm_rb->dal_context;
+
+ controllers_num = dal_gpu_get_max_num_of_primary_controllers(gpu) +
+ dal_gpu_get_max_num_of_underlay_controllers(gpu);
+
+ for (i = 0; i < controllers_num; i++) {
+
+ controller = dal_gpu_create_controller(gpu, i);
+
+ if (controller == NULL) {
+ TM_ERROR("%s: Failed to create Controller!\n",
+ __func__);
+ rc = TM_RESULT_FAILURE;
+ break;
+ }
+
+ tm_resource = dal_tm_resource_mgr_add_resource(
+ tm_rb->tm_rm,
+ dal_tm_resource_controller_create(controller));
+
+ if (tm_resource == NULL) {
+ TM_ERROR("%s: Failed to add Controller!\n", __func__);
+ rc = TM_RESULT_FAILURE;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+enum tm_result tm_resource_builder_create_gpu_resources(
+ struct tm_resource_builder *tm_rb)
+{
+ enum tm_result rc = TM_RESULT_SUCCESS;
+ struct gpu *gpu;
+
+ do {
+ gpu = tm_resource_builder_create_gpu(tm_rb);
+ if (!gpu) {
+ rc = TM_RESULT_FAILURE;
+ break;
+ }
+
+ /********************************************************
+ Add Engines
+ *********************************************************/
+ rc = tm_resource_builder_add_engines(tm_rb);
+ if (TM_RESULT_FAILURE == rc)
+ break;
+
+ /********************************************************
+ Add Clock Sources
+ *********************************************************/
+ rc = tm_resource_builder_add_clock_sources(tm_rb, gpu);
+ if (TM_RESULT_FAILURE == rc)
+ break;
+
+ /********************************************************
+ Add Controllers
+ *********************************************************/
+ rc = tm_resource_builder_add_controllers(tm_rb, gpu);
+ if (TM_RESULT_FAILURE == rc)
+ break;
+
+ /* TODO: add the rest of resources */
+
+ } while (0);
+
+ /* handle an error */
+ if (TM_RESULT_FAILURE == rc) {
+ if (gpu) {
+ tm_resource_mgr_set_gpu_interface(tm_rb->tm_rm, NULL);
+ dal_gpu_destroy(&gpu);
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * Adds a link to the display path. A link consists of at least a
+ * mandatory encoder.
+ * The link is added to the end of the chain (counting from the GPU).
+ *
+ * \param [in] display_path: Display path to which the link is added
+ * \param [in] encoder: Mandatory object of the link.
+ */
+static enum tm_result tmrb_add_link(
+ struct tm_resource_builder *tm_rb,
+ struct display_path *display_path,
+ struct encoder *encoder)
+{
+	struct dal_context *dal_context = tm_rb->dal_context;
+
+ /* Add link to display path */
+ if (false == dal_display_path_add_link(display_path, encoder)) {
+ TM_ERROR("%s: dal_display_path_add_link failed!\n", __func__);
+ return TM_RESULT_FAILURE;
+ }
+
+ return TM_RESULT_SUCCESS;
+}
+
+/**
+ * Sets the Display Path properties.
+ * Also sets the TM resource for the connector used by the EDID DDC
+ * polling feature.
+ *
+ * \param [in] init_data: Display Path Init Data
+ * \param [in] display_path: Display path which is currently built
+ */
+static void tmrb_set_display_path_properties(struct tm_resource_builder *tm_rb,
+ const struct tm_display_path_init_data *init_data,
+ struct display_path *display_path)
+{
+ union display_path_properties props;
+ enum signal_type sink_signal;
+ enum signal_type asic_signal;
+ bool hpd_supported = false;
+ bool ddc_supported = false;
+ struct connector *connector;
+ struct connector_feature_support cfs;
+ union audio_support aud_support;
+ enum connector_id connector_id;
+ uint32_t output_signals;
+ struct dal_context *dal_context = tm_rb->dal_context;
+
+ props.raw = 0;
+
+ sink_signal = init_data->sink_signal;
+ asic_signal = dal_display_path_sink_signal_to_asic_signal(display_path,
+ sink_signal);
+
+ /* Obtain HPD/DDC and audio support from connector */
+ connector = dal_display_path_get_connector(display_path);
+ dal_connector_get_features(connector, &cfs);
+
+ if (asic_signal == SIGNAL_TYPE_RGB) {
+ /* For VGA the hardware may poll DDC and
+ * if this is the case, HPD is supported. */
+ hpd_supported = cfs.HW_DDC_POLLING;
+ } else {
+ /* For non-VGA, if there is HPD line, then
+ * HPD is supported. */
+ hpd_supported = (cfs.hpd_line != HPD_SOURCEID_UNKNOWN);
+ }
+
+ ddc_supported = (cfs.ddc_line != CHANNEL_ID_UNKNOWN);
+
+ /* get audio support from adapter service */
+ aud_support = dal_adapter_service_get_audio_support(tm_rb->as);
+
+ /* get this connector's id, and supported output signals */
+ connector_id = dal_graphics_object_id_get_connector_id(
+ dal_connector_get_graphics_object_id(connector));
+
+ output_signals = dal_connector_enumerate_output_signals(connector);
+
+ /* handle setting this display path's DP audio supported bit */
+ if ((output_signals & SIGNAL_TYPE_DISPLAY_PORT)
+ || (output_signals & SIGNAL_TYPE_DISPLAY_PORT_MST)
+ || (output_signals & SIGNAL_TYPE_EDP)) {
+ /* Connector can output a DP signal, so if DP audio is
+ * supported, then DP audio bit is set accordingly. */
+ props.bits.IS_DP_AUDIO_SUPPORTED = aud_support.bits.DP_AUDIO;
+ }
+
+ /* handle setting this display path's HDMI audio supported bit */
+ if (output_signals & SIGNAL_TYPE_HDMI_TYPE_A) {
+ /* Signal is HDMI, if connector is HDMI, we have the
+ * native case, so we check the HDMI native bit. */
+ if (connector_id == CONNECTOR_ID_HDMI_TYPE_A) {
+ props.bits.IS_HDMI_AUDIO_SUPPORTED =
+ aud_support.bits.HDMI_AUDIO_NATIVE;
+
+ if (!aud_support.bits.HDMI_AUDIO_NATIVE) {
+ /* this should not happen */
+ TM_ERROR("%s: HDMI connector exists, but HDMI native audio not supported",
+ __func__);
+ }
+ } else {
+ /* Otherwise, we have the dongle case, so we check
+ * the HDMI on dongle bit. */
+ props.bits.IS_HDMI_AUDIO_SUPPORTED =
+ aud_support.bits.HDMI_AUDIO_ON_DONGLE;
+ }
+ }
+
+ /* Common initialisation */
+ props.bits.HPD_SUPPORTED = hpd_supported;
+ props.bits.NON_DESTRUCTIVE_POLLING = ddc_supported;
+ props.bits.FAKED_PATH = (init_data->faked_path_device_id.device_type
+ != DEVICE_TYPE_UNKNOWN);
+
+ /* Override based on sink signal */
+ if (dal_is_analog_signal(sink_signal)) {
+
+ props.bits.FORCE_CONNECT_SUPPORTED = 1;
+ } else if (sink_signal == SIGNAL_TYPE_LVDS) {
+
+ props.bits.HPD_SUPPORTED = 1;
+ props.bits.NON_DESTRUCTIVE_POLLING = 1;
+ } else if (sink_signal == SIGNAL_TYPE_WIRELESS) {
+
+ props.bits.HPD_SUPPORTED = 1;
+ props.bits.FORCE_CONNECT_SUPPORTED = 1;
+ }
+
+ dal_display_path_set_properties(display_path, props);
+}
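+
+/* Example (illustrative): for a native HDMI connector that has an HPD
+ * line, a DDC line and HDMI_AUDIO_NATIVE support, the function above
+ * yields HPD_SUPPORTED = 1, NON_DESTRUCTIVE_POLLING = 1 and
+ * IS_HDMI_AUDIO_SUPPORTED = 1 in the path properties. */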
+
+/**
+ * Updates the device tag on a display path.
+ * For a real display path we query VBIOS. For CF paths and fake paths
+ * we generate the device tag according to internal policy.
+ *
+ * \param [in] init_data: Display Path Init Data
+ * \param [in] display_path: Display path which is currently built
+ *
+ * \return TM_RESULT_SUCCESS if the device tag was successfully updated,
+ * TM_RESULT_FAILURE otherwise.
+ */
+static enum tm_result tmrb_update_device_tag(
+ struct tm_resource_builder *tm_rb,
+ const struct tm_display_path_init_data *init_data,
+ struct display_path *display_path)
+{
+ struct connector_device_tag_info device_tag = { 0 };
+ struct graphics_object_id conn_object_id;
+ struct connector *connector;
+ uint32_t i;
+ struct dal_context *dal_context = tm_rb->dal_context;
+
+ connector = dal_display_path_get_connector(display_path);
+
+ conn_object_id = dal_connector_get_graphics_object_id(connector);
+
+ if (conn_object_id.type != OBJECT_TYPE_CONNECTOR) {
+ TM_ERROR("%s: This path doesn't have connector, something is wrong!\n",
+ __func__);
+ return TM_RESULT_FAILURE;
+ }
+
+ /* Case 1 - fake display path */
+ if (init_data->faked_path_device_id.device_type
+ != DEVICE_TYPE_UNKNOWN) {
+
+ device_tag.dev_id.device_type =
+ init_data->faked_path_device_id.device_type;
+ device_tag.dev_id.enum_id =
+ init_data->faked_path_device_id.enum_id;
+
+ dal_display_path_set_device_tag(display_path, device_tag);
+
+ TM_BUILD_DSP_PATH(" Device tag \"fake\" [%u:%u] was set on display path [0x%p].\n",
+ device_tag.dev_id.device_type,
+ device_tag.dev_id.enum_id,
+ display_path);
+
+ return TM_RESULT_SUCCESS;
+ }
+
+ /* case 2 - CrossFire display path */
+ if (conn_object_id.id == CONNECTOR_ID_CROSSFIRE) {
+
+ device_tag.dev_id.device_type = DEVICE_TYPE_CF;
+ device_tag.dev_id.enum_id = 1;
+
+ for (i = 0; i < tm_rb->num_of_display_paths; i++) {
+
+ struct connector_device_tag_info *current_device_tag =
+ dal_display_path_get_device_tag(
+ tm_rb->display_paths[i]);
+
+ if (device_tag.dev_id.device_type ==
+ current_device_tag->dev_id.device_type) {
+				/* We found a CF path.
+				 * In VBIOS we usually have 2 CF paths, one
+				 * representing bundleA and one representing
+				 * bundleB.
+				 * To simplify TM, TM will only enumerate 1 CF
+				 * path. The CF path can be configured for
+				 * bundleA, bundleB or bundleA+B.
+				 * If a CF path has already been created prior
+				 * to this one, creation of this CF path
+				 * fails, so we end up with only 1 CF path.
+				 */
+ return TM_RESULT_FAILURE;
+ }
+ }
+
+ dal_display_path_set_device_tag(display_path, device_tag);
+
+ TM_BUILD_DSP_PATH(" Device tag \"CF\" [%u:%u] was set on display path [0x%p].\n",
+ device_tag.dev_id.device_type,
+ device_tag.dev_id.enum_id,
+ display_path);
+
+ return TM_RESULT_SUCCESS;
+ }
+
+ /* case 3 - Wireless display path */
+ if (conn_object_id.id == CONNECTOR_ID_WIRELESS ||
+ conn_object_id.id == CONNECTOR_ID_MIRACAST) {
+
+ device_tag.dev_id.device_type = DEVICE_TYPE_WIRELESS;
+ device_tag.dev_id.enum_id = 1;
+
+ dal_display_path_set_device_tag(display_path, device_tag);
+
+ TM_BUILD_DSP_PATH(" Device tag \"wireless\" [%u:%u] was set on display path [0x%p].\n",
+ device_tag.dev_id.device_type,
+ device_tag.dev_id.enum_id,
+ display_path);
+
+ return TM_RESULT_SUCCESS;
+ }
+
+ /* case 4 - VBIOS lookup
+ * Index to src table does not correspond to index of device tag.
+ * Therefore here we have to loop through the DeviceTags and find a
+ * matching DeviceTag (to SignalType) to assign to DisplayPath */
+ for (i = 0; ; i++) {
+ if (dal_adapter_service_get_device_tag(tm_rb->as,
+ conn_object_id, i, &device_tag)
+ != true) {
+ break;
+ }
+
+ if (device_tag.dev_id.device_type
+ == tm_utils_signal_type_to_device_type(
+ dal_display_path_get_query_signal(
+ display_path, SINK_LINK_INDEX))) {
+
+ if (true == dal_adapter_service_is_device_id_supported(
+ tm_rb->as, device_tag.dev_id)) {
+
+ dal_display_path_set_device_tag(display_path,
+ device_tag);
+
+ TM_BUILD_DSP_PATH(
+ " Device tag [%u:%u] was set on display path [0x%p].\n",
+ device_tag.dev_id.device_type,
+ device_tag.dev_id.enum_id,
+ display_path);
+
+ return TM_RESULT_SUCCESS;
+ }
+ break;
+ }
+ }
+
+	/* Probably the connector (as reported in VBIOS) does not support this
+	 * signal, so the display path should be destroyed. */
+ return TM_RESULT_FAILURE;
+}
+
+/**
+ * Creates a Link Service of the requested type.
+ *
+ * \param [in] display_path - Display path which has link associated with
+ * this service.
+ * \param [in] link_index - Link inside display path associated with
+ * this service
+ * \param [in] link_type - Type of link service
+ *
+ * \return pointer to created Link Service
+ */
+static struct link_service *tmrb_create_link_service(
+ struct tm_resource_builder *tm_rb,
+ struct display_path *display_path,
+ uint32_t link_index,
+ enum link_service_type link_type)
+{
+ struct connector *connector;
+ struct link_service *link_service = NULL;
+ uint32_t num_of_paths_per_connector = 0;
+ struct tm_resource *connector_rsrc;
+ enum dal_irq_source hpd_rx_irq_src = DAL_IRQ_SOURCE_INVALID;
+ struct graphics_object_id conn_object_id;
+ struct irq *irq;
+ struct link_service_init_data link_service_init_data;
+ struct dal_context *dal_context = tm_rb->dal_context;
+
+ connector = dal_display_path_get_connector(display_path);
+ conn_object_id = dal_connector_get_graphics_object_id(connector);
+
+ connector_rsrc = tm_resource_mgr_find_resource(tm_rb->tm_rm,
+ conn_object_id);
+
+ /* Update number of paths bound to the link */
+ if (link_type == LINK_SERVICE_TYPE_DP_MST) {
+ num_of_paths_per_connector =
+ dal_adapter_service_get_num_of_path_per_dp_mst_connector(
+ tm_rb->as);
+ } else {
+ /* non-MST always 1 path */
+ num_of_paths_per_connector = 1;
+ }
+
+ if (num_of_paths_per_connector < 1) {
+ TM_ERROR("%s: invalid num_of_paths_per_connector!\n",
+ __func__);
+ return NULL;
+ }
+
+ /* get the HPD RX source */
+ irq = dal_adapter_service_obtain_hpd_irq(tm_rb->as,
+ GRPH_ID(connector_rsrc) /*conn_object_id*/);
+ if (NULL != irq) {
+ hpd_rx_irq_src = dal_irq_get_rx_source(irq);
+ dal_adapter_service_release_irq(tm_rb->as, irq);
+ }
+
+ dal_memset(&link_service_init_data, 0, sizeof(link_service_init_data));
+
+ /* Create a link service */
+ link_service_init_data.connector_enum_id =
+ GRPH_ID(connector_rsrc).enum_id;
+ link_service_init_data.connector_id =
+ GRPH_ID(connector_rsrc);
+ link_service_init_data.dpcd_access_srv =
+ TO_CONNECTOR_INFO(connector_rsrc)->ddc_service;
+ link_service_init_data.hwss = tm_rb->hwss;
+ /*link_service_init_data.irq_src_dp_sink TODO: ?? */
+ link_service_init_data.irq_src_hpd_rx = hpd_rx_irq_src;
+ link_service_init_data.link_type = link_type;
+ link_service_init_data.num_of_displays = num_of_paths_per_connector;
+ link_service_init_data.adapter_service = tm_rb->as;
+ link_service_init_data.dal_context = tm_rb->dal_context;
+ link_service_init_data.tm = tm_rb->tm;
+
+ link_service = dal_link_service_create(&link_service_init_data);
+
+ if (link_service != NULL) {
+ /* Register link service with resource manager and
+ * display path */
+ bool rc;
+
+ rc = tm_resource_mgr_add_link_service(tm_rb->tm_rm,
+ display_path, link_index, link_service);
+
+ if (false == rc) {
+ dal_link_service_destroy(&link_service);
+
+ link_service = NULL;
+ }
+ }
+
+ return link_service;
+}
+
+/**
+ * Creates link services for each link on the path.
+ *
+ * \return TM_RESULT_SUCCESS: all link services were created successfully,
+ * TM_RESULT_FAILURE: an error occurred
+ */
+static enum tm_result tmrb_create_link_services(
+ struct tm_resource_builder *tm_rb,
+ struct display_path *display_path)
+{
+ struct connector *connector;
+ struct graphics_object_id conn_object_id;
+ uint32_t link_index;
+ struct link_service *link_service;
+ struct dal_context *dal_context = tm_rb->dal_context;
+
+ connector = dal_display_path_get_connector(display_path);
+ conn_object_id = dal_connector_get_graphics_object_id(connector);
+
+ /* Iterate over each link on the path */
+ for (link_index = 0;
+ link_index < dal_display_path_get_number_of_links(display_path);
+ link_index++) {
+
+ switch (dal_display_path_get_query_signal(display_path,
+ link_index)) {
+		/* For a display port path ending with a DP connector we
+		 * create all 3 types of link services, since this is a
+		 * signal-mutable path. */
+
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ if (conn_object_id.id == CONNECTOR_ID_DISPLAY_PORT) {
+
+ tmrb_create_link_service(tm_rb, display_path,
+ link_index,
+ LINK_SERVICE_TYPE_LEGACY);
+
+ tmrb_create_link_service(tm_rb, display_path,
+ link_index,
+ LINK_SERVICE_TYPE_DP_MST);
+ }
+
+ /* Default link service for DP signal is DP-SST */
+ link_service = tmrb_create_link_service(tm_rb,
+ display_path, link_index,
+ LINK_SERVICE_TYPE_DP_SST);
+
+ dal_display_path_set_link_query_interface(
+ display_path, link_index, link_service);
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ /* TODO: add MST manager creation here */
+ link_service = NULL;
+ break;
+
+		/* For the eDP case the signal is not mutable, so we only
+		 * need a DP-SST link service */
+ case SIGNAL_TYPE_EDP:
+ link_service = tmrb_create_link_service(tm_rb,
+ display_path, link_index,
+ LINK_SERVICE_TYPE_DP_SST);
+
+ dal_display_path_set_link_query_interface(
+ display_path, link_index, link_service);
+ break;
+
+ default:
+			/* All remaining signals have only a legacy link
+			 * service */
+ link_service = tmrb_create_link_service(tm_rb,
+ display_path, link_index,
+ LINK_SERVICE_TYPE_LEGACY);
+
+ dal_display_path_set_link_query_interface(
+ display_path, link_index, link_service);
+ break;
+ }
+
+		/* If we do not have a default link service, we consider
+		 * this display path invalid. */
+ if (link_service == NULL) {
+ TM_ERROR("Failed to create default Link Service!");
+ tm_resource_mgr_release_path_link_services(tm_rb->tm_rm,
+ display_path);
+ return TM_RESULT_FAILURE;
+ }
+ } /* for ()*/
+
+ return TM_RESULT_SUCCESS;
+}
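+
+/* Example (illustrative): a signal-mutable path ending in a DP connector
+ * ends up with LEGACY, DP-MST and DP-SST link services on the DP link,
+ * with DP-SST installed as the default query interface; an HDMI path
+ * ends up with only a LEGACY link service. */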
+
+/**
+ * Marks the given resource as active and belonging to a display path.
+ * Resources are marked in TM resource structures - so TM knows whether
+ * to power them up or not.
+ * If a resource belongs to more than one display path, it is considered
+ * a multipath resource.
+ *
+ * \param [in] id: ID of the Graphics Object to activate
+ */
+static enum tm_result tmrb_activate_display_path_resource(
+ struct tm_resource_builder *tm_rb,
+ struct graphics_object_id id)
+{
+ struct tm_resource *tm_resource;
+ struct dal_context *dal_context = tm_rb->dal_context;
+
+ tm_resource = tm_resource_mgr_find_resource(tm_rb->tm_rm, id);
+
+ if (NULL == tm_resource) {
+ /* This implies a serious error - someone has a GO but there
+		 * is no TM Resource which 'knows' about it. A possible
+		 * scenario is that the BIOS parser created an object but
+		 * returned an invalid GO pointer for it. */
+ TM_ERROR("%s: No corresponding TM Resource for a Graphics Object!\n",
+ __func__);
+ return TM_RESULT_FAILURE;
+ }
+
+	/* If the resource is already marked as a "display path resource",
+	 * it becomes a multipath resource. */
+ if (tm_resource->flags.display_path_resource)
+ tm_resource->funcs->set_multi_path(tm_resource, true);
+
+ tm_resource->flags.resource_active = true;
+ tm_resource->flags.display_path_resource = true;
+
+ return TM_RESULT_SUCCESS;
+}
+
+/**
+ * Creates a number of branch MST paths based on the given root path.
+ * Failure to create an individual branch path is logged and the branch
+ * is skipped; this function itself returns nothing.
+ *
+ * \param [in] root_display_path: Root MST display path for which we create
+ *	branch MST paths
+ */
+static void tmrb_clone_mst_paths(struct tm_resource_builder *tm_rb,
+ struct display_path *root_display_path)
+{
+ union display_path_properties props;
+ uint32_t num_of_path_per_connector;
+ uint32_t i;
+ struct display_path *branch_display_path;
+ bool success;
+ uint32_t link_index;
+ struct dcs *dcs;
+ struct connector *connector;
+ struct dal_context *dal_context = tm_rb->dal_context;
+
+ TM_ASSERT(root_display_path != NULL);
+
+ props = dal_display_path_get_properties(root_display_path);
+
+ TM_ASSERT(props.bits.IS_ROOT_DP_MST_PATH);
+
+ /* Total number (including root path) of MST paths to create */
+ num_of_path_per_connector =
+ dal_adapter_service_get_num_of_path_per_dp_mst_connector(
+ tm_rb->as);
+
+ connector = dal_display_path_get_connector(root_display_path);
+
+ /* We start looping from 1 since root display path already exists */
+ for (i = 1; i < num_of_path_per_connector; i++) {
+ /* Create display path */
+ branch_display_path = dal_display_path_clone(root_display_path,
+ false);
+
+ if (NULL == branch_display_path) {
+ TM_ERROR("%s: Failed to clone DisplayPath", __func__);
+ continue;
+ }
+
+ TM_BUILD_DSP_PATH(" Creating branch-MST display path from [0x%p]. New Display path is [0x%p]\n",
+ root_display_path, branch_display_path);
+
+ /* copy signal from source to new display path */
+ success = dal_display_path_set_sink_signal(
+ branch_display_path,
+ SIGNAL_TYPE_DISPLAY_PORT_MST);
+
+ if (false == success)
+ TM_ERROR("%s: Failed to copy sink signal", __func__);
+
+ /* Create DCS */
+ if (success) {
+			/* Each display path needs a DCS to manage the sink
+			 * capability; this applies to the duplicated
+			 * MST path as well. The DDC service in such a path
+			 * is NULL because an MST-only path shouldn't have
+			 * access to the DDC lines. */
+ struct dcs_init_data dcs_init_data;
+
+ dal_memset(&dcs_init_data, 0, sizeof(dcs_init_data));
+
+ dcs_init_data.as = tm_rb->as;
+ dcs_init_data.grph_obj_id =
+ dal_connector_get_graphics_object_id(connector);
+ dcs_init_data.interface_type =
+ dal_tm_utils_signal_type_to_interface_type(
+ SIGNAL_TYPE_DISPLAY_PORT_MST);
+ dcs_init_data.ts = tm_rb->timing_srvc;
+
+ dcs = dal_dcs_create(&dcs_init_data);
+
+			if (NULL != dcs) {
+				dal_display_path_set_dcs(branch_display_path,
+						dcs);
+				/* TODO: when DCS is ready, may need to
+				 * check return code here and set 'success'
+				 * accordingly. */
+			} else {
+				TM_BUILD_DSP_PATH(" x Failed to create DCS\n");
+				TM_ERROR("%s: Failed to create DCS",
+						__func__);
+				success = false;
+			}
+ }
+
+ /* Update properties and display index */
+ if (success) {
+ union display_path_properties props;
+
+ props = dal_display_path_get_properties(
+ branch_display_path);
+
+ props.bits.IS_BRANCH_DP_MST_PATH = 1;
+ props.bits.IS_ROOT_DP_MST_PATH = 0;
+
+ dal_display_path_set_properties(branch_display_path,
+ props);
+
+ /* Setup initial display index (might change during
+ * sort later) */
+ dal_display_path_set_display_index(branch_display_path,
+ tm_rb->num_of_display_paths);
+ }
+
+ /* Assign Link Services (branch MST path need *only*
+ * MST Link Services) */
+ if (success) {
+ struct link_service *link_service;
+
+ for (link_index = 0;
+ link_index
+ < dal_display_path_get_number_of_links(
+ branch_display_path);
+ link_index++) {
+
+ link_service = tm_resource_mgr_get_link_service(
+ tm_rb->tm_rm,
+ root_display_path,
+ link_index,
+ SIGNAL_TYPE_DISPLAY_PORT_MST);
+
+ if (NULL == link_service) {
+ success = false;
+ TM_ERROR("%s: Failed to GET Root Link Service for link index:%d!\n",
+ __func__,
+ link_index);
+ break;
+ }
+
+ if (TM_RESULT_FAILURE ==
+ tm_resource_mgr_add_link_service(
+ tm_rb->tm_rm,
+ branch_display_path,
+ link_index,
+ link_service)) {
+ success = false;
+ TM_ERROR("%s: Failed to add Root Link Service for branch link index:%d!\n",
+ __func__,
+ link_index);
+ break;
+ }
+
+ dal_display_path_set_link_query_interface(
+ branch_display_path,
+ link_index, link_service);
+ } /* for () */
+ } /* if (success) */
+
+ /* Finally we can add the path to TM array */
+ if (success) {
+
+ tm_rb->root_display_paths[tm_rb->num_of_display_paths] =
+ root_display_path;
+
+ tm_rb->display_paths[tm_rb->num_of_display_paths] =
+ branch_display_path;
+
+ tm_rb->num_of_display_paths++;
+ } else {
+
+ TM_BUILD_DSP_PATH(" Destroying unsuccessful branch-MST of display path [0x%p].\n",
+ branch_display_path);
+
+ dcs = dal_display_path_get_dcs(branch_display_path);
+
+ if (NULL != dcs)
+ dal_dcs_destroy(&dcs);
+
+ dal_display_path_destroy(&branch_display_path);
+ }
+ } /* for () */
+}
+
+/**
+ * Once link chain is complete we create and initialise display path.
+ *
+ * \param [in] init_data: Display Path Init Data which includes link chain
+ */
+static void tmrb_create_display_path(
+ struct tm_resource_builder *tm_rb,
+ const struct tm_display_path_init_data *init_data)
+{
+ struct dal_context *dal_context = tm_rb->dal_context;
+ bool success = true;
+ struct display_path *display_path;
+ uint32_t i;
+
+ TM_ASSERT(init_data != NULL);
+
+ if (init_data->num_of_encoders < 1) {
+ TM_ERROR("%s: Invalid number of Encoders:%d!\n", __func__,
+ init_data->num_of_encoders);
+ return;
+ }
+
+ if (NULL == init_data->connector) {
+ TM_ERROR("%s: No connector!\n", __func__);
+ return;
+ }
+
+ /* Create empty display path */
+ display_path = dal_display_path_create();
+ if (display_path == NULL) {
+ TM_ERROR("%s: Failed to allocate display path!\n", __func__);
+ return;
+ }
+
+ /* Add connector */
+ success = dal_display_path_add_connector(display_path,
+ init_data->connector);
+
+ if (false == success)
+ TM_ERROR("%s: Failed to add connector!\n", __func__);
+
+ if (true == success) {
+		/* Add links for each encoder. Order is GPU --> Connector,
+		 * which is the reverse of how encoders are discovered
+		 * (starting at the Connector). */
+
+ /* This is the LAST one discovered, so it is the closest
+ * to GPU. */
+ i = init_data->num_of_encoders;
+ do {
+ enum tm_result tm_result;
+
+ i--; /* "index" is less-by-one than "number" */
+
+ tm_result = tmrb_add_link(tm_rb, display_path,
+ init_data->encoders[i]);
+
+ if (TM_RESULT_SUCCESS != tm_result) {
+ success = false;
+ TM_ERROR("%s: Failed to add link!\n",
+ __func__);
+ break;
+ }
+
+ } while (i != 0);
+ }
+
+ /* Setup properties and validate display path - it will put
+ * display path in valid state including applying active signal */
+ if (true == success) {
+ /* Setup properties */
+ tmrb_set_display_path_properties(tm_rb, init_data,
+ display_path);
+
+ /* Setup initial display index (might change during
+ * sort later) */
+ dal_display_path_set_display_index(display_path,
+ tm_rb->num_of_display_paths);
+
+ if (false == dal_display_path_validate(display_path,
+ init_data->sink_signal)) {
+ TM_ERROR("%s: x Failed to validate display path\n",
+ __func__);
+ success = false;
+ }
+ }
+
+ /* Set device tag */
+ if (true == success) {
+ if (TM_RESULT_SUCCESS != tmrb_update_device_tag(tm_rb,
+ init_data, display_path)) {
+ TM_ERROR("%s: x Failed to update device tag\n",
+ __func__);
+ success = false;
+ }
+ }
+
+ /* Create and assign DCS */
+ if (success) {
+ struct dcs *dcs;
+ struct dcs_init_data dcs_init_data;
+ struct tm_resource *tm_resource_connector;
+
+ dal_memset(&dcs_init_data, 0, sizeof(dcs_init_data));
+
+ tm_resource_connector =
+ tm_resource_mgr_find_resource(
+ tm_rb->tm_rm,
+ dal_connector_get_graphics_object_id(
+ init_data->connector));
+
+ dcs_init_data.as = tm_rb->as;
+ dcs_init_data.grph_obj_id = GRPH_ID(tm_resource_connector);
+ dcs_init_data.interface_type =
+ dal_tm_utils_signal_type_to_interface_type(
+ init_data->sink_signal);
+ dcs_init_data.ts = tm_rb->timing_srvc;
+ dcs_init_data.dal = tm_rb->dal_context;
+
+ dcs = dal_dcs_create(&dcs_init_data);
+
+ if (NULL != dcs) {
+ struct ddc_service *ddc_service;
+
+ ddc_service =
+ TO_CONNECTOR_INFO(tm_resource_connector)->
+ ddc_service;
+
+ if (ddc_service) {
+
+ dal_dcs_update_ddc(dcs, ddc_service);
+
+ dal_display_path_set_dcs(display_path, dcs);
+ } else {
+ TM_ERROR("%s: DDC service is not initialised!\n",
+ __func__);
+ success = false;
+ }
+
+ } else {
+ TM_ERROR("%s: x Failed to create DCS\n", __func__);
+ success = false;
+ }
+ } /* if (success) */
+
+ /* Create link services for each link */
+ if (success)
+ success = tmrb_create_link_services(tm_rb, display_path);
+
+ /* Mark all resources as active */
+ if (success) {
+ if (TM_RESULT_SUCCESS != tmrb_activate_display_path_resource(
+ tm_rb,
+ dal_connector_get_graphics_object_id(
+ dal_display_path_get_connector(
+ display_path)))) {
+ success = false;
+ TM_ERROR("%s:Failed to activate Connector resource!\n",
+ __func__);
+ }
+ }
+
+ if (true == success) {
+ struct encoder *encoder;
+ struct audio *audio;
+
+ /* Walk all links on path and activate all non-NULL
+ * (optional) GOs. */
+ for (i = 0;
+ i < dal_display_path_get_number_of_links(display_path);
+ i++) {
+
+ encoder = dal_display_path_get_upstream_object(
+ display_path, i);
+
+ if (encoder &&
+ tmrb_activate_display_path_resource(
+ tm_rb,
+ dal_encoder_get_graphics_object_id(
+ encoder)) != TM_RESULT_SUCCESS){
+ success = false;
+ TM_ERROR("%s:Failed to activate Encoder resource for link:%d!\n",
+ __func__, i);
+ break;
+ }
+
+ audio = dal_display_path_get_audio_object(
+ display_path, i);
+ if (audio && tmrb_activate_display_path_resource(
+ tm_rb,
+ dal_audio_get_graphics_object_id(audio)) !=
+ TM_RESULT_SUCCESS){
+ success = false;
+ TM_ERROR("%s:Failed to activate Audio resource for link:%d!\n",
+ __func__, i);
+ break;
+ }
+ } /* for() */
+ } /* if (success) */
+
+ if (true == success) {
+ struct connector_device_tag_info *current_device_tag;
+
+ /* Update DDI channel mapping */
+ union ddi_channel_mapping mapping;
+
+ mapping.raw = 0;
+
+ /* TODO: get mapping from AS */
+ dal_display_path_set_ddi_channel_mapping(display_path, mapping);
+
+ /* this is not an MST 'root' path (at this point) */
+ tm_rb->root_display_paths[tm_rb->num_of_display_paths] = NULL;
+
+ /* Add display path to repository */
+ tm_rb->display_paths[tm_rb->num_of_display_paths] =
+ display_path;
+
+ tm_rb->num_of_display_paths++;
+
+ current_device_tag = dal_display_path_get_device_tag(
+ display_path);
+
+ if (current_device_tag->dev_id.device_type == DEVICE_TYPE_CF)
+ tm_rb->num_of_cf_paths++;
+
+ } /* if (success) */
+
+ if (success) {
+ /* Set MST Root property and clone MST paths */
+ if (NULL != tm_resource_mgr_find_link_service(tm_rb->tm_rm,
+ display_path, SIGNAL_TYPE_DISPLAY_PORT_MST)){
+
+ union display_path_properties props;
+
+ props = dal_display_path_get_properties(display_path);
+
+ props.bits.IS_ROOT_DP_MST_PATH = 1;
+
+ dal_display_path_set_properties(display_path, props);
+
+ tmrb_clone_mst_paths(tm_rb, display_path);
+ }
+ } /* if (success) */
+
+ if (success) {
+ TM_BUILD_DSP_PATH(" Display path [0x%p] at index %u was successfully created.\n",
+ display_path, tm_rb->num_of_display_paths - 1);
+ } else {
+ struct dcs *dcs;
+
+ TM_BUILD_DSP_PATH(" Destroying unsuccessful path [0x%p].\n",
+ display_path);
+
+ dcs = dal_display_path_get_dcs(display_path);
+
+ if (NULL != dcs)
+ dal_dcs_destroy(&dcs);
+
+ dal_display_path_destroy(&display_path);
+ }
+}
+
+/**
+ * Recursively adds link HW components (typically encoders) to a display
+ * path. Each call loops over all sources (from VBIOS) of the last added
+ * object ID. For every source we try to branch and create a new link
+ * chain. When a source turns out to be the GPU, the chain is complete
+ * and the display path is created. If no sources exist we have reached
+ * a dead end and simply return.
+ *
+ * \param [in] init_data: Display Path Initialisation Data which includes
+ *	the "previous" link chain object.
+ */
+static void tmrb_build_link_chain(struct tm_resource_builder *tm_rb,
+ struct tm_display_path_init_data *init_data)
+{
+ uint32_t num_of_sources;
+ uint32_t orig_num_of_encoders = init_data->num_of_encoders;
+ uint32_t source;
+ struct graphics_object_id src_object_id;
+ struct adapter_service *as = tm_rb->as;
+ struct tm_resource *tm_resource_input;
+ struct tm_resource *tm_resource_src;
+ struct graphics_object_id this_object_id;
+ struct dal_context *dal_context = tm_rb->dal_context;
+
+ if (init_data->num_of_encoders > MAX_NUM_OF_LINKS_PER_PATH) {
+ /* We don't support more Links than MAX_NUM_OF_LINKS_PER_PATH.
+ * Most likely the VBIOS image is corrupted. */
+ TM_ERROR("%s: Number of Links=%d exceeds Maximum=%d!\n",
+ __func__,
+ init_data->num_of_encoders,
+ MAX_NUM_OF_LINKS_PER_PATH);
+ return;
+ }
+
+ if (init_data->num_of_encoders) {
+ /* we already discovered at least one encoder */
+ tm_resource_input = tm_resource_mgr_find_resource(
+ tm_rb->tm_rm,
+ dal_encoder_get_graphics_object_id(
+ init_data->
+ encoders[init_data->num_of_encoders - 1]));
+ } else {
+ /* no encoders yet, we start at connector */
+ tm_resource_input = tm_resource_mgr_find_resource(
+ tm_rb->tm_rm,
+ dal_connector_get_graphics_object_id(
+ init_data->connector));
+ }
+
+ this_object_id = GRPH_ID(tm_resource_input);
+
+ num_of_sources = dal_adapter_service_get_src_num(as, this_object_id);
+
+ TM_BUILD_DSP_PATH("%s: Current Object: %s has num_of_sources: %d\n",
+ __func__, tm_utils_go_type_to_str(this_object_id),
+ num_of_sources);
+
+ if (!num_of_sources) {
+ /* Base of recursion - no more sources.
+ * What that really means is that we reached a dead-end.
+ * Since we didn't reach GPU we can't build a path,
+ * so simply return. */
+ TM_BUILD_DSP_PATH("%s: No more sources for Object Type: %d\n",
+ __func__, this_object_id.type);
+ return;
+ }
+
+ for (source = 0; source < num_of_sources; source++) {
+
+ src_object_id = dal_adapter_service_get_src_obj(as,
+ this_object_id, source);
+
+ if (false == dal_graphics_object_id_is_valid(src_object_id)) {
+ TM_ERROR("%s: dal_adapter_service_get_src_obj() returned invalid src!\n",
+ __func__);
+ }
+
+ /* For each 'source' object (at this level) the starting
+ * number of encoders must be the same - the original. */
+ init_data->num_of_encoders = orig_num_of_encoders;
+
+ switch (src_object_id.type) {
+ case OBJECT_TYPE_ENCODER: {
+ struct encoder *enc;
+
+ tm_resource_src = tm_resource_mgr_find_resource(
+ tm_rb->tm_rm, src_object_id);
+
+ if (tm_resource_src == NULL) {
+ /* TM resource for this encoder was not
+ * created yet. */
+ struct encoder_init_data enc_init_data;
+
+ dal_memset(&enc_init_data, 0,
+ sizeof(enc_init_data));
+
+ enc_init_data.adapter_service = as;
+ enc_init_data.encoder = src_object_id;
+ enc_init_data.ctx = dal_context;
+
+ enc = dal_encoder_create(&enc_init_data);
+
+ if (!enc) {
+ TM_ERROR("%s: Failed to create Encoder",
+ __func__);
+ break;
+ }
+
+ tm_resource_src =
+ dal_tm_resource_mgr_add_resource(
+ tm_rb->tm_rm,
+ dal_tm_resource_encoder_create(
+ enc));
+ } else {
+ enc = TO_ENCODER(tm_resource_src);
+ }
+
+ if (NULL == tm_resource_src) {
+ TM_ERROR("%s: Failed to add Encoder Resource to display path",
+ __func__);
+ } else {
+ init_data->
+ encoders[init_data->num_of_encoders] =
+ enc;
+
+ init_data->num_of_encoders++;
+
+ /* recursive call only for Encoder */
+ tmrb_build_link_chain(tm_rb, init_data);
+ }
+ break;
+ }
+ case OBJECT_TYPE_GPU:
+ TM_BUILD_DSP_PATH("%s: reached GPU. Building path...\n",
+ __func__);
+
+ tmrb_create_display_path(tm_rb, init_data);
+ break;
+
+ default:
+ TM_ERROR("%s: Unknown graphics object!", __func__);
+ break;
+
+ } /* switch ()*/
+ } /* for () */
+}
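+
+/* Illustrative walk (hypothetical objects): for a DP connector whose only
+ * VBIOS source is encoder E1, whose only source in turn is the GPU, the
+ * recursion visits
+ *
+ *	connector -> E1 -> GPU
+ *
+ * and tmrb_create_display_path() is invoked once with encoders[] = {E1}.
+ * A connector listing two encoder sources would branch into two candidate
+ * link chains, one per source. */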
+
+static void tmrb_build_single_display_path(struct tm_resource_builder *tm_rb,
+ uint8_t connector_index)
+{
+ struct dal_context *dal_context = tm_rb->dal_context;
+ struct graphics_object_id connector_obj_id;
+ struct connector *connector;
+ struct tm_resource *tm_resource;
+ struct connector_signals default_signals;
+ uint32_t signal;
+ struct tm_display_path_init_data path_init_data;
+ struct ddc_service_init_data ddc_init_data;
+
+ connector_obj_id = dal_adapter_service_get_connector_obj_id(tm_rb->as,
+ connector_index);
+
+ if (connector_obj_id.type != OBJECT_TYPE_CONNECTOR) {
+ TM_WARNING("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d!\n",
+ __func__, connector_index);
+ return;
+ }
+
+ /* Note that 'connector' will be deallocated when 'tm_rb->tm_rm' is
+ * destroyed, so Resource Builder should not worry about it. */
+ connector = dal_connector_create(tm_rb->as, connector_obj_id);
+ if (NULL == connector) {
+ TM_WARNING("%s: Failed to create connector object!\n",
+ __func__);
+ return;
+ }
+
+ tm_resource = dal_tm_resource_mgr_add_resource(tm_rb->tm_rm,
+ dal_tm_resource_connector_create(connector));
+
+ if (NULL == tm_resource) {
+ TM_WARNING("TM_RB: failed to add connector resource for connector index: %d\n",
+ connector_index);
+ return;
+ }
+
+ dal_memset(&path_init_data, 0, sizeof(path_init_data));
+
+ ddc_init_data.as = tm_rb->as;
+ ddc_init_data.id = connector_obj_id;
+ ddc_init_data.ctx = dal_context;
+ /* Note that DDC Service is freed by Resource Manager via call to
+ * dal_ddc_service_destroy(). */
+ TO_CONNECTOR_INFO(tm_resource)->ddc_service =
+ dal_ddc_service_create(&ddc_init_data);
+
+ if (NULL == TO_CONNECTOR_INFO(tm_resource)->ddc_service) {
+ TM_WARNING("TM_RB: failed to create DDC service for connector index:%d!\n",
+ connector_index);
+ return;
+ }
+
+ path_init_data.ddc_service =
+ TO_CONNECTOR_INFO(tm_resource)->ddc_service;
+ path_init_data.connector = TO_CONNECTOR_INFO(tm_resource)->connector;
+
+ /* This is a real path so set fake path device type as unknown. */
+ path_init_data.faked_path_device_id.device_type = DEVICE_TYPE_UNKNOWN;
+
+ default_signals = dal_connector_get_default_signals(connector);
+
+ TM_BUILD_DSP_PATH(
+ "TM_RB: connector_index:%d: num_of_default_signals:%d\n",
+ connector_index, default_signals.number_of_signals);
+
+ /* Loop over all default signals of the connector. */
+ for (signal = 0; signal < default_signals.number_of_signals; signal++) {
+
+ path_init_data.num_of_encoders = 0;
+ path_init_data.sink_signal = default_signals.signal[signal];
+
+ tmrb_build_link_chain(tm_rb, &path_init_data);
+ }
+
+ TM_BUILD_DSP_PATH("Finished building display paths for connector index: %d.\n",
+ connector_index);
+}
+
+/**
+ * Builds display paths. Goes over all connectors, expanding each
+ * connector's default signals.
+ * For every such signal it tries to create a display path which starts
+ * at the current connector.
+ *
+ * \param tm_rb A pointer to TM Resource Builder.
+ *
+ * \return TM_RESULT_SUCCESS - no error, TM_RESULT_FAILURE - error.
+ */
+enum tm_result tm_resource_builder_build_display_paths(
+ struct tm_resource_builder *tm_rb)
+{
+ uint8_t connectors_num;
+ uint8_t connector_index;
+ struct dal_context *dal_context = tm_rb->dal_context;
+
+ TM_IFACE_TRACE();
+
+ connectors_num = dal_adapter_service_get_connectors_num(tm_rb->as);
+
+ if (connectors_num == 0 || connectors_num > ENUM_ID_COUNT) {
+ TM_ERROR("%s: Invalid Number of Connectors: %d!\n",
+ __func__, connectors_num);
+ return TM_RESULT_FAILURE;
+ }
+
+	/* First we need to allocate storage for link services.
+	 * We don't know how much storage is needed, so start from
+	 * some default. */
+ if (TM_RESULT_FAILURE == tm_resource_mgr_setup_link_storage(
+ tm_rb->tm_rm, TM_RB_MAX_NUM_OF_DISPLAY_PATHS)) {
+ TM_ERROR("%s: tm_resource_mgr_setup_link_storage() failed!\n",
+ __func__);
+ return TM_RESULT_FAILURE;
+ }
+
+ /* loop over all connectors */
+ for (connector_index = 0;
+ connector_index < connectors_num;
+ connector_index++) {
+
+ tmrb_build_single_display_path(tm_rb, connector_index);
+ }
+
+ return TM_RESULT_SUCCESS;
+}
+
+
+enum tm_result tm_resource_builder_add_fake_display_paths(
+ struct tm_resource_builder *tm_rb)
+{
+ struct dal_context *dal_context = tm_rb->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return TM_RESULT_SUCCESS;
+}
+
+/**
+ * \brief
+ * Creates and adds resources required for audio
+ *
+ * \return
+ * Number of successfully added audio resources
+ */
+static uint32_t add_audio_resources(struct tm_resource_builder *tm_rb)
+{
+ uint32_t index = 0;
+ struct graphics_object_id obj_id;
+ struct tm_resource *tm_resource = NULL;
+ struct audio *audio = NULL;
+ struct audio_init_data init_data;
+ struct dal_context *dal_context = tm_rb->dal_context;
+
+ do {
+ obj_id = dal_adapter_service_enum_audio_object(tm_rb->as,
+ index);
+
+ if (false == dal_graphics_object_id_is_valid(obj_id)) {
+ /* no more valid audio objects */
+ break;
+ }
+
+ dal_memset(&init_data, 0, sizeof(init_data));
+
+ init_data.as = tm_rb->as;
+ init_data.audio_stream_id = obj_id;
+ init_data.dal_context = tm_rb->dal_context;
+
+ audio = dal_audio_create(&init_data);
+ if (NULL == audio) {
+ TM_ERROR("%s: dal_audio_create() failed!\n", __func__);
+ break;
+ }
+
+ tm_resource = dal_tm_resource_mgr_add_resource(
+ tm_rb->tm_rm,
+ dal_tm_resource_audio_create(audio));
+ if (NULL == tm_resource) {
+ TM_ERROR("%s: failed add audio resource!\n", __func__);
+ dal_audio_destroy(&audio);
+ break;
+ }
+
+ tm_resource->flags.resource_active = true;
+ index++;
+
+ } while (1);
+
+ TM_RESOURCES("Audio resource count:%d\n", index);
+
+ return index; /* this is the count of added audio resources */
+}
+
+/* Add features such as: Audio, Stereo... */
+enum tm_result tm_resource_builder_add_feature_resources(
+ struct tm_resource_builder *tm_rb)
+{
+ if (!tm_rb->num_of_display_paths) {
+ /* If there are no paths then we can't add features. */
+ return TM_RESULT_FAILURE;
+ }
+
+ add_audio_resources(tm_rb);
+
+ return TM_RESULT_SUCCESS;
+}
+
+/**
+ * Swaps entries of two displays, including corresponding link services.
+ *
+ * \param [in] index1: display 1 to swap
+ * \param [in] index2: display 2 to swap
+ */
+static void tmrb_swap_entries(struct tm_resource_builder *tm_rb,
+		unsigned int index1, unsigned int index2)
+{
+ struct display_path *display_path;
+
+ display_path = tm_rb->display_paths[index1];
+ tm_rb->display_paths[index1] = tm_rb->display_paths[index2];
+ tm_rb->display_paths[index2] = display_path;
+
+ display_path = tm_rb->root_display_paths[index1];
+ tm_rb->root_display_paths[index1] = tm_rb->root_display_paths[index2];
+ tm_rb->root_display_paths[index2] = display_path;
+
+ dal_display_path_set_display_index(tm_rb->display_paths[index1],
+ index1);
+
+ dal_display_path_set_display_index(tm_rb->display_paths[index2],
+ index2);
+
+ tm_resource_mgr_swap_link_services(tm_rb->tm_rm, index1, index2);
+}
+
+static void tm_resource_builder_sort_display_paths_by_dev_priority(
+ struct tm_resource_builder *tm_rb)
+{
+ uint32_t num_of_sorted_paths = 0;
+ uint32_t type;
+ uint32_t i;
+ struct connector_device_tag_info *connector_device_tag_info;
+
+ /* Sort by device type priority */
+ for (type = 0; type < tmrb_num_of_devices_in_order_enumeration;
+ type++) {
+ for (i = num_of_sorted_paths;
+ i < tm_rb->num_of_display_paths
+ && num_of_sorted_paths
+ < tm_rb->num_of_display_paths;
+ i++) {
+
+ connector_device_tag_info =
+ dal_display_path_get_device_tag(
+ tm_rb->display_paths[i]);
+
+ if (connector_device_tag_info->dev_id.device_type ==
+ tmrb_device_enumeration_order[type]){
+
+ tmrb_swap_entries(tm_rb,
+ num_of_sorted_paths, i);
+ num_of_sorted_paths++;
+ }
+ } /* for () */
+ } /* for () */
+}
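+
+/* Illustrative example: with tmrb_device_enumeration_order as defined at
+ * the top of this file, paths tagged {DFP, LCD, CRT, DFP} are reordered
+ * to {LCD, CRT, DFP, DFP}. Paths of the same type are then ordered by
+ * enum_id in the pass below. */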
+
+static void tm_resource_builder_sort_display_paths_by_dev_enum(
+ struct tm_resource_builder *tm_rb)
+{
+ bool flipped;
+ uint32_t i;
+ struct connector_device_tag_info *current_device_tag_info;
+ struct connector_device_tag_info *next_device_tag_info;
+
+ flipped = true;
+ while (flipped) {
+ flipped = false;
+ for (i = 0; i < tm_rb->num_of_display_paths - 1; i++) {
+
+ current_device_tag_info =
+ dal_display_path_get_device_tag(
+ tm_rb->display_paths[i]);
+
+ next_device_tag_info =
+ dal_display_path_get_device_tag(
+ tm_rb->display_paths[i + 1]);
+
+ if (current_device_tag_info->dev_id.device_type !=
+ DEVICE_TYPE_UNKNOWN &&
+ (current_device_tag_info->dev_id.device_type ==
+ next_device_tag_info->dev_id.device_type) &&
+ (current_device_tag_info->dev_id.enum_id >
+ next_device_tag_info->dev_id.enum_id)) {
+
+ tmrb_swap_entries(tm_rb, i, i + 1);
+ flipped = true;
+
+ }
+ } /* for () */
+ } /* while () */
+}
+
+/* Move root MST paths before branch MST paths */
+static void tmrb_move_root_mst_paths_before_branch_mst_paths(
+ struct tm_resource_builder *tm_rb)
+{
+ uint32_t i;
+ uint32_t j;
+
+ for (i = 0; i < tm_rb->num_of_display_paths - 1; i++) {
+
+ if (tm_rb->root_display_paths[i] != NULL) {
+
+ for (j = i + 1; j < tm_rb->num_of_display_paths; j++) {
+
+ if (tm_rb->display_paths[j] ==
+ tm_rb->root_display_paths[i]) {
+ tmrb_swap_entries(tm_rb, i, j);
+ break;
+ }
+ } /* for () */
+ } /* if () */
+ } /* for () */
+}
+
+/**
+ * Reads default display type from persistent storage.
+ *
+ * \return TM Display type matching default display stored in
+ * persistent storage.
+ */
+static enum tm_display_type tmrb_get_default_display_type(
+ struct tm_resource_builder *tm_rb)
+{
+ struct dal_context *dal_context = tm_rb->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return TM_DISPLAY_TYPE_UNK;
+}
+
+static void tmrb_put_default_display_on_top_of_the_list(
+ struct tm_resource_builder *tm_rb)
+{
+ uint32_t num_of_sorted_paths = 0;
+ uint32_t i;
+ enum tm_display_type default_display_type =
+ tmrb_get_default_display_type(tm_rb);
+ struct connector_device_tag_info *device_tag_info;
+
+ if (TM_DISPLAY_TYPE_UNK == default_display_type)
+ return;
+
+ for (i = num_of_sorted_paths;
+ i < tm_rb->num_of_display_paths
+ && num_of_sorted_paths < tm_rb->num_of_display_paths;
+ i++) {
+
+ device_tag_info = dal_display_path_get_device_tag(
+ tm_rb->display_paths[i]);
+
+ if (default_display_type ==
+ tm_utils_device_id_to_tm_display_type(
+ device_tag_info->dev_id)) {
+
+ tmrb_swap_entries(tm_rb, num_of_sorted_paths, i);
+ num_of_sorted_paths++;
+ }
+ }
+}
+
+void tm_resource_builder_sort_display_paths(struct tm_resource_builder *tm_rb)
+{
+ struct dal_context *dal_context = tm_rb->dal_context;
+
+ TM_ASSERT(tm_rb->num_of_display_paths > 0);
+
+	if (tm_rb->num_of_display_paths == 0)
+ return;
+
+ /* Sort by device type priority */
+ tm_resource_builder_sort_display_paths_by_dev_priority(tm_rb);
+
+ /* Sort by device enum (within same device type) */
+ tm_resource_builder_sort_display_paths_by_dev_enum(tm_rb);
+
+ /* Move root MST paths before branch MST paths */
+ tmrb_move_root_mst_paths_before_branch_mst_paths(tm_rb);
+
+ /* Feature 8464. Put default display on top of the list */
+ tmrb_put_default_display_on_top_of_the_list(tm_rb);
+}
+
+uint32_t tm_resource_builder_get_num_of_paths(
+ struct tm_resource_builder *tm_rb)
+{
+ return tm_rb->num_of_display_paths;
+}
+
+struct display_path *tm_resource_builder_get_path_at(
+ struct tm_resource_builder *tm_rb,
+ uint32_t index)
+{
+ struct dal_context *dal_context = tm_rb->dal_context;
+
+ TM_ASSERT(index < tm_rb->num_of_display_paths);
+
+ if (index < tm_rb->num_of_display_paths)
+ return tm_rb->display_paths[index];
+
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/dal/topology/tm_resource_builder.h b/drivers/gpu/drm/amd/dal/topology/tm_resource_builder.h
new file mode 100644
index 000000000000..dc2fd3686672
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/topology/tm_resource_builder.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#ifndef __DAL_TM_RESOURCE_BUILDER_H__
+#define __DAL_TM_RESOURCE_BUILDER_H__
+
+/* Internal includes */
+#include "tm_resource_mgr.h"
+
+
+/**
+ *****************************************************************************
+ * TM Resource Builder
+ *
+ * The TM Resource Builder is responsible for creating display paths and
+ * objects for HW resources.
+ * Typically this class is needed only during the initialisation
+ * phase of the Topology Manager.
+ *****************************************************************************
+ */
+
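+/*
+ * A minimal usage sketch (an illustration, not code from this patch):
+ * during initialisation the Topology Manager is expected to drive the
+ * builder roughly as follows, then hand the built paths over:
+ *
+ *	struct tm_resource_builder *rb =
+ *		tm_resource_builder_create(&init_data);
+ *
+ *	if (rb != NULL &&
+ *		tm_resource_builder_create_gpu_resources(rb) ==
+ *			TM_RESULT_SUCCESS &&
+ *		tm_resource_builder_build_display_paths(rb) ==
+ *			TM_RESULT_SUCCESS)
+ *		tm_resource_builder_sort_display_paths(rb);
+ *
+ *	tm_resource_builder_destroy(&rb);
+ */
+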
+/* structures */
+struct tm_resource_builder_init_data {
+ struct dal_context *dal_context;
+ struct adapter_service *adapter_service;
+ struct timing_service *timing_service;
+ struct irq_manager *irq_manager;
+ struct hw_sequencer *hwss;
+ struct tm_resource_mgr *resource_mgr;
+ struct topology_mgr *tm;
+};
+
+/* functions */
+struct tm_resource_builder*
+tm_resource_builder_create(
+ const struct tm_resource_builder_init_data *init_data);
+
+void tm_resource_builder_destroy(struct tm_resource_builder **tm_rb);
+
+enum tm_result tm_resource_builder_create_gpu_resources(
+ struct tm_resource_builder *tm_rb);
+
+enum tm_result tm_resource_builder_build_display_paths(
+ struct tm_resource_builder *tm_rb);
+
+enum tm_result tm_resource_builder_add_fake_display_paths(
+ struct tm_resource_builder *tm_rb);
+
+enum tm_result tm_resource_builder_add_feature_resources(
+ struct tm_resource_builder *tm_rb);
+
+void tm_resource_builder_sort_display_paths(
+ struct tm_resource_builder *tm_rb);
+
+uint32_t tm_resource_builder_get_num_of_paths(
+ struct tm_resource_builder *tm_rb);
+
+struct display_path *tm_resource_builder_get_path_at(
+ struct tm_resource_builder *tm_rb,
+ uint32_t index);
+
+#endif /* __DAL_TM_RESOURCE_BUILDER_H__ */
diff --git a/drivers/gpu/drm/amd/dal/topology/tm_resource_mgr.c b/drivers/gpu/drm/amd/dal/topology/tm_resource_mgr.c
new file mode 100644
index 000000000000..42634ed6654d
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/topology/tm_resource_mgr.c
@@ -0,0 +1,3178 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dal_services.h"
+
+#include "include/connector_interface.h"
+#include "include/encoder_interface.h"
+#include "include/controller_interface.h"
+#include "include/audio_interface.h"
+#include "include/controller_interface.h"
+#include "include/dcs_interface.h"
+#include "include/ddc_service_interface.h"
+#include "include/vector.h"
+#include "include/flat_set.h"
+
+#include "tm_resource_mgr.h"
+#include "tm_internal_types.h"
+#include "tm_utils.h"
+
+/*****************************************************************************
+ * private data structures
+ ***************************************************************************/
+
+struct tm_resource_mgr {
+ struct dal_context *dal_context;
+ struct adapter_service *as;
+
+ struct vector *link_services;
+ /* This is the number of paths as set by
+ * tm_resource_mgr_setup_link_storage(). */
+ uint32_t link_services_number_of_paths;
+
+ struct gpu *gpu_interface;
+
+ bool prioritize_controllers;
+
+ struct flat_set *resources;
+
+ struct tm_resource_range resources_range[OBJECT_TYPE_COUNT];
+
+ /* This lookup will be used to translate IRQ source to Display Index.
+ * The translation will occur on every Vblank interrupt. */
+ uint32_t controller_to_display_path_lookup[CONTROLLER_ID_MAX + 1];
+
+ /* If true - this RM (Resource Manager) was cloned from the
+ * one-and-only original RM which was created during TM creation.
+ * A cloned RM should not destroy objects which it copied
+ * from the original RM because the original RM continues to
+ * reference them. */
+ bool is_cloned;
+
+ bool pipe_power_gating_enabled;
+};
+
+/* local macro definitions */
+/* TODO: check if this is too much/not enough */
+#define TM_RM_MAX_NUM_OF_RESOURCES 100
+
+DAL_VECTOR_AT_INDEX(link_services, struct link_service **)
+DAL_VECTOR_SET_AT_INDEX(link_services, struct link_service **)
+
+DAL_FLAT_SET_AT_INDEX(tmrm_resources, struct tm_resource **)
+DAL_FLAT_SET_INSERT(tmrm_resources, struct tm_resource **)
+
+/*****************************************************************************
+ * function prototypes
+ ***************************************************************************/
+
+static void tmrm_release_clock_source(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ struct clock_source *clock_source,
+ enum tm_acquire_method method);
+
+
+/*****************************************************************************
+ * private functions
+ ***************************************************************************/
+static uint32_t tm_resource_ref_counter_increment(
+ const struct tm_resource_mgr *tm_rm,
+ struct tm_resource *tm_resource)
+{
+ struct dal_context *dal_context = tm_rm->dal_context;
+ uint32_t current_count = TM_RES_REF_CNT_GET(tm_resource);
+
+ if (current_count != 0) {
+ /* Ideally, we should allow only 0-->1 transition.
+ *
+ * But, in theory a resource could be sharable, this is
+ * why we treat it as a Warning.
+ *
+ * In practice, we get here if resource usage is unbalanced!
+ *
+ * Do NOT remove this warning unless you are absolutely sure
+ * that it should be removed! */
+ TM_WARNING("%s: increment a non-zero count: %d?\n",
+ __func__, current_count);
+ ASSERT(false);
+ }
+
+ TM_RES_REF_CNT_INCREMENT(tm_resource);
+
+ return TM_RES_REF_CNT_GET(tm_resource);
+}
+
+uint32_t tm_resource_mgr_ref_counter_decrement(
+ const struct tm_resource_mgr *tm_rm,
+ struct tm_resource *tm_resource)
+{
+ struct dal_context *dal_context = tm_rm->dal_context;
+ uint32_t current_count = TM_RES_REF_CNT_GET(tm_resource);
+
+ if (current_count != 1) {
+ /* Ideally, we should allow only 1-->0 transition.
+ *
+ * But, in theory a resource could be sharable, this is
+ * why we treat it as a Warning.
+ *
+ * In practice, we get here if resource usage is unbalanced!
+ *
+ * Do NOT remove this warning unless you are absolutely sure
+ * that it should be removed! */
+ TM_WARNING("%s: decrement a non-one count: %d?\n",
+ __func__, current_count);
+ ASSERT(false);
+ }
+
+ TM_RES_REF_CNT_DECREMENT(tm_resource);
+
+ return TM_RES_REF_CNT_GET(tm_resource);
+}
+
+
+static bool is_resource_available(const struct tm_resource *tm_resource)
+{
+ if (TM_RES_REF_CNT_GET(tm_resource) == 0)
+ return true;
+
+ return false;
+}
+
+/**
+ * Returns true if during acquire we need to change HW state
+ * in display path context.
+ *
+ * It is very important that we don't change HW State during
+ * cofunctional validation!
+ *
+ * Note: even if this function returns 'true', the action may still depend
+ * on the value of "resource reference count".
+ * The most important cases are the transitions of the "reference count"
+ * from 0 to 1 and from 1 to 0.
+ *
+ * \param [in] method: How to acquire resources/How resources were acquired
+ *
+ * \return true: if during acquire we need to activate resources,
+ * false: otherwise
+ */
+static inline bool update_hw_state_needed(enum tm_acquire_method method)
+{
+ return (method == TM_ACQUIRE_METHOD_HW);
+}
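+
+/*
+ * A sketch of the intended call pattern (an illustration; compare with
+ * dal_tmrm_acquire_controller() later in this file). HW updates are keyed
+ * off both this check and the reference count transition:
+ *
+ *	if (tm_resource_ref_counter_increment(tm_rm, tm_resource) == 1 &&
+ *			update_hw_state_needed(method))
+ *		touch_hw_state();
+ *
+ * i.e. HW is touched only on the 0 -> 1 transition of the reference count,
+ * and only for TM_ACQUIRE_METHOD_HW. touch_hw_state() is a placeholder,
+ * not a real function.
+ */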
+
+static bool tm_rm_less_than(
+ const void *tgt_item,
+ const void *ref_item);
+
+static enum tm_result tm_resource_mgr_construct(struct tm_resource_mgr *tm_rm)
+{
+ struct dal_context *dal_context = tm_rm->dal_context;
+ struct flat_set_init_data init;
+
+ init.capacity = TM_RM_MAX_NUM_OF_RESOURCES;
+ init.funcs.less_than = tm_rm_less_than;
+ init.struct_size = sizeof(struct tm_resource *);
+ /* Note that Resource array will store pointers to structures,
+ * not structures. */
+ tm_rm->resources = dal_flat_set_create(&init);
+
+ if (NULL == tm_rm->resources) {
+		TM_ERROR("%s: failed to create 'resources' set!\n",
+ __func__);
+ return TM_RESULT_FAILURE;
+ }
+
+ dal_memset(tm_rm->controller_to_display_path_lookup,
+ INVALID_DISPLAY_INDEX,
+ sizeof(tm_rm->controller_to_display_path_lookup));
+
+ /* This flag is for "Tiled Pipes power gating" feature. */
+ tm_rm->prioritize_controllers = true;
+
+ tm_rm->pipe_power_gating_enabled =
+ dal_adapter_service_is_feature_supported(
+ FEATURE_POWER_GATING_PIPE_IN_TILE);
+
+ TM_PWR_GATING("Display Pipe Power Gating option: %s\n",
+ (tm_rm->pipe_power_gating_enabled == true ? "Enabled" :
+ "Disabled"));
+
+ return TM_RESULT_SUCCESS;
+}
+
+/** create the TM Resource Manager */
+struct tm_resource_mgr*
+tm_resource_mgr_create(struct tm_resource_mgr_init_data *init_data)
+{
+ struct tm_resource_mgr *tm_rm;
+ struct dal_context *dal_context = init_data->dal_context;
+
+ TM_IFACE_TRACE();
+
+ tm_rm = dal_alloc(sizeof(*tm_rm));
+
+ if (!tm_rm) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ tm_rm->dal_context = init_data->dal_context;
+ tm_rm->as = init_data->as;
+
+ if (TM_RESULT_FAILURE == tm_resource_mgr_construct(tm_rm)) {
+ dal_free(tm_rm);
+ return NULL;
+ }
+
+ return tm_rm;
+}
+
+static struct tm_resource *tmrm_display_path_find_connector_resource(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ struct tm_resource *tm_resource;
+ struct connector *connector;
+ struct graphics_object_id id;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ connector = dal_display_path_get_connector(display_path);
+
+ TM_ASSERT(connector != NULL);
+
+ if (NULL == connector) {
+ /* can't proceed */
+ return NULL;
+ }
+
+ id = dal_connector_get_graphics_object_id(connector);
+
+ tm_resource = tm_resource_mgr_find_resource(tm_rm, id);
+
+ TM_ASSERT(tm_resource != NULL);
+
+ return tm_resource;
+}
+
+static struct tm_resource *tmrm_display_path_find_upstream_resource(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ uint32_t link_idx)
+{
+ struct tm_resource *tm_resource;
+ struct encoder *encoder;
+ struct graphics_object_id id;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ encoder = dal_display_path_get_upstream_object(display_path,
+ link_idx);
+
+ if (NULL == encoder) {
+ /* not necessarily an error */
+ return NULL;
+ }
+
+ id = dal_encoder_get_graphics_object_id(encoder);
+
+ tm_resource = tm_resource_mgr_find_resource(tm_rm, id);
+
+ TM_ASSERT(tm_resource != NULL);
+
+ return tm_resource;
+}
+
+/**
+ * Find TM resource which contains pointer to Audio object currently set
+ * on a path.
+ *
+ * \return NULL if: 1. the path has no Audio set on it, or 2. Audio is set
+ * on the path but no matching TM Resource was found (should never happen).
+ *
+ * Otherwise: pointer to the TM resource for the audio object.
+ */
+static struct tm_resource *tmrm_display_path_find_audio_resource(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ uint32_t link_idx)
+{
+ struct tm_resource *tm_resource;
+ struct audio *audio;
+ struct graphics_object_id id;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ audio = dal_display_path_get_audio_object(display_path, link_idx);
+
+ if (NULL == audio) {
+ /* not necessarily an error */
+ return NULL;
+ }
+
+ id = dal_audio_get_graphics_object_id(audio);
+
+ tm_resource = tm_resource_mgr_find_resource(tm_rm, id);
+
+ TM_ASSERT(tm_resource != NULL);
+
+ return tm_resource;
+}
+
+static struct tm_resource *tmrm_find_clock_source_resource(
+ struct tm_resource_mgr *tm_rm,
+ const struct clock_source *clock_source)
+{
+ struct tm_resource *tm_resource;
+ struct graphics_object_id id;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ id = dal_clock_source_get_graphics_object_id(clock_source);
+
+ tm_resource = tm_resource_mgr_find_resource(tm_rm, id);
+
+ TM_ASSERT(tm_resource != NULL);
+
+ return tm_resource;
+}
+
+static struct tm_resource *tmrm_display_path_find_alternative_clock_resource(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ struct tm_resource *tm_resource;
+ struct clock_source *clock_source;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ clock_source = dal_display_path_get_alt_clock_source(display_path);
+
+ if (NULL == clock_source) {
+ /* not necessarily an error */
+ return NULL;
+ }
+
+ tm_resource = tmrm_find_clock_source_resource(tm_rm, clock_source);
+
+ /* should not happen */
+ TM_ASSERT(tm_resource != NULL);
+
+ return tm_resource;
+}
+
+static struct link_service *tmrm_get_ls_at_index(struct tm_resource_mgr *tm_rm,
+ uint32_t index)
+{
+ struct link_service **link_service_item;
+ struct link_service *link_service;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ link_service_item = link_services_vector_at_index(
+ tm_rm->link_services, index);
+
+ if (NULL == link_service_item) {
+ TM_ERROR("%s: no item at index:%d!\n", __func__, index);
+ return NULL;
+ }
+
+ link_service = *link_service_item;
+
+ return link_service;
+}
+
+static void tmrm_set_ls_at_index(struct tm_resource_mgr *tm_rm, uint32_t index,
+ struct link_service *link_service)
+{
+ link_services_vector_set_at_index(tm_rm->link_services,
+ &link_service, index);
+}
+
+static void tm_resource_mgr_destruct(struct tm_resource_mgr *tm_rm)
+{
+ uint32_t count;
+ uint32_t index;
+ struct tm_resource *resource;
+ /* Only the original RM should delete these objects. */
+ if (false == tm_rm->is_cloned) {
+ tm_resource_mgr_release_all_link_services(tm_rm);
+ }
+
+ /* Go over all entries in tm_rm->resource_vector
+ * and destroy 'struct tm_resource->go_interface' */
+ count = tm_resource_mgr_get_total_resources_num(tm_rm);
+
+ for (index = 0; index < count; index++) {
+ resource = tm_resource_mgr_enum_resource(tm_rm, index);
+ resource->funcs->destroy(&resource);
+ }
+
+ /* Delete vectors which stored the objects, if any. */
+ if (tm_rm->resources != NULL)
+ dal_flat_set_destroy(&tm_rm->resources);
+
+ if (tm_rm->link_services != NULL)
+ dal_vector_destroy(&tm_rm->link_services);
+
+}
+
+/** destroy the TM Resource Manager */
+void tm_resource_mgr_destroy(struct tm_resource_mgr **tm_rm)
+{
+ if (!tm_rm || !*tm_rm) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+ tm_resource_mgr_destruct(*tm_rm);
+ dal_free(*tm_rm);
+ *tm_rm = NULL;
+}
+
+static void tmrm_resource_release_hw(struct tm_resource_mgr *tm_rm,
+ struct tm_resource *tm_resource)
+{
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ if (tm_resource == NULL) {
+ TM_ERROR("%s: invalid input!\n", __func__);
+ return;
+ }
+
+ tm_resource->funcs->release_hw(tm_resource);
+}
+
+void tm_resource_mgr_release_hw(struct tm_resource_mgr *tm_rm)
+{
+ uint32_t i;
+ struct link_service *link_service;
+ struct tm_resource *tm_resource;
+
+ if (tm_rm->link_services != NULL) {
+
+ /* 1. Call Link Services to release HW access */
+ for (i = 0;
+ i < dal_vector_get_count(tm_rm->link_services);
+ i++) {
+
+ link_service = tmrm_get_ls_at_index(tm_rm, i);
+
+ if (link_service != NULL)
+ dal_ls_release_hw(link_service);
+ }
+ }
+
+ /* 2. Call GPU and all GPU sub-components to release HW access */
+ if (tm_rm->gpu_interface != NULL)
+ dal_gpu_release_hw(tm_rm->gpu_interface);
+
+ /* 3. Release HW access on all graphics objects */
+ for (i = 0; i < tm_resource_mgr_get_total_resources_num(tm_rm); i++) {
+ tm_resource = tm_resource_mgr_enum_resource(tm_rm, i);
+ tmrm_resource_release_hw(tm_rm, tm_resource);
+ }
+}
+
+/**
+ * Clones Resource Manager and resets usage counters.
+ *
+ * The cloned resources should have exactly the same functionality, except
+ * that during acquire/release they should not update DCS state (this is
+ * temporary; ideally DCS should not be touched at all).
+ *
+ * \return Pointer to cloned Resource Manager
+ */
+struct tm_resource_mgr *tm_resource_mgr_clone(
+ struct tm_resource_mgr *tm_rm_other)
+{
+ struct dal_context *dal_context = tm_rm_other->dal_context;
+ struct tm_resource_mgr *tm_rm_new;
+ struct tm_resource_mgr_init_data init_data;
+ bool error = false;
+ uint32_t i = 0;
+ uint32_t count;
+
+ dal_memset(&init_data, 0, sizeof(init_data));
+
+ init_data.as = tm_rm_other->as;
+ init_data.dal_context = tm_rm_other->dal_context;
+
+ do {
+ tm_rm_new = tm_resource_mgr_create(&init_data);
+ if (NULL == tm_rm_new) {
+ TM_ERROR("%s: failed to create new TMRM!\n", __func__);
+ error = true;
+ break;
+ }
+
+		/* 1. We should properly deallocate the vectors, as we will
+		 * clone them from the original resource manager in step 3. */
+
+ dal_flat_set_destroy(&tm_rm_new->resources);
+
+ /* 2. do the 'shallow' copy */
+ dal_memmove(tm_rm_new, tm_rm_other, sizeof(*tm_rm_other));
+
+ /* It doesn't matter if the 'other' is cloned or not,
+ * what is important is that the new one is cloned. */
+ tm_rm_new->is_cloned = true;
+
+ /* 3. clone the vectors */
+ /********** link services vector **********/
+ tm_rm_new->link_services = dal_vector_clone(
+ tm_rm_other->link_services);
+
+ if (NULL == tm_rm_new->link_services) {
+ TM_ERROR("%s: failed to clone LS vector!\n", __func__);
+ error = true;
+ break;
+ }
+
+ /********* resources set ***********/
+
+ {
+ struct flat_set_init_data init_data;
+
+ init_data.capacity = TM_RM_MAX_NUM_OF_RESOURCES;
+ init_data.funcs.less_than = tm_rm_less_than;
+ init_data.struct_size = sizeof(struct tm_resource *);
+ tm_rm_new->resources = dal_flat_set_create(&init_data);
+ }
+ count = dal_flat_set_get_count(tm_rm_other->resources);
+ for (i = 0; i < count; ++i) {
+ struct tm_resource *resource =
+ *tmrm_resources_set_at_index(
+ tm_rm_other->resources,
+ i);
+ resource = resource->funcs->clone(resource);
+ tmrm_resources_set_insert(
+ tm_rm_new->resources,
+ &resource);
+ }
+
+ } while (0);
+
+	if (!error)
+		tm_resource_mgr_reset_all_usage_counters(tm_rm_new);
+
+ if (true == error) {
+ /* Note that all vectors will be automatically
+ * destroyed by tm_resource_mgr_destroy() */
+ if (tm_rm_new)
+ tm_resource_mgr_destroy(&tm_rm_new);
+
+ return NULL;
+ }
+
+ return tm_rm_new;
+}
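+
+/*
+ * Illustrative use of cloning (an assumption about the intended pattern,
+ * based on the comments above): cofunctional validation can clone the RM,
+ * run trial acquisitions on the clone without touching HW, and then simply
+ * destroy the clone:
+ *
+ *	struct tm_resource_mgr *clone = tm_resource_mgr_clone(tm_rm);
+ *
+ *	if (clone != NULL) {
+ *		... acquire/release with TM_ACQUIRE_METHOD_SW ...
+ *		tm_resource_mgr_destroy(&clone);
+ *	}
+ */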
+
+/**
+ * Compares two resources. Returns true if tgt_item is less than ref_item,
+ * false otherwise.
+ * Compares resource properties in the following order:
+ * 1. Resource type (encoder, audio, etc.)
+ * 2. Resource priority (within a type it allows internal logic
+ * to sort resources)
+ * 3. Resource id (DAC, DVO, Uniphy, etc.)
+ * 4. Resource enum (every object with the same ID can have multiple
+ * instances - enums)
+ *
+ * \param [in] lhs: Pointer to the object to compare (tgt_item)
+ * \param [in] rhs: Pointer to the object to compare it with (ref_item)
+ *
+ * \return true if tgt_item is less than ref_item, false otherwise
+ */
+static bool tm_rm_less_than(const void *lhs, const void *rhs)
+{
+ const struct tm_resource *tgt_item =
+ *((const struct tm_resource **)lhs);
+ const struct tm_resource *ref_item =
+ *((const struct tm_resource **)rhs);
+ uint32_t tgt_priority = 0;
+ uint32_t ref_priority = 0;
+ enum object_type tgt_type;
+ enum object_type ref_type;
+ uint32_t tgt_id;
+ uint32_t ref_id;
+ enum object_enum_id tgt_enum;
+ enum object_enum_id ref_enum;
+
+ tgt_type = GRPH_ID(tgt_item).type;
+ ref_type = GRPH_ID(ref_item).type;
+
+ tgt_id = GRPH_ID(tgt_item).id;
+ ref_id = GRPH_ID(ref_item).id;
+
+ tgt_enum = GRPH_ID(tgt_item).enum_id;
+ ref_enum = GRPH_ID(ref_item).enum_id;
+
+ /* 1. Compare Resource type (encoder, audio, etc.) */
+ if (tgt_type < ref_type)
+ return true;
+ if (tgt_type > ref_type)
+ return false;
+
+ tgt_priority = tgt_item->funcs->get_priority(tgt_item);
+ ref_priority = ref_item->funcs->get_priority(ref_item);
+ /* 2. Compare Resource priority (within type it allows to have
+ * internal logic to sort resources) */
+ if (tgt_priority < ref_priority)
+ return true;
+ if (tgt_priority > ref_priority)
+ return false;
+
+ /* 3. Compare Resource id (DAC, DVO, Uniphy, etc.) */
+ if (tgt_id < ref_id)
+ return true;
+ if (tgt_id > ref_id)
+ return false;
+
+ /* 4. Compare Resource enum (every object with same ID can have
+ * multiple instances - enums) */
+ if (tgt_enum < ref_enum)
+ return true;
+ if (tgt_enum > ref_enum)
+ return false;
+
+ return false;
+}
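+
+/*
+ * Illustration (hypothetical objects): under the ordering above, encoder
+ * resources of equal priority sort first by id and then by enum_id, e.g.
+ *
+ *	ENCODER/UNIPHY_A/ENUM_ID_1 < ENCODER/UNIPHY_A/ENUM_ID_2
+ *		< ENCODER/UNIPHY_B/ENUM_ID_1
+ *
+ * and every encoder sorts before every audio, provided that
+ * OBJECT_TYPE_ENCODER is less than OBJECT_TYPE_AUDIO in the enum.
+ */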
+
+struct tm_resource *dal_tm_resource_mgr_add_resource(
+ struct tm_resource_mgr *tm_rm,
+ struct tm_resource *tm_resource_input)
+{
+ uint32_t count;
+ struct tm_resource **tm_resource_output;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ if (NULL == tm_resource_input) {
+ /* We can get here if memory allocation for resource failed. */
+ TM_ERROR("%s: Can not add a NULL resource!\n", __func__);
+ return NULL;
+ }
+
+ if (false == dal_graphics_object_id_is_valid(
+ GRPH_ID(tm_resource_input))) {
+ /* Some objects are "artificial" (not from BIOS parser)
+ * and may be invalid. TODO: add exceptions for such
+ * objects in dal_graphics_object_id_is_valid(). */
+ TM_RESOURCES("%s: invalid object id!\n", __func__);
+ }
+
+ tm_resource_output =
+ tmrm_resources_set_insert(tm_rm->resources, &tm_resource_input);
+
+ if (!tm_resource_output || !*tm_resource_output) {
+ TM_ERROR("%s: resource storage reached maximum capacity!\n",
+ __func__);
+ return NULL;
+ }
+
+ count = tm_resource_mgr_get_total_resources_num(tm_rm);
+
+ TM_RESOURCES("Added Resource (total %02d): %s\n",
+ count,
+ tm_utils_get_tm_resource_str(*tm_resource_output));
+
+ return *tm_resource_output;
+}
+
+/**
+ * Add engine resource to repository (engine does not have an object, only ID)
+ *
+ * \param [in] engine: engine object ID
+ *
+ * \return
+ * Pointer to added resource on success, NULL otherwise
+ */
+struct tm_resource *tm_resource_mgr_add_engine(
+ struct tm_resource_mgr *tm_rm,
+ enum engine_id engine)
+{
+ struct graphics_object_id id;
+
+ if (engine >= ENGINE_ID_COUNT)
+ return NULL;
+
+ id = dal_graphics_object_id_init(engine, ENUM_ID_1, OBJECT_TYPE_ENGINE);
+
+ return dal_tm_resource_mgr_add_resource(tm_rm,
+ dal_tm_resource_engine_create(id));
+}
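+
+/*
+ * For example (illustration only), GPU initialisation code could register
+ * the first DIG stream engine with:
+ *
+ *	tm_resource_mgr_add_engine(tm_rm, ENGINE_ID_DIGA);
+ *
+ * The engine is stored with ENUM_ID_1, since engines have no real object
+ * and their enum is not used.
+ */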
+
+/**
+ * Links encoder resources. Our HW unites encoders into pairs which may have
+ * implicit dependency.
+ * This dependency takes effect (depending on the signal as well) when
+ * calculating cofunctional paths.
+ * That's why we need to know, for each encoder, which one is its pair.
+ */
+void tm_resource_mgr_relink_encoders(struct tm_resource_mgr *tm_rm)
+{
+ uint32_t i;
+ uint32_t pair;
+ struct tm_resource *encoder_rsrc;
+ struct tm_resource *paired_encoder_rsrc;
+ enum transmitter paired_transmitter_id;
+ struct encoder *encoder;
+ struct encoder *paired_encoder;
+
+ const struct tm_resource_range *range =
+ dal_tmrm_get_resource_range_by_type(tm_rm, OBJECT_TYPE_ENCODER);
+
+ for (i = range->start; i < range->end; i++) {
+
+ encoder_rsrc = tm_resource_mgr_enum_resource(tm_rm, i);
+
+ encoder = TO_ENCODER(encoder_rsrc);
+
+ paired_transmitter_id = dal_encoder_get_paired_transmitter(
+ encoder);
+
+ TO_ENCODER_INFO(encoder_rsrc)->paired_encoder_index =
+ RESOURCE_INVALID_INDEX;
+
+ if (paired_transmitter_id == TRANSMITTER_UNKNOWN ||
+ paired_transmitter_id >= TRANSMITTER_COUNT) {
+ /* there is no paired transmitter, so
+ * nothing to pair with */
+ continue;
+ }
+
+ for (pair = range->start; pair < range->end; pair++) {
+
+ paired_encoder_rsrc = tm_resource_mgr_enum_resource(
+ tm_rm, pair);
+
+ paired_encoder = TO_ENCODER(paired_encoder_rsrc);
+
+ if (dal_encoder_get_transmitter(paired_encoder)
+ == paired_transmitter_id) {
+ /* both have the same transmitter - found it */
+ TO_ENCODER_INFO(encoder_rsrc)->
+ paired_encoder_index = pair;
+ break;
+ }
+ } /* for () */
+ } /* for () */
+}
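+
+/*
+ * Example of the resulting linkage (hypothetical IDs): if the encoder for
+ * UNIPHY_A reports UNIPHY_B as its paired transmitter, then after
+ * tm_resource_mgr_relink_encoders() the UNIPHY_A resource holds the
+ * repository index of the UNIPHY_B resource in 'paired_encoder_index',
+ * and acquiring UNIPHY_A for a dual-link signal will reserve both.
+ */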
+
+/**
+ * Returns true if this path requires stereo mixer controller attached.
+ *
+ * \param [in] display_path: display path to be acquired
+ *
+ * \return true: if this path requires a stereo mixer controller attached,
+ * false: otherwise
+ */
+#if 0
+static bool tmrm_need_stereo_mixer_controller(
+ const struct display_path *display_path)
+{
+ struct dcs *dcs;
+ struct dcs_stereo_3d_features row_interleave;
+ struct dcs_stereo_3d_features column_interleave;
+ struct dcs_stereo_3d_features pixel_interleave;
+
+ dcs = dal_display_path_get_dcs(display_path);
+
+ if (NULL == dcs)
+ return false;
+
+ row_interleave = dal_dcs_get_stereo_3d_features(dcs,
+ TIMING_3D_FORMAT_ROW_INTERLEAVE);
+
+ column_interleave = dal_dcs_get_stereo_3d_features(dcs,
+ TIMING_3D_FORMAT_COLUMN_INTERLEAVE);
+
+ pixel_interleave = dal_dcs_get_stereo_3d_features(dcs,
+ TIMING_3D_FORMAT_PIXEL_INTERLEAVE);
+
+ if (row_interleave.flags.SUPPORTED ||
+ column_interleave.flags.SUPPORTED ||
+ pixel_interleave.flags.SUPPORTED) {
+
+ return true;
+ }
+
+ return false;
+}
+#endif
+
+/**
+ * Looks up the resource identified by objectID.
+ *
+ * \param [in] object: object for which to look for TM resource.
+ *
+ * \return Pointer to requested resource on success, NULL otherwise.
+ */
+struct tm_resource*
+tm_resource_mgr_find_resource(
+ struct tm_resource_mgr *tm_rm,
+ struct graphics_object_id object)
+{
+ uint32_t i;
+ struct tm_resource *tm_resource;
+
+ for (i = 0; i < tm_resource_mgr_get_total_resources_num(tm_rm); i++) {
+
+ tm_resource = tm_resource_mgr_enum_resource(tm_rm, i);
+
+ if (true == dal_graphics_object_id_is_equal(
+ GRPH_ID(tm_resource), object))
+ return tm_resource;
+ }
+
+ /* If we got here, resource not found. */
+ return NULL;
+}
+
+/**
+ * Returns the requested resource for a given index - this index is global
+ * and may address any type of resource.
+ * Normally this method is used when one wants to iterate over all objects.
+ *
+ * \param [in] index: Index in resource database
+ *
+ * \return Pointer to requested resource on success, NULL otherwise
+ */
+struct tm_resource*
+tm_resource_mgr_enum_resource(
+ struct tm_resource_mgr *tm_rm,
+ uint32_t index)
+{
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ if (index >= tm_resource_mgr_get_total_resources_num(tm_rm)) {
+ TM_ERROR("%s: index out of boundary: %d\n", __func__, index);
+ return NULL;
+ }
+
+ return *tmrm_resources_set_at_index(tm_rm->resources, index);
+}
+
+/**
+ *
+ * Returns total number of resources (regardless of type)
+ *
+ * \return Total number of resources
+ */
+uint32_t tm_resource_mgr_get_total_resources_num(
+ struct tm_resource_mgr *tm_rm)
+{
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ if (NULL == tm_rm->resources) {
+ TM_ERROR("%s: resources list is NULL!\n", __func__);
+ return 0;
+ }
+
+ return dal_flat_set_get_count(tm_rm->resources);
+}
+
+static
+struct tm_resource*
+tm_resource_mgr_find_engine_resource(
+ struct tm_resource_mgr *tm_rm,
+ enum engine_id engine_id)
+{
+ struct dal_context *dal_context = tm_rm->dal_context;
+ uint32_t i;
+ struct tm_resource *tm_resource;
+ const struct tm_resource_range *engines =
+ dal_tmrm_get_resource_range_by_type(tm_rm, OBJECT_TYPE_ENGINE);
+
+ for (i = engines->start; i < engines->end; i++) {
+
+ tm_resource = tm_resource_mgr_enum_resource(tm_rm, i);
+
+ if (GRPH_ID(tm_resource).id == engine_id) {
+			/* We can ignore 'id.enum_id' for engine
+			 * because it is not used. */
+ return tm_resource;
+ }
+ }
+
+ /* If we got here, resource not found. */
+ TM_WARNING("%s: Engine '%d' not found!\n", __func__, engine_id);
+
+ return NULL;
+}
+
+/**
+ * Verifies that the permanent resources required for the given display
+ * path are available.
+ *
+ * \param [in] display_path: Display path for which we verify resource
+ * availability.
+ *
+ * \return true: if resources are available,
+ * false: otherwise
+ */
+static bool tmrm_resources_available(struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ struct tm_resource *tm_resource;
+ struct tm_resource *tm_paired_resource;
+ bool is_dual_link_signal;
+ uint32_t i;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ if (display_path == NULL) {
+ TM_ERROR("%s: invalid state or input data!\n", __func__);
+ return false;
+ }
+
+ is_dual_link_signal = dal_is_dual_link_signal(
+ dal_display_path_get_query_signal(display_path,
+ SINK_LINK_INDEX));
+
+ /* Check for connector availability */
+ tm_resource = tmrm_display_path_find_connector_resource(tm_rm,
+ display_path);
+
+ if (tm_resource == NULL) {
+ TM_ERROR("%s: Failed to fetch Connector resource!\n",
+ __func__);
+ return false;
+ }
+
+ if (TM_RES_REF_CNT_GET(tm_resource) > 0
+ && !tm_resource->flags.mst_resource) {
+
+ TM_WARNING("%s: Connector resource NOT available! ref_count:%d\n",
+ __func__,
+ TM_RES_REF_CNT_GET(tm_resource));
+ return false;
+ }
+
+ /* Check for encoder availability */
+ for (i = 0; i < dal_display_path_get_number_of_links(display_path);
+ i++) {
+
+ tm_resource = tmrm_display_path_find_upstream_resource(tm_rm,
+ display_path, i);
+
+ if (tm_resource == NULL) {
+ TM_ERROR("%s: Failed to fetch Resource!\n", __func__);
+ return false;
+ }
+
+ /* Primary resource is busy */
+ if (TM_RES_REF_CNT_GET(tm_resource) > 0 &&
+ !tm_resource->flags.mst_resource) {
+ TM_WARNING("%s: Encoder resource NOT available! ref_count:%d, Link Index:%d\n",
+ __func__,
+ TM_RES_REF_CNT_GET(tm_resource),
+ i);
+ return false;
+ }
+
+ /* Get secondary/paired resource */
+ tm_paired_resource = NULL;
+
+ if (is_dual_link_signal
+ && TO_ENCODER_INFO(tm_resource)->paired_encoder_index !=
+ RESOURCE_INVALID_INDEX) {
+
+ TM_ASSERT(!tm_resource->flags.mst_resource);
+
+ tm_paired_resource = tm_resource_mgr_enum_resource(
+ tm_rm,
+ TO_ENCODER_INFO(tm_resource)->
+ paired_encoder_index);
+ }
+
+ if (tm_paired_resource != NULL &&
+ TM_RES_REF_CNT_GET(tm_paired_resource) > 0) {
+ /* Paired resource required, but is busy */
+ TM_WARNING("%s: Paired resource is busy! Link Index:%d\n",
+ __func__, i);
+ return false;
+ }
+ } /* for() */
+
+ /* Stereosync encoder will be present only on already acquired path */
+ tm_resource = tm_resource_mgr_get_stereo_sync_resource(tm_rm,
+ display_path);
+
+ if (tm_resource != NULL && TM_RES_REF_CNT_GET(tm_resource) > 0) {
+ TM_WARNING("%s: Stereosync encoder resource is busy!\n",
+ __func__);
+ return false;
+ }
+
+ /* Sync-output encoder will be present only on already acquired path */
+ tm_resource = tm_resource_mgr_get_sync_output_resource(tm_rm,
+ display_path);
+
+ if (tm_resource != NULL && TM_RES_REF_CNT_GET(tm_resource) > 0) {
+ TM_WARNING("%s: Sync-output encoder resource is busy!\n",
+ __func__);
+ return false;
+ }
+
+ /* No need to check GLSync Connector resources - it is never
+ * acquired within display path and never intersects with other
+ * resources */
+
+ return true;
+}
+
+
+/**
+ * Obtains the resource index of an available controller.
+ *
+ * \param [in] exclude_mask: bitmask of controller IDs which should be
+ * excluded from the search
+ *
+ * \return index of an available controller, RESOURCE_INVALID_INDEX otherwise
+ */
+static uint32_t dal_tmrm_find_controller_for_display_path(
+ struct tm_resource_mgr *tm_rm,
+ uint32_t exclude_mask)
+{
+ struct dal_context *dal_context = tm_rm->dal_context;
+ uint32_t controller_res_ind = RESOURCE_INVALID_INDEX;
+ uint32_t i;
+ struct tm_resource *tm_resource;
+ const struct tm_resource_range *controllers =
+ dal_tmrm_get_resource_range_by_type(
+ tm_rm,
+ OBJECT_TYPE_CONTROLLER);
+
+ for (i = controllers->start; i < controllers->end; i++) {
+
+ tm_resource = tm_resource_mgr_enum_resource(tm_rm, i);
+
+ if (tm_utils_test_bit(&exclude_mask, GRPH_ID(tm_resource).id))
+ continue;
+
+ if (TM_RES_REF_CNT_GET(tm_resource) > 0) {
+ /* already acquired */
+ continue;
+ }
+
+		/* found a free Primary (non-underlay) controller */
+ controller_res_ind = i;
+ break;
+ } /* for() */
+
+ if (controller_res_ind == RESOURCE_INVALID_INDEX) {
+ /* That means we ran out of controllers. */
+ TM_WARNING("%s:Failed to find a free Controller!\n", __func__);
+ }
+
+ return controller_res_ind;
+}
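+
+/*
+ * Illustrative call (an assumption about the intended use of
+ * 'exclude_mask'): a caller that must skip CONTROLLER_ID_D0 could pass
+ *
+ *	uint32_t exclude_mask = 0;
+ *
+ *	tm_utils_set_bit(&exclude_mask, CONTROLLER_ID_D0);
+ *	idx = dal_tmrm_find_controller_for_display_path(tm_rm, exclude_mask);
+ *
+ * assuming a tm_utils_set_bit() helper symmetric to tm_utils_test_bit().
+ */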
+
+/**
+ * Obtains Resource index of available clock source for given display path
+ *
+ * \param [in] display_path: Display path for which we look for an
+ * available clock source
+ * \param [in] method: How to acquire resources
+ *
+ * \return index of an available clock source, RESOURCE_INVALID_INDEX
+ * otherwise
+ */
+static uint32_t tmrm_get_available_clock_source(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ enum tm_acquire_method method)
+{
+ struct dal_context *dal_context = tm_rm->dal_context;
+ struct encoder *encoder;
+ uint32_t i;
+ struct tm_resource *tm_resource;
+ struct clock_source *clock_source;
+ enum clock_source_id clock_source_id;
+ enum clock_sharing_group clock_sharing_group;
+ enum signal_type signal;
+ enum clock_sharing_level clock_sharing_level;
+ const struct tm_resource_range *clock_sources;
+
+ TM_ASSERT(display_path != NULL);
+
+	/* Obtain the encoder closest to the GPU (we need to know if this
+	 * encoder supports the selected clock source) */
+ encoder = dal_display_path_get_upstream_object(display_path,
+ ASIC_LINK_INDEX);
+
+ if (encoder == NULL)
+ return RESOURCE_INVALID_INDEX;
+
+ clock_sharing_group = dal_display_path_get_clock_sharing_group(
+ display_path);
+
+ clock_sources =
+ dal_tmrm_get_resource_range_by_type(
+ tm_rm,
+ OBJECT_TYPE_CLOCK_SOURCE);
+
+ /* Round 1: Try to find already used (in shared mode) Clock Source */
+ if (clock_sharing_group != CLOCK_SHARING_GROUP_EXCLUSIVE) {
+
+ for (i = clock_sources->start; i < clock_sources->end; i++) {
+
+ tm_resource = tm_resource_mgr_enum_resource(tm_rm, i);
+
+ clock_source = TO_CLOCK_SOURCE(tm_resource);
+
+ clock_source_id = dal_clock_source_get_id(clock_source);
+
+ if (!dal_encoder_is_clock_source_supported(encoder,
+ clock_source_id)) {
+ /* not for this encoder */
+ continue;
+ }
+
+ if (clock_sharing_group ==
+ TO_CLOCK_SOURCE_INFO(tm_resource)->
+ clk_sharing_group) {
+ /* resource is found */
+ return i;
+ }
+
+ } /* for() */
+ } /* if() */
+
+ /* Round 2: If shared Clock Source was not found - try to find
+ * available Clock Source */
+ for (i = clock_sources->start; i < clock_sources->end; i++) {
+
+ tm_resource = tm_resource_mgr_enum_resource(tm_rm, i);
+
+ if (false == is_resource_available(tm_resource))
+ continue;
+
+ clock_source = TO_CLOCK_SOURCE(tm_resource);
+
+ signal = dal_display_path_get_query_signal(display_path,
+ ASIC_LINK_INDEX);
+
+ if (!dal_clock_source_is_output_signal_supported(clock_source,
+ signal))
+ continue;
+
+ clock_sharing_level = dal_clock_souce_get_clk_sharing_lvl(
+ clock_source);
+
+ if (tm_utils_is_clock_sharing_mismatch(clock_sharing_level,
+ clock_sharing_group))
+ continue;
+
+ clock_source_id = dal_clock_source_get_id(clock_source);
+
+ if (!dal_encoder_is_clock_source_supported(encoder,
+ clock_source_id))
+ continue;
+
+ /* Finally we passed all verifications, this clock source
+ * is valid for use */
+ return i;
+ } /* for () */
+
+ /* not found */
+ TM_WARNING("%s: no clk src found!\n", __func__);
+ return RESOURCE_INVALID_INDEX;
+}
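+
+/*
+ * Note on the two rounds above (an illustration): if two paths belong to
+ * the same non-exclusive clock sharing group, the second path is handed
+ * the clock source already used by the first one in round 1. A path in
+ * CLOCK_SHARING_GROUP_EXCLUSIVE always skips round 1 and gets a free
+ * clock source in round 2.
+ */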
+
+/**
+ * Obtains Engine ID of available engine for given display path
+ *
+ * \param [in] display_path: Display path for which we look for an
+ * available engine
+ * \param [in] method: How to acquire resources
+ *
+ * \return ID of engine, if available. ENGINE_ID_UNKNOWN otherwise.
+ */
+enum engine_id tmrm_get_available_stream_engine(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ enum tm_acquire_method method)
+{
+ struct tm_resource *tm_resource = NULL;
+ enum tm_engine_priority restricted_priority;
+ enum tm_engine_priority best_priority;
+ struct encoder *encoder;
+ enum engine_id preferred_engine_id = ENGINE_ID_UNKNOWN;
+ enum engine_id available_engine_id = ENGINE_ID_UNKNOWN;
+ union supported_stream_engines supported_stream_engines;
+ uint32_t i;
+ struct dal_context *dal_context = tm_rm->dal_context;
+ const struct tm_resource_range *engines =
+ dal_tmrm_get_resource_range_by_type(tm_rm, OBJECT_TYPE_ENGINE);
+
+ TM_ASSERT(display_path != NULL);
+
+ /* Define restricted priority (which cannot be used for this path,
+ * but all higher priorities are good). */
+ if (dal_display_path_get_query_signal(display_path, SINK_LINK_INDEX)
+ == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ /* restricted to be MST capable */
+ restricted_priority = TM_ENGINE_PRIORITY_NON_MST_CAPABLE;
+ } else {
+ /* no real restriction */
+ restricted_priority = TM_ENGINE_PRIORITY_UNKNOWN;
+ }
+
+ /* Start with invalid priority */
+ best_priority = restricted_priority;
+
+	/* We assign an engine only to the first encoder - the one closest
+	 * to the GPU */
+ encoder = dal_display_path_get_upstream_object(display_path,
+ ASIC_LINK_INDEX);
+
+ if (encoder == NULL) {
+ TM_ERROR("%s: no Encoder!\n", __func__);
+		return ENGINE_ID_UNKNOWN;
+ }
+
+ /* First try preferred engine */
+ preferred_engine_id = dal_encoder_get_preferred_stream_engine(encoder);
+
+ if (preferred_engine_id != ENGINE_ID_UNKNOWN) {
+
+ /* Use preferred engine as available engine for now */
+ available_engine_id = preferred_engine_id;
+
+ for (i = engines->start; i < engines->end; i++) {
+
+ tm_resource = tm_resource_mgr_enum_resource(tm_rm, i);
+
+ if (GRPH_ID(tm_resource).id == preferred_engine_id)
+ break;
+ }
+
+		/* Guard against the preferred engine missing from the
+		 * repository, in which case 'tm_resource' points at the
+		 * last engine enumerated above. */
+		if (i < engines->end &&
+			is_resource_available(tm_resource) &&
+			TO_ENGINE_INFO(tm_resource)->priority <
+			best_priority) {
+ /* found a free resource with a higher priority */
+ best_priority = TO_ENGINE_INFO(tm_resource)->priority;
+ }
+ }
+
+ /* If preferred engine not available - pick supported
+ * engine with highest priority */
+ if (best_priority >= restricted_priority) {
+
+ supported_stream_engines =
+ dal_encoder_get_supported_stream_engines(
+ encoder);
+
+ for (i = engines->start; i < engines->end; i++) {
+ tm_resource = tm_resource_mgr_enum_resource(tm_rm, i);
+
+ if (false == tm_utils_is_supported_engine(
+ supported_stream_engines,
+ GRPH_ID(tm_resource).id))
+ continue;
+
+ if (is_resource_available(tm_resource) &&
+ TO_ENGINE_INFO(tm_resource)->priority <
+ best_priority) {
+ /* found a non-preferred engine */
+ available_engine_id = GRPH_ID(tm_resource).id;
+ best_priority =
+ TO_ENGINE_INFO(tm_resource)->priority;
+ }
+
+ } /* for() */
+ }
+
+ if (best_priority < restricted_priority) {
+		/* We picked a valid engine - return its ID to the caller */
+ return available_engine_id;
+ }
+
+ TM_ERROR("%s: no stream engine found!\n", __func__);
+ return ENGINE_ID_UNKNOWN;
+}
+
+static void tmrm_acquire_encoder(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ uint32_t link_idx)
+{
+ enum signal_type signal = dal_display_path_get_query_signal(
+ display_path, link_idx);
+ struct tm_resource *tm_resource = NULL;
+ struct tm_resource *tm_paired_resource = NULL;
+ struct dal_context *dal_context = tm_rm->dal_context;
+ bool is_dual_link_signal;
+
+ tm_resource = tmrm_display_path_find_upstream_resource(tm_rm,
+ display_path, link_idx);
+ TM_ASSERT(tm_resource != NULL);
+
+ dal_display_path_set_link_active_state(display_path, link_idx, true);
+
+ is_dual_link_signal = dal_is_dual_link_signal(signal);
+
+ tm_resource_ref_counter_increment(tm_rm, tm_resource);
+
+ tm_resource->flags.mst_resource =
+ (signal == SIGNAL_TYPE_DISPLAY_PORT_MST);
+
+	/* Paired resource required - acquire it as well.
+	 * In the current design we do not program paired resources -
+	 * we only need to handle cofunctional enumeration properly */
+ if (is_dual_link_signal &&
+ TO_ENCODER_INFO(tm_resource)->paired_encoder_index !=
+ RESOURCE_INVALID_INDEX) {
+
+ TM_ASSERT(!tm_resource->flags.mst_resource);
+
+ tm_paired_resource = tm_resource_mgr_enum_resource(
+ tm_rm,
+ TO_ENCODER_INFO(tm_resource)->
+ paired_encoder_index);
+
+ tm_resource_ref_counter_increment(tm_rm, tm_paired_resource);
+ }
+}
+
+static void tmrm_acquire_audio(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ uint32_t link_idx)
+{
+ struct tm_resource *tm_resource;
+
+ tm_resource = tmrm_display_path_find_audio_resource(tm_rm,
+ display_path, link_idx);
+
+ if (tm_resource != NULL) {
+ /* Audio reference count already updated when it was attached
+ * to display path - need only to activate */
+ dal_display_path_set_audio_active_state(display_path, link_idx,
+ true);
+ }
+}
+
+/**
+ * Acquires resources associated with given link
+ * Assumes resources are available
+ *
+ * \param [in] display_path: Display path for which to acquire resources
+ * \param [in] link_idx: Index of link with which resources associated
+ * \param [in] method: How to acquire resources
+ */
+static void tmrm_acquire_link(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ uint32_t link_idx,
+ enum tm_acquire_method method)
+{
+ tmrm_acquire_encoder(tm_rm, display_path, link_idx);
+
+ tmrm_acquire_audio(tm_rm, display_path, link_idx);
+}
+
+/**
+ * Performs power gating on a controller.
+ * The physical power gating state is updated in the controller object
+ * through the call to dal_controller_power_gating_enable().
+ *
+ * \param [out] tm_resource: TM resource to be modified. Must be of type
+ * controller.
+ * \param [in ] method: How to acquire resources
+ * \param [in ] enable: If true - enable power gating, false - disable.
+ */
+static void tmrm_do_controller_power_gating(
+ struct tm_resource_mgr *tm_rm,
+ struct tm_resource *tm_resource,
+ enum tm_acquire_method method,
+ bool enable)
+{
+ uint32_t ref_counter;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ TM_ASSERT(GRPH_ID(tm_resource).type == OBJECT_TYPE_CONTROLLER);
+
+ if (false == update_hw_state_needed(method))
+ return;
+
+ ref_counter = TM_RES_REF_CNT_GET(tm_resource);
+
+ if (enable == true) {
+
+ if (false == tm_rm->pipe_power_gating_enabled) {
+ TM_PWR_GATING("Pipe PG Feature disabled --> not gating.\n");
+ return;
+ }
+
+ if (ref_counter != 0) {
+ TM_WARNING("%s: Can NOT power gate with non-zero reference counter:%d!\n",
+ __func__, ref_counter);
+ return;
+ }
+
+		/* This is the first stateful acquisition.
+		 * It must have the logical state of "not power gated". */
+ if (TO_CONTROLLER_INFO(tm_resource)->power_gating_state !=
+ TM_POWER_GATE_STATE_OFF) {
+ TM_WARNING("%s: Invalid state:%d! (expected TM_POWER_GATE_STATE_OFF)\n",
+ __func__,
+ TO_CONTROLLER_INFO(tm_resource)->
+ power_gating_state);
+ return;
+ }
+
+ dal_controller_power_gating_enable(TO_CONTROLLER(tm_resource),
+ PIPE_GATING_CONTROL_ENABLE);
+
+ TO_CONTROLLER_INFO(tm_resource)->power_gating_state =
+ TM_POWER_GATE_STATE_ON;
+
+ TM_PWR_GATING("Gated Controller: %s(%d)\n",
+ tm_utils_go_id_to_str(GRPH_ID(tm_resource)),
+ dal_graphics_object_id_get_controller_id(
+ GRPH_ID(tm_resource)));
+ return;
+ }
+
+ if (enable == false) {
+
+ if (ref_counter != 1) {
+ /* Un-gate only once! */
+			TM_WARNING("%s: Can NOT un-gate with reference counter '%d' not equal to one!\n",
+ __func__, ref_counter);
+ return;
+ }
+
+ /* Un-gate the pipe, if NOT un-gated already. */
+ if (TO_CONTROLLER_INFO(tm_resource)->power_gating_state !=
+ TM_POWER_GATE_STATE_ON) {
+ TM_WARNING("%s: Invalid state:%d! (expected TM_POWER_GATE_STATE_ON)\n",
+ __func__,
+ TO_CONTROLLER_INFO(tm_resource)->
+ power_gating_state);
+ return;
+ }
+
+ dal_controller_power_gating_enable(TO_CONTROLLER(tm_resource),
+ PIPE_GATING_CONTROL_DISABLE);
+
+ /* TODO: 'power_gating_state' flag is set in many places, but
+ * it should be set *only* by this function. */
+ TO_CONTROLLER_INFO(tm_resource)->power_gating_state =
+ TM_POWER_GATE_STATE_OFF;
+
+ TM_PWR_GATING("Un-Gated Controller: %s(%d)\n",
+ tm_utils_go_id_to_str(GRPH_ID(tm_resource)),
+ dal_graphics_object_id_get_controller_id(
+ GRPH_ID(tm_resource)));
+ }
+}
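+
+/*
+ * Pairing sketch (derived from the callers in this file): a controller is
+ * un-gated right after its reference count goes 0 -> 1 during acquire, and
+ * gated again right after it goes 1 -> 0 during release:
+ *
+ *	tmrm_do_controller_power_gating(tm_rm, tm_resource, method, false);
+ *	...
+ *	tmrm_do_controller_power_gating(tm_rm, tm_resource, method, true);
+ *
+ * Both calls are no-ops unless method == TM_ACQUIRE_METHOD_HW.
+ */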
+
+uint32_t tm_resource_mgr_get_display_path_index_for_controller(
+ struct tm_resource_mgr *tm_rm,
+ enum controller_id controller_id)
+{
+ uint32_t display_index;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ if (controller_id < CONTROLLER_ID_D0
+ || controller_id > CONTROLLER_ID_MAX) {
+
+ TM_ERROR("%s: Invalid controller_id:%d !\n",
+ __func__, controller_id);
+ return INVALID_DISPLAY_INDEX;
+ }
+
+ display_index = tm_rm->controller_to_display_path_lookup[controller_id];
+
+ /*TM_RESOURCES("ctrlr-to-path:%s(%d)->%02d",
+ tm_utils_controller_id_to_str(controller_id),
+ controller_id,
+ display_index);*/
+
+ return display_index;
+}
+
+static void tmrm_update_controller_to_path_lookup_table(
+ struct tm_resource_mgr *tm_rm,
+ struct tm_resource *tm_resource,
+ struct display_path *display_path)
+{
+ enum controller_id controller_id;
+ uint32_t display_index;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ if (GRPH_ID(tm_resource).type != OBJECT_TYPE_CONTROLLER) {
+ TM_ERROR("%s: NOT a controller resource!\n", __func__);
+ return;
+ }
+
+ controller_id = GRPH_ID(tm_resource).id;
+
+ if (controller_id < CONTROLLER_ID_D0 ||
+ controller_id > CONTROLLER_ID_MAX) {
+
+ TM_ERROR("%s: Invalid controller_id:%d !\n",
+ __func__, controller_id);
+ return;
+ }
+
+ if (NULL == display_path) {
+ /* this is a request to clear the slot */
+ tm_rm->controller_to_display_path_lookup[controller_id] =
+ INVALID_DISPLAY_INDEX;
+ TM_RESOURCES("clearing-ctrlr idx:%s(%d)\n",
+ tm_utils_controller_id_to_str(controller_id),
+ controller_id);
+ return;
+ }
+
+ display_index = dal_display_path_get_display_index(display_path);
+
+ tm_rm->controller_to_display_path_lookup[controller_id] = display_index;
+
+ TM_RESOURCES("path-to-ctrlr:%02d->:%s(%d)\n",
+ display_index,
+ tm_utils_controller_id_to_str(controller_id),
+ controller_id);
+}
+
+/**
+ * Acquires controller to display path. Controller index should be valid
+ * (in scope and available for this path)
+ *
+ * \param [in] display_path: Display path for which we want to attach controller
+ * \param [in] controller_idx: Index of controller in resource database
+ * \param [in] method: How to acquire resources
+ */
+void dal_tmrm_acquire_controller(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ uint32_t controller_idx,
+ enum tm_acquire_method method)
+{
+ struct tm_resource *tm_resource;
+ uint32_t display_index;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ TM_ASSERT(display_path != NULL);
+
+ display_index = dal_display_path_get_display_index(display_path);
+
+ tm_resource = tm_resource_mgr_enum_resource(tm_rm, controller_idx);
+ if (NULL == tm_resource) {
+ TM_ERROR("%s: Path[%02d]:controller resource not found!\n",
+ __func__, display_index);
+ return;
+ }
+
+ if (1 == tm_resource_ref_counter_increment(tm_rm, tm_resource)
+ && update_hw_state_needed(method)) {
+
+ TM_CONTROLLER_ASN("Path[%02d]: Acquired: Controller: %s(%d)\n",
+ display_index,
+ tm_utils_go_id_to_str(GRPH_ID(tm_resource)),
+ dal_graphics_object_id_get_controller_id(
+ GRPH_ID(tm_resource)));
+
+ tmrm_update_controller_to_path_lookup_table(tm_rm, tm_resource,
+ display_path);
+
+ /* If controller was grabbed for set/reset mode operations,
+ * we disable power gating on this controller. */
+ tmrm_do_controller_power_gating(tm_rm, tm_resource, method,
+ false);
+ }
+}
+
+
+/**
+ * Acquires clock source to display path. Clock source index should be valid
+ * (in scope and available for this path).
+ *
+ * \param [in] display_path: Display path for which we want to attach clock
+ * source
+ * \param [in] clk_index: Index of clock source in resource database
+ */
+static void tmrm_acquire_clock_source(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ uint32_t clk_index)
+{
+ struct tm_resource *tm_resource;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ TM_ASSERT(display_path != NULL);
+
+ tm_resource = tm_resource_mgr_enum_resource(tm_rm, clk_index);
+
+ if (GRPH_ID(tm_resource).type != OBJECT_TYPE_CLOCK_SOURCE) {
+ TM_ERROR("%s: invalid resource type at index:%d!\n", __func__,
+ clk_index);
+ return;
+ }
+
+ dal_display_path_set_clock_source(display_path,
+ TO_CLOCK_SOURCE(tm_resource));
+
+ TO_CLOCK_SOURCE_INFO(tm_resource)->clk_sharing_group =
+ dal_display_path_get_clock_sharing_group(display_path);
+
+ tm_resource_ref_counter_increment(tm_rm, tm_resource);
+}
+
+
+/**
+ * Acquires engine to display path. Engine index should be valid (in scope and
+ * available for this path)
+ *
+ * \param [in] display_path: Display path for which we want to attach engine
+ * \param [in] engine_id: ID of engine to acquire
+ */
+static void tmrm_acquire_stream_engine(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ enum engine_id engine_id)
+{
+ struct dal_context *dal_context = tm_rm->dal_context;
+ struct tm_resource *tm_rsrc;
+ struct tm_resource *tm_rsrc_upstream;
+
+ TM_ASSERT(display_path != NULL);
+
+	/* We assign an engine only to the first active encoder - the one
+	 * closest to the GPU */
+ tm_rsrc_upstream = tmrm_display_path_find_upstream_resource(tm_rm,
+ display_path, ASIC_LINK_INDEX);
+
+ if (NULL == tm_rsrc_upstream) {
+ TM_WARNING("%s: no engine resource!\n", __func__);
+ return;
+ }
+
+ if (GRPH_ID(tm_rsrc_upstream).type != OBJECT_TYPE_ENCODER) {
+ TM_ERROR("%s: upstream resource is NOT an encoder!\n",
+ __func__);
+ return;
+ }
+
+ TM_ASSERT(engine_id >= ENGINE_ID_DIGA);
+	TM_ASSERT(engine_id < ENGINE_ID_COUNT);
+
+ tm_rsrc = tm_resource_mgr_find_engine_resource(tm_rm, engine_id);
+ if (NULL == tm_rsrc) {
+ TM_ERROR("%s: failed to find engine (0x%X) resource!\n",
+ __func__, engine_id);
+ return;
+ }
+
+ tm_resource_ref_counter_increment(tm_rm, tm_rsrc);
+
+ dal_display_path_set_stream_engine(display_path, ASIC_LINK_INDEX,
+ engine_id);
+
+ TM_ENG_ASN("Path[%02d]: Acquired StreamEngine=%s(%u) Transmitter=%s\n",
+ dal_display_path_get_display_index(display_path),
+ tm_utils_engine_id_to_str(engine_id),
+ engine_id,
+ tm_utils_transmitter_id_to_str(GRPH_ID(tm_rsrc_upstream)));
+}
+
+/**
+ * Releases engine if such was acquired on display path.
+ *
+ * \param [in] display_path: Display path from which we want to detach engine
+ */
+static void tmrm_release_stream_engine(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ enum engine_id engine_id;
+ struct tm_resource *tm_resource;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ engine_id = dal_display_path_get_stream_engine(display_path,
+ ASIC_LINK_INDEX);
+ if (ENGINE_ID_UNKNOWN == engine_id) {
+ /* Most likely tmrm_acquire_stream_engine() was not called. */
+ TM_ERROR("%s: engine NOT set!\n", __func__);
+ return;
+ }
+
+ tm_resource = tm_resource_mgr_find_engine_resource(tm_rm, engine_id);
+ if (NULL == tm_resource) {
+ TM_ERROR("%s: failed to find engine (0x%X) resource!\n",
+ __func__, engine_id);
+ return;
+ }
+
+ dal_display_path_set_stream_engine(display_path, ASIC_LINK_INDEX,
+ ENGINE_ID_UNKNOWN);
+
+ tm_resource_mgr_ref_counter_decrement(tm_rm, tm_resource);
+
+ TM_ENG_ASN("Path[%02d]: Released StreamEngine=%s(%u)\n",
+ dal_display_path_get_display_index(display_path),
+ tm_utils_engine_id_to_str(engine_id),
+ engine_id);
+}
+
+void dal_tmrm_release_non_root_controllers(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ enum tm_acquire_method method)
+{
+ struct dal_context *dal_context = tm_rm->dal_context;
+ struct display_path_plane *path_plane;
+ uint8_t planes_no;
+ uint8_t i;
+
+ planes_no = dal_display_path_get_number_of_planes(display_path);
+
+	/* Without this check 'i' would wrap around to 255 below (uint8_t
+	 * underflow) and produce a strange message. This case still needs
+	 * to be debugged. */
+	if (planes_no == 0)
+		return;
+
+	/* Iterate over the non-root controllers (plane index > 0). */
+
+ for (i = planes_no-1; i > 0; i--) {
+
+ /*
+ * release controller (and resources)
+ */
+
+ path_plane = dal_display_path_get_plane_at_index(
+ display_path, i);
+
+ if (path_plane == NULL) {
+ TM_ERROR("%s: Plane at %d is not set!\n", __func__, i);
+ return;
+ }
+
+ dal_tmrm_release_controller(tm_rm, display_path,
+ method,
+ path_plane->controller);
+ }
+}
+
+/**
+ * Releases controller, if such was acquired on display path.
+ *
+ * \param [in] display_path: Display path from which we want to detach
+ * controller
+ * \param [in] method: How resources were ACQUIRED
+ */
+void dal_tmrm_release_controller(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ enum tm_acquire_method method,
+ struct controller *controller)
+{
+ struct dal_context *dal_context = tm_rm->dal_context;
+ struct tm_resource *tm_rsrc;
+ struct graphics_object_id id;
+ uint32_t display_index;
+
+ display_index = dal_display_path_get_display_index(display_path);
+
+ id = dal_controller_get_graphics_object_id(controller);
+
+ tm_rsrc = tm_resource_mgr_find_resource(tm_rm, id);
+ if (tm_rsrc == NULL) {
+ TM_ERROR("%s: no resource for controller!\n", __func__);
+ return;
+ }
+
+ if (tm_resource_mgr_ref_counter_decrement(tm_rm, tm_rsrc) == 0) {
+
+ if (update_hw_state_needed(method)) {
+
+ tmrm_do_controller_power_gating(
+ tm_rm,
+ tm_rsrc,
+ method,
+ true);
+
+ tmrm_update_controller_to_path_lookup_table(
+ tm_rm,
+ tm_rsrc,
+ NULL);
+
+ TM_CONTROLLER_ASN("Path[%02d]: Released: Controller: %s(%d)\n",
+ display_index,
+ tm_utils_go_id_to_str(GRPH_ID(tm_rsrc)),
+ dal_graphics_object_id_get_controller_id(
+ GRPH_ID(tm_rsrc)));
+ }
+ }
+}
+
+static void tmrm_acquire_connector(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ struct dal_context *dal_context = tm_rm->dal_context;
+ struct tm_resource *tm_resource;
+
+ tm_resource = tmrm_display_path_find_connector_resource(tm_rm,
+ display_path);
+
+ TM_ASSERT(tm_resource != NULL);
+
+ tm_resource_ref_counter_increment(tm_rm, tm_resource);
+
+ tm_resource->flags.mst_resource =
+ (dal_display_path_get_query_signal(display_path,
+ SINK_LINK_INDEX) ==
+ SIGNAL_TYPE_DISPLAY_PORT_MST);
+}
+
+static void tmrm_acquire_stereo_sync(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ struct tm_resource *tm_resource;
+
+ tm_resource = tm_resource_mgr_get_stereo_sync_resource(tm_rm,
+ display_path);
+
+ if (tm_resource != NULL)
+ tm_resource_ref_counter_increment(tm_rm, tm_resource);
+}
+
+static void tmrm_acquire_sync_output(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ struct tm_resource *tm_resource;
+
+ tm_resource = tm_resource_mgr_get_sync_output_resource(tm_rm,
+ display_path);
+
+ if (tm_resource != NULL)
+ tm_resource_ref_counter_increment(tm_rm, tm_resource);
+}
+
+static void tmrm_acquire_alternative_clock(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ struct tm_resource *tm_resource;
+
+ tm_resource = tmrm_display_path_find_alternative_clock_resource(
+ tm_rm, display_path);
+
+ if (tm_resource != NULL) {
+
+ TO_CLOCK_SOURCE_INFO(tm_resource)->clk_sharing_group =
+ dal_display_path_get_clock_sharing_group(display_path);
+ tm_resource_ref_counter_increment(tm_rm, tm_resource);
+ }
+}
+
+enum tm_result tmrm_add_root_plane(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ uint32_t controller_index)
+{
+ struct dal_context *dal_context = tm_rm->dal_context;
+ struct controller *controller;
+ struct display_path_plane plane;
+ struct tm_resource *tm_resource;
+
+ if (dal_display_path_get_number_of_planes(display_path) != 0) {
+ ASSERT(false);
+ TM_ERROR(
+ "%s: Path Should NOT have any planes! [Path: %d]\n",
+ __func__,
+ dal_display_path_get_display_index(
+ display_path));
+ return TM_RESULT_FAILURE;
+ }
+
+ /* TODO: add real 'plane' initialisation here, based on
+ * parameters passed in. */
+ dal_memset(&plane, 0, sizeof(plane));
+
+ tm_resource = tm_resource_mgr_enum_resource(tm_rm, controller_index);
+
+ controller = TO_CONTROLLER_INFO(tm_resource)->controller;
+
+ plane.controller = controller;
+
+ /* We checked that path has no planes, it means we are adding the 'root'
+ * plane. */
+ if (false == dal_display_path_add_plane(display_path, &plane))
+ return TM_RESULT_FAILURE;
+
+ return TM_RESULT_SUCCESS;
+}
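+
+/*
+ * Ordering sketch (an illustration of the rules documented below):
+ *
+ *	tm_resource_mgr_acquire_resources(tm_rm, path, TM_ACQUIRE_METHOD_HW);
+ *	tm_resource_mgr_acquire_resources(tm_rm, path, TM_ACQUIRE_METHOD_SW);
+ *
+ * is legal (the SW acquire is a no-op), while the reverse order is not:
+ * after a SW acquire the resources are no longer "available", so a
+ * subsequent HW acquire fails in tmrm_resources_available().
+ */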
+
+/**
+ * Acquires resources for the requested display path.
+ * Exception - the Audio resource is acquired/released automatically on
+ * connect/disconnect; here we only activate audio if it is required
+ * and was acquired.
+ *
+ * It is OK to use TM_ACQUIRE_METHOD_SW on a path which is already
+ * acquired by TM_ACQUIRE_METHOD_HW (because it is a noop).
+ *
+ * It is *not* OK to use TM_ACQUIRE_METHOD_HW on a path which is already
+ * acquired by TM_ACQUIRE_METHOD_SW because many HW-update actions depend on
+ * resource usage counter transitions from 1-to-0 and from 0-to-1.
+ * If this is done, then tmrm_resources_available() will fail and this function
+ * will fail too.
+ *
+ * \param [in] display_path: Display path for which to acquire resources
+ * \param [in] method: How to acquire resources
+ *
+ * \return TM_RESULT_SUCCESS: resources were successfully acquired
+ * TM_RESULT_FAILURE: otherwise
+ */
+enum tm_result tm_resource_mgr_acquire_resources(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ enum tm_acquire_method method)
+{
+ uint32_t controller_index;
+ uint32_t clock_source_index;
+ enum engine_id engine_id;
+ uint32_t i;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ if (display_path == NULL) {
+ TM_ERROR("%s: invalid state or input data!\n", __func__);
+ return TM_RESULT_FAILURE;
+ }
+
+ if (true == dal_display_path_is_acquired(display_path)) {
+ if (update_hw_state_needed(method)) {
+ /* If display path is already acquired - increment
+ * reference counter so it could be balanced by
+ * tm_resource_mgr_release_resources() */
+ dal_display_path_acquire(display_path);
+ } else {
+ /* Do nothing because for SW acquire all resources
+ * already there. */
+ }
+
+ return TM_RESULT_SUCCESS;
+ }
+
+ /* Verify that all resources (which are PERMANENT to display path) are
+ * available. */
+ if (false == tmrm_resources_available(tm_rm, display_path))
+ return TM_RESULT_FAILURE;
+
+ /* Obtain indexes of available resources which are NOT permanent
+ * to display path. */
+ controller_index = dal_tmrm_find_controller_for_display_path(tm_rm, 0);
+ if (controller_index == RESOURCE_INVALID_INDEX)
+ return TM_RESULT_FAILURE;
+
+ clock_source_index = tmrm_get_available_clock_source(tm_rm,
+ display_path, method);
+ if (clock_source_index == RESOURCE_INVALID_INDEX)
+ return TM_RESULT_FAILURE;
+
+ engine_id = tmrm_get_available_stream_engine(tm_rm, display_path,
+ method);
+ if (engine_id == ENGINE_ID_UNKNOWN)
+ return TM_RESULT_FAILURE;
+
+ /*****************************************************************
+ * At this point we know that all required resources are available
+ * and we know IDs/indexes of these.
+ * Some of the resources are already in display_path (for example
+ * the connector), but from Resources point of view the acquisition
+ * was not done yet, and this is what will be done.
+ *****************************************************************/
+
+ tmrm_acquire_connector(tm_rm, display_path);
+
+ /* Acquire links (encoder, audio) */
+ for (i = 0;
+ i < dal_display_path_get_number_of_links(display_path);
+ i++) {
+ tmrm_acquire_link(tm_rm, display_path, i, method);
+ }
+
+ tmrm_acquire_stereo_sync(tm_rm, display_path);
+
+ tmrm_acquire_sync_output(tm_rm, display_path);
+
+	/* NOTE: GLSync Connector resources are never acquired within a
+	 * display path and never intersect with other resources. */
+
+ /* In the context of checking co-func set, if the alternative
+ * clock source is attached to display path, there is a need
+ * to acquire its resource. */
+ tmrm_acquire_alternative_clock(tm_rm, display_path);
+
+	/* Acquire temporary, but mandatory components - this CANNOT fail,
+	 * since we confirmed availability of resources at the beginning of
+	 * this function. */
+ dal_tmrm_acquire_controller(tm_rm, display_path, controller_index,
+ method);
+
+ tmrm_acquire_clock_source(tm_rm, display_path, clock_source_index);
+
+ tmrm_acquire_stream_engine(tm_rm, display_path, engine_id);
+
+ if (TM_RESULT_SUCCESS != tmrm_add_root_plane(tm_rm, display_path,
+ controller_index)) {
+ TM_ERROR("%s: failed to add 'root' plane!\n", __func__);
+ return TM_RESULT_FAILURE;
+ }
+
+ dal_display_path_acquire_links(display_path);
+
+ if (update_hw_state_needed(method)) {
+ dal_display_path_acquire(display_path);
+ }
+
+ return TM_RESULT_SUCCESS;
+}
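+
+/* A minimal usage sketch (hypothetical caller, for illustration only):
+ * a SW-level acquire used to validate a candidate configuration must be
+ * balanced by a release with the same method, because HW-update actions
+ * depend on the 1-to-0 and 0-to-1 reference counter transitions:
+ *
+ *	if (TM_RESULT_SUCCESS == tm_resource_mgr_acquire_resources(
+ *			tm_rm, path, TM_ACQUIRE_METHOD_SW)) {
+ *		... validate the co-functional set here ...
+ *		tm_resource_mgr_release_resources(tm_rm, path,
+ *				TM_ACQUIRE_METHOD_SW);
+ *	}
+ */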
+
+/**
+ * Releases a resource and its pair.
+ *
+ * \param [in] resource: Resource to release
+ * \param [in] paired_resource: Paired resource to release
+ */
+static void tmrm_release_resource(struct tm_resource_mgr *tm_rm,
+ struct tm_resource *resource,
+ struct tm_resource *paired_resource)
+{
+ /* Release Resource and clear MST flag for main resource. */
+ if (resource != NULL &&
+ tm_resource_mgr_ref_counter_decrement(tm_rm,
+ resource) == 0) {
+ /* it was the last reference */
+ resource->flags.mst_resource = false;
+ }
+
+ /* Release Resource and clear MST flag for paired resource. */
+ if (paired_resource != NULL &&
+ tm_resource_mgr_ref_counter_decrement(tm_rm,
+ paired_resource) == 0) {
+ /* it was the last reference */
+ paired_resource->flags.mst_resource = false;
+ }
+}
+
+static void tmrm_release_stereo_sync_resource(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ struct tm_resource *resource = NULL;
+
+ /* Stereosync encoder will be present only on
+ * already acquired path. */
+ resource = tm_resource_mgr_get_stereo_sync_resource(tm_rm,
+ display_path);
+
+ tmrm_release_resource(tm_rm, resource, NULL);
+}
+
+static void tmrm_release_sync_output_resource(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ struct tm_resource *resource = NULL;
+
+ /* Sync-output encoder will be present only on already acquired path */
+ resource = tm_resource_mgr_get_sync_output_resource(tm_rm,
+ display_path);
+
+ tmrm_release_resource(tm_rm, resource, NULL);
+}
+
+static void tmrm_release_connector_resource(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ struct tm_resource *resource;
+ struct connector *connector;
+
+ connector = dal_display_path_get_connector(display_path);
+
+ resource = tm_resource_mgr_find_resource(tm_rm,
+ dal_connector_get_graphics_object_id(connector));
+
+ tmrm_release_resource(tm_rm, resource, NULL);
+}
+
+static void tmrm_release_encoder_resource(struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ uint32_t link_idx)
+{
+ struct tm_resource *resource = NULL;
+ struct tm_resource *paired_resource = NULL;
+ struct encoder *encoder;
+ bool is_dual_link_signal;
+ enum signal_type sink_signal;
+ uint32_t paired_encoder_index;
+ uint32_t display_index;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ display_index = dal_display_path_get_display_index(display_path);
+
+ TM_RESOURCES("%s: display_index:%d, link_idx:%d\n", __func__,
+ display_index, link_idx);
+
+ sink_signal = dal_display_path_get_query_signal(display_path,
+ SINK_LINK_INDEX);
+
+ is_dual_link_signal = dal_is_dual_link_signal(sink_signal);
+
+ encoder = dal_display_path_get_upstream_object(display_path, link_idx);
+
+	if (encoder) {
+		resource = tm_resource_mgr_find_resource(tm_rm,
+				dal_encoder_get_graphics_object_id(encoder));
+
+		/* The lookup may fail - check 'resource' before
+		 * dereferencing it. */
+		if (resource != NULL) {
+			paired_encoder_index =
+				TO_ENCODER_INFO(resource)->paired_encoder_index;
+
+			if (is_dual_link_signal &&
+					paired_encoder_index !=
+						RESOURCE_INVALID_INDEX) {
+
+				TM_ASSERT(!resource->flags.mst_resource);
+
+				paired_resource =
+					tm_resource_mgr_enum_resource(tm_rm,
+						paired_encoder_index);
+			}
+		}
+
+		tmrm_release_resource(tm_rm, resource, paired_resource);
+	}
+}
+
+static void tmrm_release_link_service_resources(struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ uint32_t i;
+ uint32_t links_num;
+
+ links_num = dal_display_path_get_number_of_links(display_path);
+
+ /* Release links (encoder) Audio release on
+ * connect/disconnected. */
+ for (i = 0; i < links_num; i++) {
+ tmrm_release_encoder_resource(tm_rm, display_path, i);
+ }
+}
+
+/**
+ * Releases resources which were acquired for given display path.
+ * Exception - Audio resource acquired/released automatically
+ * on connect/disconnect.
+ *
+ * \param [in] display_path: Display path on which to release resources
+ * \param [in] method: How resources were ACQUIRED
+ */
+void tm_resource_mgr_release_resources(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ enum tm_acquire_method method)
+{
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ if (display_path == NULL) {
+ TM_ERROR("%s: invalid state or input data!\n", __func__);
+ return;
+ }
+
+ if (true == dal_display_path_is_acquired(display_path)) {
+ if (update_hw_state_needed(method)) {
+ if (dal_display_path_get_ref_counter(
+ display_path) > 1) {
+ /* We get here when handling HPD-Disconnect.
+ * It is ok because Path is double-acquired:
+ * 1st acquire - to drive the display.
+ * 2nd acquire - to detect the display.
+ *
+ * Decrement reference counter of the path. */
+ dal_display_path_release(display_path);
+ /* We can NOT release path-resources because
+ * someone is still using it.*/
+ return;
+ }
+ } else {
+ /* Path is "in-use" at HW level. We should NOT change
+ * its state. */
+ return;
+ }
+ }
+
+ tmrm_release_stream_engine(tm_rm, display_path);
+
+ /* Clock source should be released before controller. */
+ tmrm_release_clock_source(tm_rm, display_path,
+ dal_display_path_get_clock_source(display_path),
+ method);
+
+ tmrm_release_clock_source(tm_rm, display_path,
+ dal_display_path_get_alt_clock_source(display_path),
+ method);
+
+ tmrm_release_stereo_sync_resource(tm_rm, display_path);
+
+ tmrm_release_sync_output_resource(tm_rm, display_path);
+
+ tmrm_release_connector_resource(tm_rm, display_path);
+
+ tmrm_release_link_service_resources(tm_rm, display_path);
+
+ /* Deactivate all resources */
+ if (update_hw_state_needed(method)) {
+
+ /* Release ALL Planes, including the "root" one.
+ * This is different from "dal_tm_release_plane_resources()"
+ * where we release only NON-ROOT planes. */
+
+		/* Non-root controllers MUST be released BEFORE the root one
+		 * because dal_tmrm_release_non_root_controllers() will NOT
+		 * release the 1st controller in the vector.
+		 * If we don't do it in this order we will "leak" a controller
+		 * (because it is falsely considered a 'root'). */
+ dal_tmrm_release_non_root_controllers(
+ tm_rm,
+ display_path,
+ method);
+
+ dal_display_path_release(display_path);
+ }
+
+ /* this will release 'root' controller */
+ dal_tmrm_release_controller(
+ tm_rm,
+ display_path,
+ method,
+ dal_display_path_get_controller(display_path));
+
+ dal_display_path_release_resources(display_path);
+}
+
+/**
+ * Acquire alternative ClockSource on display path with appropriate group
+ * sharing level.
+ *
+ * \param [in] display_path: Display path to which ClockSource will be attached
+ *
+ * \return TM_RESULT_SUCCESS: if additional ClockSource was successfully
+ * attached to requested display path
+ * TM_RESULT_FAILURE: otherwise
+ */
+enum tm_result tm_resource_mgr_acquire_alternative_clock_source(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ uint32_t clock_source_index;
+ struct tm_resource *tm_resource;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ TM_ASSERT(display_path != NULL);
+
+ /* Find an appropriate clock source. */
+ clock_source_index = tmrm_get_available_clock_source(tm_rm,
+ display_path, TM_ACQUIRE_METHOD_HW);
+ if (clock_source_index == RESOURCE_INVALID_INDEX)
+ return TM_RESULT_FAILURE;
+
+ /* the actual acquiring */
+ tm_resource = tm_resource_mgr_enum_resource(tm_rm, clock_source_index);
+
+ TO_CLOCK_SOURCE_INFO(tm_resource)->clk_sharing_group =
+ dal_display_path_get_clock_sharing_group(display_path);
+
+ tm_resource_ref_counter_increment(tm_rm, tm_resource);
+
+ dal_display_path_set_alt_clock_source(display_path,
+ TO_CLOCK_SOURCE(tm_resource));
+
+ return TM_RESULT_SUCCESS;
+}
+
+/**
+ * Releases clock source if such was acquired on display path.
+ *
+ * \param [in] display_path: Display path from which we want to detach
+ * clock source
+ * \param [in] clock_source: clock source to release
+ * \param [in] method: How resources were acquired
+ */
+static void tmrm_release_clock_source(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ struct clock_source *clock_source,
+ enum tm_acquire_method method)
+{
+ struct tm_resource *tm_resource;
+ struct controller *controller;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ TM_ASSERT(display_path != NULL);
+
+ if (clock_source == NULL) {
+ /* that means there is no alternative clock source */
+ return;
+ }
+
+ tm_resource = tmrm_find_clock_source_resource(tm_rm, clock_source);
+ if (tm_resource == NULL)
+ return;
+
+ if (tm_resource_mgr_ref_counter_decrement(tm_rm, tm_resource) == 0) {
+ /* Once nobody uses this Clock Source - restore default
+ * sharing group. */
+ TO_CLOCK_SOURCE_INFO(tm_resource)->clk_sharing_group =
+ CLOCK_SHARING_GROUP_EXCLUSIVE;
+
+ if (update_hw_state_needed(method)) {
+
+			/* HWSS cannot power off the PLL because the resource
+			 * is shared (it doesn't know if anyone else is still
+			 * using it).
+			 * Do it now, when the last reference is removed. */
+
+ controller = dal_display_path_get_controller(
+ display_path);
+
+ TM_ASSERT(controller != NULL);
+
+ dal_clock_source_power_down_pll(clock_source,
+ dal_controller_get_id(controller));
+ }
+ }
+
+ if (dal_display_path_get_alt_clock_source(display_path) == clock_source)
+ dal_display_path_set_alt_clock_source(display_path, NULL);
+ else
+ dal_display_path_set_clock_source(display_path, NULL);
+}
+
+/**
+ * Queries if an alternative ClockSource resource can be found.
+ *
+ * \param [in] display_path: Display path for which the ClockSource resource
+ * is searched for
+ */
+bool tm_resource_mgr_is_alternative_clk_src_available(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ uint32_t clock_source_index;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ TM_ASSERT(display_path != NULL);
+
+ /* Check if alternative clock source can be found. */
+ clock_source_index = tmrm_get_available_clock_source(tm_rm,
+ display_path, TM_ACQUIRE_METHOD_HW);
+
+ if (clock_source_index == RESOURCE_INVALID_INDEX) {
+ /* not found means not available */
+ return false;
+ }
+
+ /* available */
+ return true;
+}
+
+/**
+ * Reset usage counter for ALL resources.
+ */
+void tm_resource_mgr_reset_all_usage_counters(
+ struct tm_resource_mgr *tm_rm)
+{
+ uint32_t i;
+ struct tm_resource *tm_resource;
+
+ for (i = 0; i < tm_resource_mgr_get_total_resources_num(tm_rm); i++) {
+ tm_resource = tm_resource_mgr_enum_resource(tm_rm, i);
+ TM_RES_REF_CNT_RESET(tm_resource);
+ }
+}
+
+/**
+ * Obtains stereo-sync resource currently assigned to display path
+ *
+ * \param [in] display_path: Display path for which we are
+ * looking up the stereo-sync resource.
+ *
+ * \return Pointer to the stereo-sync resource if found, NULL otherwise
+ */
+struct tm_resource*
+tm_resource_mgr_get_stereo_sync_resource(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ struct encoder *encoder;
+ struct graphics_object_id id;
+
+ if (display_path == NULL)
+ return NULL;
+
+ encoder = dal_display_path_get_stereo_sync_object(display_path);
+
+ if (encoder == NULL)
+ return NULL;
+
+ id = dal_encoder_get_graphics_object_id(encoder);
+
+ return tm_resource_mgr_find_resource(tm_rm, id);
+}
+
+struct tm_resource *tm_resource_mgr_get_sync_output_resource(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ struct encoder *encoder;
+ struct graphics_object_id id;
+
+ if (display_path == NULL)
+ return NULL;
+
+ encoder = dal_display_path_get_sync_output_object(display_path);
+
+ if (encoder == NULL)
+ return NULL;
+
+ id = dal_encoder_get_graphics_object_id(encoder);
+
+ return tm_resource_mgr_find_resource(tm_rm, id);
+}
+
+/**
+ * Finds an available sync-output resource that can be attached to the
+ * display path. Currently the sync-output object can only be an encoder.
+ *
+ * \param [in] display_path: Display path for which the sync-output resource
+ *	is requested
+ * \param [in] sync_output: Identification of the sync-output resource
+ *
+ * \return The sync-output resource for the given display path if such a
+ *	resource was found, NULL otherwise.
+ */
+struct tm_resource*
+tm_resource_mgr_get_available_sync_output_for_display_path(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ enum sync_source sync_output)
+{
+ struct dal_context *dal_context = tm_rm->dal_context;
+ struct tm_resource *sync_output_rsrc = NULL;
+ struct tm_resource *encoder_rsrc = NULL;
+ enum sync_source current_sync_source;
+ bool path_contains_obj;
+ uint32_t i;
+ const struct tm_resource_range *encoders =
+ dal_tmrm_get_resource_range_by_type(tm_rm, OBJECT_TYPE_ENCODER);
+
+ if (display_path == NULL ||
+ !dal_display_path_is_acquired(display_path)) {
+ TM_WARNING("%s: invalid input or path not acquired!\n",
+ __func__);
+ return NULL;
+ }
+
+ /* Loop over all encoders */
+ for (i = encoders->start; i < encoders->end; i++) {
+ encoder_rsrc = tm_resource_mgr_enum_resource(tm_rm, i);
+
+ /* We are looking for an encoder with the requested sync-output
+ * capabilities, that satisfies one of these:
+ * 1. Encoder acquired on issued display path (as encoder or
+ * optional object) - best case, we can stop looping
+ * 2. Encoder is not acquired on any display path - we remember
+		 *    such an encoder, but continue to loop to find an encoder
+		 *    that matches the first requirement
+ */
+ current_sync_source = dal_encoder_get_vsync_output_source(
+ TO_ENCODER(encoder_rsrc));
+
+ if (current_sync_source == sync_output) {
+
+ path_contains_obj = dal_display_path_contains_object(
+ display_path,
+ GRPH_ID(encoder_rsrc));
+
+ if (path_contains_obj) {
+ sync_output_rsrc = encoder_rsrc;
+ break;
+ } else if (!TM_RES_REF_CNT_GET(encoder_rsrc)) {
+ /* For now this is the one, but keep
+ * searching.*/
+ sync_output_rsrc = encoder_rsrc;
+ }
+ }
+ }
+
+ return sync_output_rsrc;
+}
+
+void tm_resource_mgr_set_gpu_interface(
+ struct tm_resource_mgr *tm_rm,
+ struct gpu *gpu)
+{
+ tm_rm->gpu_interface = gpu;
+}
+
+struct gpu *tm_resource_mgr_get_gpu_interface(
+ struct tm_resource_mgr *tm_rm)
+{
+ return tm_rm->gpu_interface;
+}
+
+
+/**
+ * Attaches an audio resource to the display path if one is available for
+ * the specified signal type, and increments its reference count.
+ *
+ * \param [in] display_path: Display path on which connect event occurred
+ * \param [in] signal: signal type
+ */
+enum tm_result tm_resource_mgr_attach_audio_to_display_path(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ enum signal_type sig_type)
+{
+ struct dal_context *dal_context = tm_rm->dal_context;
+ union display_path_properties path_props;
+ uint32_t i;
+ struct tm_resource *tm_audio_resource = NULL;
+ const struct tm_resource_range *audios;
+
+ /* First we check if the display path already has an audio assigned,
+ * and if so, we print a warning (should never happen) and
+ * return TM_RESULT_SUCCESS. */
+ if (dal_display_path_get_audio_object(display_path,
+ ASIC_LINK_INDEX) != NULL) {
+ TM_WARNING("%s: audio already attached!\n ", __func__);
+ return TM_RESULT_SUCCESS;
+ }
+
+ /* If DP signal but DP audio not supported in the display path,
+ * or HDMI signal but HDMI audio not supported in the display path,
+ * we return TM_RESULT_FAILURE. */
+ path_props = dal_display_path_get_properties(display_path);
+
+ if ((dal_is_dp_signal(sig_type) &&
+ !path_props.bits.IS_DP_AUDIO_SUPPORTED) ||
+ (dal_is_hdmi_signal(sig_type) &&
+ !path_props.bits.IS_HDMI_AUDIO_SUPPORTED)) {
+ TM_WARNING("%s: can't attach audio - no audio support on path!\n ",
+ __func__);
+ return TM_RESULT_FAILURE;
+ }
+
+ audios = dal_tmrm_get_resource_range_by_type(tm_rm, OBJECT_TYPE_AUDIO);
+
+ /* Loop over all audio resources, and assign the first free audio
+ * which supports the signal. */
+ for (i = audios->start; i < audios->end; i++) {
+
+ tm_audio_resource = tm_resource_mgr_enum_resource(tm_rm, i);
+
+ /* Allow at most ONE display path to use an audio resource. */
+ if (is_resource_available(tm_audio_resource) == false) {
+ /* This audio is in-use, continue the search. */
+ continue;
+ }
+
+ if (!dal_audio_is_output_signal_supported(
+ TO_AUDIO(tm_audio_resource), sig_type)) {
+ /* Signal is not supported by the audio
+ * resource, continue. */
+ continue;
+ }
+
+ /* Available audio found.
+ * Set audio on display path, increment the reference count and
+ * return TM_RESULT_SUCCESS. */
+ dal_display_path_set_audio(display_path, ASIC_LINK_INDEX,
+ TO_AUDIO_INFO(tm_audio_resource)->audio);
+
+ tm_resource_ref_counter_increment(tm_rm, tm_audio_resource);
+
+ return TM_RESULT_SUCCESS;
+ }
+
+ /* If we got here, we didn't find a free audio resource. */
+ return TM_RESULT_FAILURE;
+}
+
+/**
+ * Releases (decrements the reference counter of) the audio resource
+ * assigned to the specified display path.
+ *
+ * \param [in] display_path: Display path from which to detach audio
+ */
+void tm_resource_mgr_detach_audio_from_display_path(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ struct tm_resource *tm_audio_resource = NULL;
+
+ /* decrement the reference count and reset audio on display path */
+ tm_audio_resource = tmrm_display_path_find_audio_resource(tm_rm,
+ display_path, ASIC_LINK_INDEX);
+
+ if (NULL == tm_audio_resource) {
+ /* nothing to do without the resource */
+ return;
+ }
+
+ tm_resource_mgr_ref_counter_decrement(tm_rm, tm_audio_resource);
+
+ dal_display_path_set_audio_active_state(
+ display_path,
+ ASIC_LINK_INDEX,
+ false);
+
+ dal_display_path_set_audio(display_path, ASIC_LINK_INDEX, NULL);
+}
+
+/**
+ * Allocates or re-allocates memory to store pointers to link services.
+ * In case of re-allocation, moves existing link services to the newly
+ * allocated space.
+ * We allocate up to LINK_SERVICE_TYPE_MAX link services for each link,
+ * for each display path. Each type of link service is used when
+ * switching between SST/MST/Legacy on the fly.
+ * TODO: consider hiding this switching logic inside the link service,
+ * so TM simply commands when a type switch is needed. Once that is done,
+ * LINK_SERVICE_TYPE_MAX should be removed from the calculation.
+ *
+ * \param [in] number_of_paths: number of created display paths
+ *
+ * \return TM_RESULT_SUCCESS: if memory was successfully allocated
+ * TM_RESULT_FAILURE: otherwise
+ */
+enum tm_result tm_resource_mgr_setup_link_storage(
+ struct tm_resource_mgr *tm_rm,
+ uint32_t number_of_paths)
+{
+ struct vector *link_services = NULL;
+ uint32_t num_of_cells_to_copy = 0;
+ uint32_t requested_num_of_cells = 0;
+ uint32_t i;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ /* Calculate new allocation size and how many cells we need to copy
+ * to new memory segment. */
+ if (NULL != tm_rm->link_services) {
+
+ num_of_cells_to_copy =
+ dal_vector_get_count(tm_rm->link_services) *
+ LINK_SERVICE_TYPE_MAX *
+ MAX_NUM_OF_LINKS_PER_PATH;
+ }
+
+ requested_num_of_cells = number_of_paths *
+ LINK_SERVICE_TYPE_MAX * MAX_NUM_OF_LINKS_PER_PATH;
+
+ if (num_of_cells_to_copy > requested_num_of_cells) {
+ /* New storage requirement is smaller than old one,
+ * therefore we'll copy only some of the old link services. */
+ num_of_cells_to_copy = requested_num_of_cells;
+ }
+
+ /* Allocate new array. It will store POINTERS. */
+ if (requested_num_of_cells > 0) {
+
+ link_services = dal_vector_presized_create(
+ requested_num_of_cells,
+ NULL,/* no initial value - leave all zeros */
+ sizeof(struct link_service *));
+ }
+
+ /* Transfer existing link services to the new vector. */
+ if (link_services != NULL) {
+ struct link_service *current_ls;
+
+ for (i = 0; i < num_of_cells_to_copy; i++) {
+
+ current_ls = tmrm_get_ls_at_index(tm_rm, i);
+
+ if (current_ls) {
+ link_services_vector_set_at_index(
+ link_services, &current_ls, i);
+ }
+ }
+ }
+
+ /* Release old vector and reassign data member. */
+ if (tm_rm->link_services != NULL)
+ dal_vector_destroy(&tm_rm->link_services);
+
+ tm_rm->link_services = link_services;
+
+ if (tm_rm->link_services == NULL || requested_num_of_cells == 0) {
+ tm_rm->link_services_number_of_paths = 0;
+ TM_ERROR("%s: no link services were allocated!\n", __func__);
+ return TM_RESULT_FAILURE;
+ }
+
+ tm_rm->link_services_number_of_paths = number_of_paths;
+ return TM_RESULT_SUCCESS;
+}
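+
+/* Sizing sketch (illustrative only; the real constant values live in the
+ * TM headers): if, say, LINK_SERVICE_TYPE_MAX were 3 and
+ * MAX_NUM_OF_LINKS_PER_PATH were 2, then number_of_paths = 6 would
+ * allocate 6 * 3 * 2 = 36 pointer cells - one per
+ * (path, link, link service type) combination.
+ */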
+
+/**
+ * Notifies all link services (whether in use or not) that they are
+ * invalidated.
+ */
+void tm_resource_mgr_invalidate_link_services(
+ struct tm_resource_mgr *tm_rm)
+{
+ uint32_t i;
+ struct link_service *link_service;
+
+ for (i = 0; i < dal_vector_get_count(tm_rm->link_services); i++) {
+
+ link_service = tmrm_get_ls_at_index(tm_rm, i);
+
+		/* Note that the MST shared link will be notified multiple
+		 * times. It's okay for now because this call just sets a flag
+		 * and the real work is done in link_service->connect_link().
+		 * TODO: code this properly so each link gets notified once. */
+ if (link_service != NULL)
+ dal_ls_invalidate_down_stream_devices(link_service);
+ }
+}
+
+/**
+ * Release all link services.
+ */
+void tm_resource_mgr_release_all_link_services(
+ struct tm_resource_mgr *tm_rm)
+{
+ uint32_t i;
+ struct link_service *link_service;
+
+ if (tm_rm->link_services == NULL)
+ return;
+
+ for (i = 0; i < dal_vector_get_count(tm_rm->link_services); i++) {
+
+ link_service = tmrm_get_ls_at_index(tm_rm, i);
+
+ if (link_service != NULL) {
+
+ dal_link_service_destroy(&link_service);
+
+ link_service = NULL;
+
+ tmrm_set_ls_at_index(tm_rm, i, link_service);
+ }
+ }
+}
+
+/**
+ * Releases link services for given display path.
+ */
+void tm_resource_mgr_release_path_link_services(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ uint32_t single_path_ls_array_size =
+ LINK_SERVICE_TYPE_MAX * MAX_NUM_OF_LINKS_PER_PATH;
+ uint32_t display_index;
+ uint32_t i;
+ uint32_t index;
+ struct link_service *link_service;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ display_index = dal_display_path_get_display_index(display_path);
+
+ if (display_index >= tm_rm->link_services_number_of_paths) {
+ TM_ERROR("%s: invalid input/state!\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < single_path_ls_array_size; i++) {
+
+ index = (display_index * LINK_SERVICE_TYPE_MAX *
+ MAX_NUM_OF_LINKS_PER_PATH) + i;
+
+ link_service = tmrm_get_ls_at_index(tm_rm, index);
+
+ if (link_service != NULL) {
+
+ dal_link_service_destroy(&link_service);
+
+ link_service = NULL;
+
+ tmrm_set_ls_at_index(tm_rm, index, link_service);
+ }
+ }
+
+	/* We removed *all* link services for the path, so this path could
+	 * be counted out of our link_services vector: */
+	/*tm_rm->link_services_number_of_paths--;*/
+}
+
+/**
+ * Adds already created link service to the pool.
+ * The index is calculated based on:
+ * (Display index) x (max num of link per path) x
+ * (max num of link service types per link) +
+ * (Link index) x (max num of link service types per link) +
+ * (Link service type)
+ *
+ * \param [in] display_path: display path associated with link service
+ * \param [in] link_index: link index inside display path associated
+ * with link service
+ * \param [in] new_link_service: link service to add
+ *
+ * \return TM_RESULT_SUCCESS if link service was successfully added,
+ * TM_RESULT_FAILURE otherwise
+ */
+enum tm_result tm_resource_mgr_add_link_service(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ uint32_t link_index,
+ struct link_service *new_link_service)
+{
+ uint32_t display_index;
+ uint32_t index;
+ struct link_service *old_link_service;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ TM_ASSERT(display_path != NULL);
+
+ display_index = dal_display_path_get_display_index(display_path);
+
+ if (display_index >= tm_rm->link_services_number_of_paths ||
+ link_index >= MAX_NUM_OF_LINKS_PER_PATH ||
+ new_link_service == NULL) {
+ TM_ERROR("%s: Invalid input!\n", __func__);
+ return TM_RESULT_FAILURE;
+ }
+
+ index = (display_index * LINK_SERVICE_TYPE_MAX *
+ MAX_NUM_OF_LINKS_PER_PATH)
+ + (link_index * LINK_SERVICE_TYPE_MAX)
+ + (dal_ls_get_link_service_type(
+ new_link_service));
+
+ old_link_service = tmrm_get_ls_at_index(tm_rm, index);
+
+ if (old_link_service != NULL) {
+ TM_ERROR("%s: overwriting an existing LS pointer!\n",
+ __func__);
+ return TM_RESULT_FAILURE;
+ }
+
+ tmrm_set_ls_at_index(tm_rm, index, new_link_service);
+
+ return TM_RESULT_SUCCESS;
+}
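+
+/* Illustrative sketch only (a hypothetical helper, not used by the
+ * driver): the cell index computed above for a (path, link, type)
+ * triple, extracted into one place.
+ */
+static inline uint32_t tmrm_example_ls_index(uint32_t display_index,
+	uint32_t link_index,
+	uint32_t link_type)
+{
+	return (display_index * LINK_SERVICE_TYPE_MAX *
+			MAX_NUM_OF_LINKS_PER_PATH) +
+		(link_index * LINK_SERVICE_TYPE_MAX) +
+		link_type;
+}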
+
+/**
+ * Obtains the link service for the requested link matching the requested
+ * signal.
+ * The index is calculated based on:
+ * (Display index) x (max num of links per path) x
+ *	(max num of link service types per link) +
+ * (Link index) x (max num of link service types per link) +
+ * (Link service type)
+ *
+ * \param [in] display_path: display path associated with link service
+ * \param [in] link_index: link index inside display path associated with
+ * link service
+ * \param [in] signal: signal type matching link service
+ *
+ * \return Pointer to link service associated with given link,
+ * matching given signal
+ */
+struct link_service *tm_resource_mgr_get_link_service(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ uint32_t link_index,
+ enum signal_type sig_type)
+{
+ enum link_service_type link_type;
+ uint32_t display_index;
+ uint32_t link_service_count;
+ uint32_t index;
+ struct link_service *link_service;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ TM_ASSERT(display_path != NULL);
+
+ link_type = tm_utils_signal_to_link_service_type(sig_type);
+ display_index = dal_display_path_get_display_index(display_path);
+ link_service_count = dal_display_path_get_number_of_links(display_path);
+
+ TM_LINK_SRV("%s: PathIdx:%d, LinkIdx:%d, Signal:0x%08X(%s)\n",
+ __func__, display_index, link_index, sig_type,
+ tm_utils_signal_type_to_str(sig_type));
+
+ if (display_index >= tm_rm->link_services_number_of_paths
+ || link_index >= link_service_count
+ || link_type >= LINK_SERVICE_TYPE_MAX) {
+ TM_ERROR("%s: Invalid input!\n", __func__);
+ return NULL;
+ }
+
+ index = (display_index * LINK_SERVICE_TYPE_MAX
+ * MAX_NUM_OF_LINKS_PER_PATH)
+ + (link_index * LINK_SERVICE_TYPE_MAX) + (link_type);
+
+ link_service = tmrm_get_ls_at_index(tm_rm, index);
+
+ return link_service;
+}
+
+/**
+ * Obtains the link service for the first link that matches the requested
+ * signal.
+ * The index is calculated based on:
+ * (Display index) x (max num of links per path) x
+ *	(max num of link service types per link) +
+ * (Link index) x (max num of link service types per link) +
+ * (Link service type)
+ *
+ * \param [in] display_path: display path associated with link service
+ * \param [in] signal: signal type matching link service
+ *
+ * \return Pointer to link service associated with given signal
+ */
+struct link_service *tm_resource_mgr_find_link_service(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ enum signal_type sig_type)
+{
+ enum link_service_type link_type;
+ uint32_t display_index;
+ uint32_t index;
+ uint32_t i;
+ struct link_service *link_service;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ TM_ASSERT(display_path != NULL);
+
+ link_type = tm_utils_signal_to_link_service_type(sig_type);
+ display_index = dal_display_path_get_display_index(display_path);
+
+ for (i = 0; i < MAX_NUM_OF_LINKS_PER_PATH; i++) {
+
+ index = (display_index * LINK_SERVICE_TYPE_MAX
+ * MAX_NUM_OF_LINKS_PER_PATH)
+ + (i * LINK_SERVICE_TYPE_MAX) + (link_type);
+
+ link_service = tmrm_get_ls_at_index(tm_rm, index);
+
+ if (link_service != NULL) {
+ /* found the 1st matching one*/
+ return link_service;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * Swaps link services of two displays.
+ *
+ * \param [in] display_index1: display 1 to swap
+ * \param [in] display_index2: display 2 to swap
+ */
+
+void tm_resource_mgr_swap_link_services(
+ struct tm_resource_mgr *tm_rm,
+ uint32_t display_index1,
+ uint32_t display_index2)
+{
+ uint32_t i;
+ uint32_t index1;
+ uint32_t index2;
+ struct link_service *link_service_index1;
+ struct link_service *link_service_index2;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ if (display_index1 >= tm_rm->link_services_number_of_paths
+ || display_index2 >= tm_rm->link_services_number_of_paths) {
+ TM_ERROR("%s: Invalid input!\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < LINK_SERVICE_TYPE_MAX * MAX_NUM_OF_LINKS_PER_PATH;
+ i++) {
+
+ index1 = (display_index1 * LINK_SERVICE_TYPE_MAX
+ * MAX_NUM_OF_LINKS_PER_PATH) + i;
+
+ index2 = (display_index2 * LINK_SERVICE_TYPE_MAX
+ * MAX_NUM_OF_LINKS_PER_PATH) + i;
+
+ link_service_index1 = tmrm_get_ls_at_index(tm_rm, index1);
+
+ link_service_index2 = tmrm_get_ls_at_index(tm_rm, index2);
+
+ tmrm_set_ls_at_index(tm_rm, index1, link_service_index2);
+
+ tmrm_set_ls_at_index(tm_rm, index2, link_service_index1);
+ }
+}
+
+/**
+ * Associates link services with a display path and type of link.
+ *
+ * \param [in] display_path: display path to associate with link services
+ */
+void tm_resource_mgr_associate_link_services(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path)
+{
+ uint32_t number_of_links;
+ uint32_t display_index;
+ enum signal_type sink_signal;
+ uint32_t link_idx;
+ bool is_internal_link;
+ uint32_t link_type;
+ uint32_t index;
+ struct link_service *link_service;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ TM_ASSERT(display_path != NULL);
+
+ number_of_links = dal_display_path_get_number_of_links(display_path);
+ display_index = dal_display_path_get_display_index(display_path);
+ sink_signal = dal_display_path_get_query_signal(display_path,
+ SINK_LINK_INDEX);
+
+ if (display_index >= tm_rm->link_services_number_of_paths) {
+ TM_ERROR("%s: Invalid input!\n", __func__);
+ return;
+ }
+
+ for (link_idx = 0; link_idx < number_of_links; link_idx++) {
+
+ is_internal_link = (sink_signal == SIGNAL_TYPE_EDP
+ || link_idx < number_of_links - 1);
+
+ for (link_type = 0; link_type < LINK_SERVICE_TYPE_MAX;
+ link_type++) {
+
+ index = (display_index * LINK_SERVICE_TYPE_MAX
+ * MAX_NUM_OF_LINKS_PER_PATH)
+ + (link_idx * LINK_SERVICE_TYPE_MAX)
+ + (link_type);
+
+ link_service = tmrm_get_ls_at_index(tm_rm, index);
+
+ if (link_service != NULL) {
+
+ dal_ls_associate_link(link_service,
+ display_index,
+ link_idx,
+ is_internal_link);
+ }
+ }
+ }
+}
+
+void dal_tmrm_set_resources_range_by_type(struct tm_resource_mgr *tm_rm)
+{
+ uint32_t index;
+ struct tm_resource *tm_resource;
+ uint32_t count = tm_resource_mgr_get_total_resources_num(tm_rm);
+ struct tm_resource_range *resources;
+
+ for (index = 0; index < count; index++) {
+ tm_resource = tm_resource_mgr_enum_resource(tm_rm, index);
+ resources = &tm_rm->resources_range[GRPH_ID(tm_resource).type];
+
+ if (resources->end == 0) {
+ resources->end = index;
+ resources->start = index;
+ }
+
+ resources->end++;
+ }
+}
+
+const struct tm_resource_range *dal_tmrm_get_resource_range_by_type(
+ struct tm_resource_mgr *tm_rm,
+ enum object_type type)
+{
+ if (type <= OBJECT_TYPE_UNKNOWN ||
+ type >= OBJECT_TYPE_COUNT)
+ return NULL;
+
+ return &tm_rm->resources_range[type];
+}
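+
+/* Iteration sketch (illustrative): callers walk the resources of one
+ * type through the returned range, as
+ * tm_resource_mgr_attach_audio_to_display_path() does:
+ *
+ *	const struct tm_resource_range *audios =
+ *		dal_tmrm_get_resource_range_by_type(tm_rm, OBJECT_TYPE_AUDIO);
+ *
+ *	for (i = audios->start; i < audios->end; i++)
+ *		tm_resource = tm_resource_mgr_enum_resource(tm_rm, i);
+ */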
+
+
+/**
+ * Debug output of all resources
+ */
+void dal_tmrm_dump(struct tm_resource_mgr *tm_rm)
+{
+ uint32_t index;
+ struct tm_resource *tm_resource;
+ struct dal_context *dal_context = tm_rm->dal_context;
+
+ TM_RESOURCES("Total number of TM resources = %u. Resource list:\n",
+ tm_resource_mgr_get_total_resources_num(tm_rm));
+
+ for (index = 0;
+ index < tm_resource_mgr_get_total_resources_num(tm_rm);
+ index++) {
+
+ tm_resource = tm_resource_mgr_enum_resource(tm_rm, index);
+
+ TM_RESOURCES("Resource at [%02d]: %s\n",
+ index,
+ tm_utils_get_tm_resource_str(tm_resource));
+ }
+
+ TM_RESOURCES("End of resource list.\n");
+}
+
+struct controller *dal_tmrm_get_free_controller(
+ struct tm_resource_mgr *tm_rm,
+ uint32_t *controller_index_out,
+ uint32_t exclude_mask)
+{
+ struct dal_context *dal_context = tm_rm->dal_context;
+ uint32_t res_ind;
+ struct tm_resource *tm_resource_tmp = NULL;
+
+ res_ind =
+ dal_tmrm_find_controller_for_display_path(
+ tm_rm,
+ exclude_mask);
+
+ if (RESOURCE_INVALID_INDEX == res_ind) {
+ TM_MPO("%s: failed to find controller!\n", __func__);
+ return NULL;
+ }
+
+ tm_resource_tmp = tm_resource_mgr_enum_resource(tm_rm, res_ind);
+
+ *controller_index_out = res_ind;
+
+ TM_MPO("%s: found controller.\n", __func__);
+
+ return TO_CONTROLLER_INFO(tm_resource_tmp)->controller;
+}
diff --git a/drivers/gpu/drm/amd/dal/topology/tm_resource_mgr.h b/drivers/gpu/drm/amd/dal/topology/tm_resource_mgr.h
new file mode 100644
index 000000000000..9fc21117c54c
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/topology/tm_resource_mgr.h
@@ -0,0 +1,274 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/**
+ *****************************************************************************
+ * tm_resource_mgr is responsible for managing all display
+ * HW resources (excluding the GPU).
+ * It is also responsible for acquiring/releasing resources.
+ *
+ * Resources are managed in a sorted vector. The order of resources is
+ * determined by (from MSB to LSB):
+ * 1. Resource type (encoder, audio, etc.)
+ * 2. Resource priority (within a type it allows internal
+ *	logic to sort resources)
+ * 3. Resource id (DAC, DVO, Uniphy, etc.)
+ * 4. Resource enum (every object with the same ID can have
+ *	multiple instances - enums)
+ *
+ *****************************************************************************
+ */
+
+#ifndef __DAL_TM_RESOURCE_MGR_H__
+#define __DAL_TM_RESOURCE_MGR_H__
+
+/* External includes */
+#include "include/adapter_service_interface.h"
+#include "include/topology_mgr_interface.h"
+#include "include/display_path_interface.h"
+#include "include/gpu_interface.h"
+#include "include/link_service_interface.h"
+#include "include/clock_source_interface.h"
+
+/* Internal includes */
+#include "tm_internal_types.h"
+#include "tm_resource.h"
+
+
+/* Forward declarations */
+struct tm_resource_mgr;
+struct dal_context;
+
+/** TM Resource Manager initialisation data */
+struct tm_resource_mgr_init_data {
+ struct dal_context *dal_context;
+ struct adapter_service *as;
+};
+
+/**************************************************
+ * Public data structures and macro definitions.
+ **************************************************/
+
+#define RESOURCE_INVALID_INDEX ((uint32_t)(-1))
+
+struct tm_resource_range {
+ uint32_t start;
+ uint32_t end;
+};
+
+/****************************
+ Public interface functions
+*****************************/
+
+/** Call to create the TM Resource Manager */
+struct tm_resource_mgr*
+tm_resource_mgr_create(struct tm_resource_mgr_init_data *init_data);
+
+/** Call to destroy the TM Resource Manager */
+void tm_resource_mgr_destroy(struct tm_resource_mgr **tm_rm);
+
+void tm_resource_mgr_release_hw(struct tm_resource_mgr *tm_rm);
+
+struct tm_resource_mgr *tm_resource_mgr_clone(
+ struct tm_resource_mgr *tm_rm);
+
+/* Try to add an object, and if successful, return pointer to tm_resource,
+ * where the object was stored. */
+struct tm_resource *dal_tm_resource_mgr_add_resource(
+ struct tm_resource_mgr *tm_rm,
+ struct tm_resource *tm_resource_input);
+
+struct tm_resource*
+tm_resource_mgr_add_engine(
+ struct tm_resource_mgr *tm_rm,
+ enum engine_id engine);
+
+/** Sort the resource list - for faster search. */
+void tm_resource_mgr_reindex(struct tm_resource_mgr *tm_rm);
+
+void tm_resource_mgr_relink_encoders(struct tm_resource_mgr *tm_rm);
+
+struct tm_resource*
+tm_resource_mgr_find_resource(
+ struct tm_resource_mgr *tm_rm,
+ struct graphics_object_id obj);
+
+struct tm_resource*
+tm_resource_mgr_get_resource(
+ struct tm_resource_mgr *tm_rm,
+ enum object_type obj_type,
+ uint32_t index);
+
+struct tm_resource*
+tm_resource_mgr_enum_resource(
+ struct tm_resource_mgr *tm_rm,
+ uint32_t index);
+
+/* Get number of resources of a certain type. */
+uint32_t tm_resource_mgr_get_resources_num(
+ struct tm_resource_mgr *tm_rm,
+ enum object_type obj_type);
+
+/* Get total number of resources. */
+uint32_t tm_resource_mgr_get_total_resources_num(
+ struct tm_resource_mgr *tm_rm);
+
+/* Acquire resources which are in the display_path */
+enum tm_result tm_resource_mgr_acquire_resources(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *path,
+ enum tm_acquire_method method);
+
+void tm_resource_mgr_release_resources(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *path,
+ enum tm_acquire_method method);
+
+enum tm_result tm_resource_mgr_acquire_alternative_clock_source(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *path);
+void tm_resource_mgr_release_alternative_clock_source(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *path);
+bool tm_resource_mgr_is_alternative_clk_src_available(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *path);
+void tm_resource_mgr_reset_all_usage_counters(
+ struct tm_resource_mgr *tm_rm);
+
+struct tm_resource*
+tm_resource_mgr_get_stereo_sync_resource(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *path);
+
+struct tm_resource*
+tm_resource_mgr_get_sync_output_resource(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *path);
+
+struct tm_resource*
+tm_resource_mgr_get_available_sync_output_for_display_path(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *path,
+ enum sync_source sync_output);
+
+uint32_t tm_resource_mgr_get_crtc_index_for_display_path(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *path,
+ uint32_t exclude_mask);
+
+void tm_resource_mgr_set_gpu_interface(
+ struct tm_resource_mgr *tm_rm,
+ struct gpu *gpu);
+struct gpu *tm_resource_mgr_get_gpu_interface(
+ struct tm_resource_mgr *tm_rm);
+
+enum tm_result tm_resource_mgr_attach_audio_to_display_path(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ enum signal_type sig_type);
+void tm_resource_mgr_detach_audio_from_display_path(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path);
+uint32_t tm_resource_mgr_get_active_audio_resources_num(
+ struct tm_resource_mgr *tm_rm);
+
+
+enum tm_result tm_resource_mgr_setup_link_storage(
+ struct tm_resource_mgr *tm_rm,
+ uint32_t number_of_paths);
+void tm_resource_mgr_invalidate_link_services(
+ struct tm_resource_mgr *tm_rm);
+void tm_resource_mgr_release_all_link_services(
+ struct tm_resource_mgr *tm_rm);
+void tm_resource_mgr_release_path_link_services(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *path);
+
+enum tm_result tm_resource_mgr_add_link_service(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *path,
+ uint32_t link_index,
+ struct link_service *ls_interface);
+
+struct link_service *tm_resource_mgr_get_link_service(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *path,
+ uint32_t link_index,
+ enum signal_type sig_type);
+
+struct link_service *tm_resource_mgr_find_link_service(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *path,
+ enum signal_type sig_type);
+
+void tm_resource_mgr_swap_link_services(
+ struct tm_resource_mgr *tm_rm,
+ uint32_t display_index1,
+ uint32_t display_index2);
+
+void tm_resource_mgr_associate_link_services(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *path);
+
+void dal_tmrm_dump(struct tm_resource_mgr *tm_rm);
+
+uint32_t tm_resource_mgr_get_display_path_index_for_controller(
+ struct tm_resource_mgr *tm_rm,
+ enum controller_id controller_id);
+
+uint32_t tm_resource_mgr_ref_counter_decrement(
+ const struct tm_resource_mgr *tm_rm,
+ struct tm_resource *tm_resource);
+
+struct controller *dal_tmrm_get_free_controller(
+ struct tm_resource_mgr *tm_rm,
+ uint32_t *controller_index_out,
+ uint32_t exclude_mask);
+
+void dal_tmrm_acquire_controller(struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ uint32_t controller_idx,
+ enum tm_acquire_method method);
+
+void dal_tmrm_release_controller(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ enum tm_acquire_method method,
+ struct controller *controller);
+
+void dal_tmrm_release_non_root_controllers(
+ struct tm_resource_mgr *tm_rm,
+ struct display_path *display_path,
+ enum tm_acquire_method method);
+
+void dal_tmrm_set_resources_range_by_type(struct tm_resource_mgr *tm_rm);
+
+const struct tm_resource_range *dal_tmrm_get_resource_range_by_type(
+ struct tm_resource_mgr *tm_rm,
+ enum object_type type);
+
+#endif /* __DAL_TM_RESOURCE_MGR_H__ */
diff --git a/drivers/gpu/drm/amd/dal/topology/tm_subsets_cache.c b/drivers/gpu/drm/amd/dal/topology/tm_subsets_cache.c
new file mode 100644
index 000000000000..fa109095359e
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/topology/tm_subsets_cache.c
@@ -0,0 +1,877 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a
+* copy of this software and associated documentation files (the "Software"),
+* to deal in the Software without restriction, including without limitation
+* the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the
+* Software is furnished to do so, subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in
+* all copies or substantial portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+* OTHER DEALINGS IN THE SOFTWARE.
+*
+* Authors: Najeeb Ahmed
+*
+*/
+
+#include "dal_services.h"
+#include "tm_subsets_cache.h"
+#include "tm_utils.h"
+#include "include/logger_interface.h"
+
+/**
+* Returns (n choose k).
+* For k<=3 we do direct computation, and for
+* k>3 we use the cached values stored in
+* m_pBinomCoeffs and created in
+* computeBinomCoeffs at class initialization
+*
+* \param [in] n
+* \param [in] k
+*
+* \return
+* (n choose k)
+*/
+static uint32_t get_binom_coeff(
+ struct tm_subsets_cache *tm_subsets_cache,
+ uint32_t n,
+ uint32_t k)
+{
+	/* Use the direct formula for k = 0..3 as it's more efficient and
+	 * doesn't use caching space; for all other k, use the cache.
+	 */
+ if (k > n)
+ return 0;
+ if (k == n || k == 0)
+ return 1;
+ if (k == 1)
+ return n;
+ if (k == 2)
+ return n*(n-1)/2;
+ if (k == 3)
+ return n*(n-1)*(n-2)/6;
+
+ /* should not happen*/
+ if (tm_subsets_cache->binom_coeffs == NULL) {
+ ASSERT_CRITICAL(0);
+ return 0;
+ }
+ /* read from table*/
+ return tm_subsets_cache->binom_coeffs[
+ (n-4)*(tm_subsets_cache->max_num_cofunc_targets-3)+k-4];
+}
+
+/**
+*
+* Computes binomial coefficients and stores
+* them in a table. We only cache (n choose k)
+* for k > 3, as for k <= 3 it's faster to compute directly.
+* The computation is done recursively using Pascal's triangle rule:
+* C(n, k) = C(n-1, k-1) + C(n-1, k)
+*
+* \return
+* void
+*
+*/
+static void compute_binom_coeffs(
+ struct tm_subsets_cache *tm_subsets_cache)
+{
+ uint32_t n = 0;
+ uint32_t k = 0;
+ struct dal_context *dal_context = tm_subsets_cache->dal_context;
+
+	/* shouldn't happen */
+	if (tm_subsets_cache->binom_coeffs == NULL) {
+		TM_ERROR("%s: binom_coeffs is NULL\n", __func__);
+		return;
+	}
+ for (n = 4; n <=
+ tm_subsets_cache->num_display_paths; ++n) {
+
+ int offset = (n-4)*
+ (tm_subsets_cache->max_num_cofunc_targets - 3);
+
+ for (k = 4; k <=
+ tm_subsets_cache->max_num_cofunc_targets; ++k) {
+ if (n == k) {
+ tm_subsets_cache->binom_coeffs[offset+k-4] = 1;
+ break;
+ }
+ /* compute recursively, if cached, it would
+ * have been computed in the previous n-loop
+ */
+ tm_subsets_cache->binom_coeffs[offset+k-4] =
+ get_binom_coeff(tm_subsets_cache, n-1, k-1) +
+ get_binom_coeff(tm_subsets_cache, n-1, k);
+ }
+ }
+}
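+
+/*
+* Worked example (illustrative): with num_display_paths = 6 and
+* max_num_cofunc_targets = 4 the table has rows for n = 4..6 and a
+* single column for k = 4. Pascal's rule fills it as:
+*	C(4,4) = 1
+*	C(5,4) = C(4,3) + C(4,4) = 4 + 1 = 5
+*	C(6,4) = C(5,3) + C(5,4) = 10 + 5 = 15
+* where C(4,3) and C(5,3) come from the direct k <= 3 formulas in
+* get_binom_coeff().
+*/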
+
+/**
+* Clears all DP to cache mapping information for both mappings
+*/
+static void reset_dp2_cache_mapping(
+ struct tm_subsets_cache *tm_subset_cache)
+{
+ uint32_t i = 0;
+
+ for (i = 0; i < tm_subset_cache->num_display_paths; ++i)
+ tm_subset_cache->cache_2dp_mapping[i] = MAPPING_NOT_SET;
+
+ for (i = 0; i < tm_subset_cache->num_display_paths; ++i)
+ tm_subset_cache->dp2_cache_mapping[i] = MAPPING_NOT_SET;
+}
+
+struct tm_subsets_cache *dal_tm_subsets_cache_create(
+ struct dal_context *dal_context,
+ uint32_t num_of_display_paths,
+ uint32_t max_num_of_cofunc_paths,
+ uint32_t num_of_func_controllers)
+{
+ struct tm_subsets_cache *tm_subsets_cache = NULL;
+ uint32_t cache_size_in_bytes = 0;
+
+ tm_subsets_cache = dal_alloc(
+ sizeof(struct tm_subsets_cache));
+
+ if (tm_subsets_cache == NULL)
+ return NULL;
+
+ tm_subsets_cache->dal_context = dal_context;
+ tm_subsets_cache->num_connected = 0;
+	/* Store the sizing parameters before the size computations below. */
+	tm_subsets_cache->num_display_paths = num_of_display_paths;
+	tm_subsets_cache->max_num_cofunc_targets = max_num_of_cofunc_paths;
+ tm_subsets_cache->num_cur_cached_paths = 0;
+ tm_subsets_cache->binom_coeffs = NULL;
+ tm_subsets_cache->all_connected_supported = CQR_UNKNOWN;
+ tm_subsets_cache->cofunc_cache_single = 0;
+ tm_subsets_cache->cofunc_cache_single_valid = 0;
+ tm_subsets_cache->connected = 0;
+
+ tm_subsets_cache->max_num_combinations = dal_get_num_of_combinations(
+ tm_subsets_cache);
+
+
+	/* We need 2 bits per combination and need to align to the size of
+	 * uint32_t. E.g. 53 combinations require 106 bits, which is
+	 * 53/4 = 106/8 = 13.25 bytes, rounded down by default to 13, so we
+	 * add 1 byte to get 14; but because we store in units of uint32_t we
+	 * actually allocate 16 bytes.
+	 * Note that we reserve space for all subset sizes, including size 1,
+	 * although we don't use it, as we keep info about single displays
+	 * separately for performance reasons. We would save at most a couple
+	 * of bytes, and it makes some math computations cleaner.
+	 */
+
+ cache_size_in_bytes = sizeof(uint32_t) *
+ (1 + tm_subsets_cache->max_num_combinations/
+ (4 * sizeof(uint32_t)));
+
+	/* dal_alloc() also zeros the cache */
+ tm_subsets_cache->cofunc_cache = dal_alloc(cache_size_in_bytes);
+
+ tm_subsets_cache->dp2_cache_mapping = dal_alloc(
+ sizeof(uint32_t) * num_of_display_paths);
+
+ tm_subsets_cache->cache_2dp_mapping = dal_alloc(
+ sizeof(uint32_t) * num_of_display_paths);
+
+
+ reset_dp2_cache_mapping(tm_subsets_cache);
+
+ /* we cache binom coeffs (n choose k) only if k>3,
+ * since for k<=3 it's faster to compute directly
+ */
+ if (max_num_of_cofunc_paths > 3) {
+
+ tm_subsets_cache->binom_coeffs = dal_alloc(
+ sizeof(uint32_t) *
+ ((num_of_display_paths-3)*(max_num_of_cofunc_paths-3)));
+
+ compute_binom_coeffs(tm_subsets_cache);
+ }
+ return tm_subsets_cache;
+}
+
+void dal_tm_subsets_cache_destroy(
+ struct tm_subsets_cache **ptr)
+{
+ struct tm_subsets_cache *tm_subsets_cache;
+
+ if (!ptr || !*ptr)
+ return;
+
+ tm_subsets_cache = *ptr;
+
+ if (tm_subsets_cache->binom_coeffs != NULL) {
+ dal_free(tm_subsets_cache->binom_coeffs);
+ tm_subsets_cache->binom_coeffs = NULL;
+ }
+
+ if (tm_subsets_cache->cofunc_cache != NULL) {
+ dal_free(tm_subsets_cache->cofunc_cache);
+ tm_subsets_cache->cofunc_cache = NULL;
+ }
+
+ if (tm_subsets_cache->dp2_cache_mapping != NULL) {
+ dal_free(tm_subsets_cache->dp2_cache_mapping);
+ tm_subsets_cache->dp2_cache_mapping = NULL;
+ }
+
+ if (tm_subsets_cache->cache_2dp_mapping != NULL) {
+ dal_free(tm_subsets_cache->cache_2dp_mapping);
+ tm_subsets_cache->cache_2dp_mapping = NULL;
+ }
+
+ dal_free(tm_subsets_cache);
+	*ptr = NULL;
+}
+
+/*
+* Returns the number of combinations the
+* cache needs to store. It depends on the
+* number of display paths and the max
+* num of cofunctional targets
+*/
+uint32_t dal_get_num_of_combinations(struct tm_subsets_cache *cache)
+{
+ uint32_t num_of_combinations = 0;
+ uint32_t i = 0;
+ /* number of subsets of size i*/
+ uint32_t num_subsets_of_fixed_size = 1;
+
+ for (i = 1; i <= cache->max_num_cofunc_targets; ++i) {
+ if (i > cache->num_display_paths)
+ return num_of_combinations;
+
+
+ /* using the fact that:
+ *(N choose i) = (N choose i-1) * (N-(i-1)) / i
+ */
+ num_subsets_of_fixed_size *= (cache->num_display_paths-i+1);
+ /* it will always be divisible without remainder*/
+ num_subsets_of_fixed_size /= i;
+ num_of_combinations += num_subsets_of_fixed_size;
+ }
+
+ return num_of_combinations;
+}
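+
+/*
+* Worked example (illustrative): for num_display_paths = 4 and
+* max_num_cofunc_targets = 3 the loop accumulates
+*	i = 1: 4
+*	i = 2: 4 * 3 / 2 = 6
+*	i = 3: 6 * 2 / 3 = 4
+* giving 4 + 6 + 4 = 14 cached combinations in total.
+*/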
+
+/**
+ * Clears all cached information about supported subsets.
+ * We keep information about single-display support separately,
+ * and there may be no need to wipe that cache too.
+ *
+ * \param
+ * [in] singles_too : whether to invalidate
+ * the cache keeping info about individual displays
+ */
+void dal_invalidate_subsets_cache(
+ struct tm_subsets_cache *tm_subset_cache,
+ bool singles_too)
+{
+ uint32_t cache_size_in_bytes;
+
+ if (tm_subset_cache->cofunc_cache == NULL) {
+ ASSERT_CRITICAL(0);
+ return;
+ }
+	/* We need 2 bits per combination and need to align to the size of
+	 * uint32_t. E.g. 53 combinations require 106 bits, which is
+	 * 53/4 = 106/8 = 13.25 bytes, rounded down by default to 13, so we
+	 * add 1 byte to get 14; but because we store in units of uint32_t we
+	 * actually allocate 16 bytes.
+	 */
+ cache_size_in_bytes = sizeof(uint32_t) *
+ (1 + tm_subset_cache->max_num_combinations/
+ (4 * sizeof(uint32_t)));
+
+ dal_memset(tm_subset_cache->cofunc_cache, 0, cache_size_in_bytes);
+ tm_subset_cache->all_connected_supported = CQR_UNKNOWN;
+
+ if (singles_too) {
+ tm_subset_cache->cofunc_cache_single = 0;
+ tm_subset_cache->cofunc_cache_single_valid = 0;
+ }
+}
+
+
+/**
+*
+* Since we keep only connected displays, if connectivity
+* information changes, we need to update the cache-to-DP mapping
+* and possibly clear the cache.
+*
+* \param [in] display_index: display index of the path whose
+* connectivity information (may have) changed
+*
+* \param [in] connected: is the display path with the
+* given index connected or disconnected
+*
+*
+*/
+void dal_update_display_mapping(
+ struct tm_subsets_cache *tm_subsets_cache,
+ const uint32_t display_index,
+ bool connected)
+{
+ struct dal_context *dal_context = tm_subsets_cache->dal_context;
+ uint32_t cache_size_in_bytes;
+ uint32_t i;
+
+	if (tm_subsets_cache->cofunc_cache == NULL ||
+		display_index >= tm_subsets_cache->num_display_paths) {
+		TM_ERROR("%s: NULL cofunc_cache or invalid display index!\n",
+				__func__);
+		return;
+	}
+
+
+ if (connected != tm_utils_test_bit(
+ &tm_subsets_cache->connected, display_index)) {
+
+ if (connected) {
+ tm_utils_set_bit(
+ &tm_subsets_cache->connected,
+ display_index);
+ ++tm_subsets_cache->num_connected;
+ } else {
+ tm_utils_clear_bit(
+ &tm_subsets_cache->connected,
+ display_index);
+ --tm_subsets_cache->num_connected;
+ }
+	} else {
+		/* cache is already up-to-date */
+		return;
+	}
+
+
+	/* Need to increase the cache; unfortunately there's no good way to
+	 * keep previous cached lookups, so we have to wipe out everything
+	 * and will get cache misses requiring non-cached lookups.
+	 * On disconnect, we don't decrease the cache size.
+	 */
+ if (tm_subsets_cache->num_connected >
+ tm_subsets_cache->num_cur_cached_paths) {
+
+ /* we keep it in sync*/
+		if (tm_subsets_cache->num_connected !=
+			tm_subsets_cache->num_cur_cached_paths + 1)
+			TM_WARNING("%s: Subset cache not in sync\n", __func__);
+
+		++tm_subsets_cache->num_cur_cached_paths;
+
+ dal_free(tm_subsets_cache->cofunc_cache);
+
+ tm_subsets_cache->cofunc_cache = NULL;
+
+ tm_subsets_cache->max_num_combinations =
+ dal_get_num_of_combinations(
+ tm_subsets_cache);
+
+		/* We need 2 bits per combination and need to align to the
+		 * size of uint32_t. E.g. 53 combinations require 106 bits,
+		 * which is 53/4 = 106/8 = 13.25 bytes, rounded down by
+		 * default to 13, so we add 1 byte to get 14; but because we
+		 * store in units of uint32_t we actually allocate 16 bytes.
+		 */
+ cache_size_in_bytes = sizeof(uint32_t) *
+ (1 + tm_subsets_cache->max_num_combinations/
+ (4 * sizeof(uint32_t)));
+		/* dal_alloc() also zeros the cache */
+ tm_subsets_cache->cofunc_cache = dal_alloc(cache_size_in_bytes);
+ }
+
+	/* now update the DP mapping arrays */
+	if (connected) {
+		uint32_t *dp2cache = tm_subsets_cache->dp2_cache_mapping;
+		uint32_t *cache2dp = tm_subsets_cache->cache_2dp_mapping;
+
+		if (dp2cache[display_index] != MAPPING_NOT_SET) {
+			if (tm_subsets_cache->all_connected_supported ==
+					CQR_SUPPORTED)
+				tm_subsets_cache->all_connected_supported =
+						CQR_UNKNOWN;
+
+			/* This index is already mapped into some cache
+			 * index, so we can skip invalidating the cache too.
+			 */
+			return;
+		}
+
+		for (i = 0; i < tm_subsets_cache->num_cur_cached_paths; ++i) {
+			if (cache2dp[i] == MAPPING_NOT_SET) {
+				cache2dp[i] = display_index;
+				dp2cache[display_index] = i;
+				break;
+			}
+
+			/* If the current index is set, but disconnected,
+			 * we can reuse it.
+			 */
+			if (!tm_utils_test_bit(
+				&tm_subsets_cache->connected,
+				cache2dp[i])) {
+
+				uint32_t previous_index = cache2dp[i];
+
+				cache2dp[i] = display_index;
+				dp2cache[display_index] = i;
+				dp2cache[previous_index] = MAPPING_NOT_SET;
+				break;
+			}
+		}
+
+		/* Whatever happened above, we need to reset the cache;
+		 * no need to reset the single index array.
+		 */
+		dal_invalidate_subsets_cache(tm_subsets_cache, false);
+	} else {
+		if (tm_subsets_cache->all_connected_supported ==
+				CQR_NOT_SUPPORTED)
+			tm_subsets_cache->all_connected_supported =
+					CQR_UNKNOWN;
+	}
+}
+
+/**
+*
+* Check whether the current DP mapping is
+* valid with respect to the display path
+* indices given as input. A return value of
+* true means that we're currently caching
+* information about this subset.
+*
+* \param [in] displays: array of display paths
+* for which we will check that DP to cache mapping is valid
+* \param [in] array_size: size of the above array
+*
+* \return
+* true - if given display path subset is already mapped
+* false - given display path subset is not mapped
+*/
+static bool is_dp_mapping_valid(
+ struct tm_subsets_cache *tm_subset_cache,
+ const uint32_t *displays,
+ uint32_t array_size)
+{
+ bool ret = true;
+ uint32_t i;
+
+ for (i = 0; i < array_size; ++i) {
+ if (tm_subset_cache->dp2_cache_mapping[displays[i]] ==
+ MAPPING_NOT_SET) {
+ ret = false;
+ break;
+ }
+ }
+ return ret;
+}
+
+/**
+*
+* Check whether all the display paths are currently connected.
+* This is a very common case which can be optimized further.
+*
+* \param [in] displays: array of display paths
+* \param [in] array_size: size of the above array
+*
+* \return
+* true if all display paths in displays are currently connected
+* false otherwise
+*
+*/
+static bool all_connected(
+ struct tm_subsets_cache *tm_subsets_cache,
+ const uint32_t *displays,
+ uint32_t array_size)
+{
+ uint32_t i;
+
+ for (i = 0; i < array_size; ++i) {
+ if (!tm_utils_test_bit(
+ &tm_subsets_cache->connected,
+ displays[i]))
+
+ return false;
+ }
+ return true;
+}
+
+/**
+* Finds an index for the given subset of cache indices
+* corresponding to display paths (and not DPs themselves).
+* It uses the combinatorial number system to lay out
+* all subsets of N elements with max size K.
+* If the subset indices are (c1, c2, ..., ck) then
+* the computed index is:
+* M + (ck choose k) + ... + (c2 choose 2) + (c1 choose 1)
+* where M = (N choose 1) + (N choose 2) + ... + (N choose k-1).
+* There's a document giving more details.
+* The cache indices (c1, ..., ck) need to be sorted for the
+* formula to work. See the comment in the code: a significant
+* part of this function's logic is extracting the indices in
+* sorted order without allocating an additional array or
+* disturbing the original array. Technically they are never
+* stored sorted: we get them in order and apply the formula.
+*
+* \param [in] displays: array of display paths
+* \param [in] array_size: size of the above array
+*
+* \return
+* index of the location in the cache where info
+* for the subset of displays is stored
+*/
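+/* Worked example (illustrative): with num_cur_cached_paths = 4 and a
+ * subset whose mapped cache indices are {1, 3} (array_size = 2):
+ *   M = C(4,1) = 4 single-element combinations come first, and
+ *   index = M + C(1,1) + C(3,2) = 4 + 1 + 3 = 8,
+ * the 5th of the C(4,2) = 6 two-element subsets in
+ * combinatorial-number-system order.
+ */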
+static uint32_t find_index(
+ struct tm_subsets_cache *tm_subsets_cache,
+ const uint32_t *displays,
+ uint32_t array_size)
+{
+ int index = 0;
+ uint32_t i;
+ uint32_t next_possible_min = 0;
+ uint32_t cur_min = 0;
+
+ /* first add all combinations with fewer display paths set*/
+ for (i = 1; i < array_size; ++i) {
+ index +=
+ get_binom_coeff(tm_subsets_cache,
+ tm_subsets_cache->num_cur_cached_paths, i);
+ }
+
+	/* find the index among combinations of size array_size;
+	 * see http://en.wikipedia.org/wiki/Combinatorial_number_system
+	 * for more details
+	 */
+
+ for (i = 0; i < array_size; ++i) {
+		/* Need to sort mapped indices for the formula to work.
+		 * Since KMD/Lnx may send unsorted arrays of display indices,
+		 * we'd need to do this even if the mapping maintained
+		 * sorting order. It was requested that we avoid allocating
+		 * a temporary array, so this O(n^2) selection yields the
+		 * indices one by one in ascending order: it finds the
+		 * minimum on the first pass, and because the array has no
+		 * duplicates, the next element must be larger by at least 1,
+		 * so we find the smallest such element, and so on.
+		 */
+ uint32_t j = 0;
+
+ while (j < array_size &&
+ tm_subsets_cache->dp2_cache_mapping[displays[j]] <
+ next_possible_min) {
+
+ ++j;
+ }
+ if (j == array_size)
+ /* duplicates in the display array? cannot handle this,
+ * so return invalid value prompting non-cached lookup
+ */
+ return tm_subsets_cache->max_num_combinations + 1;
+
+ cur_min = tm_subsets_cache->dp2_cache_mapping[displays[j++]];
+ for (; j < array_size; ++j) {
+
+ uint32_t cur_dp_mapped_index =
+ tm_subsets_cache->
+ dp2_cache_mapping[displays[j]];
+ if ((cur_dp_mapped_index < cur_min) &&
+ (cur_dp_mapped_index >= next_possible_min))
+
+ cur_min = cur_dp_mapped_index;
+
+ }
+
+ /* apply formula*/
+ if (i < cur_min)
+ index += get_binom_coeff(
+ tm_subsets_cache,
+ cur_min, i+1);
+
+ next_possible_min = cur_min + 1;
+ }
+
+ return index;
+}
+
+/**
+* Check whether the given subset of display
+* paths is supported, i.e. if the display paths
+* can be enabled at the same time.
+*
+* \param [in] displays: array of display paths for
+* which we will check whether they can be
+* enabled at the same time
+* \param [in] array_size: size of the above array
+*
+* \return
+* CacheQueryResult enum:
+* Supported - the given subset is supported (cache hit)
+* NotSupported - the given subset is not supported (cache hit)
+*
+* Unknown - this display path subset is currently mapped
+* in the cache, but this is the first query so it is
+* not known whether it's supported or not.
+* The caller must do a noncached lookup and update
+* the cache via dal_set_subset_supported() (cache miss)
+*
+* DPMappingNotValid - this display path subset is currently
+* not being cached. The caller must
+* do a noncached lookup and not
+* attempt to update cache, since it will
+* fail (cache miss)
+*
+*/
+enum cache_query_result dal_is_subset_supported(
+ struct tm_subsets_cache *tm_subsets_cache,
+ const uint32_t *displays,
+ uint32_t array_size)
+{
+ uint32_t index;
+ uint32_t word_num;
+ uint32_t bit_mask;
+ uint32_t ret;
+ struct dal_context *dal_context = tm_subsets_cache->dal_context;
+
+	ASSERT(displays != NULL);
+
+	if (tm_subsets_cache->cofunc_cache == NULL ||
+		displays == NULL) {
+		ASSERT_CRITICAL(0);
+		return CQR_DP_MAPPING_NOT_VALID;
+	}
+
+	if (array_size == 1) {
+
+		ASSERT(displays[0] < tm_subsets_cache->num_display_paths);
+
+		/* mapping always valid for size == 1*/
+		if (!tm_utils_test_bit(
+			&tm_subsets_cache->cofunc_cache_single_valid,
+			displays[0]))
+			return CQR_UNKNOWN;
+
+		if (tm_utils_test_bit(
+			&tm_subsets_cache->cofunc_cache_single,
+			displays[0]))
+			return CQR_SUPPORTED;
+		else
+			return CQR_NOT_SUPPORTED;
+	}
+
+ /* check if this is a query for all connected
+ * (enabled) ones, which is the most common query observed
+ */
+ if (array_size <= tm_subsets_cache->num_connected &&
+ array_size <= tm_subsets_cache->max_num_cofunc_targets &&
+ tm_subsets_cache->all_connected_supported != CQR_UNKNOWN) {
+
+ if (all_connected(tm_subsets_cache, displays, array_size)) {
+ if (tm_subsets_cache->all_connected_supported ==
+ CQR_SUPPORTED)
+ return CQR_SUPPORTED;
+ /* if all connected are not supported, and the subset
+ * is smaller, it could be that it's supported,
+ * in that case we don't return here
+ */
+ else if (array_size ==
+ tm_subsets_cache->num_connected)
+ return CQR_NOT_SUPPORTED;
+ }
+ }
+
+	/* array_size > 1: asking about a subset that includes a
+	 * disconnected path? The caller should do a noncached lookup
+	 * and return the result, but not update the cache.
+	 */
+ if (!is_dp_mapping_valid(tm_subsets_cache, displays, array_size))
+ return CQR_DP_MAPPING_NOT_VALID;
+
+ index = find_index(tm_subsets_cache, displays, array_size);
+ if (index > tm_subsets_cache->max_num_combinations) {
+
+ if (array_size > tm_subsets_cache->max_num_cofunc_targets)
+ return CQR_NOT_SUPPORTED;
+
+ /* this should not happen, fall back
+ * to noncached lookup without updating cache
+ */
+ TM_ERROR("%s: Invalid index", __func__);
+ return CQR_DP_MAPPING_NOT_VALID;
+ }
+
+ /* If we have index K, we want to read
+ * bits 2K and 2K+1 in the cache.
+	 * Since the cache is internally represented as a
+	 * uint32_t array, we first convert this into bytes.
+ * 1 element has sizeof(int)*8 bits, so 2K'th bit is
+ * contained in the integer array element at location
+ * wordNum = (2K) / (sizeof(int)*8) = K / (sizeof(int)*4).
+ * bitMask is the offset within
+ * tm_subsets_cache->cofunc_cache[wordNum] - it's the
+ * remainder of the above division, multiplied by 2
+ * Note that 2 bits directly correspond to the
+ * enum 0x0 = Unknown, 0x1 = NotSupported, 0x2 = Supported...
+ * I.e. it's not that 1 bit is for valid or not, and the other
+ * for supported or not.
+ */
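+	/* Illustrative example: with index K = 21 and 32-bit words,
+	 * word_num = 21 / 16 = 1 and the bit offset is (21 % 16) * 2 = 10,
+	 * so the 2-bit entry occupies bits 10-11 of cofunc_cache[1].
+	 */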
+
+ /* *4 instead of *8 since every subset uses 2 bits*/
+ word_num = index / (sizeof(uint32_t)*4);
+ bit_mask = 0x3 << ((index % (sizeof(uint32_t)*4)) * 2);
+
+ ret = (*(tm_subsets_cache->cofunc_cache +
+ word_num) & bit_mask) >> ((index % (sizeof(uint32_t)*4)) * 2);
+ return (enum cache_query_result)(ret);
+}
+
+
+/**
+*
+* Set/Update cache information for the given display
+* path subset. This function will not do anything
+* if this subset is currently not mapped in the cache
+* (i.e. we're not caching this particular subset)
+*
+* \param [in] displays: array of display paths
+* \param [in] array_size: size of the above array
+* \param [in] supported: true if this display path
+* subset can be enabled at the same time, false otherwise
+*
+*/
+void dal_set_subset_supported(
+ struct tm_subsets_cache *tm_subsets_cache,
+ const uint32_t *displays,
+ uint32_t array_size,
+ bool supported)
+{
+ uint32_t index;
+ uint32_t word_num;
+ uint32_t bit_mask;
+
+ ASSERT(tm_subsets_cache->cofunc_cache != NULL);
+ ASSERT(displays != NULL);
+ if (tm_subsets_cache->cofunc_cache == NULL ||
+ displays == NULL) {
+ ASSERT_CRITICAL(0);
+ return;
+ }
+
+ if (array_size == 1) {
+
+ /* only one display path, so check only displays[0]*/
+		if (displays[0] >= tm_subsets_cache->num_display_paths)
+ return;
+
+ tm_utils_set_bit(
+ &tm_subsets_cache->cofunc_cache_single_valid,
+ displays[0]);
+
+ if (supported)
+ tm_utils_set_bit(
+ &tm_subsets_cache->cofunc_cache_single,
+ displays[0]);
+
+ return;
+ }
+
+ if (all_connected(tm_subsets_cache, displays, array_size) &&
+ array_size == tm_subsets_cache->num_connected)
+ tm_subsets_cache->all_connected_supported =
+ supported ? CQR_SUPPORTED : CQR_NOT_SUPPORTED;
+
+	/* array_size > 1: this case should not really happen, as TM
+	 * should not call dal_set_subset_supported() if the mapping
+	 * is not valid
+	 */
+	if (!is_dp_mapping_valid(tm_subsets_cache, displays, array_size))
+		return;
+
+ index = find_index(tm_subsets_cache, displays, array_size);
+ if (index > tm_subsets_cache->max_num_combinations) {
+ /* this should not happen*/
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ /* If we have index K, we want to modify bits
+	 * 2K and 2K+1 in the cache. Since the cache is
+	 * internally represented as a uint32_t array,
+ * we first convert this into bytes. 1 element
+ * has sizeof(int)*8 bits, so 2K'th bit is
+ * contained in the integer array element at
+ * location
+ * wordNum = (2K) / (sizeof(int)*8) = K / (sizeof(int)*4)
+ * bitMask is the offset within
+ * tm_subsets_cache->cofunc_cache[wordNum] - it's the
+ * remainder of the above division, multiplied by 2
+ * Note that 2 bits directly correspond to the
+ * enum 0x0 = Unknown, 0x1 = NotSupported, 0x2 = Supported...
+ * i.e. it's not that 1 bit is for valid or not,
+ * and the other for supported or not.
+ */
+ /* *4 instead of *8 since every subset uses 2 bits*/
+ word_num = index / (sizeof(uint32_t)*4);
+ bit_mask = supported ? 0x2 : 0x1;
+ /* now move it to the right location within those 32 bits*/
+ bit_mask = bit_mask << ((index % (sizeof(uint32_t)*4)) * 2);
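+	/* e.g. (illustrative) index = 5 and supported = true: the offset
+	 * is (5 % 16) * 2 = 10, so bit_mask = 0x2 << 10 = 0x800
+	 */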
+
+ *(tm_subsets_cache->cofunc_cache + word_num) |= bit_mask;
+}
diff --git a/drivers/gpu/drm/amd/dal/topology/tm_subsets_cache.h b/drivers/gpu/drm/amd/dal/topology/tm_subsets_cache.h
new file mode 100644
index 000000000000..fd9dec4658fa
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/topology/tm_subsets_cache.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Najeeb Ahmed
+ *
+ */
+
+#ifndef __DAL_TM_SUBSETS_CACHE__
+#define __DAL_TM_SUBSETS_CACHE__
+
+/* internal consts*/
+enum {
+ MAPPING_NOT_SET = 0xFFFF
+};
+
+
+enum cache_query_result {
+ CQR_UNKNOWN = 0,
+ CQR_NOT_SUPPORTED,
+ CQR_SUPPORTED,
+ CQR_DP_MAPPING_NOT_VALID,
+};
+
+/* forward declarations */
+struct dal_context;
+
+struct tm_subsets_cache {
+ struct dal_context *dal_context;
+	/* 2 bits per subset combination; values follow
+	 * enum cache_query_result (0 = unknown,
+	 * 1 = not supported, 2 = supported)
+	 */
+	uint32_t *cofunc_cache;
+	/* display path index -> cache slot, or MAPPING_NOT_SET */
+	uint32_t *dp2_cache_mapping;
+	/* cache slot -> display path index, or MAPPING_NOT_SET */
+	uint32_t *cache_2dp_mapping;
+	/* per-path "supported" bits for single-path queries */
+	uint32_t cofunc_cache_single;
+	/* validity bits for cofunc_cache_single */
+	uint32_t cofunc_cache_single_valid;
+	/* bitmap of currently connected display paths */
+	uint32_t connected;
+ uint32_t num_connected;
+ uint32_t num_cur_cached_paths;
+ /* for robustness purposes to assure
+ * computed index isn't out of bounds
+ */
+ uint32_t max_num_combinations;
+ enum cache_query_result all_connected_supported;
+
+ /* these two are board specific,
+ * should not change during executable runtime
+ */
+ uint32_t num_display_paths;
+ uint32_t max_num_cofunc_targets;
+
+ /* performance enhancing helper*/
+ uint32_t *binom_coeffs;
+
+};
+
+struct tm_subsets_cache *dal_tm_subsets_cache_create(
+ struct dal_context *dal_context,
+ uint32_t num_of_display_paths,
+ uint32_t max_num_of_cofunc_paths,
+ uint32_t num_of_func_controllers);
+
+void dal_tm_subsets_cache_destroy(
+ struct tm_subsets_cache **tm_subsets_cache);
+
+uint32_t dal_get_num_of_combinations(
+ struct tm_subsets_cache *tm_subsets_cache);
+
+void dal_invalidate_subsets_cache(
+ struct tm_subsets_cache *tm_subset_cache,
+ bool singles_too);
+
+enum cache_query_result dal_is_subset_supported(
+ struct tm_subsets_cache *tm_subset_cache,
+ const uint32_t *displays,
+ uint32_t array_size);
+
+void dal_set_subset_supported(
+ struct tm_subsets_cache *tm_subset_cache,
+ const uint32_t *displays,
+ uint32_t array_size,
+ bool supported);
+
+void dal_update_display_mapping(
+ struct tm_subsets_cache *tm_subset_cache,
+ const uint32_t display_index,
+ bool connected);
+
+#endif /*__DAL_TM_SUBSETS_CACHE__*/
diff --git a/drivers/gpu/drm/amd/dal/topology/tm_utils.c b/drivers/gpu/drm/amd/dal/topology/tm_utils.c
new file mode 100644
index 000000000000..6f23a4c5d228
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/topology/tm_utils.c
@@ -0,0 +1,1230 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dal_services.h"
+
+#include "include/ddc_service_interface.h"
+#include "include/connector_interface.h"
+
+#include "tm_utils.h"
+#include "tm_internal_types.h"
+
+/* String representation of graphics objects */
+const char *tm_utils_go_type_to_str(struct graphics_object_id id)
+{
+ switch (id.type) {
+ case OBJECT_TYPE_GPU:
+ return "GPU";
+ case OBJECT_TYPE_ENCODER:
+ return "Encoder";
+ case OBJECT_TYPE_CONNECTOR:
+ return "Connector";
+ case OBJECT_TYPE_ROUTER:
+ return "Router";
+ case OBJECT_TYPE_AUDIO:
+ return "Audio";
+ case OBJECT_TYPE_CONTROLLER:
+ return "Controller";
+ case OBJECT_TYPE_CLOCK_SOURCE:
+ return "ClockSource";
+ case OBJECT_TYPE_ENGINE:
+ return "Engine";
+ default:
+ return "Unknown";
+ }
+}
+
+const char *tm_utils_go_enum_to_str(struct graphics_object_id id)
+{
+ switch (id.type) {
+ case OBJECT_TYPE_UNKNOWN:
+ case OBJECT_TYPE_GPU:
+ return "\b";
+ default:
+ break;
+ }
+
+ switch (id.enum_id) {
+ case ENUM_ID_1:
+ return "1";
+ case ENUM_ID_2:
+ return "2";
+ case ENUM_ID_3:
+ return "3";
+ case ENUM_ID_4:
+ return "4";
+ case ENUM_ID_5:
+ return "5";
+ case ENUM_ID_6:
+ return "6";
+ case ENUM_ID_7:
+ return "7";
+ default:
+ return "?";
+ }
+}
+
+const char *tm_utils_go_id_to_str(struct graphics_object_id id)
+{
+ switch (id.type) {
+ case OBJECT_TYPE_ENCODER:
+ return tm_utils_encoder_id_to_str(
+ dal_graphics_object_id_get_encoder_id(id));
+ case OBJECT_TYPE_CONNECTOR:
+ return tm_utils_connector_id_to_str(
+ dal_graphics_object_id_get_connector_id(id));
+ case OBJECT_TYPE_AUDIO:
+ return tm_utils_audio_id_to_str(
+ dal_graphics_object_id_get_audio_id(id));
+ case OBJECT_TYPE_CONTROLLER:
+ return tm_utils_controller_id_to_str(
+ dal_graphics_object_id_get_controller_id(id));
+ case OBJECT_TYPE_CLOCK_SOURCE:
+ return tm_utils_clock_source_id_to_str(
+ dal_graphics_object_id_get_clock_source_id(id));
+ case OBJECT_TYPE_ENGINE:
+ return tm_utils_engine_id_to_str(
+ dal_graphics_object_id_get_engine_id(id));
+ default:
+ return "\b";
+ }
+}
+
+const char *tm_utils_encoder_id_to_str(enum encoder_id id)
+{
+ switch (id) {
+ case ENCODER_ID_INTERNAL_LVDS:
+ return "Int_LVDS";
+ case ENCODER_ID_INTERNAL_TMDS1:
+ return "Int_TMDS1";
+ case ENCODER_ID_INTERNAL_TMDS2:
+ return "Int_TMDS2";
+ case ENCODER_ID_INTERNAL_DAC1:
+ return "Int_DAC1";
+ case ENCODER_ID_INTERNAL_DAC2:
+ return "Int_DAC2";
+ case ENCODER_ID_INTERNAL_SDVOA:
+ return "Int_SDVOA";
+ case ENCODER_ID_INTERNAL_SDVOB:
+ return "Int_SDVOB";
+ case ENCODER_ID_EXTERNAL_SI170B:
+ return "Ext_Si170B";
+ case ENCODER_ID_EXTERNAL_CH7303:
+ return "Ext_CH7303";
+ case ENCODER_ID_EXTERNAL_CH7301:
+ return "Ext_CH7301";
+ case ENCODER_ID_INTERNAL_DVO1:
+ return "Int_DVO1";
+ case ENCODER_ID_EXTERNAL_SDVOA:
+ return "Ext_SDVOA";
+ case ENCODER_ID_EXTERNAL_SDVOB:
+ return "Ext_SDVOB";
+ case ENCODER_ID_EXTERNAL_TITFP513:
+ return "Ext_TITFP513";
+ case ENCODER_ID_INTERNAL_LVTM1:
+ return "Int_LVTM1";
+ case ENCODER_ID_EXTERNAL_VT1623:
+ return "Ext_VT1623";
+ case ENCODER_ID_EXTERNAL_SI1930:
+ return "Ext_Si1930";
+ case ENCODER_ID_INTERNAL_HDMI:
+ return "Int_HDMI";
+ case ENCODER_ID_INTERNAL_KLDSCP_TMDS1:
+ return "Int_Kldscp_TMDS1";
+ case ENCODER_ID_INTERNAL_KLDSCP_DVO1:
+ return "Int_Kldscp_DVO1";
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+ return "Int_Kldscp_DAC1";
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+ return "Int_Kldscp_DAC2";
+ case ENCODER_ID_EXTERNAL_SI178:
+ return "Ext_Si178";
+ case ENCODER_ID_EXTERNAL_MVPU_FPGA:
+ return "Ext_MVPU_FPGA";
+ case ENCODER_ID_INTERNAL_DDI:
+ return "Int_DDI";
+ case ENCODER_ID_EXTERNAL_VT1625:
+ return "Ext_VT1625";
+ case ENCODER_ID_EXTERNAL_SI1932:
+ return "Ext_Si1932";
+ case ENCODER_ID_EXTERNAL_AN9801:
+ return "Ext_AN9801";
+ case ENCODER_ID_EXTERNAL_DP501:
+ return "Ext_DP501";
+ case ENCODER_ID_INTERNAL_UNIPHY:
+ return "Int_Uniphy";
+ case ENCODER_ID_INTERNAL_KLDSCP_LVTMA:
+ return "Int_Kldscp_LVTMA";
+ case ENCODER_ID_INTERNAL_UNIPHY1:
+ return "Int_Uniphy1";
+ case ENCODER_ID_INTERNAL_UNIPHY2:
+ return "Int_Uniphy2";
+ case ENCODER_ID_EXTERNAL_GENERIC_DVO:
+ return "Ext_Generic_DVO";
+ case ENCODER_ID_EXTERNAL_NUTMEG:
+ return "Ext_Nutmeg";
+ case ENCODER_ID_EXTERNAL_TRAVIS:
+ return "Ext_Travis";
+ case ENCODER_ID_INTERNAL_WIRELESS:
+ return "Int_Wireless";
+ case ENCODER_ID_INTERNAL_UNIPHY3:
+ return "Int_Uniphy3";
+ default:
+ return "Unknown";
+ }
+}
+
+const char *tm_utils_connector_id_to_str(enum connector_id id)
+{
+ switch (id) {
+ case CONNECTOR_ID_SINGLE_LINK_DVII:
+ return "SingleLinkDVII";
+ case CONNECTOR_ID_DUAL_LINK_DVII:
+ return "DualLinkDVII";
+ case CONNECTOR_ID_SINGLE_LINK_DVID:
+ return "SingleLinkDVID";
+ case CONNECTOR_ID_DUAL_LINK_DVID:
+ return "DualLinkDVID";
+ case CONNECTOR_ID_VGA:
+ return "VGA";
+ case CONNECTOR_ID_COMPOSITE:
+ return "Composite";
+ case CONNECTOR_ID_SVIDEO:
+ return "SVideo";
+ case CONNECTOR_ID_YPBPR:
+ return "YPbPr";
+ case CONNECTOR_ID_DCONNECTOR:
+ return "DConnector";
+ case CONNECTOR_ID_9PIN_DIN:
+ return "9pinDIN";
+ case CONNECTOR_ID_SCART:
+ return "SCART";
+ case CONNECTOR_ID_HDMI_TYPE_A:
+ return "HDMITypeA";
+ case CONNECTOR_ID_LVDS:
+ return "LVDS";
+ case CONNECTOR_ID_7PIN_DIN:
+ return "7pinDIN";
+ case CONNECTOR_ID_PCIE:
+ return "PCIE";
+ case CONNECTOR_ID_DISPLAY_PORT:
+ return "DisplayPort";
+ case CONNECTOR_ID_EDP:
+ return "EDP";
+ case CONNECTOR_ID_WIRELESS:
+ return "Wireless";
+ default:
+ return "Unknown";
+ }
+}
+
+const char *tm_utils_audio_id_to_str(enum audio_id id)
+{
+ switch (id) {
+ case AUDIO_ID_INTERNAL_AZALIA:
+ return "Azalia";
+ default:
+ return "Unknown";
+ }
+}
+
+const char *tm_utils_controller_id_to_str(enum controller_id id)
+{
+ switch (id) {
+ case CONTROLLER_ID_D0:
+ return "D0";
+ case CONTROLLER_ID_D1:
+ return "D1";
+ case CONTROLLER_ID_D2:
+ return "D2";
+ case CONTROLLER_ID_D3:
+ return "D3";
+ case CONTROLLER_ID_D4:
+ return "D4";
+ case CONTROLLER_ID_D5:
+ return "D5";
+ case CONTROLLER_ID_UNDERLAY0:
+ return "UNDERLAY0";
+ default:
+ return "Unknown";
+ }
+}
+
+const char *tm_utils_clock_source_id_to_str(enum clock_source_id id)
+{
+ switch (id) {
+ case CLOCK_SOURCE_ID_PLL0:
+ return "CLOCK_SOURCE_PLL0";
+ case CLOCK_SOURCE_ID_PLL1:
+ return "CLOCK_SOURCE_PLL1";
+ case CLOCK_SOURCE_ID_PLL2:
+ return "CLOCK_SOURCE_PLL2";
+ case CLOCK_SOURCE_ID_DCPLL:
+ return "CLOCK_SOURCE_DCPLL";
+ case CLOCK_SOURCE_ID_EXTERNAL:
+ return "CLOCK_SOURCE_External";
+ case CLOCK_SOURCE_ID_DFS:
+ return "CLOCK_SOURCE_DFS";
+ case CLOCK_SOURCE_ID_VCE:
+ return "CLOCK_SOURCE_ID_VCE";
+ case CLOCK_SOURCE_ID_DP_DTO:
+ return "CLOCK_SOURCE_ID_DP_DTO";
+ default:
+ return "Unknown";
+ }
+}
+
+const char *tm_utils_engine_id_to_str(enum engine_id id)
+{
+ switch (id) {
+ case ENGINE_ID_DACA:
+ return "DACA";
+ case ENGINE_ID_DACB:
+ return "DACB";
+ case ENGINE_ID_DVO:
+ return "DVO";
+ case ENGINE_ID_DIGA:
+ return "DIGA";
+ case ENGINE_ID_DIGB:
+ return "DIGB";
+ case ENGINE_ID_DIGC:
+ return "DIGC";
+ case ENGINE_ID_DIGD:
+ return "DIGD";
+ case ENGINE_ID_DIGE:
+ return "DIGE";
+ case ENGINE_ID_DIGF:
+ return "DIGF";
+ case ENGINE_ID_DIGG:
+ return "DIGG";
+ case ENGINE_ID_VCE:
+ return "VCE";
+ default:
+ return "Unknown";
+ }
+}
+
+const char *tm_utils_signal_type_to_str(enum signal_type type)
+{
+ switch (type) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ return "DVISingleLink";
+ case SIGNAL_TYPE_DVI_SINGLE_LINK1:
+ return "DVISingleLink1";
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ return "DVIDualLink";
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return "HDMITypeA";
+ case SIGNAL_TYPE_LVDS:
+ return "LVDS";
+ case SIGNAL_TYPE_RGB:
+ return "RGB";
+ case SIGNAL_TYPE_YPBPR:
+ return "YPbPr";
+ case SIGNAL_TYPE_SCART:
+ return "SCART";
+ case SIGNAL_TYPE_COMPOSITE:
+ return "Composite";
+ case SIGNAL_TYPE_SVIDEO:
+ return "SVideo";
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ return "DisplayPort";
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ return "DisplayPortMst";
+ case SIGNAL_TYPE_EDP:
+ return "EDP";
+ case SIGNAL_TYPE_DVO:
+ return "SIGNAL_TYPE_DVO";
+ case SIGNAL_TYPE_DVO24:
+ return "SIGNAL_TYPE_DVO24";
+ case SIGNAL_TYPE_MVPU_A:
+ return "SIGNAL_TYPE_MVPU_A";
+ case SIGNAL_TYPE_MVPU_B:
+ return "SIGNAL_TYPE_MVPU_B";
+ case SIGNAL_TYPE_MVPU_AB:
+ return "SIGNAL_TYPE_MVPU_AB";
+ case SIGNAL_TYPE_WIRELESS:
+ return "SIGNAL_TYPE_WIRELESS";
+ default:
+ return "Unknown";
+
+ }
+}
+
+const char *tm_utils_engine_priority_to_str(enum tm_engine_priority priority)
+{
+ switch (priority) {
+ case TM_ENGINE_PRIORITY_MST_DP_MST_ONLY:
+ return "Priority_MST_DPMstOnly";
+ case TM_ENGINE_PRIORITY_MST_DP_CONNECTED:
+ return "Priority_MST_DPConnected";
+ case TM_ENGINE_PRIORITY_MST_DVI:
+ return "Priority_MST_Dvi";
+ case TM_ENGINE_PRIORITY_MST_HDMI:
+ return "Priority_MST_Hdmi";
+ case TM_ENGINE_PRIORITY_MST_DVI_CONNECTED:
+ return "Priority_MST_DviConnected";
+ case TM_ENGINE_PRIORITY_MST_HDMI_CONNECTED:
+ return "Priority_MST_HdmiConnected";
+ case TM_ENGINE_PRIORITY_NON_MST_CAPABLE:
+ return "Priority_Non_MST_Capable";
+ default:
+ return "Priority_Unknown";
+ }
+}
+
+const char *tm_utils_transmitter_id_to_str(struct graphics_object_id encoder)
+{
+ if (encoder.type != OBJECT_TYPE_ENCODER)
+ return "\b";
+
+ switch (dal_graphics_object_id_get_encoder_id(encoder)) {
+ case ENCODER_ID_INTERNAL_UNIPHY: {
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return "PhyA";
+ case ENUM_ID_2:
+ return "PhyB";
+ default:
+ break;
+ }
+ }
+ break;
+
+ case ENCODER_ID_INTERNAL_UNIPHY1: {
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return "PhyC";
+ case ENUM_ID_2:
+ return "PhyD";
+ default:
+ break;
+ }
+ }
+ break;
+
+ case ENCODER_ID_INTERNAL_UNIPHY2: {
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return "PhyE";
+ case ENUM_ID_2:
+ return "PhyF";
+ default:
+ break;
+ }
+ }
+ break;
+
+ case ENCODER_ID_INTERNAL_UNIPHY3: {
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return "PhyG";
+ default:
+ break;
+ }
+ }
+ break;
+
+ case ENCODER_ID_EXTERNAL_NUTMEG: {
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return "NutmegCRT";
+ default:
+ break;
+ }
+ }
+ break;
+ case ENCODER_ID_EXTERNAL_TRAVIS: {
+ switch (encoder.enum_id) {
+ case ENUM_ID_1:
+ return "TravisCRT";
+ case ENUM_ID_2:
+ return "TravisLCD";
+ default:
+ break;
+ }
+ }
+ break;
+
+ case ENCODER_ID_INTERNAL_DAC1:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC1: {
+ return "DACA";
+ }
+ break;
+
+ case ENCODER_ID_INTERNAL_DAC2:
+ case ENCODER_ID_INTERNAL_KLDSCP_DAC2: {
+ return "DACB";
+ }
+ break;
+
+ case ENCODER_ID_INTERNAL_DVO1:
+ case ENCODER_ID_INTERNAL_KLDSCP_DVO1: {
+ return "DVO";
+ }
+ break;
+
+ case ENCODER_ID_INTERNAL_WIRELESS: {
+ return "Wireless";
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return "Unknown";
+}
+
+const char *tm_utils_hpd_line_to_str(enum hpd_source_id line)
+{
+ switch (line) {
+ case HPD_SOURCEID1:
+ return "HPD-1";
+ case HPD_SOURCEID2:
+ return "HPD-2";
+ case HPD_SOURCEID3:
+ return "HPD-3";
+ case HPD_SOURCEID4:
+ return "HPD-4";
+ case HPD_SOURCEID5:
+ return "HPD-5";
+ case HPD_SOURCEID6:
+ return "HPD-6";
+ default:
+ break;
+ }
+
+ return "No HPD";
+}
+
+const char *tm_utils_ddc_line_to_str(enum channel_id line)
+{
+ switch (line) {
+ case CHANNEL_ID_DDC1:
+ return "DDC-1";
+ case CHANNEL_ID_DDC2:
+ return "DDC-2";
+ case CHANNEL_ID_DDC3:
+ return "DDC-3";
+ case CHANNEL_ID_DDC4:
+ return "DDC-4";
+ case CHANNEL_ID_DDC5:
+ return "DDC-5";
+ case CHANNEL_ID_DDC6:
+ return "DDC-6";
+ case CHANNEL_ID_DDC_VGA:
+ return "DDC-VGA";
+ case CHANNEL_ID_I2C_PAD:
+ return "DDC-I2CPAD";
+ default:
+ break;
+ }
+
+ return "No DDC";
+}
+
+const char *tm_utils_device_type_to_str(enum dal_device_type device)
+{
+ switch (device) {
+ case DEVICE_TYPE_LCD: return "LCD";
+ case DEVICE_TYPE_CRT: return "CRT";
+ case DEVICE_TYPE_DFP: return "DFP";
+ case DEVICE_TYPE_CV: return "CV";
+ case DEVICE_TYPE_TV: return "TV";
+ case DEVICE_TYPE_CF: return "CF";
+ case DEVICE_TYPE_WIRELESS: return "Wireless";
+ default:
+ break;
+ }
+
+ return "Unknown";
+}
+
+
+bool tm_utils_is_edid_connector_type_valid_with_signal_type(
+ enum display_dongle_type dongle_type,
+ enum dcs_edid_connector_type edid_conn,
+ enum signal_type signal)
+{
+ bool is_signal_digital;
+ bool is_edid_digital;
+
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK1:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ case SIGNAL_TYPE_LVDS:
+ case SIGNAL_TYPE_EDP: {
+ is_signal_digital = true;
+ }
+ break;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST: {
+		/* DP connector can have a converter (active dongle)
+		 * attached that may convert the digital signal to analog.
+		 * In this case the EDID connector type will be analog,
+		 * so we need to check the dongle type and switch to an
+		 * analog signal.
+		 */
+ if (dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER)
+ is_signal_digital = false;
+ else
+ is_signal_digital = true;
+
+ }
+ break;
+ case SIGNAL_TYPE_RGB:
+ case SIGNAL_TYPE_YPBPR:
+ case SIGNAL_TYPE_SCART:
+ case SIGNAL_TYPE_COMPOSITE:
+ case SIGNAL_TYPE_SVIDEO: {
+ is_signal_digital = false;
+ }
+ break;
+ default:
+ return false;
+ }
+
+ switch (edid_conn) {
+ case EDID_CONNECTOR_DIGITAL:
+ case EDID_CONNECTOR_DVI:
+ case EDID_CONNECTOR_HDMIA:
+ case EDID_CONNECTOR_MDDI:
+ case EDID_CONNECTOR_DISPLAYPORT: {
+ is_edid_digital = true;
+ }
+ break;
+ case EDID_CONNECTOR_ANALOG: {
+ is_edid_digital = false;
+ }
+ break;
+ default:
+ return false;
+ }
+
+ return (is_edid_digital == is_signal_digital);
+}
+
+
+enum tm_utils_display_type tm_utils_signal_to_display_type(
+ enum signal_type signal)
+{
+ enum tm_utils_display_type res = DISPLAY_DFP;
+
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK1:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_WIRELESS:
+ res = DISPLAY_DFP;
+ break;
+ case SIGNAL_TYPE_LVDS:
+ case SIGNAL_TYPE_EDP:
+ res = DISPLAY_LCD_PANEL;
+ break;
+ case SIGNAL_TYPE_YPBPR:
+ res = DISPLAY_COMPONENT_VIDEO;
+ break;
+ case SIGNAL_TYPE_RGB:
+ res = DISPLAY_MONITOR;
+ break;
+ case SIGNAL_TYPE_SCART:
+ case SIGNAL_TYPE_COMPOSITE:
+ case SIGNAL_TYPE_SVIDEO:
+ res = DISPLAY_TELEVISION;
+ break;
+ case SIGNAL_TYPE_DVO:
+ default:
+ /* @todo probably shouldn't need to handle DVO case */
+ break;
+
+ }
+ return res;
+}
+
+
+enum dcs_interface_type dal_tm_utils_signal_type_to_interface_type(
+ enum signal_type signal)
+{
+ /* VGA will be default interface */
+ enum dcs_interface_type interface_type = INTERFACE_TYPE_VGA;
+
+ switch (signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK1:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ interface_type = INTERFACE_TYPE_DVI;
+ break;
+
+ case SIGNAL_TYPE_LVDS:
+ interface_type = INTERFACE_TYPE_LVDS;
+ break;
+
+ case SIGNAL_TYPE_EDP:
+ interface_type = INTERFACE_TYPE_EDP;
+ break;
+
+ case SIGNAL_TYPE_RGB:
+ interface_type = INTERFACE_TYPE_VGA;
+ break;
+
+ case SIGNAL_TYPE_YPBPR:
+ interface_type = INTERFACE_TYPE_CV;
+ break;
+
+ case SIGNAL_TYPE_SCART:
+ case SIGNAL_TYPE_COMPOSITE:
+ case SIGNAL_TYPE_SVIDEO:
+ interface_type = INTERFACE_TYPE_TV;
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ interface_type = INTERFACE_TYPE_DP;
+ break;
+
+ case SIGNAL_TYPE_MVPU_A:
+ case SIGNAL_TYPE_MVPU_B:
+ case SIGNAL_TYPE_MVPU_AB:
+ interface_type = INTERFACE_TYPE_CF;
+ break;
+
+ case SIGNAL_TYPE_WIRELESS:
+ interface_type = INTERFACE_TYPE_WIRELESS;
+ break;
+
+ default:
+ break;
+	}
+
+ return interface_type;
+}
+
+
+enum dal_device_type tm_utils_signal_type_to_device_type(
+ enum signal_type signal)
+{
+ switch (signal) {
+ case SIGNAL_TYPE_LVDS:
+ case SIGNAL_TYPE_EDP:
+ return DEVICE_TYPE_LCD;
+
+ case SIGNAL_TYPE_RGB:
+ return DEVICE_TYPE_CRT;
+
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK1:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return DEVICE_TYPE_DFP;
+
+ case SIGNAL_TYPE_YPBPR:
+ return DEVICE_TYPE_CV;
+
+ case SIGNAL_TYPE_SCART:
+ case SIGNAL_TYPE_COMPOSITE:
+ case SIGNAL_TYPE_SVIDEO:
+ return DEVICE_TYPE_TV;
+
+ case SIGNAL_TYPE_WIRELESS:
+ return DEVICE_TYPE_WIRELESS;
+
+ default:
+ return DEVICE_TYPE_UNKNOWN;
+	}
+}
+
+enum link_service_type tm_utils_signal_to_link_service_type(
+ enum signal_type signal)
+{
+ switch (signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ return LINK_SERVICE_TYPE_DP_SST;
+
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ return LINK_SERVICE_TYPE_DP_MST;
+
+ default:
+ return LINK_SERVICE_TYPE_LEGACY;
+	}
+}
+
+
+enum tm_display_type tm_utils_device_id_to_tm_display_type(struct device_id id)
+{
+ enum tm_display_type type = TM_DISPLAY_TYPE_UNK;
+
+ if (id.device_type == DEVICE_TYPE_CRT && id.enum_id == 1)
+ type = TM_DISPLAY_TYPE_CRT;
+ else if (id.device_type == DEVICE_TYPE_CRT && id.enum_id == 2)
+ type = TM_DISPLAY_TYPE_CRT_DAC2;
+ else if (id.device_type == DEVICE_TYPE_LCD && id.enum_id == 1)
+ type = TM_DISPLAY_TYPE_LCD;
+ else if (id.device_type == DEVICE_TYPE_TV && id.enum_id == 1)
+ type = TM_DISPLAY_TYPE_TV;
+ else if (id.device_type == DEVICE_TYPE_CV && id.enum_id == 1)
+ type = TM_DISPLAY_TYPE_CV;
+ else if (id.device_type == DEVICE_TYPE_DFP)
+ type = TM_DISPLAY_TYPE_DFP;
+ else if (id.device_type == DEVICE_TYPE_WIRELESS)
+ type = TM_DISPLAY_TYPE_WIRELESS;
+
+ return type;
+}
+
+
+/**
+ * Function returns the downgraded signal (or the same) based on
+ * the rule that DVI < HDMI and SingleLink < DualLink.
+ * We assign to all signal types and connector types their
+ * corresponding weights in terms of DVI/HDMI and SL/DL.
+ */
+enum signal_type tm_utils_get_downgraded_signal_type(
+ enum signal_type signal,
+ enum dcs_edid_connector_type connector_type)
+{
+ /*
+ Connector types
+ EDID_CONNECTOR_DIGITAL DL DVI
+ EDID_CONNECTOR_DVI DL DVI
+ EDID_CONNECTOR_HDMIA SL HDMI
+ EDID_CONNECTOR_MDDI SL DVI
+ EDID_CONNECTOR_DISPLAYPORT SL DVI
+
+ Signal types
+ SIGNAL_TYPE_HDMI_TYPE_A SL HDMI
+ SIGNAL_TYPE_DVI_SINGLE_LINK SL DVI
+ SIGNAL_TYPE_DVI_DUAL_LINK DL DVI
+ */
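+	/* For example (illustrative): signal = SIGNAL_TYPE_DVI_DUAL_LINK
+	 * with connector_type = EDID_CONNECTOR_HDMIA gives dl = false and
+	 * hdmi = false for the combined pair, so the signal is downgraded
+	 * to SIGNAL_TYPE_DVI_SINGLE_LINK.
+	 */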
+
+ bool dl1 = false;
+ bool hdmi1 = false;
+ bool dl2 = false;
+ bool hdmi2 = false;
+ bool dl3;
+ bool hdmi3;
+
+ switch (connector_type) {
+ case EDID_CONNECTOR_DIGITAL:
+ case EDID_CONNECTOR_DVI:
+ dl1 = true;
+ hdmi1 = false;
+ break;
+ case EDID_CONNECTOR_HDMIA:
+ dl1 = false;
+ hdmi1 = true;
+ break;
+ case EDID_CONNECTOR_MDDI:
+ case EDID_CONNECTOR_DISPLAYPORT:
+ dl1 = false;
+ hdmi1 = false;
+ break;
+ default:
+ return signal; /* No need to downgrade the signal */
+
+ };
+
+ switch (signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ dl2 = false;
+ hdmi2 = true;
+ break;
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ dl2 = true;
+ hdmi2 = false;
+ break;
+ default:
+ return signal; /* No need to downgrade the signal */
+
+ };
+
+ dl3 = dl1 && dl2;
+ hdmi3 = hdmi1 && hdmi2;
+
+ if (dl3 && !hdmi3)
+ signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ else if (!dl3 && hdmi3)
+ signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ else if (!dl3 && !hdmi3)
+ signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+
+ return signal;
+}
+
+
+/* HdmiA --> SingleLink
+ * DP remains DP (DP audio capability does not change signal) */
+enum signal_type tm_utils_downgrade_to_no_audio_signal(enum signal_type signal)
+{
+ enum signal_type downgraded_signal = SIGNAL_TYPE_NONE;
+
+ switch (signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ downgraded_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+
+ default:
+ downgraded_signal = signal;
+ break;
+ }
+
+ return downgraded_signal;
+}
+
+
+enum ddc_transaction_type tm_utils_get_ddc_transaction_type(
+ enum signal_type sink_signal,
+ enum signal_type asic_signal)
+{
+ enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE;
+
+ if (sink_signal == asic_signal) {
+ switch (sink_signal) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK1:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ case SIGNAL_TYPE_LVDS:
+ case SIGNAL_TYPE_RGB:
+ transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ break;
+
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ /* MST does not use I2COverAux, but there is the
+ * SPECIAL use case for "immediate dwnstrm device
+ * access" (EPR#370830). */
+ transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ break;
+
+ default:
+ break;
+ }
+ } else {
+ switch (asic_signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+
+ if (sink_signal == SIGNAL_TYPE_RGB
+ || sink_signal == SIGNAL_TYPE_LVDS) {
+ transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER;
+ }
+ break;
+
+ case SIGNAL_TYPE_DVO:
+ case SIGNAL_TYPE_DVO24:
+ if (sink_signal == SIGNAL_TYPE_DVI_SINGLE_LINK
+ || sink_signal == SIGNAL_TYPE_DVI_DUAL_LINK
+ || sink_signal == SIGNAL_TYPE_DVI_SINGLE_LINK1
+ || sink_signal == SIGNAL_TYPE_HDMI_TYPE_A
+ || sink_signal == SIGNAL_TYPE_RGB) {
+ transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ return transaction_type;
+}
+
+
+/* We require that clock sharing group matches clock sharing level */
+bool tm_utils_is_clock_sharing_mismatch(
+ enum clock_sharing_level sharing_level,
+ enum clock_sharing_group sharing_group)
+{
+ /* Common case we allow sharing group to be used at any sharing level */
+ bool mismatch = false;
+
+ switch (sharing_group) {
+ case CLOCK_SHARING_GROUP_DISPLAY_PORT:
+ case CLOCK_SHARING_GROUP_ALTERNATIVE_DP_REF:
+ mismatch = (sharing_level
+ < CLOCK_SHARING_LEVEL_DISPLAY_PORT_SHAREABLE);
+ break;
+
+ case CLOCK_SHARING_GROUP_DP_MST:
+ mismatch = (sharing_level
+ < CLOCK_SHARING_LEVEL_DP_MST_SHAREABLE);
+ break;
+
+ default:
+ break;
+ }
+
+ return mismatch;
+}
+
+
+bool tm_utils_is_destructive_method(enum tm_detection_method method)
+{
+ switch (method) {
+ case DETECTION_METHOD_DESTRUCTIVE:
+ case DETECTION_METHOD_DESTRUCTIVE_AND_EMBEDDED:
+ case DETECTION_METHOD_HOTPLUG:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+void tm_utils_set_bit(uint32_t *bitmap, uint8_t bit)
+{
+ *bitmap |= (1 << bit);
+}
+
+void tm_utils_clear_bit(uint32_t *bitmap, uint8_t bit)
+{
+ *bitmap &= ~(1 << bit);
+}
+
+bool tm_utils_test_bit(uint32_t *bitmap, uint8_t bit)
+{
+ return ((*bitmap & (1 << bit)) != 0);
+}
+
+void tm_utils_init_formatted_buffer(char *buf, uint32_t buf_size,
+ const char *format, ...)
+{
+ va_list args;
+
+ va_start(args, format);
+
+ dal_log_to_buffer(buf, buf_size, format, args);
+
+ va_end(args);
+}
+
+/******************************************************************************
+ * TM Calc Subset implementation.
+ *****************************************************************************/
+struct tm_calc_subset *dal_tm_calc_subset_create(void)
+{
+ struct tm_calc_subset *tm_calc_subset;
+
+ tm_calc_subset = dal_alloc(
+ sizeof(struct tm_calc_subset));
+
+ if (tm_calc_subset == NULL)
+ return NULL;
+
+ tm_calc_subset->max_subset_size = 0;
+ tm_calc_subset->max_value = 0;
+ tm_calc_subset->subset_size = 0;
+ dal_memset(tm_calc_subset->buffer, 0,
+ sizeof(tm_calc_subset->buffer));
+
+ return tm_calc_subset;
+}
+
+void dal_tm_calc_subset_destroy(struct tm_calc_subset *subset)
+{
+	dal_free(subset);
+}
+
+bool dal_tm_calc_subset_start(
+ struct tm_calc_subset *subset,
+ uint32_t max_value,
+ uint32_t max_subset_size)
+{
+ if (max_subset_size < 1 ||
+ max_subset_size > MAX_COFUNCTIONAL_PATHS)
+ return false;
+
+ subset->max_value = max_value;
+ subset->max_subset_size = max_subset_size;
+ subset->subset_size = 1;
+ subset->buffer[0] = 0;
+
+ return true;
+}
+
+uint32_t dal_tm_calc_subset_get_value(
+ struct tm_calc_subset *subset,
+ uint32_t index)
+{
+ if (index < subset->subset_size)
+ return subset->buffer[index];
+
+ return (uint32_t)(-1);
+}
+
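+/* Enumeration order example (illustrative): after
+ * dal_tm_calc_subset_start(subset, 3, 2), repeated calls to
+ * dal_tm_calc_subset_step() visit the subsets
+ *   {0}, {0,1}, {0,2}, {1}, {1,2}, {2}
+ * and return false once the empty subset is reached.
+ */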
+bool dal_tm_calc_subset_step(struct tm_calc_subset *subset)
+{
+ uint32_t next_value;
+
+ if (subset->subset_size == 0 ||
+ subset->subset_size > subset->max_subset_size)
+ return false;
+
+	/* Try to increase the subset size. The new entry will
+	 * be assigned the subsequent value.
+	 */
+ next_value = subset->buffer[subset->subset_size - 1] + 1;
+ if (next_value < subset->max_value &&
+ subset->subset_size < subset->max_subset_size) {
+
+ subset->buffer[subset->subset_size++] = next_value;
+ return true;
+ }
+
+	/* If we cannot increase the subset size, try to
+	 * increase the value of the last entry in the subset.
+	 * If we cannot increase the value of the last entry,
+	 * we reduce the size of the subset and try again.
+	 */
+	return dal_tm_calc_subset_skip(subset);
+}
+
+bool dal_tm_calc_subset_skip(struct tm_calc_subset *subset)
+{
+ uint32_t next_value;
+
+ /* Try to increase value of last entry in subset
+ * If we cannot increase value of last entry,
+ * we reduce the size of the subset and try again
+ */
+ while (subset->subset_size > 0) {
+
+ next_value = subset->buffer[subset->subset_size-1] + 1;
+ if (next_value < subset->max_value) {
+
+ subset->buffer[subset->subset_size-1] = next_value;
+ return true;
+ }
+ subset->subset_size--;
+ }
+
+	/* We failed to advance and reached the empty subset*/
+	return false;
+}
+
+char *tm_utils_get_tm_resource_str(struct tm_resource *tm_resource)
+{
+ static char tmp_buf[128];
+ struct graphics_object_id id =
+ tm_resource->funcs->get_grph_id(tm_resource);
+
+ tm_utils_init_formatted_buffer(
+ tmp_buf,
+ sizeof(tmp_buf),
+ "0x%08X:[%u-%u-%u]: (%s %s-%s %s)",
+ *(uint32_t *)(&id),
+ id.type,
+ id.id,
+ id.enum_id,
+ tm_utils_go_type_to_str(id),
+ tm_utils_go_id_to_str(id),
+ tm_utils_go_enum_to_str(id),
+ tm_utils_transmitter_id_to_str(id));
+
+ return tmp_buf;
+}
+
+bool tm_utils_is_supported_engine(union supported_stream_engines se,
+ enum engine_id engine)
+{
+ bool rc = false;
+
+ switch (engine) {
+ case ENGINE_ID_DIGA:
+ rc = se.engine.ENGINE_ID_DIGA;
+ break;
+ case ENGINE_ID_DIGB:
+ rc = se.engine.ENGINE_ID_DIGB;
+ break;
+ case ENGINE_ID_DIGC:
+ rc = se.engine.ENGINE_ID_DIGC;
+ break;
+ case ENGINE_ID_DIGD:
+ rc = se.engine.ENGINE_ID_DIGD;
+ break;
+ case ENGINE_ID_DIGE:
+ rc = se.engine.ENGINE_ID_DIGE;
+ break;
+ case ENGINE_ID_DIGF:
+ rc = se.engine.ENGINE_ID_DIGF;
+ break;
+ case ENGINE_ID_DIGG:
+ rc = se.engine.ENGINE_ID_DIGG;
+ break;
+ case ENGINE_ID_DVO:
+ rc = se.engine.ENGINE_ID_DVO;
+ break;
+ case ENGINE_ID_DACA:
+ rc = se.engine.ENGINE_ID_DACA;
+ break;
+ case ENGINE_ID_DACB:
+ rc = se.engine.ENGINE_ID_DACB;
+ break;
+ case ENGINE_ID_VCE:
+ rc = se.engine.ENGINE_ID_VCE;
+ break;
+ default:
+ break;
+ }
+
+ return rc;
+}
+
+bool tm_utils_is_dp_connector(const struct connector *cntr)
+{
+ if (dal_graphics_object_id_get_connector_id(
+ dal_connector_get_graphics_object_id(cntr)) ==
+ CONNECTOR_ID_DISPLAY_PORT)
+ return true;
+ else
+ return false;
+}
+
+bool tm_utils_is_dp_asic_signal(const struct display_path *display_path)
+{
+ return dal_is_dp_signal(dal_display_path_get_query_signal(
+ display_path, ASIC_LINK_INDEX));
+}
diff --git a/drivers/gpu/drm/amd/dal/topology/tm_utils.h b/drivers/gpu/drm/amd/dal/topology/tm_utils.h
new file mode 100644
index 000000000000..d57339eef8d6
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/topology/tm_utils.h
@@ -0,0 +1,142 @@
+#ifndef __DAL_TM_UTILS_H__
+#define __DAL_TM_UTILS_H__
+
+#include "include/grph_object_id.h"
+#include "include/signal_types.h"
+#include "include/grph_object_ctrl_defs.h"
+#include "include/ddc_service_types.h"
+#include "include/dcs_types.h"
+#include "include/link_service_interface.h"
+#include "include/clock_source_types.h"
+#include "include/topology_mgr_interface.h"
+
+#include "tm_internal_types.h"
+#include "tm_resource_mgr.h"
+
+struct tm_calc_subset {
+ /* defines upper limit (exclusive) of possible values
+ * lower limit is 0
+ */
+ uint32_t max_value;
+ /* defines upper limit (inclusive) of subset size
+ * lower limit is 1
+ */
+ uint32_t max_subset_size;
+ /* size of current subset */
+ uint32_t subset_size;
+ /* stores display indices which are checked for co-func*/
+ uint32_t buffer[MAX_COFUNCTIONAL_PATHS];
+};
+
+enum tm_utils_display_type {
+ DISPLAY_MONITOR,
+ DISPLAY_TELEVISION,
+ DISPLAY_LCD_PANEL,
+ DISPLAY_DFP,
+ DISPLAY_COMPONENT_VIDEO
+};
+
+const char *tm_utils_encoder_id_to_str(enum encoder_id id);
+
+const char *tm_utils_connector_id_to_str(enum connector_id id);
+
+const char *tm_utils_audio_id_to_str(enum audio_id id);
+
+const char *tm_utils_controller_id_to_str(enum controller_id id);
+
+const char *tm_utils_clock_source_id_to_str(enum clock_source_id id);
+
+const char *tm_utils_engine_id_to_str(enum engine_id id);
+
+const char *tm_utils_go_type_to_str(struct graphics_object_id id);
+
+const char *tm_utils_go_id_to_str(struct graphics_object_id id);
+
+const char *tm_utils_go_enum_to_str(struct graphics_object_id id);
+
+const char *tm_utils_transmitter_id_to_str(struct graphics_object_id id);
+
+enum dal_device_type tm_utils_signal_type_to_device_type(
+ enum signal_type signal);
+
+enum dcs_interface_type dal_tm_utils_signal_type_to_interface_type(
+ enum signal_type signal);
+
+const char *tm_utils_signal_type_to_str(enum signal_type type);
+
+const char *tm_utils_engine_priority_to_str(enum tm_engine_priority priority);
+
+enum tm_display_type tm_utils_device_id_to_tm_display_type(struct device_id id);
+
+const char *tm_utils_device_type_to_str(enum dal_device_type device);
+
+const char *tm_utils_hpd_line_to_str(enum hpd_source_id line);
+
+const char *tm_utils_ddc_line_to_str(enum channel_id line);
+
+void tm_utils_set_bit(uint32_t *bitmap, uint8_t bit);
+
+void tm_utils_clear_bit(uint32_t *bitmap, uint8_t bit);
+
+bool tm_utils_test_bit(uint32_t *bitmap, uint8_t bit);
+
+bool tm_utils_is_clock_sharing_mismatch(
+ enum clock_sharing_level sharing_level,
+ enum clock_sharing_group sharing_group);
+
+enum link_service_type tm_utils_signal_to_link_service_type(
+ enum signal_type signal);
+
+bool tm_utils_is_destructive_method(enum tm_detection_method method);
+
+bool tm_utils_is_edid_connector_type_valid_with_signal_type(
+ enum display_dongle_type dongle_type,
+ enum dcs_edid_connector_type edid_conn,
+ enum signal_type signal);
+
+enum signal_type tm_utils_get_downgraded_signal_type(
+ enum signal_type signal,
+ enum dcs_edid_connector_type connector_type);
+
+enum signal_type tm_utils_downgrade_to_no_audio_signal(
+ enum signal_type signal);
+
+enum ddc_transaction_type tm_utils_get_ddc_transaction_type(
+ enum signal_type sink_signal,
+ enum signal_type asic_signal);
+
+/******************************************************************************
+ * TM Subset. A helper object to prepare a buffer of display indices to be
+ * checked for co-functionality
+ *****************************************************************************/
+struct tm_calc_subset *dal_tm_calc_subset_create(void);
+
+void dal_tm_calc_subset_destroy(struct tm_calc_subset *subset);
+
+uint32_t dal_tm_calc_subset_get_value(
+ struct tm_calc_subset *subset,
+ uint32_t index);
+
+bool dal_tm_calc_subset_start(
+ struct tm_calc_subset *subset,
+ uint32_t max_value,
+ uint32_t max_subset_size);
+
+bool dal_tm_calc_subset_step(struct tm_calc_subset *subset);
+
+bool dal_tm_calc_subset_skip(struct tm_calc_subset *subset);
+
+/******************************************************************************
+ * Miscellaneous functions
+ *****************************************************************************/
+char *tm_utils_get_tm_resource_str(struct tm_resource *tm_resource);
+
+bool tm_utils_is_supported_engine(union supported_stream_engines se,
+ enum engine_id engine);
+
+bool tm_utils_is_dp_connector(const struct connector *cntr);
+
+/* Check if signal at "ASIC link" is Display Port. */
+bool tm_utils_is_dp_asic_signal(const struct display_path *display_path);
+
+#endif /* __DAL_TM_UTILS_H__ */
diff --git a/drivers/gpu/drm/amd/dal/topology/topology.c b/drivers/gpu/drm/amd/dal/topology/topology.c
new file mode 100644
index 000000000000..9ac01e86c1a6
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/topology/topology.c
@@ -0,0 +1,5313 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dal_services.h"
+
+#include "include/logger_interface.h"
+#include "include/topology_mgr_interface.h"
+#include "include/display_path_interface.h"
+#include "include/connector_interface.h"
+#include "include/encoder_interface.h"
+#include "include/controller_interface.h"
+#include "include/display_path_interface.h"
+#include "include/audio_interface.h"
+#include "include/dcs_interface.h"
+#include "include/vector.h"
+#include "include/display_path_set_interface.h"
+#include "include/adapter_service_interface.h"
+#include "include/mode_manager_interface.h"
+
+#include "topology.h"
+#include "tm_internal_types.h"
+#include "tm_resource.h"
+#include "tm_resource_mgr.h"
+#include "tm_detection_mgr.h"
+#include "tm_resource_builder.h"
+
+#include "tm_utils.h"
+#include "tm_subsets_cache.h"
+
+/* file-level globals */
+
+/* file-level structures */
+struct topology_mgr {
+ struct dal_context *dal_context;
+ struct adapter_service *adapter_srv;
+ struct timing_service *timing_srv;
+ struct hw_sequencer *hwss_srvr;
+ struct mode_manager *mm;
+
+ struct tm_resource_mgr *tm_rm;
+ struct tm_detection_mgr *tm_dm;
+ struct tm_resource_builder *tm_rb;
+
+ struct vector *display_paths;
+
+ struct tm_subsets_cache *display_subsets;
+
+ uint32_t max_num_of_non_dp_paths;
+ /* A bitmap of signals which support 'single selected timing'
+ * feature. */
+ uint32_t single_selected_timing_signals;
+
+ uint32_t max_num_of_cofunctional_paths;
+ uint32_t max_num_of_cofunctional_targets;
+
+ uint32_t max_num_of_supported_hdmi;
+
+ bool valid_cofunc_sets;
+
+ enum dal_video_power_state current_power_state;
+ enum dal_video_power_state previous_power_state;
+
+ enum clock_sharing_level clock_sharing_level;
+
+ uint32_t display_detection_mask;
+ bool report_detection_changes;
+
+ /* This flag indicates that if DAL is in process to power down HW.*/
+ bool hw_power_down_required;
+
+ uint32_t attached_hdmi_num;
+};
+
+
+/******************************************************************************
+ * Prototypes of private functions.
+ *****************************************************************************/
+
+static enum tm_result tm_init_during_construct(struct topology_mgr *tm);
+static enum tm_result create_gpu_resources(struct topology_mgr *tm);
+static enum tm_result create_real_display_paths(struct topology_mgr *tm);
+static enum tm_result tm_update_encoder_implementations(
+ struct topology_mgr *tm);
+
+static enum tm_result add_fake_crt_vga_dvi_paths(struct topology_mgr *tm);
+static enum tm_result miscellaneous_init(struct topology_mgr *tm);
+static enum tm_result transfer_paths_from_resource_builder_to_tm(
+ struct topology_mgr *tm);
+
+static enum tm_result allocate_storage_for_link_services(
+ struct topology_mgr *tm);
+static void associate_link_services_with_display_paths(
+ struct topology_mgr *tm);
+
+static void tm_init_features(struct topology_mgr *tm);
+static enum tm_result tm_update_internal_database(struct topology_mgr *tm);
+
+static enum tm_result tm_handle_detection_register_display(
+ struct topology_mgr *tm);
+
+static bool tm_is_display_index_valid(struct topology_mgr *tm,
+ uint32_t display_index, const char *caller_func);
+
+static void tm_update_stream_engine_priorities(
+ struct topology_mgr *tm);
+
+static bool tm_create_initial_cofunc_display_subsets(
+ struct topology_mgr *tm);
+
+static enum clock_sharing_group tm_get_default_clock_sharing_group(
+ struct topology_mgr *tm,
+ enum signal_type signal,
+ bool allow_per_timing_sharing);
+
+static bool tm_check_num_of_cofunc_displays(
+ struct topology_mgr *tm,
+ uint32_t max_value,
+ uint32_t max_subset_size);
+
+static bool tm_can_display_paths_be_enabled_at_the_same_time(
+ struct topology_mgr *tm,
+ struct tm_resource_mgr *tm_rm_clone,
+ const uint32_t *displays,
+ uint32_t array_size);
+
+static void handle_signal_downgrade(struct topology_mgr *tm,
+ struct display_path *display_path,
+ enum signal_type new_signal);
+
+static void tm_update_on_connection_change(struct topology_mgr *tm,
+ struct display_path *display_path,
+ struct tm_detection_status *detection_status);
+
+static bool is_display_index_array_valid(
+ struct topology_mgr *tm,
+ const uint32_t display_index_array[],
+ uint32_t array_size);
+
+static struct controller *get_controller_for_plane_index(
+ struct topology_mgr *tm,
+ struct display_path *path,
+ uint32_t plane_index,
+ const struct plane_config *plcfg,
+ uint32_t *controller_index_out);
+
+/******************************************************************************
+ * type-safe macro definitions for vector handling
+ *****************************************************************************/
+DAL_VECTOR_AT_INDEX(display_paths, struct display_path **);
+DAL_VECTOR_INSERT_AT(display_paths, struct display_path **);
+
+/******************************************************************************
+ * Implementation of private functions.
+ *****************************************************************************/
+
+static bool construct(struct topology_mgr_init_data *init_data,
+ struct topology_mgr *tm)
+{
+ bool init_err = false;
+ struct tm_resource_mgr *tm_rm = NULL;
+ struct tm_resource_mgr_init_data tm_rm_init_data;
+ struct tm_detection_mgr *tm_dm = NULL;
+ struct tm_detection_mgr_init_data tm_dm_init_data;
+ struct tm_resource_builder *tm_rb = NULL;
+ struct tm_resource_builder_init_data tm_rb_init_data;
+ struct dal_context *dal_context = init_data->dal_context;
+
+ TM_IFACE_TRACE();
+
+ tm->dal_context = init_data->dal_context;
+ tm->mm = init_data->mm;
+ tm->adapter_srv = init_data->adapter_srv;
+ tm->hwss_srvr = init_data->hwss_srvr;
+ tm->timing_srv = init_data->timing_srv;
+
+ tm->current_power_state = DAL_VIDEO_POWER_UNSPECIFIED;
+ tm->previous_power_state = DAL_VIDEO_POWER_UNSPECIFIED;
+
+ tm->hw_power_down_required = true;
+ tm->clock_sharing_level = CLOCK_SHARING_LEVEL_NOT_SHAREABLE;
+
+ dal_memset(&tm_rm_init_data, 0, sizeof(tm_rm_init_data));
+ dal_memset(&tm_dm_init_data, 0, sizeof(tm_dm_init_data));
+ dal_memset(&tm_rb_init_data, 0, sizeof(tm_rb_init_data));
+
+ do {
+ /* create/initialise Resource Manager */
+ tm_rm_init_data.dal_context = init_data->dal_context;
+ tm_rm_init_data.as = init_data->adapter_srv;
+
+ tm_rm = tm_resource_mgr_create(&tm_rm_init_data);
+ if (!tm_rm) {
+ init_err = true;
+ TM_ERROR("tm_resource_mgr_create() failed!\n");
+ break;
+ }
+
+ tm->tm_rm = tm_rm;
+
+ /* create/initialise Detection Manager */
+ tm_dm_init_data.dal_context = init_data->dal_context;
+ tm_dm_init_data.as = init_data->adapter_srv;
+ tm_dm_init_data.hwss = init_data->hwss_srvr;
+ tm_dm_init_data.resource_mgr = tm_rm;
+ tm_dm_init_data.tm = tm;
+
+ tm_dm = dal_tm_detection_mgr_create(&tm_dm_init_data);
+ if (!tm_dm) {
+ TM_ERROR("dal_tm_detection_mgr_create() failed!\n");
+ init_err = true;
+ break;
+ }
+
+ tm->tm_dm = tm_dm;
+
+ /* create/initialise Resource Builder */
+ tm_rb_init_data.dal_context = tm->dal_context;
+ tm_rb_init_data.adapter_service = init_data->adapter_srv;
+ tm_rb_init_data.timing_service = init_data->timing_srv;
+ /* TODO: Possibly remove irq_manager */
+ /* tm_rb_init_data.irq_manager = init_data->irq_manager; */
+ tm_rb_init_data.hwss = init_data->hwss_srvr;
+ tm_rb_init_data.resource_mgr = tm->tm_rm;
+ tm_rb_init_data.tm = tm;
+
+ tm_rb = tm_resource_builder_create(&tm_rb_init_data);
+ if (!tm_rb) {
+ TM_ERROR("tm_resource_builder_create() failed!\n");
+ init_err = true;
+ break;
+ }
+
+ tm->tm_rb = tm_rb;
+
+ } while (0);
+
+ if (false == init_err)
+ init_err = (tm_init_during_construct(tm) != TM_RESULT_SUCCESS);
+
+ if (true == init_err) {
+ /* Clean-up.
+ * Note: Do NOT call dal_tm_destroy()! */
+ if (tm->display_paths)
+ dal_vector_destroy(&tm->display_paths);
+
+ if (tm->tm_rm)
+ tm_resource_mgr_destroy(&tm->tm_rm);
+
+ if (tm->tm_dm)
+ dal_tm_detection_mgr_destroy(&tm->tm_dm);
+
+ if (tm->tm_rb)
+ tm_resource_builder_destroy(&tm->tm_rb);
+
+ return false;
+ }
+
+ /* All O.K. */
+ return true;
+}
+
+static struct display_path *tm_get_display_path_at_index(
+ struct topology_mgr *tm,
+ uint32_t index)
+{
+ struct display_path **display_path_item;
+ struct display_path *display_path;
+ struct dal_context *dal_context = tm->dal_context;
+
+ if (NULL == tm->display_paths) {
+ /* We may get here if the OS ignores an error returned by
+ * dal_tm_create(). */
+ TM_ERROR("%s: No display paths were created!\n", __func__);
+ return NULL;
+ }
+
+ display_path_item = display_paths_vector_at_index(
+ tm->display_paths, index);
+
+ if (NULL == display_path_item) {
+ TM_ERROR("%s: no item at index:%d!\n", __func__, index);
+ return NULL;
+ }
+
+ display_path = *display_path_item;
+
+ return display_path;
+}
+
+static uint32_t tm_get_display_path_count(struct topology_mgr *tm)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ if (NULL == tm->display_paths) {
+ /* We may get here if the OS ignores an error returned by
+ * dal_tm_create(). */
+ TM_ERROR("%s: No display paths were created!\n", __func__);
+ return 0;
+ }
+
+ return dal_vector_get_count(tm->display_paths);
+}
+
+static void destruct(struct topology_mgr *tm)
+{
+ struct gpu *gpu = NULL;
+
+ /* TODO: call dal_tm_unregister_from_display_detection_interrupt() */
+
+ gpu = tm_resource_mgr_get_gpu_interface(tm->tm_rm);
+
+ if (tm->display_paths) {
+ uint32_t i;
+
+ for (i = 0; i < tm_get_display_path_count(tm); i++) {
+ struct display_path *display_path =
+ tm_get_display_path_at_index(tm, i);
+ struct dcs *dcs = dal_display_path_get_dcs(
+ display_path);
+ dal_dcs_destroy(&dcs);
+ dal_display_path_destroy(&display_path);
+ }
+
+ dal_vector_destroy(&tm->display_paths);
+ }
+
+ if (gpu != NULL)
+ dal_gpu_destroy(&gpu);
+
+ if (tm->tm_rm)
+ tm_resource_mgr_destroy(&tm->tm_rm);
+
+ if (tm->tm_dm)
+ dal_tm_detection_mgr_destroy(&tm->tm_dm);
+
+ if (tm->tm_rb)
+ tm_resource_builder_destroy(&tm->tm_rb);
+
+ if (tm->display_subsets)
+ dal_tm_subsets_cache_destroy(&tm->display_subsets);
+}
+
+/******************************************************************************
+ * Implementation of public functions.
+ *****************************************************************************/
+
+struct topology_mgr *dal_tm_create(struct topology_mgr_init_data *init_data)
+{
+ struct topology_mgr *tm = NULL;
+ struct dal_context *dal_context = init_data->dal_context;
+
+ tm = dal_alloc(sizeof(*tm));
+
+ if (!tm) {
+ TM_ERROR("dal_alloc() failed!\n");
+ return NULL;
+ }
+
+ if (!construct(init_data, tm)) {
+ dal_free(tm);
+ return NULL;
+ }
+
+ return tm;
+}
+
+void dal_tm_destroy(struct topology_mgr **tm)
+{
+ if (!tm || !(*tm))
+ return;
+
+ /***************************************
+ * deallocate all subcomponents of TM
+ ***************************************/
+ destruct(*tm);
+
+ /***************************************
+ * deallocate TM itself
+ ***************************************/
+ dal_free(*tm);
+ *tm = NULL;
+}
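+
+/* A minimal creation/teardown sketch (illustrative only; member setup and
+ * error handling elided):
+ *
+ *   struct topology_mgr_init_data init_data = { ... };
+ *   struct topology_mgr *tm = dal_tm_create(&init_data);
+ *
+ *   if (tm != NULL) {
+ *       ... use the Topology Manager ...
+ *       dal_tm_destroy(&tm); // 'tm' is set to NULL by destroy
+ *   }
+ */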
+
+/**********************************
+ Per-Display Path handlers/queries
+***********************************/
+
+/**
+ * Acquires display path and all mandatory resources which belong to it.
+ *
+ * \param [in] display_index: Index of display path which should be acquired
+ *
+ * \return
+ * TM_RESULT_SUCCESS: if display path was successfully acquired
+ * TM_RESULT_FAILURE: otherwise
+ */
+enum tm_result dal_tm_acquire_display_path(struct topology_mgr *tm,
+ uint32_t display_index)
+{
+ struct display_path *display_path;
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_IFACE_TRACE();
+
+ if (!tm_is_display_index_valid(tm, display_index, __func__))
+ return TM_RESULT_FAILURE;
+
+ display_path = tm_get_display_path_at_index(tm, display_index);
+
+ if (dal_display_path_is_acquired(display_path) == true) {
+ TM_ERROR("%s: path 0x%p (index: %d) already acquired!\n",
+ __func__, display_path, display_index);
+ return TM_RESULT_FAILURE;
+ }
+
+ if (TM_RESULT_SUCCESS != tm_resource_mgr_acquire_resources(
+ tm->tm_rm,
+ display_path,
+ TM_ACQUIRE_METHOD_HW)) {
+ TM_ERROR("%s: path 0x%p (index: %d) : error in TMRM!\n",
+ __func__, display_path, display_index);
+ return TM_RESULT_FAILURE;
+ }
+
+ return TM_RESULT_SUCCESS;
+}
+
+/**
+ * Releases display path and all resources (including optional) which
+ * belong to it
+ *
+ * \param [in] display_index: Index of display path which should be released
+ */
+void dal_tm_release_display_path(struct topology_mgr *tm,
+ uint32_t display_index)
+{
+ struct display_path *display_path;
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_IFACE_TRACE();
+
+ if (!tm_is_display_index_valid(tm, display_index, __func__))
+ return;
+
+ display_path = tm_get_display_path_at_index(tm, display_index);
+
+ if (dal_display_path_is_acquired(display_path) == false) {
+ TM_ERROR("%s: path 0x%p (index: %d) NOT acquired!\n",
+ __func__, display_path, display_index);
+ return;
+ }
+
+ /* Release optional objects which should be detached explicitly
+ * from display path. */
+ dal_tm_detach_stereo_sync_from_display_path(tm, display_index);
+
+ dal_tm_detach_sync_output_from_display_path(tm, display_index);
+
+ tm_resource_mgr_release_resources(tm->tm_rm, display_path,
+ TM_ACQUIRE_METHOD_HW);
+}
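+
+/* A minimal pairing sketch (illustrative only): a successful acquire is
+ * expected to be balanced by a release on the same display index:
+ *
+ *   if (dal_tm_acquire_display_path(tm, display_index) ==
+ *           TM_RESULT_SUCCESS) {
+ *       ... program the path ...
+ *       dal_tm_release_display_path(tm, display_index);
+ *   }
+ */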
+
+/**
+ * Releases display path enabled by vBIOS.
+ *
+ * \param [in] display_index: Index of display path which should be released
+ */
+void dal_tm_release_vbios_enabled_display_path(struct topology_mgr *tm,
+ uint32_t display_index)
+{
+ struct display_path *display_path;
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_IFACE_TRACE();
+
+ if (!tm_is_display_index_valid(tm, display_index, __func__))
+ return;
+
+ display_path = tm_get_display_path_at_index(tm, display_index);
+
+ if (dal_display_path_is_acquired(display_path) == false) {
+ TM_ERROR("%s: path 0x%p (index: %d) NOT acquired!\n",
+ __func__, display_path, display_index);
+ return;
+ }
+
+ tm_resource_mgr_release_resources(tm->tm_rm, display_path,
+ TM_ACQUIRE_METHOD_HW);
+}
+
+/**
+ * Allocates, duplicates and acquires a single display path.
+ * The caller is responsible for destroying this display path.
+ * This function is used when HW and display path context need to be
+ * accessed in reentrant mode.
+ *
+ * @param [in] display_index: Index of display path to duplicate
+ *
+ * @return
+ * Pointer to allocated display path if succeeded, NULL otherwise
+ */
+struct display_path *dal_tm_create_resource_context_for_display_index(
+ struct topology_mgr *tm,
+ uint32_t display_index)
+{
+ struct dal_context *dal_context = tm->dal_context;
+ struct display_path *src_display_path;
+ struct display_path *dst_display_path;
+ bool is_dst_path_acquired;
+
+ if (!tm_is_display_index_valid(tm, display_index, __func__))
+ return NULL;
+
+ src_display_path = tm_get_display_path_at_index(tm, display_index);
+
+ /* We are cloning CURRENT state - this is why the 'true' flag.
+ * Note that validation code works only with a COPY of the path,
+ * and when it is done, the copy is destroyed.
+ * The idea is NOT to change SW or HW state of the ORIGINAL path.
+ * However, the Resources are NOT copies, that means all resources
+ * which are acquired by this function must be released by calling
+ * dal_tm_destroy_resource_context_for_display_path() */
+ dst_display_path = dal_display_path_clone(src_display_path, true);
+
+ if (dst_display_path == NULL) {
+ TM_ERROR("%s: failed to clone Path:%d!\n", __func__,
+ display_index);
+ return NULL;
+ }
+
+ is_dst_path_acquired = dal_display_path_is_acquired(dst_display_path);
+
+ /* Re-acquire links and signals on already active path or
+ * acquire resources on inactive path. */
+ if (is_dst_path_acquired)
+ dal_display_path_acquire_links(dst_display_path);
+ else {
+ enum tm_result tm_result = tm_resource_mgr_acquire_resources(
+ tm->tm_rm, dst_display_path,
+ /* Validation only - no need to change
+ * HW state. */
+ TM_ACQUIRE_METHOD_SW);
+
+ if (tm_result != TM_RESULT_SUCCESS) {
+ dal_display_path_destroy(&dst_display_path);
+ dst_display_path = NULL;
+ }
+ }
+
+ return dst_display_path;
+}
+
+void dal_tm_destroy_resource_context_for_display_path(
+ struct topology_mgr *tm_mgr,
+ struct display_path *display_path)
+{
+ tm_resource_mgr_release_resources(
+ tm_mgr->tm_rm,
+ display_path,
+ /* Validation only - no need to change
+ * HW state. */
+ TM_ACQUIRE_METHOD_SW);
+
+ dal_display_path_destroy(&display_path);
+}
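+
+/* Reentrant-validation sketch (illustrative only). Because the resources
+ * acquired for the clone are not copies, the clone returned by
+ * dal_tm_create_resource_context_for_display_index() must be released
+ * with dal_tm_destroy_resource_context_for_display_path():
+ *
+ *   struct display_path *copy =
+ *       dal_tm_create_resource_context_for_display_index(
+ *           tm, display_index);
+ *
+ *   if (copy != NULL) {
+ *       ... validate against the copy ...
+ *       dal_tm_destroy_resource_context_for_display_path(tm, copy);
+ *   }
+ */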
+
+
+/** Acquire stereo-sync object on display path (the display path itself
+ * should be already acquired) */
+enum tm_result dal_tm_attach_stereo_synch_to_display_path(
+ struct topology_mgr *tm,
+ uint32_t display_index)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return TM_RESULT_FAILURE;
+}
+
+/**
+ * Detaches the stereo-sync object from a display path (the display path
+ * itself is expected to be already acquired).
+ * If no stereo-sync object is attached to the display path, this
+ * function does nothing.
+ * If the stereo-sync object is no longer used, we power it down.
+ *
+ * \param [in] display_index: Index of display path from which stereo-sync
+ * object should be detached
+ */
+void dal_tm_detach_stereo_sync_from_display_path(struct topology_mgr *tm,
+ uint32_t display_index)
+{
+ struct display_path *display_path;
+ struct encoder *stereo_sync;
+ bool recache_needed = false;
+ struct tm_resource *stereo_resource;
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_IFACE_TRACE();
+
+ if (!tm_is_display_index_valid(tm, display_index, __func__))
+ return;
+
+ display_path = tm_get_display_path_at_index(tm, display_index);
+
+ stereo_sync = dal_display_path_get_stereo_sync_object(display_path);
+
+ if (stereo_sync == NULL)
+ return;
+
+ /* Find encoder RESOURCE which matches stereosync object in the
+ * display path. */
+ stereo_resource = tm_resource_mgr_find_resource(tm->tm_rm,
+ dal_encoder_get_graphics_object_id(stereo_sync));
+
+ if (stereo_resource != NULL) {
+
+ tm_resource_mgr_ref_counter_decrement(tm->tm_rm,
+ stereo_resource);
+
+ /* Optimisation - if refCount > 0 then stereosync
+ * encoder points to encoder on the acquired
+ * display path.
+ * In this case no cofunctional paths changed. */
+ recache_needed =
+ (stereo_resource->flags.display_path_resource
+ &&
+ !TM_RES_REF_CNT_GET(stereo_resource));
+
+ /* Once reference count falls to 0 - we need to
+ * power down the object. */
+ if (TM_RES_REF_CNT_GET(stereo_resource) == 0)
+ dal_encoder_disable_sync_output(stereo_sync);
+ }
+
+ /* Remove stereosync object from display path (need to be done before
+ * we recache cofunctional paths, but after we disable
+ * stereo in HWSS) */
+ dal_display_path_set_stereo_sync_object(display_path, NULL);
+
+ /* Recalculate cofunctional sets the next time it is required
+ * (need to be done after we remove stereo object from the path). */
+ if (recache_needed)
+ tm->valid_cofunc_sets = false;
+}
+
+/** Returns stereo ranking (higher value = higher priority) for given
+ * display path */
+uint32_t dal_tm_get_display_path_stereo_priority(
+ struct topology_mgr *tm,
+ uint32_t display_index,
+ bool display_stereo_active)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return 0;
+}
+
+/** Acquire sync-output resources on display path (the display path itself
+ * should be already acquired) */
+enum tm_result dal_tm_attach_sync_output_to_display_path(
+ struct topology_mgr *tm,
+ uint32_t display_index,
+ enum sync_source sync_output)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return TM_RESULT_FAILURE;
+}
+
+/**
+ * Detaches sync-output resources from a display path (the display path
+ * itself is expected to be already acquired).
+ * If no sync-output resource is attached to the display path, this
+ * function does nothing.
+ * If the sync-output resource is no longer used, we power it down.
+ *
+ * \param [in] display_index: Index of display path from which sync-output
+ * resource should be detached.
+ */
+void dal_tm_detach_sync_output_from_display_path(
+ struct topology_mgr *tm,
+ uint32_t display_index)
+{
+ struct display_path *display_path;
+ struct encoder *sync_output_object;
+ enum sync_source sync_output;
+ bool turn_off_sync_output = false;
+ bool recache_needed = false;
+ struct tm_resource *sync_output_rsrc;
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_IFACE_TRACE();
+
+ if (!tm_is_display_index_valid(tm, display_index, __func__))
+ return;
+
+ display_path = tm_get_display_path_at_index(tm, display_index);
+
+ sync_output_object = dal_display_path_get_sync_output_object(
+ display_path);
+
+ sync_output = dal_display_path_get_sync_output_source(display_path);
+
+ if (sync_output >= SYNC_SOURCE_GSL_IO_FIRST &&
+ sync_output <= SYNC_SOURCE_GSL_IO_LAST)
+ turn_off_sync_output = true;
+
+ if (sync_output_object != NULL) {
+ /* Find encoder RESOURCE which matches the sync-output object in
+ * the display path. */
+ sync_output_rsrc = tm_resource_mgr_find_resource(tm->tm_rm,
+ dal_encoder_get_graphics_object_id(sync_output_object));
+
+ if (sync_output_rsrc != NULL) {
+
+ tm_resource_mgr_ref_counter_decrement(tm->tm_rm,
+ sync_output_rsrc);
+
+ /* Optimisation - if refCount > 0 then
+ * syncoutput encoder points to encoder on the
+ * acquired display path.
+ * In this case no cofunctional paths
+ * changed. */
+ recache_needed =
+ (sync_output_rsrc->
+ flags.display_path_resource &&
+ TM_RES_REF_CNT_GET(
+ sync_output_rsrc) == 0);
+
+
+ /* Once reference count falls to 0 - we need to power
+ * down the object. */
+ if (TM_RES_REF_CNT_GET(sync_output_rsrc) == 0)
+ turn_off_sync_output = true;
+ }
+ }
+
+ /* Turn off sync-output resources */
+ if (turn_off_sync_output)
+ dal_hw_sequencer_disable_sync_output(tm->hwss_srvr,
+ display_path);
+
+ /* Remove sync-output object from display path (need to be done
+ * before we re-cache co-functional paths, but after we disable
+ * sync-output in HWSS). */
+ dal_display_path_set_sync_output_object(display_path,
+ SYNC_SOURCE_NONE, NULL);
+
+ /* Recalculate cofunctional sets the next time it is required
+ * (need to be done after we remove sync-output object from the path)*/
+ if (recache_needed)
+ tm->valid_cofunc_sets = false;
+}
+
+/** Moves sync-output resources from one display path to other */
+enum tm_result dal_tm_move_sync_output_object(struct topology_mgr *tm,
+ uint32_t src_display_index,
+ uint32_t tgt_display_index)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return TM_RESULT_FAILURE;
+}
+
+/** Convert Display Path Interface to display index */
+uint32_t dal_tm_display_path_to_display_index(
+ struct topology_mgr *tm,
+ struct display_path *display_path)
+{
+ uint32_t ind;
+ uint32_t display_paths_num = tm_get_display_path_count(tm);
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_IFACE_TRACE();
+
+ for (ind = 0; ind < display_paths_num; ind++) {
+ if (display_path == tm_get_display_path_at_index(tm, ind)) {
+ /* found it */
+ return ind;
+ }
+ }
+
+ return INVALID_DISPLAY_INDEX;
+}
+
+/** Convert display index to DisplayPathInterface*/
+struct display_path *dal_tm_display_index_to_display_path(
+ struct topology_mgr *tm,
+ uint32_t display_index)
+{
+ return tm_get_display_path_at_index(tm, display_index);
+}
+
+uint32_t dal_tm_get_wireless_display_index(
+ struct topology_mgr *tm)
+{
+ uint32_t i;
+ struct display_path *disp_path = NULL;
+ enum signal_type signal;
+ uint32_t num_of_display_path = dal_tm_get_num_display_paths(tm, false);
+
+ for (i = 0; i < num_of_display_path; i++) {
+ disp_path = dal_tm_display_index_to_display_path(tm, i);
+ signal = dal_display_path_get_query_signal(
+ disp_path, SINK_LINK_INDEX);
+ if (signal == SIGNAL_TYPE_WIRELESS)
+ return i;
+ }
+
+ return INVALID_DISPLAY_INDEX;
+}
+
+
+/************************************
+Display combinations handlers/queries
+*************************************/
+/** Gets the number of available display paths */
+uint32_t dal_tm_get_num_display_paths(struct topology_mgr *tm,
+ bool display_targets_only)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ /* TODO: add code for 'targets only' */
+
+ TM_IFACE_TRACE();
+
+ return tm_get_display_path_count(tm);
+}
+
+/** Query the max number of display paths (excluding CF paths) that can be
+ * enabled simultaneously */
+uint32_t dal_tm_max_num_cofunctional_targets(struct topology_mgr *tm)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ /* TODO: implement multidisplay */
+
+ TM_IFACE_TRACE();
+
+ return 1; /* currently only one display is supported */
+}
+
+/** Queries the number of connected displays that support audio */
+uint32_t dal_tm_get_num_connected_audio_displays(struct topology_mgr *tm)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return 0;
+}
+
+static bool is_display_index_array_valid(
+ struct topology_mgr *tm,
+ const uint32_t display_index_array[],
+ uint32_t array_size)
+{
+ struct dal_context *dal_context = tm->dal_context;
+ uint32_t i;
+
+ if (array_size > tm_get_display_path_count(tm)) {
+ TM_ERROR("%s: array_size=%d greater than maximum=%d!\n",
+ __func__, array_size,
+ tm_get_display_path_count(tm));
+ return false;
+ }
+
+ for (i = 0; i < array_size; i++) {
+ if (!tm_get_display_path_at_index(tm, display_index_array[i])) {
+ TM_ERROR(
+ "%s: display_index_array contains invalid index=%d!\n",
+ __func__, display_index_array[i]);
+ return false;
+ }
+ }
+
+ /* array is valid */
+ return true;
+}
+
+/** Request if the specified array of DisplayPaths can be enabled
+ * simultaneously */
+bool dal_tm_can_display_paths_be_enabled_at_the_same_time(
+ struct topology_mgr *tm,
+ const uint32_t display_index_array[],
+ uint32_t array_size)
+{
+ bool ret = true;
+ enum cache_query_result query_result;
+ struct tm_resource_mgr *resource_mgr;
+ struct dal_context *dal_context = tm->dal_context;
+
+ if (!is_display_index_array_valid(tm, display_index_array, array_size))
+ return false;
+
+ /* If the cache of co-functional sets is invalid, recalculate. */
+ if (!tm->valid_cofunc_sets) {
+ dal_invalidate_subsets_cache(tm->display_subsets, true);
+ tm->valid_cofunc_sets = true;
+ }
+
+ query_result = dal_is_subset_supported(
+ tm->display_subsets,
+ display_index_array,
+ array_size);
+
+ if (query_result == CQR_SUPPORTED)
+ ret = true;
+ else if (query_result == CQR_NOT_SUPPORTED)
+ ret = false;
+ else {
+ /*Allocate temporary resources*/
+ resource_mgr = tm_resource_mgr_clone(tm->tm_rm);
+ if (resource_mgr == NULL) {
+
+ TM_ERROR("%s: Failed to clone resources", __func__);
+ /* KK: no way to inform the caller that
+ * there was an internal error, false
+ * is meaningless here!
+ */
+ return false;
+ }
+
+ ret = tm_can_display_paths_be_enabled_at_the_same_time(
+ tm,
+ resource_mgr,
+ display_index_array,
+ array_size);
+
+ if (query_result != CQR_DP_MAPPING_NOT_VALID)
+ dal_set_subset_supported(
+ tm->display_subsets,
+ display_index_array,
+ array_size, ret);
+
+
+ tm_resource_mgr_destroy(&resource_mgr);
+ }
+
+ return ret;
+}
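+
+/* Query-flow note (summarising the logic above): the subsets cache is
+ * consulted first; only on a cache miss is a temporary resource manager
+ * clone created to compute the answer, and the result is written back to
+ * the cache unless the display path mapping was reported invalid. */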
+
+/** Return an array of display indexes sorted according to display
+ * selection priority */
+const uint32_t *dal_tm_get_display_selection_priority_array(
+ struct topology_mgr *tm)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return NULL;
+}
+
+/**
+ * Allocates and duplicates a subset of display paths.
+ * Duplicated display paths are acquired (so all resources within each
+ * display path will be available).
+ * The caller is responsible for deallocating the subset.
+ * This function is used when HW and display path context need to be
+ * accessed in reentrant mode.
+ *
+ * \param [in] display_index_array: indexes of displays in requested subset
+ * \param [in] array_size: size of the above array
+ *
+ * \return Pointer to subset of display paths if succeeded, NULL otherwise
+ */
+struct display_path_set *dal_tm_create_resource_context_for_display_indices(
+ struct topology_mgr *tm,
+ const uint32_t display_index_array[],
+ uint32_t array_size)
+{
+ struct display_path_set *display_path_set;
+ struct tm_resource_mgr *resource_mgr;
+ struct display_path *display_path;
+ uint32_t num_of_display_path;
+ uint32_t i;
+ struct display_path_set_init_data dps_init_data;
+ struct dal_context *dal_context = tm->dal_context;
+
+ if (!is_display_index_array_valid(tm, display_index_array, array_size))
+ return NULL;
+
+ resource_mgr = tm_resource_mgr_clone(tm->tm_rm);
+
+ if (resource_mgr == NULL) {
+ TM_ERROR("%s: Failed to clone resources", __func__);
+ return NULL;
+ }
+
+ /* Validate that the requested displays are cofunctional */
+ if (tm->valid_cofunc_sets) {
+ if (!dal_tm_can_display_paths_be_enabled_at_the_same_time(
+ tm,
+ display_index_array,
+ array_size))
+ goto release_rm;
+ } else {
+ if (!tm_can_display_paths_be_enabled_at_the_same_time(
+ tm,
+ resource_mgr,
+ display_index_array,
+ array_size))
+ goto release_rm;
+ tm_resource_mgr_reset_all_usage_counters(resource_mgr);
+ }
+
+ dps_init_data.dal_context = tm->dal_context;
+ dps_init_data.display_path_num = array_size;
+ display_path_set = dal_display_path_set_create(&dps_init_data);
+
+ if (!display_path_set)
+ goto release_rm;
+
+ /* Copy display paths*/
+ num_of_display_path = tm_get_display_path_count(tm);
+ for (i = 0; i < array_size; i++) {
+
+ if (display_index_array[i] >= num_of_display_path) {
+ TM_ERROR("%s: Invalid display index", __func__);
+ goto release_dps;
+ }
+
+ display_path = tm_get_display_path_at_index(
+ tm,
+ display_index_array[i]);
+
+ if (!dal_display_path_set_add_path(
+ display_path_set, display_path)) {
+ TM_ERROR("%s: AddDisplayPath failed", __func__);
+ goto release_dps;
+ }
+ }
+
+ /* Acquire resources on display paths. Once
+ * acquired (or failed to acquire) we do
+ * not need these resources anymore - it means we can delete
+ * the temporary TM Resource Manager.
+ */
+ for (i = 0; i < array_size; i++) {
+ if (!tm_resource_mgr_acquire_resources(
+ resource_mgr,
+ dal_display_path_set_path_at_index(
+ display_path_set, i),
+ /* Validation of views etc. No need to
+ * change HW state. */
+ TM_ACQUIRE_METHOD_SW)) {
+
+ TM_ERROR("%s: Failed to acquire resources", __func__);
+ goto release_dps;
+ }
+ }
+
+ /* Release temporary resources*/
+ tm_resource_mgr_destroy(&resource_mgr);
+
+ return display_path_set;
+release_dps:
+ dal_display_path_set_destroy(&display_path_set);
+release_rm:
+ /* Release temporary resources*/
+ tm_resource_mgr_destroy(&resource_mgr);
+ return NULL;
+}
+
+void dal_tm_display_path_set_destroy(
+ struct topology_mgr *tm,
+ struct display_path_set **display_path_set)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ /* TODO: call display_path_set_destroy() */
+ TM_NOT_IMPLEMENTED();
+}
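+
+/* Intended pairing (a sketch; note the destroy above is not implemented
+ * yet):
+ *
+ *   struct display_path_set *dps =
+ *       dal_tm_create_resource_context_for_display_indices(
+ *           tm, index_array, array_size);
+ *
+ *   if (dps != NULL) {
+ *       ... reentrant HW/display-path access through the set ...
+ *       dal_tm_display_path_set_destroy(tm, &dps);
+ *   }
+ */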
+
+
+/** return a bit vector of controllers mapped to given array of display
+ * path indexes */
+enum tm_result dal_tm_get_controller_mapping(struct topology_mgr *tm,
+ const uint32_t display_index_array[],
+ uint32_t array_size,
+ bool use_current_mapping,
+ struct display_controller_pair *pairs)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ if (!is_display_index_array_valid(tm, display_index_array, array_size))
+ return TM_RESULT_FAILURE;
+
+ TM_NOT_IMPLEMENTED();
+ return TM_RESULT_FAILURE;
+}
+
+
+/*******************
+Display Path lookup
+********************/
+/** Finds a display path given encoder, connector and signal type */
+struct display_path *dal_tm_find_display_path(
+ struct topology_mgr *tm,
+ struct graphics_object_id encoder_id,
+ struct graphics_object_id connector_id,
+ enum signal_type sig_type)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return NULL;
+}
+
+/** Find display path according to device type */
+struct display_path *dal_tm_find_display_path_with_device_type(
+ struct topology_mgr *tm,
+ enum dal_device_type dev_type)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return NULL;
+}
+
+/** Returns an active display index given a controller handle */
+uint32_t dal_tm_find_display_path_with_controller(
+ struct topology_mgr *tm,
+ uint32_t controller_handle)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return 0;
+}
+
+/* get number of audios which allowed over a dongle */
+static uint32_t get_dvi_audio_number(struct dal_context *dal_context,
+ uint32_t max_num_of_supported_hdmi,
+ uint32_t hdmi_connectors_num,
+ uint32_t dvi_connectors_num)
+{
+ uint32_t dvi_audio_num;
+
+ if (max_num_of_supported_hdmi - hdmi_connectors_num >
+ dvi_connectors_num)
+ dvi_audio_num = dvi_connectors_num;
+ else
+ dvi_audio_num = max_num_of_supported_hdmi - hdmi_connectors_num;
+
+ TM_RESOURCES("dvi_audio_num:%d\n", dvi_audio_num);
+
+ return dvi_audio_num;
+}
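+
+/* Worked example for get_dvi_audio_number() (illustrative numbers only):
+ * with max_num_of_supported_hdmi = 3, hdmi_connectors_num = 1 and
+ * dvi_connectors_num = 4, the remaining HDMI audio budget is 3 - 1 = 2,
+ * which is less than the 4 DVI connectors, so dvi_audio_num = 2. */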
+
+/**
+ * co_func_audio_endpoint = MINIMUM( NumberOfPhysicalAudioEnds,
+ * NumberofDisplayPipe,
+ * NumberOfAudioCapableConnectors).
+ *
+ * Here NumberOfAudioCapableConnectors = number of HDMI connectors +
+ * NumberofDPConnectors*4 (in MST mode each DP can drive up to 4 streams) +
+ * (if (Wireless capable) ? 1 : 0).
+ */
+static uint32_t get_number_of_audio_capable_display_paths(
+ struct topology_mgr *tm)
+{
+ uint32_t audio_capable_path_num = 0;
+ uint32_t hdmi_connectors_num = 0;
+ uint32_t dvi_connectors_num = 0;
+ uint32_t paths_per_mst_connector;
+ uint32_t i;
+ struct tm_resource *tm_resource;
+ union audio_support audio_support;
+ struct dal_context *dal_context = tm->dal_context;
+ const struct tm_resource_range *connectors;
+
+ paths_per_mst_connector =
+ dal_adapter_service_get_num_of_path_per_dp_mst_connector(
+ tm->adapter_srv);
+
+ if (paths_per_mst_connector == 0) {
+ /* has to be at least one path - for SST mode */
+ paths_per_mst_connector = 1;
+ }
+
+ connectors =
+ dal_tmrm_get_resource_range_by_type(
+ tm->tm_rm,
+ OBJECT_TYPE_CONNECTOR);
+
+ for (i = connectors->start; i < connectors->end; i++) {
+ struct graphics_object_id object_id;
+
+ tm_resource = tm_resource_mgr_enum_resource(tm->tm_rm, i);
+
+ object_id = GRPH_ID(tm_resource);
+
+ switch (object_id.id) {
+ case CONNECTOR_ID_HDMI_TYPE_A:
+ audio_capable_path_num++;
+ hdmi_connectors_num++;
+ break;
+ case CONNECTOR_ID_DISPLAY_PORT:
+ /* consider DP connector as DP MST connector */
+ audio_capable_path_num += paths_per_mst_connector;
+ break;
+ case CONNECTOR_ID_DUAL_LINK_DVII:
+ case CONNECTOR_ID_DUAL_LINK_DVID:
+ case CONNECTOR_ID_SINGLE_LINK_DVID:
+ case CONNECTOR_ID_SINGLE_LINK_DVII:
+ dvi_connectors_num++;
+ break;
+ default:
+ /* irrelevant from audio point of view */
+ break;
+ }
+ } /* for () */
+
+ audio_support = dal_adapter_service_get_audio_support(
+ tm->adapter_srv);
+
+ /* If discrete ASIC, allow DVI-HDMI dongle.
+ * Note: APU does not support DVI-HDMI dongle. */
+ if (dal_adapter_service_is_fusion(tm->adapter_srv)) {
+ /* Check feature flag for wireless, here can not use ASIC CAP.
+ * currently wireless only enabled for APU. */
+
+ if (true == dal_adapter_service_is_feature_supported(
+ FEATURE_WIRELESS_ENABLE)) {
+ /* value exist and is set to true */
+ audio_capable_path_num++;
+ }
+ } else if (audio_support.bits.HDMI_AUDIO_ON_DONGLE == 1) {
+
+ uint32_t max_num_of_supported_hdmi = 1;
+ uint32_t dvi_audio_num;/* this is "audio over dongle" number */
+
+ if (true == dal_adapter_service_get_feature_value(
+ FEATURE_SUPPORTED_HDMI_CONNECTION_NUM,
+ &max_num_of_supported_hdmi,
+ sizeof(max_num_of_supported_hdmi))) {
+
+ if (max_num_of_supported_hdmi > hdmi_connectors_num) {
+ /* We support DVI->HDMI dongle if strapping is
+ * set.
+ * The same strapping bit allows DP/HDMI
+ * audio over the dongle.
+ */
+ dvi_audio_num = get_dvi_audio_number(
+ tm->dal_context,
+ max_num_of_supported_hdmi,
+ hdmi_connectors_num,
+ dvi_connectors_num);
+ audio_capable_path_num += dvi_audio_num;
+ }
+ }
+ }
+
+ TM_RESOURCES("audio_capable_path_num:%d\n", audio_capable_path_num);
+
+ return audio_capable_path_num;
+}
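+
+/* Worked example for the counting above (illustrative numbers only): an
+ * ASIC with 2 HDMI and 2 DP connectors, with 4 paths per DP MST
+ * connector, starts at audio_capable_path_num = 2 + 2 * 4 = 10, before
+ * the wireless and DVI->HDMI dongle adjustments are applied. */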
+
+static void tm_update_audio_connectivity(struct topology_mgr *tm)
+{
+ uint32_t audio_capable_path_num =
+ get_number_of_audio_capable_display_paths(tm);
+
+ dal_adapter_service_update_audio_connectivity(tm->adapter_srv,
+ audio_capable_path_num);
+}
+
+/**
+ * For each Controller:
+ * - disable power gating and save power gating state,
+ * - power-up Controller,
+ * - enable clock gating.
+ */
+static void tm_reset_controllers(struct topology_mgr *tm)
+{
+ uint32_t i;
+ uint32_t controller_index = 0;
+ struct tm_resource *tm_resource;
+ struct tm_resource_controller_info *controller_info;
+ struct controller *controller;
+ struct dal_asic_runtime_flags asic_runtime_flags;
+ const struct tm_resource_range *controllers;
+
+ asic_runtime_flags = dal_adapter_service_get_asic_runtime_flags(
+ tm->adapter_srv);
+
+ controllers =
+ dal_tmrm_get_resource_range_by_type(
+ tm->tm_rm,
+ OBJECT_TYPE_CONTROLLER);
+
+ for (i = controllers->start; i < controllers->end; i++) {
+
+ tm_resource = tm_resource_mgr_enum_resource(tm->tm_rm, i);
+
+ controller_info = TO_CONTROLLER_INFO(tm_resource);
+
+ controller = controller_info->controller;
+
+ if (controller_index == 0 &&
+ asic_runtime_flags.bits.GNB_WAKEUP_SUPPORTED == 1)
+ dal_controller_power_gating_enable(controller,
+ PIPE_GATING_CONTROL_INIT);
+
+ dal_controller_power_gating_enable(controller,
+ PIPE_GATING_CONTROL_DISABLE);
+
+ controller_info->power_gating_state = TM_POWER_GATE_STATE_OFF;
+
+ dal_controller_power_up(controller);
+
+ dal_controller_enable_display_pipe_clock_gating(controller,
+ true);
+
+ ++controller_index;
+ }
+}
+
+/**
+ * Initialises encoder context structure with connector information and
+ * other related information.
+ * The 'context' is for encoder, which is an 'upstream' object
+ * relative to 'link_index'.
+ *
+ * \param [in] display_path: Display path to which encoder belongs
+ * \param [in] link_idx: index of link of encoder which context to initialise
+ * \param [out] encoder_context: Encoder context to initialise
+ */
+static void tm_build_encoder_context(
+ struct dal_context *dal_context,
+ struct display_path *display_path,
+ uint32_t link_idx,
+ struct encoder_context *encoder_context)
+{
+ struct connector_feature_support cfs;
+ struct encoder *dwn_strm_encoder;
+ struct connector *connector;
+ struct graphics_object_id connector_obj_id;
+
+ TM_ASSERT(display_path != NULL);
+ TM_ASSERT(encoder_context != NULL);
+
+ connector = dal_display_path_get_connector(display_path);
+
+ dal_connector_get_features(connector, &cfs);
+
+ connector_obj_id = dal_connector_get_graphics_object_id(connector);
+
+ encoder_context->connector = connector_obj_id;
+
+ encoder_context->hpd_source = cfs.hpd_line;
+ encoder_context->channel = cfs.ddc_line;
+ encoder_context->signal = dal_display_path_get_query_signal(
+ display_path, link_idx);
+
+ encoder_context->engine = dal_display_path_get_stream_engine(
+ display_path, link_idx);
+
+ dwn_strm_encoder = dal_display_path_get_downstream_object(display_path,
+ link_idx);
+
+ if (dwn_strm_encoder != NULL) {
+ /* get the id of downstream encoder */
+ encoder_context->downstream =
+ dal_encoder_get_graphics_object_id(dwn_strm_encoder);
+ } else {
+ /* downstream object is connector */
+ encoder_context->downstream = connector_obj_id;
+ }
+
+ /* If this encoder doesn't have engine set (because not active),
+ * we try its native engine. */
+ if (encoder_context->engine == ENGINE_ID_UNKNOWN) {
+ struct encoder *this_encoder =
+ dal_display_path_get_upstream_object(display_path,
+ link_idx);
+
+ encoder_context->engine =
+ dal_encoder_get_preferred_stream_engine(this_encoder);
+ }
+}
+
+/**
+ * The function returns the priority of a display path from the encoder
+ * context perspective.
+ *
+ * \param [in] display_path: Display path which contains requested encoder
+ *
+ * \return Encoder context priority
+ */
+static enum tm_encoder_ctx_priority tm_get_encoder_ctx_priority(
+ struct display_path *display_path)
+{
+ enum tm_encoder_ctx_priority priority = TM_ENCODER_CTX_PRIORITY_DEFAULT;
+ bool acquired = dal_display_path_is_acquired(display_path);
+ bool connected = dal_display_path_is_target_connected(display_path);
+
+ if (acquired) {
+ if (connected)
+ priority = TM_ENCODER_CTX_PRIORITY_ACQUIRED_CONNECTED;
+ else
+ priority = TM_ENCODER_CTX_PRIORITY_ACQUIRED;
+ } else {
+ if (connected)
+ priority = TM_ENCODER_CTX_PRIORITY_CONNECTED;
+ }
+
+ return priority;
+}
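+
+/* The resulting mapping (restating the logic above):
+ *
+ *   acquired | connected | priority
+ *   ---------+-----------+--------------------------------------------
+ *      yes   |    yes    | TM_ENCODER_CTX_PRIORITY_ACQUIRED_CONNECTED
+ *      yes   |    no     | TM_ENCODER_CTX_PRIORITY_ACQUIRED
+ *      no    |    yes    | TM_ENCODER_CTX_PRIORITY_CONNECTED
+ *      no    |    no     | TM_ENCODER_CTX_PRIORITY_DEFAULT
+ */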
+
+/**
+ * Power up an encoder.
+ * If the encoder is on an active display path, update (or set up) the
+ * encoder implementation for the display path.
+ * The implementation will be updated only for the "highest priority"
+ * context. In the end we need to set up the current implementation.
+ *
+ * \param [in] tm_resource: resource of encoder which is to be powered up
+ */
+static void tm_power_up_encoder(struct topology_mgr *tm,
+ struct encoder *enc_input)
+{
+ struct encoder *enc_upstrm;
+ struct display_path *active_display_path = NULL;
+ struct display_path *display_path;
+ uint32_t active_link_idx = 0;
+ enum tm_encoder_ctx_priority best_priority =
+ TM_ENCODER_CTX_PRIORITY_INVALID;
+ struct graphics_object_id enc_input_obj_id =
+ dal_encoder_get_graphics_object_id(enc_input);
+ struct graphics_object_id enc_upstrm_obj_id;
+ uint32_t i;
+ uint32_t link_idx;
+ uint32_t num_of_links;
+ uint32_t display_paths_num = tm_get_display_path_count(tm);
+ struct encoder_context context;
+ enum tm_encoder_ctx_priority priority;
+ uint32_t display_path_index;
+ struct dal_context *dal_context = tm->dal_context;
+
+ /* Find all paths in which "enc_input" is active. */
+ for (i = 0; i < display_paths_num; i++) {
+
+ display_path = tm_get_display_path_at_index(tm, i);
+ num_of_links = dal_display_path_get_number_of_links(
+ display_path);
+ display_path_index = dal_display_path_get_display_index(
+ display_path);
+
+ /* Check if "enc_input" is in on link of a display path. */
+ for (link_idx = 0; link_idx < num_of_links; link_idx++) {
+
+ enc_upstrm = dal_display_path_get_upstream_object(
+ display_path, link_idx);
+
+ enc_upstrm_obj_id =
+ dal_encoder_get_graphics_object_id(enc_upstrm);
+
+ if (false == dal_graphics_object_id_is_equal(
+ enc_upstrm_obj_id,
+ enc_input_obj_id)) {
+ /* go to next link */
+ continue;
+ }
+
+ /* Found Link Index for "enc_input". */
+ dal_memset(&context, 0, sizeof(context));
+ context.engine = ENGINE_ID_UNKNOWN;
+
+ priority = tm_get_encoder_ctx_priority(display_path);
+
+ if (priority > best_priority) {
+ best_priority = priority;
+ active_display_path = display_path;
+ active_link_idx = link_idx;
+ }
+
+ tm_build_encoder_context(tm->dal_context,
+ display_path, link_idx, &context);
+
+ if (ENCODER_RESULT_OK != dal_encoder_power_up(
+ enc_input, &context)) {
+ /* TODO: should we return error to caller? */
+ TM_ERROR("%s: failed encoder power up!\n",
+ __func__);
+ }
+
+ TM_ENCODER_CTL("%s:[PowerUp]: %s, Path=%u,"
+ " Link=%u, Engine=%s, Signal=%s",
+ __func__,
+ tm_utils_transmitter_id_to_str(
+ dal_encoder_get_graphics_object_id(
+ enc_input)),
+ display_path_index,
+ link_idx,
+ tm_utils_engine_id_to_str(context.engine),
+ tm_utils_signal_type_to_str(context.signal));
+
+ } /* for() */
+ } /* for() */
+
+ /* Update encoder implementation on ACTIVE display path */
+ if (NULL != active_display_path &&
+ best_priority > TM_ENCODER_CTX_PRIORITY_DEFAULT) {
+
+ dal_memset(&context, 0, sizeof(context));
+ context.engine = ENGINE_ID_UNKNOWN;
+
+ tm_build_encoder_context(tm->dal_context, active_display_path,
+ active_link_idx, &context);
+
+ if (ENCODER_RESULT_OK != dal_encoder_update_implementation(
+ enc_input, &context)) {
+ /* TODO: should we return error to caller? */
+ TM_ERROR("%s: failed to update encoder implementation!\n",
+ __func__);
+ }
+
+ TM_ENCODER_CTL("%s:[UpdateImpl]: %s, on Active Path=%u, Link=%u, Engine=%s, Signal=%s",
+ __func__,
+ tm_utils_transmitter_id_to_str(
+ dal_encoder_get_graphics_object_id(
+ enc_input)),
+ dal_display_path_get_display_index(active_display_path),
+ active_link_idx,
+ tm_utils_engine_id_to_str(context.engine),
+ tm_utils_signal_type_to_str(context.signal));
+ }
+}
+
+static void tm_power_up_encoders(struct topology_mgr *tm)
+{
+ uint32_t i;
+ struct tm_resource *tm_resource;
+ struct encoder_context context;
+ struct encoder *encoder;
+ struct dal_context *dal_context = tm->dal_context;
+ const struct tm_resource_range *encoders =
+ dal_tmrm_get_resource_range_by_type(
+ tm->tm_rm,
+ OBJECT_TYPE_ENCODER);
+
+ /* Power Up encoders.
+ * Order is important - from first (internal) to last (external).
+ * The order is enforced by resource list being sorted according to
+ * priorities (in tm_rm_add_tm_resource()). */
+ for (i = encoders->start; i < encoders->end; i++) {
+ tm_resource = tm_resource_mgr_enum_resource(tm->tm_rm, i);
+
+ if (!tm_resource->flags.resource_active)
+ continue;
+
+ encoder = TO_ENCODER(tm_resource);
+
+ if (tm_resource->flags.display_path_resource) {
+ /* power up AND update implementation */
+ tm_power_up_encoder(tm, encoder);
+ } else {
+ /* only power up */
+ dal_memset(&context, 0, sizeof(context));
+ context.engine = ENGINE_ID_UNKNOWN;
+
+ if (ENCODER_RESULT_OK != dal_encoder_power_up(
+ encoder, &context)) {
+ /* TODO: should we return error to caller? */
+ TM_ERROR("%s: failed encoder power up!\n",
+ __func__);
+ }
+ }
+ } /* for () */
+}
+
+/**
+ * This is called for all Encoders, for Shutdown and Standby.
+ * We need to do powerDown once per PHY, but we need to find the best
+ * encoder context for this.
+ * In case we could not fetch an engine from the context, we will try to
+ * power down the encoder with all supported engines.
+ *
+ * \param [in] tm_resource: resource of encoder which is to be powered down
+ */
+static void tm_power_down_encoder(struct topology_mgr *tm,
+ struct encoder *enc_input,
+ bool turn_off_vcc)
+{
+ struct encoder *enc_upstrm;
+ struct display_path *active_display_path = NULL;
+ struct display_path *display_path;
+ uint32_t active_link_idx = 0;
+ enum tm_encoder_ctx_priority best_priority =
+ TM_ENCODER_CTX_PRIORITY_INVALID;
+ struct graphics_object_id enc_input_obj_id =
+ dal_encoder_get_graphics_object_id(enc_input);
+ struct graphics_object_id enc_upstrm_obj_id;
+ uint32_t i;
+ uint32_t link_idx;
+ uint32_t num_of_links;
+ uint32_t display_paths_num = tm_get_display_path_count(tm);
+ enum tm_encoder_ctx_priority priority;
+ bool powered_down = false;
+ struct encoder_output enc_output;
+ union supported_stream_engines engines;
+ struct dcs *dcs;
+ const struct monitor_patch_info *mon_patch_info;
+ struct dal_context *dal_context = tm->dal_context;
+
+ /* Find all paths in which "enc_input" is active. */
+ for (i = 0; i < display_paths_num; i++) {
+
+ display_path = tm_get_display_path_at_index(tm, i);
+
+ num_of_links = dal_display_path_get_number_of_links(
+ display_path);
+
+ /* Check if "enc_input" is on a link of a display path. */
+ for (link_idx = 0; link_idx < num_of_links; link_idx++) {
+
+ enc_upstrm = dal_display_path_get_upstream_object(
+ display_path, link_idx);
+
+ enc_upstrm_obj_id =
+ dal_encoder_get_graphics_object_id(enc_upstrm);
+
+ if (false == dal_graphics_object_id_is_equal(
+ enc_upstrm_obj_id,
+ enc_input_obj_id)) {
+ /* go to next link */
+ continue;
+ }
+
+ /* Found Link Index for "enc_input". */
+ priority = tm_get_encoder_ctx_priority(display_path);
+
+ if (priority > best_priority) {
+ best_priority = priority;
+ active_display_path = display_path;
+ active_link_idx = link_idx;
+ }
+
+ if (priority == TM_ENCODER_CTX_PRIORITY_HIGHEST) {
+ /* No need to continue search because found
+ * the highest priority. */
+ break;
+ }
+
+ } /* for() */
+ } /* for() */
+
+ if (NULL == active_display_path) {
+ /* If not on an active path, no need to power down. */
+ return;
+ }
+
+ dal_memset(&enc_output, 0, sizeof(enc_output));
+ engines.u_all = 0;
+
+ /* Build context for power down */
+ tm_build_encoder_context(tm->dal_context, active_display_path,
+ active_link_idx, &enc_output.ctx);
+
+ dcs = dal_display_path_get_dcs(active_display_path);
+
+ mon_patch_info = dal_dcs_get_monitor_patch_info(dcs,
+ MONITOR_PATCH_TYPE_KEEP_DP_RECEIVER_POWERED);
+
+ if (NULL != mon_patch_info)
+ enc_output.flags.bits.KEEP_RECEIVER_POWERED =
+ mon_patch_info->param;
+ else
+ enc_output.flags.bits.KEEP_RECEIVER_POWERED = false;
+
+ mon_patch_info = dal_dcs_get_monitor_patch_info(dcs,
+ MONITOR_PATCH_TYPE_VID_STREAM_DIFFER_TO_SYNC);
+
+ if (NULL != mon_patch_info)
+ enc_output.flags.bits.VID_STREAM_DIFFER_TO_SYNC =
+ mon_patch_info->param;
+ else
+ enc_output.flags.bits.VID_STREAM_DIFFER_TO_SYNC = false;
+
+ enc_output.flags.bits.TURN_OFF_VCC = turn_off_vcc;
+ enc_output.flags.bits.NO_WAIT_FOR_HPD_LOW = false;
+
+ /*************************/
+ /* get supported engines */
+ engines = dal_encoder_get_supported_stream_engines(enc_input);
+
+ /* In case we could not fetch engine from context, but we know this
+ * encoder supports at least one engine. */
+ if (enc_output.ctx.engine == ENGINE_ID_UNKNOWN && engines.u_all > 0) {
+
+ enum engine_id first_valid_engine = ENGINE_ID_UNKNOWN;
+
+ /* Try to power down encoder with all supported engines
+ * which were not yet powered down. */
+ for (i = 0; i < ENGINE_ID_COUNT; i++) {
+
+ if (!tm_utils_is_supported_engine(engines, i)) {
+ /* not a supported engine */
+ continue;
+ }
+
+ if (first_valid_engine == ENGINE_ID_UNKNOWN)
+ first_valid_engine = i;
+
+ enc_output.ctx.engine = i;
+
+ if (ENCODER_RESULT_OK != dal_encoder_power_down(
+ enc_input, &enc_output))
+ TM_ERROR("%s: encoder power down failed (1)!\n",
+ __func__);
+
+ TM_ENCODER_CTL(
+ "TM Encoder PowerDown [Supported Engine]: %s, Active Path=%u, Link=%u, Engine=%s, Signal=%s",
+ tm_utils_transmitter_id_to_str(
+ enc_input_obj_id),
+ dal_display_path_get_display_index(
+ active_display_path),
+ active_link_idx,
+ tm_utils_engine_id_to_str(
+ enc_output.ctx.engine),
+ tm_utils_signal_type_to_str(
+ enc_output.ctx.signal));
+
+ powered_down = true;
+ } /* for () */
+
+ /* If we did NOT power down the encoder at all, it means all
+ * supported engines were already powered down and we need to
+ * care about the transmitter only.
+ * That means we can use any engine (the first one is good
+ * enough). */
+ if (!powered_down && first_valid_engine != ENGINE_ID_UNKNOWN) {
+
+ enc_output.ctx.engine = first_valid_engine;
+
+ if (ENCODER_RESULT_OK != dal_encoder_power_down(
+ enc_input, &enc_output))
+ TM_ERROR("%s: encoder power down failed (2)!\n",
+ __func__);
+
+ TM_ENCODER_CTL(
+ "TM Encoder PowerDown [1st Valid Engine]: %s, Active Path=%u, Link=%u, Engine=%s, Signal=%s",
+ tm_utils_transmitter_id_to_str(
+ enc_input_obj_id),
+ dal_display_path_get_display_index(
+ active_display_path),
+ active_link_idx,
+ tm_utils_engine_id_to_str(
+ enc_output.ctx.engine),
+ tm_utils_signal_type_to_str(
+ enc_output.ctx.signal));
+
+ powered_down = true;
+ }
+ } /* if() */
+
+ /* Either the engine was initially valid (it had a real context), or
+ * no engine is required/supported by this encoder. */
+ if (!powered_down) {
+
+ if (ENCODER_RESULT_OK != dal_encoder_power_down(
+ enc_input, &enc_output))
+ TM_ERROR("%s: encoder power down failed (3)!\n",
+ __func__);
+
+ TM_ENCODER_CTL(
+ "TM Encoder PowerDown [Input Engine]: %s, Active Path=%u, Link=%u, Engine=%s, Signal=%s",
+ tm_utils_transmitter_id_to_str(
+ enc_input_obj_id),
+ dal_display_path_get_display_index(
+ active_display_path),
+ active_link_idx,
+ tm_utils_engine_id_to_str(
+ enc_output.ctx.engine),
+ tm_utils_signal_type_to_str(
+ enc_output.ctx.signal));
+ }
+}
+
+static void tm_power_down_encoders(struct topology_mgr *tm)
+{
+ uint32_t i;
+ struct tm_resource *tm_resource;
+ bool turn_off_vcc = true;
+
+ const struct tm_resource_range *encoders =
+ dal_tmrm_get_resource_range_by_type(
+ tm->tm_rm,
+ OBJECT_TYPE_ENCODER);
+
+ /* Power Down encoders.
+ * Order is important - from last (external) to first (internal).
+ * The order is enforced by resource list being sorted according to
+ * priorities (in tm_rm_add_tm_resource()). */
+ i = encoders->end;
+
+ do {
+ i--;
+
+ tm_resource = tm_resource_mgr_enum_resource(tm->tm_rm, i);
+
+ if (!tm_resource->flags.resource_active)
+ continue;
+
+ /*TODO: do_not_turn_off_vcc will be updated properly after
+ * implementing the optimization code
+ */
+ tm_power_down_encoder(
+ tm,
+ TO_ENCODER(tm_resource),
+ turn_off_vcc);
+
+ } while (i != encoders->start);
+}
+
+static void tm_power_down_controller(struct topology_mgr *tm,
+ struct tm_resource *tm_resource)
+{
+ struct dal_context *dal_context = tm->dal_context;
+ struct tm_resource_controller_info *info =
+ TO_CONTROLLER_INFO(tm_resource);
+
+ if (info->power_gating_state != TM_POWER_GATE_STATE_ON) {
+ /* No power gating means power is on and it is OK to
+ * access the controller. */
+ dal_controller_power_down(info->controller);
+ } else {
+ /* Resource is power gated and we cannot
+ * access it to PowerDown(). */
+ TM_PWR_GATING("Could not PowerDown Controller Id:%d because it is power gated.",
+ dal_controller_get_graphics_object_id(
+ info->controller));
+ }
+}
+
+/* We should not power down all controllers because we cannot do
+ * this with power-gated tiles. */
+static void tm_power_down_controllers(struct topology_mgr *tm)
+{
+ uint32_t i;
+ struct tm_resource *tm_resource;
+
+ const struct tm_resource_range *controllers =
+ dal_tmrm_get_resource_range_by_type(
+ tm->tm_rm,
+ OBJECT_TYPE_CONTROLLER);
+
+ for (i = controllers->start; i < controllers->end; i++) {
+
+ tm_resource = tm_resource_mgr_enum_resource(tm->tm_rm, i);
+
+ tm_power_down_controller(tm, tm_resource);
+
+ } /* for () */
+}
+
+static void tm_power_down_clock_sources(struct topology_mgr *tm)
+{
+ uint32_t i;
+ struct tm_resource *tm_resource;
+ enum controller_id first_controller_id = CONTROLLER_ID_UNDEFINED;
+ const struct tm_resource_range *resources =
+ dal_tmrm_get_resource_range_by_type(
+ tm->tm_rm,
+ OBJECT_TYPE_CONTROLLER);
+
+ tm_resource =
+ tm_resource_mgr_enum_resource(tm->tm_rm, resources->start);
+ first_controller_id = GRPH_ID(tm_resource).id;
+
+ resources =
+ dal_tmrm_get_resource_range_by_type(
+ tm->tm_rm,
+ OBJECT_TYPE_CLOCK_SOURCE);
+
+ for (i = resources->start; i < resources->end; i++) {
+
+ tm_resource = tm_resource_mgr_enum_resource(tm->tm_rm, i);
+
+ dal_clock_source_power_down_pll(
+ TO_CLOCK_SOURCE(tm_resource),
+ first_controller_id);
+ }
+}
+
+static void tm_power_down_update_all_display_path_logical_power_state(
+ struct topology_mgr *tm)
+{
+ uint32_t i;
+ uint32_t display_paths_num = tm_get_display_path_count(tm);
+ struct display_path *display_path;
+
+ for (i = 0; i < display_paths_num; i++) {
+
+ display_path = tm_get_display_path_at_index(tm, i);
+
+ /* When we power down all the HW blocks, we must update
+ * the states to unknown.
+ * This ensures the correct HW programming sequence is
+ * performed upon resuming from FS DOS, resume from sleep, or
+ * resume from hibernate. */
+ dal_display_path_set_target_powered_on(display_path,
+ DISPLAY_TRI_STATE_UNKNOWN);
+
+ dal_display_path_set_target_blanked(display_path,
+ DISPLAY_TRI_STATE_UNKNOWN);
+
+ dal_display_path_set_source_blanked(display_path,
+ DISPLAY_TRI_STATE_UNKNOWN);
+ }
+}
+
+/*
+ * tm_can_optimize_resume_sequence
+ *
+ * @brief Check if TM can optimize S3/S4 resume sequence
+ *
+ * @param
+ * feature: for which purpose we want to optimize resume sequence
+ *
+ * @return
+ * true if sequence can be optimized, false otherwise
+ */
+static bool tm_can_optimize_resume_sequence(
+ struct topology_mgr *tm,
+ enum optimization_feature feature)
+{
+
+ if (tm->previous_power_state != DAL_VIDEO_POWER_SUSPEND)
+ return false;
+
+ if (!dal_adapter_service_should_optimize(tm->adapter_srv, feature))
+ return false;
+
+ return true;
+
+}
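+
+/* Note (summarising the checks above): the resume sequence may be
+ * optimised only when resuming from S3 (DAL_VIDEO_POWER_SUSPEND) and the
+ * Adapter Service allows the given optimisation feature. */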
+
+static void power_up_audio_objects(struct topology_mgr *tm)
+{
+ uint32_t i;
+ struct tm_resource *tm_resource;
+ struct audio *audio;
+ struct dal_context *dal_context = tm->dal_context;
+ const struct tm_resource_range *audios =
+ dal_tmrm_get_resource_range_by_type(
+ tm->tm_rm,
+ OBJECT_TYPE_AUDIO);
+
+ for (i = audios->start; i < audios->end; i++) {
+
+ tm_resource = tm_resource_mgr_enum_resource(tm->tm_rm, i);
+
+ if (!tm_resource->flags.resource_active)
+ continue;
+
+ audio = TO_AUDIO(tm_resource);
+
+ if (AUDIO_RESULT_OK != dal_audio_power_up(audio)) {
+ /* TODO: should we return error to caller? */
+ TM_ERROR("%s: failed audio power up!\n", __func__);
+ }
+ } /* for () */
+}
+
+/********************
+Programming sequences
+*********************/
+/** Initialise all HW blocks at boot/resume/tdr, needed for detection
+ * prior to set mode. */
+enum tm_result dal_tm_init_hw(struct topology_mgr *tm)
+{
+ struct bios_parser *bp;
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_IFACE_TRACE();
+
+ /* 0. PowerUp controllers, reset front pipe*/
+ tm_reset_controllers(tm);
+
+ /* 1. PowerUp GPU */
+ dal_gpu_power_up(tm_resource_mgr_get_gpu_interface(tm->tm_rm));
+
+ /* 2. Reinitialise VBIOS wrapper. */
+ bp = dal_adapter_service_get_bios_parser(tm->adapter_srv);
+ dal_bios_parser_power_up(bp);
+
+ /* 3. Updates audio connectivity based on connector,
+ * number of pipes and wireless etc. */
+ tm_update_audio_connectivity(tm);
+
+ /* 4. TODO PowerUp DMCU - This should be done before Encoder */
+
+ /* 5. PowerUp controllers :move to step0 to make sure init front pipe*/
+
+ /* 6. PowerUp encoders. */
+ tm_power_up_encoders(tm);
+
+ /* 7. PowerUp connectors */
+ /* TODO: implement when/if connector powerup() interface is ready. */
+
+ /* 8. PowerUp audios */
+ power_up_audio_objects(tm);
+
+ /* 9. Initialise detection-related HW */
+ dal_tm_detection_mgr_init_hw(tm->tm_dm);
+
+ /* 10. notify all LS that HW has been init'ed so LS can act accordingly
+ * if required. */
+ tm_resource_mgr_invalidate_link_services(tm->tm_rm);
+
+ dal_bios_parser_set_scratch_acc_mode_change(bp);
+
+ /* */
+ tm->hw_power_down_required =
+ !tm_can_optimize_resume_sequence(
+ tm,
+ OF_SKIP_RESET_OF_ALL_HW_ON_S3RESUME);
+
+ return TM_RESULT_SUCCESS;
+}
+
+/**
+ * dal_tm_power_down_hw
+ *
+ * Powers down all HW blocks before the ACPI non-D0 state, in the
+ * following order:
+ * 1. GLSyncConnectors
+ * 2. All remaining HW blocks except GPU
+ * 3. VBIOS
+ * 4. GPU
+ */
+
+enum tm_result dal_tm_power_down_hw(struct topology_mgr *tm)
+{
+ uint32_t i;
+ struct tm_resource *tm_resource = NULL;
+ struct display_path *display_path = NULL;
+ struct dal_context *dal_context = tm->dal_context;
+ uint32_t display_paths_num = tm_get_display_path_count(tm);
+ struct bios_parser *bp = NULL;
+ const struct tm_resource_range *controllers =
+ dal_tmrm_get_resource_range_by_type(
+ tm->tm_rm,
+ OBJECT_TYPE_CONTROLLER);
+
+ enum dal_video_power_state power_state =
+ dal_tm_get_current_power_state(tm);
+
+ /*1. PowerDown GLSync Connectors*/
+ /*TODO: Power Down GLSyncConnector*/
+ TM_NOT_IMPLEMENTED();
+
+ /*2. TODO PowerDown DMCU*/
+
+ /* 3.0 If we are going to S4 or BACO,
+ * then we only need to invalidate states
+ */
+ if (power_state == DAL_VIDEO_POWER_HIBERNATE ||
+ power_state == DAL_VIDEO_POWER_ULPS) {
+
+ for (i = controllers->start; i < controllers->end; i++) {
+
+ tm_resource =
+ tm_resource_mgr_enum_resource(
+ tm->tm_rm, i);
+
+ TO_CONTROLLER_INFO(tm_resource)->power_gating_state =
+ TM_POWER_GATE_STATE_ON;
+ }
+
+ /* Update display logical power state*/
+ for (i = 0; i < display_paths_num; i++) {
+ /* When we power down all the HW blocks,
+ * we must update the states to unknown.
+ * This ensures the correct HW programming
+ * sequence is performed upon resuming from
+ * FS DOS, resume from sleep, or resume
+ * from hibernate.
+ */
+ display_path = tm_get_display_path_at_index(tm, i);
+ dal_display_path_set_target_powered_on(
+ display_path, DISPLAY_TRI_STATE_UNKNOWN);
+ dal_display_path_set_target_blanked(
+ display_path, DISPLAY_TRI_STATE_UNKNOWN);
+ dal_display_path_set_source_blanked(
+ display_path, DISPLAY_TRI_STATE_UNKNOWN);
+ }
+
+ tm->hw_power_down_required = false;
+
+ } else { /* 3.1 Otherwise we need to do full powerdown.*/
+
+ /* 3.1.1 PowerDown all displays paths*/
+ /* use driver code instead of using command table.*/
+ dal_tm_power_down_path_elements(tm);
+
+ /* 3.1.2 Power gating enable for all controllers
+ * We could move this into GPU object
+ */
+ for (i = controllers->start; i < controllers->end; i++) {
+
+ struct controller *controller = NULL;
+
+ tm_resource =
+ tm_resource_mgr_enum_resource(
+ tm->tm_rm, i);
+
+ controller =
+ TO_CONTROLLER_INFO(tm_resource)->controller;
+ dal_controller_enable_display_pipe_clock_gating(
+ controller, false);
+
+ /* if already power gated we do nothing*/
+ if (TO_CONTROLLER_INFO(
+ tm_resource)->power_gating_state !=
+ TM_POWER_GATE_STATE_ON) {
+ dal_controller_power_gating_enable(
+ controller, PIPE_GATING_CONTROL_ENABLE);
+ TO_CONTROLLER_INFO(tm_resource)->
+ power_gating_state =
+ TM_POWER_GATE_STATE_ON;
+ } else
+ TM_WARNING("Controller %d already power gated\n",
+ dal_controller_get_id(controller));
+ }
+ }
+
+ bp = dal_adapter_service_get_bios_parser(tm->adapter_srv);
+ dal_bios_parser_power_down(bp);
+
+ dal_gpu_power_down(
+ tm_resource_mgr_get_gpu_interface(tm->tm_rm),
+ power_state);
+
+ return TM_RESULT_SUCCESS;
+
+}
+
+/** power down HW blocks of the given display paths before the ACPI
+ * non-D0 state */
+enum tm_result dal_tm_power_down_hw_active(struct topology_mgr *tm,
+ const uint32_t display_index_array[],
+ uint32_t array_size)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ if (!is_display_index_array_valid(tm, display_index_array, array_size))
+ return TM_RESULT_FAILURE;
+
+ TM_NOT_IMPLEMENTED();
+ return TM_RESULT_FAILURE;
+}
+
+
+/**
+ * Powers down all HW blocks that compose display paths in the following
+ * order (reverse order from PowerUp):
+ * 1. Audios
+ * 2. Connectors
+ * 3. Routers
+ * 4. Encoders
+ * 5. Controllers
+ * 6. Clock Sources
+ *
+ * \return TM_RESULT_SUCCESS: no error
+ * TM_RESULT_FAILURE: error
+ */
+enum tm_result dal_tm_power_down_path_elements(struct topology_mgr *tm)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_IFACE_TRACE();
+
+ /* 1. Power Down audios */
+ /* TODO: implement when/if audio power_down() interface is ready. */
+
+ /* 2. Power Down connectors */
+ /* TODO: implement when/if connector power_down() interface is ready. */
+
+ /* 3. Power Down encoders. */
+ tm_power_down_encoders(tm);
+
+ /* 4. PowerDown controllers */
+ tm_power_down_controllers(tm);
+
+ /* 5. PowerDown clock sources */
+ tm_power_down_clock_sources(tm);
+
+ tm_power_down_update_all_display_path_logical_power_state(tm);
+
+ /* HW is powered down, update state */
+ tm->hw_power_down_required = false;
+
+ return TM_RESULT_SUCCESS;
+}
+
+/** reset logical state for controllers */
+void dal_tm_reset_vbios_controllers(struct topology_mgr *tm)
+/*ResetControllersForFSDOSToWindows()*/
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+}
+
+/**
+ * "Locks" the path to allow exclusive detection.
+ * This lock is not 100% safe, since no OS mutex/lock is used.
+ * However, it is guaranteed that no deadlock will occur.
+ * It returns the "safe detection method" (i.e. if the lock failed, it is
+ * safe to do only cached detection).
+ *
+ * \param [in] display_path: Display path to lock
+ * \param [in] method: Purpose of lock
+ *
+ * \return Original (requested) method if lock succeeded.
+ *         DETECTION_METHOD_CACHED otherwise.
+ */
+static enum tm_detection_method tm_lock_path(
+ struct topology_mgr *tm,
+ struct display_path *display_path,
+ enum tm_detection_method method)
+{
+ uint32_t display_index;
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_ASSERT(display_path != NULL);
+
+ if (method == DETECTION_METHOD_CACHED) {
+ /* always safe to do cached detection */
+ return method;
+ }
+
+ display_index = dal_display_path_get_display_index(display_path);
+
+ if (!tm_utils_test_bit(&tm->display_detection_mask, display_index)) {
+
+ tm_utils_set_bit(&tm->display_detection_mask, display_index);
+ return method;
+ }
+
+ return DETECTION_METHOD_CACHED;
+}
+
+
+/**
+ * Unlocks the path which was locked to allow exclusive detection.
+ *
+ * \param [in] display_path: Display path to unlock
+ * \param [in] method: Purpose of previous lock
+ */
+static void tm_unlock_path(struct topology_mgr *tm,
+ struct display_path *display_path,
+ enum tm_detection_method method)
+{
+ uint32_t display_index;
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_ASSERT(display_path != NULL);
+
+ if (method == DETECTION_METHOD_CACHED)
+ return;
+
+ display_index = dal_display_path_get_display_index(display_path);
+
+ tm_utils_clear_bit(&tm->display_detection_mask, display_index);
+}
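+
+/* Illustrative sketch (not part of the original logic): the canonical
+ * caller-side pairing of tm_lock_path()/tm_unlock_path(). The lock may
+ * downgrade the requested method to DETECTION_METHOD_CACHED when the
+ * path is already locked, so the caller must detect and unlock with the
+ * method that was actually granted:
+ *
+ *	enum tm_detection_method safe_method;
+ *
+ *	safe_method = tm_lock_path(tm, display_path, method);
+ *	(perform detection using safe_method)
+ *	tm_unlock_path(tm, display_path, safe_method);
+ */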
+
+/**
+ * Checks if current path is locked for detection
+ *
+ * \param [in] display_path: Display path to check for lock
+ *
+ * \return true if path locked, false otherwise
+ */
+static bool tm_is_path_locked(struct topology_mgr *tm,
+ struct display_path *display_path)
+{
+ uint32_t display_index;
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_ASSERT(display_path != NULL);
+
+ display_index = dal_display_path_get_display_index(display_path);
+
+ return tm_utils_test_bit(&tm->display_detection_mask, display_index);
+}
+
+/*****************************************************************************
+ * Audio-related code.
+ ****************************************************************************/
+
+/**
+ * This function handles arbitration of audio resources when a display was
+ * just connected.
+ *
+ * According to "Output Device Management" spec, audio resource is assigned on
+ * device arrival (if display requests audio) and released on device removal.
+ *
+ * Signal can be downgraded only on connected display path.
+ *
+ * So in this function we do:
+ * 1. Start the audio device on the enabled connected display path. We need
+ * to do it every time we connect an enabled display, to allow the OS to
+ * switch to the new audio device, so sound can be heard.
+ *
+ * 2. Call AttachAudioToDisplayPath to set the audio to the display path
+ *
+ * \param [in] display_path: Display path which got connected.
+ */
+static void arbitrate_audio_on_connect(struct topology_mgr *tm,
+ struct display_path *display_path)
+{
+ enum signal_type sink_signal;
+ enum signal_type new_signal;
+ struct dcs *dcs;
+ struct dal_context *dal_context = tm->dal_context;
+
+ sink_signal = dal_display_path_get_query_signal(display_path,
+ SINK_LINK_INDEX);
+ new_signal = sink_signal;
+
+ if (dal_is_hdmi_signal(sink_signal) &&
+ tm->attached_hdmi_num >=
+ tm->max_num_of_supported_hdmi) {
+ /* we ran out of HDMI audios */
+ new_signal = tm_utils_downgrade_to_no_audio_signal(sink_signal);
+ }
+
+ dcs = dal_display_path_get_dcs(display_path);
+ if (!dcs) {
+ TM_ERROR("%s: no DCS on the Path!\n", __func__);
+ return;
+ }
+
+ if (dal_is_audio_capable_signal(new_signal) &&
+ dal_dcs_is_audio_supported(dcs)) {
+ enum tm_result tm_result;
+
+ tm_result = tm_resource_mgr_attach_audio_to_display_path(
+ tm->tm_rm, display_path, sink_signal);
+
+ if (tm_result != TM_RESULT_SUCCESS) {
+ /* could not attach audio resource for some reason */
+ new_signal = tm_utils_downgrade_to_no_audio_signal(
+ sink_signal);
+ }
+ }
+
+ if (new_signal != sink_signal) {
+ /* signal was downgraded for audio-related reasons */
+ handle_signal_downgrade(tm, display_path, new_signal);
+ }
+
+ if (dal_is_hdmi_signal(new_signal))
+ tm->attached_hdmi_num++;
+}
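+
+/* Descriptive sketch (added for clarity; hdmi_audio_slots_exhausted and
+ * attach_audio_failed are hypothetical shorthand for the two checks
+ * performed above, not real symbols): the downgrade decision collapses
+ * to:
+ *
+ *	if (hdmi_audio_slots_exhausted || attach_audio_failed)
+ *		new_signal =
+ *			tm_utils_downgrade_to_no_audio_signal(sink_signal);
+ *
+ * i.e. an HDMI sink keeps an audio-capable signal only while both an
+ * HDMI audio slot and an attachable audio resource are available.
+ */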
+
+/**
+ * This function handles arbitration of audio resources when a display was
+ * just disconnected.
+ *
+ * According to "Output Device Management" spec, audio resource is assigned on
+ * device arrival (if display requests audio) and released on device removal.
+ *
+ * Signal can be downgraded only on connected display path.
+ *
+ * So in this function we do:
+ * 1. Stop the audio device on the enabled disconnected display path. We need
+ * to do it every time we disconnect an enabled display, to allow the OS to
+ * switch to another audio device, so sound will continue to be heard.
+ *
+ * 2. Call DetachAudioFromDisplayPath to remove the audio from the display
+ * path.
+ * NOTE: Even though a disconnected display path loses its audio resources,
+ * its signal is not changed.
+ *
+ * \param [in] display_path: Display path which got disconnected.
+ */
+static void arbitrate_audio_on_disconnect(struct topology_mgr *tm,
+ struct display_path *display_path)
+{
+ struct dal_context *dal_context = tm->dal_context;
+ struct audio *audio;
+ enum signal_type current_signal;
+
+ if (false == tm_is_path_locked(tm, display_path)) {
+ TM_WARNING("%s: Path is NOT locked!\n",
+ __func__);
+ }
+
+ audio = dal_display_path_get_audio_object(display_path,
+ ASIC_LINK_INDEX);
+
+ if (audio != NULL) {
+ /* Stop audio device on acquired display path */
+ if (dal_display_path_is_acquired(display_path)) {
+ dal_hw_sequencer_reset_audio_device(tm->hwss_srvr,
+ display_path);
+ }
+
+ tm_resource_mgr_detach_audio_from_display_path(tm->tm_rm,
+ display_path);
+ }
+
+ current_signal = dal_display_path_get_query_signal(display_path,
+ SINK_LINK_INDEX);
+
+ if (dal_is_hdmi_signal(current_signal)) {
+ if (tm->attached_hdmi_num > 0)
+ tm->attached_hdmi_num--;
+ else
+ TM_ERROR("%s: can NOT reduce attached_hdmi_num below zero!\n",
+ __func__);
+ }
+}
+
+/**
+ * This function handles arbitration of audio resources when no
+ * connect/disconnect event occurred, but the signal changed due to the
+ * detection logic.
+ *
+ * According to "Output Device Management" spec, audio resource is assigned on
+ * device arrival (if display requests audio) and released on device removal.
+ *
+ * So in this function no audio resources reassignment happens.
+ *
+ * The only thing we need to do here is to make sure we do not report audio
+ * capability for a connected display path that does not have audio resources
+ * assigned.
+ * We ignore signal changes on a disconnected display path, since on
+ * disconnect, the signal is not really updated.
+ * So for a connected HDMI display which does not have an audio resource,
+ * we downgrade the signal to DVI.
+ * For DP we do nothing, since currently we do not report DP audio capability
+ * to upper layers. Also, DP remains DP (DP audio capability does not change
+ * the signal).
+ *
+ * \param [in] display_path: Display path on which the signal change event
+ * occurred
+ * \param [in/out] detect_status: Most recent detection status.
+ *	The detected_signal field in this structure may be downgraded.
+ */
+
+static void arbitrate_audio_on_signal_change(struct topology_mgr *tm,
+ struct display_path *display_path,
+ struct tm_detection_status *detect_status)
+{
+ enum signal_type sink_signal;
+
+ if (false == detect_status->connected)
+ return;
+
+ if (false == dal_is_hdmi_signal(detect_status->detected_signal))
+ return;
+
+ sink_signal = dal_display_path_get_query_signal(display_path,
+ SINK_LINK_INDEX);
+
+ if (dal_is_dvi_signal(sink_signal)) {
+ /* This is an HDMI display, but it doesn't have
+ * audio capability. */
+ detect_status->detected_signal =
+ tm_utils_downgrade_to_no_audio_signal(
+ detect_status->detected_signal);
+ }
+}
+
+/**
+ * This function handles arbitration of audio resources PER PATH.
+ *
+ * Most of the work is for "disconnect", where resources are freed.
+ *
+ * For "connect", only internal state is updated, so basically
+ * it prepares everything for assign_audio_by_signal_priority(), where
+ * a decision is made which path gets what audio, and then audio is assigned.
+ *
+ * This function should be called in safe detection context.
+ *
+ * According to "Output Device Management" spec, audio resource is assigned on
+ * device arrival (if display requests audio) and released on device removal.
+ *
+ * In case of a signal change event (if no connectivity change occurred) we
+ * just downgrade the signal if audio resources are not available for this
+ * display path.
+ *
+ * \param [in] display_path: Display path on which signal/connection
+ * state changed
+ * \param [in/out] detect_status: Most recent detection status.
+ *		The detected_signal field in this structure
+ *		may be downgraded.
+ */
+static void update_path_audio(struct topology_mgr *tm,
+ struct display_path *display_path,
+ struct tm_detection_status *detect_status)
+{
+ struct dal_context *dal_context = tm->dal_context;
+ bool target_connected;
+ struct dcs *dcs;
+ bool connect_event;
+ bool disconnect_event;
+ bool signal_change_event;
+ bool dongle_changed;
+ bool monitor_event;
+ bool connectivity_changed;
+ enum signal_type sink_signal;
+
+ if (false == tm_is_path_locked(tm, display_path)) {
+ TM_WARNING("%s: Path is NOT locked!\n",
+ __func__);
+ }
+
+ dcs = dal_display_path_get_dcs(display_path);
+ if (!dcs) {
+ TM_ERROR("%s: no DCS on the Path!\n", __func__);
+ return;
+ }
+
+ target_connected = dal_display_path_is_target_connected(display_path);
+
+ /* Define signal/connection state change events */
+ connect_event = (detect_status->connected && !target_connected);
+
+ disconnect_event = (!detect_status->connected && target_connected);
+
+ sink_signal = dal_display_path_get_query_signal(display_path,
+ SINK_LINK_INDEX);
+
+ signal_change_event = (detect_status->detected_signal != sink_signal);
+
+ dongle_changed = (dal_dcs_get_dongle_type(dcs) !=
+ detect_status->sink_capabilities.dongle_type);
+
+ monitor_event = (detect_status->monitor_changed ||
+ detect_status->audio_cap_changed || dongle_changed);
+
+ connectivity_changed = (detect_status->connected != target_connected);
+
+ /* Handle events */
+ if (monitor_event && !connectivity_changed) {
+		/* This is the case where we missed a disconnect event (due to
+		 * sleep, for example), i.e. a new monitor was connected while
+		 * the path state still refers to the old one. */
+ /* handle disconnect on the path */
+ arbitrate_audio_on_disconnect(tm, display_path);
+ } else if (connect_event) {
+ arbitrate_audio_on_connect(tm, display_path);
+ } else if (disconnect_event) {
+ arbitrate_audio_on_disconnect(tm, display_path);
+ } else if (signal_change_event) {
+ arbitrate_audio_on_signal_change(tm, display_path,
+ detect_status);
+ }
+}
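+
+/* Summary of the arbitration above (descriptive comment, added for
+ * clarity): at most one handler runs per detection, chosen in this
+ * priority order:
+ *
+ *	monitor_event && !connectivity_changed -> arbitrate_audio_on_disconnect()
+ *	connect_event                          -> arbitrate_audio_on_connect()
+ *	disconnect_event                       -> arbitrate_audio_on_disconnect()
+ *	signal_change_event                    -> arbitrate_audio_on_signal_change()
+ */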
+
+static void handle_signal_downgrade(struct topology_mgr *tm,
+ struct display_path *display_path,
+ enum signal_type new_signal)
+{
+ struct dal_context *dal_context = tm->dal_context;
+ /* Signal changed - we need to update connection status */
+ struct tm_detection_status detection_status;
+ struct dcs *dcs;
+ enum tm_detection_method safe_method;
+
+ dal_memset(&detection_status, 0, sizeof(detection_status));
+
+ detection_status.detected_signal = new_signal;
+ detection_status.capability_changed = true;
+ detection_status.connected = dal_display_path_is_target_connected(
+ display_path);
+
+ if (false == detection_status.connected) {
+ TM_WARNING("%s: downgrading disconnected path?!\n",
+ __func__);
+ }
+
+ dcs = dal_display_path_get_dcs(display_path);
+
+ dal_dcs_get_sink_capability(dcs, &detection_status.sink_capabilities);
+
+ safe_method = tm_lock_path(tm, display_path,
+ DETECTION_METHOD_DESTRUCTIVE_AND_EMBEDDED);
+
+ tm_update_on_connection_change(tm, display_path, &detection_status);
+
+ tm_unlock_path(tm, display_path, safe_method);
+}
+
+static void tm_vbios_set_scratch_connection_state(
+ struct topology_mgr *tm,
+ struct display_path *display_path,
+ bool connected)
+{
+ struct bios_parser *bp;
+ struct connector *connector;
+ struct graphics_object_id id;
+ struct connector_device_tag_info *device_tag;
+
+ bp = dal_adapter_service_get_bios_parser(tm->adapter_srv);
+
+ connector = dal_display_path_get_connector(display_path);
+
+ id = dal_connector_get_graphics_object_id(connector);
+
+ device_tag = dal_display_path_get_device_tag(display_path);
+
+ dal_bios_parser_set_scratch_connected(bp, id, connected, device_tag);
+}
+
+/* Get current DRR from DCS and update cached values in DisplayPath. */
+static void tm_display_path_set_drr_from_dcs(struct topology_mgr *tm,
+ struct display_path *display_path)
+{
+ struct drr_config drr_from_dcs;
+ struct dcs *dcs;
+
+ dcs = dal_display_path_get_dcs(display_path);
+
+ dal_dcs_get_drr_config(dcs, &drr_from_dcs);
+
+ dal_display_path_set_drr_config(display_path, &drr_from_dcs);
+}
+
+/* Set initial Static Screen detection values in DisplayPath
+ * once a display is connected. */
+static void tm_initialize_static_screen_events(struct topology_mgr *tm,
+ struct display_path *display_path)
+{
+ /* Initialize static screen events on display connect time. */
+ struct static_screen_events events;
+
+ /* Initialize to set no events. */
+ events.u_all = 0;
+
+ /* Try to find runtime parameter forced events. */
+ dal_adapter_service_get_feature_value(
+ FEATURE_FORCE_STATIC_SCREEN_EVENT_TRIGGERS,
+ &events.u_all,
+ sizeof(events.u_all));
+
+	/* If the runtime parameter is not set, or set to 0, we should use the
+	 * driver defaults, which are defined by the logic below. */
+ if (events.u_all == 0) {
+ /* Set initial Static Screen trigger events. */
+ events.bits.CURSOR_MOVE = 1;
+ events.bits.GFX_UPDATE = 1;
+ events.bits.OVL_UPDATE = 1;
+
+ /*
+ * On Linux the OS might write directly to the primary
+ * surface. Enable memory trigger events.
+ */
+ if (dal_adapter_service_is_feature_supported(
+ FEATURE_ALLOW_DIRECT_MEMORY_ACCESS_TRIG)) {
+
+			/* By default, enable all hit regions. Later, when the
+			 * region address range is defined, there should be
+			 * a call to set_static_screen_triggers to apply
+			 * the updated setting. */
+ events.bits.MEM_REGION0_WRITE = 1;
+ events.bits.MEM_REGION1_WRITE = 1;
+ events.bits.MEM_REGION2_WRITE = 1;
+ events.bits.MEM_REGION3_WRITE = 1;
+ }
+ }
+
+	/* Store the initialized trigger events in the display path.
+	 * These settings are usually stored once on display connection.
+	 * They may be updated later through a test application, or an internal
+	 * update of memory hit regions after the regions are programmed. */
+ dal_display_path_set_static_screen_triggers(display_path, &events);
+}
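+
+/* Illustrative sketch (assumed usage, not part of the original logic):
+ * forcing the runtime parameter FEATURE_FORCE_STATIC_SCREEN_EVENT_TRIGGERS
+ * to a non-zero mask bypasses the driver defaults above. For example,
+ * a mask built as below would leave cursor moves as the only trigger:
+ *
+ *	struct static_screen_events forced = { .u_all = 0 };
+ *
+ *	forced.bits.CURSOR_MOVE = 1;
+ *	(set the runtime parameter to the value of forced.u_all)
+ */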
+
+static void tm_update_on_connect_link_services_and_encoder_implementation(
+ struct topology_mgr *tm,
+ struct display_path *display_path)
+{
+ uint32_t link_idx;
+ struct link_service *link_service;
+ struct encoder_context context;
+ enum signal_type signal_type;
+ struct encoder *encoder;
+ struct graphics_object_id id;
+ uint32_t display_index;
+ struct dal_context *dal_context = tm->dal_context;
+
+ for (link_idx = 0;
+ link_idx < dal_display_path_get_number_of_links(display_path);
+ link_idx++) {
+
+ /* Set link service according to signal type */
+ signal_type = dal_display_path_get_query_signal(display_path,
+ link_idx);
+
+ link_service = tm_resource_mgr_get_link_service(tm->tm_rm,
+ display_path, link_idx, signal_type);
+
+ dal_display_path_set_link_query_interface(display_path,
+ link_idx, link_service);
+
+ /* update encoder implementation according to signal type */
+ dal_memset(&context, 0, sizeof(context));
+
+ context.engine = ENGINE_ID_UNKNOWN;
+
+ tm_build_encoder_context(tm->dal_context, display_path,
+ link_idx, &context);
+
+ encoder = dal_display_path_get_upstream_object(display_path,
+ link_idx);
+
+ if (ENCODER_RESULT_OK != dal_encoder_update_implementation(
+ encoder, &context)) {
+ /* should never happen */
+ TM_ERROR("%s:dal_encoder_update_implementation() failed!\n",
+ __func__);
+ }
+
+ id = dal_encoder_get_graphics_object_id(encoder);
+ display_index = dal_display_path_get_display_index(
+ display_path);
+
+ TM_ENCODER_CTL("OnConnect[UpdateImpl]: Transmitter=%s, Path=%u, LinkIdx=%u, Engine=%s, Signal=%s\n",
+ tm_utils_transmitter_id_to_str(id),
+ display_index,
+ link_idx,
+ tm_utils_engine_id_to_str(context.engine),
+ tm_utils_signal_type_to_str(context.signal));
+ } /* for() */
+}
+
+/*
+ * Delegate which updates DAL subcomponents when the display connection
+ * changed.
+ * This function does the same thing as
+ * display_capability_changed_at_display_index(),
+ * but in ADDITION, it resets the preferred colour depth to its default
+ * (10 bpc).
+ *
+ * display_index: the display path index that changed
+ */
+static void display_connection_changed_at_display_index(
+ struct topology_mgr *tm,
+ uint32_t display_index)
+{
+ /* update Timing Service with modes from Display Capability Service */
+ struct display_path *display_path;
+ struct dcs *dcs;
+ struct mode_manager *mm = tm->mm;
+ struct bestview_options opts =
+ dal_mode_manager_get_bestview_options(
+ mm,
+ display_index);
+
+ struct bestview_options default_opts =
+ dal_mode_manager_get_default_bestview_options(
+ mm,
+ display_index);
+
+ display_path = dal_tm_display_index_to_display_path(tm, display_index);
+
+ dcs = dal_display_path_get_dcs(display_path);
+
+ dal_dcs_update_ts_timing_list_on_display(dcs, display_index);
+
+ opts.prefered_color_depth = default_opts.prefered_color_depth;
+
+ if (dal_adapter_service_is_feature_supported(
+ FEATURE_ENABLE_GPU_SCALING)) {
+ opts.base_timing_select = TIMING_SELECT_NATIVE_ONLY;
+ opts.ENABLE_SCALING = true;
+ opts.MAINTAIN_ASPECT_RATIO = true;
+ }
+
+ dal_mode_manager_set_bestview_options(
+ mm,
+ display_index,
+ &opts,
+ true,
+ dal_timing_service_get_mode_timing_list_for_path(
+ tm->timing_srv,
+ display_index));
+}
+
+/* A possible use-case which will run this function is "Change monitor during
+ * suspend".
+ *
+ * It means there was no 'disconnect' event but the monitor was changed,
+ * so the capability changed without a change in connectivity. */
+static void display_capability_changed_at_display_index(
+ struct topology_mgr *tm,
+ uint32_t display_idx)
+{
+ struct display_path *display_path =
+ dal_tm_display_index_to_display_path(tm, display_idx);
+ struct dcs *dcs = dal_display_path_get_dcs(display_path);
+
+ dal_dcs_update_ts_timing_list_on_display(
+ dcs,
+ display_idx);
+
+ dal_mode_manager_update_disp_path_func_view_tbl(
+ tm->mm,
+ display_idx,
+ dal_timing_service_get_mode_timing_list_for_path(
+ tm->timing_srv,
+ display_idx));
+}
+
+/**
+ * Updates connectivity state of display path and rebuilds display timing list
+ * This function should be called in safe detection context
+ *
+ * \param [in] display_path: Display path which connectivity changed
+ * \param [in] connected: New connectivity state of display
+ * \param [in] update_timing_list: Whether display timing list should be rebuilt
+ */
+static void tm_update_connection_state_and_timing(
+ struct topology_mgr *tm,
+ struct display_path *display_path,
+ bool connected,
+ bool update_timing_list)
+{
+ uint32_t display_index;
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_ASSERT(tm_is_path_locked(tm, display_path));
+
+ dal_display_path_set_target_connected(display_path, connected);
+
+ display_index = dal_display_path_get_display_index(display_path);
+
+ /* TODO: Update display path mappings in the cache. */
+
+ /* Update IRQ registrations for this path */
+ dal_tm_detection_mgr_update_active_state(tm->tm_dm, display_path);
+
+ /* update timing list */
+ if (update_timing_list) {
+ /* For this special case (capability change), we want
+ * to reset the preferred colour depth in BestviewOptions. */
+ display_connection_changed_at_display_index(tm, display_index);
+ }
+}
+
+static void tm_update_spread_spectrum_capability_for_display_path(
+ struct topology_mgr *tm,
+ struct display_path *display_path,
+ struct tm_detection_status *detection_status)
+{
+ enum signal_type signal;
+ bool is_dp_force_ss;
+
+ signal = dal_display_path_get_query_signal(display_path,
+ ASIC_LINK_INDEX);
+
+ is_dp_force_ss = dal_adapter_service_is_feature_supported(
+ FEATURE_DP_DISPLAY_FORCE_SS_ENABLE);
+
+ if (dal_is_dp_signal(signal) && (true == is_dp_force_ss)) {
+
+ /* Feature is set -> set SS enabled in DisplayPath */
+ dal_display_path_set_ss_support(display_path, true);
+
+ } else {
+ /* Feature is not set -> set SS support in DisplayPath
+ * based on sink capabilities. */
+ dal_display_path_set_ss_support(display_path,
+ detection_status->sink_capabilities.ss_supported);
+ }
+}
+
+/**
+ * Updates display and TM state when connectivity/capability change
+ * This function should be called in safe detection context
+ *
+ * \param [in] display_path: Display path on which connectivity/capability
+ * changed
+ * \param [in] detection_status: New sink state
+ */
+static void tm_update_on_connection_change(struct topology_mgr *tm,
+ struct display_path *display_path,
+ struct tm_detection_status *detection_status)
+{
+ bool update_timing_list = false;
+ uint32_t link_idx;
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_ASSERT(tm_is_path_locked(tm, display_path));
+
+ if (detection_status->connected) {
+ /*************************************************
+ * Update sink properties for CONNECTED display.
+ *************************************************/
+
+ dal_display_path_set_sink_signal(display_path,
+ detection_status->detected_signal);
+
+ tm_display_path_set_drr_from_dcs(tm, display_path);
+
+ tm_initialize_static_screen_events(tm, display_path);
+
+ tm_update_on_connect_link_services_and_encoder_implementation(
+ tm, display_path);
+ } else {
+ /*************************************************
+ * Handle DISCONNECT.
+ *************************************************/
+ struct goc_link_service_data ls_data;
+
+ dal_memset(&ls_data, 0, sizeof(ls_data));
+
+ for (link_idx = 0;
+ link_idx < dal_display_path_get_number_of_links(
+ display_path);
+ link_idx++) {
+ /* clear ls data */
+ dal_display_path_set_link_service_data(display_path,
+ link_idx, &ls_data);
+ }
+
+ /* Clear DisplayPath DRR on disconnect */
+ dal_display_path_set_drr_config(display_path, NULL);
+
+ /* Clear Static Screen trigger events on disconnect */
+ dal_display_path_set_static_screen_triggers(display_path, NULL);
+ }
+
+ tm_update_spread_spectrum_capability_for_display_path(tm,
+ display_path, detection_status);
+
+	/* Update connection status and timing list - this needs to be done
+	 * before clock arbitration. The timing list should be updated (for
+	 * any display type) only if the display is connected OR if it is
+	 * embedded. */
+ update_timing_list = (detection_status->connected
+ || dal_is_embedded_signal(
+ detection_status->detected_signal));
+
+ tm_update_connection_state_and_timing(tm, display_path,
+ detection_status->connected, update_timing_list);
+
+ /* TODO: update clock sharing category */
+
+ if (dal_display_path_is_target_connected(display_path)) {
+ /* Recalculate cofunctional sets the next time
+ * it is required. */
+ tm->valid_cofunc_sets = false;
+ }
+
+ /* Configuration changed - we need to re-prioritise Stream Engines.
+ * Device connection/disconnection causes stream engine priority
+ * to be changed. */
+ tm_update_stream_engine_priorities(tm);
+}
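+
+/* Summary of the connect/disconnect handling above (descriptive
+ * comment, added for clarity):
+ *
+ *	connected:    set sink signal, refresh DRR from DCS, initialize
+ *	              static screen triggers, rebind link services and
+ *	              encoder implementations
+ *	disconnected: clear per-link link service data, DRR config and
+ *	              static screen triggers
+ *
+ * In both cases SS capability, connection state/timing list and stream
+ * engine priorities are refreshed afterwards.
+ */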
+
+/**
+ * Reset the transmitter on displays which have corruption on power on.
+ * This is a workaround for specific displays only (i.e. EDID patch).
+ *
+ * \param [in] display_path: Display path on which to reset transmitter
+ *
+ * \return true: if transmitter has to be reset (and was reset)
+ * on this path,
+ * false: otherwise
+ */
+static bool reset_transmitter_on_display_power_on(
+ struct topology_mgr *tm,
+ struct display_path *display_path)
+{
+ bool apply_patch = false;
+ union dcs_monitor_patch_flags patch_flags;
+ struct dcs *dcs;
+
+ dcs = dal_display_path_get_dcs(display_path);
+
+ patch_flags = dal_dcs_get_monitor_patch_flags(dcs);
+
+ if (patch_flags.flags.RESET_TX_ON_DISPLAY_POWER_ON) {
+ /* TODO: Force set mode to reset transmitter? */
+ apply_patch = true;
+ }
+
+ return apply_patch;
+}
+
+/**
+ * Updates internal state and notifies external components
+ * This function should be called in safe detection context
+ *
+ * \param [in] display_path: Display path on which to perform detection
+ * \param [in] method: Detection method
+ * \param [in] detection_status: Detection status
+ */
+static void tm_post_target_detection(struct topology_mgr *tm,
+ struct display_path *display_path,
+ enum tm_detection_method method,
+ struct tm_detection_status *detect_status)
+{
+ bool sink_signal_changed = false;
+ bool connectivity_changed = false;
+ bool fake_hpd = false;
+ uint32_t display_index;
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_ASSERT(tm_is_path_locked(tm, display_path));
+
+ display_index = dal_display_path_get_display_index(display_path);
+
+	/* Reassign audio resources when the signal/connection state changes.
+	 * detect_status.detected_signal may be updated inside. */
+ update_path_audio(tm, display_path, detect_status);
+
+ /* Update BIOS scratch registers */
+ tm_vbios_set_scratch_connection_state(tm, display_path,
+ detect_status->connected);
+
+ TM_DISPLAY_DETECT("%s:[%u]: New detected_signal: %s\n", __func__,
+ display_index, tm_utils_signal_type_to_str(
+ detect_status->detected_signal));
+
+ TM_DISPLAY_DETECT("%s:[%u]: Old signal at SINK_LINK_INDEX: %s\n",
+ __func__, display_index, tm_utils_signal_type_to_str(
+ dal_display_path_get_query_signal(display_path,
+ SINK_LINK_INDEX)));
+
+ /* Define connectivity/capability change. */
+ if (detect_status->detected_signal !=
+ dal_display_path_get_query_signal(display_path,
+ SINK_LINK_INDEX)) {
+ /* signal changed */
+ sink_signal_changed = true;
+ }
+
+ TM_DISPLAY_DETECT("%s:[%u]: sink_signal_changed: %s\n",
+ __func__, display_index,
+ (sink_signal_changed == true ? "true" : "false"));
+
+ if (detect_status->connected !=
+ dal_display_path_is_target_connected(display_path)) {
+ /* transition from one state to another occurred */
+ connectivity_changed = true;
+ }
+
+ TM_DISPLAY_DETECT("%s:[%u]: connectivity_changed: %s\n",
+ __func__, display_index,
+ (connectivity_changed == true ? "true" : "false"));
+
+ if (false == connectivity_changed &&
+ false == detect_status->capability_changed) {
+ /* 1. If connectivity and capability UNchanged, then it is
+ * a fake HPD.
+ * 2. Some OSs provide "manual" detection UI, so we can get
+ * here if user clicked on a "Detect" button in UI.
+ */
+ fake_hpd = true;
+ }
+
+ TM_DISPLAY_DETECT("%s:[%u]: fake_hpd: %s\n", __func__, display_index,
+ (fake_hpd == true ? "true" : "false"));
+
+ /* Update connectivity state internally, including timing list
+ * update based on connectivity change, capability change, or
+ * embedded signal. */
+ if (dal_is_embedded_signal(detect_status->detected_signal)) {
+ /* note that embedded is always connected */
+ tm_update_on_connection_change(tm, display_path, detect_status);
+
+ } else if (connectivity_changed || detect_status->capability_changed) {
+
+ tm_update_on_connection_change(tm, display_path, detect_status);
+
+ /* Workaround for monitors which need longer HPD disconnect
+ * delay. */
+ dal_tm_detection_mgr_program_hpd_filter(tm->tm_dm,
+ display_path);
+
+ } else if (sink_signal_changed && detect_status->connected) {
+ /* Signal should not change on connected display path if
+ * connectivity/capability did not. */
+ TM_WARNING("%s: Signal changed on connected Path: %d!\n",
+ __func__, display_index);
+ }
+
+ /* Report connectivity changes */
+ if (tm->report_detection_changes) {
+ if (connectivity_changed) {
+ dal_notify_hotplug(
+ tm->dal_context,
+ display_index,
+ detect_status->connected);
+ } else if (detect_status->capability_changed &&
+ detect_status->connected) {
+ dal_notify_capability_change(
+ tm->dal_context,
+ display_index);
+ }
+ }
+
+ /* TODO: Handle MST detection reporting */
+
+	/* Apply workaround for displays that show corruption when
+	 * their power is toggled. */
+ if (fake_hpd && method == DETECTION_METHOD_HOTPLUG)
+ reset_transmitter_on_display_power_on(tm, display_path);
+
+ if (method == DETECTION_METHOD_HOTPLUG &&
+ detect_status->hpd_pin_failure &&
+ detect_status->connected) {
+		/* Reschedule detection if the HPD line is low but the display
+		 * is considered connected (inconsistent result). */
+ dal_tm_detection_mgr_reschedule_detection(tm->tm_dm,
+ display_path, true);
+
+ } else if (tm_utils_is_destructive_method(method)) {
+ /* Reset pending detection if this was a destructive method. */
+ dal_tm_detection_mgr_reschedule_detection(tm->tm_dm,
+ display_path, false);
+ }
+}
+
+/**
+ * Handles MST sink connectivity/capability update
+ *
+ * \param [in] display_path: Display Path where connection state was changed
+ * \param [in] method: Detection/Update method
+ * \param [in] detection_status: Output structure which contains all
+ * new detected info
+ *
+ * \return True: if actual update was performed,
+ * False: if detection was skipped for any reason
+ * (not necessarily failure).
+ */
+static bool tm_process_mst_sink_update(struct topology_mgr *tm,
+ struct display_path *display_path,
+ enum tm_detection_method method,
+ struct tm_detection_status *detection_status)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return false;
+}
+
+
+/**
+ * Performs display detection, updates internal state and notifies
+ * external components.
+ *
+ * \param [in] display_path: Display path on which to perform detection
+ * \param [in] method: Detection method
+ *
+ * \return true: if display is connected,
+ * false: otherwise.
+ */
+static bool tm_detect_display(struct topology_mgr *tm,
+ struct display_path *display_path,
+ enum tm_detection_method method)
+{
+ struct dal_context *dal_context = tm->dal_context;
+ struct tm_detection_status detection_status;
+ enum tm_detection_method safe_method;
+ bool detection_performed;
+ uint32_t display_index;
+
+ TM_ASSERT(display_path != NULL);
+
+ dal_memset(&detection_status, 0, sizeof(detection_status));
+
+ detection_status.detected_signal = SIGNAL_TYPE_NONE;
+
+ safe_method = tm_lock_path(tm, display_path, method);
+
+ /* Perform detection */
+ detection_performed = dal_tm_detection_mgr_detect_display(tm->tm_dm,
+ display_path, safe_method, &detection_status);
+
+ display_index = dal_display_path_get_display_index(display_path);
+
+ if (safe_method != method) {
+ TM_WARNING("Re-entry during detection for display index:%d!\n",
+ display_index);
+ }
+
+ if (detection_performed) {
+ if (detection_status.connected
+ != dal_display_path_is_target_connected(
+ display_path)
+ || tm_utils_is_destructive_method(safe_method)) {
+
+ TM_DISPLAY_DETECT("%s:[%u]: %s\n", __func__,
+ display_index,
+ (detection_status.connected ? "Connected" :
+ "Not connected"));
+ }
+ } else {
+ TM_WARNING("%s:[%u]: No detection done!\n", __func__,
+ display_index);
+ }
+
+ if (!detection_status.dp_mst_detection) {
+ /* Proceed with post detect update for non-MST paths */
+ union display_path_properties path_props;
+
+ path_props = dal_display_path_get_properties(display_path);
+
+ if (detection_performed ||
+ (path_props.bits.ALWAYS_CONNECTED &&
+ safe_method != DETECTION_METHOD_CACHED)) {
+
+ TM_DISPLAY_DETECT("%s:[%u]: non-MST post-detect...\n",
+ __func__, display_index);
+
+ tm_post_target_detection(tm, display_path,
+ safe_method, &detection_status);
+ }
+ } else {
+ TM_DISPLAY_DETECT("%s:[%u]: MST post-detect...\n",
+ __func__, display_index);
+
+ /* Proceed with post detect update for MST paths
+ * (For non-blocking case, update will be issued
+ * asynchronously by MstMgr) */
+ if (dal_tm_detection_mgr_is_blocking_detection(tm->tm_dm)) {
+
+ dal_memset(&detection_status, 0,
+ sizeof(detection_status));
+
+ tm_process_mst_sink_update(tm, display_path,
+ safe_method, &detection_status);
+ }
+ }
+
+ tm_unlock_path(tm, display_path, safe_method);
+
+ return detection_status.connected;
+}
+
+
+/** does detection on all display paths and assigns audio resources
+ * based on priority */
+void dal_tm_do_initial_detection(struct topology_mgr *tm)
+{
+ uint32_t ind;
+ uint32_t display_paths_num = tm_get_display_path_count(tm);
+ struct display_path *display_path;
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_IFACE_TRACE();
+
+ dal_tm_detection_mgr_set_blocking_detection(tm->tm_dm, true);
+
+ for (ind = 0; ind < display_paths_num; ind++) {
+ display_path = tm_get_display_path_at_index(tm, ind);
+
+ /* TODO: Create EDID emulator??? (TM does not care if it fails)
+ * DCS doesn't have interface to do it. */
+
+		/* Assume not connected.
+		 * If the display is not connected then we basically force
+		 * creation of the default display timing list. */
+ dal_display_path_set_target_connected(display_path, false);
+
+ if (false == tm_detect_display(tm, display_path,
+ DETECTION_METHOD_DESTRUCTIVE_AND_EMBEDDED)) {
+
+ /* Assign default timing list to non-connected
+ * displays: */
+ display_capability_changed_at_display_index(tm, ind);
+ }
+ } /* for() */
+
+ /* After initial detection, we can start reporting detection
+ * changes (to base driver). */
+ tm->report_detection_changes = true;
+
+ /* After initial detection, we always do asynchronous (non-blocking)
+ * MST link data fetching. */
+ dal_tm_detection_mgr_set_blocking_detection(tm->tm_dm, false);
+}
+
+/** does detection on specific connector */
+void dal_tm_do_detection_for_connector(struct topology_mgr *tm,
+ uint32_t connector_index)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+}
+
+/** does detection on all display paths in a certain order to make sure
+ * resources are allocated properly */
+uint32_t dal_tm_do_complete_detection(
+ struct topology_mgr *tm,
+ enum tm_detection_method method,
+ bool emulate_connectivity_change)
+{
+ uint32_t connected_num = 0;
+ uint32_t detected_displays = 0;
+ uint32_t i;
+
+ uint32_t disp_path_number = tm_get_display_path_count(tm);
+
+ if (method == DETECTION_METHOD_CACHED ||
+ tm->display_detection_mask != 0) {
+ ASSERT(method == DETECTION_METHOD_CACHED);
+
+		for (i = 0; i < disp_path_number; i++) {
+			if (dal_display_path_is_target_connected(
+					tm_get_display_path_at_index(tm, i)))
+				connected_num++;
+		}
+
+ return connected_num;
+ }
+
+ /* First round - only previously connected displays
+ * TODO: Here should be number of (display paths - number of cf display
+ * paths)
+ */
+ for (i = 0; i < disp_path_number; i++) {
+ struct display_path *display_path =
+ tm_get_display_path_at_index(tm, i);
+ if (!tm_utils_test_bit(&detected_displays, i) &&
+ dal_display_path_is_target_connected(display_path)) {
+ if (tm_detect_display(tm, display_path, method))
+ connected_num++;
+ tm_utils_set_bit(&detected_displays, i);
+ }
+ }
+
+ /* Second round - all the rest
+ * TODO: please see round 1 for comment */
+ for (i = 0; i < disp_path_number; i++) {
+		struct display_path *display_path =
+			tm_get_display_path_at_index(tm, i);
+ if (!tm_utils_test_bit(&detected_displays, i)) {
+ if (tm_detect_display(tm, display_path, method))
+ connected_num++;
+ }
+ }
+
+ return connected_num;
+}
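+
+/* The two rounds above implement a simple priority scheme (descriptive
+ * comment, added for clarity): previously connected paths are re-detected
+ * first, so they get first pick of shared resources; the detected_displays
+ * bitmask only guarantees each path is visited once:
+ *
+ *	round 1: already-connected paths -> tm_detect_display() + set bit
+ *	round 2: all remaining paths     -> tm_detect_display() if bit unset
+ */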
+
+/** Does detection in a separate thread in order not to delay the
+ * calling thread. Used during the S3->S0 transition. */
+enum tm_result dal_tm_do_asynchronous_detection(struct topology_mgr *tm)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return TM_RESULT_FAILURE;
+}
+
+/** enables or disables the Base Light Sleep */
+void dal_tm_toggle_hw_base_light_sleep(struct topology_mgr *tm,
+ bool enable)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+}
+
+/**
+ * When the ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need to:
+ * 1. Power down all DC HW blocks
+ * 2. Disable VGA engine on all controllers
+ * 3. Enable power gating for controller
+ * 4. Set acc_mode_change bit (VBIOS will clear this bit when going to FSDOS)
+ */
+void dal_tm_enable_accelerated_mode(struct topology_mgr *tm)
+{
+ uint32_t i;
+ struct tm_resource *tm_resource;
+ struct controller *controller;
+ struct bios_parser *bp;
+ struct dal_context *dal_context = tm->dal_context;
+ const struct tm_resource_range *controllers =
+ dal_tmrm_get_resource_range_by_type(
+ tm->tm_rm,
+ OBJECT_TYPE_CONTROLLER);
+
+ TM_IFACE_TRACE();
+
+ /* 1. Power Down all blocks. */
+ if (tm->hw_power_down_required || !tm_can_optimize_resume_sequence(
+ tm, OF_SKIP_RESET_OF_ALL_HW_ON_S3RESUME))
+ dal_tm_power_down_path_elements(tm);
+
+ /* 2. Disable VGA engine on all controllers. */
+
+ for (i = controllers->start; i < controllers->end; i++) {
+
+ tm_resource = tm_resource_mgr_enum_resource(tm->tm_rm, i);
+
+ controller = TO_CONTROLLER_INFO(tm_resource)->controller;
+
+ /* we should call DisableVGA for each pipe */
+ dal_controller_disable_vga(controller);
+
+		/* enable clock gating for each pipe before
+		 * power gating occurs */
+ dal_controller_enable_display_pipe_clock_gating(controller,
+ true);
+
+		/* Only power gate controllers which are not acquired by a
+		 * display path.
+		 * Controllers acquired by a display path should be released
+		 * by the ResetMode sequence or reprogrammed. */
+ if (TM_RES_REF_CNT_GET(tm_resource) == 0) {
+ dal_controller_power_gating_enable(controller,
+ PIPE_GATING_CONTROL_ENABLE);
+
+ TO_CONTROLLER_INFO(tm_resource)->power_gating_state =
+ TM_POWER_GATE_STATE_ON;
+ } else {
+ TO_CONTROLLER_INFO(tm_resource)->power_gating_state =
+ TM_POWER_GATE_STATE_OFF;
+ }
+ } /* for () */
+
+ bp = dal_adapter_service_get_bios_parser(tm->adapter_srv);
+
+ dal_bios_parser_set_scratch_acc_mode_change(bp);
+}
+
+/** block interrupt if we are under VBIOS (FSDOS) */
+void dal_tm_block_interrupts(struct topology_mgr *tm, bool blocking)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+}
+
+/** Acquiring Embedded display based on current HW config */
+enum tm_result dal_tm_setup_embedded_display_path(struct topology_mgr *tm)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return TM_RESULT_FAILURE;
+}
+
+/** Release HW access and possibly restore some HW registers to their
+ * default state */
+void dal_tm_release_hw(struct topology_mgr *tm)
+{
+ dal_tm_detection_mgr_release_hw(tm->tm_dm);
+
+ /* TODO: check if needed to add release_hw for resource manager */
+}
+
+/**
+ * Update previous adapter power state to current
+ * Update current adapter power state to new (passed as parameter)
+ *
+ * \param [in] power_state: New adapter power state
+ */
+
+void dal_tm_set_current_power_state(struct topology_mgr *tm,
+ enum dal_video_power_state power_state)
+{
+ tm->previous_power_state = tm->current_power_state;
+ tm->current_power_state = power_state;
+}
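+
+/* Illustrative sketch (assumed usage; the enum literal names below are
+ * placeholders, not confirmed by this patch): a suspend/resume cycle is
+ * expected to drive this as a two-state history:
+ *
+ *	dal_tm_set_current_power_state(tm, DAL_VIDEO_POWER_SUSPEND);
+ *	(... S3 ...)
+ *	dal_tm_set_current_power_state(tm, DAL_VIDEO_POWER_ON);
+ *
+ * after which dal_tm_get_previous_power_state() reports the suspend state
+ * and dal_tm_get_current_power_state() reports power-on.
+ */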
+
+bool dal_tm_update_display_edid(struct topology_mgr *tm,
+ uint32_t display_index,
+ uint8_t *edid_buffer,
+ uint32_t buffer_len)
+{
+ struct display_path *display_path;
+ struct dcs *dcs;
+ enum edid_retrieve_status ret = EDID_RETRIEVE_FAIL;
+ struct tm_detection_status detection_status;
+ enum tm_detection_method safe_method;
+ struct dal_context *dal_context = tm->dal_context;
+
+ display_path = tm_get_display_path_at_index(tm, display_index);
+
+ dcs = dal_display_path_get_dcs(display_path);
+
+ ret = dal_dcs_override_raw_edid(dcs, buffer_len, edid_buffer);
+
+ dal_memset(&detection_status, 0, sizeof(detection_status));
+
+ /* For the use case "DSAT Override EDID":
+ * 1. Currently HPD IRQ is not working.
+ * 2. We test by connecting display AFTER system boot.
+ * 3. In order to get the new display to light-up, call
+ * dal_display_path_set_target_connected(display_path, true)
+ * TODO: remove this comment when HPD IRQ is working. */
+ /*dal_display_path_set_target_connected(display_path, true);*/
+
+ detection_status.detected_signal =
+ dal_display_path_get_query_signal(display_path,
+ SINK_LINK_INDEX);
+ detection_status.capability_changed = true;
+ detection_status.monitor_changed = true;
+ detection_status.connected = dal_display_path_is_target_connected(
+ display_path);
+
+ if (false == detection_status.connected) {
+ dal_logger_write(dal_context->logger, LOG_MAJOR_DSAT,
+ LOG_MINOR_DSAT_EDID_OVERRIDE,
+ "%s: updating EDID on disconnected path: %d!\n",
+ __func__, display_index);
+ }
+
+ dal_dcs_get_sink_capability(dcs, &detection_status.sink_capabilities);
+
+ safe_method = tm_lock_path(tm, display_path,
+ DETECTION_METHOD_DESTRUCTIVE_AND_EMBEDDED);
+
+ /* Program Encoder according to new connector state */
+ tm_update_on_connection_change(tm, display_path, &detection_status);
+
+ arbitrate_audio_on_signal_change(tm, display_path, &detection_status);
+
+ tm_unlock_path(tm, display_path, safe_method);
+
+ dal_logger_write(dal_context->logger, LOG_MAJOR_DSAT,
+ LOG_MINOR_DSAT_EDID_OVERRIDE,
+ "%s(): DisplayInd: %d, DCS return code: %s (%d).\n",
+ __func__, display_index,
+ DCS_DECODE_EDID_RETRIEVE_STATUS(ret), ret);
+
+ return ret == EDID_RETRIEVE_SUCCESS ||
+ ret == EDID_RETRIEVE_SAME_EDID;
+}
+
+/**************
+General queries
+***************/
+/** check whether we are coming from VBIOS or the driver already made a
+ * mode set at least once */
+bool dal_tm_is_hw_state_valid(struct topology_mgr *tm)
+{
+ /* Going to power down HW */
+ if (tm->hw_power_down_required)
+ return false;
+
+ if (!dal_adapter_service_is_in_accelerated_mode(tm->adapter_srv)) {
+ /* DAL driver has not taken control of HW from VBIOS yet */
+ return false;
+ }
+
+ return true;
+}
+
+/** Query whether sync output can be attached to display path */
+bool dal_tm_is_sync_output_available_for_display_path(
+ struct topology_mgr *tm,
+ uint32_t display_index,
+ enum sync_source sync_output)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return false;
+}
+
+/**
+ * Obtains display index of embedded display path (eDP or LCD)
+ *
+ * \return display index of embedded display path if such exists,
+ * INVALID_DISPLAYINDEX otherwise
+ */
+uint32_t dal_tm_get_embedded_device_index(struct topology_mgr *tm)
+{
+ uint32_t i;
+ uint32_t display_paths_num;
+ struct display_path *display_path;
+ struct connector *connector;
+ struct graphics_object_id id;
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_IFACE_TRACE();
+
+ display_paths_num = tm_get_display_path_count(tm);
+
+ for (i = 0; i < display_paths_num; i++) {
+
+ display_path = tm_get_display_path_at_index(tm, i);
+
+ connector = dal_display_path_get_connector(display_path);
+
+ id = dal_connector_get_graphics_object_id(connector);
+
+ if (id.id == CONNECTOR_ID_LVDS || id.id == CONNECTOR_ID_EDP) {
+ /* found it */
+ return i;
+ }
+ }
+
+ return INVALID_DISPLAY_INDEX;
+}
+
+/** Get GPU Clock Interface */
+struct gpu_clock_interface *dal_tm_get_gpu_clock_interface(
+ struct topology_mgr *tm)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return NULL;
+}
+
+/** Get DPCD Access Service Interface */
+struct ddc *dal_tm_get_dpcd_access_interface(
+ struct topology_mgr *tm,
+ uint32_t display_index)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return NULL;
+}
+
+/** Get DDC Access Service Interface - by display index */
+struct ddc *dal_tm_get_ddc_access_interface_by_index(
+ struct topology_mgr *tm,
+ uint32_t display_index)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return NULL;
+}
+
+/** Get DdcAccessServiceInterface - by connector */
+struct ddc *dal_tm_get_ddc_access_interface_by_connector(
+ struct topology_mgr *tm,
+ struct graphics_object_id connector)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return NULL;
+}
+
+/** Report the number of functional controllers */
+uint32_t dal_tm_get_num_functional_controllers(struct topology_mgr *tm)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return 0;
+}
+
+uint32_t dal_tm_get_display_path_index_for_controller(
+ struct topology_mgr *tm,
+ enum controller_id controller_id)
+{
+ return tm_resource_mgr_get_display_path_index_for_controller(
+ tm->tm_rm, controller_id);
+}
+
+/** Returns current adapter power state */
+enum dal_video_power_state dal_tm_get_current_power_state(
+ struct topology_mgr *tm)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_IFACE_TRACE();
+ return tm->current_power_state;
+}
+
+/** Returns previous adapter power state */
+enum dal_video_power_state dal_tm_get_previous_power_state(
+ struct topology_mgr *tm)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_IFACE_TRACE();
+ return tm->previous_power_state;
+}
+
+struct gpu *dal_tm_get_gpu(
+ struct topology_mgr *tm)
+{
+ return tm_resource_mgr_get_gpu_interface(tm->tm_rm);
+}
+
+/**********************
+General functionality
+***********************/
+/** update signal type of CrossFire Display Path according to upper
+ * layer's request */
+enum tm_result dal_tm_set_signal_type(struct topology_mgr *tm,
+ uint32_t display_index,
+ enum signal_type signal)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return TM_RESULT_FAILURE;
+}
+
+/** Sets connectivity state to a display path if it supports "force connect" */
+enum tm_result dal_tm_set_force_connected(struct topology_mgr *tm,
+ uint32_t display_index, bool connected)
+{
+ struct display_path *disp_path =
+ tm_get_display_path_at_index(tm, display_index);
+ enum tm_detection_method detection_method = DETECTION_METHOD_CACHED;
+ struct tm_detection_status detection_status = {0};
+
+ /* Update display path property connection state*/
+ union display_path_properties props =
+ dal_display_path_get_properties(disp_path);
+
+ props.bits.ALWAYS_CONNECTED = connected;
+ dal_display_path_set_properties(disp_path, props);
+
+ /* Get detection status and update connection status */
+ detection_method = tm_lock_path(
+ tm,
+ disp_path,
+ DETECTION_METHOD_DESTRUCTIVE_AND_EMBEDDED);
+
+ detection_status.detected_signal = dal_display_path_get_query_signal(
+ disp_path,
+ SINK_LINK_INDEX);
+
+ dal_dcs_query_sink_capability(
+ dal_display_path_get_dcs(disp_path),
+ &detection_status.sink_capabilities,
+ true);
+ detection_status.connected = connected;
+
+ /* Arbitrate audio, update connection state and notify external */
+ tm_post_target_detection(
+ tm, disp_path, detection_method, &detection_status);
+
+ tm_unlock_path(tm, disp_path, detection_method);
+
+ return TM_RESULT_SUCCESS;
+}
+
+/**
+ * Updates VBIOS with properties (signal, device tag) of current active
+ * display paths.
+ */
+void dal_tm_force_update_scratch_active_and_requested(
+ struct topology_mgr *tm)
+{
+ uint32_t i;
+ uint32_t display_paths_num = tm_get_display_path_count(tm);
+ struct display_path *display_path;
+ struct bios_parser *bp;
+ struct connector_device_tag_info *device_tag;
+ enum signal_type signal;
+ struct controller *controller;
+ struct graphics_object_id id;
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_IFACE_TRACE();
+
+ bp = dal_adapter_service_get_bios_parser(tm->adapter_srv);
+
+ for (i = 0; i < display_paths_num; i++) {
+
+ display_path = tm_get_display_path_at_index(tm, i);
+
+ if (false == dal_display_path_is_acquired(display_path))
+ continue;
+
+ controller = dal_display_path_get_controller(display_path);
+
+ id = dal_controller_get_graphics_object_id(controller);
+
+ signal = dal_display_path_get_query_signal(display_path,
+ SINK_LINK_INDEX);
+
+ device_tag = dal_display_path_get_device_tag(display_path);
+
+ dal_bios_parser_prepare_scratch_active_and_requested(
+ bp, id.id, signal, device_tag);
+ }
+
+ dal_bios_parser_set_scratch_active_and_requested(bp);
+}
+
+/** Perform target connectivity check */
+enum tm_result dal_tm_detect_and_notify_target_connection(
+ struct topology_mgr *tm,
+ uint32_t display_index,
+ enum tm_detection_method method)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+ return TM_RESULT_FAILURE;
+}
+
+/** External entity notifies TM about the new connectivity (via Escape) */
+void dal_tm_detect_notify_connectivity_change(struct topology_mgr *tm,
+ uint32_t display_index,
+ bool connected)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+}
+
+/** External entity requests to re-enumerate the mode list of a device
+ * and notify OS about the change. */
+void dal_tm_notify_capability_change(struct topology_mgr *tm,
+ uint32_t display_index,
+ enum tm_reenum_modes_reason reason)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ TM_NOT_IMPLEMENTED();
+}
+
+/**************
+Debug interface
+***************/
+/** Prints the content of a display path */
+void dal_tm_dump_display_path(struct topology_mgr *tm,
+ uint32_t display_index)
+{
+ struct display_path *display_path;
+ struct connector_device_tag_info *connector_device_tag;
+ uint32_t i;
+ uint32_t links_num;
+ struct encoder *encoder;
+ struct audio *audio;
+ struct connector *connector;
+ struct graphics_object_id id;
+ struct controller *controller;
+ /* Internal number, must be equal to the display_index (which is an
+ * index in TM storage for display path objects). */
+ uint32_t display_path_index;
+ struct dal_context *dal_context = tm->dal_context;
+
+ display_path = tm_get_display_path_at_index(tm, display_index);
+ links_num = dal_display_path_get_number_of_links(display_path);
+ display_path_index = dal_display_path_get_display_index(display_path);
+
+ if (display_path_index != display_index) {
+ TM_ERROR("%s: internal path index (%d) != storage index (%d)\n",
+ __func__, display_path_index, display_index);
+ }
+
+ TM_INFO("DisplayPath[%02d]: Sink Signal:%s, ASIC Signal:%s\n",
+ display_index,
+ tm_utils_signal_type_to_str(
+ dal_display_path_get_query_signal(
+ display_path,
+ SINK_LINK_INDEX)),
+ tm_utils_signal_type_to_str(
+ dal_display_path_get_query_signal(
+ display_path,
+ ASIC_LINK_INDEX)));
+
+ connector_device_tag = dal_display_path_get_device_tag(display_path);
+
+ if (connector_device_tag != NULL) {
+ TM_INFO(" (ACPI Device Tag: %s-%u ACPI=0x%X)\n",
+ tm_utils_device_type_to_str(
+ connector_device_tag->dev_id.device_type),
+ connector_device_tag->dev_id.enum_id,
+ connector_device_tag->acpi_device);
+ }
+
+ controller = dal_display_path_get_controller(display_path);
+ if (controller != NULL) {
+ id = dal_controller_get_graphics_object_id(controller);
+ TM_INFO(" (%s %s-%s)\n",
+ tm_utils_go_type_to_str(id),
+ tm_utils_go_id_to_str(id),
+ tm_utils_go_enum_to_str(id));
+ }
+
+ for (i = 0; i < links_num; i++) {
+
+ encoder = dal_display_path_get_upstream_object(display_path, i);
+ if (encoder != NULL) {
+ id = dal_encoder_get_graphics_object_id(encoder);
+
+ TM_INFO(" (Link[%u]: %s %s-%s. Transmitter: %s)\n",
+ i,
+ tm_utils_go_type_to_str(id),
+ tm_utils_go_id_to_str(id),
+ tm_utils_go_enum_to_str(id),
+ tm_utils_transmitter_id_to_str(id));
+
+ }
+
+ audio = dal_display_path_get_audio(display_path, i);
+ if (audio != NULL) {
+ id = dal_audio_get_graphics_object_id(audio);
+
+ TM_INFO(" (Link[%u]: %s %s-%s)\n",
+ i,
+ tm_utils_go_type_to_str(id),
+ tm_utils_go_id_to_str(id),
+ tm_utils_go_enum_to_str(id));
+ }
+
+ } /* for() */
+
+ connector = dal_display_path_get_connector(display_path);
+ if (connector != NULL) {
+ struct connector_feature_support cfs;
+
+ dal_connector_get_features(connector, &cfs);
+
+ id = dal_connector_get_graphics_object_id(connector);
+
+ TM_INFO(" (%s %s-%s [%s] [%s])\n",
+ tm_utils_go_type_to_str(id),
+ tm_utils_go_id_to_str(id),
+ tm_utils_go_enum_to_str(id),
+ tm_utils_hpd_line_to_str(cfs.hpd_line),
+ tm_utils_ddc_line_to_str(cfs.ddc_line));
+ }
+
+ TM_INFO("\n");
+}
+
+/** Prints the content of all display paths and some other content */
+void dal_tm_dump(struct topology_mgr *tm)
+{
+ uint32_t ind;
+ uint32_t display_paths_num = tm_get_display_path_count(tm);
+
+ for (ind = 0; ind < display_paths_num; ind++)
+ dal_tm_dump_display_path(tm, ind);
+}
+
+/** Blank CRTC and disable memory requests */
+void dal_tm_disable_all_dcp_pipes(struct topology_mgr *tm)
+{
+ uint32_t i;
+ struct tm_resource *tm_resource;
+ struct controller *controller;
+ struct dal_context *dal_context = tm->dal_context;
+ const struct tm_resource_range *controllers =
+ dal_tmrm_get_resource_range_by_type(
+ tm->tm_rm,
+ OBJECT_TYPE_CONTROLLER);
+
+ TM_IFACE_TRACE();
+
+ for (i = controllers->start; i < controllers->end; i++) {
+
+ tm_resource = tm_resource_mgr_enum_resource(tm->tm_rm, i);
+
+ controller = TO_CONTROLLER_INFO(tm_resource)->controller;
+
+ dal_controller_disable_vga(controller);
+
+ /* Blank controller using driver code instead of
+ * command table. */
+ dal_controller_blank_crtc(controller,
+ COLOR_SPACE_SRGB_FULL_RANGE);
+ }
+}
+
+/* Callback interface - a way for tm_detection_mgr to notify
+ * TM about a hotplug event */
+enum tm_result tm_handle_hpd_event(struct topology_mgr *tm,
+ struct display_path *display_path)
+{
+	struct dal_context *dal_context = tm->dal_context;
+	uint32_t display_index;
+	struct tm_detection_status detection_status;
+
+	/* Validate the path before dereferencing it for the index. */
+	if (NULL == display_path) {
+		TM_ERROR("%s: display_path is NULL!\n", __func__);
+		return TM_RESULT_FAILURE;
+	}
+
+	display_index = dal_display_path_get_display_index(display_path);
+
+	dal_memset(&detection_status, 0, sizeof(detection_status));
+
+ detection_status.connected = tm_detect_display(tm, display_path,
+ DETECTION_METHOD_HOTPLUG);
+
+ TM_INFO("Display Path %d is now %s.\n", display_index,
+ (dal_display_path_is_target_connected(display_path) == true ?
+ "Connected" : "Disconnected"));
+
+ if (true == detection_status.connected)
+ return TM_RESULT_DISPLAY_CONNECTED;
+ else
+ return TM_RESULT_DISPLAY_DISCONNECTED;
+}
+
+struct controller *dal_tm_get_controller_from_display_path(
+ struct topology_mgr *tm,
+ struct display_path *display_path)
+{
+ struct controller *controller;
+ struct dal_context *dal_context = tm->dal_context;
+
+ if (NULL == display_path) {
+ TM_ERROR("%s: Path pointer is NULL!\n", __func__);
+ return NULL;
+ }
+
+ if (false == dal_display_path_is_acquired(display_path)) {
+ TM_RESOURCES("%s: No Controller: Path[%02d] is not acquired!\n",
+ __func__,
+ dal_display_path_get_display_index(
+ display_path));
+ return NULL;
+ }
+
+ controller = dal_display_path_get_controller(display_path);
+
+ if (NULL == controller) {
+ TM_RESOURCES("%s: Path[%02d] acquired but No Controller!\n",
+ __func__,
+ dal_display_path_get_display_index(
+ display_path));
+ return NULL;
+ }
+
+ return controller;
+}
+
+/********************************
+ *
+ * Private functions.
+ *
+ *********************************/
+
+static enum tm_result tm_init_during_construct(struct topology_mgr *tm)
+{
+ enum tm_result rc = TM_RESULT_SUCCESS;
+ struct dal_context *dal_context = tm->dal_context;
+
+ /******************************************************
+ Create Resources and display paths - ORDER IS IMPORTANT!
+ *******************************************************/
+ do {
+ rc = create_gpu_resources(tm);
+ if (TM_RESULT_FAILURE == rc)
+ break;
+
+ rc = create_real_display_paths(tm);
+ if (TM_RESULT_FAILURE == rc)
+ break;
+
+ rc = add_fake_crt_vga_dvi_paths(tm);
+ if (TM_RESULT_FAILURE == rc)
+ break;
+
+ rc = miscellaneous_init(tm);
+ if (TM_RESULT_FAILURE == rc)
+ break;
+
+ rc = transfer_paths_from_resource_builder_to_tm(tm);
+ if (TM_RESULT_FAILURE == rc)
+ break;
+
+ rc = allocate_storage_for_link_services(tm);
+ if (TM_RESULT_FAILURE == rc)
+ break;
+
+ associate_link_services_with_display_paths(tm);
+
+ dal_tmrm_set_resources_range_by_type(tm->tm_rm);
+
+ tm_init_features(tm);
+
+ rc = tm_update_internal_database(tm);
+ if (TM_RESULT_FAILURE == rc)
+ break;
+
+ rc = tm_handle_detection_register_display(tm);
+ if (TM_RESULT_FAILURE == rc)
+ break;
+
+ /* destroy temporary objects */
+ tm_resource_builder_destroy(&tm->tm_rb);
+
+ TM_INFO("Number of Display Paths: %u\n",
+ tm_get_display_path_count(tm));
+
+		TM_INFO("Number of Cofunctional Paths: %u\n",
+			tm->max_num_of_cofunctional_paths);
+
+		TM_INFO("Number of Cofunctional Targets: %u\n",
+			tm->max_num_of_cofunctional_targets);
+
+ TM_INFO("Display Paths:\n");
+
+ dal_tm_dump(tm);
+
+ dal_tmrm_dump(tm->tm_rm);
+
+ } while (0);
+
+ return rc;
+}
+
+static enum tm_result create_gpu_resources(struct topology_mgr *tm)
+{
+ enum tm_result rc;
+ struct dal_context *dal_context = tm->dal_context;
+
+ /* Step 1. Create GPU resources */
+ rc = tm_resource_builder_create_gpu_resources(tm->tm_rb);
+
+ if (TM_RESULT_FAILURE == rc) {
+ TM_ERROR("%s: tm_resource_builder_create_gpu_resources() failed!\n",
+ __func__);
+ }
+
+ return rc;
+}
+
+static enum tm_result create_real_display_paths(struct topology_mgr *tm)
+{
+ enum tm_result rc;
+
+ /* Step 2. Create real display path (i.e. reported by VBIOS) */
+ rc = tm_resource_builder_build_display_paths(tm->tm_rb);
+ if (TM_RESULT_FAILURE == rc)
+ return rc;
+
+ /* Step 3. Add resources for various features (like stereo, audio) */
+ return tm_resource_builder_add_feature_resources(tm->tm_rb);
+}
+
+static enum tm_result add_fake_crt_vga_dvi_paths(struct topology_mgr *tm)
+{
+ /* Step 4. Add Fake CRT/VGA/DVI paths */
+ return tm_resource_builder_add_fake_display_paths(tm->tm_rb);
+}
+
+static enum tm_result miscellaneous_init(struct topology_mgr *tm)
+{
+ enum tm_result rc = TM_RESULT_SUCCESS;
+ struct dal_context *dal_context = tm->dal_context;
+
+ /* Step 5. Sort Displays */
+ tm_resource_builder_sort_display_paths(tm->tm_rb);
+
+ /* Step 6. Assign display path specific resources */
+
+ /* Step 7. Check number of paths */
+ if (!tm_resource_builder_get_num_of_paths(tm->tm_rb)) {
+ TM_ERROR("%s: No Display Paths were built!\n", __func__);
+ rc = TM_RESULT_FAILURE;
+ }
+
+ return rc;
+}
+
+/**
+ * Step 8. Transfer paths from the temporary Resource Builder object
+ * to TM data member.
+ * Note that paths are expected to be sorted in Resource Builder,
+ * which is done by tm_resource_builder_sort_display_paths() call in Step 5.
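+ * For example, a path whose internal index is 2 is stored in vector
+ * slot 2, regardless of the order in which the paths were built.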
+ */
+static enum tm_result transfer_paths_from_resource_builder_to_tm(
+ struct topology_mgr *tm)
+{
+ uint32_t ind;
+ uint32_t display_paths_num;
+ struct display_path *display_path;
+ uint32_t path_internal_index;
+ struct dal_context *dal_context = tm->dal_context;
+
+ display_paths_num = tm_resource_builder_get_num_of_paths(tm->tm_rb);
+
+ tm->display_paths = dal_vector_create(display_paths_num,
+ sizeof(struct display_path *));
+
+ if (NULL == tm->display_paths) {
+		TM_ERROR("%s: dal_vector_create() failed!\n",
+ __func__);
+ return TM_RESULT_FAILURE;
+ }
+
+ for (ind = 0; ind < display_paths_num; ind++) {
+
+ display_path = tm_resource_builder_get_path_at(tm->tm_rb,
+ ind);
+ if (NULL == display_path) {
+ TM_ERROR("%s: NULL == display_path!\n", __func__);
+ return TM_RESULT_FAILURE;
+ }
+
+ path_internal_index = dal_display_path_get_display_index(
+ display_path);
+
+		/* Insert each Path into TM storage at the index which matches
+		 * the internal index of the Path. This is important because
+		 * the internal index of a Path dictates its priority, and we
+		 * want the two indices to match.
+		 * See tmrb_swap_entries() for details about path priorities. */
+ if (false == display_paths_vector_insert_at(
+ tm->display_paths,
+ &display_path,
+ path_internal_index)) {
+ TM_ERROR("%s: failed to add path!\n", __func__);
+ return TM_RESULT_FAILURE;
+ }
+ }
+
+ return TM_RESULT_SUCCESS;
+}
+
+/**
+ * Step 9. Allocate storage for link services according to the actual number
+ * of paths.
+ */
+static enum tm_result allocate_storage_for_link_services(
+ struct topology_mgr *tm)
+{
+ return tm_resource_mgr_setup_link_storage(tm->tm_rm,
+ tm_get_display_path_count(tm));
+}
+
+/**
+ * Step 10. Once display paths are built and all resources allocated
+ * we can create resource index and associate link services with
+ * display paths.
+ */
+static void associate_link_services_with_display_paths(
+ struct topology_mgr *tm)
+{
+ struct tm_resource_mgr *tm_rm = tm->tm_rm;
+ uint32_t ind;
+ uint32_t display_paths_num;
+
+ tm_resource_mgr_relink_encoders(tm_rm);
+
+ display_paths_num = tm_get_display_path_count(tm);
+
+ for (ind = 0; ind < display_paths_num; ind++) {
+
+ tm_resource_mgr_associate_link_services(tm_rm,
+ tm_get_display_path_at_index(tm, ind));
+ }
+}
+
+/**
+ * Initialise Topology Manager features.
+ * Get Features from Adapter Service and set internal flags accordingly.
+ */
+static void tm_init_features(struct topology_mgr *tm)
+{
+ uint32_t i = 0;
+ struct tm_resource *tm_resource;
+ struct clock_source *clock_source;
+ enum clock_sharing_level clock_sharing_level;
+ const struct tm_resource_range *clock_sources =
+ dal_tmrm_get_resource_range_by_type(
+ tm->tm_rm,
+ OBJECT_TYPE_CLOCK_SOURCE);
+
+ /* TODO: is there a 'force-connect' or 'always connected' Display
+ * Path feature, as in DAL2? */
+
+ tm->max_num_of_non_dp_paths =
+ dal_adapter_service_get_max_cofunc_non_dp_displays();
+
+ tm->single_selected_timing_signals =
+ dal_adapter_service_get_single_selected_timing_signals();
+
+ dal_adapter_service_get_feature_value(
+ FEATURE_SUPPORTED_HDMI_CONNECTION_NUM,
+ &tm->max_num_of_supported_hdmi,
+ sizeof(tm->max_num_of_supported_hdmi));
+
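+	/* Track the highest clock sharing level reported by any clock
+	 * source; a higher level permits wider clock sharing. */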
+ for (i = clock_sources->start; i < clock_sources->end; i++) {
+
+ tm_resource = tm_resource_mgr_enum_resource(tm->tm_rm, i);
+
+ clock_source = TO_CLOCK_SOURCE_INFO(tm_resource)->clock_source;
+
+ clock_sharing_level = dal_clock_souce_get_clk_sharing_lvl(
+ clock_source);
+
+ if (clock_sharing_level > tm->clock_sharing_level)
+ tm->clock_sharing_level = clock_sharing_level;
+ }
+}
+
+static enum tm_result tm_update_single_encoder_implementation(
+ struct topology_mgr *tm,
+ struct display_path *display_path,
+ uint32_t link_index)
+{
+ struct encoder_context ctx;
+ struct encoder *this_encoder;
+ struct graphics_object_id obj_id;
+ const char *transmitter_str;
+ uint32_t dsp_index;
+ struct dal_context *dal_context = tm->dal_context;
+
+ dal_memset(&ctx, 0, sizeof(ctx));
+ ctx.engine = ENGINE_ID_UNKNOWN;
+
+ this_encoder = dal_display_path_get_upstream_object(
+ display_path, link_index);
+
+ if (NULL == this_encoder) {
+ TM_ERROR("%s: Encoder is NULL for link index:%d!\n",
+ __func__, link_index);
+ return TM_RESULT_FAILURE;
+ }
+
+ tm_build_encoder_context(tm->dal_context, display_path, link_index,
+ &ctx);
+
+ if (ENCODER_RESULT_OK != dal_encoder_update_implementation(
+ this_encoder, &ctx)) {
+ TM_ERROR("%s:dal_encoder_update_implementation() failed!\n",
+ __func__);
+ return TM_RESULT_FAILURE;
+ }
+
+ obj_id = dal_encoder_get_graphics_object_id(this_encoder);
+
+ transmitter_str = tm_utils_transmitter_id_to_str(obj_id);
+
+ dsp_index = dal_display_path_get_display_index(display_path);
+
+ TM_ENCODER_CTL("Encoder Update Impl: %s, Path=%u, Link=%u, Engine=%s, Signal=%s\n",
+ transmitter_str,
+ dsp_index,
+ link_index,
+ tm_utils_engine_id_to_str(ctx.engine),
+ tm_utils_signal_type_to_str(ctx.signal));
+
+ return TM_RESULT_SUCCESS;
+}
+
+static enum tm_result tm_update_encoder_implementations(
+ struct topology_mgr *tm)
+{
+ uint32_t dsp_index;
+ uint32_t paths_num;
+ uint32_t link_index;
+ uint32_t num_links;
+ struct display_path *display_path;
+
+ paths_num = tm_get_display_path_count(tm);
+
+ for (dsp_index = 0; dsp_index < paths_num; dsp_index++) {
+
+ display_path = tm_get_display_path_at_index(tm,
+ dsp_index);
+
+ num_links = dal_display_path_get_number_of_links(display_path);
+
+ for (link_index = 0; link_index < num_links; link_index++) {
+
+ if (TM_RESULT_SUCCESS !=
+ tm_update_single_encoder_implementation(tm,
+					display_path, link_index)) {
+ /* should never happen */
+ return TM_RESULT_FAILURE;
+ }
+ }
+ }
+
+ return TM_RESULT_SUCCESS;
+}
+
+/**
+ * Check if 'this encoder' can use more than one Engine.
+ * This function is mainly written for eDP Resource sharing feature
+ *
+ * \return true: yes, more than one Engine can be used.
+ * false: no, only a single Engine can be used.
+ */
+static bool tm_are_alternative_engines_supported(
+ const struct display_path *display_path)
+{
+ uint32_t engine_count = 0;
+ uint32_t ind;
+ struct encoder *encoder = dal_display_path_get_upstream_object(
+ display_path, ASIC_LINK_INDEX);
+ union supported_stream_engines supported_stream_engines =
+ dal_encoder_get_supported_stream_engines(encoder);
+
+ for (ind = 0; ind < ENGINE_ID_COUNT; ind++) {
+
+ if (tm_utils_is_supported_engine(supported_stream_engines, ind))
+ engine_count++;
+ }
+
+ return (engine_count > 1 ? true : false);
+}
+
+/**
+ * Get stream engine priority based on sink signal type and connectivity
+ * state. The entry with the highest priority will be acquired first by an
+ * MST Display Path.
+ *
+ * \param [in] display_path: Display path to which the engine is related
+ * \param [in] is_preferred_engine: True if this engine is preferred for
+ * given display path
+ *
+ * \return
+ * Stream Engine priority
+ */
+static enum tm_engine_priority tm_get_stream_engine_priority(
+ struct display_path *display_path, bool is_preferred_engine)
+{
+ enum signal_type signal = dal_display_path_get_query_signal(
+ display_path, ASIC_LINK_INDEX);
+ bool connected = dal_display_path_is_target_connected(display_path);
+ enum tm_engine_priority requested_priority = TM_ENGINE_PRIORITY_UNKNOWN;
+ bool is_embedded_signal;
+ bool is_alternative_engines_supported;
+
+	/* For embedded panels (LVDS only), we want to reserve one stream
+	 * engine resource to guarantee the embedded panel can be used.
+	 * We don't reserve for eDP (controlled by a runtime parameter)
+	 * because some ASICs, e.g. Kabini, have only 2 DIGs; if we reserved
+	 * one for eDP we could drive only one MST monitor even with eDP
+	 * disabled. By not reserving for eDP, the user has the option to
+	 * disable eDP and then drive 2 MST monitors.
+	 * To avoid reserving all stream engines for embedded use, reserve
+	 * only the preferred engine. */
+
+ is_embedded_signal = dal_is_embedded_signal(
+ dal_display_path_get_query_signal(display_path,
+ SINK_LINK_INDEX));
+
+ is_alternative_engines_supported =
+ tm_are_alternative_engines_supported(display_path);
+
+ if (is_preferred_engine &&
+ is_embedded_signal &&
+ is_alternative_engines_supported) {
+ /* This Engine can NOT be used by MST */
+ return TM_ENGINE_PRIORITY_NON_MST_CAPABLE;
+ }
+
+ switch (signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_EDP:
+ if (connected && is_preferred_engine) {
+ requested_priority =
+ TM_ENGINE_PRIORITY_MST_DP_CONNECTED;
+ } else {
+ requested_priority = TM_ENGINE_PRIORITY_MST_DP_MST_ONLY;
+ }
+ break;
+
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK1:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ if (connected) {
+ requested_priority =
+ TM_ENGINE_PRIORITY_MST_DVI_CONNECTED;
+ } else {
+ requested_priority = TM_ENGINE_PRIORITY_MST_DVI;
+ }
+ break;
+
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ if (connected) {
+ requested_priority =
+ TM_ENGINE_PRIORITY_MST_HDMI_CONNECTED;
+ } else {
+ requested_priority = TM_ENGINE_PRIORITY_MST_HDMI;
+ }
+ break;
+
+ default:
+ requested_priority = TM_ENGINE_PRIORITY_NON_MST_CAPABLE;
+ break;
+	}
+
+ return requested_priority;
+}
+
+static void tm_update_stream_engine_priorities_for_path(
+ struct topology_mgr *tm,
+ struct display_path *display_path)
+{
+ uint32_t i;
+ struct tm_resource *tm_resource;
+ struct tm_resource_engine_info *engine_info;
+ struct encoder *encoder;
+ union supported_stream_engines supported_stream_engines;
+ enum engine_id preferred_engine_id;
+ bool is_preferred_engine;
+ enum tm_engine_priority priority;
+ uint32_t engine_id;
+ struct tm_resource_mgr *tm_rm = tm->tm_rm;
+ struct dal_context *dal_context = tm->dal_context;
+ const struct tm_resource_range *engines =
+ dal_tmrm_get_resource_range_by_type(
+ tm_rm,
+ OBJECT_TYPE_ENGINE);
+
+	/* We check engines only for the first encoder - the one closest
+	 * to the GPU. */
+ encoder = dal_display_path_get_upstream_object(display_path,
+ ASIC_LINK_INDEX);
+
+ if (NULL == encoder) {
+ TM_ERROR("%s: No encoder after GPU!?\n", __func__);
+ return;
+ }
+
+ supported_stream_engines = dal_encoder_get_supported_stream_engines(
+ encoder);
+
+ preferred_engine_id = dal_encoder_get_preferred_stream_engine(encoder);
+
+ for (i = engines->start; i < engines->end; i++) {
+
+ tm_resource = tm_resource_mgr_enum_resource(tm_rm, i);
+
+ engine_id = GRPH_ID(tm_resource).id;
+
+ if (!tm_utils_is_supported_engine(supported_stream_engines,
+ engine_id)) {
+ /* not a supported engine */
+ continue;
+ }
+
+ engine_info = TO_ENGINE_INFO(tm_resource);
+
+ if (preferred_engine_id ==
+ dal_graphics_object_id_get_engine_id(
+ GRPH_ID(tm_resource))) {
+ /* This engine is the same as the one
+ * preferred by Encoder. */
+ is_preferred_engine = true;
+ } else {
+ /* This engine is different from the one
+ * preferred by Encoder. */
+ is_preferred_engine = false;
+ }
+
+ priority = tm_get_stream_engine_priority(display_path,
+ is_preferred_engine);
+
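+		/* Keep the numerically largest priority value seen so far,
+		 * i.e. the lowest priority among all paths which may use
+		 * this engine. */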
+ if (engine_info->priority == TM_ENGINE_PRIORITY_UNKNOWN
+ || engine_info->priority < priority) {
+
+ engine_info->priority = priority;
+
+ TM_ENG_ASN(" New Engine Priority[%s]=%s(%u).\n",
+ tm_utils_engine_id_to_str(engine_id),
+ tm_utils_engine_priority_to_str(
+ priority),
+ priority);
+ } /* if() */
+ } /* for() */
+}
+
+/**
+ * Sets stream engines priority.
+ * Entry with highest priority (lowest value) will be acquired first
+ * by MST Display Path.
+ */
+static void tm_update_stream_engine_priorities(struct topology_mgr *tm)
+{
+ struct tm_resource_mgr *tm_rm = tm->tm_rm;
+ struct tm_resource *tm_resource;
+ uint32_t i;
+ uint32_t paths_num;
+ struct dal_context *dal_context = tm->dal_context;
+ const struct tm_resource_range *engines =
+ dal_tmrm_get_resource_range_by_type(
+ tm_rm,
+ OBJECT_TYPE_ENGINE);
+
+ TM_ENG_ASN("%s() - Start\n", __func__);
+
+ for (i = engines->start; i < engines->end; i++) {
+
+ tm_resource = tm_resource_mgr_enum_resource(tm_rm, i);
+
+ TO_ENGINE_INFO(tm_resource)->priority =
+ TM_ENGINE_PRIORITY_UNKNOWN;
+ }
+
+ /* Update Stream Engine Priorities based on engine mapping of
+ * each path.
+ * We choose lowest priority (highest value) among priorities
+ * reported by all paths which MAY use this engine.
+ */
+
+ paths_num = tm_get_display_path_count(tm);
+
+ for (i = 0; i < paths_num; i++) {
+
+ tm_update_stream_engine_priorities_for_path(
+ tm,
+ tm_get_display_path_at_index(tm, i));
+ }
+
+ TM_ENG_ASN("%s() - End\n", __func__);
+}
+
+/**
+ * Creates the initial cache of cofunctional displays and
+ * calculates the maximal number of cofunctional displays.
+ * Should be called only once, at boot.
+ */
+static bool tm_create_initial_cofunc_display_subsets(
+ struct topology_mgr *tm)
+{
+ uint32_t num_func_controllers;
+ uint32_t i;
+ uint32_t display_paths_num;
+ bool allow_sharing = false;
+ struct gpu *gpu;
+ enum signal_type signal;
+ struct display_path *display_path = NULL;
+ enum clock_sharing_group clock_sharing_grp;
+ union display_path_properties properties;
+ struct dal_context *dal_context = tm->dal_context;
+
+ display_paths_num = tm_get_display_path_count(tm);
+
+ gpu = tm_resource_mgr_get_gpu_interface(tm->tm_rm);
+ num_func_controllers = dal_gpu_get_num_of_functional_controllers(gpu);
+
+ ASSERT(tm->display_subsets == NULL);
+
+	/* Calculate the max number of cofunc displays.
+	 * We do it in 4 steps when clock sharing is supported.
+	 * If clock sharing is not supported, we keep the clock_sharing_group
+	 * of each display path as exclusive and can skip the first and last
+	 * steps.
+	 */
+
+ /* 1. Force settings on display paths whenever possible to reduce
+ * limitations when we calculate cofunc displays
+ */
+
+ for (i = 0; i < display_paths_num; i++) {
+
+		/* Force sharing on display paths whenever possible.
+		 * This will reduce clock resource limitations when we
+		 * calculate cofunc displays. However, for a CrossFire path
+		 * we still want to use the clock source exclusively.
+		 */
+
+ display_path = tm_get_display_path_at_index(tm, i);
+ signal = dal_display_path_get_query_signal(
+ display_path,
+ ASIC_LINK_INDEX);
+
+		/* TODO: Add reportSingleSelectedTiming check */
+ TM_NOT_IMPLEMENTED();
+
+ clock_sharing_grp = tm_get_default_clock_sharing_group(
+ tm,
+ signal,
+ allow_sharing);
+
+ dal_display_path_set_clock_sharing_group(
+ display_path,
+ clock_sharing_grp);
+
+ /* For root DP MST Display Path, we need to set
+ * SignalType_DisplayPort_MST to correctly calculate
+ * the maximum number of cofunctional targets.
+ * After calculating the cofunctional targets, we will
+ * revert the default signal back to DP.
+ */
+ properties = dal_display_path_get_properties(display_path);
+ if (properties.bits.IS_ROOT_DP_MST_PATH)
+ dal_display_path_set_sink_signal(
+ display_path,
+ SIGNAL_TYPE_DISPLAY_PORT_MST);
+ }
+
+	/* 2. Figure out the greatest cofunctional display paths subset.
+	 * We start with the total number of display paths and reduce the
+	 * size until a cofunctional subset is successfully allocated.
+	 */
+ tm->max_num_of_cofunctional_paths = display_paths_num <
+ num_func_controllers ? display_paths_num :
+ num_func_controllers;
+
+ while (tm->max_num_of_cofunctional_paths > 0) {
+
+ if (tm_check_num_of_cofunc_displays(tm,
+ display_paths_num,
+ tm->max_num_of_cofunctional_paths))
+ break;
+
+ tm->max_num_of_cofunctional_paths--;
+	}
+
+	/* 3. Figure out the greatest cofunctional targets subset.
+	 * We start with the max number of cofunctional display
+	 * paths and reduce the size until a cofunctional subset
+	 * is successfully allocated.
+	 */
+ tm->max_num_of_cofunctional_targets =
+ tm->max_num_of_cofunctional_paths;
+
+ if (tm_resource_builder_get_num_of_paths(tm->tm_rb) !=
+ display_paths_num) {
+
+ while (tm->max_num_of_cofunctional_targets > 0) {
+ if (tm_check_num_of_cofunc_displays(tm,
+ display_paths_num,
+ tm->max_num_of_cofunctional_targets))
+ break;
+
+ tm->max_num_of_cofunctional_targets--;
+ }
+ }
+
+ /* 4. Setup defaults on all display paths. */
+ for (i = 0; i < display_paths_num; i++) {
+
+ display_path = tm_get_display_path_at_index(tm, i);
+
+		/* Set up the default clock sharing group on all display
+		 * paths. Same here - we skip the CrossFire path. */
+ signal = dal_display_path_get_query_signal(
+ display_path,
+ ASIC_LINK_INDEX);
+
+ clock_sharing_grp = tm_get_default_clock_sharing_group(
+ tm,
+ signal,
+ false);
+
+ dal_display_path_set_clock_sharing_group(
+ display_path,
+ clock_sharing_grp);
+
+ /* We finished calculating the maximum number
+ * of cofunctional targets. Now we revert the
+ * default signal back to DP.
+ */
+ properties = dal_display_path_get_properties(display_path);
+ if (properties.bits.IS_ROOT_DP_MST_PATH)
+ dal_display_path_set_sink_signal(
+ display_path,
+ SIGNAL_TYPE_DISPLAY_PORT);
+	}
+
+ ASSERT(tm->max_num_of_cofunctional_paths > 0);
+ ASSERT(tm->max_num_of_cofunctional_targets > 0);
+
+	/* If we successfully calculated the maximum
+	 * number of cofunctional displays, we can
+	 * proceed to the next step - init relevant
+	 * data members and allocate the cache of
+	 * cofunctional subsets.
+	 */
+ if (tm->max_num_of_cofunctional_paths > 0) {
+
+ tm->display_subsets = dal_tm_subsets_cache_create(
+ tm->dal_context,
+ display_paths_num,
+ tm->max_num_of_cofunctional_paths,
+ num_func_controllers);
+ }
+
+ return (tm->display_subsets != NULL);
+}
+
+static bool tm_check_num_of_cofunc_displays(
+ struct topology_mgr *tm,
+ uint32_t max_value,
+ uint32_t max_subset_size)
+{
+ bool calc_result;
+ bool ret_value = false;
+ struct tm_resource_mgr *resource_mgr;
+ struct tm_calc_subset *calc_subset;
+ struct dal_context *dal_context = tm->dal_context;
+
+ /* Allocate Temporary resources*/
+ resource_mgr = tm_resource_mgr_clone(tm->tm_rm);
+ if (resource_mgr == NULL) {
+ TM_ERROR("%s: Failed to clone resources", __func__);
+ return false;
+ }
+
+ TM_COFUNC_PATH(
+		"%s: Max size of subset: %u. Display index range 0-%u.\n",
+		__func__, max_subset_size, max_value - 1);
+
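+	/* Exhaustively iterate over subsets of display indices; succeed as
+	 * soon as one subset of the requested size is cofunctional. */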
+	calc_subset = dal_tm_calc_subset_create();
+	if (NULL == calc_subset) {
+		TM_ERROR("%s: Failed to create calculation subset", __func__);
+		tm_resource_mgr_destroy(&resource_mgr);
+		return false;
+	}
+
+	calc_result = dal_tm_calc_subset_start(
+		calc_subset,
+		max_value,
+		max_subset_size);
+
+ while (calc_result) {
+
+ if (calc_subset->subset_size == max_subset_size) {
+
+			/* TODO: dumpSubset(count++, &calc_subset); */
+
+ if (tm_can_display_paths_be_enabled_at_the_same_time(
+ tm,
+ resource_mgr,
+ calc_subset->buffer,
+ calc_subset->subset_size)) {
+
+ ret_value = true;
+ break;
+ }
+
+ TM_COFUNC_PATH(
+ "Subset not valid. Continue to iterate...\n");
+ }
+ calc_result = dal_tm_calc_subset_step(calc_subset);
+ }
+
+ tm_resource_mgr_destroy(&resource_mgr);
+ dal_tm_calc_subset_destroy(calc_subset);
+ return ret_value;
+}
+
+static bool tm_can_display_paths_be_enabled_at_the_same_time(
+ struct topology_mgr *tm,
+ struct tm_resource_mgr *tm_rm_clone,
+ const uint32_t *displays,
+ uint32_t array_size)
+{
+ bool success = true;
+ uint32_t num_of_non_dp_paths = 0;
+ struct display_path *display_path;
+ struct link_service *last_mst_link_service = NULL;
+ struct link_service *mst_link_service = NULL;
+ uint32_t i;
+
+ ASSERT(tm->tm_rm != NULL);
+ ASSERT(displays != NULL);
+ ASSERT(array_size > 0);
+
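+	/* Validation runs in three phases: tentatively acquire resources on
+	 * the cloned resource manager, release everything, then validate
+	 * the subset against MST link bandwidth. */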
+ tm_resource_mgr_reset_all_usage_counters(tm_rm_clone);
+
+	/* Try to acquire resources temporarily */
+ for (i = 0; i < array_size; i++) {
+
+ display_path = tm_get_display_path_at_index(
+ tm,
+ displays[i]);
+
+ if (!tm_resource_mgr_acquire_resources(
+ tm_rm_clone,
+ display_path,
+ /* Validation doesn't require change of HW state! */
+ TM_ACQUIRE_METHOD_SW)) {
+
+ success = false;
+ break;
+ }
+
+ if (!dal_is_dp_signal(
+ dal_display_path_get_query_signal(
+ display_path,
+ ASIC_LINK_INDEX))) {
+
+ num_of_non_dp_paths++;
+
+ /* make sure we do not exceed
+ * limitations on number of non-DP paths
+ */
+ if (num_of_non_dp_paths >
+ tm->max_num_of_non_dp_paths) {
+ success = false;
+ break;
+ }
+ }
+ }
+
+	/* Release acquired resources */
+ for (i = 0; i < array_size; i++) {
+
+ display_path = tm_get_display_path_at_index(
+ tm,
+ displays[i]);
+
+ tm_resource_mgr_release_resources(
+ tm_rm_clone,
+ display_path,
+ TM_ACQUIRE_METHOD_SW);
+ }
+
+	/* Validate against MST bandwidth */
+ for (i = 0; i < array_size; i++) {
+
+ if (!success)
+ break;
+
+ display_path = tm_get_display_path_at_index(
+ tm,
+ displays[i]);
+
+ mst_link_service =
+ dal_display_path_get_mst_link_service(display_path);
+
+ /* only need to call each MST Link
+ * Service once with all display indices
+ */
+ if (mst_link_service != NULL &&
+ mst_link_service != last_mst_link_service) {
+
+ success = dal_ls_are_mst_displays_cofunctional(
+ mst_link_service,
+ displays,
+ array_size);
+ last_mst_link_service = mst_link_service;
+ }
+ }
+
+ return success;
+}
+
+/**
+ * Returns the default clock sharing group based on signal
+ */
+static enum clock_sharing_group tm_get_default_clock_sharing_group(
+ struct topology_mgr *tm,
+ enum signal_type signal,
+ bool allow_per_timing_sharing)
+{
+ enum clock_sharing_group clk_sharing_grp =
+ CLOCK_SHARING_GROUP_EXCLUSIVE;
+
+ switch (signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ if (tm->clock_sharing_level >=
+ CLOCK_SHARING_LEVEL_DISPLAY_PORT_SHAREABLE)
+ clk_sharing_grp = CLOCK_SHARING_GROUP_DISPLAY_PORT;
+ else if (tm->clock_sharing_level >=
+ CLOCK_SHARING_LEVEL_DP_MST_SHAREABLE)
+ clk_sharing_grp = CLOCK_SHARING_GROUP_DP_MST;
+ else if (allow_per_timing_sharing)
+ clk_sharing_grp = CLOCK_SHARING_GROUP_GROUP1;
+
+ break;
+ case SIGNAL_TYPE_EDP:
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ if (tm->clock_sharing_level >=
+ CLOCK_SHARING_LEVEL_DISPLAY_PORT_SHAREABLE)
+ clk_sharing_grp = CLOCK_SHARING_GROUP_DISPLAY_PORT;
+ else if (allow_per_timing_sharing)
+ clk_sharing_grp = CLOCK_SHARING_GROUP_GROUP1;
+
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK1:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ if (allow_per_timing_sharing)
+ clk_sharing_grp = CLOCK_SHARING_GROUP_GROUP1;
+ break;
+ default:
+ break;
+ }
+
+ return clk_sharing_grp;
+}
+
+/**
+ * Update Topology Manager internal database
+ */
+static enum tm_result tm_update_internal_database(struct topology_mgr *tm)
+{
+ struct dal_context *dal_context = tm->dal_context;
+
+ /* Update encoder implementation PRIOR to call
+ * tm_update_stream_engine_priorities(). */
+ if (TM_RESULT_FAILURE == tm_update_encoder_implementations(tm))
+ return TM_RESULT_FAILURE;
+
+ tm_update_stream_engine_priorities(tm);
+
+ if (!tm_create_initial_cofunc_display_subsets(tm)) {
+ TM_ERROR("%s: Failed to create cofunctional subsets",
+ __func__);
+ return TM_RESULT_FAILURE;
+ }
+
+ return TM_RESULT_SUCCESS;
+}
+
+static bool tm_is_display_index_valid(struct topology_mgr *tm,
+ uint32_t display_index, const char *caller_func)
+{
+ uint32_t path_count;
+ struct dal_context *dal_context = tm->dal_context;
+
+ path_count = tm_get_display_path_count(tm);
+
+ if (display_index >= path_count) {
+		TM_ERROR("%s: display_index '%d' is out of range - path count is %d!\n",
+ caller_func, display_index, path_count);
+ return false;
+ }
+
+ return true;
+}
+
+/* Register to receive HPD interrupt. */
+enum tm_result dal_tm_register_for_display_detection_interrupt(
+ struct topology_mgr *tm)
+{
+ enum tm_result rc = TM_RESULT_SUCCESS;
+ uint32_t i;
+ struct display_path *path;
+ union display_path_properties props;
+
+ for (i = 0; i < tm_get_display_path_count(tm); i++) {
+
+ path = tm_get_display_path_at_index(tm, i);
+
+ props = dal_display_path_get_properties(path);
+
+ if (props.bits.FAKED_PATH || props.bits.IS_BRANCH_DP_MST_PATH)
+ continue;
+
+ if (!dal_tm_detection_mgr_register_hpd_irq(tm->tm_dm, path)) {
+ rc = TM_RESULT_FAILURE;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/* Register IRQ Sources. */
+static enum tm_result tm_handle_detection_register_display(
+ struct topology_mgr *tm)
+{
+ enum tm_result rc = TM_RESULT_SUCCESS;
+ uint32_t i;
+ struct display_path *path;
+ union display_path_properties props;
+
+ for (i = 0; i < tm_get_display_path_count(tm); i++) {
+
+ path = tm_get_display_path_at_index(tm, i);
+
+ props = dal_display_path_get_properties(path);
+
+ if (props.bits.FAKED_PATH || props.bits.IS_BRANCH_DP_MST_PATH)
+ continue;
+
+ if (!dal_tm_detection_mgr_register_display(tm->tm_dm, path)) {
+ rc = TM_RESULT_FAILURE;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/****************************
+ * Plane-related interfaces.
+ ****************************/
+/**
+ * Acquire resources for the set of Planes.
+ * For each acquired resource, set configuration options which will be used
+ * for HW programming.
+ *
+ * NOTE: it is assumed that SetMode was already called and acquired the
+ * "root" controller for the "main" plane.
+ * Because of this assumption, this function acquires resources only for
+ * the planes AFTER the 1st one. The configuration of the "root" plane
+ * will still be updated.
+ *
+ * \param [in] display_index: index of the display path to acquire
+ *	plane resources for.
+ *
+ * \param [in] num_planes: number of planes.
+ *
+ * \param [in] configs: array of Plane Configuration structures.
+ *
+ * \return : NO return code! It is assumed that the caller validated the
+ *	configuration *before* setting it.
+ *	The advantage is that even for an incorrect configuration
+ *	we will see something on the screen (not all planes), instead
+ *	of a black screen.
+ * If we run out of resources we print an error message into
+ * the logger.
+ */
+void dal_tm_acquire_plane_resources(
+ struct topology_mgr *tm,
+ uint32_t display_index,
+ uint32_t num_planes,
+ const struct plane_config *configs)
+{
+ struct display_path *path;
+ struct dal_context *dal_context = tm->dal_context;
+ uint32_t plane_ind;
+ struct display_path_plane plane;
+ const struct plane_config *curr_config;
+ struct controller *controller;
+ uint32_t controller_index;
+ struct display_path_plane *root_plane;
+ uint32_t dp_planes_num;
+
+ path = tm_get_display_path_at_index(tm, display_index);
+ if (NULL == path) {
+ TM_ERROR("%s: invalid display_index:%d!\n", __func__,
+ display_index);
+ return;
+ }
+
+ root_plane = dal_display_path_get_plane_at_index(path, 0);
+ if (NULL == root_plane) {
+ TM_ERROR("%s: Invalid State! Was the Mode Set? [Path: %d]\n",
+ __func__, display_index);
+ return;
+	}
+
+ /* handle the 'root' plane 1st */
+ plane_ind = 0;
+ curr_config = &configs[plane_ind];
+ if (!dal_controller_is_surface_supported(root_plane->controller,
+ curr_config)) {
+ TM_MPO("%s: Surface is NOT supported on 'root'! Path:%d\n",
+ __func__, display_index);
+ /* This is (most likely) the case of Full Screen Video which
+ * we want to display via Underlay.
+ * In this case OS supplied us a single surface.
+ * And we can power-gate parts of root-pipe FE. */
+ root_plane->disabled = true;
+ root_plane->blnd_mode = BLENDER_MODE_OTHER_PIPE;
+ /* Note that the loop below will still start from config[0]
+ * and will add a new plane on top of the root one. */
+ if (num_planes != 1) {
+ TM_WARNING(
+			"%s: Number of Planes is NOT one! [Path: %d]\n",
+ __func__, display_index);
+ }
+ } else {
+ /* TODO: add real root 'plane' initialisation here,
+ * based on parameters passed in */
+ root_plane->disabled = false;
+ root_plane->blnd_mode = BLENDER_MODE_BLENDING;
+ /* the loop below will skip the search for 'root' controller
+ * because the one it has now supports the surface. */
+ plane_ind++;
+ }
+
+ for (; plane_ind < num_planes; plane_ind++) {
+
+ curr_config = &configs[plane_ind];
+ controller = get_controller_for_plane_index(tm, path,
+ plane_ind, curr_config, &controller_index);
+
+ if (controller) {
+ dal_memset(&plane, 0, sizeof(plane));
+ /* TODO: add real 'plane' initialisation here,
+ * based on parameters passed in */
+ plane.controller = controller;
+ plane.disabled = false;
+ plane.blnd_mode = BLENDER_MODE_BLENDING;
+ /* found free controller -> add the plane to the path */
+ dal_display_path_add_plane(path, &plane);
+
+ dal_tmrm_acquire_controller(tm->tm_rm, path,
+ controller_index,
+ TM_ACQUIRE_METHOD_HW);
+ }
+ } /* for() */
+
+ dp_planes_num = dal_display_path_get_number_of_planes(path);
+
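+	/* The topmost plane has no plane above it to blend with, hence the
+	 * current-pipe blender mode (unless the plane is disabled). */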
+	{
+		struct display_path_plane *topmost_plane =
+			dal_display_path_get_plane_at_index(
+				path,
+				dp_planes_num - 1);
+
+		if (!topmost_plane->disabled)
+			topmost_plane->blnd_mode = BLENDER_MODE_CURRENT_PIPE;
+	}
+
+ TM_MPO("%s: acquired resources for %d planes out of %d.\n",
+ __func__,
+ dp_planes_num,
+ num_planes);
+}
+
+static struct controller *get_controller_for_plane_index(
+ struct topology_mgr *tm,
+ struct display_path *path,
+ uint32_t plane_index,
+ const struct plane_config *plcfg,
+ uint32_t *controller_index_out)
+{
+ struct dal_context *dal_context = tm->dal_context;
+ uint32_t display_index = dal_display_path_get_display_index(path);
+ uint32_t controller_mask = 0;
+ struct controller *controller;
+
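+	/* Keep asking the resource manager for a free controller, masking
+	 * out each candidate which cannot support the surface, until a
+	 * match is found or no free controllers remain. */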
+ while (true) {
+ *controller_index_out = RESOURCE_INVALID_INDEX;
+
+ controller =
+ dal_tmrm_get_free_controller(
+ tm->tm_rm,
+ controller_index_out,
+ controller_mask);
+
+		/* No free controller left that can support the surface -
+		 * give up. */
+ if (NULL == controller) {
+ TM_ERROR("%s: Failed to get controller! Path:%d, Plane:%d\n",
+ __func__,
+ display_index,
+ plane_index);
+ break;
+ }
+
+ if (dal_controller_is_surface_supported(controller, plcfg))
+ break;
+
+ controller_mask |=
+ 1 <<
+ dal_controller_get_graphics_object_id(controller).id;
+ }
+
+ return controller;
+}
+
+/* Release resource acquired by dal_tm_acquire_plane_resources() */
+void dal_tm_release_plane_resources(
+ struct topology_mgr *tm,
+ uint32_t display_index)
+{
+ struct dal_context *dal_context = tm->dal_context;
+ struct display_path *display_path;
+
+ display_path = tm_get_display_path_at_index(tm, display_index);
+ if (NULL == display_path) {
+ TM_ERROR("%s: invalid display_index:%d!\n", __func__,
+ display_index);
+ return;
+ }
+
+ dal_tmrm_release_non_root_controllers(tm->tm_rm, display_path,
+ TM_ACQUIRE_METHOD_HW);
+
+ dal_display_path_release_non_root_planes(display_path);
+}
+
+/***********************************
+ * End-of-Plane-related interfaces.
+ ***********************************/
+
+/* Handles a hotplug/hotunplug event - just performs
+ * detection on the requested display. */
+void dal_tm_handle_sink_connectivity_change(
+ struct topology_mgr *tm,
+ uint32_t display_index)
+{
+ struct display_path *display_path;
+ struct dal_context *dal_context = tm->dal_context;
+
+ display_path = tm_get_display_path_at_index(tm, display_index);
+
+ if (NULL == display_path) {
+ TM_ERROR("%s: invalid display_index:%d!\n", __func__,
+ display_index);
+ return;
+ }
+
+ tm_detect_display(tm, display_path, DETECTION_METHOD_HOTPLUG);
+}
diff --git a/drivers/gpu/drm/amd/dal/topology/topology.h b/drivers/gpu/drm/amd/dal/topology/topology.h
new file mode 100644
index 000000000000..3ce30ec14af3
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/topology/topology.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* This file is for private definitions, which are for use only
+ * by subcomponents of Topology Manager. */
+
+#ifndef __DAL_TOPOLOGY_H__
+#define __DAL_TOPOLOGY_H__
+
+#include "include/topology_mgr_interface.h"
+
+/* Callback interface - a way for tm_detection_mgr to notify
+ * TM about hotplug event */
+enum tm_result tm_handle_hpd_event(struct topology_mgr *tm_mgr,
+ struct display_path *display_path);
+
+#endif /* __DAL_TOPOLOGY_H__ */