author    Dave Airlie <airlied@redhat.com>    2024-04-17 16:56:34 +1000
committer Dave Airlie <airlied@redhat.com>    2024-04-17 16:56:34 +1000
commit    4bf1929d4f8ae111f1288efb9d2cb8d81052134e (patch)
tree      16f47e5e4e6b007266dc2cf43839fabc39d5973a
parent    428eff43dbe31e25713a728afc8d3ee6c6c83c66 (diff)
2024y-04m-17d-06h-55m-29s UTC: drm-tip rerere cache update
git version 2.44.0
-rw-r--r--  rr-cache/167c3920940b340aa744e211de905f5c9bd2abae/postimage.1   5825
-rw-r--r--  rr-cache/167c3920940b340aa744e211de905f5c9bd2abae/preimage.1    5835
-rw-r--r--  rr-cache/1ccce56d486e6cb023b511796ea6bb9d98723dd7/postimage.1    638
-rw-r--r--  rr-cache/1ccce56d486e6cb023b511796ea6bb9d98723dd7/preimage.1     642
-rw-r--r--  rr-cache/4d899fd0f8e7209e3b955fcd705fe7027a7d98c9/postimage.1    141
-rw-r--r--  rr-cache/4d899fd0f8e7209e3b955fcd705fe7027a7d98c9/preimage.1     152
-rw-r--r--  rr-cache/b0f66c584ddead21f2267ca9063b2ea9c387bb2d/postimage.1   1166
-rw-r--r--  rr-cache/b0f66c584ddead21f2267ca9063b2ea9c387bb2d/preimage.1    1169
-rw-r--r--  rr-cache/d1b95d32568e36081f0d9fa2b5ec12cc7cb2ca0a/postimage    11567
-rw-r--r--  rr-cache/d1b95d32568e36081f0d9fa2b5ec12cc7cb2ca0a/preimage     11571
-rw-r--r--  rr-cache/d636b431e3d21b51122f1141ec3c0f2be3aba9cf/postimage.1  11567
-rw-r--r--  rr-cache/d636b431e3d21b51122f1141ec3c0f2be3aba9cf/preimage.1   11571
12 files changed, 61844 insertions, 0 deletions
diff --git a/rr-cache/167c3920940b340aa744e211de905f5c9bd2abae/postimage.1 b/rr-cache/167c3920940b340aa744e211de905f5c9bd2abae/postimage.1
new file mode 100644
index 000000000000..145cdab92ca0
--- /dev/null
+++ b/rr-cache/167c3920940b340aa744e211de905f5c9bd2abae/postimage.1
@@ -0,0 +1,5825 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+
+#include "dm_services.h"
+
+#include "amdgpu.h"
+
+#include "dc.h"
+
+#include "core_status.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+#include "dce/dce_hwseq.h"
+
+#include "resource.h"
+#include "dc_state.h"
+#include "dc_state_priv.h"
+#include "dc_plane_priv.h"
+
+#include "gpio_service_interface.h"
+#include "clk_mgr.h"
+#include "clock_source.h"
+#include "dc_bios_types.h"
+
+#include "bios_parser_interface.h"
+#include "bios/bios_parser_helper.h"
+#include "include/irq_service_interface.h"
+#include "transform.h"
+#include "dmcu.h"
+#include "dpp.h"
+#include "timing_generator.h"
+#include "abm.h"
+#include "virtual/virtual_link_encoder.h"
+#include "hubp.h"
+
+#include "link_hwss.h"
+#include "link_encoder.h"
+#include "link_enc_cfg.h"
+
+#include "link.h"
+#include "dm_helpers.h"
+#include "mem_input.h"
+
+#include "dc_dmub_srv.h"
+
+#include "dsc.h"
+
+#include "vm_helper.h"
+
+#include "dce/dce_i2c.h"
+
+#include "dmub/dmub_srv.h"
+
+#include "dce/dmub_psr.h"
+
+#include "dce/dmub_hw_lock_mgr.h"
+
+#include "dc_trace.h"
+
+#include "hw_sequencer_private.h"
+
+#include "dml2/dml2_internal_types.h"
+
+#include "dce/dmub_outbox.h"
+
+#define CTX \
+ dc->ctx
+
+#define DC_LOGGER \
+ dc->ctx->logger
+
+static const char DC_BUILD_ID[] = "production-build";
+
+/**
+ * DOC: Overview
+ *
+ * DC is the OS-agnostic component of the amdgpu DC driver.
+ *
+ * DC maintains and validates a set of structs representing the state of the
+ * driver and writes that state to AMD hardware.
+ *
+ * Main DC HW structs:
+ *
+ * struct dc - The central struct. One per driver. Created on driver load,
+ * destroyed on driver unload.
+ *
+ * struct dc_context - One per driver.
+ * Used as a backpointer by most other structs in dc.
+ *
+ * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
+ * plugpoints). Created on driver load, destroyed on driver unload.
+ *
+ * struct dc_sink - One per display. Created on boot or hotplug.
+ * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
+ * (the display directly attached). It may also have one or more remote
+ * sinks (in the Multi-Stream Transport case).
+ *
+ * struct resource_pool - One per driver. Represents the hw blocks not in the
+ * main pipeline. Not directly accessible by dm.
+ *
+ * Main dc state structs:
+ *
+ * These structs can be created and destroyed as needed. There is a full set of
+ * these structs in dc->current_state representing the currently programmed state.
+ *
+ * struct dc_state - The global DC state to track global state information,
+ * such as bandwidth values.
+ *
+ * struct dc_stream_state - Represents the hw configuration for the pipeline from
+ * a framebuffer to a display. Maps one-to-one with dc_sink.
+ *
+ * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
+ * and may have more in the Multi-Plane Overlay case.
+ *
+ * struct resource_context - Represents the programmable state of everything in
+ * the resource_pool. Not directly accessible by dm.
+ *
+ * struct pipe_ctx - A member of struct resource_context. Represents the
+ * internal hardware pipeline components. Each dc_plane_state has either
+ * one or two (in the pipe-split case).
+ */
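+
+/* Illustrative sketch, not part of the driver: shows how the structs
+ * documented above nest. The function name is made up for illustration and
+ * assumes a valid dc pointer as returned by dc_create().
+ */
+static void __maybe_unused example_walk_current_state(struct dc *dc)
+{
+	struct dc_state *state = dc->current_state;
+	int i;
+
+	for (i = 0; i < state->stream_count; i++) {
+		/* One dc_stream_state per active display path; its link is
+		 * the connector-scoped dc_link created at driver load.
+		 */
+		struct dc_stream_state *stream = state->streams[i];
+		struct dc_link *link = stream->link;
+
+		(void)link; /* read-only walk; nothing is programmed here */
+	}
+}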
+
+/* Private functions */
+
+static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
+{
+ if (new > *original)
+ *original = new;
+}
+
+static void destroy_links(struct dc *dc)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->link_count; i++) {
+		if (dc->links[i])
+ dc->link_srv->destroy_link(&dc->links[i]);
+ }
+}
+
+static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
+{
+ int i;
+ uint32_t count = 0;
+
+ for (i = 0; i < num_links; i++) {
+ if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
+ links[i]->is_internal_display)
+ count++;
+ }
+
+ return count;
+}
+
+static int get_seamless_boot_stream_count(struct dc_state *ctx)
+{
+ uint8_t i;
+ uint8_t seamless_boot_stream_count = 0;
+
+ for (i = 0; i < ctx->stream_count; i++)
+ if (ctx->streams[i]->apply_seamless_boot_optimization)
+ seamless_boot_stream_count++;
+
+ return seamless_boot_stream_count;
+}
+
+static bool create_links(
+ struct dc *dc,
+ uint32_t num_virtual_links)
+{
+ int i;
+ int connectors_num;
+ struct dc_bios *bios = dc->ctx->dc_bios;
+
+ dc->link_count = 0;
+
+ connectors_num = bios->funcs->get_connectors_number(bios);
+
+ DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);
+
+ if (connectors_num > ENUM_ID_COUNT) {
+ dm_error(
+ "DC: Number of connectors %d exceeds maximum of %d!\n",
+ connectors_num,
+ ENUM_ID_COUNT);
+ return false;
+ }
+
+ dm_output_to_console(
+ "DC: %s: connectors_num: physical:%d, virtual:%d\n",
+ __func__,
+ connectors_num,
+ num_virtual_links);
+
+ // condition loop on link_count to allow skipping invalid indices
+ for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
+ struct link_init_data link_init_params = {0};
+ struct dc_link *link;
+
+ DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
+
+ link_init_params.ctx = dc->ctx;
+ /* next BIOS object table connector */
+ link_init_params.connector_index = i;
+ link_init_params.link_index = dc->link_count;
+ link_init_params.dc = dc;
+ link = dc->link_srv->create_link(&link_init_params);
+
+ if (link) {
+ dc->links[dc->link_count] = link;
+ link->dc = dc;
+ ++dc->link_count;
+ }
+ }
+
+ DC_LOG_DC("BIOS object table - end");
+
+ /* Create a link for each usb4 dpia port */
+ for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
+ struct link_init_data link_init_params = {0};
+ struct dc_link *link;
+
+ link_init_params.ctx = dc->ctx;
+ link_init_params.connector_index = i;
+ link_init_params.link_index = dc->link_count;
+ link_init_params.dc = dc;
+ link_init_params.is_dpia_link = true;
+
+ link = dc->link_srv->create_link(&link_init_params);
+ if (link) {
+ dc->links[dc->link_count] = link;
+ link->dc = dc;
+ ++dc->link_count;
+ }
+ }
+
+ for (i = 0; i < num_virtual_links; i++) {
+ struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
+ struct encoder_init_data enc_init = {0};
+
+ if (link == NULL) {
+ BREAK_TO_DEBUGGER();
+ goto failed_alloc;
+ }
+
+ link->link_index = dc->link_count;
+ dc->links[dc->link_count] = link;
+ dc->link_count++;
+
+ link->ctx = dc->ctx;
+ link->dc = dc;
+ link->connector_signal = SIGNAL_TYPE_VIRTUAL;
+ link->link_id.type = OBJECT_TYPE_CONNECTOR;
+ link->link_id.id = CONNECTOR_ID_VIRTUAL;
+ link->link_id.enum_id = ENUM_ID_1;
+ link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
+
+ if (!link->link_enc) {
+ BREAK_TO_DEBUGGER();
+ goto failed_alloc;
+ }
+
+ link->link_status.dpcd_caps = &link->dpcd_caps;
+
+ enc_init.ctx = dc->ctx;
+ enc_init.channel = CHANNEL_ID_UNKNOWN;
+ enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
+ enc_init.transmitter = TRANSMITTER_UNKNOWN;
+ enc_init.connector = link->link_id;
+ enc_init.encoder.type = OBJECT_TYPE_ENCODER;
+ enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
+ enc_init.encoder.enum_id = ENUM_ID_1;
+ virtual_link_encoder_construct(link->link_enc, &enc_init);
+ }
+
+ dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);
+
+ return true;
+
+failed_alloc:
+ return false;
+}
+
+/* Create additional DIG link encoder objects if fewer than the platform
+ * supports were created during link construction. This can happen if the
+ * number of physical connectors is less than the number of DIGs.
+ */
+static bool create_link_encoders(struct dc *dc)
+{
+ bool res = true;
+ unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
+ unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
+ int i;
+
+ /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
+ * link encoders and physical display endpoints and does not require
+ * additional link encoder objects.
+ */
+ if (num_usb4_dpia == 0)
+ return res;
+
+ /* Create as many link encoder objects as the platform supports. DPIA
+ * endpoints can be programmably mapped to any DIG.
+ */
+ if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
+ for (i = 0; i < num_dig_link_enc; i++) {
+ struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
+
+ if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
+ link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
+ (enum engine_id)(ENGINE_ID_DIGA + i));
+ if (link_enc) {
+ dc->res_pool->link_encoders[i] = link_enc;
+ dc->res_pool->dig_link_enc_count++;
+ } else {
+ res = false;
+ }
+ }
+ }
+ }
+
+ return res;
+}
+
+/* Destroy any additional DIG link encoder objects created by
+ * create_link_encoders().
+ * NB: Must only be called after destroy_links().
+ */
+static void destroy_link_encoders(struct dc *dc)
+{
+ unsigned int num_usb4_dpia;
+ unsigned int num_dig_link_enc;
+ int i;
+
+ if (!dc->res_pool)
+ return;
+
+ num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
+ num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
+
+ /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
+ * link encoders and physical display endpoints and does not require
+ * additional link encoder objects.
+ */
+ if (num_usb4_dpia == 0)
+ return;
+
+ for (i = 0; i < num_dig_link_enc; i++) {
+ struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
+
+ if (link_enc) {
+ link_enc->funcs->destroy(&link_enc);
+ dc->res_pool->link_encoders[i] = NULL;
+ dc->res_pool->dig_link_enc_count--;
+ }
+ }
+}
+
+static struct dc_perf_trace *dc_perf_trace_create(void)
+{
+ return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
+}
+
+static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
+{
+ kfree(*perf_trace);
+ *perf_trace = NULL;
+}
+
+static bool set_long_vtotal(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust)
+{
+	int i;
+
+	if (!dc || !stream || !adjust)
+		return false;
+
+	if (!dc->current_state)
+		return false;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.tg) {
+ if (dc->hwss.set_long_vtotal)
+ dc->hwss.set_long_vtotal(&pipe, 1, adjust->v_total_min, adjust->v_total_max);
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
+ * @dc: dc reference
+ * @stream: Initial dc stream state
+ * @adjust: Updated parameters for vertical_total_min and vertical_total_max
+ *
+ * Looks up the pipe context of dc_stream_state and updates the
+ * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
+ * Rate, which is a power-saving feature that targets reducing panel
+ * refresh rate while the screen is static.
+ *
+ * Return: %true if the pipe context is found and adjusted;
+ * %false if the pipe context is not found.
+ */
+bool dc_stream_adjust_vmin_vmax(struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_crtc_timing_adjust *adjust)
+{
+ int i;
+
+ /*
+	 * Don't adjust DRR while there are bandwidth optimizations pending, to
+	 * avoid conflicting with firmware updates.
+ */
+ if (dc->ctx->dce_version > DCE_VERSION_MAX)
+ if (dc->optimized_required || dc->wm_optimized_required)
+ return false;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ stream->adjust.v_total_max = adjust->v_total_max;
+ stream->adjust.v_total_mid = adjust->v_total_mid;
+ stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
+ stream->adjust.v_total_min = adjust->v_total_min;
+ stream->adjust.allow_otg_v_count_halt = adjust->allow_otg_v_count_halt;
+
+ if (dc->caps.max_v_total != 0 &&
+ (adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) {
+ if (adjust->allow_otg_v_count_halt)
+ return set_long_vtotal(dc, stream, adjust);
+ else
+ return false;
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.tg) {
+ dc->hwss.set_drr(&pipe,
+ 1,
+ *adjust);
+
+ return true;
+ }
+ }
+ return false;
+}
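+
+/* Illustrative sketch, not part of the driver: one way a DM could derive the
+ * adjust range passed to dc_stream_adjust_vmin_vmax(). The helper name is
+ * made up; the math assumes pix_clk_100hz is in units of 100 Hz, so
+ * refresh_hz = pix_clk_100hz * 100 / (h_total * v_total). Fields not set
+ * here (v_total_mid etc.) are assumed zero-initialized by the caller.
+ */
+static void __maybe_unused example_fill_drr_adjust(const struct dc_crtc_timing *timing,
+		unsigned int min_refresh_hz, unsigned int max_refresh_hz,
+		struct dc_crtc_timing_adjust *adjust)
+{
+	/* A lower refresh rate stretches v_total, so v_total_max comes from
+	 * the minimum refresh rate and v_total_min from the maximum.
+	 */
+	adjust->v_total_min = div_u64((u64)timing->pix_clk_100hz * 100,
+				      timing->h_total * max_refresh_hz);
+	adjust->v_total_max = div_u64((u64)timing->pix_clk_100hz * 100,
+				      timing->h_total * min_refresh_hz);
+}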
+
+/**
+ * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
+ * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
+ *
+ * @dc: [in] dc reference
+ * @stream: [in] Initial dc stream state
+ * @refresh_rate: [out] last VTOTAL used by DRR (despite the parameter name,
+ * this receives a VTOTAL value, not a refresh rate)
+ *
+ * Return: %true if the pipe context is found and there is an associated
+ * timing_generator for the DC;
+ * %false if the pipe context is not found or there is no
+ * timing_generator for the DC.
+ */
+bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
+ struct dc_stream_state *stream,
+ uint32_t *refresh_rate)
+{
+ bool status = false;
+
+ int i = 0;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.tg) {
+ /* Only execute if a function pointer has been defined for
+ * the DC version in question
+ */
+ if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
+ pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);
+
+ status = true;
+
+ break;
+ }
+ }
+ }
+
+ return status;
+}
+
+bool dc_stream_get_crtc_position(struct dc *dc,
+ struct dc_stream_state **streams, int num_streams,
+ unsigned int *v_pos, unsigned int *nom_v_pos)
+{
+ /* TODO: Support multiple streams */
+ const struct dc_stream_state *stream = streams[0];
+ int i;
+ bool ret = false;
+ struct crtc_position position;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.stream_enc) {
+ dc->hwss.get_position(&pipe, 1, &position);
+
+ *v_pos = position.vertical_count;
+ *nom_v_pos = position.nominal_vcount;
+ ret = true;
+ }
+ }
+ return ret;
+}
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+static inline void
+dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
+ struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
+{
+ union dmub_rb_cmd cmd = {0};
+
+ cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
+ cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;
+
+ if (is_stop) {
+ cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
+ cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
+ } else {
+ cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
+ cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
+ cmd.secure_display.roi_info.x_start = rect->x;
+ cmd.secure_display.roi_info.y_start = rect->y;
+ cmd.secure_display.roi_info.x_end = rect->x + rect->width;
+ cmd.secure_display.roi_info.y_end = rect->y + rect->height;
+ }
+
+ dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+}
+
+static inline void
+dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
+ struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
+{
+ if (is_stop)
+ dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
+ else
+ dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
+}
+
+bool
+dc_stream_forward_crc_window(struct dc_stream_state *stream,
+ struct rect *rect, bool is_stop)
+{
+ struct dmcu *dmcu;
+ struct dc_dmub_srv *dmub_srv;
+ struct otg_phy_mux mux_mapping;
+ struct pipe_ctx *pipe;
+ int i;
+ struct dc *dc = stream->ctx->dc;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
+ break;
+ }
+
+ /* Stream not found */
+ if (i == MAX_PIPES)
+ return false;
+
+ mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
+ mux_mapping.otg_output_num = pipe->stream_res.tg->inst;
+
+ dmcu = dc->res_pool->dmcu;
+ dmub_srv = dc->ctx->dmub_srv;
+
+ /* forward to dmub */
+ if (dmub_srv)
+ dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
+ /* forward to dmcu */
+ else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
+ dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
+ else
+ return false;
+
+ return true;
+}
+#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
+
+/**
+ * dc_stream_configure_crc() - Configure CRC capture for the given stream.
+ * @dc: DC Object
+ * @stream: The stream to configure CRC on.
+ * @enable: Enable CRC if true, disable otherwise.
+ * @crc_window: CRC window (x/y start/end) information
+ * @continuous: Capture CRC on every frame if true. Otherwise, only capture
+ * once.
+ *
+ * By default, only CRC0 is configured, and the entire frame is used to
+ * calculate the CRC.
+ *
+ * Return: %false if the stream is not found or CRC capture is not supported;
+ * %true if the stream has been configured.
+ */
+bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
+ struct crc_params *crc_window, bool enable, bool continuous)
+{
+ struct pipe_ctx *pipe;
+ struct crc_params param;
+ struct timing_generator *tg;
+
+ pipe = resource_get_otg_master_for_stream(
+ &dc->current_state->res_ctx, stream);
+
+ /* Stream not found */
+ if (pipe == NULL)
+ return false;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ /* By default, capture the full frame */
+ param.windowa_x_start = 0;
+ param.windowa_y_start = 0;
+ param.windowa_x_end = pipe->stream->timing.h_addressable;
+ param.windowa_y_end = pipe->stream->timing.v_addressable;
+ param.windowb_x_start = 0;
+ param.windowb_y_start = 0;
+ param.windowb_x_end = pipe->stream->timing.h_addressable;
+ param.windowb_y_end = pipe->stream->timing.v_addressable;
+
+ if (crc_window) {
+ param.windowa_x_start = crc_window->windowa_x_start;
+ param.windowa_y_start = crc_window->windowa_y_start;
+ param.windowa_x_end = crc_window->windowa_x_end;
+ param.windowa_y_end = crc_window->windowa_y_end;
+ param.windowb_x_start = crc_window->windowb_x_start;
+ param.windowb_y_start = crc_window->windowb_y_start;
+ param.windowb_x_end = crc_window->windowb_x_end;
+ param.windowb_y_end = crc_window->windowb_y_end;
+ }
+
+ param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
+ param.odm_mode = pipe->next_odm_pipe ? 1:0;
+
+ /* Default to the union of both windows */
+ param.selection = UNION_WINDOW_A_B;
+ param.continuous_mode = continuous;
+ param.enable = enable;
+
+ tg = pipe->stream_res.tg;
+
+ /* Only call if supported */
+ if (tg->funcs->configure_crc)
+ return tg->funcs->configure_crc(tg, &param);
+ DC_LOG_WARNING("CRC capture not supported.");
+ return false;
+}
+
+/**
+ * dc_stream_get_crc() - Get CRC values for the given stream.
+ *
+ * @dc: DC object.
+ * @stream: The DC stream state of the stream to get CRCs from.
+ * @r_cr: CRC value for the red component.
+ * @g_y: CRC value for the green component.
+ * @b_cb: CRC value for the blue component.
+ *
+ * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
+ *
+ * Return:
+ * %false if stream is not found, or if CRCs are not enabled.
+ */
+bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
+{
+ int i;
+ struct pipe_ctx *pipe;
+ struct timing_generator *tg;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream == stream)
+ break;
+ }
+ /* Stream not found */
+ if (i == MAX_PIPES)
+ return false;
+
+ tg = pipe->stream_res.tg;
+
+ if (tg->funcs->get_crc)
+ return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
+ DC_LOG_WARNING("CRC capture not supported.");
+ return false;
+}
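+
+/* Illustrative sketch, not part of the driver: the expected call order for
+ * one-shot CRC capture. The helper name is made up; error handling and the
+ * wait for a completed frame are reduced to comments.
+ */
+static bool __maybe_unused example_capture_frame_crc(struct dc *dc,
+		struct dc_stream_state *stream,
+		uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
+{
+	/* A NULL crc_window selects the full frame; continuous=false arms a
+	 * single capture.
+	 */
+	if (!dc_stream_configure_crc(dc, stream, NULL, true, false))
+		return false;
+
+	/* The caller must wait for at least one completed frame (e.g. the
+	 * next vblank) before the CRC registers hold valid values.
+	 */
+	return dc_stream_get_crc(dc, stream, r_cr, g_y, b_cb);
+}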
+
+void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
+ enum dc_dynamic_expansion option)
+{
+ /* OPP FMT dyn expansion updates*/
+ int i;
+ struct pipe_ctx *pipe_ctx;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ pipe_ctx->stream_res.opp->dyn_expansion = option;
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ stream->timing.display_color_depth,
+ stream->signal);
+ }
+ }
+}
+
+void dc_stream_set_dither_option(struct dc_stream_state *stream,
+ enum dc_dither_option option)
+{
+ struct bit_depth_reduction_params params;
+ struct dc_link *link = stream->link;
+ struct pipe_ctx *pipes = NULL;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
+ stream) {
+ pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ break;
+ }
+ }
+
+ if (!pipes)
+ return;
+ if (option > DITHER_OPTION_MAX)
+ return;
+
+ dc_exit_ips_for_hw_access(stream->ctx->dc);
+
+ stream->dither_option = option;
+
+ memset(&params, 0, sizeof(params));
+ resource_build_bit_depth_reduction_params(stream, &params);
+ stream->bit_depth_params = params;
+
+ if (pipes->plane_res.xfm &&
+ pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
+ pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
+ pipes->plane_res.xfm,
+ pipes->plane_res.scl_data.lb_params.depth,
+ &stream->bit_depth_params);
+ }
+
+ pipes->stream_res.opp->funcs->
+ opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
+}
+
+bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
+{
+ int i;
+ bool ret = false;
+ struct pipe_ctx *pipes;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
+ pipes = &dc->current_state->res_ctx.pipe_ctx[i];
+ dc->hwss.program_gamut_remap(pipes);
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
+{
+ int i;
+ bool ret = false;
+ struct pipe_ctx *pipes;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+
+ pipes = &dc->current_state->res_ctx.pipe_ctx[i];
+ dc->hwss.program_output_csc(dc,
+ pipes,
+ stream->output_color_space,
+ stream->csc_color_matrix.matrix,
+ pipes->stream_res.opp->inst);
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+void dc_stream_set_static_screen_params(struct dc *dc,
+ struct dc_stream_state **streams,
+ int num_streams,
+ const struct dc_static_screen_params *params)
+{
+ int i, j;
+ struct pipe_ctx *pipes_affected[MAX_PIPES];
+ int num_pipes_affected = 0;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ for (i = 0; i < num_streams; i++) {
+ struct dc_stream_state *stream = streams[i];
+
+ for (j = 0; j < MAX_PIPES; j++) {
+ if (dc->current_state->res_ctx.pipe_ctx[j].stream
+ == stream) {
+ pipes_affected[num_pipes_affected++] =
+ &dc->current_state->res_ctx.pipe_ctx[j];
+ }
+ }
+ }
+
+ dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
+}
+
+static void dc_destruct(struct dc *dc)
+{
+ // reset link encoder assignment table on destruct
+ if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
+ link_enc_cfg_init(dc, dc->current_state);
+
+ if (dc->current_state) {
+ dc_state_release(dc->current_state);
+ dc->current_state = NULL;
+ }
+
+ destroy_links(dc);
+
+ destroy_link_encoders(dc);
+
+ if (dc->clk_mgr) {
+ dc_destroy_clk_mgr(dc->clk_mgr);
+ dc->clk_mgr = NULL;
+ }
+
+ dc_destroy_resource_pool(dc);
+
+ if (dc->link_srv)
+ link_destroy_link_service(&dc->link_srv);
+
+ if (dc->ctx->gpio_service)
+ dal_gpio_service_destroy(&dc->ctx->gpio_service);
+
+ if (dc->ctx->created_bios)
+ dal_bios_parser_destroy(&dc->ctx->dc_bios);
+
+ kfree(dc->ctx->logger);
+ dc_perf_trace_destroy(&dc->ctx->perf_trace);
+
+ kfree(dc->ctx);
+ dc->ctx = NULL;
+
+ kfree(dc->bw_vbios);
+ dc->bw_vbios = NULL;
+
+ kfree(dc->bw_dceip);
+ dc->bw_dceip = NULL;
+
+ kfree(dc->dcn_soc);
+ dc->dcn_soc = NULL;
+
+ kfree(dc->dcn_ip);
+ dc->dcn_ip = NULL;
+
+ kfree(dc->vm_helper);
+ dc->vm_helper = NULL;
+
+}
+
+static bool dc_construct_ctx(struct dc *dc,
+ const struct dc_init_data *init_params)
+{
+ struct dc_context *dc_ctx;
+
+ dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
+ if (!dc_ctx)
+ return false;
+
+ dc_ctx->cgs_device = init_params->cgs_device;
+ dc_ctx->driver_context = init_params->driver;
+ dc_ctx->dc = dc;
+ dc_ctx->asic_id = init_params->asic_id;
+ dc_ctx->dc_sink_id_count = 0;
+ dc_ctx->dc_stream_id_count = 0;
+ dc_ctx->dce_environment = init_params->dce_environment;
+ dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
+ dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
+ dc_ctx->clk_reg_offsets = init_params->clk_reg_offsets;
+
+ /* Create logger */
+ dc_ctx->logger = kmalloc(sizeof(*dc_ctx->logger), GFP_KERNEL);
+
+ if (!dc_ctx->logger) {
+ kfree(dc_ctx);
+ return false;
+ }
+
+ dc_ctx->logger->dev = adev_to_drm(init_params->driver);
+ dc->dml.logger = dc_ctx->logger;
+
+ dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);
+
+ dc_ctx->perf_trace = dc_perf_trace_create();
+ if (!dc_ctx->perf_trace) {
+ kfree(dc_ctx);
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+
+ dc->ctx = dc_ctx;
+
+ dc->link_srv = link_create_link_service();
+ if (!dc->link_srv)
+ return false;
+
+ return true;
+}
+
+static bool dc_construct(struct dc *dc,
+ const struct dc_init_data *init_params)
+{
+ struct dc_context *dc_ctx;
+ struct bw_calcs_dceip *dc_dceip;
+ struct bw_calcs_vbios *dc_vbios;
+ struct dcn_soc_bounding_box *dcn_soc;
+ struct dcn_ip_params *dcn_ip;
+
+ dc->config = init_params->flags;
+
+ // Allocate memory for the vm_helper
+ dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
+ if (!dc->vm_helper) {
+ dm_error("%s: failed to create dc->vm_helper\n", __func__);
+ goto fail;
+ }
+
+ memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
+
+ dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
+ if (!dc_dceip) {
+ dm_error("%s: failed to create dceip\n", __func__);
+ goto fail;
+ }
+
+ dc->bw_dceip = dc_dceip;
+
+ dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
+ if (!dc_vbios) {
+ dm_error("%s: failed to create vbios\n", __func__);
+ goto fail;
+ }
+
+ dc->bw_vbios = dc_vbios;
+ dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
+ if (!dcn_soc) {
+ dm_error("%s: failed to create dcn_soc\n", __func__);
+ goto fail;
+ }
+
+ dc->dcn_soc = dcn_soc;
+
+ dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
+ if (!dcn_ip) {
+ dm_error("%s: failed to create dcn_ip\n", __func__);
+ goto fail;
+ }
+
+ dc->dcn_ip = dcn_ip;
+
+ if (!dc_construct_ctx(dc, init_params)) {
+ dm_error("%s: failed to create ctx\n", __func__);
+ goto fail;
+ }
+
+ dc_ctx = dc->ctx;
+
+ /* Resource should construct all asic specific resources.
+ * This should be the only place where we need to parse the asic id
+ */
+ if (init_params->vbios_override)
+ dc_ctx->dc_bios = init_params->vbios_override;
+ else {
+ /* Create BIOS parser */
+ struct bp_init_data bp_init_data;
+
+ bp_init_data.ctx = dc_ctx;
+ bp_init_data.bios = init_params->asic_id.atombios_base_address;
+
+ dc_ctx->dc_bios = dal_bios_parser_create(
+ &bp_init_data, dc_ctx->dce_version);
+
+ if (!dc_ctx->dc_bios) {
+ ASSERT_CRITICAL(false);
+ goto fail;
+ }
+
+ dc_ctx->created_bios = true;
+ }
+
+ dc->vendor_signature = init_params->vendor_signature;
+
+ /* Create GPIO service */
+ dc_ctx->gpio_service = dal_gpio_service_create(
+ dc_ctx->dce_version,
+ dc_ctx->dce_environment,
+ dc_ctx);
+
+ if (!dc_ctx->gpio_service) {
+ ASSERT_CRITICAL(false);
+ goto fail;
+ }
+
+ dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
+ if (!dc->res_pool)
+ goto fail;
+
+ /* set i2c speed if not done by the respective dcnxxx__resource.c */
+ if (dc->caps.i2c_speed_in_khz_hdcp == 0)
+ dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
+ if (dc->caps.max_optimizable_video_width == 0)
+ dc->caps.max_optimizable_video_width = 5120;
+ dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
+ if (!dc->clk_mgr)
+ goto fail;
+#ifdef CONFIG_DRM_AMD_DC_FP
+ dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
+
+ if (dc->res_pool->funcs->update_bw_bounding_box) {
+ DC_FP_START();
+ dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
+ DC_FP_END();
+ }
+#endif
+
+ if (!create_links(dc, init_params->num_virtual_links))
+ goto fail;
+
+ /* Create additional DIG link encoder objects if fewer than the platform
+ * supports were created during link construction.
+ */
+ if (!create_link_encoders(dc))
+ goto fail;
+
+ /* Creation of current_state must occur after dc->dml
+ * is initialized in dc_create_resource_pool because
+	 * on creation it copies the contents of dc->dml.
+ */
+ dc->current_state = dc_state_create(dc, NULL);
+
+ if (!dc->current_state) {
+ dm_error("%s: failed to create validate ctx\n", __func__);
+ goto fail;
+ }
+
+ return true;
+
+fail:
+ return false;
+}
+
+static void disable_all_writeback_pipes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < stream->num_wb_info; i++)
+ stream->writeback_info[i].wb_enabled = false;
+}
+
+static void apply_ctx_interdependent_lock(struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream,
+ bool lock)
+{
+ int i;
+
+ /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
+ if (dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, lock);
+ else {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ // Copied conditions that were previously in dce110_apply_ctx_for_surface
+ if (stream == pipe_ctx->stream) {
+ if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
+ (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
+ }
+ }
+ }
+}
+
+static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+{
+ if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
+ memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));
+
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
+ get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
+ get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
+ get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else {
+ if (dc->ctx->dce_version < DCN_VERSION_2_0)
+ color_space_to_black_color(
+ dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color));
+ }
+ if (dc->ctx->dce_version >= DCN_VERSION_2_0) {
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
+ get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
+ get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
+ get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ }
+ }
+}
+
+static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+{
+ int i, j;
+ struct dc_state *dangling_context = dc_state_create_current_copy(dc);
+ struct dc_state *current_ctx;
+ struct pipe_ctx *pipe;
+ struct timing_generator *tg;
+
+ if (dangling_context == NULL)
+ return;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *old_stream =
+ dc->current_state->res_ctx.pipe_ctx[i].stream;
+ bool should_disable = true;
+ bool pipe_split_change = false;
+
+ if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
+ (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
+ pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
+ dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
+ else
+ pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
+ dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
+
+ for (j = 0; j < context->stream_count; j++) {
+ if (old_stream == context->streams[j]) {
+ should_disable = false;
+ break;
+ }
+ }
+ if (!should_disable && pipe_split_change &&
+ dc->current_state->stream_count != context->stream_count)
+ should_disable = true;
+
+ if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
+ !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
+ struct pipe_ctx *old_pipe, *new_pipe;
+
+ old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ new_pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (old_pipe->plane_state && !new_pipe->plane_state)
+ should_disable = true;
+ }
+
+ if (should_disable && old_stream) {
+ bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
+ /* When disabling plane for a phantom pipe, we must turn on the
+ * phantom OTG so the disable programming gets the double buffer
+ * update. Otherwise the pipe will be left in a partially disabled
+ * state that can result in underflow or hang when enabling it
+ * again for different use.
+ */
+ if (is_phantom) {
+ if (tg->funcs->enable_crtc) {
+ int main_pipe_width, main_pipe_height;
+ struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);
+
+ main_pipe_width = old_paired_stream->dst.width;
+ main_pipe_height = old_paired_stream->dst.height;
+ if (dc->hwss.blank_phantom)
+ dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
+ tg->funcs->enable_crtc(tg);
+ }
+ }
+
+ if (is_phantom)
+ dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
+ else
+ dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
+ disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
+
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
+ dc_update_visual_confirm_color(dc, context, pipe);
+ }
+
+ if (dc->hwss.apply_ctx_for_surface) {
+ apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
+ dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
+ apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
+ dc->hwss.post_unlock_program_front_end(dc, dangling_context);
+ }
+ if (dc->hwss.program_front_end_for_ctx) {
+ dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
+ dc->hwss.program_front_end_for_ctx(dc, dangling_context);
+ dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
+ dc->hwss.post_unlock_program_front_end(dc, dangling_context);
+ }
+			/* We need to put the phantom OTG back into its default (disabled) state or we
+			 * can get corruption when transitioning from one SubVP config to a different one.
+			 * The OTG is set to disable on falling edge of VUPDATE so the plane disable
+			 * will still get its double buffer update.
+			 */
+ if (is_phantom) {
+ if (tg->funcs->disable_phantom_crtc)
+ tg->funcs->disable_phantom_crtc(tg);
+ }
+ }
+ }
+
+ current_ctx = dc->current_state;
+ dc->current_state = dangling_context;
+ dc_state_release(current_ctx);
+}
+
+static void disable_vbios_mode_if_required(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ unsigned int i, j;
+
+	/* check if timing changed; if so, disable the stream */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *stream = NULL;
+ struct dc_link *link = NULL;
+ struct pipe_ctx *pipe = NULL;
+
+ pipe = &context->res_ctx.pipe_ctx[i];
+ stream = pipe->stream;
+ if (stream == NULL)
+ continue;
+
+ if (stream->apply_seamless_boot_optimization)
+ continue;
+
+ // only looking for first odm pipe
+ if (pipe->prev_odm_pipe)
+ continue;
+
+ if (stream->link->local_sink &&
+ stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ link = stream->link;
+ }
+
+ if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
+ unsigned int enc_inst, tg_inst = 0;
+ unsigned int pix_clk_100hz;
+
+ enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
+ if (enc_inst != ENGINE_ID_UNKNOWN) {
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (dc->res_pool->stream_enc[j]->id == enc_inst) {
+ tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+
+ dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
+ dc->res_pool->dp_clock_source,
+ tg_inst, &pix_clk_100hz);
+
+ if (link->link_status.link_active) {
+ uint32_t requested_pix_clk_100hz =
+ pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
+
+ if (pix_clk_100hz != requested_pix_clk_100hz) {
+ dc->link_srv->set_dpms_off(pipe);
+ pipe->stream->dpms_off = false;
+ }
+ }
+ }
+ }
+ }
+}
+
+/**
+ * wait_for_blank_complete - wait for all active OPPs to finish pending blank
+ * pattern updates
+ *
+ * @dc: [in] dc reference
+ * @context: [in] hardware context in use
+ */
+static void wait_for_blank_complete(struct dc *dc,
+ struct dc_state *context)
+{
+ struct pipe_ctx *opp_head;
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ if (!hws->funcs.wait_for_blank_complete)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ opp_head = &context->res_ctx.pipe_ctx[i];
+
+ if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
+ dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
+ continue;
+
+ hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
+ }
+}
+
+static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
+{
+ struct pipe_ctx *otg_master;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ otg_master = &context->res_ctx.pipe_ctx[i];
+ if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
+ dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
+ continue;
+ tg = otg_master->stream_res.tg;
+ if (tg->funcs->wait_odm_doublebuffer_pending_clear)
+ tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+ }
+
+	/* An ODM update may require reprogramming the blank pattern for each OPP */
+ wait_for_blank_complete(dc, context);
+}
+
+static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ PERF_TRACE();
+ for (i = 0; i < MAX_PIPES; i++) {
+ int count = 0;
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
+ continue;
+
+ /* Timeout 100 ms */
+ while (count < 100000) {
+ /* Must set to false to start with, due to OR in update function */
+ pipe->plane_state->status.is_flip_pending = false;
+ dc->hwss.update_pending_status(pipe);
+ if (!pipe->plane_state->status.is_flip_pending)
+ break;
+ udelay(1);
+ count++;
+ }
+ ASSERT(!pipe->plane_state->status.is_flip_pending);
+ }
+ PERF_TRACE();
+}
+
+/* Public functions */
+
+struct dc *dc_create(const struct dc_init_data *init_params)
+{
+ struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
+ unsigned int full_pipe_count;
+
+ if (!dc)
+ return NULL;
+
+ if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
+ if (!dc_construct_ctx(dc, init_params))
+ goto destruct_dc;
+ } else {
+ if (!dc_construct(dc, init_params))
+ goto destruct_dc;
+
+ full_pipe_count = dc->res_pool->pipe_count;
+ if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
+ full_pipe_count--;
+ dc->caps.max_streams = min(
+ full_pipe_count,
+ dc->res_pool->stream_enc_count);
+
+ dc->caps.max_links = dc->link_count;
+ dc->caps.max_audios = dc->res_pool->audio_count;
+ dc->caps.linear_pitch_alignment = 64;
+
+ dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
+
+ dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
+
+ if (dc->res_pool->dmcu != NULL)
+ dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
+ }
+
+ dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
+ dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
+ dc->clk_reg_offsets = init_params->clk_reg_offsets;
+
+ /* Populate versioning information */
+ dc->versions.dc_ver = DC_VER;
+
+ dc->build_id = DC_BUILD_ID;
+
+ DC_LOG_DC("Display Core initialized\n");
+
+
+
+ return dc;
+
+destruct_dc:
+ dc_destruct(dc);
+ kfree(dc);
+ return NULL;
+}
+
+static void detect_edp_presence(struct dc *dc)
+{
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ struct dc_link *edp_link = NULL;
+ enum dc_connection_type type;
+ int i;
+ int edp_num;
+
+ dc_get_edp_links(dc, edp_links, &edp_num);
+ if (!edp_num)
+ return;
+
+ for (i = 0; i < edp_num; i++) {
+ edp_link = edp_links[i];
+ if (dc->config.edp_not_connected) {
+ edp_link->edp_sink_present = false;
+ } else {
+ dc_link_detect_connection_type(edp_link, &type);
+ edp_link->edp_sink_present = (type != dc_connection_none);
+ }
+ }
+}
+
+void dc_hardware_init(struct dc *dc)
+{
+
+ detect_edp_presence(dc);
+ if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
+ dc->hwss.init_hw(dc);
+}
+
+void dc_init_callbacks(struct dc *dc,
+ const struct dc_callback_init *init_params)
+{
+ dc->ctx->cp_psp = init_params->cp_psp;
+}
+
+void dc_deinit_callbacks(struct dc *dc)
+{
+ memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
+}
+
+void dc_destroy(struct dc **dc)
+{
+ dc_destruct(*dc);
+ kfree(*dc);
+ *dc = NULL;
+}
+
+static void enable_timing_multisync(
+ struct dc *dc,
+ struct dc_state *ctx)
+{
+ int i, multisync_count = 0;
+ int pipe_count = dc->res_pool->pipe_count;
+ struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
+
+ for (i = 0; i < pipe_count; i++) {
+ if (!ctx->res_ctx.pipe_ctx[i].stream ||
+ !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
+ continue;
+ if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
+ continue;
+ multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
+ multisync_count++;
+ }
+
+ if (multisync_count > 0) {
+ dc->hwss.enable_per_frame_crtc_position_reset(
+ dc, multisync_count, multisync_pipes);
+ }
+}
+
+static void program_timing_sync(
+ struct dc *dc,
+ struct dc_state *ctx)
+{
+ int i, j, k;
+ int group_index = 0;
+ int num_group = 0;
+ int pipe_count = dc->res_pool->pipe_count;
+ struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
+
+ for (i = 0; i < pipe_count; i++) {
+ if (!ctx->res_ctx.pipe_ctx[i].stream
+ || ctx->res_ctx.pipe_ctx[i].top_pipe
+ || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
+ continue;
+
+ unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
+ }
+
+ for (i = 0; i < pipe_count; i++) {
+ int group_size = 1;
+ enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
+ struct pipe_ctx *pipe_set[MAX_PIPES];
+
+ if (!unsynced_pipes[i])
+ continue;
+
+ pipe_set[0] = unsynced_pipes[i];
+ unsynced_pipes[i] = NULL;
+
+		/* Add tg to the set, search the rest of the tgs for ones with
+		 * the same timing, and add all tgs with the same timing to the
+		 * group.
+		 */
+ for (j = i + 1; j < pipe_count; j++) {
+ if (!unsynced_pipes[j])
+ continue;
+ if (sync_type != TIMING_SYNCHRONIZABLE &&
+ dc->hwss.enable_vblanks_synchronization &&
+ unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
+ resource_are_vblanks_synchronizable(
+ unsynced_pipes[j]->stream,
+ pipe_set[0]->stream)) {
+ sync_type = VBLANK_SYNCHRONIZABLE;
+ pipe_set[group_size] = unsynced_pipes[j];
+ unsynced_pipes[j] = NULL;
+ group_size++;
+ } else
+ if (sync_type != VBLANK_SYNCHRONIZABLE &&
+ resource_are_streams_timing_synchronizable(
+ unsynced_pipes[j]->stream,
+ pipe_set[0]->stream)) {
+ sync_type = TIMING_SYNCHRONIZABLE;
+ pipe_set[group_size] = unsynced_pipes[j];
+ unsynced_pipes[j] = NULL;
+ group_size++;
+ }
+ }
+
+ /* set first unblanked pipe as master */
+ for (j = 0; j < group_size; j++) {
+ bool is_blanked;
+
+ if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+ is_blanked =
+ pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+ else
+ is_blanked =
+ pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+ if (!is_blanked) {
+ if (j == 0)
+ break;
+
+ swap(pipe_set[0], pipe_set[j]);
+ break;
+ }
+ }
+
+ for (k = 0; k < group_size; k++) {
+ struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);
+
+ status->timing_sync_info.group_id = num_group;
+ status->timing_sync_info.group_size = group_size;
+ if (k == 0)
+ status->timing_sync_info.master = true;
+ else
+ status->timing_sync_info.master = false;
+
+ }
+
+ /* remove any other unblanked pipes as they have already been synced */
+ if (dc->config.use_pipe_ctx_sync_logic) {
+ /* check pipe's syncd to decide which pipe to be removed */
+ for (j = 1; j < group_size; j++) {
+ if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
+ group_size--;
+ pipe_set[j] = pipe_set[group_size];
+ j--;
+ } else
+ /* link slave pipe's syncd with master pipe */
+ pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
+ }
+ } else {
+ /* remove any other pipes by checking valid plane */
+ for (j = j + 1; j < group_size; j++) {
+ bool is_blanked;
+
+ if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+ is_blanked =
+ pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+ else
+ is_blanked =
+ pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+ if (!is_blanked) {
+ group_size--;
+ pipe_set[j] = pipe_set[group_size];
+ j--;
+ }
+ }
+ }
+
+ if (group_size > 1) {
+ if (sync_type == TIMING_SYNCHRONIZABLE) {
+ dc->hwss.enable_timing_synchronization(
+ dc, ctx, group_index, group_size, pipe_set);
+ } else
+ if (sync_type == VBLANK_SYNCHRONIZABLE) {
+ dc->hwss.enable_vblanks_synchronization(
+ dc, group_index, group_size, pipe_set);
+ }
+ group_index++;
+ }
+ num_group++;
+ }
+}
+
+static bool streams_changed(struct dc *dc,
+ struct dc_stream_state *streams[],
+ uint8_t stream_count)
+{
+ uint8_t i;
+
+ if (stream_count != dc->current_state->stream_count)
+ return true;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc->current_state->streams[i] != streams[i])
+ return true;
+ if (!streams[i]->link->link_state_valid)
+ return true;
+ }
+
+ return false;
+}
+
+bool dc_validate_boot_timing(const struct dc *dc,
+ const struct dc_sink *sink,
+ struct dc_crtc_timing *crtc_timing)
+{
+ struct timing_generator *tg;
+ struct stream_encoder *se = NULL;
+
+ struct dc_crtc_timing hw_crtc_timing = {0};
+
+ struct dc_link *link = sink->link;
+ unsigned int i, enc_inst, tg_inst = 0;
+
+ /* Support seamless boot on EDP displays only */
+ if (sink->sink_signal != SIGNAL_TYPE_EDP) {
+ return false;
+ }
+
+ if (dc->debug.force_odm_combine)
+ return false;
+
+ /* Check for enabled DIG to identify enabled display */
+ if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ return false;
+
+ enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
+
+ if (enc_inst == ENGINE_ID_UNKNOWN)
+ return false;
+
+ for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
+ if (dc->res_pool->stream_enc[i]->id == enc_inst) {
+
+ se = dc->res_pool->stream_enc[i];
+
+ tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
+ dc->res_pool->stream_enc[i]);
+ break;
+ }
+ }
+
+ // tg_inst not found
+ if (i == dc->res_pool->stream_enc_count)
+ return false;
+
+ if (tg_inst >= dc->res_pool->timing_generator_count)
+ return false;
+
+ if (tg_inst != link->link_enc->preferred_engine)
+ return false;
+
+ tg = dc->res_pool->timing_generators[tg_inst];
+
+ if (!tg->funcs->get_hw_timing)
+ return false;
+
+ if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
+ return false;
+
+ if (crtc_timing->h_total != hw_crtc_timing.h_total)
+ return false;
+
+ if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
+ return false;
+
+ if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
+ return false;
+
+ if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
+ return false;
+
+ if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
+ return false;
+
+ if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
+ return false;
+
+ if (crtc_timing->v_total != hw_crtc_timing.v_total)
+ return false;
+
+ if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
+ return false;
+
+ if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
+ return false;
+
+ if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
+ return false;
+
+ if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
+ return false;
+
+ if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
+ return false;
+
+ /* block DSC for now, as VBIOS does not currently support DSC timings */
+ if (crtc_timing->flags.DSC)
+ return false;
+
+ if (dc_is_dp_signal(link->connector_signal)) {
+ unsigned int pix_clk_100hz;
+ uint32_t numOdmPipes = 1;
+ uint32_t id_src[4] = {0};
+
+ dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
+ dc->res_pool->dp_clock_source,
+ tg_inst, &pix_clk_100hz);
+
+ if (tg->funcs->get_optc_source)
+ tg->funcs->get_optc_source(tg,
+ &numOdmPipes, &id_src[0], &id_src[1]);
+
+ if (numOdmPipes == 2)
+ pix_clk_100hz *= 2;
+ if (numOdmPipes == 4)
+ pix_clk_100hz *= 4;
+
+ // Note: In rare cases, HW pixclk may differ from crtc's pixclk
+ // slightly due to rounding issues in 10 kHz units.
+ if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
+ return false;
+
+ if (!se->funcs->dp_get_pixel_format)
+ return false;
+
+ if (!se->funcs->dp_get_pixel_format(
+ se,
+ &hw_crtc_timing.pixel_encoding,
+ &hw_crtc_timing.display_color_depth))
+ return false;
+
+ if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
+ return false;
+
+ if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
+ return false;
+ }
+
+ if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
+ return false;
+ }
+
+ if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
+ DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
+ return false;
+ }
+
+ return true;
+}
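+
+/* Illustrative sketch, not part of the driver: how a DM might use
+ * dc_validate_boot_timing() to opt a stream into the seamless boot path.
+ * The helper name is made up; "stream" and its sink are assumed valid.
+ */
+static void __maybe_unused example_mark_seamless_boot(const struct dc *dc,
+		struct dc_stream_state *stream)
+{
+	/* Only request the optimization when the VBIOS-lit timing already
+	 * matches what is about to be programmed; this is the flag counted
+	 * by get_seamless_boot_stream_count() above.
+	 */
+	if (dc_validate_boot_timing(dc, stream->sink, &stream->timing))
+		stream->apply_seamless_boot_optimization = true;
+}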
+
+static inline bool should_update_pipe_for_stream(
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream)
+{
+ return (pipe_ctx->stream && pipe_ctx->stream == stream);
+}
+
+static inline bool should_update_pipe_for_plane(
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_plane_state *plane_state)
+{
+ return (pipe_ctx->plane_state == plane_state);
+}
+
+void dc_enable_stereo(
+ struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *streams[],
+ uint8_t stream_count)
+{
+ int i, j;
+ struct pipe_ctx *pipe;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (context != NULL) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ } else {
+ context = dc->current_state;
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ }
+
+ for (j = 0; pipe && j < stream_count; j++) {
+ if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
+ dc->hwss.setup_stereo)
+ dc->hwss.setup_stereo(pipe, dc);
+ }
+ }
+}
+
+void dc_trigger_sync(struct dc *dc, struct dc_state *context)
+{
+ if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
+ dc_exit_ips_for_hw_access(dc);
+
+ enable_timing_multisync(dc, context);
+ program_timing_sync(dc, context);
+ }
+}
+
+static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ unsigned int stream_mask = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream)
+ stream_mask |= 1 << i;
+ }
+
+ return stream_mask;
+}
+
+void dc_z10_restore(const struct dc *dc)
+{
+ if (dc->hwss.z10_restore)
+ dc->hwss.z10_restore(dc);
+}
+
+void dc_z10_save_init(struct dc *dc)
+{
+ if (dc->hwss.z10_save_init)
+ dc->hwss.z10_save_init(dc);
+}
+
+/**
+ * dc_commit_state_no_check - Apply context to the hardware
+ *
+ * @dc: DC object with the current status to be updated
+ * @context: New state that will become the current status at the end of this function
+ *
+ * Applies given context to the hardware and copy it into current context.
+ * It's up to the user to release the src context afterwards.
+ *
+ * Return: an enum dc_status result code for the operation
+ */
+static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
+{
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+ struct pipe_ctx *pipe;
+ int i, k, l;
+ struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
+ struct dc_state *old_state;
+ bool subvp_prev_use = false;
+
+ dc_z10_restore(dc);
+ dc_allow_idle_optimizations(dc, false);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ /* Check old context for SubVP */
+ subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
+ if (subvp_prev_use)
+ break;
+ }
+
+ for (i = 0; i < context->stream_count; i++)
+ dc_streams[i] = context->streams[i];
+
+ if (!dcb->funcs->is_accelerated_mode(dcb)) {
+ disable_vbios_mode_if_required(dc, context);
+ dc->hwss.enable_accelerated_mode(dc, context);
+ }
+
+ if (context->stream_count > get_seamless_boot_stream_count(context) ||
+ context->stream_count == 0)
+ dc->hwss.prepare_bandwidth(dc, context);
+
+ /* When SubVP is active, all HW programming must be done while
+ * SubVP lock is acquired
+ */
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
+
+ if (dc->hwss.update_dsc_pg)
+ dc->hwss.update_dsc_pg(dc, context, false);
+
+ disable_dangling_plane(dc, context);
+	/* re-program planes for existing streams, in case we need to
+	 * free up plane resources for later use
+	 */
+ if (dc->hwss.apply_ctx_for_surface) {
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->mode_changed)
+ continue;
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context); /* use new pipe config in new context */
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
+ }
+ }
+
+ /* Program hardware */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
+ }
+
+ result = dc->hwss.apply_ctx_to_hw(dc, context);
+
+ if (result != DC_OK) {
+ /* Application of dc_state to hardware stopped. */
+ dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
+ return result;
+ }
+
+ dc_trigger_sync(dc, context);
+
+ /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */
+ for (i = 0; i < context->stream_count; i++) {
+ uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed;
+
+ context->streams[i]->update_flags.raw = 0xFFFFFFFF;
+ context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed;
+ }
+
+ /* Program all planes within new context*/
+ if (dc->hwss.program_front_end_for_ctx) {
+ dc->hwss.interdependent_update_lock(dc, context, true);
+ dc->hwss.program_front_end_for_ctx(dc, context);
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
+ }
+
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
+
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_link *link = context->streams[i]->link;
+
+ if (!context->streams[i]->mode_changed)
+ continue;
+
+ if (dc->hwss.apply_ctx_for_surface) {
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context);
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
+ }
+
+ /*
+ * enable stereo
+ * TODO rework dc_enable_stereo call to work with validation sets?
+ */
+ for (k = 0; k < MAX_PIPES; k++) {
+ pipe = &context->res_ctx.pipe_ctx[k];
+
+ for (l = 0 ; pipe && l < context->stream_count; l++) {
+ if (context->streams[l] &&
+ context->streams[l] == pipe->stream &&
+ dc->hwss.setup_stereo)
+ dc->hwss.setup_stereo(pipe, dc);
+ }
+ }
+
+ CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dkHz}",
+ context->streams[i]->timing.h_addressable,
+ context->streams[i]->timing.v_addressable,
+ context->streams[i]->timing.h_total,
+ context->streams[i]->timing.v_total,
+ context->streams[i]->timing.pix_clk_100hz / 10);
+ }
+
+ dc_enable_stereo(dc, context, dc_streams, context->stream_count);
+
+ if (context->stream_count > get_seamless_boot_stream_count(context) ||
+ context->stream_count == 0) {
+ /* Must wait for no flips to be pending before doing optimize bw */
+ wait_for_no_pipes_pending(dc, context);
+ /*
+ * optimized dispclk depends on ODM setup. Need to wait for ODM
+ * update pending complete before optimizing bandwidth.
+ */
+ wait_for_odm_update_pending_complete(dc, context);
+ /* pplib is notified if disp_num changed */
+ dc->hwss.optimize_bandwidth(dc, context);
+ /* Need to do otg sync again as otg could be out of sync due to otg
+ * workaround applied during clock update
+ */
+ dc_trigger_sync(dc, context);
+ }
+
+ if (dc->hwss.update_dsc_pg)
+ dc->hwss.update_dsc_pg(dc, context, true);
+
+ if (dc->ctx->dce_version >= DCE_VERSION_MAX)
+ TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
+ else
+ TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+
+ context->stream_mask = get_stream_mask(dc, context);
+
+ if (context->stream_mask != dc->current_state->stream_mask)
+ dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
+
+ for (i = 0; i < context->stream_count; i++)
+ context->streams[i]->mode_changed = false;
+
+ /* Clear update flags that were set earlier to avoid redundant programming */
+ for (i = 0; i < context->stream_count; i++) {
+ context->streams[i]->update_flags.raw = 0x0;
+ }
+
+ old_state = dc->current_state;
+ dc->current_state = context;
+
+ dc_state_release(old_state);
+
+ dc_state_retain(dc->current_state);
+
+ return result;
+}
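+
+/*
+ * Note on the state hand-off at the end of dc_commit_state_no_check(): the
+ * committed context becomes dc->current_state; the old state drops one
+ * reference via dc_state_release() and the new one gains one via
+ * dc_state_retain(). The caller therefore still owns, and must release, its
+ * own reference to the context it passed in, as the function comment states.
+ */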
+
+static bool commit_minimal_transition_state(struct dc *dc,
+ struct dc_state *transition_base_context);
+
+/**
+ * dc_commit_streams - Commit current stream state
+ *
+ * @dc: DC object with the commit state to be configured in the hardware
+ * @params: Parameters for the commit, including the streams to be committed
+ *
+ * Function responsible for committing stream changes to the hardware.
+ *
+ * Return:
+ * DC_OK if everything works as expected; otherwise a dc_status error
+ * code.
+ */
+enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params)
+{
+ int i, j;
+ struct dc_state *context;
+ enum dc_status res = DC_OK;
+ struct dc_validation_set set[MAX_STREAMS] = {0};
+ struct pipe_ctx *pipe;
+ bool handle_exit_odm2to1 = false;
+
+ if (!params)
+ return DC_ERROR_UNEXPECTED;
+
+ if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
+ return res;
+
+ if (!streams_changed(dc, params->streams, params->stream_count) &&
+ dc->current_state->power_source == params->power_source)
+ return res;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ DC_LOG_DC("%s: %d streams\n", __func__, params->stream_count);
+
+ for (i = 0; i < params->stream_count; i++) {
+ struct dc_stream_state *stream = params->streams[i];
+ struct dc_stream_status *status = dc_stream_get_status(stream);
+
+ dc_stream_log(dc, stream);
+
+ set[i].stream = stream;
+
+ if (status) {
+ set[i].plane_count = status->plane_count;
+ for (j = 0; j < status->plane_count; j++)
+ set[i].plane_states[j] = status->plane_states[j];
+ }
+ }
+
+ /* ODM Combine 2:1 power optimization is only applied in the single-stream
+ * scenario; it uses more pipes than strictly needed in order to reduce
+ * power consumption. We need to switch off this feature to make room for
+ * new streams.
+ */
+ if (params->stream_count > dc->current_state->stream_count &&
+ dc->current_state->stream_count == 1) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->next_odm_pipe)
+ handle_exit_odm2to1 = true;
+ }
+ }
+
+ if (handle_exit_odm2to1)
+ res = commit_minimal_transition_state(dc, dc->current_state);
+
+ context = dc_state_create_current_copy(dc);
+ if (!context)
+ goto context_alloc_fail;
+
+ context->power_source = params->power_source;
+
+ res = dc_validate_with_context(dc, set, params->stream_count, context, false);
+ if (res != DC_OK) {
+ BREAK_TO_DEBUGGER();
+ goto fail;
+ }
+
+ res = dc_commit_state_no_check(dc, context);
+
+ for (i = 0; i < params->stream_count; i++) {
+ for (j = 0; j < context->stream_count; j++) {
+ if (params->streams[i]->stream_id == context->streams[j]->stream_id)
+ params->streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
+
+ if (dc_is_embedded_signal(params->streams[i]->signal)) {
+ struct dc_stream_status *status = dc_state_get_stream_status(context, params->streams[i]);
+
+ if (dc->hwss.is_abm_supported)
+ status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, params->streams[i]);
+ else
+ status->is_abm_supported = true;
+ }
+ }
+ }
+
+fail:
+ dc_state_release(context);
+
+context_alloc_fail:
+
+ DC_LOG_DC("%s Finished.\n", __func__);
+
+ return res;
+}
+
+bool dc_acquire_release_mpc_3dlut(
+ struct dc *dc, bool acquire,
+ struct dc_stream_state *stream,
+ struct dc_3dlut **lut,
+ struct dc_transfer_func **shaper)
+{
+ int pipe_idx;
+ bool ret = false;
+ bool found_pipe_idx = false;
+ const struct resource_pool *pool = dc->res_pool;
+ struct resource_context *res_ctx = &dc->current_state->res_ctx;
+ int mpcc_id = 0;
+
+ if (pool && res_ctx) {
+ if (acquire) {
+ /*find pipe idx for the given stream*/
+ for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
+ if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
+ found_pipe_idx = true;
+ mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
+ break;
+ }
+ }
+ } else
+ found_pipe_idx = true; /* for release, pipe_idx is not required */
+
+ if (found_pipe_idx) {
+ if (acquire && pool->funcs->acquire_post_bldn_3dlut)
+ ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
+ else if (!acquire && pool->funcs->release_post_bldn_3dlut)
+ ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
+ }
+ }
+ return ret;
+}
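+
+/*
+ * Sketch of a typical acquire/release pairing for
+ * dc_acquire_release_mpc_3dlut() (an illustration only, not code taken from
+ * this driver):
+ *
+ *	struct dc_3dlut *lut = NULL;
+ *	struct dc_transfer_func *shaper = NULL;
+ *
+ *	if (dc_acquire_release_mpc_3dlut(dc, true, stream, &lut, &shaper)) {
+ *		// ... program the post-blend 3D LUT and shaper here ...
+ *		dc_acquire_release_mpc_3dlut(dc, false, stream, &lut, &shaper);
+ *	}
+ */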
+
+static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ struct pipe_ctx *pipe;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ // Don't check flip pending on phantom pipes
+ if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM))
+ continue;
+
+ /* Must set to false to start with, due to OR in update function */
+ pipe->plane_state->status.is_flip_pending = false;
+ dc->hwss.update_pending_status(pipe);
+ if (pipe->plane_state->status.is_flip_pending)
+ return true;
+ }
+ return false;
+}
+
+/* Perform updates here which need to be deferred until the next vupdate.
+ *
+ * E.g. the blend LUT, 3D LUT, and shaper LUT bypass registers are double
+ * buffered, but forcing LUT memory into the shutdown state is immediate.
+ * This causes single-frame corruption as the LUT gets disabled mid-frame
+ * unless the shutdown is deferred until after entering bypass.
+ */
+static void process_deferred_updates(struct dc *dc)
+{
+ int i = 0;
+
+ if (dc->debug.enable_mem_low_power.bits.cm) {
+ ASSERT(dc->dcn_ip->max_num_dpp);
+ for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
+ if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
+ dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
+ }
+}
+
+void dc_post_update_surfaces_to_stream(struct dc *dc)
+{
+ int i;
+ struct dc_state *context = dc->current_state;
+
+ if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
+ return;
+
+ post_surface_trace(dc);
+
+ /*
+ * Only relevant for DCN behavior where we can guarantee the optimization
+ * is safe to apply - retain the legacy behavior for DCE.
+ */
+
+ if (dc->ctx->dce_version < DCE_VERSION_MAX)
+ TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+ else {
+ TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
+
+ if (is_flip_pending_in_pipes(dc, context))
+ return;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].stream == NULL ||
+ context->res_ctx.pipe_ctx[i].plane_state == NULL) {
+ context->res_ctx.pipe_ctx[i].pipe_idx = i;
+ dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]);
+ }
+
+ process_deferred_updates(dc);
+
+ dc->hwss.optimize_bandwidth(dc, context);
+
+ if (dc->hwss.update_dsc_pg)
+ dc->hwss.update_dsc_pg(dc, context, true);
+ }
+
+ dc->optimized_required = false;
+ dc->wm_optimized_required = false;
+}
+
+bool dc_set_generic_gpio_for_stereo(bool enable,
+ struct gpio_service *gpio_service)
+{
+ enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
+ struct gpio_pin_info pin_info;
+ struct gpio *generic;
+ struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
+ GFP_KERNEL);
+
+ if (!config)
+ return false;
+ pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
+
+ if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
+ kfree(config);
+ return false;
+ } else {
+ generic = dal_gpio_service_create_generic_mux(
+ gpio_service,
+ pin_info.offset,
+ pin_info.mask);
+ }
+
+ if (!generic) {
+ kfree(config);
+ return false;
+ }
+
+ gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
+
+ config->enable_output_from_mux = enable;
+ config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
+
+ if (gpio_result == GPIO_RESULT_OK)
+ gpio_result = dal_mux_setup_config(generic, config);
+
+ if (gpio_result == GPIO_RESULT_OK) {
+ dal_gpio_close(generic);
+ dal_gpio_destroy_generic_mux(&generic);
+ kfree(config);
+ return true;
+ } else {
+ dal_gpio_close(generic);
+ dal_gpio_destroy_generic_mux(&generic);
+ kfree(config);
+ return false;
+ }
+}
+
+static bool is_surface_in_context(
+ const struct dc_state *context,
+ const struct dc_plane_state *plane_state)
+{
+ int j;
+
+ for (j = 0; j < MAX_PIPES; j++) {
+ const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (plane_state == pipe_ctx->plane_state) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
+{
+ union surface_update_flags *update_flags = &u->surface->update_flags;
+ enum surface_update_type update_type = UPDATE_TYPE_FAST;
+
+ if (!u->plane_info)
+ return UPDATE_TYPE_FAST;
+
+ if (u->plane_info->color_space != u->surface->color_space) {
+ update_flags->bits.color_space_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
+
+ if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
+ update_flags->bits.horizontal_mirror_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
+
+ if (u->plane_info->rotation != u->surface->rotation) {
+ update_flags->bits.rotation_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
+
+ if (u->plane_info->format != u->surface->format) {
+ update_flags->bits.pixel_format_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
+
+ if (u->plane_info->stereo_format != u->surface->stereo_format) {
+ update_flags->bits.stereo_format_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
+
+ if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
+ update_flags->bits.per_pixel_alpha_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
+
+ if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
+ update_flags->bits.global_alpha_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
+
+ if (u->plane_info->dcc.enable != u->surface->dcc.enable
+ || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
+ || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
+ /* During DCC on/off, stutter period is calculated before
+ * DCC has fully transitioned. This results in incorrect
+ * stutter period calculation. Triggering a full update will
+ * recalculate stutter period.
+ */
+ update_flags->bits.dcc_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
+
+ if (resource_pixel_format_to_bpp(u->plane_info->format) !=
+ resource_pixel_format_to_bpp(u->surface->format)) {
+ /* different bytes per element will require full bandwidth
+ * and DML calculation
+ */
+ update_flags->bits.bpp_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
+
+ if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
+ || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
+ update_flags->bits.plane_size_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
+
+ if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
+ sizeof(union dc_tiling_info)) != 0) {
+ update_flags->bits.swizzle_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+
+ /* todo: the checks below are HW dependent; we should add a hook to
+ * the DCE/N resource and validate there.
+ */
+ if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
+ /* swizzled mode requires RQ to be setup properly,
+ * thus need to run DML to calculate RQ settings
+ */
+ update_flags->bits.bandwidth_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
+ }
+
+ /* This should be UPDATE_TYPE_FAST if nothing has changed. */
+ return update_type;
+}
+
+static enum surface_update_type get_scaling_info_update_type(
+ const struct dc *dc,
+ const struct dc_surface_update *u)
+{
+ union surface_update_flags *update_flags = &u->surface->update_flags;
+
+ if (!u->scaling_info)
+ return UPDATE_TYPE_FAST;
+
+ if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width
+ || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
+ || u->scaling_info->scaling_quality.integer_scaling !=
+ u->surface->scaling_quality.integer_scaling
+ ) {
+ update_flags->bits.scaling_change = 1;
+
+ if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
+ || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
+ && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
+ || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
+ /* Making dst rect smaller requires a bandwidth change */
+ update_flags->bits.bandwidth_change = 1;
+ }
+
+ if (u->scaling_info->src_rect.width != u->surface->src_rect.width
+ || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
+
+ update_flags->bits.scaling_change = 1;
+ if (u->scaling_info->src_rect.width > u->surface->src_rect.width
+ || u->scaling_info->src_rect.height > u->surface->src_rect.height)
+ /* Making src rect bigger requires a bandwidth change */
+ update_flags->bits.clock_change = 1;
+ }
+
+ if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
+ (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
+ u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
+ /* Changing clip size of a large surface may result in MPC slice count change */
+ update_flags->bits.bandwidth_change = 1;
+
+ if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width ||
+ u->scaling_info->clip_rect.height != u->surface->clip_rect.height)
+ update_flags->bits.clip_size_change = 1;
+
+ if (u->scaling_info->src_rect.x != u->surface->src_rect.x
+ || u->scaling_info->src_rect.y != u->surface->src_rect.y
+ || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
+ || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
+ || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
+ || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
+ update_flags->bits.position_change = 1;
+
+ if (update_flags->bits.clock_change
+ || update_flags->bits.bandwidth_change
+ || update_flags->bits.scaling_change)
+ return UPDATE_TYPE_FULL;
+
+ if (update_flags->bits.position_change ||
+ update_flags->bits.clip_size_change)
+ return UPDATE_TYPE_MED;
+
+ return UPDATE_TYPE_FAST;
+}
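+
+/*
+ * Worked example with illustrative values: given a 1920x1080 src rect and a
+ * dst rect shrinking from 1920x1080 to 960x540, scaling_change is set, and
+ * because the new dst rect is smaller than both the old dst rect and the src
+ * rect, bandwidth_change is set too, so get_scaling_info_update_type()
+ * returns UPDATE_TYPE_FULL.
+ */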
+
+static enum surface_update_type det_surface_update(const struct dc *dc,
+ const struct dc_surface_update *u)
+{
+ const struct dc_state *context = dc->current_state;
+ enum surface_update_type type;
+ enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+ union surface_update_flags *update_flags = &u->surface->update_flags;
+
+ if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
+ update_flags->raw = 0xFFFFFFFF;
+ return UPDATE_TYPE_FULL;
+ }
+
+ update_flags->raw = 0; // Reset all flags
+
+ type = get_plane_info_update_type(u);
+ elevate_update_type(&overall_type, type);
+
+ type = get_scaling_info_update_type(dc, u);
+ elevate_update_type(&overall_type, type);
+
+ if (u->flip_addr) {
+ update_flags->bits.addr_update = 1;
+ if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
+ update_flags->bits.tmz_changed = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
+ }
+ }
+ if (u->in_transfer_func)
+ update_flags->bits.in_transfer_func_change = 1;
+
+ if (u->input_csc_color_matrix)
+ update_flags->bits.input_csc_change = 1;
+
+ if (u->coeff_reduction_factor)
+ update_flags->bits.coeff_reduction_change = 1;
+
+ if (u->gamut_remap_matrix)
+ update_flags->bits.gamut_remap_change = 1;
+
+ if (u->blend_tf)
+ update_flags->bits.gamma_change = 1;
+
+ if (u->gamma) {
+ enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
+
+ if (u->plane_info)
+ format = u->plane_info->format;
+ else if (u->surface)
+ format = u->surface->format;
+
+ if (dce_use_lut(format))
+ update_flags->bits.gamma_change = 1;
+ }
+
+ if (u->lut3d_func || u->func_shaper)
+ update_flags->bits.lut_3d = 1;
+
+ if (u->hdr_mult.value)
+ if (u->hdr_mult.value != u->surface->hdr_mult.value) {
+ update_flags->bits.hdr_mult = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED);
+ }
+
+ if (update_flags->bits.in_transfer_func_change) {
+ type = UPDATE_TYPE_MED;
+ elevate_update_type(&overall_type, type);
+ }
+
+ if (update_flags->bits.lut_3d) {
+ type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, type);
+ }
+
+ if (dc->debug.enable_legacy_fast_update &&
+ (update_flags->bits.gamma_change ||
+ update_flags->bits.gamut_remap_change ||
+ update_flags->bits.input_csc_change ||
+ update_flags->bits.coeff_reduction_change)) {
+ type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, type);
+ }
+ return overall_type;
+}
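+
+/*
+ * elevate_update_type() only ever raises the running type along
+ * UPDATE_TYPE_FAST < UPDATE_TYPE_MED < UPDATE_TYPE_FULL, so the checks in
+ * det_surface_update() can run in any order; the most expensive update
+ * requested wins.
+ */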
+
+static enum surface_update_type check_update_surfaces_for_stream(
+ struct dc *dc,
+ struct dc_surface_update *updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ const struct dc_stream_status *stream_status)
+{
+ int i;
+ enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+
+ if (dc->idle_optimizations_allowed)
+ overall_type = UPDATE_TYPE_FULL;
+
+ if (stream_status == NULL || stream_status->plane_count != surface_count)
+ overall_type = UPDATE_TYPE_FULL;
+
+ if (stream_update && stream_update->pending_test_pattern) {
+ overall_type = UPDATE_TYPE_FULL;
+ }
+
+ /* some stream updates require passive update */
+ if (stream_update) {
+ union stream_update_flags *su_flags = &stream_update->stream->update_flags;
+
+ if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
+ (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
+ stream_update->integer_scaling_update)
+ su_flags->bits.scaling = 1;
+
+ if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
+ su_flags->bits.out_tf = 1;
+
+ if (stream_update->abm_level)
+ su_flags->bits.abm_level = 1;
+
+ if (stream_update->dpms_off)
+ su_flags->bits.dpms_off = 1;
+
+ if (stream_update->gamut_remap)
+ su_flags->bits.gamut_remap = 1;
+
+ if (stream_update->wb_update)
+ su_flags->bits.wb_update = 1;
+
+ if (stream_update->dsc_config)
+ su_flags->bits.dsc_changed = 1;
+
+ if (stream_update->mst_bw_update)
+ su_flags->bits.mst_bw = 1;
+
+ if (stream_update->stream && stream_update->stream->freesync_on_desktop &&
+ (stream_update->vrr_infopacket || stream_update->allow_freesync ||
+ stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
+ su_flags->bits.fams_changed = 1;
+
+ if (su_flags->raw != 0)
+ overall_type = UPDATE_TYPE_FULL;
+
+ if (stream_update->output_csc_transform || stream_update->output_color_space)
+ su_flags->bits.out_csc = 1;
+
+ /* Output transfer function changes do not require bandwidth recalculation,
+ * so don't trigger a full update
+ */
+ if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
+ su_flags->bits.out_tf = 1;
+ }
+
+ for (i = 0 ; i < surface_count; i++) {
+ enum surface_update_type type =
+ det_surface_update(dc, &updates[i]);
+
+ elevate_update_type(&overall_type, type);
+ }
+
+ return overall_type;
+}
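+
+/*
+ * Note on the ordering above: su_flags->raw is tested against zero before
+ * out_csc (and, in the non-legacy path, out_tf) is set, so those updates
+ * deliberately do not force UPDATE_TYPE_FULL on their own.
+ */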
+
+/*
+ * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
+ *
+ * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
+ */
+enum surface_update_type dc_check_update_surfaces_for_stream(
+ struct dc *dc,
+ struct dc_surface_update *updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ const struct dc_stream_status *stream_status)
+{
+ int i;
+ enum surface_update_type type;
+
+ if (stream_update)
+ stream_update->stream->update_flags.raw = 0;
+ for (i = 0; i < surface_count; i++)
+ updates[i].surface->update_flags.raw = 0;
+
+ type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
+ if (type == UPDATE_TYPE_FULL) {
+ if (stream_update) {
+ uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
+ stream_update->stream->update_flags.raw = 0xFFFFFFFF;
+ stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
+ }
+ for (i = 0; i < surface_count; i++)
+ updates[i].surface->update_flags.raw = 0xFFFFFFFF;
+ }
+
+ if (type == UPDATE_TYPE_FAST) {
+ // If there's an available clock comparator, we use that.
+ if (dc->clk_mgr->funcs->are_clock_states_equal) {
+ if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
+ dc->optimized_required = true;
+ // Else we fallback to mem compare.
+ } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
+ dc->optimized_required = true;
+ }
+
+ dc->optimized_required |= dc->wm_optimized_required;
+ }
+
+ return type;
+}
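+
+/*
+ * Note on the fast-path clock check above: when clk_mgr provides no
+ * are_clock_states_equal() callback, the memcmp() fallback compares only the
+ * leading fields of struct dc_clocks, up to (but excluding)
+ * prev_p_state_change_support, so trailing bookkeeping fields cannot trigger
+ * a spurious optimization pass.
+ */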
+
+static struct dc_stream_status *stream_get_status(
+ struct dc_state *ctx,
+ struct dc_stream_state *stream)
+{
+ uint8_t i;
+
+ for (i = 0; i < ctx->stream_count; i++) {
+ if (stream == ctx->streams[i]) {
+ return &ctx->stream_status[i];
+ }
+ }
+
+ return NULL;
+}
+
+static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
+
+static void copy_surface_update_to_plane(
+ struct dc_plane_state *surface,
+ struct dc_surface_update *srf_update)
+{
+ if (srf_update->flip_addr) {
+ surface->address = srf_update->flip_addr->address;
+ surface->flip_immediate =
+ srf_update->flip_addr->flip_immediate;
+ surface->time.time_elapsed_in_us[surface->time.index] =
+ srf_update->flip_addr->flip_timestamp_in_us -
+ surface->time.prev_update_time_in_us;
+ surface->time.prev_update_time_in_us =
+ srf_update->flip_addr->flip_timestamp_in_us;
+ surface->time.index++;
+ if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
+ surface->time.index = 0;
+
+ surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
+ }
+
+ if (srf_update->scaling_info) {
+ surface->scaling_quality =
+ srf_update->scaling_info->scaling_quality;
+ surface->dst_rect =
+ srf_update->scaling_info->dst_rect;
+ surface->src_rect =
+ srf_update->scaling_info->src_rect;
+ surface->clip_rect =
+ srf_update->scaling_info->clip_rect;
+ }
+
+ if (srf_update->plane_info) {
+ surface->color_space =
+ srf_update->plane_info->color_space;
+ surface->format =
+ srf_update->plane_info->format;
+ surface->plane_size =
+ srf_update->plane_info->plane_size;
+ surface->rotation =
+ srf_update->plane_info->rotation;
+ surface->horizontal_mirror =
+ srf_update->plane_info->horizontal_mirror;
+ surface->stereo_format =
+ srf_update->plane_info->stereo_format;
+ surface->tiling_info =
+ srf_update->plane_info->tiling_info;
+ surface->visible =
+ srf_update->plane_info->visible;
+ surface->per_pixel_alpha =
+ srf_update->plane_info->per_pixel_alpha;
+ surface->global_alpha =
+ srf_update->plane_info->global_alpha;
+ surface->global_alpha_value =
+ srf_update->plane_info->global_alpha_value;
+ surface->dcc =
+ srf_update->plane_info->dcc;
+ surface->layer_index =
+ srf_update->plane_info->layer_index;
+ }
+
+ if (srf_update->gamma) {
+ memcpy(&surface->gamma_correction.entries,
+ &srf_update->gamma->entries,
+ sizeof(struct dc_gamma_entries));
+ surface->gamma_correction.is_identity =
+ srf_update->gamma->is_identity;
+ surface->gamma_correction.num_entries =
+ srf_update->gamma->num_entries;
+ surface->gamma_correction.type =
+ srf_update->gamma->type;
+ }
+
+ if (srf_update->in_transfer_func) {
+ surface->in_transfer_func.sdr_ref_white_level =
+ srf_update->in_transfer_func->sdr_ref_white_level;
+ surface->in_transfer_func.tf =
+ srf_update->in_transfer_func->tf;
+ surface->in_transfer_func.type =
+ srf_update->in_transfer_func->type;
+ memcpy(&surface->in_transfer_func.tf_pts,
+ &srf_update->in_transfer_func->tf_pts,
+ sizeof(struct dc_transfer_func_distributed_points));
+ }
+
+ if (srf_update->func_shaper)
+ memcpy(&surface->in_shaper_func, srf_update->func_shaper,
+ sizeof(surface->in_shaper_func));
+
+ if (srf_update->lut3d_func)
+ memcpy(&surface->lut3d_func, srf_update->lut3d_func,
+ sizeof(surface->lut3d_func));
+
+ if (srf_update->hdr_mult.value)
+ surface->hdr_mult =
+ srf_update->hdr_mult;
+
+ if (srf_update->blend_tf)
+ memcpy(&surface->blend_tf, srf_update->blend_tf,
+ sizeof(surface->blend_tf));
+
+ if (srf_update->input_csc_color_matrix)
+ surface->input_csc_color_matrix =
+ *srf_update->input_csc_color_matrix;
+
+ if (srf_update->coeff_reduction_factor)
+ surface->coeff_reduction_factor =
+ *srf_update->coeff_reduction_factor;
+
+ if (srf_update->gamut_remap_matrix)
+ surface->gamut_remap_matrix =
+ *srf_update->gamut_remap_matrix;
+}
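+
+/*
+ * Note on the flip bookkeeping in copy_surface_update_to_plane(): the
+ * per-flip elapsed times form a small ring buffer of
+ * DC_PLANE_UPDATE_TIMES_MAX entries indexed by surface->time.index, which
+ * wraps back to 0 once the end of the buffer is reached.
+ */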
+
+static void copy_stream_update_to_stream(struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *update)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+
+ if (update == NULL || stream == NULL)
+ return;
+
+ if (update->src.height && update->src.width)
+ stream->src = update->src;
+
+ if (update->dst.height && update->dst.width)
+ stream->dst = update->dst;
+
+ if (update->out_transfer_func) {
+ stream->out_transfer_func.sdr_ref_white_level =
+ update->out_transfer_func->sdr_ref_white_level;
+ stream->out_transfer_func.tf = update->out_transfer_func->tf;
+ stream->out_transfer_func.type =
+ update->out_transfer_func->type;
+ memcpy(&stream->out_transfer_func.tf_pts,
+ &update->out_transfer_func->tf_pts,
+ sizeof(struct dc_transfer_func_distributed_points));
+ }
+
+ if (update->hdr_static_metadata)
+ stream->hdr_static_metadata = *update->hdr_static_metadata;
+
+ if (update->abm_level)
+ stream->abm_level = *update->abm_level;
+
+ if (update->periodic_interrupt)
+ stream->periodic_interrupt = *update->periodic_interrupt;
+
+ if (update->gamut_remap)
+ stream->gamut_remap_matrix = *update->gamut_remap;
+
+ /* Note: this being updated after mode set is currently not a use case;
+ * however, if it arises, OCSC would need to be reprogrammed at the
+ * minimum.
+ */
+ if (update->output_color_space)
+ stream->output_color_space = *update->output_color_space;
+
+ if (update->output_csc_transform)
+ stream->csc_color_matrix = *update->output_csc_transform;
+
+ if (update->vrr_infopacket)
+ stream->vrr_infopacket = *update->vrr_infopacket;
+
+ if (update->allow_freesync)
+ stream->allow_freesync = *update->allow_freesync;
+
+ if (update->vrr_active_variable)
+ stream->vrr_active_variable = *update->vrr_active_variable;
+
+ if (update->vrr_active_fixed)
+ stream->vrr_active_fixed = *update->vrr_active_fixed;
+
+ if (update->crtc_timing_adjust)
+ stream->adjust = *update->crtc_timing_adjust;
+
+ if (update->dpms_off)
+ stream->dpms_off = *update->dpms_off;
+
+ if (update->hfvsif_infopacket)
+ stream->hfvsif_infopacket = *update->hfvsif_infopacket;
+
+ if (update->vtem_infopacket)
+ stream->vtem_infopacket = *update->vtem_infopacket;
+
+ if (update->vsc_infopacket)
+ stream->vsc_infopacket = *update->vsc_infopacket;
+
+ if (update->vsp_infopacket)
+ stream->vsp_infopacket = *update->vsp_infopacket;
+
+ if (update->adaptive_sync_infopacket)
+ stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
+
+ if (update->dither_option)
+ stream->dither_option = *update->dither_option;
+
+ if (update->pending_test_pattern)
+ stream->test_pattern = *update->pending_test_pattern;
+ /* update current stream with writeback info */
+ if (update->wb_update) {
+ int i;
+
+ stream->num_wb_info = update->wb_update->num_wb_info;
+ ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
+ for (i = 0; i < stream->num_wb_info; i++)
+ stream->writeback_info[i] =
+ update->wb_update->writeback_info[i];
+ }
+ if (update->dsc_config) {
+ struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
+ uint32_t old_dsc_enabled = stream->timing.flags.DSC;
+ uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
+ update->dsc_config->num_slices_v != 0);
+
+ /* Use a temporary context for validating the new DSC config */
+ struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);
+
+ if (dsc_validate_context) {
+ stream->timing.dsc_cfg = *update->dsc_config;
+ stream->timing.flags.DSC = enable_dsc;
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
+ stream->timing.dsc_cfg = old_dsc_cfg;
+ stream->timing.flags.DSC = old_dsc_enabled;
+ update->dsc_config = NULL;
+ }
+
+ dc_state_release(dsc_validate_context);
+ } else {
+ DC_ERROR("Failed to allocate new validate context for DSC change\n");
+ update->dsc_config = NULL;
+ }
+ }
+}
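+
+/*
+ * Note on the DSC path above: the new DSC config is validated against a
+ * throwaway copy of the current state before being accepted. On validation
+ * failure (or if the copy cannot be allocated) the stream's timing is left
+ * unchanged and update->dsc_config is cleared, so later commit stages see no
+ * pending DSC change.
+ */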
+
+static void backup_planes_and_stream_state(
+ struct dc_scratch_space *scratch,
+ struct dc_stream_state *stream)
+{
+ int i;
+ struct dc_stream_status *status = dc_stream_get_status(stream);
+
+ if (!status)
+ return;
+
+ for (i = 0; i < status->plane_count; i++) {
+ scratch->plane_states[i] = *status->plane_states[i];
+ }
+ scratch->stream_state = *stream;
+}
+
+static void restore_planes_and_stream_state(
+ struct dc_scratch_space *scratch,
+ struct dc_stream_state *stream)
+{
+ int i;
+ struct dc_stream_status *status = dc_stream_get_status(stream);
+
+ if (!status)
+ return;
+
+ for (i = 0; i < status->plane_count; i++) {
+ *status->plane_states[i] = scratch->plane_states[i];
+ }
+ *stream = scratch->stream_state;
+}
+
+/**
+ * update_seamless_boot_flags() - Helper function for updating seamless boot flags
+ *
+ * @dc: Current DC state
+ * @context: New DC state to be programmed
+ * @surface_count: Number of surfaces that have been updated
+ * @stream: Corresponding stream to be updated in the current flip
+ *
+ * Updating seamless boot flags does not need to be part of the commit sequence. This
+ * helper function will update the seamless boot flags on each flip (if required)
+ * outside of the HW commit sequence (fast or slow).
+ *
+ * Return: void
+ */
+static void update_seamless_boot_flags(struct dc *dc,
+ struct dc_state *context,
+ int surface_count,
+ struct dc_stream_state *stream)
+{
+ if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
+ /* Optimize seamless boot flag keeps clocks and watermarks high until
+ * first flip. After first flip, optimization is required to lower
+ * bandwidth. Important to note that it is expected UEFI will
+ * only light up a single display on POST, therefore we only expect
+ * one stream with seamless boot flag set.
+ */
+ if (stream->apply_seamless_boot_optimization) {
+ stream->apply_seamless_boot_optimization = false;
+
+ if (get_seamless_boot_stream_count(context) == 0)
+ dc->optimized_required = true;
+ }
+ }
+}
+
+/**
+ * update_planes_and_stream_state() - The function takes planes and stream
+ * updates as inputs and determines the appropriate update type. If update type
+ * is FULL, the function allocates a new context, populates and validates it.
+ * Otherwise, it updates current dc context. The function will return both
+ * new_context and new_update_type back to the caller. The function also backs
+ * up both current and new contexts into corresponding dc state scratch memory.
+ * TODO: The function does too many things, and even conditionally allocates dc
+ * context memory implicitly. We should consider breaking it down.
+ *
+ * @dc: Current DC state
+ * @srf_updates: an array of surface updates
+ * @surface_count: surface update count
+ * @stream: Corresponding stream to be updated
+ * @stream_update: stream update
+ * @new_update_type: [out] determined update type by the function
+ * @new_context: [out] new context allocated and validated if update type is
+ * FULL, reference to current context if update type is less than FULL.
+ *
+ * Return: true if a valid update is populated into new_context, false
+ * otherwise.
+ */
+static bool update_planes_and_stream_state(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type *new_update_type,
+ struct dc_state **new_context)
+{
+ struct dc_state *context;
+ int i, j;
+ enum surface_update_type update_type;
+ const struct dc_stream_status *stream_status;
+ struct dc_context *dc_ctx = dc->ctx;
+
+ stream_status = dc_stream_get_status(stream);
+
+ if (!stream_status) {
+ if (surface_count) /* Only an error condition if surf_count non-zero*/
+ ASSERT(false);
+
+ return false; /* Cannot commit surface to stream that is not committed */
+ }
+
+ context = dc->current_state;
+ update_type = dc_check_update_surfaces_for_stream(
+ dc, srf_updates, surface_count, stream_update, stream_status);
+ if (update_type == UPDATE_TYPE_FULL)
+ backup_planes_and_stream_state(&dc->scratch.current_state, stream);
+
+ /* update current stream with the new updates */
+ copy_stream_update_to_stream(dc, context, stream, stream_update);
+
+ /* do not perform surface update if surface has invalid dimensions
+ * (all zero) and no scaling_info is provided
+ */
+ if (surface_count > 0) {
+ for (i = 0; i < surface_count; i++) {
+ if ((srf_updates[i].surface->src_rect.width == 0 ||
+ srf_updates[i].surface->src_rect.height == 0 ||
+ srf_updates[i].surface->dst_rect.width == 0 ||
+ srf_updates[i].surface->dst_rect.height == 0) &&
+ (!srf_updates[i].scaling_info ||
+ srf_updates[i].scaling_info->src_rect.width == 0 ||
+ srf_updates[i].scaling_info->src_rect.height == 0 ||
+ srf_updates[i].scaling_info->dst_rect.width == 0 ||
+ srf_updates[i].scaling_info->dst_rect.height == 0)) {
+ DC_ERROR("Invalid src/dst rects in surface update!\n");
+ return false;
+ }
+ }
+ }
+
+ if (update_type >= update_surface_trace_level)
+ update_surface_trace(dc, srf_updates, surface_count);
+
+ for (i = 0; i < surface_count; i++)
+ copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]);
+
+ if (update_type >= UPDATE_TYPE_FULL) {
+ struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
+
+ for (i = 0; i < surface_count; i++)
+ new_planes[i] = srf_updates[i].surface;
+
+ /* initialize scratch memory for building context */
+ context = dc_state_create_copy(dc->current_state);
+ if (context == NULL) {
+ DC_ERROR("Failed to allocate new validate context!\n");
+ return false;
+ }
+
+ /* For each full update, remove all existing phantom pipes first.
+ * Ensures that we have enough pipes for newly added MPO planes
+ */
+ dc_state_remove_phantom_streams_and_planes(dc, context);
+ dc_state_release_phantom_streams_and_planes(dc, context);
+
+ /* remove old surfaces from context */
+ if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) {
+ BREAK_TO_DEBUGGER();
+ goto fail;
+ }
+
+ /* add surface to context */
+ if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
+ BREAK_TO_DEBUGGER();
+ goto fail;
+ }
+ }
+
+ /* save update parameters into surface */
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *surface = srf_updates[i].surface;
+
+ if (update_type != UPDATE_TYPE_MED)
+ continue;
+ if (surface->update_flags.bits.clip_size_change ||
+ surface->update_flags.bits.position_change) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->plane_state != surface)
+ continue;
+
+ resource_build_scaling_params(pipe_ctx);
+ }
+ }
+ }
+
+ if (update_type == UPDATE_TYPE_FULL) {
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ BREAK_TO_DEBUGGER();
+ goto fail;
+ }
+ }
+ update_seamless_boot_flags(dc, context, surface_count, stream);
+
+ *new_context = context;
+ *new_update_type = update_type;
+ if (update_type == UPDATE_TYPE_FULL)
+ backup_planes_and_stream_state(&dc->scratch.new_state, stream);
+
+ return true;
+
+fail:
+ dc_state_release(context);
+
+ return false;
+}
+
+static void commit_planes_do_stream_update(struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type,
+ struct dc_state *context)
+{
+ int j;
+
+ // Stream updates
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) {
+
+ if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
+ dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
+
+ if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
+ stream_update->vrr_infopacket ||
+ stream_update->vsc_infopacket ||
+ stream_update->vsp_infopacket ||
+ stream_update->hfvsif_infopacket ||
+ stream_update->adaptive_sync_infopacket ||
+ stream_update->vtem_infopacket) {
+ resource_build_info_frame(pipe_ctx);
+ dc->hwss.update_info_frame(pipe_ctx);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dc->link_srv->dp_trace_source_sequence(
+ pipe_ctx->stream->link,
+ DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+ }
+
+ if (stream_update->hdr_static_metadata &&
+ stream->use_dynamic_meta &&
+ dc->hwss.set_dmdata_attributes &&
+ pipe_ctx->stream->dmdata_address.quad_part != 0)
+ dc->hwss.set_dmdata_attributes(pipe_ctx);
+
+ if (stream_update->gamut_remap)
+ dc_stream_set_gamut_remap(dc, stream);
+
+ if (stream_update->output_csc_transform)
+ dc_stream_program_csc_matrix(dc, stream);
+
+ if (stream_update->dither_option) {
+ struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &pipe_ctx->stream->bit_depth_params);
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+ while (odm_pipe) {
+ odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
+ }
+
+ /* Full fe update*/
+ if (update_type == UPDATE_TYPE_FAST)
+ continue;
+
+ if (stream_update->dsc_config)
+ dc->link_srv->update_dsc_config(pipe_ctx);
+
+ if (stream_update->mst_bw_update) {
+ if (stream_update->mst_bw_update->is_increase)
+ dc->link_srv->increase_mst_payload(pipe_ctx,
+ stream_update->mst_bw_update->mst_stream_bw);
+ else
+ dc->link_srv->reduce_mst_payload(pipe_ctx,
+ stream_update->mst_bw_update->mst_stream_bw);
+ }
+
+ if (stream_update->pending_test_pattern) {
+ /*
+ * Test pattern params depend on ODM topology
+ * changes that we could be applying to the front
+ * end. Since at this stage front end changes are
+ * not yet applied, we can only apply the test
+ * pattern in HW based on the current state and
+ * populate the final test pattern params in the
+ * new state. If current and new test pattern
+ * params differ as a result of different ODM
+ * topologies being used, this will be detected
+ * and handled during the front end programming
+ * update.
+ */
+ dc->link_srv->dp_set_test_pattern(stream->link,
+ stream->test_pattern.type,
+ stream->test_pattern.color_space,
+ stream->test_pattern.p_link_settings,
+ stream->test_pattern.p_custom_pattern,
+ stream->test_pattern.cust_pattern_size);
+ resource_build_test_pattern_params(&context->res_ctx, pipe_ctx);
+ }
+
+ if (stream_update->dpms_off) {
+ if (*stream_update->dpms_off) {
+ dc->link_srv->set_dpms_off(pipe_ctx);
+ /* for dpms, keep acquired resources*/
+ if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
+ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+
+ dc->optimized_required = true;
+
+ } else {
+ if (get_seamless_boot_stream_count(context) == 0)
+ dc->hwss.prepare_bandwidth(dc, dc->current_state);
+ dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
+ }
+ } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space
+ && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ /*
+ * Workaround for a firmware issue in some receivers where they don't pick up
+ * the correct output color space unless the DP link is disabled/re-enabled.
+ */
+ dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
+ }
+
+ if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
+ bool should_program_abm = true;
+
+ // if otg funcs defined check if blanked before programming
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked)
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
+ should_program_abm = false;
+
+ if (should_program_abm) {
+ if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
+ } else {
+ pipe_ctx->stream_res.abm->funcs->set_abm_level(
+ pipe_ctx->stream_res.abm, stream->abm_level);
+ }
+ }
+ }
+ }
+ }
+}
+
+static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
+{
+ if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
+ || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ && stream->ctx->dce_version >= DCN_VERSION_3_1)
+ return true;
+
+ if (stream->link->replay_settings.config.replay_supported)
+ return true;
+
+ if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
+ return true;
+
+ return false;
+}
+
+void dc_dmub_update_dirty_rect(struct dc *dc,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ struct dc_state *context)
+{
+ union dmub_rb_cmd cmd;
+ struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
+ unsigned int i, j;
+ unsigned int panel_inst = 0;
+
+ if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
+ return;
+
+ if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
+ return;
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
+ cmd.update_dirty_rect.header.sub_type = 0;
+ cmd.update_dirty_rect.header.payload_bytes =
+ sizeof(cmd.update_dirty_rect) -
+ sizeof(cmd.update_dirty_rect.header);
+ update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+ const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
+
+ if (!srf_updates[i].surface || !flip_addr)
+ continue;
+ /* Do not send in immediate flip mode */
+ if (srf_updates[i].surface->flip_immediate)
+ continue;
+
+ update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
+ memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
+ sizeof(flip_addr->dirty_rects));
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+
+ update_dirty_rect->panel_inst = panel_inst;
+ update_dirty_rect->pipe_idx = j;
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ }
+ }
+}
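+
+/*
+ * build_dmub_update_dirty_rect() below assembles the same dirty-rect command
+ * as dc_dmub_update_dirty_rect() above, but instead of executing it right
+ * away it queues the command into the dc_dmub_cmd[] array so that the whole
+ * batch can be sent later while the OTG lock is held (see
+ * build_dmub_cmd_list()).
+ */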
+
+static void build_dmub_update_dirty_rect(
+ struct dc *dc,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ struct dc_state *context,
+ struct dc_dmub_cmd dc_dmub_cmd[],
+ unsigned int *dmub_cmd_count)
+{
+ union dmub_rb_cmd cmd;
+ struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
+ unsigned int i, j;
+ unsigned int panel_inst = 0;
+
+ if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
+ return;
+
+ if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
+ return;
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
+ cmd.update_dirty_rect.header.sub_type = 0;
+ cmd.update_dirty_rect.header.payload_bytes =
+ sizeof(cmd.update_dirty_rect) -
+ sizeof(cmd.update_dirty_rect.header);
+ update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+ const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
+
+ if (!srf_updates[i].surface || !flip_addr)
+ continue;
+ /* Do not send in immediate flip mode */
+ if (srf_updates[i].surface->flip_immediate)
+ continue;
+ update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
+ update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
+ memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
+ sizeof(flip_addr->dirty_rects));
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+ update_dirty_rect->panel_inst = panel_inst;
+ update_dirty_rect->pipe_idx = j;
+ dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd;
+ dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+ (*dmub_cmd_count)++;
+ }
+ }
+}
+
+/**
+ * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB
+ *
+ * @dc: Current DC state
+ * @srf_updates: Array of surface updates
+ * @surface_count: Number of surfaces that have been updated
+ * @stream: Corresponding stream to be updated in the current flip
+ * @context: New DC state to be programmed
+ * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB
+ * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array
+ *
+ * This function builds an array of DMCUB commands to be sent to DMCUB. This function is required
+ * to build an array of commands and have them sent while the OTG lock is acquired.
+ *
+ * Return: void
+ */
+static void build_dmub_cmd_list(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_state *context,
+ struct dc_dmub_cmd dc_dmub_cmd[],
+ unsigned int *dmub_cmd_count)
+{
+ // Initialize cmd count to 0
+ *dmub_cmd_count = 0;
+ build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count);
+}
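+
+/*
+ * At present only dirty-rect commands populate the list above; further DMCUB
+ * command builders can be chained in build_dmub_cmd_list() as more commands
+ * need to be issued under the OTG lock.
+ */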
+
+static void commit_planes_for_stream_fast(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type,
+ struct dc_state *context)
+{
+ int i, j;
+ struct pipe_ctx *top_pipe_to_program = NULL;
+ struct dc_stream_status *stream_status = NULL;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ dc_z10_restore(dc);
+
+ top_pipe_to_program = resource_get_otg_master_for_stream(
+ &context->res_ctx,
+ stream);
+
+ if (!top_pipe_to_program)
+ return;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
+
+ if (dc->debug.visual_confirm)
+ dc_update_visual_confirm_color(dc, context, pipe);
+ }
+ }
+
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+ /*set logical flag for lock/unlock use*/
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+ if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
+ continue;
+ pipe_ctx->plane_state->triplebuffer_flips = false;
+ if (update_type == UPDATE_TYPE_FAST &&
+ dc->hwss.program_triplebuffer &&
+ !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
+ /*triple buffer for VUpdate only*/
+ pipe_ctx->plane_state->triplebuffer_flips = true;
+ }
+ }
+ }
+
+ stream_status = dc_state_get_stream_status(context, stream);
+
+ build_dmub_cmd_list(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ context,
+ context->dc_dmub_cmd,
+ &(context->dmub_cmd_count));
+ hwss_build_fast_sequence(dc,
+ context->dc_dmub_cmd,
+ context->dmub_cmd_count,
+ context->block_sequence,
+ &(context->block_sequence_steps),
+ top_pipe_to_program,
+ stream_status,
+ context);
+ hwss_execute_sequence(dc,
+ context->block_sequence,
+ context->block_sequence_steps);
+ /* Clear update flags so next flip doesn't have redundant programming
+ * (if there's no stream update, the update flags are not cleared).
+ * Surface updates are cleared unconditionally at the beginning of each flip,
+ * so no need to clear here.
+ */
+ if (top_pipe_to_program->stream)
+ top_pipe_to_program->stream->update_flags.raw = 0;
+}
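+
+/*
+ * Summary of the fast path above: the DMCUB command list and the HW block
+ * sequence are built up front and then executed as one unit via
+ * hwss_execute_sequence(), so the DMCUB commands go out while the OTG lock
+ * is held, as build_dmub_cmd_list() requires.
+ */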
+
+static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
+{
+/*
+ * This function calls HWSS to wait for any potentially double buffered
+ * operations to complete. It should be invoked as a preamble prior
+ * to full-update programming, before asserting any HW locks.
+ */
+ int pipe_idx;
+ int opp_inst;
+ int opp_count = dc->res_pool->res_cap->num_opp;
+ struct hubp *hubp;
+ int mpcc_inst;
+ const struct pipe_ctx *pipe_ctx;
+
+ for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
+ pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
+ pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
+
+ hubp = pipe_ctx->plane_res.hubp;
+ if (!hubp)
+ continue;
+
+ mpcc_inst = hubp->inst;
+ // MPCC inst is equal to pipe index in practice
+ for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
+ if ((dc->res_pool->opps[opp_inst] != NULL) &&
+ (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) {
+ dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
+ dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
+ break;
+ }
+ }
+ }
+ wait_for_odm_update_pending_complete(dc, dc_context);
+}
+
+static void commit_planes_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type,
+ struct dc_state *context)
+{
+ int i, j;
+ struct pipe_ctx *top_pipe_to_program = NULL;
+ bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
+ bool subvp_prev_use = false;
+ bool subvp_curr_use = false;
+ uint8_t current_stream_mask = 0;
+
+ // Once we apply the new subvp context to hardware it won't be in the
+ // dc->current_state anymore, so we have to cache it before we apply
+ // the new SubVP context
+ subvp_prev_use = false;
+ dc_exit_ips_for_hw_access(dc);
+
+ dc_z10_restore(dc);
+ if (update_type == UPDATE_TYPE_FULL)
+ wait_for_outstanding_hw_updates(dc, context);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
+
+ if (dc->debug.visual_confirm)
+ dc_update_visual_confirm_color(dc, context, pipe);
+ }
+ }
+
+ if (update_type == UPDATE_TYPE_FULL) {
+ dc_allow_idle_optimizations(dc, false);
+
+ if (get_seamless_boot_stream_count(context) == 0)
+ dc->hwss.prepare_bandwidth(dc, context);
+
+ if (dc->hwss.update_dsc_pg)
+ dc->hwss.update_dsc_pg(dc, context, false);
+
+ context_clock_trace(dc, context);
+ }
+
+ top_pipe_to_program = resource_get_otg_master_for_stream(
+ &context->res_ctx,
+ stream);
+ ASSERT(top_pipe_to_program != NULL);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ // Check old context for SubVP
+ subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
+ if (subvp_prev_use)
+ break;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ subvp_curr_use = true;
+ break;
+ }
+ }
+
+ if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
+ struct pipe_ctx *mpcc_pipe;
+ struct pipe_ctx *odm_pipe;
+
+ for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
+ for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
+ }
+
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+ if (top_pipe_to_program &&
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ if (should_use_dmub_lock(stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_dig = 1;
+ inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
+
+ dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
+ true,
+ &hw_locks,
+ &inst_flags);
+ } else
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
+ top_pipe_to_program->stream_res.tg);
+ }
+
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
+ dc->hwss.interdependent_update_lock(dc, context, true);
+
+ } else {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
+ /* Lock the top pipe while updating plane addrs, since freesync requires
+ * plane addr update event triggers to be synchronized.
+ * top_pipe_to_program is expected to never be NULL
+ */
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
+ }
+
+ dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
+
+ // Stream updates
+ if (stream_update)
+ commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
+
+ if (surface_count == 0) {
+		/*
+		 * When turning off the screen, there is no need to program the
+		 * front end a second time; just return after programming blank.
+		 */
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ } else {
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+ }
+ dc->hwss.post_unlock_program_front_end(dc, context);
+
+ if (update_type != UPDATE_TYPE_FAST)
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
+
+ /* Since phantom pipe programming is moved to post_unlock_program_front_end,
+ * move the SubVP lock to after the phantom pipes have been setup
+ */
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
+ NULL, subvp_prev_use);
+ return;
+ }
+
+ if (update_type != UPDATE_TYPE_FAST) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP ||
+ dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) &&
+ pipe_ctx->stream && pipe_ctx->plane_state) {
+				/* Only update visual confirm for SubVP and MCLK switching here.
+				 * The bar appears on all pipes, so it must be refreshed on every
+				 * display to keep the information from going stale.
+				 */
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx,
+ pipe_ctx->plane_res.hubp->inst);
+ }
+ }
+ }
+
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+		/* set logical flag for lock/unlock use */
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+ if (!pipe_ctx->plane_state)
+ continue;
+			if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
+ continue;
+ pipe_ctx->plane_state->triplebuffer_flips = false;
+ if (update_type == UPDATE_TYPE_FAST &&
+ dc->hwss.program_triplebuffer != NULL &&
+ !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
+				/* triple buffer for VUpdate only */
+ pipe_ctx->plane_state->triplebuffer_flips = true;
+ }
+ }
+ if (update_type == UPDATE_TYPE_FULL) {
+ /* force vsync flip when reconfiguring pipes to prevent underflow */
+ plane_state->flip_immediate = false;
+ }
+ }
+
+ // Update Type FULL, Surface updates
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe_ctx->top_pipe &&
+ !pipe_ctx->prev_odm_pipe &&
+ should_update_pipe_for_stream(context, pipe_ctx, stream)) {
+ struct dc_stream_status *stream_status = NULL;
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+			/* full front end update */
+ if (update_type == UPDATE_TYPE_FAST)
+ continue;
+
+ ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
+				/* turn off triple buffer for full update */
+ dc->hwss.program_triplebuffer(
+ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ }
+ stream_status =
+ stream_get_status(context, pipe_ctx->stream);
+
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(
+ dc, pipe_ctx->stream, stream_status->plane_count, context);
+ }
+ }
+ if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
+ dc->hwss.program_front_end_for_ctx(dc, context);
+ if (dc->debug.validate_dml_output) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
+ if (cur_pipe->stream == NULL)
+ continue;
+
+ cur_pipe->plane_res.hubp->funcs->validate_dml_output(
+ cur_pipe->plane_res.hubp, dc->ctx,
+ &context->res_ctx.pipe_ctx[i].rq_regs,
+ &context->res_ctx.pipe_ctx[i].dlg_regs,
+ &context->res_ctx.pipe_ctx[i].ttu_regs);
+ }
+ }
+ }
+
+ // Update Type FAST, Surface updates
+ if (update_type == UPDATE_TYPE_FAST) {
+ if (dc->hwss.set_flip_control_gsl)
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
+ continue;
+
+ if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
+ continue;
+
+ // GSL has to be used for flip immediate
+ dc->hwss.set_flip_control_gsl(pipe_ctx,
+ pipe_ctx->plane_state->flip_immediate);
+ }
+ }
+
+ /* Perform requested Updates */
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
+ continue;
+
+ if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
+ continue;
+
+				/* program triple buffer after lock based on flip type */
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
+					/* only enable triplebuffer for fast_update */
+ dc->hwss.program_triplebuffer(
+ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ }
+ if (pipe_ctx->plane_state->update_flags.bits.addr_update)
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
+ }
+ }
+ }
+
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ } else {
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+ }
+
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+ if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VBLANK);
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+
+ if (should_use_dmub_lock(stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_dig = 1;
+ inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
+
+ dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
+ false,
+ &hw_locks,
+ &inst_flags);
+ } else
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
+ top_pipe_to_program->stream_res.tg);
+ }
+
+ if (subvp_curr_use) {
+ /* If enabling subvp or transitioning from subvp->subvp, enable the
+ * phantom streams before we program front end for the phantom pipes.
+ */
+ if (update_type != UPDATE_TYPE_FAST) {
+ if (dc->hwss.enable_phantom_streams)
+ dc->hwss.enable_phantom_streams(dc, context);
+ }
+ }
+
+ if (update_type != UPDATE_TYPE_FAST)
+ dc->hwss.post_unlock_program_front_end(dc, context);
+
+ if (subvp_prev_use && !subvp_curr_use) {
+ /* If disabling subvp, disable phantom streams after front end
+ * programming has completed (we turn on phantom OTG in order
+ * to complete the plane disable for phantom pipes).
+ */
+
+ if (dc->hwss.disable_phantom_streams)
+ dc->hwss.disable_phantom_streams(dc, context);
+ }
+
+ if (update_type != UPDATE_TYPE_FAST)
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
+ /* Since phantom pipe programming is moved to post_unlock_program_front_end,
+ * move the SubVP lock to after the phantom pipes have been setup
+ */
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ } else {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
+ }
+
+ // Fire manual trigger only when bottom plane is flipped
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
+ !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
+ !pipe_ctx->plane_state->update_flags.bits.addr_update ||
+ pipe_ctx->plane_state->skip_manual_trigger)
+ continue;
+
+ if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
+ pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
+ }
+
+ current_stream_mask = get_stream_mask(dc, context);
+ if (current_stream_mask != context->stream_mask) {
+ context->stream_mask = current_stream_mask;
+ dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, current_stream_mask);
+ }
+}
+
+/**
+ * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change
+ *
+ * @dc: Used to get the current state status
+ * @stream: Target stream, whose attached planes may be removed
+ * @srf_updates: Array of surface updates
+ * @surface_count: Number of surface updates
+ * @is_plane_addition: [out] Set to true if this is a plane addition case
+ *
+ * DCN32x and newer support a feature named Dynamic ODM which can conflict with
+ * MPO if used simultaneously in some specific configurations (e.g., 4k@144).
+ * This function checks whether the incoming context requires an intermediate
+ * transition state, with pipe splitting minimized and ODM disabled, to work
+ * around these hardware limitations. If the OPP associated with an MPCC might
+ * change due to plane additions or removals, this function returns true.
+ *
+ * Return:
+ * True if OPP and MPCC might change, false otherwise.
+ */
+static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ bool *is_plane_addition)
+{
+	struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
+ bool force_minimal_pipe_splitting = false;
+ bool subvp_active = false;
+ uint32_t i;
+
+ *is_plane_addition = false;
+
+ if (cur_stream_status &&
+ dc->current_state->stream_count > 0 &&
+ dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
+ /* determine if minimal transition is required due to MPC*/
+ if (surface_count > 0) {
+ if (cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+ }
+
+ if (cur_stream_status &&
+ dc->current_state->stream_count == 1 &&
+ dc->debug.enable_single_display_2to1_odm_policy) {
+ /* determine if minimal transition is required due to dynamic ODM*/
+ if (surface_count > 0) {
+ if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
+ subvp_active = true;
+ break;
+ }
+ }
+
+ /* For SubVP when adding or removing planes we need to add a minimal transition
+ * (even when disabling all planes). Whenever disabling a phantom pipe, we
+ * must use the minimal transition path to disable the pipe correctly.
+ *
+	 * We want to use the minimal transition whenever SubVP is active, not
+	 * only when a plane is being added to or removed from a SubVP stream
+	 * (an MPO plane can be added to the DRR pipe of a SubVP + DRR config,
+	 * in which case we still want to run through a minimal transition to
+	 * disable SubVP).
+	 */
+ if (cur_stream_status && subvp_active) {
+ /* determine if minimal transition is required due to SubVP*/
+ if (cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+
+ return force_minimal_pipe_splitting;
+}
+
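+/* Snapshot of the pipe split, dynamic ODM and SubVP debug policies, taken so
+ * they can be forced off for a minimal transition and restored afterwards.
+ */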
+struct pipe_split_policy_backup {
+ bool dynamic_odm_policy;
+ bool subvp_policy;
+ enum pipe_split_policy mpc_policy;
+ char force_odm[MAX_PIPES];
+};
+
+static void backup_and_set_minimal_pipe_split_policy(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_split_policy_backup *policy)
+{
+ int i;
+
+ if (!dc->config.is_vmin_only_asic) {
+ policy->mpc_policy = dc->debug.pipe_split_policy;
+ dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+ }
+ policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
+ dc->debug.enable_single_display_2to1_odm_policy = false;
+ policy->subvp_policy = dc->debug.force_disable_subvp;
+ dc->debug.force_disable_subvp = true;
+ for (i = 0; i < context->stream_count; i++) {
+ policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments;
+ context->streams[i]->debug.force_odm_combine_segments = 0;
+ }
+}
+
+static void restore_minimal_pipe_split_policy(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_split_policy_backup *policy)
+{
+ uint8_t i;
+
+ if (!dc->config.is_vmin_only_asic)
+ dc->debug.pipe_split_policy = policy->mpc_policy;
+ dc->debug.enable_single_display_2to1_odm_policy =
+ policy->dynamic_odm_policy;
+ dc->debug.force_disable_subvp = policy->subvp_policy;
+ for (i = 0; i < context->stream_count; i++)
+ context->streams[i]->debug.force_odm_combine_segments = policy->force_odm[i];
+}
+
+static void release_minimal_transition_state(struct dc *dc,
+ struct dc_state *minimal_transition_context,
+ struct dc_state *base_context,
+ struct pipe_split_policy_backup *policy)
+{
+ restore_minimal_pipe_split_policy(dc, base_context, policy);
+ dc_state_release(minimal_transition_context);
+}
+
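+/* Clear flip_immediate on every plane in the context so that all flips happen
+ * on vsync; used to prevent underflow while pipes are reconfigured.
+ */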
+static void force_vsync_flip_in_minimal_transition_context(struct dc_state *context)
+{
+ uint8_t i;
+ int j;
+ struct dc_stream_status *stream_status;
+
+ for (i = 0; i < context->stream_count; i++) {
+ stream_status = &context->stream_status[i];
+
+ for (j = 0; j < stream_status->plane_count; j++)
+ stream_status->plane_states[j]->flip_immediate = false;
+ }
+}
+
+static struct dc_state *create_minimal_transition_state(struct dc *dc,
+ struct dc_state *base_context, struct pipe_split_policy_backup *policy)
+{
+ struct dc_state *minimal_transition_context = NULL;
+
+ minimal_transition_context = dc_state_create_copy(base_context);
+ if (!minimal_transition_context)
+ return NULL;
+
+ backup_and_set_minimal_pipe_split_policy(dc, base_context, policy);
+ /* commit minimal state */
+ if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
+ /* prevent underflow and corruption when reconfiguring pipes */
+ force_vsync_flip_in_minimal_transition_context(minimal_transition_context);
+ } else {
+ /*
+ * This should never happen, minimal transition state should
+ * always be validated first before adding pipe split features.
+ */
+ release_minimal_transition_state(dc, minimal_transition_context, base_context, policy);
+ BREAK_TO_DEBUGGER();
+ minimal_transition_context = NULL;
+ }
+ return minimal_transition_context;
+}
+
+static bool is_pipe_topology_transition_seamless_with_intermediate_step(
+ struct dc *dc,
+ struct dc_state *initial_state,
+ struct dc_state *intermediate_state,
+ struct dc_state *final_state)
+{
+ return dc->hwss.is_pipe_topology_transition_seamless(dc, initial_state,
+ intermediate_state) &&
+ dc->hwss.is_pipe_topology_transition_seamless(dc,
+ intermediate_state, final_state);
+}
+
+static void swap_and_release_current_context(struct dc *dc,
+ struct dc_state *new_context, struct dc_stream_state *stream)
+{
+	int i;
+ struct dc_state *old = dc->current_state;
+ struct pipe_ctx *pipe_ctx;
+
+ /* Since memory free requires elevated IRQ, an interrupt
+ * request is generated by mem free. If this happens
+ * between freeing and reassigning the context, our vsync
+ * interrupt will call into dc and cause a memory
+ * corruption. Hence, we first reassign the context,
+ * then free the old context.
+ */
+ dc->current_state = new_context;
+ dc_state_release(old);
+
+ // clear any forced full updates
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx = &new_context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
+ pipe_ctx->plane_state->force_full_update = false;
+ }
+}
+
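+/* Build a surface update array that simply re-commits the planes currently
+ * attached to the stream, without changing any plane properties.
+ */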
+static int initialize_empty_surface_updates(
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates)
+{
+ struct dc_stream_status *status = dc_stream_get_status(stream);
+ int i;
+
+ if (!status)
+ return 0;
+
+ for (i = 0; i < status->plane_count; i++)
+ srf_updates[i].surface = status->plane_states[i];
+
+ return status->plane_count;
+}
+
+static bool commit_minimal_transition_based_on_new_context(struct dc *dc,
+ struct dc_state *new_context,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ int surface_count)
+{
+ bool success = false;
+ struct pipe_split_policy_backup policy;
+ struct dc_state *intermediate_context =
+ create_minimal_transition_state(dc, new_context,
+ &policy);
+
+ if (intermediate_context) {
+ if (is_pipe_topology_transition_seamless_with_intermediate_step(
+ dc,
+ dc->current_state,
+ intermediate_context,
+ new_context)) {
+ DC_LOG_DC("commit minimal transition state: base = new state\n");
+ commit_planes_for_stream(dc, srf_updates,
+ surface_count, stream, NULL,
+ UPDATE_TYPE_FULL, intermediate_context);
+ swap_and_release_current_context(
+ dc, intermediate_context, stream);
+ dc_state_retain(dc->current_state);
+ success = true;
+ }
+ release_minimal_transition_state(
+ dc, intermediate_context, new_context, &policy);
+ }
+ return success;
+}
+
+static bool commit_minimal_transition_based_on_current_context(struct dc *dc,
+ struct dc_state *new_context, struct dc_stream_state *stream)
+{
+ bool success = false;
+ struct pipe_split_policy_backup policy;
+ struct dc_state *intermediate_context;
+ struct dc_state *old_current_state = dc->current_state;
+ struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0};
+ int surface_count;
+
+ /*
+ * Both current and new contexts share the same stream and plane state
+ * pointers. When new context is validated, stream and planes get
+ * populated with new updates such as new plane addresses. This makes
+ * the current context no longer valid because stream and planes are
+ * modified from the original. We backup current stream and plane states
+ * into scratch space whenever we are populating new context. So we can
+ * restore the original values back by calling the restore function now.
+ * This restores back the original stream and plane states associated
+ * with the current state.
+ */
+ restore_planes_and_stream_state(&dc->scratch.current_state, stream);
+ dc_state_retain(old_current_state);
+ intermediate_context = create_minimal_transition_state(dc,
+ old_current_state, &policy);
+
+ if (intermediate_context) {
+ if (is_pipe_topology_transition_seamless_with_intermediate_step(
+ dc,
+ dc->current_state,
+ intermediate_context,
+ new_context)) {
+ DC_LOG_DC("commit minimal transition state: base = current state\n");
+ surface_count = initialize_empty_surface_updates(
+ stream, srf_updates);
+ commit_planes_for_stream(dc, srf_updates,
+ surface_count, stream, NULL,
+ UPDATE_TYPE_FULL, intermediate_context);
+ swap_and_release_current_context(
+ dc, intermediate_context, stream);
+ dc_state_retain(dc->current_state);
+ success = true;
+ }
+ release_minimal_transition_state(dc, intermediate_context,
+ old_current_state, &policy);
+ }
+ dc_state_release(old_current_state);
+ /*
+ * Restore stream and plane states back to the values associated with
+ * new context.
+ */
+ restore_planes_and_stream_state(&dc->scratch.new_state, stream);
+ return success;
+}
+
+/**
+ * commit_minimal_transition_state_in_dc_update - Commit a minimal state based
+ * on current or new context
+ *
+ * @dc: DC structure, used to get the current state
+ * @new_context: New context
+ * @stream: Stream getting the update for the flip
+ * @srf_updates: Surface updates
+ * @surface_count: Number of surfaces
+ *
+ * The function takes in the current state and the new state and determines a
+ * minimal transition state as the intermediate step that could make the
+ * transition between the current and new states seamless. If found, it commits
+ * the minimal transition state, updates the current state to it, and returns
+ * true; if not, it returns false.
+ *
+ * Return:
+ * True if the minimal transition succeeded, false otherwise
+ */
+static bool commit_minimal_transition_state_in_dc_update(struct dc *dc,
+ struct dc_state *new_context,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ int surface_count)
+{
+ bool success = commit_minimal_transition_based_on_new_context(
+ dc, new_context, stream, srf_updates,
+ surface_count);
+ if (!success)
+ success = commit_minimal_transition_based_on_current_context(dc,
+ new_context, stream);
+ if (!success)
+ DC_LOG_ERROR("Fail to commit a seamless minimal transition state between current and new states.\nThis pipe topology update is non-seamless!\n");
+ return success;
+}
+
+/**
+ * commit_minimal_transition_state - Create a transition pipe split state
+ *
+ * @dc: Used to get the current state status
+ * @transition_base_context: New transition state
+ *
+ * In some specific configurations, such as pipe split on multi-display with
+ * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe
+ * programming when moving to new planes. To mitigate those types of problems,
+ * this function adds a transition state that minimizes pipe usage before
+ * programming the new configuration. When adding a new plane, the current
+ * state requires the least pipes, so it is applied without splitting. When
+ * removing a plane, the new state requires the least pipes, so it is applied
+ * without splitting.
+ *
+ * Return:
+ * Return false if something is wrong in the transition state.
+ */
+static bool commit_minimal_transition_state(struct dc *dc,
+ struct dc_state *transition_base_context)
+{
+ struct dc_state *transition_context;
+ struct pipe_split_policy_backup policy;
+ enum dc_status ret = DC_ERROR_UNEXPECTED;
+ unsigned int i, j;
+ unsigned int pipe_in_use = 0;
+ bool subvp_in_use = false;
+ bool odm_in_use = false;
+
+ /* check current pipes in use*/
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state)
+ pipe_in_use++;
+ }
+
+ /* If SubVP is enabled and we are adding or removing planes from any main subvp
+ * pipe, we must use the minimal transition.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
+ subvp_in_use = true;
+ break;
+ }
+ }
+
+ /* If ODM is enabled and we are adding or removing planes from any ODM
+ * pipe, we must use the minimal transition.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
+
+ if (resource_is_pipe_type(pipe, OTG_MASTER)) {
+ odm_in_use = resource_get_odm_slice_count(pipe) > 1;
+ break;
+ }
+ }
+
+	/* If the OS adds a new surface while all pipes are already in use by the
+	 * ODM combine or MPC split features, commit_minimal_transition_state must
+	 * be used to transition safely. Once the OS exits MPO and goes back to
+	 * using ODM and MPC split across all pipes, it must be called again.
+	 * Otherwise, return true to skip.
+	 *
+	 * This reduces the scenarios that use dc_commit_state_no_check at flip
+	 * time, especially when entering/exiting MPO while DCN still has enough
+	 * resources.
+	 */
+ if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use)
+ return true;
+
+ DC_LOG_DC("%s base = %s state, reason = %s\n", __func__,
+ dc->current_state == transition_base_context ? "current" : "new",
+ subvp_in_use ? "Subvp In Use" :
+ odm_in_use ? "ODM in Use" :
+ dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" :
+ "Unknown");
+
+ dc_state_retain(transition_base_context);
+ transition_context = create_minimal_transition_state(dc,
+ transition_base_context, &policy);
+ if (transition_context) {
+ ret = dc_commit_state_no_check(dc, transition_context);
+ release_minimal_transition_state(dc, transition_context, transition_base_context, &policy);
+ }
+ dc_state_release(transition_base_context);
+
+ if (ret != DC_OK) {
+ /* this should never happen */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ /* force full surface update */
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
+ dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
+ }
+ }
+
+ return true;
+}
+
+static void populate_fast_updates(struct dc_fast_update *fast_update,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_update *stream_update)
+{
+ int i = 0;
+
+ if (stream_update) {
+ fast_update[0].out_transfer_func = stream_update->out_transfer_func;
+ fast_update[0].output_csc_transform = stream_update->output_csc_transform;
+ }
+
+ for (i = 0; i < surface_count; i++) {
+ fast_update[i].flip_addr = srf_updates[i].flip_addr;
+ fast_update[i].gamma = srf_updates[i].gamma;
+ fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
+ fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
+ fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
+ }
+}
+
+static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
+{
+ int i;
+
+ if (fast_update[0].out_transfer_func ||
+ fast_update[0].output_csc_transform)
+ return true;
+
+ for (i = 0; i < surface_count; i++) {
+ if (fast_update[i].flip_addr ||
+ fast_update[i].gamma ||
+ fast_update[i].gamut_remap_matrix ||
+ fast_update[i].input_csc_color_matrix ||
+ fast_update[i].coeff_reduction_factor)
+ return true;
+ }
+
+ return false;
+}
+
+static bool full_update_required(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ struct dc_stream_state *stream)
+{
+	int i;
+ struct dc_stream_status *stream_status;
+ const struct dc_state *context = dc->current_state;
+
+ for (i = 0; i < surface_count; i++) {
+ if (srf_updates &&
+ (srf_updates[i].plane_info ||
+ srf_updates[i].scaling_info ||
+ (srf_updates[i].hdr_mult.value &&
+		     srf_updates[i].hdr_mult.value != srf_updates[i].surface->hdr_mult.value) ||
+ srf_updates[i].in_transfer_func ||
+ srf_updates[i].func_shaper ||
+ srf_updates[i].lut3d_func ||
+ srf_updates[i].surface->force_full_update ||
+ (srf_updates[i].flip_addr &&
+ srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
+ !is_surface_in_context(context, srf_updates[i].surface)))
+ return true;
+ }
+
+ if (stream_update &&
+ (((stream_update->src.height != 0 && stream_update->src.width != 0) ||
+ (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
+ stream_update->integer_scaling_update) ||
+ stream_update->hdr_static_metadata ||
+ stream_update->abm_level ||
+ stream_update->periodic_interrupt ||
+ stream_update->vrr_infopacket ||
+ stream_update->vsc_infopacket ||
+ stream_update->vsp_infopacket ||
+ stream_update->hfvsif_infopacket ||
+ stream_update->vtem_infopacket ||
+ stream_update->adaptive_sync_infopacket ||
+ stream_update->dpms_off ||
+ stream_update->allow_freesync ||
+ stream_update->vrr_active_variable ||
+ stream_update->vrr_active_fixed ||
+ stream_update->gamut_remap ||
+ stream_update->output_color_space ||
+ stream_update->dither_option ||
+ stream_update->wb_update ||
+ stream_update->dsc_config ||
+ stream_update->mst_bw_update ||
+ stream_update->func_shaper ||
+ stream_update->lut3d_func ||
+ stream_update->pending_test_pattern ||
+ stream_update->crtc_timing_adjust))
+ return true;
+
+ if (stream) {
+ stream_status = dc_stream_get_status(stream);
+ if (stream_status == NULL || stream_status->plane_count != surface_count)
+ return true;
+ }
+ if (dc->idle_optimizations_allowed)
+ return true;
+
+ return false;
+}
+
+static bool fast_update_only(struct dc *dc,
+ struct dc_fast_update *fast_update,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ struct dc_stream_state *stream)
+{
+ return fast_updates_exist(fast_update, surface_count)
+ && !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
+}
+
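+/* Legacy update sequence (used for pre-DCN3.2 ASICs by
+ * dc_commit_updates_for_stream): builds and validates a context from the
+ * explicitly provided base state.
+ */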
+static bool update_planes_and_stream_v1(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_state *state)
+{
+ const struct dc_stream_status *stream_status;
+ enum surface_update_type update_type;
+ struct dc_state *context;
+ struct dc_context *dc_ctx = dc->ctx;
+ int i, j;
+ struct dc_fast_update fast_update[MAX_SURFACES] = {0};
+
+ dc_exit_ips_for_hw_access(dc);
+
+ populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
+ stream_status = dc_stream_get_status(stream);
+ context = dc->current_state;
+
+ update_type = dc_check_update_surfaces_for_stream(
+ dc, srf_updates, surface_count, stream_update, stream_status);
+
+ if (update_type >= UPDATE_TYPE_FULL) {
+
+ /* initialize scratch memory for building context */
+ context = dc_state_create_copy(state);
+ if (context == NULL) {
+ DC_ERROR("Failed to allocate new validate context!\n");
+ return false;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
+ new_pipe->plane_state->force_full_update = true;
+ }
+ } else if (update_type == UPDATE_TYPE_FAST) {
+ /*
+ * Previous frame finished and HW is ready for optimization.
+ */
+ dc_post_update_surfaces_to_stream(dc);
+ }
+
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *surface = srf_updates[i].surface;
+
+ copy_surface_update_to_plane(surface, &srf_updates[i]);
+
+ if (update_type >= UPDATE_TYPE_MED) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx =
+ &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->plane_state != surface)
+ continue;
+
+ resource_build_scaling_params(pipe_ctx);
+ }
+ }
+ }
+
+ copy_stream_update_to_stream(dc, context, stream, stream_update);
+
+ if (update_type >= UPDATE_TYPE_FULL) {
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ DC_ERROR("Mode validation failed for stream update!\n");
+ dc_state_release(context);
+ return false;
+ }
+ }
+
+ TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
+
+ if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
+ !dc->debug.enable_legacy_fast_update) {
+ commit_planes_for_stream_fast(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+ } else {
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+ }
+	/* update current_state */
+ if (dc->current_state != context) {
+
+ struct dc_state *old = dc->current_state;
+
+ dc->current_state = context;
+ dc_state_release(old);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
+ pipe_ctx->plane_state->force_full_update = false;
+ }
+ }
+
+ /* Legacy optimization path for DCE. */
+ if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
+ dc_post_update_surfaces_to_stream(dc);
+ TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+ }
+ return true;
+}
+
+static bool update_planes_and_stream_v2(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update)
+{
+ struct dc_state *context;
+ enum surface_update_type update_type;
+ struct dc_fast_update fast_update[MAX_SURFACES] = {0};
+
+	/* In cases where MPO and pipe split or ODM are used, transitions can
+	 * cause underflow. Apply the stream configuration with minimal pipe
+	 * split first to avoid unsupported transitions for active pipes.
+	 */
+	bool force_minimal_pipe_splitting = false;
+	bool is_plane_addition = false;
+ bool is_fast_update_only;
+
+ populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
+ is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
+ surface_count, stream_update, stream);
+ force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
+ dc,
+ stream,
+ srf_updates,
+ surface_count,
+ &is_plane_addition);
+
+ /* on plane addition, minimal state is the current one */
+ if (force_minimal_pipe_splitting && is_plane_addition &&
+ !commit_minimal_transition_state(dc, dc->current_state))
+ return false;
+
+ if (!update_planes_and_stream_state(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ &update_type,
+ &context))
+ return false;
+
+ /* on plane removal, minimal state is the new one */
+ if (force_minimal_pipe_splitting && !is_plane_addition) {
+ if (!commit_minimal_transition_state(dc, context)) {
+ dc_state_release(context);
+ return false;
+ }
+ update_type = UPDATE_TYPE_FULL;
+ }
+
+ if (dc->hwss.is_pipe_topology_transition_seamless &&
+ !dc->hwss.is_pipe_topology_transition_seamless(
+ dc, dc->current_state, context))
+ commit_minimal_transition_state_in_dc_update(dc, context, stream,
+ srf_updates, surface_count);
+
+ if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
+ commit_planes_for_stream_fast(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+ } else {
+ if (!stream_update &&
+ dc->hwss.is_pipe_topology_transition_seamless &&
+ !dc->hwss.is_pipe_topology_transition_seamless(
+ dc, dc->current_state, context)) {
+ DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
+ BREAK_TO_DEBUGGER();
+ }
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+ }
+ if (dc->current_state != context)
+ swap_and_release_current_context(dc, context, stream);
+ return true;
+}
+
+static void commit_planes_and_stream_update_on_current_context(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type)
+{
+ struct dc_fast_update fast_update[MAX_SURFACES] = {0};
+
+ ASSERT(update_type < UPDATE_TYPE_FULL);
+ populate_fast_updates(fast_update, srf_updates, surface_count,
+ stream_update);
+ if (fast_update_only(dc, fast_update, srf_updates, surface_count,
+ stream_update, stream) &&
+ !dc->debug.enable_legacy_fast_update)
+ commit_planes_for_stream_fast(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ dc->current_state);
+ else
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ dc->current_state);
+}
+
+static void commit_planes_and_stream_update_with_new_context(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type,
+ struct dc_state *new_context)
+{
+ ASSERT(update_type >= UPDATE_TYPE_FULL);
+ if (!dc->hwss.is_pipe_topology_transition_seamless(dc,
+ dc->current_state, new_context))
+ /*
+ * It is required by the feature design that all pipe topologies
+ * using extra free pipes for power saving purposes such as
+ * dynamic ODM or SubVp shall only be enabled when it can be
+ * transitioned seamlessly to AND from its minimal transition
+ * state. A minimal transition state is defined as the same dc
+ * state but with all power saving features disabled. So it uses
+ * the minimum pipe topology. When we can't seamlessly
+ * transition from state A to state B, we will insert the
+ * minimal transition state A' or B' in between so seamless
+ * transition between A and B can be made possible.
+ */
+ commit_minimal_transition_state_in_dc_update(dc, new_context,
+ stream, srf_updates, surface_count);
+
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ new_context);
+}
+
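+/* Newest update sequence (ASICs newer than DCN 3.51): FAST updates are
+ * committed on the current context, while FULL updates are committed on a
+ * newly validated context that then replaces the current one.
+ */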
+static bool update_planes_and_stream_v3(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update)
+{
+ struct dc_state *new_context;
+ enum surface_update_type update_type;
+
+ /*
+ * When this function returns true and new_context is not equal to
+ * current state, the function allocates and validates a new dc state
+	 * and assigns it to new_context. The caller is responsible for freeing
+	 * this memory when new_context is no longer used. We swap the current
+	 * state with the new context and free the current state instead, so
+	 * new_context's memory lives until the next full update, after which it
+	 * is replaced by a newer context. Refer to the use of
+	 * swap_and_release_current_context below.
+ */
+ if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
+ stream, stream_update, &update_type,
+ &new_context))
+ return false;
+
+ if (new_context == dc->current_state) {
+ commit_planes_and_stream_update_on_current_context(dc,
+ srf_updates, surface_count, stream,
+ stream_update, update_type);
+ } else {
+ commit_planes_and_stream_update_with_new_context(dc,
+ srf_updates, surface_count, stream,
+ stream_update, update_type, new_context);
+ swap_and_release_current_context(dc, new_context, stream);
+ }
+
+ return true;
+}
+
+bool dc_update_planes_and_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update)
+{
+ dc_exit_ips_for_hw_access(dc);
+ /*
+	 * update planes and stream version 3 separates FULL and FAST updates
+	 * into their own sequences. It aims to clean up the frequent checks for
+	 * update type that resulted in unnecessary branching in the logic flow.
+	 * It also adds a new commit minimal transition sequence, which detects
+	 * the need for a minimal transition based on an actual comparison of the
+	 * current and new states instead of "predicting" it from per-feature
+	 * software policy, i.e. could_mpcc_tree_change_for_active_pipes. The new
+	 * commit minimal transition sequence is universal to any power saving
+	 * feature that would use extra free pipes, such as Dynamic ODM/MPC
+	 * Combine, MPO or SubVP. Therefore there is no longer a need to
+	 * specially handle compatibility problems with transitions among those
+	 * features, as they are now transparent to the new sequence.
+ */
+ if (dc->ctx->dce_version > DCN_VERSION_3_51)
+ return update_planes_and_stream_v3(dc, srf_updates,
+ surface_count, stream, stream_update);
+ return update_planes_and_stream_v2(dc, srf_updates,
+ surface_count, stream, stream_update);
+}
+
+void dc_commit_updates_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_state *state)
+{
+ dc_exit_ips_for_hw_access(dc);
+ /* TODO: Since change commit sequence can have a huge impact,
+ * we decided to only enable it for DCN3x. However, as soon as
+ * we get more confident about this change we'll need to enable
+ * the new sequence for all ASICs.
+ */
+ if (dc->ctx->dce_version > DCN_VERSION_3_51) {
+ update_planes_and_stream_v3(dc, srf_updates, surface_count,
+ stream, stream_update);
+ return;
+ }
+ if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ update_planes_and_stream_v2(dc, srf_updates, surface_count,
+ stream, stream_update);
+ return;
+ }
+ update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
+ stream_update, state);
+}
+
+uint8_t dc_get_current_stream_count(struct dc *dc)
+{
+ return dc->current_state->stream_count;
+}
+
+struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
+{
+ if (i < dc->current_state->stream_count)
+ return dc->current_state->streams[i];
+ return NULL;
+}
+
+enum dc_irq_source dc_interrupt_to_irq_source(
+ struct dc *dc,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
+}
+
+/*
+ * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
+ */
+bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
+{
+ if (dc == NULL)
+ return false;
+
+ return dal_irq_service_set(dc->res_pool->irqs, src, enable);
+}
+
+void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
+{
+ dal_irq_service_ack(dc->res_pool->irqs, src);
+}
+
+void dc_power_down_on_boot(struct dc *dc)
+{
+ if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
+ dc->hwss.power_down_on_boot)
+ dc->hwss.power_down_on_boot(dc);
+}
+
+void dc_set_power_state(
+ struct dc *dc,
+ enum dc_acpi_cm_power_state power_state)
+{
+ if (!dc->current_state)
+ return;
+
+ switch (power_state) {
+ case DC_ACPI_CM_POWER_STATE_D0:
+ dc_state_construct(dc, dc->current_state);
+
+ dc_exit_ips_for_hw_access(dc);
+
+ dc_z10_restore(dc);
+
+ dc->hwss.init_hw(dc);
+
+ if (dc->hwss.init_sys_ctx != NULL &&
+ dc->vm_pa_config.valid) {
+ dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
+ }
+
+ break;
+ default:
+ ASSERT(dc->current_state->stream_count == 0);
+
+ dc_state_destruct(dc->current_state);
+
+ break;
+ }
+}
+
+void dc_resume(struct dc *dc)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->link_count; i++)
+ dc->link_srv->resume(dc->links[i]);
+}
+
+bool dc_is_dmcu_initialized(struct dc *dc)
+{
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (dmcu)
+ return dmcu->funcs->is_dmcu_initialized(dmcu);
+ return false;
+}
+
+void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
+{
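+	/* all values below are copied from the DCN clock state and are in kHz */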
+ info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
+ info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
+ info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
+ info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
+ info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
+ info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
+ info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
+ info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
+ info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
+}
+
+enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
+{
+ if (dc->hwss.set_clock)
+ return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
+ return DC_ERROR_UNEXPECTED;
+}
+
+void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
+{
+ if (dc->hwss.get_clock)
+ dc->hwss.get_clock(dc, clock_type, clock_cfg);
+}
+
+/* enable/disable eDP PSR without specifying a stream for eDP */
+bool dc_set_psr_allow_active(struct dc *dc, bool enable)
+{
+ int i;
+ bool allow_active;
+
+ for (i = 0; i < dc->current_state->stream_count ; i++) {
+ struct dc_link *link;
+ struct dc_stream_state *stream = dc->current_state->streams[i];
+
+ link = stream->link;
+ if (!link)
+ continue;
+
+ if (link->psr_settings.psr_feature_enabled) {
+ if (enable && !link->psr_settings.psr_allow_active) {
+ allow_active = true;
+ if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
+ return false;
+ } else if (!enable && link->psr_settings.psr_allow_active) {
+ allow_active = false;
+ if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+/* enable/disable eDP Replay without specifying a stream for eDP */
+bool dc_set_replay_allow_active(struct dc *dc, bool active)
+{
+ int i;
+ bool allow_active;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ struct dc_link *link;
+ struct dc_stream_state *stream = dc->current_state->streams[i];
+
+ link = stream->link;
+ if (!link)
+ continue;
+
+ if (link->replay_settings.replay_feature_enabled) {
+ if (active && !link->replay_settings.replay_allow_active) {
+ allow_active = true;
+ if (!dc_link_set_replay_allow_active(link, &allow_active,
+ false, false, NULL))
+ return false;
+ } else if (!active && link->replay_settings.replay_allow_active) {
+ allow_active = false;
+ if (!dc_link_set_replay_allow_active(link, &allow_active,
+ true, false, NULL))
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name)
+{
+ if (dc->debug.disable_idle_power_optimizations)
+ return;
+
+ if (allow != dc->idle_optimizations_allowed)
+ DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__,
+ dc->idle_optimizations_allowed, allow, caller_name);
+
+ if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
+ return;
+
+ if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
+ if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
+ return;
+
+ if (allow == dc->idle_optimizations_allowed)
+ return;
+
+ if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
+ dc->idle_optimizations_allowed = allow;
+}
+
+void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name)
+{
+ if (dc->caps.ips_support)
+ dc_allow_idle_optimizations_internal(dc, false, caller_name);
+}
+
+bool dc_dmub_is_ips_idle_state(struct dc *dc)
+{
+ if (dc->debug.disable_idle_power_optimizations)
+ return false;
+
+ if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
+ return false;
+
+ if (!dc->ctx->dmub_srv)
+ return false;
+
+ return dc->ctx->dmub_srv->idle_allowed;
+}
+
+/* set min and max memory clock to lowest and highest DPM level, respectively */
+void dc_unlock_memory_clock_frequency(struct dc *dc)
+{
+ if (dc->clk_mgr->funcs->set_hard_min_memclk)
+ dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
+
+ if (dc->clk_mgr->funcs->set_hard_max_memclk)
+ dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+}
+
+/* set min memory clock to the min required for current mode, max to maxDPM */
+void dc_lock_memory_clock_frequency(struct dc *dc)
+{
+ if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
+ dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
+
+ if (dc->clk_mgr->funcs->set_hard_min_memclk)
+ dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
+
+ if (dc->clk_mgr->funcs->set_hard_max_memclk)
+ dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+}
+
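+/* Blank pixel data on all active pipes, force both min and max memclk to
+ * memclk_mhz, then unblank; used when a memclk change cannot rely on p-state
+ * switching.
+ */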
+static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
+{
+ struct dc_state *context = dc->current_state;
+ struct hubp *hubp;
+ struct pipe_ctx *pipe;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream != NULL) {
+ dc->hwss.disable_pixel_data(dc, pipe, true);
+
+ // wait for double buffer
+ pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
+ pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
+
+ hubp = pipe->plane_res.hubp;
+ hubp->funcs->set_blank_regs(hubp, true);
+ }
+ }
+
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
+ dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream != NULL) {
+ dc->hwss.disable_pixel_data(dc, pipe, false);
+
+ hubp = pipe->plane_res.hubp;
+ hubp->funcs->set_blank_regs(hubp, false);
+ }
+ }
+}
+
+/**
+ * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
+ * @dc: pointer to dc of the dm calling this
+ * @enable: True = transition to DC mode, false = transition back to AC mode
+ *
+ * Some SoCs define additional clock limits when in DC mode; DM should invoke
+ * this function when the platform undergoes a power source transition so DC
+ * can apply/unapply the limit. This interface may be disruptive to the
+ * onscreen content.
+ *
+ * Context: Triggered by OS through DM interface, or manually by escape calls.
+ * Need to hold a dclock when doing so.
+ *
+ * Return: none (void function)
+ *
+ */
+void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
+{
+ unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i;
+ bool p_state_change_support;
+
+ if (!dc->config.dc_mode_clk_limit_support)
+ return;
+
+ softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
+ for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) {
+ if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM)
+ maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
+ }
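+	/* current DRAM clock demand in MHz, rounded up from kHz */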
+ funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
+ p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
+
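+	/* Apply the DC-mode softmax only when the functional minimum fits under
+	 * it; without p-state switching support, pipes must be blanked around
+	 * the forced memclk change (see blank_and_force_memclk above).
+	 */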
+ if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
+ if (p_state_change_support) {
+ if (funcMin <= softMax)
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
+ // else: No-Op
+ } else {
+ if (funcMin <= softMax)
+ blank_and_force_memclk(dc, true, softMax);
+ // else: No-Op
+ }
+ } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
+ if (p_state_change_support) {
+ if (funcMin <= softMax)
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
+ // else: No-Op
+ } else {
+ if (funcMin <= softMax)
+ blank_and_force_memclk(dc, true, maxDPM);
+ // else: No-Op
+ }
+ }
+ dc->clk_mgr->dc_mode_softmax_enabled = enable;
+}
+
+bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
+ unsigned int pitch,
+ unsigned int height,
+ enum surface_pixel_format format,
+ struct dc_cursor_attributes *cursor_attr)
+{
+ if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, pitch, height, format, cursor_attr))
+ return true;
+ return false;
+}
+
+/* cleanup on driver unload */
+void dc_hardware_release(struct dc *dc)
+{
+ dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);
+
+ if (dc->hwss.hardware_release)
+ dc->hwss.hardware_release(dc);
+}
+
+void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
+{
+ if (dc->current_state)
+ dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
+}
+
+/**
+ * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications
+ *
+ * @dc: [in] dc structure
+ *
+ * Checks whether DMUB FW supports outbox notifications. If supported, DM
+ * should register the outbox interrupt prior to actually enabling interrupts
+ * via dc_enable_dmub_outbox.
+ *
+ * Return:
+ * True if DMUB FW supports outbox notifications, False otherwise
+ */
+bool dc_is_dmub_outbox_supported(struct dc *dc)
+{
+	switch (dc->ctx->asic_id.chip_family) {
+ case FAMILY_YELLOW_CARP:
+ /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
+ if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
+ !dc->debug.dpia_debug.bits.disable_dpia)
+ return true;
+ break;
+
+ case AMDGPU_FAMILY_GC_11_0_1:
+ case AMDGPU_FAMILY_GC_11_5_0:
+ if (!dc->debug.dpia_debug.bits.disable_dpia)
+ return true;
+ break;
+
+ default:
+ break;
+ }
+
+ /* dmub aux needs dmub notifications to be enabled */
+	return dc->debug.enable_dmub_aux_for_legacy_ddc;
+}
+
+/**
+ * dc_enable_dmub_notifications - Check if dmub fw supports outbox
+ *
+ * @dc: [in] dc structure
+ *
+ * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
+ * notifications. All DMs shall switch to dc_is_dmub_outbox_supported. This
+ * API shall be removed after switching.
+ *
+ * Return:
+ * True if DMUB FW supports outbox notifications, False otherwise
+ */
+bool dc_enable_dmub_notifications(struct dc *dc)
+{
+ return dc_is_dmub_outbox_supported(dc);
+}
+
+/**
+ * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
+ *
+ * @dc: [in] dc structure
+ *
+ * Enables DMUB unsolicited notifications to x86 via outbox.
+ */
+void dc_enable_dmub_outbox(struct dc *dc)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+
+ dmub_enable_outbox_notification(dc_ctx->dmub_srv);
+ DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
+}
+
+/**
+ * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
+ * Sets port index appropriately for legacy DDC
+ * @dc: dc structure
+ * @link_index: link index
+ * @payload: aux payload
+ *
+ * Return: True if successful, False if failure
+ */
+bool dc_process_dmub_aux_transfer_async(struct dc *dc,
+ uint32_t link_index,
+ struct aux_payload *payload)
+{
+ uint8_t action;
+ union dmub_rb_cmd cmd = {0};
+
+ ASSERT(payload->length <= 16);
+
+ cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
+ cmd.dp_aux_access.header.payload_bytes = 0;
+ /* For dpia, ddc_pin is set to NULL */
+ if (!dc->links[link_index]->ddc->ddc_pin)
+ cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
+ else
+ cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
+
+ cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
+ cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
+ cmd.dp_aux_access.aux_control.timeout = 0;
+ cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
+ cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
+ cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
+
+ /* set aux action */
+ if (payload->i2c_over_aux) {
+ if (payload->write) {
+ if (payload->mot)
+ action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
+ else
+ action = DP_AUX_REQ_ACTION_I2C_WRITE;
+ } else {
+ if (payload->mot)
+ action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
+ else
+ action = DP_AUX_REQ_ACTION_I2C_READ;
+ }
+ } else {
+ if (payload->write)
+ action = DP_AUX_REQ_ACTION_DPCD_WRITE;
+ else
+ action = DP_AUX_REQ_ACTION_DPCD_READ;
+ }
+
+ cmd.dp_aux_access.aux_control.dpaux.action = action;
+
+ if (payload->length && payload->write) {
+ memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
+ payload->data,
+ payload->length
+ );
+ }
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+}
+
+uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
+ uint8_t dpia_port_index)
+{
+ uint8_t index, link_index = 0xFF;
+
+ for (index = 0; index < dc->link_count; index++) {
+ /* ddc_hw_inst has dpia port index for dpia links
+ * and ddc instance for legacy links
+ */
+ if (!dc->links[index]->ddc->ddc_pin) {
+ if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
+ link_index = index;
+ break;
+ }
+ }
+ }
+ ASSERT(link_index != 0xFF);
+ return link_index;
+}
+
+/**
+ * dc_process_dmub_set_config_async - Submits set_config command
+ *
+ * @dc: [in] dc structure
+ * @link_index: [in] link index
+ * @payload: [in] aux payload
+ * @notify: [out] set_config immediate reply
+ *
+ * Submits set_config command to dmub via inbox message.
+ *
+ * Return:
+ * True if successful, False if failure
+ */
+bool dc_process_dmub_set_config_async(struct dc *dc,
+ uint32_t link_index,
+ struct set_config_cmd_payload *payload,
+ struct dmub_notification *notify)
+{
+ union dmub_rb_cmd cmd = {0};
+ bool is_cmd_complete = true;
+
+ /* prepare SET_CONFIG command */
+ cmd.set_config_access.header.type = DMUB_CMD__DPIA;
+ cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
+
+ cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
+ cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
+ cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
+
+ if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
+ /* command is not processed by dmub */
+ notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
+ return is_cmd_complete;
+ }
+
+ /* command processed by dmub, if ret_status is 1, it is completed instantly */
+ if (cmd.set_config_access.header.ret_status == 1)
+ notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
+ else
+ /* cmd pending, will receive notification via outbox */
+ is_cmd_complete = false;
+
+ return is_cmd_complete;
+}
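+
+/*
+ * Illustrative usage sketch only, not driver code: a caller typically treats
+ * a "false" return as "reply pending via the outbox". "payload" is assumed
+ * to be a filled-in struct set_config_cmd_payload, and the two helpers named
+ * here are hypothetical:
+ *
+ *	struct dmub_notification notify = {0};
+ *
+ *	if (dc_process_dmub_set_config_async(dc, link_index, &payload, &notify))
+ *		handle_immediate_reply(notify.sc_status);
+ *	else
+ *		wait_for_outbox_notification(link_index);
+ */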
+
+/**
+ * dc_process_dmub_set_mst_slots - Submits MST slot allocation
+ *
+ * @dc: [in] dc structure
+ * @link_index: [in] link index
+ * @mst_alloc_slots: [in] mst slots to be allocated
+ * @mst_slots_in_use: [out] mst slots in use returned in failure case
+ *
+ * Submits mst slot allocation command to dmub via inbox message
+ *
+ * Return:
+ * DC_OK if successful, an error code on failure
+ */
+enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
+ uint32_t link_index,
+ uint8_t mst_alloc_slots,
+ uint8_t *mst_slots_in_use)
+{
+ union dmub_rb_cmd cmd = {0};
+
+ /* prepare MST_ALLOC_SLOTS command */
+ cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
+ cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
+
+ cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
+ cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
+
+ if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ /* command is not processed by dmub */
+ return DC_ERROR_UNEXPECTED;
+
+ /* command processed by dmub; ret_status must be 1 on success */
+ if (cmd.set_config_access.header.ret_status != 1)
+ /* command processing error */
+ return DC_ERROR_UNEXPECTED;
+
+ /* command processed and we have a status of 2, mst not enabled in dpia */
+ if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
+ return DC_FAIL_UNSUPPORTED_1;
+
+ /* previously configured mst alloc and used slots did not match */
+ if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
+ *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
+ return DC_NOT_SUPPORTED;
+ }
+
+ return DC_OK;
+}
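+
+/*
+ * Illustrative usage sketch only, not driver code, showing how the distinct
+ * status codes above might be handled by a caller; "slots" and "link_index"
+ * are assumed from the caller's context:
+ *
+ *	uint8_t slots_in_use = 0;
+ *	enum dc_status status =
+ *		dc_process_dmub_set_mst_slots(dc, link_index, slots, &slots_in_use);
+ *
+ *	if (status == DC_FAIL_UNSUPPORTED_1)
+ *		; // MST not enabled in the DPIA, fall back
+ *	else if (status == DC_NOT_SUPPORTED)
+ *		; // mismatch: slots_in_use now holds the slots still in use
+ *	else if (status != DC_OK)
+ *		; // command rejected or failed in dmub
+ */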
+
+/**
+ * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
+ *
+ * @dc: [in] dc structure
+ * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
+ *
+ * Submits dpia hpd int enable command to dmub via inbox message
+ */
+void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
+ uint32_t hpd_int_enable)
+{
+ union dmub_rb_cmd cmd = {0};
+
+ cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
+ cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
+}
+
+/**
+ * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging
+ *
+ * @dc: [in] dc structure
+ */
+void dc_print_dmub_diagnostic_data(const struct dc *dc)
+{
+ dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv);
+}
+
+/**
+ * dc_disable_accelerated_mode - disable accelerated mode
+ * @dc: dc structure
+ */
+void dc_disable_accelerated_mode(struct dc *dc)
+{
+ bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
+}
+
+/**
+ * dc_notify_vsync_int_state - notifies vsync enable/disable state
+ * @dc: dc structure
+ * @stream: stream where vsync int state changed
+ * @enable: whether vsync is enabled or disabled
+ *
+ * Called when vsync is enabled/disabled. Will notify DMUB to start/stop ABM
+ * interrupts after steady state is reached.
+ */
+void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
+{
+ int i;
+ int edp_num;
+ struct pipe_ctx *pipe = NULL;
+ struct dc_link *link = stream->sink->link;
+ struct dc_link *edp_links[MAX_NUM_EDP];
+
+ if (link->psr_settings.psr_feature_enabled)
+ return;
+
+ if (link->replay_settings.replay_feature_enabled)
+ return;
+
+ /* find primary pipe associated with stream */
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.tg)
+ break;
+ }
+
+ if (i == MAX_PIPES) {
+ ASSERT(0);
+ return;
+ }
+
+ dc_get_edp_links(dc, edp_links, &edp_num);
+
+ /* Determine panel inst */
+ for (i = 0; i < edp_num; i++) {
+ if (edp_links[i] == link)
+ break;
+ }
+
+ if (i == edp_num) {
+ return;
+ }
+
+ if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
+ pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
+}
+
+/*****************************************************************************
+ * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause ABM
+ * @dc: dc structure
+ * @stream: stream associated with the ABM instance
+ * @pData: abm hw states
+ *
+ ****************************************************************************/
+bool dc_abm_save_restore(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ struct abm_save_restore *pData)
+{
+ int i;
+ int edp_num;
+ struct pipe_ctx *pipe = NULL;
+ struct dc_link *link = stream->sink->link;
+ struct dc_link *edp_links[MAX_NUM_EDP];
+
+ if (link->replay_settings.replay_feature_enabled)
+ return false;
+
+ /* find primary pipe associated with stream */
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.tg)
+ break;
+ }
+
+ if (i == MAX_PIPES) {
+ ASSERT(0);
+ return false;
+ }
+
+ dc_get_edp_links(dc, edp_links, &edp_num);
+
+ /* Determine panel inst */
+ for (i = 0; i < edp_num; i++)
+ if (edp_links[i] == link)
+ break;
+
+ if (i == edp_num)
+ return false;
+
+ if (pipe->stream_res.abm &&
+ pipe->stream_res.abm->funcs->save_restore)
+ return pipe->stream_res.abm->funcs->save_restore(
+ pipe->stream_res.abm,
+ i,
+ pData);
+ return false;
+}
+
+void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
+{
+ unsigned int i;
+ bool subvp_sw_cursor_req = false;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i])) {
+ subvp_sw_cursor_req = true;
+ break;
+ }
+ }
+ properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size;
+}
+
+/**
+ * dc_set_edp_power() - DM controls eDP power to be ON/OFF
+ *
+ * Called when DM wants to power on/off eDP.
+ * Only works on links that have the skip_implict_edp_power_control flag set.
+ *
+ * @dc: Current DC state
+ * @edp_link: a link with eDP connector signal type
+ * @powerOn: power on/off eDP
+ *
+ * Return: void
+ */
+void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
+ bool powerOn)
+{
+ if (edp_link->connector_signal != SIGNAL_TYPE_EDP)
+ return;
+
+ if (edp_link->skip_implict_edp_power_control == false)
+ return;
+
+ edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
+}
+
+/*
+ *****************************************************************************
+ * dc_get_power_profile_for_dc_state() - extracts power profile from dc state
+ *
+ * @context: dc state to extract the power profile from
+ *
+ * Called when DM wants to make power policy decisions based on dc_state.
+ *
+ *****************************************************************************
+ */
+struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context)
+{
+ struct dc_power_profile profile = { 0 };
+
+ profile.power_level += !context->bw_ctx.bw.dcn.clk.p_state_change_support;
+
+ return profile;
+}
+
diff --git a/rr-cache/167c3920940b340aa744e211de905f5c9bd2abae/preimage.1 b/rr-cache/167c3920940b340aa744e211de905f5c9bd2abae/preimage.1
new file mode 100644
index 000000000000..fb232c6b7860
--- /dev/null
+++ b/rr-cache/167c3920940b340aa744e211de905f5c9bd2abae/preimage.1
@@ -0,0 +1,5835 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+
+#include "dm_services.h"
+
+#include "amdgpu.h"
+
+#include "dc.h"
+
+#include "core_status.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+#include "dce/dce_hwseq.h"
+
+#include "resource.h"
+#include "dc_state.h"
+#include "dc_state_priv.h"
+#include "dc_plane_priv.h"
+
+#include "gpio_service_interface.h"
+#include "clk_mgr.h"
+#include "clock_source.h"
+#include "dc_bios_types.h"
+
+#include "bios_parser_interface.h"
+#include "bios/bios_parser_helper.h"
+#include "include/irq_service_interface.h"
+#include "transform.h"
+#include "dmcu.h"
+#include "dpp.h"
+#include "timing_generator.h"
+#include "abm.h"
+#include "virtual/virtual_link_encoder.h"
+#include "hubp.h"
+
+#include "link_hwss.h"
+#include "link_encoder.h"
+#include "link_enc_cfg.h"
+
+#include "link.h"
+#include "dm_helpers.h"
+#include "mem_input.h"
+
+#include "dc_dmub_srv.h"
+
+#include "dsc.h"
+
+#include "vm_helper.h"
+
+#include "dce/dce_i2c.h"
+
+#include "dmub/dmub_srv.h"
+
+#include "dce/dmub_psr.h"
+
+#include "dce/dmub_hw_lock_mgr.h"
+
+#include "dc_trace.h"
+
+#include "hw_sequencer_private.h"
+
+#include "dml2/dml2_internal_types.h"
+
+#include "dce/dmub_outbox.h"
+
+#define CTX \
+ dc->ctx
+
+#define DC_LOGGER \
+ dc->ctx->logger
+
+static const char DC_BUILD_ID[] = "production-build";
+
+/**
+ * DOC: Overview
+ *
+ * DC is the OS-agnostic component of the amdgpu DC driver.
+ *
+ * DC maintains and validates a set of structs representing the state of the
+ * driver and writes that state to AMD hardware.
+ *
+ * Main DC HW structs:
+ *
+ * struct dc - The central struct. One per driver. Created on driver load,
+ * destroyed on driver unload.
+ *
+ * struct dc_context - One per driver.
+ * Used as a backpointer by most other structs in dc.
+ *
+ * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
+ * plugpoints). Created on driver load, destroyed on driver unload.
+ *
+ * struct dc_sink - One per display. Created on boot or hotplug.
+ * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
+ * (the display directly attached). It may also have one or more remote
+ * sinks (in the Multi-Stream Transport case).
+ *
+ * struct resource_pool - One per driver. Represents the hw blocks not in the
+ * main pipeline. Not directly accessible by dm.
+ *
+ * Main dc state structs:
+ *
+ * These structs can be created and destroyed as needed. There is a full set of
+ * these structs in dc->current_state representing the currently programmed state.
+ *
+ * struct dc_state - The global DC state to track global state information,
+ * such as bandwidth values.
+ *
+ * struct dc_stream_state - Represents the hw configuration for the pipeline from
+ * a framebuffer to a display. Maps one-to-one with dc_sink.
+ *
+ * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
+ * and may have more in the Multi-Plane Overlay case.
+ *
+ * struct resource_context - Represents the programmable state of everything in
+ * the resource_pool. Not directly accessible by dm.
+ *
+ * struct pipe_ctx - A member of struct resource_context. Represents the
+ * internal hardware pipeline components. Each dc_plane_state has either
+ * one or two (in the pipe-split case).
+ */
+
+/* Private functions */
+
+static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
+{
+ if (new > *original)
+ *original = new;
+}
+
+static void destroy_links(struct dc *dc)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (NULL != dc->links[i])
+ dc->link_srv->destroy_link(&dc->links[i]);
+ }
+}
+
+static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
+{
+ int i;
+ uint32_t count = 0;
+
+ for (i = 0; i < num_links; i++) {
+ if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
+ links[i]->is_internal_display)
+ count++;
+ }
+
+ return count;
+}
+
+static int get_seamless_boot_stream_count(struct dc_state *ctx)
+{
+ uint8_t i;
+ uint8_t seamless_boot_stream_count = 0;
+
+ for (i = 0; i < ctx->stream_count; i++)
+ if (ctx->streams[i]->apply_seamless_boot_optimization)
+ seamless_boot_stream_count++;
+
+ return seamless_boot_stream_count;
+}
+
+static bool create_links(
+ struct dc *dc,
+ uint32_t num_virtual_links)
+{
+ int i;
+ int connectors_num;
+ struct dc_bios *bios = dc->ctx->dc_bios;
+
+ dc->link_count = 0;
+
+ connectors_num = bios->funcs->get_connectors_number(bios);
+
+ DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);
+
+ if (connectors_num > ENUM_ID_COUNT) {
+ dm_error(
+ "DC: Number of connectors %d exceeds maximum of %d!\n",
+ connectors_num,
+ ENUM_ID_COUNT);
+ return false;
+ }
+
+ dm_output_to_console(
+ "DC: %s: connectors_num: physical:%d, virtual:%d\n",
+ __func__,
+ connectors_num,
+ num_virtual_links);
+
+ // condition loop on link_count to allow skipping invalid indices
+ for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
+ struct link_init_data link_init_params = {0};
+ struct dc_link *link;
+
+ DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
+
+ link_init_params.ctx = dc->ctx;
+ /* next BIOS object table connector */
+ link_init_params.connector_index = i;
+ link_init_params.link_index = dc->link_count;
+ link_init_params.dc = dc;
+ link = dc->link_srv->create_link(&link_init_params);
+
+ if (link) {
+ dc->links[dc->link_count] = link;
+ link->dc = dc;
+ ++dc->link_count;
+ }
+ }
+
+ DC_LOG_DC("BIOS object table - end");
+
+ /* Create a link for each usb4 dpia port */
+ for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
+ struct link_init_data link_init_params = {0};
+ struct dc_link *link;
+
+ link_init_params.ctx = dc->ctx;
+ link_init_params.connector_index = i;
+ link_init_params.link_index = dc->link_count;
+ link_init_params.dc = dc;
+ link_init_params.is_dpia_link = true;
+
+ link = dc->link_srv->create_link(&link_init_params);
+ if (link) {
+ dc->links[dc->link_count] = link;
+ link->dc = dc;
+ ++dc->link_count;
+ }
+ }
+
+ for (i = 0; i < num_virtual_links; i++) {
+ struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
+ struct encoder_init_data enc_init = {0};
+
+ if (link == NULL) {
+ BREAK_TO_DEBUGGER();
+ goto failed_alloc;
+ }
+
+ link->link_index = dc->link_count;
+ dc->links[dc->link_count] = link;
+ dc->link_count++;
+
+ link->ctx = dc->ctx;
+ link->dc = dc;
+ link->connector_signal = SIGNAL_TYPE_VIRTUAL;
+ link->link_id.type = OBJECT_TYPE_CONNECTOR;
+ link->link_id.id = CONNECTOR_ID_VIRTUAL;
+ link->link_id.enum_id = ENUM_ID_1;
+ link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
+
+ if (!link->link_enc) {
+ BREAK_TO_DEBUGGER();
+ goto failed_alloc;
+ }
+
+ link->link_status.dpcd_caps = &link->dpcd_caps;
+
+ enc_init.ctx = dc->ctx;
+ enc_init.channel = CHANNEL_ID_UNKNOWN;
+ enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
+ enc_init.transmitter = TRANSMITTER_UNKNOWN;
+ enc_init.connector = link->link_id;
+ enc_init.encoder.type = OBJECT_TYPE_ENCODER;
+ enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
+ enc_init.encoder.enum_id = ENUM_ID_1;
+ virtual_link_encoder_construct(link->link_enc, &enc_init);
+ }
+
+ dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);
+
+ return true;
+
+failed_alloc:
+ return false;
+}
+
+/* Create additional DIG link encoder objects if fewer than the platform
+ * supports were created during link construction. This can happen if the
+ * number of physical connectors is less than the number of DIGs.
+ */
+static bool create_link_encoders(struct dc *dc)
+{
+ bool res = true;
+ unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
+ unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
+ int i;
+
+ /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
+ * link encoders and physical display endpoints and does not require
+ * additional link encoder objects.
+ */
+ if (num_usb4_dpia == 0)
+ return res;
+
+ /* Create as many link encoder objects as the platform supports. DPIA
+ * endpoints can be programmably mapped to any DIG.
+ */
+ if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
+ for (i = 0; i < num_dig_link_enc; i++) {
+ struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
+
+ if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
+ link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
+ (enum engine_id)(ENGINE_ID_DIGA + i));
+ if (link_enc) {
+ dc->res_pool->link_encoders[i] = link_enc;
+ dc->res_pool->dig_link_enc_count++;
+ } else {
+ res = false;
+ }
+ }
+ }
+ }
+
+ return res;
+}
+
+/* Destroy any additional DIG link encoder objects created by
+ * create_link_encoders().
+ * NB: Must only be called after destroy_links().
+ */
+static void destroy_link_encoders(struct dc *dc)
+{
+ unsigned int num_usb4_dpia;
+ unsigned int num_dig_link_enc;
+ int i;
+
+ if (!dc->res_pool)
+ return;
+
+ num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
+ num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
+
+ /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
+ * link encoders and physical display endpoints and does not require
+ * additional link encoder objects.
+ */
+ if (num_usb4_dpia == 0)
+ return;
+
+ for (i = 0; i < num_dig_link_enc; i++) {
+ struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
+
+ if (link_enc) {
+ link_enc->funcs->destroy(&link_enc);
+ dc->res_pool->link_encoders[i] = NULL;
+ dc->res_pool->dig_link_enc_count--;
+ }
+ }
+}
+
+static struct dc_perf_trace *dc_perf_trace_create(void)
+{
+ return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
+}
+
+static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
+{
+ kfree(*perf_trace);
+ *perf_trace = NULL;
+}
+
+static bool set_long_vtotal(struct dc *dc, struct dc_stream_state *stream, struct dc_crtc_timing_adjust *adjust)
+{
+ int i;
+
+ if (!dc || !stream || !adjust)
+ return false;
+
+ if (!dc->current_state)
+ return false;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.tg) {
+ if (dc->hwss.set_long_vtotal)
+ dc->hwss.set_long_vtotal(&pipe, 1, adjust->v_total_min, adjust->v_total_max);
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * dc_stream_adjust_vmin_vmax - look up pipe context & update parts of DRR
+ * @dc: dc reference
+ * @stream: Initial dc stream state
+ * @adjust: Updated parameters for vertical_total_min and vertical_total_max
+ *
+ * Looks up the pipe context of dc_stream_state and updates the
+ * vertical_total_min and vertical_total_max of the DRR, Dynamic Refresh
+ * Rate, which is a power-saving feature that targets reducing panel
+ * refresh rate while the screen is static
+ *
+ * Return: %true if the pipe context is found and adjusted;
+ * %false if the pipe context is not found.
+ */
+bool dc_stream_adjust_vmin_vmax(struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_crtc_timing_adjust *adjust)
+{
+ int i;
+
+ /*
+ * Don't adjust DRR while there are bandwidth optimizations pending, to
+ * avoid conflicting with firmware updates.
+ */
+ if (dc->ctx->dce_version > DCE_VERSION_MAX)
+ if (dc->optimized_required || dc->wm_optimized_required)
+ return false;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ stream->adjust.v_total_max = adjust->v_total_max;
+ stream->adjust.v_total_mid = adjust->v_total_mid;
+ stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
+ stream->adjust.v_total_min = adjust->v_total_min;
+ stream->adjust.allow_otg_v_count_halt = adjust->allow_otg_v_count_halt;
+
+ if (dc->caps.max_v_total != 0 &&
+ (adjust->v_total_max > dc->caps.max_v_total || adjust->v_total_min > dc->caps.max_v_total)) {
+ if (adjust->allow_otg_v_count_halt)
+ return set_long_vtotal(dc, stream, adjust);
+ else
+ return false;
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.tg) {
+ dc->hwss.set_drr(&pipe,
+ 1,
+ *adjust);
+
+ return true;
+ }
+ }
+ return false;
+}
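+
+/*
+ * Illustrative usage sketch only, not driver code: lowering the effective
+ * refresh rate by stretching VTOTAL. The doubling here is made up for
+ * illustration; "dc" and "stream" are assumed from the caller's context:
+ *
+ *	struct dc_crtc_timing_adjust adjust = {
+ *		.v_total_min = stream->timing.v_total,
+ *		.v_total_max = 2 * stream->timing.v_total,
+ *	};
+ *
+ *	if (!dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
+ *		; // pipe not found or optimizations pending, retry later
+ */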
+
+/**
+ * dc_stream_get_last_used_drr_vtotal - Looks up the pipe context of
+ * dc_stream_state and gets the last VTOTAL used by DRR (Dynamic Refresh Rate)
+ *
+ * @dc: [in] dc reference
+ * @stream: [in] Initial dc stream state
+ * @refresh_rate: [out] the VTOTAL last used by DRR
+ *
+ * Return: %true if the pipe context is found and there is an associated
+ * timing_generator for the DC;
+ * %false if the pipe context is not found or there is no
+ * timing_generator for the DC.
+ */
+bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
+ struct dc_stream_state *stream,
+ uint32_t *refresh_rate)
+{
+ bool status = false;
+
+ int i = 0;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.tg) {
+ /* Only execute if a function pointer has been defined for
+ * the DC version in question
+ */
+ if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
+ pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);
+
+ status = true;
+
+ break;
+ }
+ }
+ }
+
+ return status;
+}
+
+bool dc_stream_get_crtc_position(struct dc *dc,
+ struct dc_stream_state **streams, int num_streams,
+ unsigned int *v_pos, unsigned int *nom_v_pos)
+{
+ /* TODO: Support multiple streams */
+ const struct dc_stream_state *stream = streams[0];
+ int i;
+ bool ret = false;
+ struct crtc_position position;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.stream_enc) {
+ dc->hwss.get_position(&pipe, 1, &position);
+
+ *v_pos = position.vertical_count;
+ *nom_v_pos = position.nominal_vcount;
+ ret = true;
+ }
+ }
+ return ret;
+}
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+static inline void
+dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
+ struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
+{
+ union dmub_rb_cmd cmd = {0};
+
+ cmd.secure_display.roi_info.phy_id = mux_mapping->phy_output_num;
+ cmd.secure_display.roi_info.otg_id = mux_mapping->otg_output_num;
+
+ if (is_stop) {
+ cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
+ cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_STOP_UPDATE;
+ } else {
+ cmd.secure_display.header.type = DMUB_CMD__SECURE_DISPLAY;
+ cmd.secure_display.header.sub_type = DMUB_CMD__SECURE_DISPLAY_CRC_WIN_NOTIFY;
+ cmd.secure_display.roi_info.x_start = rect->x;
+ cmd.secure_display.roi_info.y_start = rect->y;
+ cmd.secure_display.roi_info.x_end = rect->x + rect->width;
+ cmd.secure_display.roi_info.y_end = rect->y + rect->height;
+ }
+
+ dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+}
+
+static inline void
+dc_stream_forward_dmcu_crc_window(struct dmcu *dmcu,
+ struct rect *rect, struct otg_phy_mux *mux_mapping, bool is_stop)
+{
+ if (is_stop)
+ dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
+ else
+ dmcu->funcs->forward_crc_window(dmcu, rect, mux_mapping);
+}
+
+bool
+dc_stream_forward_crc_window(struct dc_stream_state *stream,
+ struct rect *rect, bool is_stop)
+{
+ struct dmcu *dmcu;
+ struct dc_dmub_srv *dmub_srv;
+ struct otg_phy_mux mux_mapping;
+ struct pipe_ctx *pipe;
+ int i;
+ struct dc *dc = stream->ctx->dc;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
+ break;
+ }
+
+ /* Stream not found */
+ if (i == MAX_PIPES)
+ return false;
+
+ mux_mapping.phy_output_num = stream->link->link_enc_hw_inst;
+ mux_mapping.otg_output_num = pipe->stream_res.tg->inst;
+
+ dmcu = dc->res_pool->dmcu;
+ dmub_srv = dc->ctx->dmub_srv;
+
+ /* forward to dmub */
+ if (dmub_srv)
+ dc_stream_forward_dmub_crc_window(dmub_srv, rect, &mux_mapping, is_stop);
+ /* forward to dmcu */
+ else if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu))
+ dc_stream_forward_dmcu_crc_window(dmcu, rect, &mux_mapping, is_stop);
+ else
+ return false;
+
+ return true;
+}
+#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
+
+/**
+ * dc_stream_configure_crc() - Configure CRC capture for the given stream.
+ * @dc: DC Object
+ * @stream: The stream to configure CRC on.
+ * @enable: Enable CRC if true, disable otherwise.
+ * @crc_window: CRC window (x/y start/end) information
+ * @continuous: Capture CRC on every frame if true. Otherwise, only capture
+ * once.
+ *
+ * By default, only CRC0 is configured, and the entire frame is used to
+ * calculate the CRC.
+ *
+ * Return: %false if the stream is not found or CRC capture is not supported;
+ * %true if the stream has been configured.
+ */
+bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
+ struct crc_params *crc_window, bool enable, bool continuous)
+{
+ struct pipe_ctx *pipe;
+ struct crc_params param;
+ struct timing_generator *tg;
+
+ pipe = resource_get_otg_master_for_stream(
+ &dc->current_state->res_ctx, stream);
+
+ /* Stream not found */
+ if (pipe == NULL)
+ return false;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ /* By default, capture the full frame */
+ param.windowa_x_start = 0;
+ param.windowa_y_start = 0;
+ param.windowa_x_end = pipe->stream->timing.h_addressable;
+ param.windowa_y_end = pipe->stream->timing.v_addressable;
+ param.windowb_x_start = 0;
+ param.windowb_y_start = 0;
+ param.windowb_x_end = pipe->stream->timing.h_addressable;
+ param.windowb_y_end = pipe->stream->timing.v_addressable;
+
+ if (crc_window) {
+ param.windowa_x_start = crc_window->windowa_x_start;
+ param.windowa_y_start = crc_window->windowa_y_start;
+ param.windowa_x_end = crc_window->windowa_x_end;
+ param.windowa_y_end = crc_window->windowa_y_end;
+ param.windowb_x_start = crc_window->windowb_x_start;
+ param.windowb_y_start = crc_window->windowb_y_start;
+ param.windowb_x_end = crc_window->windowb_x_end;
+ param.windowb_y_end = crc_window->windowb_y_end;
+ }
+
+ param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
+ param.odm_mode = pipe->next_odm_pipe ? 1:0;
+
+ /* Default to the union of both windows */
+ param.selection = UNION_WINDOW_A_B;
+ param.continuous_mode = continuous;
+ param.enable = enable;
+
+ tg = pipe->stream_res.tg;
+
+ /* Only call if supported */
+ if (tg->funcs->configure_crc)
+ return tg->funcs->configure_crc(tg, &param);
+ DC_LOG_WARNING("CRC capture not supported.");
+ return false;
+}
+
+/**
+ * dc_stream_get_crc() - Get CRC values for the given stream.
+ *
+ * @dc: DC object.
+ * @stream: The DC stream state of the stream to get CRCs from.
+ * @r_cr: CRC value for the red component.
+ * @g_y: CRC value for the green component.
+ * @b_cb: CRC value for the blue component.
+ *
+ * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
+ *
+ * Return:
+ * %false if stream is not found, or if CRCs are not enabled.
+ */
+bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
+ uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
+{
+ int i;
+ struct pipe_ctx *pipe;
+ struct timing_generator *tg;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->stream == stream)
+ break;
+ }
+ /* Stream not found */
+ if (i == MAX_PIPES)
+ return false;
+
+ tg = pipe->stream_res.tg;
+
+ if (tg->funcs->get_crc)
+ return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
+ DC_LOG_WARNING("CRC capture not supported.");
+ return false;
+}
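+
+/*
+ * Illustrative usage sketch only, not driver code: capturing a continuous
+ * full-frame CRC with the two calls above. A NULL crc_window selects the
+ * full frame by default:
+ *
+ *	uint32_t r_cr, g_y, b_cb;
+ *
+ *	if (dc_stream_configure_crc(dc, stream, NULL, true, true) &&
+ *	    dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
+ *		DC_LOG_DEBUG("CRC %08x %08x %08x\n", r_cr, g_y, b_cb);
+ */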
+
+void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
+ enum dc_dynamic_expansion option)
+{
+ /* OPP FMT dyn expansion updates */
+ int i;
+ struct pipe_ctx *pipe_ctx;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ pipe_ctx->stream_res.opp->dyn_expansion = option;
+ pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
+ pipe_ctx->stream_res.opp,
+ COLOR_SPACE_YCBCR601,
+ stream->timing.display_color_depth,
+ stream->signal);
+ }
+ }
+}
+
+void dc_stream_set_dither_option(struct dc_stream_state *stream,
+ enum dc_dither_option option)
+{
+ struct bit_depth_reduction_params params;
+ struct dc_link *link = stream->link;
+ struct pipe_ctx *pipes = NULL;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
+ stream) {
+ pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ break;
+ }
+ }
+
+ if (!pipes)
+ return;
+ if (option > DITHER_OPTION_MAX)
+ return;
+
+ dc_exit_ips_for_hw_access(stream->ctx->dc);
+
+ stream->dither_option = option;
+
+ memset(&params, 0, sizeof(params));
+ resource_build_bit_depth_reduction_params(stream, &params);
+ stream->bit_depth_params = params;
+
+ if (pipes->plane_res.xfm &&
+ pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
+ pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
+ pipes->plane_res.xfm,
+ pipes->plane_res.scl_data.lb_params.depth,
+ &stream->bit_depth_params);
+ }
+
+ pipes->stream_res.opp->funcs->
+ opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
+}
+
+bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
+{
+ int i;
+ bool ret = false;
+ struct pipe_ctx *pipes;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
+ pipes = &dc->current_state->res_ctx.pipe_ctx[i];
+ dc->hwss.program_gamut_remap(pipes);
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
+{
+ int i;
+ bool ret = false;
+ struct pipe_ctx *pipes;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream
+ == stream) {
+
+ pipes = &dc->current_state->res_ctx.pipe_ctx[i];
+ dc->hwss.program_output_csc(dc,
+ pipes,
+ stream->output_color_space,
+ stream->csc_color_matrix.matrix,
+ pipes->stream_res.opp->inst);
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+void dc_stream_set_static_screen_params(struct dc *dc,
+ struct dc_stream_state **streams,
+ int num_streams,
+ const struct dc_static_screen_params *params)
+{
+ int i, j;
+ struct pipe_ctx *pipes_affected[MAX_PIPES];
+ int num_pipes_affected = 0;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ for (i = 0; i < num_streams; i++) {
+ struct dc_stream_state *stream = streams[i];
+
+ for (j = 0; j < MAX_PIPES; j++) {
+ if (dc->current_state->res_ctx.pipe_ctx[j].stream
+ == stream) {
+ pipes_affected[num_pipes_affected++] =
+ &dc->current_state->res_ctx.pipe_ctx[j];
+ }
+ }
+ }
+
+ dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
+}
+
+static void dc_destruct(struct dc *dc)
+{
+ // reset link encoder assignment table on destruct
+ if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
+ link_enc_cfg_init(dc, dc->current_state);
+
+ if (dc->current_state) {
+ dc_state_release(dc->current_state);
+ dc->current_state = NULL;
+ }
+
+ destroy_links(dc);
+
+ destroy_link_encoders(dc);
+
+ if (dc->clk_mgr) {
+ dc_destroy_clk_mgr(dc->clk_mgr);
+ dc->clk_mgr = NULL;
+ }
+
+ dc_destroy_resource_pool(dc);
+
+ if (dc->link_srv)
+ link_destroy_link_service(&dc->link_srv);
+
+ if (dc->ctx->gpio_service)
+ dal_gpio_service_destroy(&dc->ctx->gpio_service);
+
+ if (dc->ctx->created_bios)
+ dal_bios_parser_destroy(&dc->ctx->dc_bios);
+
+ kfree(dc->ctx->logger);
+ dc_perf_trace_destroy(&dc->ctx->perf_trace);
+
+ kfree(dc->ctx);
+ dc->ctx = NULL;
+
+ kfree(dc->bw_vbios);
+ dc->bw_vbios = NULL;
+
+ kfree(dc->bw_dceip);
+ dc->bw_dceip = NULL;
+
+ kfree(dc->dcn_soc);
+ dc->dcn_soc = NULL;
+
+ kfree(dc->dcn_ip);
+ dc->dcn_ip = NULL;
+
+ kfree(dc->vm_helper);
+ dc->vm_helper = NULL;
+}
+
+static bool dc_construct_ctx(struct dc *dc,
+ const struct dc_init_data *init_params)
+{
+ struct dc_context *dc_ctx;
+
+ dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
+ if (!dc_ctx)
+ return false;
+
+ dc_ctx->cgs_device = init_params->cgs_device;
+ dc_ctx->driver_context = init_params->driver;
+ dc_ctx->dc = dc;
+ dc_ctx->asic_id = init_params->asic_id;
+ dc_ctx->dc_sink_id_count = 0;
+ dc_ctx->dc_stream_id_count = 0;
+ dc_ctx->dce_environment = init_params->dce_environment;
+ dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
+ dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
+ dc_ctx->clk_reg_offsets = init_params->clk_reg_offsets;
+
+ /* Create logger */
+ dc_ctx->logger = kmalloc(sizeof(*dc_ctx->logger), GFP_KERNEL);
+
+ if (!dc_ctx->logger) {
+ kfree(dc_ctx);
+ return false;
+ }
+
+ dc_ctx->logger->dev = adev_to_drm(init_params->driver);
+ dc->dml.logger = dc_ctx->logger;
+
+ dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);
+
+ dc_ctx->perf_trace = dc_perf_trace_create();
+ if (!dc_ctx->perf_trace) {
+ kfree(dc_ctx);
+ ASSERT_CRITICAL(false);
+ return false;
+ }
+
+ dc->ctx = dc_ctx;
+
+ dc->link_srv = link_create_link_service();
+ if (!dc->link_srv)
+ return false;
+
+ return true;
+}
+
+static bool dc_construct(struct dc *dc,
+ const struct dc_init_data *init_params)
+{
+ struct dc_context *dc_ctx;
+ struct bw_calcs_dceip *dc_dceip;
+ struct bw_calcs_vbios *dc_vbios;
+ struct dcn_soc_bounding_box *dcn_soc;
+ struct dcn_ip_params *dcn_ip;
+
+ dc->config = init_params->flags;
+
+ // Allocate memory for the vm_helper
+ dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
+ if (!dc->vm_helper) {
+ dm_error("%s: failed to create dc->vm_helper\n", __func__);
+ goto fail;
+ }
+
+ memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
+
+ dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
+ if (!dc_dceip) {
+ dm_error("%s: failed to create dceip\n", __func__);
+ goto fail;
+ }
+
+ dc->bw_dceip = dc_dceip;
+
+ dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
+ if (!dc_vbios) {
+ dm_error("%s: failed to create vbios\n", __func__);
+ goto fail;
+ }
+
+ dc->bw_vbios = dc_vbios;
+ dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
+ if (!dcn_soc) {
+ dm_error("%s: failed to create dcn_soc\n", __func__);
+ goto fail;
+ }
+
+ dc->dcn_soc = dcn_soc;
+
+ dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
+ if (!dcn_ip) {
+ dm_error("%s: failed to create dcn_ip\n", __func__);
+ goto fail;
+ }
+
+ dc->dcn_ip = dcn_ip;
+
+ if (!dc_construct_ctx(dc, init_params)) {
+ dm_error("%s: failed to create ctx\n", __func__);
+ goto fail;
+ }
+
+ dc_ctx = dc->ctx;
+
+ /* Resource should construct all asic specific resources.
+ * This should be the only place where we need to parse the asic id
+ */
+ if (init_params->vbios_override)
+ dc_ctx->dc_bios = init_params->vbios_override;
+ else {
+ /* Create BIOS parser */
+ struct bp_init_data bp_init_data;
+
+ bp_init_data.ctx = dc_ctx;
+ bp_init_data.bios = init_params->asic_id.atombios_base_address;
+
+ dc_ctx->dc_bios = dal_bios_parser_create(
+ &bp_init_data, dc_ctx->dce_version);
+
+ if (!dc_ctx->dc_bios) {
+ ASSERT_CRITICAL(false);
+ goto fail;
+ }
+
+ dc_ctx->created_bios = true;
+ }
+
+ dc->vendor_signature = init_params->vendor_signature;
+
+ /* Create GPIO service */
+ dc_ctx->gpio_service = dal_gpio_service_create(
+ dc_ctx->dce_version,
+ dc_ctx->dce_environment,
+ dc_ctx);
+
+ if (!dc_ctx->gpio_service) {
+ ASSERT_CRITICAL(false);
+ goto fail;
+ }
+
+ dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
+ if (!dc->res_pool)
+ goto fail;
+
+ /* set i2c speed if not done by the respective dcnxxx__resource.c */
+ if (dc->caps.i2c_speed_in_khz_hdcp == 0)
+ dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
+ if (dc->caps.max_optimizable_video_width == 0)
+ dc->caps.max_optimizable_video_width = 5120;
+ dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
+ if (!dc->clk_mgr)
+ goto fail;
+#ifdef CONFIG_DRM_AMD_DC_FP
+ dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
+
+ if (dc->res_pool->funcs->update_bw_bounding_box) {
+ DC_FP_START();
+ dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
+ DC_FP_END();
+ }
+#endif
+
+ if (!create_links(dc, init_params->num_virtual_links))
+ goto fail;
+
+ /* Create additional DIG link encoder objects if fewer than the platform
+ * supports were created during link construction.
+ */
+ if (!create_link_encoders(dc))
+ goto fail;
+
+ /* Creation of current_state must occur after dc->dml
+ * is initialized in dc_create_resource_pool because
+ * on creation it copies the contents of dc->dml
+ */
+ dc->current_state = dc_state_create(dc, NULL);
+
+ if (!dc->current_state) {
+ dm_error("%s: failed to create validate ctx\n", __func__);
+ goto fail;
+ }
+
+ return true;
+
+fail:
+ return false;
+}
+
+static void disable_all_writeback_pipes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *context)
+{
+ int i;
+
+ for (i = 0; i < stream->num_wb_info; i++)
+ stream->writeback_info[i].wb_enabled = false;
+}
+
+static void apply_ctx_interdependent_lock(struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream,
+ bool lock)
+{
+ int i;
+
+ /* Use the interdependent update lock when available; the per-pipe fallback below covers DCE110 */
+ if (dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, lock);
+ else {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ // Copied conditions that were previously in dce110_apply_ctx_for_surface
+ if (stream == pipe_ctx->stream) {
+ if (resource_is_pipe_type(pipe_ctx, OPP_HEAD) &&
+ (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
+ }
+ }
+ }
+}
+
+static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+{
+ if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
+ memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));
+
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
+ get_hdr_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
+ get_surface_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
+ get_surface_tile_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else {
+ if (dc->ctx->dce_version < DCN_VERSION_2_0)
+ color_space_to_black_color(
+ dc, pipe_ctx->stream->output_color_space, &(pipe_ctx->visual_confirm_color));
+ }
+ if (dc->ctx->dce_version >= DCN_VERSION_2_0) {
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
+ get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
+ get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
+ get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ }
+ }
+}
+
+static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
+{
+ int i, j;
+ struct dc_state *dangling_context = dc_state_create_current_copy(dc);
+ struct dc_state *current_ctx;
+ struct pipe_ctx *pipe;
+ struct timing_generator *tg;
+
+ if (dangling_context == NULL)
+ return;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *old_stream =
+ dc->current_state->res_ctx.pipe_ctx[i].stream;
+ bool should_disable = true;
+ bool pipe_split_change = false;
+
+ if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
+ (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
+ pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
+ dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
+ else
+ pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
+ dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
+
+ for (j = 0; j < context->stream_count; j++) {
+ if (old_stream == context->streams[j]) {
+ should_disable = false;
+ break;
+ }
+ }
+ if (!should_disable && pipe_split_change &&
+ dc->current_state->stream_count != context->stream_count)
+ should_disable = true;
+
+ if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
+ !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
+ struct pipe_ctx *old_pipe, *new_pipe;
+
+ old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ new_pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (old_pipe->plane_state && !new_pipe->plane_state)
+ should_disable = true;
+ }
+
+ if (should_disable && old_stream) {
+ bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
+ /* When disabling plane for a phantom pipe, we must turn on the
+ * phantom OTG so the disable programming gets the double buffer
+ * update. Otherwise the pipe will be left in a partially disabled
+ * state that can result in underflow or hang when enabling it
+ * again for different use.
+ */
+ if (is_phantom) {
+ if (tg->funcs->enable_crtc) {
+ int main_pipe_width, main_pipe_height;
+ struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);
+
+ main_pipe_width = old_paired_stream->dst.width;
+ main_pipe_height = old_paired_stream->dst.height;
+ if (dc->hwss.blank_phantom)
+ dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
+ tg->funcs->enable_crtc(tg);
+ }
+ }
+
+ if (is_phantom)
+ dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
+ else
+ dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
+ disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
+
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
+ dc_update_visual_confirm_color(dc, context, pipe);
+ }
+
+ if (dc->hwss.apply_ctx_for_surface) {
+ apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
+ dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
+ apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
+ dc->hwss.post_unlock_program_front_end(dc, dangling_context);
+ }
+ if (dc->hwss.program_front_end_for_ctx) {
+ dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
+ dc->hwss.program_front_end_for_ctx(dc, dangling_context);
+ dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
+ dc->hwss.post_unlock_program_front_end(dc, dangling_context);
+ }
+ /* We need to put the phantom OTG back into its default (disabled) state or we
+ * can get corruption when transitioning from one SubVP config to another.
+ * The OTG is set to disable on the falling edge of VUPDATE so the plane disable
+ * will still get its double buffer update.
+ */
+ if (is_phantom) {
+ if (tg->funcs->disable_phantom_crtc)
+ tg->funcs->disable_phantom_crtc(tg);
+ }
+ }
+ }
+
+ current_ctx = dc->current_state;
+ dc->current_state = dangling_context;
+ dc_state_release(current_ctx);
+}
+
+static void disable_vbios_mode_if_required(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ unsigned int i, j;
+
+ /* if the timing changed, disable the stream */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_stream_state *stream = NULL;
+ struct dc_link *link = NULL;
+ struct pipe_ctx *pipe = NULL;
+
+ pipe = &context->res_ctx.pipe_ctx[i];
+ stream = pipe->stream;
+ if (stream == NULL)
+ continue;
+
+ if (stream->apply_seamless_boot_optimization)
+ continue;
+
+ // only looking for first odm pipe
+ if (pipe->prev_odm_pipe)
+ continue;
+
+ if (stream->link->local_sink &&
+ stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ link = stream->link;
+ }
+
+ if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
+ unsigned int enc_inst, tg_inst = 0;
+ unsigned int pix_clk_100hz;
+
+ enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
+ if (enc_inst != ENGINE_ID_UNKNOWN) {
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (dc->res_pool->stream_enc[j]->id == enc_inst) {
+ tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+
+ dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
+ dc->res_pool->dp_clock_source,
+ tg_inst, &pix_clk_100hz);
+
+ if (link->link_status.link_active) {
+ uint32_t requested_pix_clk_100hz =
+ pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
+
+ if (pix_clk_100hz != requested_pix_clk_100hz) {
+ dc->link_srv->set_dpms_off(pipe);
+ pipe->stream->dpms_off = false;
+ }
+ }
+ }
+ }
+ }
+}
+
+/**
+ * wait_for_blank_complete - wait for all active OPPs to finish pending blank
+ * pattern updates
+ *
+ * @dc: [in] dc reference
+ * @context: [in] hardware context in use
+ */
+static void wait_for_blank_complete(struct dc *dc,
+ struct dc_state *context)
+{
+ struct pipe_ctx *opp_head;
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ if (!hws->funcs.wait_for_blank_complete)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ opp_head = &context->res_ctx.pipe_ctx[i];
+
+ if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
+ dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
+ continue;
+
+ hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
+ }
+}
+
+static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
+{
+ struct pipe_ctx *otg_master;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ otg_master = &context->res_ctx.pipe_ctx[i];
+ if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
+ dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
+ continue;
+ tg = otg_master->stream_res.tg;
+ if (tg->funcs->wait_odm_doublebuffer_pending_clear)
+ tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+ }
+
+ /* ODM update may require reprogramming the blank pattern for each OPP */
+ wait_for_blank_complete(dc, context);
+}
+
+static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ PERF_TRACE();
+ for (i = 0; i < MAX_PIPES; i++) {
+ int count = 0;
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
+ continue;
+
+ /* Timeout 100 ms */
+ while (count < 100000) {
+ /* Must set to false to start with, due to OR in update function */
+ pipe->plane_state->status.is_flip_pending = false;
+ dc->hwss.update_pending_status(pipe);
+ if (!pipe->plane_state->status.is_flip_pending)
+ break;
+ udelay(1);
+ count++;
+ }
+ ASSERT(!pipe->plane_state->status.is_flip_pending);
+ }
+ PERF_TRACE();
+}
+
+/* Public functions */
+
+struct dc *dc_create(const struct dc_init_data *init_params)
+{
+ struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
+ unsigned int full_pipe_count;
+
+ if (!dc)
+ return NULL;
+
+ if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
+ if (!dc_construct_ctx(dc, init_params))
+ goto destruct_dc;
+ } else {
+ if (!dc_construct(dc, init_params))
+ goto destruct_dc;
+
+ full_pipe_count = dc->res_pool->pipe_count;
+ if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
+ full_pipe_count--;
+ dc->caps.max_streams = min(
+ full_pipe_count,
+ dc->res_pool->stream_enc_count);
+
+ dc->caps.max_links = dc->link_count;
+ dc->caps.max_audios = dc->res_pool->audio_count;
+ dc->caps.linear_pitch_alignment = 64;
+
+ dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
+
+ dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
+
+ if (dc->res_pool->dmcu != NULL)
+ dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
+ }
+
+ dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
+ dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
+ dc->clk_reg_offsets = init_params->clk_reg_offsets;
+
+ /* Populate versioning information */
+ dc->versions.dc_ver = DC_VER;
+
+ dc->build_id = DC_BUILD_ID;
+
+ DC_LOG_DC("Display Core initialized\n");
+
+ return dc;
+
+destruct_dc:
+ dc_destruct(dc);
+ kfree(dc);
+ return NULL;
+}
+
+static void detect_edp_presence(struct dc *dc)
+{
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ struct dc_link *edp_link = NULL;
+ enum dc_connection_type type;
+ int i;
+ int edp_num;
+
+ dc_get_edp_links(dc, edp_links, &edp_num);
+ if (!edp_num)
+ return;
+
+ for (i = 0; i < edp_num; i++) {
+ edp_link = edp_links[i];
+ if (dc->config.edp_not_connected) {
+ edp_link->edp_sink_present = false;
+ } else {
+ dc_link_detect_connection_type(edp_link, &type);
+ edp_link->edp_sink_present = (type != dc_connection_none);
+ }
+ }
+}
+
+void dc_hardware_init(struct dc *dc)
+{
+ detect_edp_presence(dc);
+ if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
+ dc->hwss.init_hw(dc);
+}
+
+void dc_init_callbacks(struct dc *dc,
+ const struct dc_callback_init *init_params)
+{
+ dc->ctx->cp_psp = init_params->cp_psp;
+}
+
+void dc_deinit_callbacks(struct dc *dc)
+{
+ memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
+}
+
+void dc_destroy(struct dc **dc)
+{
+ dc_destruct(*dc);
+ kfree(*dc);
+ *dc = NULL;
+}
+
+static void enable_timing_multisync(
+ struct dc *dc,
+ struct dc_state *ctx)
+{
+ int i, multisync_count = 0;
+ int pipe_count = dc->res_pool->pipe_count;
+ struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
+
+ for (i = 0; i < pipe_count; i++) {
+ if (!ctx->res_ctx.pipe_ctx[i].stream ||
+ !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
+ continue;
+ if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
+ continue;
+ multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
+ multisync_count++;
+ }
+
+ if (multisync_count > 0) {
+ dc->hwss.enable_per_frame_crtc_position_reset(
+ dc, multisync_count, multisync_pipes);
+ }
+}
+
+static void program_timing_sync(
+ struct dc *dc,
+ struct dc_state *ctx)
+{
+ int i, j, k;
+ int group_index = 0;
+ int num_group = 0;
+ int pipe_count = dc->res_pool->pipe_count;
+ struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
+
+ for (i = 0; i < pipe_count; i++) {
+ if (!ctx->res_ctx.pipe_ctx[i].stream
+ || ctx->res_ctx.pipe_ctx[i].top_pipe
+ || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
+ continue;
+
+ unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
+ }
+
+ for (i = 0; i < pipe_count; i++) {
+ int group_size = 1;
+ enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
+ struct pipe_ctx *pipe_set[MAX_PIPES];
+
+ if (!unsynced_pipes[i])
+ continue;
+
+ pipe_set[0] = unsynced_pipes[i];
+ unsynced_pipes[i] = NULL;
+
+ /* Add tg to the set, search the rest of the tgs for ones with the
+ * same timing, and add all tgs with the same timing to the group
+ */
+ for (j = i + 1; j < pipe_count; j++) {
+ if (!unsynced_pipes[j])
+ continue;
+ if (sync_type != TIMING_SYNCHRONIZABLE &&
+ dc->hwss.enable_vblanks_synchronization &&
+ unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
+ resource_are_vblanks_synchronizable(
+ unsynced_pipes[j]->stream,
+ pipe_set[0]->stream)) {
+ sync_type = VBLANK_SYNCHRONIZABLE;
+ pipe_set[group_size] = unsynced_pipes[j];
+ unsynced_pipes[j] = NULL;
+ group_size++;
+ } else
+ if (sync_type != VBLANK_SYNCHRONIZABLE &&
+ resource_are_streams_timing_synchronizable(
+ unsynced_pipes[j]->stream,
+ pipe_set[0]->stream)) {
+ sync_type = TIMING_SYNCHRONIZABLE;
+ pipe_set[group_size] = unsynced_pipes[j];
+ unsynced_pipes[j] = NULL;
+ group_size++;
+ }
+ }
+
+ /* set first unblanked pipe as master */
+ for (j = 0; j < group_size; j++) {
+ bool is_blanked;
+
+ if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+ is_blanked =
+ pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+ else
+ is_blanked =
+ pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+ if (!is_blanked) {
+ if (j == 0)
+ break;
+
+ swap(pipe_set[0], pipe_set[j]);
+ break;
+ }
+ }
+
+ for (k = 0; k < group_size; k++) {
+ struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);
+
+ status->timing_sync_info.group_id = num_group;
+ status->timing_sync_info.group_size = group_size;
+ if (k == 0)
+ status->timing_sync_info.master = true;
+ else
+ status->timing_sync_info.master = false;
+
+ }
+
+ /* remove any other unblanked pipes as they have already been synced */
+ if (dc->config.use_pipe_ctx_sync_logic) {
+ /* check pipe's syncd to decide which pipe to be removed */
+ for (j = 1; j < group_size; j++) {
+ if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
+ group_size--;
+ pipe_set[j] = pipe_set[group_size];
+ j--;
+ } else
+ /* link slave pipe's syncd with master pipe */
+ pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
+ }
+ } else {
+ /* remove any other pipes by checking valid plane */
+ for (j = j + 1; j < group_size; j++) {
+ bool is_blanked;
+
+ if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+ is_blanked =
+ pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+ else
+ is_blanked =
+ pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+ if (!is_blanked) {
+ group_size--;
+ pipe_set[j] = pipe_set[group_size];
+ j--;
+ }
+ }
+ }
+
+ if (group_size > 1) {
+ if (sync_type == TIMING_SYNCHRONIZABLE) {
+ dc->hwss.enable_timing_synchronization(
+ dc, ctx, group_index, group_size, pipe_set);
+ } else
+ if (sync_type == VBLANK_SYNCHRONIZABLE) {
+ dc->hwss.enable_vblanks_synchronization(
+ dc, group_index, group_size, pipe_set);
+ }
+ group_index++;
+ }
+ num_group++;
+ }
+}
+
+static bool streams_changed(struct dc *dc,
+ struct dc_stream_state *streams[],
+ uint8_t stream_count)
+{
+ uint8_t i;
+
+ if (stream_count != dc->current_state->stream_count)
+ return true;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (dc->current_state->streams[i] != streams[i])
+ return true;
+ if (!streams[i]->link->link_state_valid)
+ return true;
+ }
+
+ return false;
+}
+
+bool dc_validate_boot_timing(const struct dc *dc,
+ const struct dc_sink *sink,
+ struct dc_crtc_timing *crtc_timing)
+{
+ struct timing_generator *tg;
+ struct stream_encoder *se = NULL;
+
+ struct dc_crtc_timing hw_crtc_timing = {0};
+
+ struct dc_link *link = sink->link;
+ unsigned int i, enc_inst, tg_inst = 0;
+
+ /* Support seamless boot on EDP displays only */
+ if (sink->sink_signal != SIGNAL_TYPE_EDP) {
+ return false;
+ }
+
+ if (dc->debug.force_odm_combine)
+ return false;
+
+ /* Check for enabled DIG to identify enabled display */
+ if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ return false;
+
+ enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
+
+ if (enc_inst == ENGINE_ID_UNKNOWN)
+ return false;
+
+ for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
+ if (dc->res_pool->stream_enc[i]->id == enc_inst) {
+
+ se = dc->res_pool->stream_enc[i];
+
+ tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
+ dc->res_pool->stream_enc[i]);
+ break;
+ }
+ }
+
+ // tg_inst not found
+ if (i == dc->res_pool->stream_enc_count)
+ return false;
+
+ if (tg_inst >= dc->res_pool->timing_generator_count)
+ return false;
+
+ if (tg_inst != link->link_enc->preferred_engine)
+ return false;
+
+ tg = dc->res_pool->timing_generators[tg_inst];
+
+ if (!tg->funcs->get_hw_timing)
+ return false;
+
+ if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
+ return false;
+
+ if (crtc_timing->h_total != hw_crtc_timing.h_total)
+ return false;
+
+ if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
+ return false;
+
+ if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
+ return false;
+
+ if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
+ return false;
+
+ if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
+ return false;
+
+ if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
+ return false;
+
+ if (crtc_timing->v_total != hw_crtc_timing.v_total)
+ return false;
+
+ if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
+ return false;
+
+ if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
+ return false;
+
+ if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
+ return false;
+
+ if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
+ return false;
+
+ if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
+ return false;
+
+ /* block DSC for now, as VBIOS does not currently support DSC timings */
+ if (crtc_timing->flags.DSC)
+ return false;
+
+ if (dc_is_dp_signal(link->connector_signal)) {
+ unsigned int pix_clk_100hz;
+ uint32_t numOdmPipes = 1;
+ uint32_t id_src[4] = {0};
+
+ dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
+ dc->res_pool->dp_clock_source,
+ tg_inst, &pix_clk_100hz);
+
+ if (tg->funcs->get_optc_source)
+ tg->funcs->get_optc_source(tg,
+ &numOdmPipes, &id_src[0], &id_src[1]);
+
+ if (numOdmPipes == 2)
+ pix_clk_100hz *= 2;
+ if (numOdmPipes == 4)
+ pix_clk_100hz *= 4;
+
+ // Note: In rare cases, HW pixclk may differ from crtc's pixclk
+ // slightly due to rounding issues in 10 kHz units.
+ if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
+ return false;
+
+ if (!se->funcs->dp_get_pixel_format)
+ return false;
+
+ if (!se->funcs->dp_get_pixel_format(
+ se,
+ &hw_crtc_timing.pixel_encoding,
+ &hw_crtc_timing.display_color_depth))
+ return false;
+
+ if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
+ return false;
+
+ if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
+ return false;
+ }
+
+ if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
+ return false;
+ }
+
+ if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
+ DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
+ return false;
+ }
+
+ return true;
+}
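+
+/*
+ * Illustrative caller sketch (hypothetical DM-side usage, not part of the dc
+ * interface): a display manager would typically use the result to decide
+ * whether the firmware-lit eDP timing can be adopted without a full modeset.
+ *
+ *	if (dc_validate_boot_timing(dc, stream->sink, &stream->timing))
+ *		stream->apply_seamless_boot_optimization = true;
+ */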
+
+static inline bool should_update_pipe_for_stream(
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_state *stream)
+{
+ return (pipe_ctx->stream && pipe_ctx->stream == stream);
+}
+
+static inline bool should_update_pipe_for_plane(
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_plane_state *plane_state)
+{
+ return (pipe_ctx->plane_state == plane_state);
+}
+
+void dc_enable_stereo(
+ struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *streams[],
+ uint8_t stream_count)
+{
+ int i, j;
+ struct pipe_ctx *pipe;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (context != NULL) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ } else {
+ context = dc->current_state;
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ }
+
+ for (j = 0; pipe && j < stream_count; j++) {
+ if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
+ dc->hwss.setup_stereo)
+ dc->hwss.setup_stereo(pipe, dc);
+ }
+ }
+}
+
+void dc_trigger_sync(struct dc *dc, struct dc_state *context)
+{
+ if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
+ dc_exit_ips_for_hw_access(dc);
+
+ enable_timing_multisync(dc, context);
+ program_timing_sync(dc, context);
+ }
+}
+
+static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ unsigned int stream_mask = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream)
+ stream_mask |= 1 << i;
+ }
+
+ return stream_mask;
+}
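+
+/*
+ * Worked example: if only pipes 0 and 2 carry streams, get_stream_mask()
+ * returns 0b0101 (0x5). dc_commit_state_no_check() below forwards the mask
+ * to DMUB via dc_dmub_srv_notify_stream_mask() whenever it changes.
+ */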
+
+void dc_z10_restore(const struct dc *dc)
+{
+ if (dc->hwss.z10_restore)
+ dc->hwss.z10_restore(dc);
+}
+
+void dc_z10_save_init(struct dc *dc)
+{
+ if (dc->hwss.z10_save_init)
+ dc->hwss.z10_save_init(dc);
+}
+
+/**
+ * dc_commit_state_no_check - Apply context to the hardware
+ *
+ * @dc: DC object with the current status to be updated
+ * @context: New state that will become the current status at the end of this function
+ *
+ * Applies the given context to the hardware and copies it into the current
+ * context. It's up to the user to release the src context afterwards.
+ *
+ * Return: an enum dc_status result code for the operation
+ */
+static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
+{
+ struct dc_bios *dcb = dc->ctx->dc_bios;
+ enum dc_status result = DC_ERROR_UNEXPECTED;
+ struct pipe_ctx *pipe;
+ int i, k, l;
+ struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
+ struct dc_state *old_state;
+ bool subvp_prev_use = false;
+
+ dc_z10_restore(dc);
+ dc_allow_idle_optimizations(dc, false);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ /* Check old context for SubVP */
+ subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
+ if (subvp_prev_use)
+ break;
+ }
+
+ for (i = 0; i < context->stream_count; i++)
+ dc_streams[i] = context->streams[i];
+
+ if (!dcb->funcs->is_accelerated_mode(dcb)) {
+ disable_vbios_mode_if_required(dc, context);
+ dc->hwss.enable_accelerated_mode(dc, context);
+ }
+
+ if (context->stream_count > get_seamless_boot_stream_count(context) ||
+ context->stream_count == 0)
+ dc->hwss.prepare_bandwidth(dc, context);
+
+ /* When SubVP is active, all HW programming must be done while
+ * SubVP lock is acquired
+ */
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
+
+ if (dc->hwss.update_dsc_pg)
+ dc->hwss.update_dsc_pg(dc, context, false);
+
+ disable_dangling_plane(dc, context);
+	/* re-program planes for the existing stream, in case we need to
+	 * free up plane resources for later use
+ */
+ if (dc->hwss.apply_ctx_for_surface) {
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->mode_changed)
+ continue;
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context); /* use new pipe config in new context */
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
+ }
+ }
+
+ /* Program hardware */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
+ }
+
+ result = dc->hwss.apply_ctx_to_hw(dc, context);
+
+ if (result != DC_OK) {
+ /* Application of dc_state to hardware stopped. */
+ dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
+ return result;
+ }
+
+ dc_trigger_sync(dc, context);
+
+ /* Full update should unconditionally be triggered when dc_commit_state_no_check is called */
+ for (i = 0; i < context->stream_count; i++) {
+ uint32_t prev_dsc_changed = context->streams[i]->update_flags.bits.dsc_changed;
+
+ context->streams[i]->update_flags.raw = 0xFFFFFFFF;
+ context->streams[i]->update_flags.bits.dsc_changed = prev_dsc_changed;
+ }
+
+ /* Program all planes within new context*/
+ if (dc->hwss.program_front_end_for_ctx) {
+ dc->hwss.interdependent_update_lock(dc, context, true);
+ dc->hwss.program_front_end_for_ctx(dc, context);
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
+ }
+
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
+
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_link *link = context->streams[i]->link;
+
+ if (!context->streams[i]->mode_changed)
+ continue;
+
+ if (dc->hwss.apply_ctx_for_surface) {
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
+ dc->hwss.apply_ctx_for_surface(
+ dc, context->streams[i],
+ context->stream_status[i].plane_count,
+ context);
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
+ }
+
+ /*
+ * enable stereo
+ * TODO rework dc_enable_stereo call to work with validation sets?
+ */
+ for (k = 0; k < MAX_PIPES; k++) {
+ pipe = &context->res_ctx.pipe_ctx[k];
+
+ for (l = 0 ; pipe && l < context->stream_count; l++) {
+ if (context->streams[l] &&
+ context->streams[l] == pipe->stream &&
+ dc->hwss.setup_stereo)
+ dc->hwss.setup_stereo(pipe, dc);
+ }
+ }
+
+ CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
+ context->streams[i]->timing.h_addressable,
+ context->streams[i]->timing.v_addressable,
+ context->streams[i]->timing.h_total,
+ context->streams[i]->timing.v_total,
+ context->streams[i]->timing.pix_clk_100hz / 10);
+ }
+
+ dc_enable_stereo(dc, context, dc_streams, context->stream_count);
+
+ if (context->stream_count > get_seamless_boot_stream_count(context) ||
+ context->stream_count == 0) {
+ /* Must wait for no flips to be pending before doing optimize bw */
+ wait_for_no_pipes_pending(dc, context);
+ /*
+ * optimized dispclk depends on ODM setup. Need to wait for ODM
+ * update pending complete before optimizing bandwidth.
+ */
+ wait_for_odm_update_pending_complete(dc, context);
+ /* pplib is notified if disp_num changed */
+ dc->hwss.optimize_bandwidth(dc, context);
+ /* Need to do otg sync again as otg could be out of sync due to otg
+ * workaround applied during clock update
+ */
+ dc_trigger_sync(dc, context);
+ }
+
+ if (dc->hwss.update_dsc_pg)
+ dc->hwss.update_dsc_pg(dc, context, true);
+
+ if (dc->ctx->dce_version >= DCE_VERSION_MAX)
+ TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
+ else
+ TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+
+ context->stream_mask = get_stream_mask(dc, context);
+
+ if (context->stream_mask != dc->current_state->stream_mask)
+ dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
+
+ for (i = 0; i < context->stream_count; i++)
+ context->streams[i]->mode_changed = false;
+
+ /* Clear update flags that were set earlier to avoid redundant programming */
+ for (i = 0; i < context->stream_count; i++) {
+ context->streams[i]->update_flags.raw = 0x0;
+ }
+
+ old_state = dc->current_state;
+ dc->current_state = context;
+
+ dc_state_release(old_state);
+
+ dc_state_retain(dc->current_state);
+
+ return result;
+}
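+
+/*
+ * Reference-counting sketch for the state swap at the end of
+ * dc_commit_state_no_check() (a reading aid, not additional API):
+ *
+ *	old_state = dc->current_state;
+ *	dc->current_state = context;        // dc now points at the new state
+ *	dc_state_release(old_state);        // drop dc's reference on the old one
+ *	dc_state_retain(dc->current_state); // take dc's reference on the new one
+ *
+ * The caller still holds its own reference on @context and must release it,
+ * as noted in the kernel-doc above.
+ */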
+
+static bool commit_minimal_transition_state(struct dc *dc,
+ struct dc_state *transition_base_context);
+
+/**
+ * dc_commit_streams - Commit current stream state
+ *
+ * @dc: DC object with the commit state to be configured in the hardware
+ * @params: Parameters for the commit, including the streams to be committed
+ *
+ * Function responsible for committing stream changes to the hardware.
+ *
+ * Return:
+ * DC_OK if everything works as expected; otherwise, a dc_status error
+ * code.
+ */
+enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params *params)
+{
+ int i, j;
+ struct dc_state *context;
+ enum dc_status res = DC_OK;
+ struct dc_validation_set set[MAX_STREAMS] = {0};
+ struct pipe_ctx *pipe;
+ bool handle_exit_odm2to1 = false;
+
+ if (!params)
+ return DC_ERROR_UNEXPECTED;
+
+ if (dc->ctx->dce_environment == DCE_ENV_VIRTUAL_HW)
+ return res;
+
+ if (!streams_changed(dc, params->streams, params->stream_count) &&
+ dc->current_state->power_source == params->power_source)
+ return res;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ DC_LOG_DC("%s: %d streams\n", __func__, params->stream_count);
+
+ for (i = 0; i < params->stream_count; i++) {
+ struct dc_stream_state *stream = params->streams[i];
+ struct dc_stream_status *status = dc_stream_get_status(stream);
+
+ dc_stream_log(dc, stream);
+
+ set[i].stream = stream;
+
+ if (status) {
+ set[i].plane_count = status->plane_count;
+ for (j = 0; j < status->plane_count; j++)
+ set[i].plane_states[j] = status->plane_states[j];
+ }
+ }
+
+	/* ODM Combine 2:1 power optimization is only applied in the single
+	 * stream scenario; it uses more pipes than strictly needed to reduce
+	 * power consumption. We need to switch off this feature to make room
+	 * for new streams.
+	 */
+ if (params->stream_count > dc->current_state->stream_count &&
+ dc->current_state->stream_count == 1) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe->next_odm_pipe)
+ handle_exit_odm2to1 = true;
+ }
+ }
+
+ if (handle_exit_odm2to1)
+ res = commit_minimal_transition_state(dc, dc->current_state);
+
+ context = dc_state_create_current_copy(dc);
+ if (!context)
+ goto context_alloc_fail;
+
+ context->power_source = params->power_source;
+
+ res = dc_validate_with_context(dc, set, params->stream_count, context, false);
+ if (res != DC_OK) {
+ BREAK_TO_DEBUGGER();
+ goto fail;
+ }
+
+ res = dc_commit_state_no_check(dc, context);
+
+ for (i = 0; i < params->stream_count; i++) {
+ for (j = 0; j < context->stream_count; j++) {
+ if (params->streams[i]->stream_id == context->streams[j]->stream_id)
+ params->streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
+
+ if (dc_is_embedded_signal(params->streams[i]->signal)) {
+ struct dc_stream_status *status = dc_state_get_stream_status(context, params->streams[i]);
+
+ if (dc->hwss.is_abm_supported)
+ status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, params->streams[i]);
+ else
+ status->is_abm_supported = true;
+ }
+ }
+ }
+
+fail:
+ dc_state_release(context);
+
+context_alloc_fail:
+
+ DC_LOG_DC("%s Finished.\n", __func__);
+
+ return res;
+}
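+
+/*
+ * Minimal caller sketch for dc_commit_streams() (hypothetical DM-side code;
+ * the local variable names and values are illustrative):
+ *
+ *	struct dc_commit_streams_params params = {
+ *		.streams = streams,
+ *		.stream_count = stream_count,
+ *		.power_source = dc->current_state->power_source,
+ *	};
+ *
+ *	if (dc_commit_streams(dc, &params) != DC_OK)
+ *		// handle commit failure
+ */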
+
+bool dc_acquire_release_mpc_3dlut(
+ struct dc *dc, bool acquire,
+ struct dc_stream_state *stream,
+ struct dc_3dlut **lut,
+ struct dc_transfer_func **shaper)
+{
+ int pipe_idx;
+ bool ret = false;
+ bool found_pipe_idx = false;
+ const struct resource_pool *pool = dc->res_pool;
+ struct resource_context *res_ctx = &dc->current_state->res_ctx;
+ int mpcc_id = 0;
+
+ if (pool && res_ctx) {
+ if (acquire) {
+ /*find pipe idx for the given stream*/
+ for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
+ if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
+ found_pipe_idx = true;
+ mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
+ break;
+ }
+ }
+ } else
+ found_pipe_idx = true;/*for release pipe_idx is not required*/
+
+ if (found_pipe_idx) {
+ if (acquire && pool->funcs->acquire_post_bldn_3dlut)
+ ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
+ else if (!acquire && pool->funcs->release_post_bldn_3dlut)
+ ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
+ }
+ }
+ return ret;
+}
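+
+/*
+ * Acquire and release are paired through the same entry point; a hypothetical
+ * caller sketch (the lut/shaper objects come from the resource pool):
+ *
+ *	struct dc_3dlut *lut = NULL;
+ *	struct dc_transfer_func *shaper = NULL;
+ *
+ *	if (dc_acquire_release_mpc_3dlut(dc, true, stream, &lut, &shaper)) {
+ *		// ... program lut/shaper ...
+ *		dc_acquire_release_mpc_3dlut(dc, false, stream, &lut, &shaper);
+ *	}
+ */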
+
+static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ struct pipe_ctx *pipe;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ // Don't check flip pending on phantom pipes
+ if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM))
+ continue;
+
+ /* Must set to false to start with, due to OR in update function */
+ pipe->plane_state->status.is_flip_pending = false;
+ dc->hwss.update_pending_status(pipe);
+ if (pipe->plane_state->status.is_flip_pending)
+ return true;
+ }
+ return false;
+}
+
+/* Perform updates here which need to be deferred until next vupdate
+ *
+ * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered
+ * but forcing lut memory to shutdown state is immediate. This causes
+ * single frame corruption as lut gets disabled mid-frame unless shutdown
+ * is deferred until after entering bypass.
+ */
+static void process_deferred_updates(struct dc *dc)
+{
+ int i = 0;
+
+ if (dc->debug.enable_mem_low_power.bits.cm) {
+ ASSERT(dc->dcn_ip->max_num_dpp);
+ for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
+ if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
+ dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
+ }
+}
+
+void dc_post_update_surfaces_to_stream(struct dc *dc)
+{
+ int i;
+ struct dc_state *context = dc->current_state;
+
+ if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
+ return;
+
+ post_surface_trace(dc);
+
+ /*
+ * Only relevant for DCN behavior where we can guarantee the optimization
+ * is safe to apply - retain the legacy behavior for DCE.
+ */
+
+ if (dc->ctx->dce_version < DCE_VERSION_MAX)
+ TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+ else {
+ TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
+
+ if (is_flip_pending_in_pipes(dc, context))
+ return;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++)
+ if (context->res_ctx.pipe_ctx[i].stream == NULL ||
+ context->res_ctx.pipe_ctx[i].plane_state == NULL) {
+ context->res_ctx.pipe_ctx[i].pipe_idx = i;
+ dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]);
+ }
+
+ process_deferred_updates(dc);
+
+ dc->hwss.optimize_bandwidth(dc, context);
+
+ if (dc->hwss.update_dsc_pg)
+ dc->hwss.update_dsc_pg(dc, context, true);
+ }
+
+ dc->optimized_required = false;
+ dc->wm_optimized_required = false;
+}
+
+bool dc_set_generic_gpio_for_stereo(bool enable,
+ struct gpio_service *gpio_service)
+{
+ enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
+ struct gpio_pin_info pin_info;
+ struct gpio *generic;
+ struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
+ GFP_KERNEL);
+
+ if (!config)
+ return false;
+ pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
+
+ if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
+ kfree(config);
+ return false;
+ } else {
+ generic = dal_gpio_service_create_generic_mux(
+ gpio_service,
+ pin_info.offset,
+ pin_info.mask);
+ }
+
+ if (!generic) {
+ kfree(config);
+ return false;
+ }
+
+ gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
+
+ config->enable_output_from_mux = enable;
+ config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
+
+ if (gpio_result == GPIO_RESULT_OK)
+ gpio_result = dal_mux_setup_config(generic, config);
+
+ if (gpio_result == GPIO_RESULT_OK) {
+ dal_gpio_close(generic);
+ dal_gpio_destroy_generic_mux(&generic);
+ kfree(config);
+ return true;
+ } else {
+ dal_gpio_close(generic);
+ dal_gpio_destroy_generic_mux(&generic);
+ kfree(config);
+ return false;
+ }
+}
+
+static bool is_surface_in_context(
+ const struct dc_state *context,
+ const struct dc_plane_state *plane_state)
+{
+ int j;
+
+ for (j = 0; j < MAX_PIPES; j++) {
+ const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (plane_state == pipe_ctx->plane_state) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
+{
+ union surface_update_flags *update_flags = &u->surface->update_flags;
+ enum surface_update_type update_type = UPDATE_TYPE_FAST;
+
+ if (!u->plane_info)
+ return UPDATE_TYPE_FAST;
+
+ if (u->plane_info->color_space != u->surface->color_space) {
+ update_flags->bits.color_space_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
+
+ if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
+ update_flags->bits.horizontal_mirror_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
+
+ if (u->plane_info->rotation != u->surface->rotation) {
+ update_flags->bits.rotation_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
+
+ if (u->plane_info->format != u->surface->format) {
+ update_flags->bits.pixel_format_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
+
+ if (u->plane_info->stereo_format != u->surface->stereo_format) {
+ update_flags->bits.stereo_format_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
+
+ if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
+ update_flags->bits.per_pixel_alpha_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
+
+ if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
+ update_flags->bits.global_alpha_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
+
+ if (u->plane_info->dcc.enable != u->surface->dcc.enable
+ || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
+ || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
+ /* During DCC on/off, stutter period is calculated before
+ * DCC has fully transitioned. This results in incorrect
+ * stutter period calculation. Triggering a full update will
+ * recalculate stutter period.
+ */
+ update_flags->bits.dcc_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
+
+ if (resource_pixel_format_to_bpp(u->plane_info->format) !=
+ resource_pixel_format_to_bpp(u->surface->format)) {
+ /* different bytes per element will require full bandwidth
+ * and DML calculation
+ */
+ update_flags->bits.bpp_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
+
+ if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
+ || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
+ update_flags->bits.plane_size_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+ }
+
+
+ if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
+ sizeof(union dc_tiling_info)) != 0) {
+ update_flags->bits.swizzle_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_MED);
+
+		/* todo: below is HW dependent, we should add a hook to
+		 * DCE/N resource and validate there.
+ */
+ if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
+ /* swizzled mode requires RQ to be setup properly,
+ * thus need to run DML to calculate RQ settings
+ */
+ update_flags->bits.bandwidth_change = 1;
+ elevate_update_type(&update_type, UPDATE_TYPE_FULL);
+ }
+ }
+
+ /* This should be UPDATE_TYPE_FAST if nothing has changed. */
+ return update_type;
+}
+
+static enum surface_update_type get_scaling_info_update_type(
+ const struct dc *dc,
+ const struct dc_surface_update *u)
+{
+ union surface_update_flags *update_flags = &u->surface->update_flags;
+
+ if (!u->scaling_info)
+ return UPDATE_TYPE_FAST;
+
+ if (u->scaling_info->dst_rect.width != u->surface->dst_rect.width
+ || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
+ || u->scaling_info->scaling_quality.integer_scaling !=
+ u->surface->scaling_quality.integer_scaling
+ ) {
+ update_flags->bits.scaling_change = 1;
+
+ if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
+ || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
+ && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
+ || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
+ /* Making dst rect smaller requires a bandwidth change */
+ update_flags->bits.bandwidth_change = 1;
+ }
+
+ if (u->scaling_info->src_rect.width != u->surface->src_rect.width
+ || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
+
+ update_flags->bits.scaling_change = 1;
+ if (u->scaling_info->src_rect.width > u->surface->src_rect.width
+ || u->scaling_info->src_rect.height > u->surface->src_rect.height)
+ /* Making src rect bigger requires a bandwidth change */
+ update_flags->bits.clock_change = 1;
+ }
+
+ if (u->scaling_info->src_rect.width > dc->caps.max_optimizable_video_width &&
+ (u->scaling_info->clip_rect.width > u->surface->clip_rect.width ||
+ u->scaling_info->clip_rect.height > u->surface->clip_rect.height))
+ /* Changing clip size of a large surface may result in MPC slice count change */
+ update_flags->bits.bandwidth_change = 1;
+
+ if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width ||
+ u->scaling_info->clip_rect.height != u->surface->clip_rect.height)
+ update_flags->bits.clip_size_change = 1;
+
+ if (u->scaling_info->src_rect.x != u->surface->src_rect.x
+ || u->scaling_info->src_rect.y != u->surface->src_rect.y
+ || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
+ || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
+ || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
+ || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
+ update_flags->bits.position_change = 1;
+
+ if (update_flags->bits.clock_change
+ || update_flags->bits.bandwidth_change
+ || update_flags->bits.scaling_change)
+ return UPDATE_TYPE_FULL;
+
+ if (update_flags->bits.position_change ||
+ update_flags->bits.clip_size_change)
+ return UPDATE_TYPE_MED;
+
+ return UPDATE_TYPE_FAST;
+}
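+
+/*
+ * Worked example for the classification above: shrinking dst_rect from
+ * 1920x1080 to 960x540 with a 1920x1080 src_rect sets scaling_change and
+ * bandwidth_change, promoting the update to UPDATE_TYPE_FULL; moving only
+ * dst_rect.x/y sets position_change and yields UPDATE_TYPE_MED.
+ */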
+
+static enum surface_update_type det_surface_update(const struct dc *dc,
+ const struct dc_surface_update *u)
+{
+ const struct dc_state *context = dc->current_state;
+ enum surface_update_type type;
+ enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+ union surface_update_flags *update_flags = &u->surface->update_flags;
+
+ if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
+ update_flags->raw = 0xFFFFFFFF;
+ return UPDATE_TYPE_FULL;
+ }
+
+ update_flags->raw = 0; // Reset all flags
+
+ type = get_plane_info_update_type(u);
+ elevate_update_type(&overall_type, type);
+
+ type = get_scaling_info_update_type(dc, u);
+ elevate_update_type(&overall_type, type);
+
+ if (u->flip_addr) {
+ update_flags->bits.addr_update = 1;
+ if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
+ update_flags->bits.tmz_changed = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
+ }
+ }
+ if (u->in_transfer_func)
+ update_flags->bits.in_transfer_func_change = 1;
+
+ if (u->input_csc_color_matrix)
+ update_flags->bits.input_csc_change = 1;
+
+ if (u->coeff_reduction_factor)
+ update_flags->bits.coeff_reduction_change = 1;
+
+ if (u->gamut_remap_matrix)
+ update_flags->bits.gamut_remap_change = 1;
+
+ if (u->blend_tf)
+ update_flags->bits.gamma_change = 1;
+
+ if (u->gamma) {
+ enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
+
+ if (u->plane_info)
+ format = u->plane_info->format;
+ else if (u->surface)
+ format = u->surface->format;
+
+ if (dce_use_lut(format))
+ update_flags->bits.gamma_change = 1;
+ }
+
+ if (u->lut3d_func || u->func_shaper)
+ update_flags->bits.lut_3d = 1;
+
+ if (u->hdr_mult.value)
+ if (u->hdr_mult.value != u->surface->hdr_mult.value) {
+ update_flags->bits.hdr_mult = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_MED);
+ }
+
+ if (update_flags->bits.in_transfer_func_change) {
+ type = UPDATE_TYPE_MED;
+ elevate_update_type(&overall_type, type);
+ }
+
+ if (update_flags->bits.lut_3d) {
+ type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, type);
+ }
+
+ if (dc->debug.enable_legacy_fast_update &&
+ (update_flags->bits.gamma_change ||
+ update_flags->bits.gamut_remap_change ||
+ update_flags->bits.input_csc_change ||
+ update_flags->bits.coeff_reduction_change)) {
+ type = UPDATE_TYPE_FULL;
+ elevate_update_type(&overall_type, type);
+ }
+ return overall_type;
+}
+
+static enum surface_update_type check_update_surfaces_for_stream(
+ struct dc *dc,
+ struct dc_surface_update *updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ const struct dc_stream_status *stream_status)
+{
+ int i;
+ enum surface_update_type overall_type = UPDATE_TYPE_FAST;
+
+ if (dc->idle_optimizations_allowed)
+ overall_type = UPDATE_TYPE_FULL;
+
+ if (stream_status == NULL || stream_status->plane_count != surface_count)
+ overall_type = UPDATE_TYPE_FULL;
+
+ if (stream_update && stream_update->pending_test_pattern) {
+ overall_type = UPDATE_TYPE_FULL;
+ }
+
+ /* some stream updates require passive update */
+ if (stream_update) {
+ union stream_update_flags *su_flags = &stream_update->stream->update_flags;
+
+ if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
+ (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
+ stream_update->integer_scaling_update)
+ su_flags->bits.scaling = 1;
+
+ if (dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
+ su_flags->bits.out_tf = 1;
+
+ if (stream_update->abm_level)
+ su_flags->bits.abm_level = 1;
+
+ if (stream_update->dpms_off)
+ su_flags->bits.dpms_off = 1;
+
+ if (stream_update->gamut_remap)
+ su_flags->bits.gamut_remap = 1;
+
+ if (stream_update->wb_update)
+ su_flags->bits.wb_update = 1;
+
+ if (stream_update->dsc_config)
+ su_flags->bits.dsc_changed = 1;
+
+ if (stream_update->mst_bw_update)
+ su_flags->bits.mst_bw = 1;
+
+ if (stream_update->stream && stream_update->stream->freesync_on_desktop &&
+ (stream_update->vrr_infopacket || stream_update->allow_freesync ||
+ stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
+ su_flags->bits.fams_changed = 1;
+
+ if (su_flags->raw != 0)
+ overall_type = UPDATE_TYPE_FULL;
+
+ if (stream_update->output_csc_transform || stream_update->output_color_space)
+ su_flags->bits.out_csc = 1;
+
+ /* Output transfer function changes do not require bandwidth recalculation,
+ * so don't trigger a full update
+ */
+ if (!dc->debug.enable_legacy_fast_update && stream_update->out_transfer_func)
+ su_flags->bits.out_tf = 1;
+ }
+
+ for (i = 0 ; i < surface_count; i++) {
+ enum surface_update_type type =
+ det_surface_update(dc, &updates[i]);
+
+ elevate_update_type(&overall_type, type);
+ }
+
+ return overall_type;
+}
+
+/*
+ * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
+ *
+ * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
+ */
+enum surface_update_type dc_check_update_surfaces_for_stream(
+ struct dc *dc,
+ struct dc_surface_update *updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ const struct dc_stream_status *stream_status)
+{
+ int i;
+ enum surface_update_type type;
+
+ if (stream_update)
+ stream_update->stream->update_flags.raw = 0;
+ for (i = 0; i < surface_count; i++)
+ updates[i].surface->update_flags.raw = 0;
+
+ type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
+ if (type == UPDATE_TYPE_FULL) {
+ if (stream_update) {
+ uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
+ stream_update->stream->update_flags.raw = 0xFFFFFFFF;
+ stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
+ }
+ for (i = 0; i < surface_count; i++)
+ updates[i].surface->update_flags.raw = 0xFFFFFFFF;
+ }
+
+ if (type == UPDATE_TYPE_FAST) {
+ // If there's an available clock comparator, we use that.
+ if (dc->clk_mgr->funcs->are_clock_states_equal) {
+ if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
+ dc->optimized_required = true;
+ // Else we fallback to mem compare.
+ } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
+ dc->optimized_required = true;
+ }
+
+ dc->optimized_required |= dc->wm_optimized_required;
+ }
+
+ return type;
+}
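+
+/*
+ * Assumed call flow in the update path (see update_planes_and_stream_state()
+ * further below): classify first, then pick the programming sequence.
+ *
+ *	type = dc_check_update_surfaces_for_stream(dc, srf_updates,
+ *			surface_count, stream_update, stream_status);
+ *	if (type == UPDATE_TYPE_FAST) {
+ *		// flip-only path, no bandwidth revalidation
+ *	} else {
+ *		// MED/FULL path, possibly with a new validated context
+ *	}
+ */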
+
+static struct dc_stream_status *stream_get_status(
+ struct dc_state *ctx,
+ struct dc_stream_state *stream)
+{
+ uint8_t i;
+
+ for (i = 0; i < ctx->stream_count; i++) {
+ if (stream == ctx->streams[i]) {
+ return &ctx->stream_status[i];
+ }
+ }
+
+ return NULL;
+}
+
+static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
+
+static void copy_surface_update_to_plane(
+ struct dc_plane_state *surface,
+ struct dc_surface_update *srf_update)
+{
+ if (srf_update->flip_addr) {
+ surface->address = srf_update->flip_addr->address;
+ surface->flip_immediate =
+ srf_update->flip_addr->flip_immediate;
+ surface->time.time_elapsed_in_us[surface->time.index] =
+ srf_update->flip_addr->flip_timestamp_in_us -
+ surface->time.prev_update_time_in_us;
+ surface->time.prev_update_time_in_us =
+ srf_update->flip_addr->flip_timestamp_in_us;
+ surface->time.index++;
+ if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
+ surface->time.index = 0;
+
+ surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
+ }
+
+ if (srf_update->scaling_info) {
+ surface->scaling_quality =
+ srf_update->scaling_info->scaling_quality;
+ surface->dst_rect =
+ srf_update->scaling_info->dst_rect;
+ surface->src_rect =
+ srf_update->scaling_info->src_rect;
+ surface->clip_rect =
+ srf_update->scaling_info->clip_rect;
+ }
+
+ if (srf_update->plane_info) {
+ surface->color_space =
+ srf_update->plane_info->color_space;
+ surface->format =
+ srf_update->plane_info->format;
+ surface->plane_size =
+ srf_update->plane_info->plane_size;
+ surface->rotation =
+ srf_update->plane_info->rotation;
+ surface->horizontal_mirror =
+ srf_update->plane_info->horizontal_mirror;
+ surface->stereo_format =
+ srf_update->plane_info->stereo_format;
+ surface->tiling_info =
+ srf_update->plane_info->tiling_info;
+ surface->visible =
+ srf_update->plane_info->visible;
+ surface->per_pixel_alpha =
+ srf_update->plane_info->per_pixel_alpha;
+ surface->global_alpha =
+ srf_update->plane_info->global_alpha;
+ surface->global_alpha_value =
+ srf_update->plane_info->global_alpha_value;
+ surface->dcc =
+ srf_update->plane_info->dcc;
+ surface->layer_index =
+ srf_update->plane_info->layer_index;
+ }
+
+ if (srf_update->gamma) {
+ memcpy(&surface->gamma_correction.entries,
+ &srf_update->gamma->entries,
+ sizeof(struct dc_gamma_entries));
+ surface->gamma_correction.is_identity =
+ srf_update->gamma->is_identity;
+ surface->gamma_correction.num_entries =
+ srf_update->gamma->num_entries;
+ surface->gamma_correction.type =
+ srf_update->gamma->type;
+ }
+
+ if (srf_update->in_transfer_func) {
+ surface->in_transfer_func.sdr_ref_white_level =
+ srf_update->in_transfer_func->sdr_ref_white_level;
+ surface->in_transfer_func.tf =
+ srf_update->in_transfer_func->tf;
+ surface->in_transfer_func.type =
+ srf_update->in_transfer_func->type;
+ memcpy(&surface->in_transfer_func.tf_pts,
+ &srf_update->in_transfer_func->tf_pts,
+ sizeof(struct dc_transfer_func_distributed_points));
+ }
+
+ if (srf_update->func_shaper)
+ memcpy(&surface->in_shaper_func, srf_update->func_shaper,
+ sizeof(surface->in_shaper_func));
+
+ if (srf_update->lut3d_func)
+ memcpy(&surface->lut3d_func, srf_update->lut3d_func,
+ sizeof(surface->lut3d_func));
+
+ if (srf_update->hdr_mult.value)
+ surface->hdr_mult =
+ srf_update->hdr_mult;
+
+ if (srf_update->blend_tf)
+ memcpy(&surface->blend_tf, srf_update->blend_tf,
+ sizeof(surface->blend_tf));
+
+ if (srf_update->input_csc_color_matrix)
+ surface->input_csc_color_matrix =
+ *srf_update->input_csc_color_matrix;
+
+ if (srf_update->coeff_reduction_factor)
+ surface->coeff_reduction_factor =
+ *srf_update->coeff_reduction_factor;
+
+ if (srf_update->gamut_remap_matrix)
+ surface->gamut_remap_matrix =
+ *srf_update->gamut_remap_matrix;
+}
+
+static void copy_stream_update_to_stream(struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *update)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+
+ if (update == NULL || stream == NULL)
+ return;
+
+ if (update->src.height && update->src.width)
+ stream->src = update->src;
+
+ if (update->dst.height && update->dst.width)
+ stream->dst = update->dst;
+
+ if (update->out_transfer_func) {
+ stream->out_transfer_func.sdr_ref_white_level =
+ update->out_transfer_func->sdr_ref_white_level;
+ stream->out_transfer_func.tf = update->out_transfer_func->tf;
+ stream->out_transfer_func.type =
+ update->out_transfer_func->type;
+ memcpy(&stream->out_transfer_func.tf_pts,
+ &update->out_transfer_func->tf_pts,
+ sizeof(struct dc_transfer_func_distributed_points));
+ }
+
+ if (update->hdr_static_metadata)
+ stream->hdr_static_metadata = *update->hdr_static_metadata;
+
+ if (update->abm_level)
+ stream->abm_level = *update->abm_level;
+
+ if (update->periodic_interrupt)
+ stream->periodic_interrupt = *update->periodic_interrupt;
+
+ if (update->gamut_remap)
+ stream->gamut_remap_matrix = *update->gamut_remap;
+
+	/* Note: updating this after mode set is currently not a use case;
+	 * however, if it arises, OCSC would need to be reprogrammed at a
+	 * minimum
+ */
+ if (update->output_color_space)
+ stream->output_color_space = *update->output_color_space;
+
+ if (update->output_csc_transform)
+ stream->csc_color_matrix = *update->output_csc_transform;
+
+ if (update->vrr_infopacket)
+ stream->vrr_infopacket = *update->vrr_infopacket;
+
+ if (update->allow_freesync)
+ stream->allow_freesync = *update->allow_freesync;
+
+ if (update->vrr_active_variable)
+ stream->vrr_active_variable = *update->vrr_active_variable;
+
+ if (update->vrr_active_fixed)
+ stream->vrr_active_fixed = *update->vrr_active_fixed;
+
+ if (update->crtc_timing_adjust)
+ stream->adjust = *update->crtc_timing_adjust;
+
+ if (update->dpms_off)
+ stream->dpms_off = *update->dpms_off;
+
+ if (update->hfvsif_infopacket)
+ stream->hfvsif_infopacket = *update->hfvsif_infopacket;
+
+ if (update->vtem_infopacket)
+ stream->vtem_infopacket = *update->vtem_infopacket;
+
+ if (update->vsc_infopacket)
+ stream->vsc_infopacket = *update->vsc_infopacket;
+
+ if (update->vsp_infopacket)
+ stream->vsp_infopacket = *update->vsp_infopacket;
+
+ if (update->adaptive_sync_infopacket)
+ stream->adaptive_sync_infopacket = *update->adaptive_sync_infopacket;
+
+ if (update->dither_option)
+ stream->dither_option = *update->dither_option;
+
+ if (update->pending_test_pattern)
+ stream->test_pattern = *update->pending_test_pattern;
+ /* update current stream with writeback info */
+ if (update->wb_update) {
+ int i;
+
+ stream->num_wb_info = update->wb_update->num_wb_info;
+ ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
+ for (i = 0; i < stream->num_wb_info; i++)
+ stream->writeback_info[i] =
+ update->wb_update->writeback_info[i];
+ }
+ if (update->dsc_config) {
+ struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
+ uint32_t old_dsc_enabled = stream->timing.flags.DSC;
+ uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
+ update->dsc_config->num_slices_v != 0);
+
+		/* Use temporary context for validating new DSC config */
+ struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);
+
+ if (dsc_validate_context) {
+ stream->timing.dsc_cfg = *update->dsc_config;
+ stream->timing.flags.DSC = enable_dsc;
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
+ stream->timing.dsc_cfg = old_dsc_cfg;
+ stream->timing.flags.DSC = old_dsc_enabled;
+ update->dsc_config = NULL;
+ }
+
+ dc_state_release(dsc_validate_context);
+ } else {
+ DC_ERROR("Failed to allocate new validate context for DSC change\n");
+ update->dsc_config = NULL;
+ }
+ }
+}
+
+static void backup_planes_and_stream_state(
+ struct dc_scratch_space *scratch,
+ struct dc_stream_state *stream)
+{
+ int i;
+ struct dc_stream_status *status = dc_stream_get_status(stream);
+
+ if (!status)
+ return;
+
+ for (i = 0; i < status->plane_count; i++) {
+ scratch->plane_states[i] = *status->plane_states[i];
+ }
+ scratch->stream_state = *stream;
+}
+
+static void restore_planes_and_stream_state(
+ struct dc_scratch_space *scratch,
+ struct dc_stream_state *stream)
+{
+ int i;
+ struct dc_stream_status *status = dc_stream_get_status(stream);
+
+ if (!status)
+ return;
+
+ for (i = 0; i < status->plane_count; i++) {
+ *status->plane_states[i] = scratch->plane_states[i];
+ }
+ *stream = scratch->stream_state;
+}
+
+/**
+ * update_seamless_boot_flags() - Helper function for updating seamless boot flags
+ *
+ * @dc: Current DC state
+ * @context: New DC state to be programmed
+ * @surface_count: Number of surfaces that have been updated
+ * @stream: Corresponding stream to be updated in the current flip
+ *
+ * Updating seamless boot flags does not need to be part of the commit
+ * sequence. This helper function updates the seamless boot flags on each
+ * flip (if required) outside of the HW commit sequence (fast or slow).
+ *
+ * Return: void
+ */
+static void update_seamless_boot_flags(struct dc *dc,
+ struct dc_state *context,
+ int surface_count,
+ struct dc_stream_state *stream)
+{
+ if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
+		/* The seamless boot optimization flag keeps clocks and watermarks
+		 * high until the first flip. After the first flip, optimization is
+		 * required to lower bandwidth. Note that UEFI is expected to only
+		 * light up a single display on POST, therefore we only expect one
+		 * stream with the seamless boot flag set.
+ */
+ if (stream->apply_seamless_boot_optimization) {
+ stream->apply_seamless_boot_optimization = false;
+
+ if (get_seamless_boot_stream_count(context) == 0)
+ dc->optimized_required = true;
+ }
+ }
+}
+
+/**
+ * update_planes_and_stream_state() - The function takes planes and stream
+ * updates as inputs and determines the appropriate update type. If the update
+ * type is FULL, the function allocates a new context, then populates and
+ * validates it. Otherwise, it updates the current dc context. The function
+ * returns both new_context and new_update_type to the caller. It also backs
+ * up both the current and new contexts into the corresponding dc state
+ * scratch memory.
+ * TODO: The function does too many things, and even conditionally allocates dc
+ * context memory implicitly. We should consider breaking it down.
+ *
+ * @dc: Current DC state
+ * @srf_updates: an array of surface updates
+ * @surface_count: surface update count
+ * @stream: Corresponding stream to be updated
+ * @stream_update: stream update
+ * @new_update_type: [out] determined update type by the function
+ * @new_context: [out] new context allocated and validated if update type is
+ * FULL, reference to current context if update type is less than FULL.
+ *
+ * Return: true if a valid update is populated into new_context, false
+ * otherwise.
+ */
+static bool update_planes_and_stream_state(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type *new_update_type,
+ struct dc_state **new_context)
+{
+ struct dc_state *context;
+ int i, j;
+ enum surface_update_type update_type;
+ const struct dc_stream_status *stream_status;
+ struct dc_context *dc_ctx = dc->ctx;
+
+ stream_status = dc_stream_get_status(stream);
+
+ if (!stream_status) {
+ if (surface_count) /* Only an error condition if surf_count non-zero*/
+ ASSERT(false);
+
+ return false; /* Cannot commit surface to stream that is not committed */
+ }
+
+ context = dc->current_state;
+ update_type = dc_check_update_surfaces_for_stream(
+ dc, srf_updates, surface_count, stream_update, stream_status);
+ if (update_type == UPDATE_TYPE_FULL)
+ backup_planes_and_stream_state(&dc->scratch.current_state, stream);
+
+ /* update current stream with the new updates */
+ copy_stream_update_to_stream(dc, context, stream, stream_update);
+
+ /* do not perform surface update if surface has invalid dimensions
+ * (all zero) and no scaling_info is provided
+ */
+ if (surface_count > 0) {
+ for (i = 0; i < surface_count; i++) {
+ if ((srf_updates[i].surface->src_rect.width == 0 ||
+ srf_updates[i].surface->src_rect.height == 0 ||
+ srf_updates[i].surface->dst_rect.width == 0 ||
+ srf_updates[i].surface->dst_rect.height == 0) &&
+ (!srf_updates[i].scaling_info ||
+ srf_updates[i].scaling_info->src_rect.width == 0 ||
+ srf_updates[i].scaling_info->src_rect.height == 0 ||
+ srf_updates[i].scaling_info->dst_rect.width == 0 ||
+ srf_updates[i].scaling_info->dst_rect.height == 0)) {
+ DC_ERROR("Invalid src/dst rects in surface update!\n");
+ return false;
+ }
+ }
+ }
+
+ if (update_type >= update_surface_trace_level)
+ update_surface_trace(dc, srf_updates, surface_count);
+
+ for (i = 0; i < surface_count; i++)
+ copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]);
+
+ if (update_type >= UPDATE_TYPE_FULL) {
+ struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
+
+ for (i = 0; i < surface_count; i++)
+ new_planes[i] = srf_updates[i].surface;
+
+ /* initialize scratch memory for building context */
+ context = dc_state_create_copy(dc->current_state);
+ if (context == NULL) {
+ DC_ERROR("Failed to allocate new validate context!\n");
+ return false;
+ }
+
+ /* For each full update, remove all existing phantom pipes first.
+ * Ensures that we have enough pipes for newly added MPO planes
+ */
+ dc_state_remove_phantom_streams_and_planes(dc, context);
+ dc_state_release_phantom_streams_and_planes(dc, context);
+
+ /*remove old surfaces from context */
+ if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) {
+
+ BREAK_TO_DEBUGGER();
+ goto fail;
+ }
+
+ /* add surface to context */
+ if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
+
+ BREAK_TO_DEBUGGER();
+ goto fail;
+ }
+ }
+
+ /* save update parameters into surface */
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *surface = srf_updates[i].surface;
+
+ if (update_type != UPDATE_TYPE_MED)
+ continue;
+ if (surface->update_flags.bits.clip_size_change ||
+ surface->update_flags.bits.position_change) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->plane_state != surface)
+ continue;
+
+ resource_build_scaling_params(pipe_ctx);
+ }
+ }
+ }
+
+ if (update_type == UPDATE_TYPE_FULL) {
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ BREAK_TO_DEBUGGER();
+ goto fail;
+ }
+ }
+ update_seamless_boot_flags(dc, context, surface_count, stream);
+
+ *new_context = context;
+ *new_update_type = update_type;
+ if (update_type == UPDATE_TYPE_FULL)
+ backup_planes_and_stream_state(&dc->scratch.new_state, stream);
+
+ return true;
+
+fail:
+ dc_state_release(context);
+
+ return false;
+
+}
+
+static void commit_planes_do_stream_update(struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type,
+ struct dc_state *context)
+{
+ int j;
+
+ // Stream updates
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) && pipe_ctx->stream == stream) {
+
+ if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
+ dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
+
+ if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
+ stream_update->vrr_infopacket ||
+ stream_update->vsc_infopacket ||
+ stream_update->vsp_infopacket ||
+ stream_update->hfvsif_infopacket ||
+ stream_update->adaptive_sync_infopacket ||
+ stream_update->vtem_infopacket) {
+ resource_build_info_frame(pipe_ctx);
+ dc->hwss.update_info_frame(pipe_ctx);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dc->link_srv->dp_trace_source_sequence(
+ pipe_ctx->stream->link,
+ DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+ }
+
+ if (stream_update->hdr_static_metadata &&
+ stream->use_dynamic_meta &&
+ dc->hwss.set_dmdata_attributes &&
+ pipe_ctx->stream->dmdata_address.quad_part != 0)
+ dc->hwss.set_dmdata_attributes(pipe_ctx);
+
+ if (stream_update->gamut_remap)
+ dc_stream_set_gamut_remap(dc, stream);
+
+ if (stream_update->output_csc_transform)
+ dc_stream_program_csc_matrix(dc, stream);
+
+ if (stream_update->dither_option) {
+ struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream,
+ &pipe_ctx->stream->bit_depth_params);
+ pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+ while (odm_pipe) {
+ odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
+ &stream->bit_depth_params,
+ &stream->clamping);
+ odm_pipe = odm_pipe->next_odm_pipe;
+ }
+ }
+
+
+ /* Full fe update*/
+ if (update_type == UPDATE_TYPE_FAST)
+ continue;
+
+ if (stream_update->dsc_config)
+ dc->link_srv->update_dsc_config(pipe_ctx);
+
+ if (stream_update->mst_bw_update) {
+ if (stream_update->mst_bw_update->is_increase)
+ dc->link_srv->increase_mst_payload(pipe_ctx,
+ stream_update->mst_bw_update->mst_stream_bw);
+ else
+ dc->link_srv->reduce_mst_payload(pipe_ctx,
+ stream_update->mst_bw_update->mst_stream_bw);
+ }
+
+ if (stream_update->pending_test_pattern) {
+				/*
+				 * Test pattern params depend on ODM topology
+				 * changes that we could be applying to the
+				 * front end. Since front end changes are not
+				 * yet applied at this stage, we can only apply
+				 * the test pattern in HW based on the current
+				 * state and populate the final test pattern
+				 * params in the new state. If the current and
+				 * new test pattern params differ as a result
+				 * of different ODM topologies being used, it
+				 * will be detected and handled during the
+				 * front end programming update.
+				 */
+ dc->link_srv->dp_set_test_pattern(stream->link,
+ stream->test_pattern.type,
+ stream->test_pattern.color_space,
+ stream->test_pattern.p_link_settings,
+ stream->test_pattern.p_custom_pattern,
+ stream->test_pattern.cust_pattern_size);
+ resource_build_test_pattern_params(&context->res_ctx, pipe_ctx);
+ }
+
+ if (stream_update->dpms_off) {
+ if (*stream_update->dpms_off) {
+ dc->link_srv->set_dpms_off(pipe_ctx);
+ /* for dpms, keep acquired resources*/
+ if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
+ pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
+
+ dc->optimized_required = true;
+
+ } else {
+ if (get_seamless_boot_stream_count(context) == 0)
+ dc->hwss.prepare_bandwidth(dc, dc->current_state);
+ dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
+ }
+ } else if (pipe_ctx->stream->link->wa_flags.blank_stream_on_ocs_change && stream_update->output_color_space
+ && !stream->dpms_off && dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ /*
+ * Workaround for firmware issue in some receivers where they don't pick up
+ * correct output color space unless DP link is disabled/re-enabled
+ */
+ dc->link_srv->set_dpms_on(dc->current_state, pipe_ctx);
+ }
+
+ if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
+ bool should_program_abm = true;
+
+ // if otg funcs defined check if blanked before programming
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked)
+ if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
+ should_program_abm = false;
+
+ if (should_program_abm) {
+ if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
+ } else {
+ pipe_ctx->stream_res.abm->funcs->set_abm_level(
+ pipe_ctx->stream_res.abm, stream->abm_level);
+ }
+ }
+ }
+ }
+ }
+}
+
+static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
+{
+ if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
+ || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ && stream->ctx->dce_version >= DCN_VERSION_3_1)
+ return true;
+
+ if (stream->link->replay_settings.config.replay_supported)
+ return true;
+
+ if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
+ return true;
+
+ return false;
+}
+
+void dc_dmub_update_dirty_rect(struct dc *dc,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ struct dc_state *context)
+{
+ union dmub_rb_cmd cmd;
+ struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
+ unsigned int i, j;
+ unsigned int panel_inst = 0;
+
+ if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
+ return;
+
+ if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
+ return;
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
+ cmd.update_dirty_rect.header.sub_type = 0;
+ cmd.update_dirty_rect.header.payload_bytes =
+ sizeof(cmd.update_dirty_rect) -
+ sizeof(cmd.update_dirty_rect.header);
+ update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+ const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
+
+ if (!srf_updates[i].surface || !flip_addr)
+ continue;
+ /* Do not send in immediate flip mode */
+ if (srf_updates[i].surface->flip_immediate)
+ continue;
+
+ update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
+ memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
+ sizeof(flip_addr->dirty_rects));
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+
+ update_dirty_rect->panel_inst = panel_inst;
+ update_dirty_rect->pipe_idx = j;
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ }
+ }
+}
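+
+/*
+ * Hypothetical sketch of how a caller feeds dirty rectangles in through the
+ * flip address update (assuming dirty_rects is an array of struct rect, as
+ * the memcpy above implies; the rectangle values are illustrative and the
+ * surface address fields are omitted for brevity):
+ *
+ *	struct dc_flip_addrs flip_addr = { 0 };
+ *
+ *	flip_addr.dirty_rect_count = 1;
+ *	flip_addr.dirty_rects[0].x = 0;
+ *	flip_addr.dirty_rects[0].y = 0;
+ *	flip_addr.dirty_rects[0].width = 256;
+ *	flip_addr.dirty_rects[0].height = 256;
+ *	srf_updates[i].flip_addr = &flip_addr;
+ */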
+
+static void build_dmub_update_dirty_rect(
+ struct dc *dc,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ struct dc_state *context,
+ struct dc_dmub_cmd dc_dmub_cmd[],
+ unsigned int *dmub_cmd_count)
+{
+ union dmub_rb_cmd cmd;
+ struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
+ unsigned int i, j;
+ unsigned int panel_inst = 0;
+
+ if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
+ return;
+
+ if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
+ return;
+
+ memset(&cmd, 0x0, sizeof(cmd));
+ cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
+ cmd.update_dirty_rect.header.sub_type = 0;
+ cmd.update_dirty_rect.header.payload_bytes =
+ sizeof(cmd.update_dirty_rect) -
+ sizeof(cmd.update_dirty_rect.header);
+ update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+ const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
+
+ if (!srf_updates[i].surface || !flip_addr)
+ continue;
+ /* Do not send in immediate flip mode */
+ if (srf_updates[i].surface->flip_immediate)
+ continue;
+ update_dirty_rect->cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1;
+ update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
+ memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
+ sizeof(flip_addr->dirty_rects));
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+ update_dirty_rect->panel_inst = panel_inst;
+ update_dirty_rect->pipe_idx = j;
+ dc_dmub_cmd[*dmub_cmd_count].dmub_cmd = cmd;
+ dc_dmub_cmd[*dmub_cmd_count].wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT;
+ (*dmub_cmd_count)++;
+ }
+ }
+}
+
+
+/**
+ * build_dmub_cmd_list() - Build an array of DMCUB commands to be sent to DMCUB
+ *
+ * @dc: Current DC state
+ * @srf_updates: Array of surface updates
+ * @surface_count: Number of surfaces that have been updated
+ * @stream: Corresponding stream to be updated in the current flip
+ * @context: New DC state to be programmed
+ *
+ * @dc_dmub_cmd: Array of DMCUB commands to be sent to DMCUB
+ * @dmub_cmd_count: Count indicating the number of DMCUB commands in dc_dmub_cmd array
+ *
+ * This function builds an array of DMCUB commands so that they can all be
+ * collected up front and then sent while the OTG lock is acquired.
+ *
+ * Return: void
+ */
+static void build_dmub_cmd_list(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_state *context,
+ struct dc_dmub_cmd dc_dmub_cmd[],
+ unsigned int *dmub_cmd_count)
+{
+ // Initialize cmd count to 0
+ *dmub_cmd_count = 0;
+ build_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context, dc_dmub_cmd, dmub_cmd_count);
+}
+
+static void commit_planes_for_stream_fast(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type,
+ struct dc_state *context)
+{
+ int i, j;
+ struct pipe_ctx *top_pipe_to_program = NULL;
+ struct dc_stream_status *stream_status = NULL;
+
+ dc_exit_ips_for_hw_access(dc);
+
+ dc_z10_restore(dc);
+
+ top_pipe_to_program = resource_get_otg_master_for_stream(
+ &context->res_ctx,
+ stream);
+
+ if (!top_pipe_to_program)
+ return;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
+
+ if (dc->debug.visual_confirm)
+ dc_update_visual_confirm_color(dc, context, pipe);
+ }
+ }
+
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+ /*set logical flag for lock/unlock use*/
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+ if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
+ continue;
+ pipe_ctx->plane_state->triplebuffer_flips = false;
+ if (update_type == UPDATE_TYPE_FAST &&
+ dc->hwss.program_triplebuffer &&
+ !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
+ /* triple buffer for VUpdate only */
+ pipe_ctx->plane_state->triplebuffer_flips = true;
+ }
+ }
+ }
+
+ stream_status = dc_state_get_stream_status(context, stream);
+
+ build_dmub_cmd_list(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ context,
+ context->dc_dmub_cmd,
+ &(context->dmub_cmd_count));
+ hwss_build_fast_sequence(dc,
+ context->dc_dmub_cmd,
+ context->dmub_cmd_count,
+ context->block_sequence,
+ &(context->block_sequence_steps),
+ top_pipe_to_program,
+ stream_status,
+ context);
+ hwss_execute_sequence(dc,
+ context->block_sequence,
+ context->block_sequence_steps);
+ /* Clear update flags so next flip doesn't have redundant programming
+ * (if there's no stream update, the update flags are not cleared).
+ * Surface updates are cleared unconditionally at the beginning of each flip,
+ * so no need to clear here.
+ */
+ if (top_pipe_to_program->stream)
+ top_pipe_to_program->stream->update_flags.raw = 0;
+}
+
+static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
+{
+ /*
+ * This function calls HWSS to wait for any potentially double buffered
+ * operations to complete. It should be invoked as a preamble prior to
+ * full update programming before asserting any HW locks.
+ */
+ int pipe_idx;
+ int opp_inst;
+ int opp_count = dc->res_pool->res_cap->num_opp;
+ struct hubp *hubp;
+ int mpcc_inst;
+ const struct pipe_ctx *pipe_ctx;
+
+ for (pipe_idx = 0; pipe_idx < dc->res_pool->pipe_count; pipe_idx++) {
+ pipe_ctx = &dc_context->res_ctx.pipe_ctx[pipe_idx];
+
+ if (!pipe_ctx->stream)
+ continue;
+
+ if (pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear)
+ pipe_ctx->stream_res.tg->funcs->wait_drr_doublebuffer_pending_clear(pipe_ctx->stream_res.tg);
+
+ hubp = pipe_ctx->plane_res.hubp;
+ if (!hubp)
+ continue;
+
+ mpcc_inst = hubp->inst;
+ // MPCC inst is equal to pipe index in practice
+ for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
+ if ((dc->res_pool->opps[opp_inst] != NULL) &&
+ (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) {
+ dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
+ dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
+ break;
+ }
+ }
+ }
+ wait_for_odm_update_pending_complete(dc, dc_context);
+}
+
+static void commit_planes_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type,
+ struct dc_state *context)
+{
+ int i, j;
+ struct pipe_ctx *top_pipe_to_program = NULL;
+ bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
+ bool subvp_prev_use = false;
+ bool subvp_curr_use = false;
+ uint8_t current_stream_mask = 0;
+
+ // Once we apply the new subvp context to hardware it won't be in the
+ // dc->current_state anymore, so we have to cache it before we apply
+ // the new SubVP context
+ subvp_prev_use = false;
+ dc_exit_ips_for_hw_access(dc);
+
+ dc_z10_restore(dc);
+ if (update_type == UPDATE_TYPE_FULL)
+ wait_for_outstanding_hw_updates(dc, context);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
+
+ if (dc->debug.visual_confirm)
+ dc_update_visual_confirm_color(dc, context, pipe);
+ }
+ }
+
+ if (update_type == UPDATE_TYPE_FULL) {
+ dc_allow_idle_optimizations(dc, false);
+
+ if (get_seamless_boot_stream_count(context) == 0)
+ dc->hwss.prepare_bandwidth(dc, context);
+
+ if (dc->hwss.update_dsc_pg)
+ dc->hwss.update_dsc_pg(dc, context, false);
+
+ context_clock_trace(dc, context);
+ }
+
+ top_pipe_to_program = resource_get_otg_master_for_stream(
+ &context->res_ctx,
+ stream);
+ ASSERT(top_pipe_to_program != NULL);
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ // Check old context for SubVP
+ subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
+ if (subvp_prev_use)
+ break;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ subvp_curr_use = true;
+ break;
+ }
+ }
+
+ if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
+ struct pipe_ctx *mpcc_pipe;
+ struct pipe_ctx *odm_pipe;
+
+ for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
+ for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
+ }
+
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+ if (top_pipe_to_program &&
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ if (should_use_dmub_lock(stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_dig = 1;
+ inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
+
+ dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
+ true,
+ &hw_locks,
+ &inst_flags);
+ } else
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
+ top_pipe_to_program->stream_res.tg);
+ }
+
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
+ dc->hwss.interdependent_update_lock(dc, context, true);
+
+ } else {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
+ /* Lock the top pipe while updating plane addrs, since freesync requires
+ * plane addr update event triggers to be synchronized.
+ * top_pipe_to_program is expected to never be NULL
+ */
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
+ }
+
+ dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
+
+ // Stream updates
+ if (stream_update)
+ commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
+
+ if (surface_count == 0) {
+ /*
+ * In case of turning off screen, no need to program front end a second time.
+ * just return after program blank.
+ */
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
+ if (dc->hwss.program_front_end_for_ctx)
+ dc->hwss.program_front_end_for_ctx(dc, context);
+
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ } else {
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+ }
+ dc->hwss.post_unlock_program_front_end(dc, context);
+
+ if (update_type != UPDATE_TYPE_FAST)
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
+
+ /* Since phantom pipe programming is moved to post_unlock_program_front_end,
+ * move the SubVP lock to after the phantom pipes have been setup
+ */
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes,
+ NULL, subvp_prev_use);
+ return;
+ }
+
+ if (update_type != UPDATE_TYPE_FAST) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if ((dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP ||
+ dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH) &&
+ pipe_ctx->stream && pipe_ctx->plane_state) {
+ /* Only update visual confirm for SUBVP and Mclk switching here.
+ * The bar appears on all pipes, so we need to update the bar on all displays,
+ * so the information doesn't get stale.
+ */
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx,
+ pipe_ctx->plane_res.hubp->inst);
+ }
+ }
+ }
+
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+ /* set logical flag for lock/unlock use */
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+ if (!pipe_ctx->plane_state)
+ continue;
+ if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
+ continue;
+ pipe_ctx->plane_state->triplebuffer_flips = false;
+ if (update_type == UPDATE_TYPE_FAST &&
+ dc->hwss.program_triplebuffer != NULL &&
+ !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
+ /* triple buffer for VUpdate only */
+ pipe_ctx->plane_state->triplebuffer_flips = true;
+ }
+ }
+ if (update_type == UPDATE_TYPE_FULL) {
+ /* force vsync flip when reconfiguring pipes to prevent underflow */
+ plane_state->flip_immediate = false;
+ }
+ }
+
+ // Update Type FULL, Surface updates
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe_ctx->top_pipe &&
+ !pipe_ctx->prev_odm_pipe &&
+ should_update_pipe_for_stream(context, pipe_ctx, stream)) {
+ struct dc_stream_status *stream_status = NULL;
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ /* Full fe update */
+ if (update_type == UPDATE_TYPE_FAST)
+ continue;
+
+ ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
+ /* turn off triple buffer for full update */
+ dc->hwss.program_triplebuffer(
+ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ }
+ stream_status =
+ stream_get_status(context, pipe_ctx->stream);
+
+ if (dc->hwss.apply_ctx_for_surface)
+ dc->hwss.apply_ctx_for_surface(
+ dc, pipe_ctx->stream, stream_status->plane_count, context);
+ }
+ }
+ if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
+ dc->hwss.program_front_end_for_ctx(dc, context);
+ if (dc->debug.validate_dml_output) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
+ if (cur_pipe->stream == NULL)
+ continue;
+
+ cur_pipe->plane_res.hubp->funcs->validate_dml_output(
+ cur_pipe->plane_res.hubp, dc->ctx,
+ &context->res_ctx.pipe_ctx[i].rq_regs,
+ &context->res_ctx.pipe_ctx[i].dlg_regs,
+ &context->res_ctx.pipe_ctx[i].ttu_regs);
+ }
+ }
+ }
+
+ // Update Type FAST, Surface updates
+ if (update_type == UPDATE_TYPE_FAST) {
+ if (dc->hwss.set_flip_control_gsl)
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
+ continue;
+
+ if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
+ continue;
+
+ // GSL has to be used for flip immediate
+ dc->hwss.set_flip_control_gsl(pipe_ctx,
+ pipe_ctx->plane_state->flip_immediate);
+ }
+ }
+
+ /* Perform requested Updates */
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *plane_state = srf_updates[i].surface;
+
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
+ continue;
+
+ if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
+ continue;
+
+ /* program triple buffer after lock based on flip type */
+ if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
+ /* only enable triplebuffer for fast_update */
+ dc->hwss.program_triplebuffer(
+ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ }
+ if (pipe_ctx->plane_state->update_flags.bits.addr_update)
+ dc->hwss.update_plane_addr(dc, pipe_ctx);
+ }
+ }
+ }
+
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ } else {
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+ }
+
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+ if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VBLANK);
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+
+ if (should_use_dmub_lock(stream->link)) {
+ union dmub_hw_lock_flags hw_locks = { 0 };
+ struct dmub_hw_lock_inst_flags inst_flags = { 0 };
+
+ hw_locks.bits.lock_dig = 1;
+ inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
+
+ dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
+ false,
+ &hw_locks,
+ &inst_flags);
+ } else
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
+ top_pipe_to_program->stream_res.tg);
+ }
+
+ if (subvp_curr_use) {
+ /* If enabling subvp or transitioning from subvp->subvp, enable the
+ * phantom streams before we program front end for the phantom pipes.
+ */
+ if (update_type != UPDATE_TYPE_FAST) {
+ if (dc->hwss.enable_phantom_streams)
+ dc->hwss.enable_phantom_streams(dc, context);
+ }
+ }
+
+ if (update_type != UPDATE_TYPE_FAST)
+ dc->hwss.post_unlock_program_front_end(dc, context);
+
+ if (subvp_prev_use && !subvp_curr_use) {
+ /* If disabling subvp, disable phantom streams after front end
+ * programming has completed (we turn on phantom OTG in order
+ * to complete the plane disable for phantom pipes).
+ */
+
+ if (dc->hwss.disable_phantom_streams)
+ dc->hwss.disable_phantom_streams(dc, context);
+ }
+
+ if (update_type != UPDATE_TYPE_FAST)
+ if (dc->hwss.commit_subvp_config)
+ dc->hwss.commit_subvp_config(dc, context);
+ /* Since phantom pipe programming is moved to post_unlock_program_front_end,
+ * move the SubVP lock to after the phantom pipes have been setup
+ */
+ if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+ } else {
+ if (dc->hwss.subvp_pipe_control_lock)
+ dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
+ }
+
+ // Fire manual trigger only when bottom plane is flipped
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe_ctx->plane_state)
+ continue;
+
+ if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
+ !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
+ !pipe_ctx->plane_state->update_flags.bits.addr_update ||
+ pipe_ctx->plane_state->skip_manual_trigger)
+ continue;
+
+ if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
+ pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
+ }
+
+ current_stream_mask = get_stream_mask(dc, context);
+ if (current_stream_mask != context->stream_mask) {
+ context->stream_mask = current_stream_mask;
+ dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, current_stream_mask);
+ }
+}
+
+/**
+ * could_mpcc_tree_change_for_active_pipes - Check if an OPP associated with MPCC might change
+ *
+ * @dc: Used to get the current state status
+ * @stream: Target stream, from which we want to remove the attached planes
+ * @srf_updates: Array of surface updates
+ * @surface_count: Number of surface updates
+ * @is_plane_addition: [out] Set to true if it is a plane addition case
+ *
+ * DCN32x and newer support a feature named Dynamic ODM which can conflict with
+ * the MPO if used simultaneously in some specific configurations (e.g.,
+ * 4k@144). This function checks if the incoming context requires applying a
+ * transition state with extra pipe splitting and ODM disabled, to work
+ * around this hardware limitation and avoid this edge case. If the OPP
+ * associated with an MPCC might change due to plane additions, this function
+ * returns true.
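+ *
+ * For example (illustrative): with dynamic ODM 2:1 active on a 4k@144
+ * stream, adding or removing an MPO plane can change which OPP an MPCC
+ * feeds, which is why a minimal transition state is inserted first.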
+ *
+ * Return:
+ * Return true if OPP and MPCC might change, otherwise, return false.
+ */
+static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ bool *is_plane_addition)
+{
+ struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
+ bool force_minimal_pipe_splitting = false;
+ bool subvp_active = false;
+ uint32_t i;
+
+ *is_plane_addition = false;
+
+ if (cur_stream_status &&
+ dc->current_state->stream_count > 0 &&
+ dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
+ /* determine if minimal transition is required due to MPC */
+ if (surface_count > 0) {
+ if (cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+ }
+
+ if (cur_stream_status &&
+ dc->current_state->stream_count == 1 &&
+ dc->debug.enable_single_display_2to1_odm_policy) {
+ /* determine if minimal transition is required due to dynamic ODM */
+ if (surface_count > 0) {
+ if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
+ subvp_active = true;
+ break;
+ }
+ }
+
+ /* For SubVP when adding or removing planes we need to add a minimal transition
+ * (even when disabling all planes). Whenever disabling a phantom pipe, we
+ * must use the minimal transition path to disable the pipe correctly.
+ *
+ * We want to use the minimal transition whenever subvp is active, not only if
+ * a plane is being added / removed from a subvp stream (MPO plane can be added
+ * to a DRR pipe of SubVP + DRR config, in which case we still want to run through
+ * a min transition to disable subvp).
+ */
+ if (cur_stream_status && subvp_active) {
+ /* determine if minimal transition is required due to SubVP */
+ if (cur_stream_status->plane_count > surface_count) {
+ force_minimal_pipe_splitting = true;
+ } else if (cur_stream_status->plane_count < surface_count) {
+ force_minimal_pipe_splitting = true;
+ *is_plane_addition = true;
+ }
+ }
+
+ return force_minimal_pipe_splitting;
+}
+
+/* Snapshot of the debug/policy knobs that get overridden for a minimal
+ * transition, so restore_minimal_pipe_split_policy() can put them back.
+ */
+struct pipe_split_policy_backup {
+ bool dynamic_odm_policy; /* enable_single_display_2to1_odm_policy */
+ bool subvp_policy; /* force_disable_subvp */
+ enum pipe_split_policy mpc_policy; /* pipe_split_policy */
+ char force_odm[MAX_PIPES]; /* per-stream force_odm_combine_segments */
+};
+
+static void backup_and_set_minimal_pipe_split_policy(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_split_policy_backup *policy)
+{
+ int i;
+
+ if (!dc->config.is_vmin_only_asic) {
+ policy->mpc_policy = dc->debug.pipe_split_policy;
+ dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+ }
+ policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
+ dc->debug.enable_single_display_2to1_odm_policy = false;
+ policy->subvp_policy = dc->debug.force_disable_subvp;
+ dc->debug.force_disable_subvp = true;
+ for (i = 0; i < context->stream_count; i++) {
+ policy->force_odm[i] = context->streams[i]->debug.force_odm_combine_segments;
+ context->streams[i]->debug.force_odm_combine_segments = 0;
+ }
+}
+
+static void restore_minimal_pipe_split_policy(struct dc *dc,
+ struct dc_state *context,
+ struct pipe_split_policy_backup *policy)
+{
+ uint8_t i;
+
+ if (!dc->config.is_vmin_only_asic)
+ dc->debug.pipe_split_policy = policy->mpc_policy;
+ dc->debug.enable_single_display_2to1_odm_policy =
+ policy->dynamic_odm_policy;
+ dc->debug.force_disable_subvp = policy->subvp_policy;
+ for (i = 0; i < context->stream_count; i++)
+ context->streams[i]->debug.force_odm_combine_segments = policy->force_odm[i];
+}
+
+static void release_minimal_transition_state(struct dc *dc,
+ struct dc_state *minimal_transition_context,
+ struct dc_state *base_context,
+ struct pipe_split_policy_backup *policy)
+{
+ restore_minimal_pipe_split_policy(dc, base_context, policy);
+ dc_state_release(minimal_transition_context);
+}
+
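+/* Clear flip_immediate on every plane in the context so the minimal
+ * transition commits with vsync flips only; create_minimal_transition_state()
+ * relies on this to prevent underflow while pipes are reconfigured.
+ */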
+static void force_vsync_flip_in_minimal_transition_context(struct dc_state *context)
+{
+ uint8_t i;
+ int j;
+ struct dc_stream_status *stream_status;
+
+ for (i = 0; i < context->stream_count; i++) {
+ stream_status = &context->stream_status[i];
+
+ for (j = 0; j < stream_status->plane_count; j++)
+ stream_status->plane_states[j]->flip_immediate = false;
+ }
+}
+
+static struct dc_state *create_minimal_transition_state(struct dc *dc,
+ struct dc_state *base_context, struct pipe_split_policy_backup *policy)
+{
+ struct dc_state *minimal_transition_context = NULL;
+
+ minimal_transition_context = dc_state_create_copy(base_context);
+ if (!minimal_transition_context)
+ return NULL;
+
+ backup_and_set_minimal_pipe_split_policy(dc, base_context, policy);
+ /* commit minimal state */
+ if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
+ /* prevent underflow and corruption when reconfiguring pipes */
+ force_vsync_flip_in_minimal_transition_context(minimal_transition_context);
+ } else {
+ /*
+ * This should never happen, minimal transition state should
+ * always be validated first before adding pipe split features.
+ */
+ release_minimal_transition_state(dc, minimal_transition_context, base_context, policy);
+ BREAK_TO_DEBUGGER();
+ minimal_transition_context = NULL;
+ }
+ return minimal_transition_context;
+}
+
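+/* A transition through an intermediate state is seamless only if both hops,
+ * initial -> intermediate and intermediate -> final, are seamless.
+ */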
+static bool is_pipe_topology_transition_seamless_with_intermediate_step(
+ struct dc *dc,
+ struct dc_state *initial_state,
+ struct dc_state *intermediate_state,
+ struct dc_state *final_state)
+{
+ return dc->hwss.is_pipe_topology_transition_seamless(dc, initial_state,
+ intermediate_state) &&
+ dc->hwss.is_pipe_topology_transition_seamless(dc,
+ intermediate_state, final_state);
+}
+
+static void swap_and_release_current_context(struct dc *dc,
+ struct dc_state *new_context, struct dc_stream_state *stream)
+{
+ int i;
+ struct dc_state *old = dc->current_state;
+ struct pipe_ctx *pipe_ctx;
+
+ /* Since memory free requires an elevated IRQ level, an interrupt
+ * request is generated by the free. If this happens between freeing
+ * and reassigning the context, our vsync interrupt will call into dc
+ * and cause memory corruption. Hence, we first reassign the context,
+ * then free the old context.
+ */
+ dc->current_state = new_context;
+ dc_state_release(old);
+
+ // clear any forced full updates
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx = &new_context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
+ pipe_ctx->plane_state->force_full_update = false;
+ }
+}
+
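+/* Build surface updates that simply re-reference the stream's currently
+ * committed planes without changing any of their properties.
+ */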
+static int initialize_empty_surface_updates(
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates)
+{
+ struct dc_stream_status *status = dc_stream_get_status(stream);
+ int i;
+
+ if (!status)
+ return 0;
+
+ for (i = 0; i < status->plane_count; i++)
+ srf_updates[i].surface = status->plane_states[i];
+
+ return status->plane_count;
+}
+
+static bool commit_minimal_transition_based_on_new_context(struct dc *dc,
+ struct dc_state *new_context,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ int surface_count)
+{
+ bool success = false;
+ struct pipe_split_policy_backup policy;
+ struct dc_state *intermediate_context =
+ create_minimal_transition_state(dc, new_context,
+ &policy);
+
+ if (intermediate_context) {
+ if (is_pipe_topology_transition_seamless_with_intermediate_step(
+ dc,
+ dc->current_state,
+ intermediate_context,
+ new_context)) {
+ DC_LOG_DC("commit minimal transition state: base = new state\n");
+ commit_planes_for_stream(dc, srf_updates,
+ surface_count, stream, NULL,
+ UPDATE_TYPE_FULL, intermediate_context);
+ swap_and_release_current_context(
+ dc, intermediate_context, stream);
+ dc_state_retain(dc->current_state);
+ success = true;
+ }
+ release_minimal_transition_state(
+ dc, intermediate_context, new_context, &policy);
+ }
+ return success;
+}
+
+static bool commit_minimal_transition_based_on_current_context(struct dc *dc,
+ struct dc_state *new_context, struct dc_stream_state *stream)
+{
+ bool success = false;
+ struct pipe_split_policy_backup policy;
+ struct dc_state *intermediate_context;
+ struct dc_state *old_current_state = dc->current_state;
+ struct dc_surface_update srf_updates[MAX_SURFACE_NUM] = {0};
+ int surface_count;
+
+ /*
+ * Both current and new contexts share the same stream and plane state
+ * pointers. When new context is validated, stream and planes get
+ * populated with new updates such as new plane addresses. This makes
+ * the current context no longer valid because stream and planes are
+ * modified from the original. We back up the current stream and plane
+ * states into scratch space whenever we populate a new context, so we can
+ * restore the original values here by calling the restore function. This
+ * brings back the original stream and plane states associated with the
+ * current state.
+ */
+ restore_planes_and_stream_state(&dc->scratch.current_state, stream);
+ dc_state_retain(old_current_state);
+ intermediate_context = create_minimal_transition_state(dc,
+ old_current_state, &policy);
+
+ if (intermediate_context) {
+ if (is_pipe_topology_transition_seamless_with_intermediate_step(
+ dc,
+ dc->current_state,
+ intermediate_context,
+ new_context)) {
+ DC_LOG_DC("commit minimal transition state: base = current state\n");
+ surface_count = initialize_empty_surface_updates(
+ stream, srf_updates);
+ commit_planes_for_stream(dc, srf_updates,
+ surface_count, stream, NULL,
+ UPDATE_TYPE_FULL, intermediate_context);
+ swap_and_release_current_context(
+ dc, intermediate_context, stream);
+ dc_state_retain(dc->current_state);
+ success = true;
+ }
+ release_minimal_transition_state(dc, intermediate_context,
+ old_current_state, &policy);
+ }
+ dc_state_release(old_current_state);
+ /*
+ * Restore stream and plane states back to the values associated with
+ * new context.
+ */
+ restore_planes_and_stream_state(&dc->scratch.new_state, stream);
+ return success;
+}
+
+/**
+ * commit_minimal_transition_state_in_dc_update - Commit a minimal state based
+ * on current or new context
+ *
+ * @dc: DC structure, used to get the current state
+ * @new_context: New context
+ * @stream: Stream getting the update for the flip
+ * @srf_updates: Surface updates
+ * @surface_count: Number of surfaces
+ *
+ * The function takes in current state and new state and determine a minimal
+ * transition state as the intermediate step which could make the transition
+ * between current and new states seamless. If found, it will commit the minimal
+ * transition state and update current state to this minimal transition state
+ * and return true, if not, it will return false.
+ *
+ * Return:
+ * Return True if the minimal transition succeeded, false otherwise
+ */
+static bool commit_minimal_transition_state_in_dc_update(struct dc *dc,
+ struct dc_state *new_context,
+ struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
+ int surface_count)
+{
+ bool success = commit_minimal_transition_based_on_new_context(
+ dc, new_context, stream, srf_updates,
+ surface_count);
+ if (!success)
+ success = commit_minimal_transition_based_on_current_context(dc,
+ new_context, stream);
+ if (!success)
+ DC_LOG_ERROR("Fail to commit a seamless minimal transition state between current and new states.\nThis pipe topology update is non-seamless!\n");
+ return success;
+}
+
+/**
+ * commit_minimal_transition_state - Create a transition pipe split state
+ *
+ * @dc: Used to get the current state status
+ * @transition_base_context: New transition state
+ *
+ * In some specific configurations, such as pipe split on multi-display with
+ * MPO and/or Dynamic ODM, removing a plane may cause unsupported pipe
+ * programming when moving to new planes. To mitigate those types of problems,
+ * this function adds a transition state that minimizes pipe usage before
+ * programming the new configuration. When adding a new plane, the current
+ * state requires the least pipes, so it is applied without splitting. When
+ * removing a plane, the new state requires the least pipes, so it is applied
+ * without splitting.
+ *
+ * Return:
+ * Return false if something is wrong in the transition state.
+ */
+static bool commit_minimal_transition_state(struct dc *dc,
+ struct dc_state *transition_base_context)
+{
+ struct dc_state *transition_context;
+ struct pipe_split_policy_backup policy;
+ enum dc_status ret = DC_ERROR_UNEXPECTED;
+ unsigned int i, j;
+ unsigned int pipe_in_use = 0;
+ bool subvp_in_use = false;
+ bool odm_in_use = false;
+
+ /* check current pipes in use*/
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state)
+ pipe_in_use++;
+ }
+
+ /* If SubVP is enabled and we are adding or removing planes from any main subvp
+ * pipe, we must use the minimal transition.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
+ subvp_in_use = true;
+ break;
+ }
+ }
+
+ /* If ODM is enabled and we are adding or removing planes from any ODM
+ * pipe, we must use the minimal transition.
+ */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
+
+ if (resource_is_pipe_type(pipe, OTG_MASTER)) {
+ odm_in_use = resource_get_odm_slice_count(pipe) > 1;
+ break;
+ }
+ }
+
+ /* If the OS adds a new surface while all pipes are already used for ODM
+ * combine or MPC split, we must use commit_minimal_transition_state to
+ * transition safely. When the OS exits MPO and goes back to using ODM
+ * combine or MPC split across all pipes, we must call it again. Otherwise
+ * return true to skip.
+ *
+ * This reduces the scenarios that hit dc_commit_state_no_check during a
+ * flip, especially entering/exiting MPO while DCN still has enough
+ * resources.
+ */
+ if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use)
+ return true;
+
+ DC_LOG_DC("%s base = %s state, reason = %s\n", __func__,
+ dc->current_state == transition_base_context ? "current" : "new",
+ subvp_in_use ? "Subvp In Use" :
+ odm_in_use ? "ODM in Use" :
+ dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" :
+ "Unknown");
+
+ dc_state_retain(transition_base_context);
+ transition_context = create_minimal_transition_state(dc,
+ transition_base_context, &policy);
+ if (transition_context) {
+ ret = dc_commit_state_no_check(dc, transition_context);
+ release_minimal_transition_state(dc, transition_context, transition_base_context, &policy);
+ }
+ dc_state_release(transition_base_context);
+
+ if (ret != DC_OK) {
+ /* this should never happen */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ /* force full surface update */
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
+ dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
+ }
+ }
+
+ return true;
+}
+
+static void populate_fast_updates(struct dc_fast_update *fast_update,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_update *stream_update)
+{
+ int i = 0;
+
+ if (stream_update) {
+ fast_update[0].out_transfer_func = stream_update->out_transfer_func;
+ fast_update[0].output_csc_transform = stream_update->output_csc_transform;
+ }
+
+ for (i = 0; i < surface_count; i++) {
+ fast_update[i].flip_addr = srf_updates[i].flip_addr;
+ fast_update[i].gamma = srf_updates[i].gamma;
+ fast_update[i].gamut_remap_matrix = srf_updates[i].gamut_remap_matrix;
+ fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
+ fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
+ }
+}
+
+static bool fast_updates_exist(struct dc_fast_update *fast_update, int surface_count)
+{
+ int i;
+
+ if (fast_update[0].out_transfer_func ||
+ fast_update[0].output_csc_transform)
+ return true;
+
+ for (i = 0; i < surface_count; i++) {
+ if (fast_update[i].flip_addr ||
+ fast_update[i].gamma ||
+ fast_update[i].gamut_remap_matrix ||
+ fast_update[i].input_csc_color_matrix ||
+ fast_update[i].coeff_reduction_factor)
+ return true;
+ }
+
+ return false;
+}
+
+static bool full_update_required(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ struct dc_stream_state *stream)
+{
+ int i;
+ struct dc_stream_status *stream_status;
+ const struct dc_state *context = dc->current_state;
+
+ for (i = 0; i < surface_count; i++) {
+ if (srf_updates &&
+ (srf_updates[i].plane_info ||
+ srf_updates[i].scaling_info ||
+ (srf_updates[i].hdr_mult.value &&
+ srf_updates[i].hdr_mult.value != srf_updates[i].surface->hdr_mult.value) ||
+ srf_updates[i].in_transfer_func ||
+ srf_updates[i].func_shaper ||
+ srf_updates[i].lut3d_func ||
+ srf_updates[i].surface->force_full_update ||
+ (srf_updates[i].flip_addr &&
+ srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
+ !is_surface_in_context(context, srf_updates[i].surface)))
+ return true;
+ }
+
+ if (stream_update &&
+ (((stream_update->src.height != 0 && stream_update->src.width != 0) ||
+ (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
+ stream_update->integer_scaling_update) ||
+ stream_update->hdr_static_metadata ||
+ stream_update->abm_level ||
+ stream_update->periodic_interrupt ||
+ stream_update->vrr_infopacket ||
+ stream_update->vsc_infopacket ||
+ stream_update->vsp_infopacket ||
+ stream_update->hfvsif_infopacket ||
+ stream_update->vtem_infopacket ||
+ stream_update->adaptive_sync_infopacket ||
+ stream_update->dpms_off ||
+ stream_update->allow_freesync ||
+ stream_update->vrr_active_variable ||
+ stream_update->vrr_active_fixed ||
+ stream_update->gamut_remap ||
+ stream_update->output_color_space ||
+ stream_update->dither_option ||
+ stream_update->wb_update ||
+ stream_update->dsc_config ||
+ stream_update->mst_bw_update ||
+ stream_update->func_shaper ||
+ stream_update->lut3d_func ||
+ stream_update->pending_test_pattern ||
+ stream_update->crtc_timing_adjust))
+ return true;
+
+ if (stream) {
+ stream_status = dc_stream_get_status(stream);
+ if (stream_status == NULL || stream_status->plane_count != surface_count)
+ return true;
+ }
+ if (dc->idle_optimizations_allowed)
+ return true;
+
+ return false;
+}
+
+static bool fast_update_only(struct dc *dc,
+ struct dc_fast_update *fast_update,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_update *stream_update,
+ struct dc_stream_state *stream)
+{
+ return fast_updates_exist(fast_update, surface_count)
+ && !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
+}
+
+static bool update_planes_and_stream_v1(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_state *state)
+{
+ const struct dc_stream_status *stream_status;
+ enum surface_update_type update_type;
+ struct dc_state *context;
+ struct dc_context *dc_ctx = dc->ctx;
+ int i, j;
+ struct dc_fast_update fast_update[MAX_SURFACES] = {0};
+
+ dc_exit_ips_for_hw_access(dc);
+
+ populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
+ stream_status = dc_stream_get_status(stream);
+ context = dc->current_state;
+
+ update_type = dc_check_update_surfaces_for_stream(
+ dc, srf_updates, surface_count, stream_update, stream_status);
+
+ if (update_type >= UPDATE_TYPE_FULL) {
+ /* initialize scratch memory for building context */
+ context = dc_state_create_copy(state);
+ if (context == NULL) {
+ DC_ERROR("Failed to allocate new validate context!\n");
+ return false;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
+ new_pipe->plane_state->force_full_update = true;
+ }
+ } else if (update_type == UPDATE_TYPE_FAST) {
+ /*
+ * Previous frame finished and HW is ready for optimization.
+ */
+ dc_post_update_surfaces_to_stream(dc);
+ }
+
+ for (i = 0; i < surface_count; i++) {
+ struct dc_plane_state *surface = srf_updates[i].surface;
+
+ copy_surface_update_to_plane(surface, &srf_updates[i]);
+
+ if (update_type >= UPDATE_TYPE_MED) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx =
+ &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->plane_state != surface)
+ continue;
+
+ resource_build_scaling_params(pipe_ctx);
+ }
+ }
+ }
+
+ copy_stream_update_to_stream(dc, context, stream, stream_update);
+
+ if (update_type >= UPDATE_TYPE_FULL) {
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ DC_ERROR("Mode validation failed for stream update!\n");
+ dc_state_release(context);
+ return false;
+ }
+ }
+
+ TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
+
+ if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
+ !dc->debug.enable_legacy_fast_update) {
+ commit_planes_for_stream_fast(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+ } else {
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+ }
+ /* update current state */
+ if (dc->current_state != context) {
+ struct dc_state *old = dc->current_state;
+
+ dc->current_state = context;
+ dc_state_release(old);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
+ pipe_ctx->plane_state->force_full_update = false;
+ }
+ }
+
+ /* Legacy optimization path for DCE. */
+ if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
+ dc_post_update_surfaces_to_stream(dc);
+ TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+ }
+ return true;
+}
+
+static bool update_planes_and_stream_v2(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update)
+{
+ struct dc_state *context;
+ enum surface_update_type update_type;
+ struct dc_fast_update fast_update[MAX_SURFACES] = {0};
+
+ /* In cases where MPO and split or ODM are used transitions can
+ * cause underflow. Apply stream configuration with minimal pipe
+ * split first to avoid unsupported transitions for active pipes.
+ */
+ bool force_minimal_pipe_splitting = false;
+ bool is_plane_addition = false;
+ bool is_fast_update_only;
+
+ populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
+ is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
+ surface_count, stream_update, stream);
+ force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
+ dc,
+ stream,
+ srf_updates,
+ surface_count,
+ &is_plane_addition);
+
+ /* on plane addition, minimal state is the current one */
+ if (force_minimal_pipe_splitting && is_plane_addition &&
+ !commit_minimal_transition_state(dc, dc->current_state))
+ return false;
+
+ if (!update_planes_and_stream_state(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ &update_type,
+ &context))
+ return false;
+
+ /* on plane removal, minimal state is the new one */
+ if (force_minimal_pipe_splitting && !is_plane_addition) {
+ if (!commit_minimal_transition_state(dc, context)) {
+ dc_state_release(context);
+ return false;
+ }
+ update_type = UPDATE_TYPE_FULL;
+ }
+
+ if (dc->hwss.is_pipe_topology_transition_seamless &&
+ !dc->hwss.is_pipe_topology_transition_seamless(
+ dc, dc->current_state, context))
+ commit_minimal_transition_state_in_dc_update(dc, context, stream,
+ srf_updates, surface_count);
+
+ if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
+ commit_planes_for_stream_fast(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+ } else {
+ if (!stream_update &&
+ dc->hwss.is_pipe_topology_transition_seamless &&
+ !dc->hwss.is_pipe_topology_transition_seamless(
+ dc, dc->current_state, context)) {
+ DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
+ BREAK_TO_DEBUGGER();
+ }
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ context);
+ }
+ if (dc->current_state != context)
+ swap_and_release_current_context(dc, context, stream);
+ return true;
+}
+
+static void commit_planes_and_stream_update_on_current_context(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type)
+{
+ struct dc_fast_update fast_update[MAX_SURFACES] = {0};
+
+ ASSERT(update_type < UPDATE_TYPE_FULL);
+ populate_fast_updates(fast_update, srf_updates, surface_count,
+ stream_update);
+ if (fast_update_only(dc, fast_update, srf_updates, surface_count,
+ stream_update, stream) &&
+ !dc->debug.enable_legacy_fast_update)
+ commit_planes_for_stream_fast(dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ dc->current_state);
+ else
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ dc->current_state);
+}
+
+static void commit_planes_and_stream_update_with_new_context(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ enum surface_update_type update_type,
+ struct dc_state *new_context)
+{
+ ASSERT(update_type >= UPDATE_TYPE_FULL);
+ if (!dc->hwss.is_pipe_topology_transition_seamless(dc,
+ dc->current_state, new_context))
+ /*
+ * It is required by the feature design that all pipe topologies
+ * using extra free pipes for power saving purposes such as
+ * dynamic ODM or SubVp shall only be enabled when it can be
+ * transitioned seamlessly to AND from its minimal transition
+ * state. A minimal transition state is defined as the same dc
+ * state but with all power saving features disabled. So it uses
+ * the minimum pipe topology. When we can't seamlessly
+ * transition from state A to state B, we will insert the
+ * minimal transition state A' or B' in between so seamless
+ * transition between A and B can be made possible.
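+ *
+ * For example (illustrative): if state A uses dynamic ODM combine and
+ * state B uses SubVP, and A -> B is not seamless, we transition through
+ * a minimal state A' or B' with those features disabled.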
+ */
+ commit_minimal_transition_state_in_dc_update(dc, new_context,
+ stream, srf_updates, surface_count);
+
+ commit_planes_for_stream(
+ dc,
+ srf_updates,
+ surface_count,
+ stream,
+ stream_update,
+ update_type,
+ new_context);
+}
+
+static bool update_planes_and_stream_v3(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update)
+{
+ struct dc_state *new_context;
+ enum surface_update_type update_type;
+
+ /*
+ * When this function returns true and new_context is not equal to
+ * current state, the function allocates and validates a new dc state
+ * and assigns it to new_context. The function expects that the caller
+ * is responsible to free this memory when new_context is no longer
+ * used. We swap current with new context and free current instead. So
+ * new_context's memory will live until the next full update after it is
+ * replaced by a newer context. Refer to the use of
+ * swap_and_release_current_context below.
+ */
+ if (!update_planes_and_stream_state(dc, srf_updates, surface_count,
+ stream, stream_update, &update_type,
+ &new_context))
+ return false;
+
+ if (new_context == dc->current_state) {
+ commit_planes_and_stream_update_on_current_context(dc,
+ srf_updates, surface_count, stream,
+ stream_update, update_type);
+ } else {
+ commit_planes_and_stream_update_with_new_context(dc,
+ srf_updates, surface_count, stream,
+ stream_update, update_type, new_context);
+ swap_and_release_current_context(dc, new_context, stream);
+ }
+
+ return true;
+}
+
+bool dc_update_planes_and_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates, int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update)
+{
+ dc_exit_ips_for_hw_access(dc);
+ /*
+ * update planes and stream version 3 separates FULL and FAST updates
+ * into their own sequences. It aims to clean up frequent checks for
+ * update type, which result in unnecessary branching in the logic flow. It
+ * also adds a new commit minimal transition sequence, which detects the
+ * need for a minimal transition based on the actual comparison of current
+ * and new states instead of "predicting" it based on per-feature software
+ * policy, i.e. could_mpcc_tree_change_for_active_pipes. The new commit
+ * minimal transition sequence is made universal to any power saving
+ * features that would use extra free pipes such as Dynamic ODM/MPC
+ * Combine, MPO or SubVp. Therefore there is no longer a need to
+ * specially handle compatibility problems with transitions among those
+ * features as they are now transparent to the new sequence.
+ */
+ if (dc->ctx->dce_version > DCN_VERSION_3_51)
+ return update_planes_and_stream_v3(dc, srf_updates,
+ surface_count, stream, stream_update);
+ return update_planes_and_stream_v2(dc, srf_updates,
+ surface_count, stream, stream_update);
+}
+
+void dc_commit_updates_for_stream(struct dc *dc,
+ struct dc_surface_update *srf_updates,
+ int surface_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_state *state)
+{
+ dc_exit_ips_for_hw_access(dc);
+ /* TODO: Since change commit sequence can have a huge impact,
+ * we decided to only enable it for DCN3x. However, as soon as
+ * we get more confident about this change we'll need to enable
+ * the new sequence for all ASICs.
+ */
+ if (dc->ctx->dce_version > DCN_VERSION_3_51) {
+ update_planes_and_stream_v3(dc, srf_updates, surface_count,
+ stream, stream_update);
+ return;
+ }
+ if (dc->ctx->dce_version >= DCN_VERSION_3_2) {
+ update_planes_and_stream_v2(dc, srf_updates, surface_count,
+ stream, stream_update);
+ return;
+ }
+ update_planes_and_stream_v1(dc, srf_updates, surface_count, stream,
+ stream_update, state);
+}
+
+uint8_t dc_get_current_stream_count(struct dc *dc)
+{
+ return dc->current_state->stream_count;
+}
+
+struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
+{
+ if (i < dc->current_state->stream_count)
+ return dc->current_state->streams[i];
+ return NULL;
+}
+
+enum dc_irq_source dc_interrupt_to_irq_source(
+ struct dc *dc,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
+}
+
+/*
+ * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
+ */
+bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
+{
+ if (dc == NULL)
+ return false;
+
+ return dal_irq_service_set(dc->res_pool->irqs, src, enable);
+}
+
+void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
+{
+ dal_irq_service_ack(dc->res_pool->irqs, src);
+}
+
+void dc_power_down_on_boot(struct dc *dc)
+{
+ if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
+ dc->hwss.power_down_on_boot)
+ dc->hwss.power_down_on_boot(dc);
+}
+
+void dc_set_power_state(
+ struct dc *dc,
+ enum dc_acpi_cm_power_state power_state)
+{
+ if (!dc->current_state)
+ return;
+
+ switch (power_state) {
+ case DC_ACPI_CM_POWER_STATE_D0:
+ dc_state_construct(dc, dc->current_state);
+
+ dc_exit_ips_for_hw_access(dc);
+
+ dc_z10_restore(dc);
+
+ dc->hwss.init_hw(dc);
+
+ if (dc->hwss.init_sys_ctx != NULL &&
+ dc->vm_pa_config.valid) {
+ dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
+ }
+
+ break;
+ default:
+ ASSERT(dc->current_state->stream_count == 0);
+
+ dc_state_destruct(dc->current_state);
+
+ break;
+ }
+}
+
+void dc_resume(struct dc *dc)
+{
+ uint32_t i;
+
+ for (i = 0; i < dc->link_count; i++)
+ dc->link_srv->resume(dc->links[i]);
+}
+
+bool dc_is_dmcu_initialized(struct dc *dc)
+{
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+
+ if (dmcu)
+ return dmcu->funcs->is_dmcu_initialized(dmcu);
+ return false;
+}
+
+void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
+{
+ info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
+ info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
+ info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
+ info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
+ info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
+ info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
+ info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
+ info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
+ info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
+}
+
+enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
+{
+ if (dc->hwss.set_clock)
+ return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
+ return DC_ERROR_UNEXPECTED;
+}
+
+void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
+{
+ if (dc->hwss.get_clock)
+ dc->hwss.get_clock(dc, clock_type, clock_cfg);
+}
+
+/* enable/disable eDP PSR without specifying a stream for eDP */
+bool dc_set_psr_allow_active(struct dc *dc, bool enable)
+{
+ int i;
+ bool allow_active;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ struct dc_link *link;
+ struct dc_stream_state *stream = dc->current_state->streams[i];
+
+ link = stream->link;
+ if (!link)
+ continue;
+
+ if (link->psr_settings.psr_feature_enabled) {
+ if (enable && !link->psr_settings.psr_allow_active) {
+ allow_active = true;
+ if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
+ return false;
+ } else if (!enable && link->psr_settings.psr_allow_active) {
+ allow_active = false;
+ if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+/* enable/disable eDP Replay without specifying a stream for eDP */
+bool dc_set_replay_allow_active(struct dc *dc, bool active)
+{
+ int i;
+ bool allow_active;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ struct dc_link *link;
+ struct dc_stream_state *stream = dc->current_state->streams[i];
+
+ link = stream->link;
+ if (!link)
+ continue;
+
+ if (link->replay_settings.replay_feature_enabled) {
+ if (active && !link->replay_settings.replay_allow_active) {
+ allow_active = true;
+ if (!dc_link_set_replay_allow_active(link, &allow_active,
+ false, false, NULL))
+ return false;
+ } else if (!active && link->replay_settings.replay_allow_active) {
+ allow_active = false;
+ if (!dc_link_set_replay_allow_active(link, &allow_active,
+ true, false, NULL))
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+void dc_allow_idle_optimizations_internal(struct dc *dc, bool allow, char const *caller_name)
+{
+ if (dc->debug.disable_idle_power_optimizations)
+ return;
+
+ if (allow != dc->idle_optimizations_allowed)
+ DC_LOG_IPS("%s: allow_idle old=%d new=%d (caller=%s)\n", __func__,
+ dc->idle_optimizations_allowed, allow, caller_name);
+
+ if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
+ return;
+
+ if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
+ if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
+ return;
+
+ if (allow == dc->idle_optimizations_allowed)
+ return;
+
+ if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
+ dc->idle_optimizations_allowed = allow;
+}
+
+void dc_exit_ips_for_hw_access_internal(struct dc *dc, const char *caller_name)
+{
+ if (dc->caps.ips_support)
+ dc_allow_idle_optimizations_internal(dc, false, caller_name);
+}
+
+bool dc_dmub_is_ips_idle_state(struct dc *dc)
+{
+ if (dc->debug.disable_idle_power_optimizations)
+ return false;
+
+ if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
+ return false;
+
+ if (!dc->ctx->dmub_srv)
+ return false;
+
+ return dc->ctx->dmub_srv->idle_allowed;
+}
+
+/* set min and max memory clock to lowest and highest DPM level, respectively */
+void dc_unlock_memory_clock_frequency(struct dc *dc)
+{
+ if (dc->clk_mgr->funcs->set_hard_min_memclk)
+ dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
+
+ if (dc->clk_mgr->funcs->set_hard_max_memclk)
+ dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+}
+
+/* set min memory clock to the min required for current mode, max to maxDPM */
+void dc_lock_memory_clock_frequency(struct dc *dc)
+{
+ if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
+ dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
+
+ if (dc->clk_mgr->funcs->set_hard_min_memclk)
+ dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
+
+ if (dc->clk_mgr->funcs->set_hard_max_memclk)
+ dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
+}
+
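+/* Blank pixel data on all active pipes, force min/max memclk to the given
+ * value, then unblank; used when memclk must change without P-State
+ * switching support.
+ */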
+static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
+{
+ struct dc_state *context = dc->current_state;
+ struct hubp *hubp;
+ struct pipe_ctx *pipe;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream != NULL) {
+ dc->hwss.disable_pixel_data(dc, pipe, true);
+
+ // wait for double buffer
+ pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
+ pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
+
+ hubp = pipe->plane_res.hubp;
+ hubp->funcs->set_blank_regs(hubp, true);
+ }
+ }
+
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
+ dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream != NULL) {
+ dc->hwss.disable_pixel_data(dc, pipe, false);
+
+ hubp = pipe->plane_res.hubp;
+ hubp->funcs->set_blank_regs(hubp, false);
+ }
+ }
+}
+
+/**
+ * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
+ * @dc: pointer to dc of the dm calling this
+ * @enable: True = transition to DC mode, false = transition back to AC mode
+ *
+ * Some SoCs define additional clock limits when in DC mode; the DM should
+ * invoke this function when the platform undergoes a power source transition
+ * so DC can apply/unapply the limit. This interface may be disruptive to
+ * the onscreen content.
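+ *
+ * For example (illustrative): with a DC-mode soft max of 800 MHz and a
+ * mode requiring 600 MHz, entering DC mode caps the max memclk at the soft
+ * max; if P-State switching is unsupported, pipes are blanked around the
+ * clock change via blank_and_force_memclk().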
+ *
+ * Context: Triggered by OS through DM interface, or manually by escape calls.
+ * Need to hold a dclock when doing so.
+ *
+ * Return: none (void function)
+ *
+ */
+void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
+{
+ unsigned int softMax = 0, maxDPM = 0, funcMin = 0, i;
+ bool p_state_change_support;
+
+ if (!dc->config.dc_mode_clk_limit_support)
+ return;
+
+ softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
+ for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries; i++) {
+ if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz > maxDPM)
+ maxDPM = dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz;
+ }
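+ /* minimum DRAM clock required by the current mode, rounded up to MHz */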
+ funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
+ p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
+
+ if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
+ if (p_state_change_support) {
+ if (funcMin <= softMax)
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
+ // else: No-Op
+ } else {
+ if (funcMin <= softMax)
+ blank_and_force_memclk(dc, true, softMax);
+ // else: No-Op
+ }
+ } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
+ if (p_state_change_support) {
+ if (funcMin <= softMax)
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
+ // else: No-Op
+ } else {
+ if (funcMin <= softMax)
+ blank_and_force_memclk(dc, true, maxDPM);
+ // else: No-Op
+ }
+ }
+ dc->clk_mgr->dc_mode_softmax_enabled = enable;
+}
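+
+/*
+ * Illustrative sketch only (not part of this file): how a DM might drive
+ * dc_enable_dcmode_clk_limit() from its power-source notification path.
+ * The handler name and the on_battery flag are hypothetical.
+ *
+ *	static void dm_handle_power_source_change(struct dc *dc, bool on_battery)
+ *	{
+ *		// caller is expected to hold the dc lock, per the kernel-doc above
+ *		dc_enable_dcmode_clk_limit(dc, on_battery);
+ *	}
+ */
+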
+bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc,
+ unsigned int pitch,
+ unsigned int height,
+ enum surface_pixel_format format,
+ struct dc_cursor_attributes *cursor_attr)
+{
+ if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, pitch, height, format, cursor_attr))
+ return true;
+ return false;
+}
+
+/* cleanup on driver unload */
+void dc_hardware_release(struct dc *dc)
+{
+ dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);
+
+ if (dc->hwss.hardware_release)
+ dc->hwss.hardware_release(dc);
+}
+
+void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
+{
+ if (dc->current_state)
+ dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
+}
+
+/**
+ * dc_is_dmub_outbox_supported - Check if DMUB firmware supports outbox notifications
+ *
+ * @dc: [in] dc structure
+ *
+ * Checks whether DMUB FW supports outbox notifications. If supported, the DM
+ * should register the outbox interrupt prior to actually enabling interrupts
+ * via dc_enable_dmub_outbox().
+ *
+ * Return:
+ * True if DMUB FW supports outbox notifications, False otherwise
+ */
+bool dc_is_dmub_outbox_supported(struct dc *dc)
+{
+ switch (dc->ctx->asic_id.chip_family) {
+
+ case FAMILY_YELLOW_CARP:
+ /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
+ if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
+ !dc->debug.dpia_debug.bits.disable_dpia)
+ return true;
+ break;
+
+ case AMDGPU_FAMILY_GC_11_0_1:
+ case AMDGPU_FAMILY_GC_11_5_0:
+ if (!dc->debug.dpia_debug.bits.disable_dpia)
+ return true;
+ break;
+
+ default:
+ break;
+ }
+
+ /* dmub aux needs dmub notifications to be enabled */
+ return dc->debug.enable_dmub_aux_for_legacy_ddc;
+
+}
+
+/**
+ * dc_enable_dmub_notifications - Check if dmub fw supports outbox
+ *
+ * @dc: [in] dc structure
+ *
+ * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
+ * notifications. All DMs shall switch to dc_is_dmub_outbox_supported. This
+ * API shall be removed after switching.
+ *
+ * Return:
+ * True if DMUB FW supports outbox notifications, False otherwise
+ */
+bool dc_enable_dmub_notifications(struct dc *dc)
+{
+ return dc_is_dmub_outbox_supported(dc);
+}
+
+/**
+ * dc_enable_dmub_outbox - Enables DMUB unsolicited notification
+ *
+ * @dc: [in] dc structure
+ *
+ * Enables DMUB unsolicited notifications to x86 via outbox.
+ */
+void dc_enable_dmub_outbox(struct dc *dc)
+{
+ struct dc_context *dc_ctx = dc->ctx;
+
+ dmub_enable_outbox_notification(dc_ctx->dmub_srv);
+ DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
+}
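+
+/*
+ * Illustrative sketch only: the init ordering implied by the kernel-doc
+ * above. dm_register_outbox_irq() is a hypothetical DM-side helper.
+ *
+ *	if (dc_is_dmub_outbox_supported(dc)) {
+ *		dm_register_outbox_irq(dm);	// DM-specific, hypothetical
+ *		dc_enable_dmub_outbox(dc);
+ *	}
+ */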
+
+/**
+ * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
+ * @dc: dc structure
+ * @link_index: link index
+ * @payload: aux payload
+ *
+ * Sets the port index appropriately for legacy DDC.
+ *
+ * Returns: True if successful, False on failure
+ */
+bool dc_process_dmub_aux_transfer_async(struct dc *dc,
+ uint32_t link_index,
+ struct aux_payload *payload)
+{
+ uint8_t action;
+ union dmub_rb_cmd cmd = {0};
+
+ ASSERT(payload->length <= 16);
+
+ cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
+ cmd.dp_aux_access.header.payload_bytes = 0;
+ /* For dpia, ddc_pin is set to NULL */
+ if (!dc->links[link_index]->ddc->ddc_pin)
+ cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
+ else
+ cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
+
+ cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
+ cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
+ cmd.dp_aux_access.aux_control.timeout = 0;
+ cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
+ cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
+ cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
+
+ /* set aux action */
+ if (payload->i2c_over_aux) {
+ if (payload->write) {
+ if (payload->mot)
+ action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
+ else
+ action = DP_AUX_REQ_ACTION_I2C_WRITE;
+ } else {
+ if (payload->mot)
+ action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
+ else
+ action = DP_AUX_REQ_ACTION_I2C_READ;
+ }
+ } else {
+ if (payload->write)
+ action = DP_AUX_REQ_ACTION_DPCD_WRITE;
+ else
+ action = DP_AUX_REQ_ACTION_DPCD_READ;
+ }
+
+ cmd.dp_aux_access.aux_control.dpaux.action = action;
+
+ if (payload->length && payload->write) {
+ memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
+ payload->data,
+ payload->length
+ );
+ }
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ return true;
+}
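+
+/*
+ * Illustrative sketch only: a minimal native-DPCD read submitted through
+ * dc_process_dmub_aux_transfer_async(). With i2c_over_aux and write both
+ * false, the function selects DP_AUX_REQ_ACTION_DPCD_READ; the reply is
+ * delivered later via an outbox notification. Field values are an example.
+ *
+ *	struct aux_payload payload = {
+ *		.address = 0x00000,	// DPCD_REV, as an example
+ *		.length = 1,
+ *		.i2c_over_aux = false,
+ *		.write = false,
+ *	};
+ *
+ *	dc_process_dmub_aux_transfer_async(dc, link_index, &payload);
+ */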
+
+uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
+ uint8_t dpia_port_index)
+{
+ uint8_t index, link_index = 0xFF;
+
+ for (index = 0; index < dc->link_count; index++) {
+ /* ddc_hw_inst has dpia port index for dpia links
+ * and ddc instance for legacy links
+ */
+ if (!dc->links[index]->ddc->ddc_pin) {
+ if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
+ link_index = index;
+ break;
+ }
+ }
+ }
+ ASSERT(link_index != 0xFF);
+ return link_index;
+}
+
+/**
+ * dc_process_dmub_set_config_async - Submits set_config command
+ *
+ * @dc: [in] dc structure
+ * @link_index: [in] link index
+ * @payload: [in] set_config payload
+ * @notify: [out] set_config immediate reply
+ *
+ * Submits set_config command to dmub via inbox message.
+ *
+ * Return:
+ * True if the command completed immediately (status in notify), False if the
+ * reply is still pending via outbox notification
+ */
+bool dc_process_dmub_set_config_async(struct dc *dc,
+ uint32_t link_index,
+ struct set_config_cmd_payload *payload,
+ struct dmub_notification *notify)
+{
+ union dmub_rb_cmd cmd = {0};
+ bool is_cmd_complete = true;
+
+ /* prepare SET_CONFIG command */
+ cmd.set_config_access.header.type = DMUB_CMD__DPIA;
+ cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
+
+ cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
+ cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
+ cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
+
+ if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
+ /* command is not processed by dmub */
+ notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
+ return is_cmd_complete;
+ }
+
+ /* command processed by dmub; if ret_status is 1, it completed immediately */
+ if (cmd.set_config_access.header.ret_status == 1)
+ notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
+ else
+ /* cmd pending, will receive notification via outbox */
+ is_cmd_complete = false;
+
+ return is_cmd_complete;
+}
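+
+/*
+ * Illustrative sketch only: telling an immediate SET_CONFIG reply apart from
+ * a deferred one, based on the return value documented above.
+ *
+ *	struct dmub_notification notify = {0};
+ *
+ *	if (dc_process_dmub_set_config_async(dc, link_index, &payload, &notify)) {
+ *		// complete (or rejected): notify.sc_status holds the result
+ *	} else {
+ *		// pending: the status arrives later via an outbox notification
+ *	}
+ */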
+
+/**
+ * dc_process_dmub_set_mst_slots - Submits MST slot allocation
+ *
+ * @dc: [in] dc structure
+ * @link_index: [in] link index
+ * @mst_alloc_slots: [in] mst slots to be allotted
+ * @mst_slots_in_use: [out] mst slots in use returned in failure case
+ *
+ * Submits mst slot allocation command to dmub via inbox message
+ *
+ * Return:
+ * DC_OK if successful, an error status otherwise
+ */
+enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
+ uint32_t link_index,
+ uint8_t mst_alloc_slots,
+ uint8_t *mst_slots_in_use)
+{
+ union dmub_rb_cmd cmd = {0};
+
+ /* prepare MST_ALLOC_SLOTS command */
+ cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
+ cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
+
+ cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
+ cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
+
+ if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ /* command is not processed by dmub */
+ return DC_ERROR_UNEXPECTED;
+
+ /* command was processed by dmub; anything but ret_status == 1 is a failure */
+ if (cmd.set_config_access.header.ret_status != 1)
+ /* command processing error */
+ return DC_ERROR_UNEXPECTED;
+
+ /* command processed and we have a status of 2, mst not enabled in dpia */
+ if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
+ return DC_FAIL_UNSUPPORTED_1;
+
+ /* previously configured mst alloc and used slots did not match */
+ if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
+ *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
+ return DC_NOT_SUPPORTED;
+ }
+
+ return DC_OK;
+}
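+
+/*
+ * Illustrative sketch only: mapping the return codes above to caller
+ * behavior. mst_slots_in_use is only meaningful for DC_NOT_SUPPORTED.
+ *
+ *	uint8_t slots_in_use = 0;
+ *
+ *	switch (dc_process_dmub_set_mst_slots(dc, link_index, slots,
+ *					       &slots_in_use)) {
+ *	case DC_OK:
+ *		break;			// slots programmed
+ *	case DC_FAIL_UNSUPPORTED_1:
+ *		break;			// MST not enabled on this DPIA
+ *	case DC_NOT_SUPPORTED:
+ *		break;			// slots_in_use holds the stale count
+ *	default:
+ *		break;			// DMUB rejected or failed the command
+ *	}
+ */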
+
+/**
+ * dc_process_dmub_dpia_hpd_int_enable - Submits DPIA HPD interrupt enable
+ *
+ * @dc: [in] dc structure
+ * @hpd_int_enable: [in] 1 for hpd int enable, 0 to disable
+ *
+ * Submits dpia hpd int enable command to dmub via inbox message
+ */
+void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
+ uint32_t hpd_int_enable)
+{
+ union dmub_rb_cmd cmd = {0};
+
+ cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
+ cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+
+ DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
+}
+
+/**
+ * dc_print_dmub_diagnostic_data - Print DMUB diagnostic data for debugging
+ *
+ * @dc: [in] dc structure
+ */
+void dc_print_dmub_diagnostic_data(const struct dc *dc)
+{
+ dc_dmub_srv_log_diagnostic_data(dc->ctx->dmub_srv);
+}
+
+/**
+ * dc_disable_accelerated_mode - disable accelerated mode
+ * @dc: dc structure
+ */
+void dc_disable_accelerated_mode(struct dc *dc)
+{
+ bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
+}
+
+/**
+ * dc_notify_vsync_int_state - notifies vsync enable/disable state
+ * @dc: dc structure
+ * @stream: stream where vsync int state changed
+ * @enable: whether vsync is enabled or disabled
+ *
+ * Called when vsync is enabled/disabled. Notifies DMUB to start/stop ABM
+ * interrupts after steady state is reached.
+ */
+void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
+{
+ int i;
+ int edp_num;
+ struct pipe_ctx *pipe = NULL;
+ struct dc_link *link = stream->sink->link;
+ struct dc_link *edp_links[MAX_NUM_EDP];
+
+ if (link->psr_settings.psr_feature_enabled)
+ return;
+
+ if (link->replay_settings.replay_feature_enabled)
+ return;
+
+ /*find primary pipe associated with stream*/
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.tg)
+ break;
+ }
+
+ if (i == MAX_PIPES) {
+ ASSERT(0);
+ return;
+ }
+
+ dc_get_edp_links(dc, edp_links, &edp_num);
+
+ /* Determine panel inst */
+ for (i = 0; i < edp_num; i++) {
+ if (edp_links[i] == link)
+ break;
+ }
+
+ if (i == edp_num) {
+ return;
+ }
+
+ if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
+ pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
+}
+
+/*****************************************************************************
+ * dc_abm_save_restore() - Interface to DC for save+pause and restore+un-pause
+ * ABM
+ * @dc: dc structure
+ * @stream: stream whose ABM hw states are saved/restored
+ * @pData: abm hw states
+ *
+ ****************************************************************************/
+bool dc_abm_save_restore(
+ struct dc *dc,
+ struct dc_stream_state *stream,
+ struct abm_save_restore *pData)
+{
+ int i;
+ int edp_num;
+ struct pipe_ctx *pipe = NULL;
+ struct dc_link *link = stream->sink->link;
+ struct dc_link *edp_links[MAX_NUM_EDP];
+
+ if (link->replay_settings.replay_feature_enabled)
+ return false;
+
+ /*find primary pipe associated with stream*/
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.tg)
+ break;
+ }
+
+ if (i == MAX_PIPES) {
+ ASSERT(0);
+ return false;
+ }
+
+ dc_get_edp_links(dc, edp_links, &edp_num);
+
+ /* Determine panel inst */
+ for (i = 0; i < edp_num; i++)
+ if (edp_links[i] == link)
+ break;
+
+ if (i == edp_num)
+ return false;
+
+ if (pipe->stream_res.abm &&
+ pipe->stream_res.abm->funcs->save_restore)
+ return pipe->stream_res.abm->funcs->save_restore(
+ pipe->stream_res.abm,
+ i,
+ pData);
+ return false;
+}
+
+void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
+{
+ unsigned int i;
+ bool subvp_sw_cursor_req = false;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i])) {
+ subvp_sw_cursor_req = true;
+ break;
+ }
+ }
+ properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size;
+}
+
+/**
+ * dc_set_edp_power() - DM controls eDP power to be ON/OFF
+ *
+ * Called when the DM wants to power the eDP panel on or off.
+ * Only works on links that have the skip_implict_edp_power_control flag set.
+ *
+ * @dc: Current DC state
+ * @edp_link: a link with eDP connector signal type
+ * @powerOn: power on/off eDP
+ *
+ * Return: void
+ */
+void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
+ bool powerOn)
+{
+ if (edp_link->connector_signal != SIGNAL_TYPE_EDP)
+ return;
+
+ if (edp_link->skip_implict_edp_power_control == false)
+ return;
+
+ edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
+}
+
+/*
+ *****************************************************************************
+ * dc_get_power_profile_for_dc_state() - extracts power profile from dc state
+ *
+ * Called when DM wants to make power policy decisions based on dc_state
+ *
+ *****************************************************************************
+ */
+struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context)
+{
+ struct dc_power_profile profile = { 0 };
+
+ profile.power_level += !context->bw_ctx.bw.dcn.clk.p_state_change_support;
+
+ return profile;
+}
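+
+/*
+ * Illustrative sketch only: with the current definition, power_level is 0
+ * when P-state switching is supported and 1 when it is not, so a nonzero
+ * level flags the more power-hungry configuration.
+ *
+ *	struct dc_power_profile profile =
+ *		dc_get_power_profile_for_dc_state(dc->current_state);
+ *
+ *	if (profile.power_level > 0)
+ *		;	// e.g. bias the platform power policy accordingly
+ */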
+
diff --git a/rr-cache/1ccce56d486e6cb023b511796ea6bb9d98723dd7/postimage.1 b/rr-cache/1ccce56d486e6cb023b511796ea6bb9d98723dd7/postimage.1
new file mode 100644
index 000000000000..e4f333d4fb54
--- /dev/null
+++ b/rr-cache/1ccce56d486e6cb023b511796ea6bb9d98723dd7/postimage.1
@@ -0,0 +1,638 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+#include "resource.h"
+#include "dcn351_fpu.h"
+#include "dcn31/dcn31_resource.h"
+#include "dcn32/dcn32_resource.h"
+#include "dcn35/dcn35_resource.h"
+#include "dcn351/dcn351_resource.h"
+#include "dml/dcn31/dcn31_fpu.h"
+#include "dml/dcn35/dcn35_fpu.h"
+#include "dml/dml_inline_defs.h"
+
+#include "link.h"
+
+#define DC_LOGGER_INIT(logger)
+
+struct _vcs_dpi_ip_params_st dcn3_51_ip = {
+ .VBlankNomDefaultUS = 668,
+ .gpuvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_enable = 1,
+ .hostvm_max_page_table_levels = 2,
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = 1536,
+ .config_return_buffer_size_in_kbytes = 1792,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .meta_fifo_size_in_kentries = 32,
+ .zero_size_buffer_entries = 512,
+ .compbuf_reserved_space_64b = 256,
+ .compbuf_reserved_space_zs = 64,
+ .dpp_output_buffer_pixels = 2560,/*not used*/
+ .opp_output_buffer_lines = 1,/*not used*/
+ .pixel_chunk_size_kbytes = 8,
+ //.alpha_pixel_chunk_size_kbytes = 4;/*new*/
+ //.min_pixel_chunk_size_bytes = 1024;/*new*/
+ .meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
+ .writeback_chunk_size_kbytes = 8,
+ .ptoi_supported = false,
+ .num_dsc = 4,
+ .maximum_dsc_bits_per_component = 12,/*delta from 10*/
+ .dsc422_native_support = true,/*delta from false*/
+ .is_line_buffer_bpp_fixed = true,/*new*/
+ .line_buffer_fixed_bpp = 32,/*delta from 48*/
+ .line_buffer_size_bits = 986880,/*delta from 789504*/
+ .max_line_buffer_lines = 32,/*delta from 12*/
+ .writeback_interface_buffer_size_kbytes = 90,
+ .max_num_dpp = 4,
+ .max_num_otg = 4,
+ .max_num_hdmi_frl_outputs = 1,
+ .max_num_wb = 1,
+ /*.max_num_hdmi_frl_outputs = 1; new in dml2*/
+ /*.max_num_dp2p0_outputs = 2; new in dml2*/
+ /*.max_num_dp2p0_streams = 4; new in dml2*/
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dpte_buffer_size_in_pte_reqs_luma = 68,/*changed from 64,*/
+ .dpte_buffer_size_in_pte_reqs_chroma = 36,/*changed from 34*/
+ /*.dcc_meta_buffer_size_bytes = 6272; new to dml2*/
+ .dispclk_ramp_margin_percent = 1.11,/*delta from 1*/
+ /*.dppclk_delay_subtotal = 47;
+ .dppclk_delay_scl = 50;
+ .dppclk_delay_scl_lb_only = 16;
+ .dppclk_delay_cnvc_formatter = 28;
+ .dppclk_delay_cnvc_cursor = 6;
+ .dispclk_delay_subtotal = 125;*/ /*new to dml2*/
+ .max_inter_dcn_tile_repeaters = 8,
+ .cursor_buffer_size = 16,
+ .cursor_chunk_size = 2,
+ .writeback_line_buffer_buffer_size = 0,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .dppclk_delay_subtotal = 47, /* changed from 46,*/
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_cnvc_formatter = 28,/*changed from 27,*/
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 125, /*changed from 119,*/
+ .dynamic_metadata_vm_enabled = false,
+ .odm_combine_4to1_supported = false,
+ .dcc_supported = true,
+// .config_return_buffer_segment_size_in_kbytes = 64;/*required, hard coded in dml2_translate_ip_params*/
+
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
+ /*TODO: correct dispclk/dppclk voltage level determination*/
+ .clock_limits = {
+ {
+ .state = 0,
+ .dcfclk_mhz = 400.0,
+ .fabricclk_mhz = 400.0,
+ .socclk_mhz = 600.0,
+ .dram_speed_mts = 3200.0,
+ .dispclk_mhz = 600.0,
+ .dppclk_mhz = 600.0,
+ .phyclk_mhz = 600.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 200.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 1,
+ .dcfclk_mhz = 600.0,
+ .fabricclk_mhz = 1000.0,
+ .socclk_mhz = 733.0,
+ .dram_speed_mts = 6400.0,
+ .dispclk_mhz = 800.0,
+ .dppclk_mhz = 800.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 266.7,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 2,
+ .dcfclk_mhz = 738.0,
+ .fabricclk_mhz = 1200.0,
+ .socclk_mhz = 880.0,
+ .dram_speed_mts = 7500.0,
+ .dispclk_mhz = 800.0,
+ .dppclk_mhz = 800.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 266.7,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 3,
+ .dcfclk_mhz = 800.0,
+ .fabricclk_mhz = 1400.0,
+ .socclk_mhz = 978.0,
+ .dram_speed_mts = 7500.0,
+ .dispclk_mhz = 960.0,
+ .dppclk_mhz = 960.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 320.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 4,
+ .dcfclk_mhz = 873.0,
+ .fabricclk_mhz = 1600.0,
+ .socclk_mhz = 1100.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1066.7,
+ .dppclk_mhz = 1066.7,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 355.6,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 5,
+ .dcfclk_mhz = 960.0,
+ .fabricclk_mhz = 1700.0,
+ .socclk_mhz = 1257.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 400.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 6,
+ .dcfclk_mhz = 1067.0,
+ .fabricclk_mhz = 1850.0,
+ .socclk_mhz = 1257.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1371.4,
+ .dppclk_mhz = 1371.4,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 457.1,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 7,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 2000.0,
+ .socclk_mhz = 1467.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1600.0,
+ .dppclk_mhz = 1600.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 533.3,
+ .dtbclk_mhz = 600.0,
+ },
+ },
+ .num_states = 8,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .sr_exit_z8_time_us = 250.0,
+ .sr_enter_plus_exit_z8_time_us = 350.0,
+ .fclk_change_latency_us = 24.0,
+ .usr_retraining_latency_us = 2,
+ .writeback_latency_us = 12.0,
+
+ .dram_channel_width_bytes = 4,/*not exist in dml2*/
+ .round_trip_ping_latency_dcfclk_cycles = 106,/*not exist in dml2*/
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .dram_clock_change_latency_us = 11.72,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+
+ .pct_ideal_sdp_bw_after_urgent = 80.0,
+ .pct_ideal_fabric_bw_after_urgent = 80.0, /*new to dml2*/
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .return_bus_width_bytes = 64,
+ .downspread_percent = 0.38,
+ .dcn_downspread_percent = 0.5,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .do_urgent_latency_adjustment = 0,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .num_chans = 4,
+ .dispclk_dppclk_vco_speed_mhz = 2400.0,
+};
+
+/*
+ * dcn351_update_bw_bounding_box
+ *
+ * Overrides some of the dcn3_51 ip_or_soc initial parameters hardcoded from
+ * the spreadsheet with actual values for the given dGPU SKU:
+ * - with a few options passed in from dc->config
+ * - with dentist_vco_frequency from the Clk Mgr (currently hardcoded, but
+ *   might need to be read from PM FW)
+ * - with latency values passed in (in ns units) via dc->bb_overrides, for
+ *   debugging purposes
+ * - with latencies passed in from VBIOS (in 100ns units) if available for
+ *   a certain dGPU SKU
+ * - with the number of DRAM channels from VBIOS (which differs for certain
+ *   dGPU SKUs of the same ASIC)
+ * - with clock levels from the clk_table entries passed in from the Clk Mgr,
+ *   as reported by PM FW for the different clocks (which might differ for
+ *   certain dGPU SKUs of the same ASIC)
+ */
+void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
+ struct clk_bw_params *bw_params)
+{
+ unsigned int i, closest_clk_lvl;
+ int j;
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ struct _vcs_dpi_voltage_scaling_st *clock_limits =
+ dc->scratch.update_bw_bounding_box.clock_limits;
+ int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
+
+ dc_assert_fp_enabled();
+
+ dcn3_51_ip.max_num_otg =
+ dc->res_pool->res_cap->num_timing_generator;
+ dcn3_51_ip.max_num_dpp = dc->res_pool->pipe_count;
+ dcn3_51_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards*/
+ for (closest_clk_lvl = 0, j = dcn3_51_soc.num_states - 1;
+ j >= 0; j--) {
+ if (dcn3_51_soc.clock_limits[j].dcfclk_mhz <=
+ clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
+ if (clk_table->num_entries == 1) {
+ /*smu gives one DPM level, let's take the highest one*/
+ closest_clk_lvl = dcn3_51_soc.num_states - 1;
+ }
+
+ clock_limits[i].state = i;
+
+ /* Clocks dependent on voltage level. */
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ if (clk_table->num_entries == 1 &&
+ clock_limits[i].dcfclk_mhz <
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
+ /*SMU fix not released yet*/
+ clock_limits[i].dcfclk_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
+ }
+
+ clock_limits[i].fabricclk_mhz =
+ clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz =
+ clk_table->entries[i].socclk_mhz;
+
+ if (clk_table->entries[i].memclk_mhz &&
+ clk_table->entries[i].wck_ratio)
+ clock_limits[i].dram_speed_mts =
+ clk_table->entries[i].memclk_mhz * 2 *
+ clk_table->entries[i].wck_ratio;
+
+ /* Clocks independent of voltage level. */
+ clock_limits[i].dispclk_mhz = max_dispclk_mhz ?
+ max_dispclk_mhz :
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ clock_limits[i].dppclk_mhz = max_dppclk_mhz ?
+ max_dppclk_mhz :
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
+ clock_limits[i].dram_bw_per_chan_gbps =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+
+ memcpy(dcn3_51_soc.clock_limits, clock_limits,
+ sizeof(dcn3_51_soc.clock_limits));
+
+ if (clk_table->num_entries)
+ dcn3_51_soc.num_states = clk_table->num_entries;
+
+ if (max_dispclk_mhz) {
+ dcn3_51_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ }
+ if ((int)(dcn3_51_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_51_soc.dram_clock_change_latency_us =
+ dc->debug.dram_clock_change_latency_ns / 1000.0;
+ }
+
+ if (dc->bb_overrides.dram_clock_change_latency_ns > 0)
+ dcn3_51_soc.dram_clock_change_latency_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_exit_time_ns > 0)
+ dcn3_51_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_time_ns > 0)
+ dcn3_51_soc.sr_enter_plus_exit_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_exit_z8_time_ns > 0)
+ dcn3_51_soc.sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns > 0)
+ dcn3_51_soc.sr_enter_plus_exit_z8_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0;
+
+ /*temp till dml2 fully works without dml1*/
+ dml_init_instance(&dc->dml, &dcn3_51_soc, &dcn3_51_ip,
+ DML_PROJECT_DCN31);
+
+ /*copy to dml2, before dml2_create*/
+ if (clk_table->num_entries > 2) {
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ dc->dml2_options.bbox_overrides.clks_table.num_states =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz =
+ clock_limits[i].dcfclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz =
+ clock_limits[i].fabricclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz =
+ clock_limits[i].dispclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz =
+ clock_limits[i].dppclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz =
+ clock_limits[i].socclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
+ clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
+ clock_limits[i].dtbclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
+ clk_table->num_entries;
+ }
+ }
+
+ /* Update latency values */
+ dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = dcn3_51_soc.dram_clock_change_latency_us;
+
+ dc->dml2_options.bbox_overrides.sr_exit_latency_us = dcn3_51_soc.sr_exit_time_us;
+ dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = dcn3_51_soc.sr_enter_plus_exit_time_us;
+
+ dc->dml2_options.bbox_overrides.sr_exit_z8_time_us = dcn3_51_soc.sr_exit_z8_time_us;
+ dc->dml2_options.bbox_overrides.sr_enter_plus_exit_z8_time_us = dcn3_51_soc.sr_enter_plus_exit_z8_time_us;
+}
+
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+ return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
+ format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
+/*
+ * micro_sec_to_vert_lines() - converts a time to a number of vertical lines
+ * for the given timing
+ *
+ * @num_us: number of microseconds
+ * @timing: timing that defines the line time
+ * @return: number of vertical lines. If the time does not map to an exact
+ *          number of lines, it is rounded up to guarantee num_us.
+ */
+static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_timing *timing)
+{
+ unsigned int num_lines = 0;
+ unsigned int lines_time_in_ns = 1000.0 *
+ (((float)timing->h_total * 1000.0) /
+ ((float)timing->pix_clk_100hz / 10.0));
+
+ num_lines = dml_ceil(1000.0 * num_us / lines_time_in_ns, 1.0);
+
+ return num_lines;
+}
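+
+/*
+ * Worked example (illustrative): for a 1080p60 CEA timing with
+ * h_total = 2200 and pix_clk_100hz = 1485000 (148.5 MHz), one line takes
+ * 1000 * (2200 * 1000 / 148500) ~= 14814 ns, so the default
+ * VBlankNomDefaultUS of 668 us rounds up to ceil(668000 / 14814) = 46 lines.
+ */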
+
+static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
+{
+ unsigned int v_active = 0, v_blank = 0, v_back_porch = 0;
+
+ v_active = timing->v_border_top + timing->v_addressable + timing->v_border_bottom;
+ v_blank = timing->v_total - v_active;
+ v_back_porch = v_blank - timing->v_front_porch - timing->v_sync_width;
+
+ return v_back_porch;
+}
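+
+/*
+ * Worked example (illustrative): the standard 1080p timing has
+ * v_total = 1125, v_addressable = 1080 with no borders, v_front_porch = 4
+ * and v_sync_width = 5, giving v_blank = 45 and
+ * v_back_porch = 45 - 4 - 5 = 36.
+ */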
+
+int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe = 0;
+ bool upscaled = false;
+ const unsigned int max_allowed_vblank_nom = 1023;
+
+ dcn31_populate_dml_pipes_from_context(dc, context, pipes,
+ fast_validate);
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_crtc_timing *timing;
+ unsigned int num_lines = 0;
+ unsigned int v_back_porch = 0;
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ pipe = &res_ctx->pipe_ctx[i];
+ timing = &pipe->stream->timing;
+
+ num_lines = micro_sec_to_vert_lines(dcn3_51_ip.VBlankNomDefaultUS, timing);
+ v_back_porch = get_vertical_back_porch(timing);
+
+ if (pipe->stream->adjust.v_total_max ==
+ pipe->stream->adjust.v_total_min &&
+ pipe->stream->adjust.v_total_min > timing->v_total) {
+ pipes[pipe_cnt].pipe.dest.vtotal =
+ pipe->stream->adjust.v_total_min;
+ pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total -
+ pipes[pipe_cnt].pipe.dest.vactive;
+ }
+
+ pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
+ pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines);
+ // vblank_nom should not be smaller than (VSync (timing->v_sync_width + v_back_porch) + 2)
+ // + 2 is because
+ // 1 -> VStartup_start should be 1 line before VSync
+ // 1 -> always reserve 1 line between start of vblank to vstartup signal
+ pipes[pipe_cnt].pipe.dest.vblank_nom =
+ max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width + v_back_porch + 2);
+ pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
+
+ if (pipe->plane_state &&
+ (pipe->plane_state->src_rect.height <
+ pipe->plane_state->dst_rect.height ||
+ pipe->plane_state->src_rect.width <
+ pipe->plane_state->dst_rect.width))
+ upscaled = true;
+
+ /*
+ * Immediate flip can be set dynamically after enabling the
+ * plane. We need to require support for immediate flip or
+ * underflow can be intermittently experienced depending on peak
+ * b/w requirements.
+ */
+ pipes[pipe_cnt].pipe.src.immediate_flip = true;
+
+ pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
+
+ DC_FP_START();
+ dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
+ DC_FP_END();
+
+ pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+ pipes[pipe_cnt].pipe.src.dcc_rate = 3;
+ pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+ pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256;
+
+ if (pipes[pipe_cnt].dout.dsc_enable) {
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 12;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+
+ pipe_cnt++;
+ }
+
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 384;/*per guide*/
+ dc->config.enable_4to1MPC = false;
+
+ if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
+ if (is_dual_plane(pipe->plane_state->format)
+ && pipe->plane_state->src_rect.width <= 1920 &&
+ pipe->plane_state->src_rect.height <= 1080) {
+ dc->config.enable_4to1MPC = true;
+ } else if (!is_dual_plane(pipe->plane_state->format) &&
+ pipe->plane_state->src_rect.width <= 5120) {
+ /*
+ * Limit to 5k max to avoid forced pipe split when there
+ * is not enough detile for swath
+ */
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ pipes[0].pipe.src.unbounded_req_mode = true;
+ }
+ } else if (context->stream_count >=
+ dc->debug.crb_alloc_policy_min_disp_count &&
+ dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes =
+ dc->debug.crb_alloc_policy * 64;
+ } else if (context->stream_count >= 3 && upscaled) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->stream->signal == SIGNAL_TYPE_EDP &&
+ dc->debug.seamless_boot_odm_combine &&
+ pipe->stream->apply_seamless_boot_optimization) {
+
+ if (pipe->stream->apply_boot_odm_mode ==
+ dm_odm_combine_policy_2to1) {
+ context->bw_ctx.dml.vba.ODMCombinePolicy =
+ dm_odm_combine_policy_2to1;
+ break;
+ }
+ }
+ }
+
+ return pipe_cnt;
+}
+
+void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context)
+{
+ enum dcn_zstate_support_state support = DCN_ZSTATE_SUPPORT_DISALLOW;
+ unsigned int i, plane_count = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ plane_count++;
+ }
+
+ /*dcn351 does not support z9/z10*/
+ if (context->stream_count == 0 || plane_count == 0) {
+ support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
+ } else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ struct dc_link *link = context->streams[0]->sink->link;
+ bool is_pwrseq0 = link && link->link_index == 0;
+ bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
+ link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr);
+ bool is_replay = link && link->replay_settings.replay_feature_enabled;
+ int minmum_z8_residency =
+ dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
+ bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
+
+ /*for psr1/psr-su, we allow z8 and z10 based on latency; for replay with IPS enabled, it will enter ips2*/
+ if (is_pwrseq0 && (is_psr || is_replay))
+ support = allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW;
+ }
+ context->bw_ctx.bw.dcn.clk.zstate_support = support;
+}
diff --git a/rr-cache/1ccce56d486e6cb023b511796ea6bb9d98723dd7/preimage.1 b/rr-cache/1ccce56d486e6cb023b511796ea6bb9d98723dd7/preimage.1
new file mode 100644
index 000000000000..4673ceb4b557
--- /dev/null
+++ b/rr-cache/1ccce56d486e6cb023b511796ea6bb9d98723dd7/preimage.1
@@ -0,0 +1,642 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+#include "resource.h"
+#include "dcn351_fpu.h"
+#include "dcn31/dcn31_resource.h"
+#include "dcn32/dcn32_resource.h"
+#include "dcn35/dcn35_resource.h"
+#include "dcn351/dcn351_resource.h"
+#include "dml/dcn31/dcn31_fpu.h"
+#include "dml/dcn35/dcn35_fpu.h"
+#include "dml/dml_inline_defs.h"
+
+#include "link.h"
+
+#define DC_LOGGER_INIT(logger)
+
+struct _vcs_dpi_ip_params_st dcn3_51_ip = {
+ .VBlankNomDefaultUS = 668,
+ .gpuvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_enable = 1,
+ .hostvm_max_page_table_levels = 2,
+ .rob_buffer_size_kbytes = 64,
+ .det_buffer_size_kbytes = 1536,
+ .config_return_buffer_size_in_kbytes = 1792,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .meta_fifo_size_in_kentries = 32,
+ .zero_size_buffer_entries = 512,
+ .compbuf_reserved_space_64b = 256,
+ .compbuf_reserved_space_zs = 64,
+ .dpp_output_buffer_pixels = 2560,/*not used*/
+ .opp_output_buffer_lines = 1,/*not used*/
+ .pixel_chunk_size_kbytes = 8,
+ //.alpha_pixel_chunk_size_kbytes = 4;/*new*/
+ //.min_pixel_chunk_size_bytes = 1024;/*new*/
+ .meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
+ .writeback_chunk_size_kbytes = 8,
+ .ptoi_supported = false,
+ .num_dsc = 4,
+ .maximum_dsc_bits_per_component = 12,/*delta from 10*/
+ .dsc422_native_support = true,/*delta from false*/
+ .is_line_buffer_bpp_fixed = true,/*new*/
+ .line_buffer_fixed_bpp = 32,/*delta from 48*/
+ .line_buffer_size_bits = 986880,/*delta from 789504*/
+ .max_line_buffer_lines = 32,/*delta from 12*/
+ .writeback_interface_buffer_size_kbytes = 90,
+ .max_num_dpp = 4,
+ .max_num_otg = 4,
+ .max_num_hdmi_frl_outputs = 1,
+ .max_num_wb = 1,
+ /*.max_num_hdmi_frl_outputs = 1; new in dml2*/
+ /*.max_num_dp2p0_outputs = 2; new in dml2*/
+ /*.max_num_dp2p0_streams = 4; new in dml2*/
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dpte_buffer_size_in_pte_reqs_luma = 68,/*changed from 64,*/
+ .dpte_buffer_size_in_pte_reqs_chroma = 36,/*changed from 34*/
+ /*.dcc_meta_buffer_size_bytes = 6272; new to dml2*/
+ .dispclk_ramp_margin_percent = 1.11,/*delta from 1*/
+ /*.dppclk_delay_subtotal = 47;
+ .dppclk_delay_scl = 50;
+ .dppclk_delay_scl_lb_only = 16;
+ .dppclk_delay_cnvc_formatter = 28;
+ .dppclk_delay_cnvc_cursor = 6;
+ .dispclk_delay_subtotal = 125;*/ /*new to dml2*/
+ .max_inter_dcn_tile_repeaters = 8,
+ .cursor_buffer_size = 16,
+ .cursor_chunk_size = 2,
+ .writeback_line_buffer_buffer_size = 0,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .dppclk_delay_subtotal = 47, /* changed from 46,*/
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_cnvc_formatter = 28,/*changed from 27,*/
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 125, /*changed from 119,*/
+ .dynamic_metadata_vm_enabled = false,
+ .odm_combine_4to1_supported = false,
+ .dcc_supported = true,
+// .config_return_buffer_segment_size_in_kbytes = 64;/*required, hard coded in dml2_translate_ip_params*/
+
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
+ /*TODO: correct dispclk/dppclk voltage level determination*/
+ .clock_limits = {
+ {
+ .state = 0,
+ .dcfclk_mhz = 400.0,
+ .fabricclk_mhz = 400.0,
+ .socclk_mhz = 600.0,
+ .dram_speed_mts = 3200.0,
+ .dispclk_mhz = 600.0,
+ .dppclk_mhz = 600.0,
+ .phyclk_mhz = 600.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 200.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 1,
+ .dcfclk_mhz = 600.0,
+ .fabricclk_mhz = 1000.0,
+ .socclk_mhz = 733.0,
+ .dram_speed_mts = 6400.0,
+ .dispclk_mhz = 800.0,
+ .dppclk_mhz = 800.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 266.7,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 2,
+ .dcfclk_mhz = 738.0,
+ .fabricclk_mhz = 1200.0,
+ .socclk_mhz = 880.0,
+ .dram_speed_mts = 7500.0,
+ .dispclk_mhz = 800.0,
+ .dppclk_mhz = 800.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 266.7,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 3,
+ .dcfclk_mhz = 800.0,
+ .fabricclk_mhz = 1400.0,
+ .socclk_mhz = 978.0,
+ .dram_speed_mts = 7500.0,
+ .dispclk_mhz = 960.0,
+ .dppclk_mhz = 960.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 320.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 4,
+ .dcfclk_mhz = 873.0,
+ .fabricclk_mhz = 1600.0,
+ .socclk_mhz = 1100.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1066.7,
+ .dppclk_mhz = 1066.7,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 355.6,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 5,
+ .dcfclk_mhz = 960.0,
+ .fabricclk_mhz = 1700.0,
+ .socclk_mhz = 1257.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 400.0,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 6,
+ .dcfclk_mhz = 1067.0,
+ .fabricclk_mhz = 1850.0,
+ .socclk_mhz = 1257.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1371.4,
+ .dppclk_mhz = 1371.4,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 457.1,
+ .dtbclk_mhz = 600.0,
+ },
+ {
+ .state = 7,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 2000.0,
+ .socclk_mhz = 1467.0,
+ .dram_speed_mts = 8533.0,
+ .dispclk_mhz = 1600.0,
+ .dppclk_mhz = 1600.0,
+ .phyclk_mhz = 810.0,
+ .phyclk_d18_mhz = 667.0,
+ .dscclk_mhz = 533.3,
+ .dtbclk_mhz = 600.0,
+ },
+ },
+ .num_states = 8,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .sr_exit_z8_time_us = 250.0,
+ .sr_enter_plus_exit_z8_time_us = 350.0,
+ .fclk_change_latency_us = 24.0,
+ .usr_retraining_latency_us = 2,
+ .writeback_latency_us = 12.0,
+
+ .dram_channel_width_bytes = 4,/*not exist in dml2*/
+ .round_trip_ping_latency_dcfclk_cycles = 106,/*not exist in dml2*/
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .dram_clock_change_latency_us = 11.72,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+
+ .pct_ideal_sdp_bw_after_urgent = 80.0,
+ .pct_ideal_fabric_bw_after_urgent = 80.0, /*new to dml2*/
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 65.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 30.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .return_bus_width_bytes = 64,
+ .downspread_percent = 0.38,
+ .dcn_downspread_percent = 0.5,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .do_urgent_latency_adjustment = 0,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+ .num_chans = 4,
+<<<<<<<
+=======
+ .dram_clock_change_latency_us = 11.72,
+>>>>>>>
+ .dispclk_dppclk_vco_speed_mhz = 2400.0,
+};
+
+/*
+ * dcn351_update_bw_bounding_box
+ *
+ * Overrides some of the dcn3_51 ip_or_soc initial parameters hardcoded from
+ * the spreadsheet with actual values for the given dGPU SKU:
+ * - with a few options passed in from dc->config
+ * - with dentist_vco_frequency from the Clk Mgr (currently hardcoded, but
+ *   might need to be read from PM FW)
+ * - with latency values passed in (in ns units) via dc->bb_overrides, for
+ *   debugging purposes
+ * - with latencies passed in from VBIOS (in 100ns units) if available for
+ *   a certain dGPU SKU
+ * - with the number of DRAM channels from VBIOS (which differs for certain
+ *   dGPU SKUs of the same ASIC)
+ * - with clock levels from the clk_table entries passed in from the Clk Mgr,
+ *   as reported by PM FW for the different clocks (which might differ for
+ *   certain dGPU SKUs of the same ASIC)
+ */
+void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
+ struct clk_bw_params *bw_params)
+{
+ unsigned int i, closest_clk_lvl;
+ int j;
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ struct _vcs_dpi_voltage_scaling_st *clock_limits =
+ dc->scratch.update_bw_bounding_box.clock_limits;
+ int max_dispclk_mhz = 0, max_dppclk_mhz = 0;
+
+ dc_assert_fp_enabled();
+
+ dcn3_51_ip.max_num_otg =
+ dc->res_pool->res_cap->num_timing_generator;
+ dcn3_51_ip.max_num_dpp = dc->res_pool->pipe_count;
+ dcn3_51_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(clk_table->num_entries);
+
+ /* Prepass to find max clocks independent of voltage level. */
+ for (i = 0; i < clk_table->num_entries; ++i) {
+ if (clk_table->entries[i].dispclk_mhz > max_dispclk_mhz)
+ max_dispclk_mhz = clk_table->entries[i].dispclk_mhz;
+ if (clk_table->entries[i].dppclk_mhz > max_dppclk_mhz)
+ max_dppclk_mhz = clk_table->entries[i].dppclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards*/
+ for (closest_clk_lvl = 0, j = dcn3_51_soc.num_states - 1;
+ j >= 0; j--) {
+ if (dcn3_51_soc.clock_limits[j].dcfclk_mhz <=
+ clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
+ if (clk_table->num_entries == 1) {
+ /*smu gives one DPM level, let's take the highest one*/
+ closest_clk_lvl = dcn3_51_soc.num_states - 1;
+ }
+
+ clock_limits[i].state = i;
+
+ /* Clocks dependent on voltage level. */
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ if (clk_table->num_entries == 1 &&
+ clock_limits[i].dcfclk_mhz <
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dcfclk_mhz) {
+ /*SMU fix not released yet*/
+ clock_limits[i].dcfclk_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dcfclk_mhz;
+ }
+
+ clock_limits[i].fabricclk_mhz =
+ clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz =
+ clk_table->entries[i].socclk_mhz;
+
+ if (clk_table->entries[i].memclk_mhz &&
+ clk_table->entries[i].wck_ratio)
+ clock_limits[i].dram_speed_mts =
+ clk_table->entries[i].memclk_mhz * 2 *
+ clk_table->entries[i].wck_ratio;
+
+ /* Clocks independent of voltage level. */
+ clock_limits[i].dispclk_mhz = max_dispclk_mhz ?
+ max_dispclk_mhz :
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+
+ clock_limits[i].dppclk_mhz = max_dppclk_mhz ?
+ max_dppclk_mhz :
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+
+ clock_limits[i].dram_bw_per_chan_gbps =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz =
+ dcn3_51_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+
+ memcpy(dcn3_51_soc.clock_limits, clock_limits,
+ sizeof(dcn3_51_soc.clock_limits));
+
+ if (clk_table->num_entries)
+ dcn3_51_soc.num_states = clk_table->num_entries;
+
+ if (max_dispclk_mhz) {
+ dcn3_51_soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = max_dispclk_mhz * 2;
+ }
+ if ((int)(dcn3_51_soc.dram_clock_change_latency_us * 1000)
+ != dc->debug.dram_clock_change_latency_ns
+ && dc->debug.dram_clock_change_latency_ns) {
+ dcn3_51_soc.dram_clock_change_latency_us =
+ dc->debug.dram_clock_change_latency_ns / 1000.0;
+ }
+
+ if (dc->bb_overrides.dram_clock_change_latency_ns > 0)
+ dcn3_51_soc.dram_clock_change_latency_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_exit_time_ns > 0)
+ dcn3_51_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_time_ns > 0)
+ dcn3_51_soc.sr_enter_plus_exit_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_exit_z8_time_ns > 0)
+ dcn3_51_soc.sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns > 0)
+ dcn3_51_soc.sr_enter_plus_exit_z8_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0;
+
+ /*temp till dml2 fully works without dml1*/
+ dml_init_instance(&dc->dml, &dcn3_51_soc, &dcn3_51_ip,
+ DML_PROJECT_DCN31);
+
+ /*copy to dml2, before dml2_create*/
+ if (clk_table->num_entries > 2) {
+
+ for (i = 0; i < clk_table->num_entries; i++) {
+ dc->dml2_options.bbox_overrides.clks_table.num_states =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz =
+ clock_limits[i].dcfclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz =
+ clock_limits[i].fabricclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz =
+ clock_limits[i].dispclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz =
+ clock_limits[i].dppclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz =
+ clock_limits[i].socclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
+ clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+ dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
+ clock_limits[i].dtbclk_mhz;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
+ clk_table->num_entries;
+ dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
+ clk_table->num_entries;
+ }
+ }
+
+ /* Update latency values */
+ dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = dcn3_51_soc.dram_clock_change_latency_us;
+
+ dc->dml2_options.bbox_overrides.sr_exit_latency_us = dcn3_51_soc.sr_exit_time_us;
+ dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = dcn3_51_soc.sr_enter_plus_exit_time_us;
+
+ dc->dml2_options.bbox_overrides.sr_exit_z8_time_us = dcn3_51_soc.sr_exit_z8_time_us;
+ dc->dml2_options.bbox_overrides.sr_enter_plus_exit_z8_time_us = dcn3_51_soc.sr_enter_plus_exit_z8_time_us;
+}
+
+static bool is_dual_plane(enum surface_pixel_format format)
+{
+ return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
+ format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
+}
+
+/*
+ * micro_sec_to_vert_lines() - converts a time to a number of vertical lines
+ * for the given timing
+ *
+ * @num_us: number of microseconds
+ * @timing: timing that defines the line time
+ * @return: number of vertical lines. If the time does not map to an exact
+ *          number of lines, it is rounded up to guarantee num_us.
+ */
+static unsigned int micro_sec_to_vert_lines(unsigned int num_us, struct dc_crtc_timing *timing)
+{
+ unsigned int num_lines = 0;
+ unsigned int lines_time_in_ns = 1000.0 *
+ (((float)timing->h_total * 1000.0) /
+ ((float)timing->pix_clk_100hz / 10.0));
+
+ num_lines = dml_ceil(1000.0 * num_us / lines_time_in_ns, 1.0);
+
+ return num_lines;
+}
+
+static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing)
+{
+ unsigned int v_active = 0, v_blank = 0, v_back_porch = 0;
+
+ v_active = timing->v_border_top + timing->v_addressable + timing->v_border_bottom;
+ v_blank = timing->v_total - v_active;
+ v_back_porch = v_blank - timing->v_front_porch - timing->v_sync_width;
+
+ return v_back_porch;
+}
+
+int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ bool fast_validate)
+{
+ int i, pipe_cnt;
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *pipe = 0;
+ bool upscaled = false;
+ const unsigned int max_allowed_vblank_nom = 1023;
+
+ dcn31_populate_dml_pipes_from_context(dc, context, pipes,
+ fast_validate);
+
+ for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+ struct dc_crtc_timing *timing;
+ unsigned int num_lines = 0;
+ unsigned int v_back_porch = 0;
+
+ if (!res_ctx->pipe_ctx[i].stream)
+ continue;
+
+ pipe = &res_ctx->pipe_ctx[i];
+ timing = &pipe->stream->timing;
+
+ num_lines = micro_sec_to_vert_lines(dcn3_51_ip.VBlankNomDefaultUS, timing);
+ v_back_porch = get_vertical_back_porch(timing);
+
+ if (pipe->stream->adjust.v_total_max ==
+ pipe->stream->adjust.v_total_min &&
+ pipe->stream->adjust.v_total_min > timing->v_total) {
+ pipes[pipe_cnt].pipe.dest.vtotal =
+ pipe->stream->adjust.v_total_min;
+ pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total -
+ pipes[pipe_cnt].pipe.dest.vactive;
+ }
+
+ pipes[pipe_cnt].pipe.dest.vblank_nom = timing->v_total - pipes[pipe_cnt].pipe.dest.vactive;
+ pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, num_lines);
+ // vblank_nom should not be smaller than (VSync (timing->v_sync_width + v_back_porch) + 2)
+ // + 2 is because
+ // 1 -> VStartup_start should be 1 line before VSync
+ // 1 -> always reserve 1 line between start of vblank to vstartup signal
+ pipes[pipe_cnt].pipe.dest.vblank_nom =
+ max(pipes[pipe_cnt].pipe.dest.vblank_nom, timing->v_sync_width + v_back_porch + 2);
+ pipes[pipe_cnt].pipe.dest.vblank_nom = min(pipes[pipe_cnt].pipe.dest.vblank_nom, max_allowed_vblank_nom);
+
+ if (pipe->plane_state &&
+ (pipe->plane_state->src_rect.height <
+ pipe->plane_state->dst_rect.height ||
+ pipe->plane_state->src_rect.width <
+ pipe->plane_state->dst_rect.width))
+ upscaled = true;
+
+ /*
+ * Immediate flip can be set dynamically after enabling the
+ * plane. We need to require support for immediate flip or
+ * underflow can be intermittently experienced depending on peak
+ * b/w requirements.
+ */
+ pipes[pipe_cnt].pipe.src.immediate_flip = true;
+
+ pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
+
+ DC_FP_START();
+ dcn31_zero_pipe_dcc_fraction(pipes, pipe_cnt);
+ DC_FP_END();
+
+ pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;
+ pipes[pipe_cnt].pipe.src.dcc_rate = 3;
+ pipes[pipe_cnt].dout.dsc_input_bpc = 0;
+ pipes[pipe_cnt].pipe.src.gpuvm_min_page_size_kbytes = 256;
+
+ if (pipes[pipe_cnt].dout.dsc_enable) {
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_888:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ pipes[pipe_cnt].dout.dsc_input_bpc = 12;
+ break;
+ default:
+ ASSERT(0);
+ break;
+ }
+ }
+
+ pipe_cnt++;
+ }
+
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 384;/*per guide*/
+ dc->config.enable_4to1MPC = false;
+
+ if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
+ if (is_dual_plane(pipe->plane_state->format)
+ && pipe->plane_state->src_rect.width <= 1920 &&
+ pipe->plane_state->src_rect.height <= 1080) {
+ dc->config.enable_4to1MPC = true;
+ } else if (!is_dual_plane(pipe->plane_state->format) &&
+ pipe->plane_state->src_rect.width <= 5120) {
+ /*
+ * Limit to 5k max to avoid forced pipe split when there
+ * is not enough detile for swath
+ */
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ pipes[0].pipe.src.unbounded_req_mode = true;
+ }
+ } else if (context->stream_count >=
+ dc->debug.crb_alloc_policy_min_disp_count &&
+ dc->debug.crb_alloc_policy > DET_SIZE_DEFAULT) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes =
+ dc->debug.crb_alloc_policy * 64;
+ } else if (context->stream_count >= 3 && upscaled) {
+ context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
+ }
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->stream)
+ continue;
+
+ if (pipe->stream->signal == SIGNAL_TYPE_EDP &&
+ dc->debug.seamless_boot_odm_combine &&
+ pipe->stream->apply_seamless_boot_optimization) {
+
+ if (pipe->stream->apply_boot_odm_mode ==
+ dm_odm_combine_policy_2to1) {
+ context->bw_ctx.dml.vba.ODMCombinePolicy =
+ dm_odm_combine_policy_2to1;
+ break;
+ }
+ }
+ }
+
+ return pipe_cnt;
+}
+
+void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context)
+{
+ enum dcn_zstate_support_state support = DCN_ZSTATE_SUPPORT_DISALLOW;
+ unsigned int i, plane_count = 0;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].plane_state)
+ plane_count++;
+ }
+
+ /*dcn351 does not support z9/z10*/
+ if (context->stream_count == 0 || plane_count == 0) {
+ support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
+ } else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
+ struct dc_link *link = context->streams[0]->sink->link;
+ bool is_pwrseq0 = link && link->link_index == 0;
+ bool is_psr = (link && (link->psr_settings.psr_version == DC_PSR_VERSION_1 ||
+ link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) && !link->panel_config.psr.disable_psr);
+ bool is_replay = link && link->replay_settings.replay_feature_enabled;
+		int minimum_z8_residency =
+			dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
+		bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minimum_z8_residency;
+
+		/* For PSR1/PSR-SU we allow Z8 and Z10 based on latency; for Replay with IPS enabled, it will enter IPS2 */
+		if (is_pwrseq0 && (is_psr || is_replay))
+			support = allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY : DCN_ZSTATE_SUPPORT_DISALLOW;
+ }
+ context->bw_ctx.bw.dcn.clk.zstate_support = support;
+}
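+
+/*
+ * Illustration only, not part of the driver: a worked pass through the logic
+ * above with assumed numbers. One eDP stream on link 0 (is_pwrseq0) with PSR1
+ * enabled, no debug override, and dml.vba.StutterPeriod = 1500.0 us gives
+ * allow_z8 = (1500.0 > 1000), so support becomes
+ * DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY; with StutterPeriod = 800.0 us it would
+ * remain DCN_ZSTATE_SUPPORT_DISALLOW.
+ */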
diff --git a/rr-cache/4d899fd0f8e7209e3b955fcd705fe7027a7d98c9/postimage.1 b/rr-cache/4d899fd0f8e7209e3b955fcd705fe7027a7d98c9/postimage.1
new file mode 100644
index 000000000000..c4dc5881d8df
--- /dev/null
+++ b/rr-cache/4d899fd0f8e7209e3b955fcd705fe7027a7d98c9/postimage.1
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SMU_V14_0_0_PPSMC_H__
+#define __SMU_V14_0_0_PPSMC_H__
+
+/*! @mainpage PMFW-PPS (PPLib) Message Interface
+ This documentation contains the subsections:\n\n
+ @ref ResponseCodes\n
+ @ref definitions\n
+ @ref enums\n
+*/
+
+/** @def PPS_PMFW_IF_VER
+* PPS (PPLib) to PMFW IF version 1.0
+*/
+#define PPS_PMFW_IF_VER "1.0" ///< Major.Minor
+
+/** @defgroup ResponseCodes PMFW Response Codes
+* @{
+*/
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1 ///< Message Response OK
+#define PPSMC_Result_Failed 0xFF ///< Message Response Failed
+#define PPSMC_Result_UnknownCmd 0xFE ///< Message Response Unknown Command
+#define PPSMC_Result_CmdRejectedPrereq 0xFD ///< Message Response Command Failed Prerequisite
+#define PPSMC_Result_CmdRejectedBusy    0xFC  ///< Message Response Command Rejected because PMFW is busy; sender should retry this message
+/** @}*/
+
+/** @defgroup definitions Message definitions
+* @{
+*/
+// Message Definitions:
+#define PPSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
+#define PPSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version
+#define PPSMC_MSG_GetDriverIfVersion 0x03 ///< Get PMFW_DRIVER_IF version
+#define PPSMC_MSG_PowerDownVcn1 0x04 ///< Power down VCN1
+#define PPSMC_MSG_PowerUpVcn1 0x05 ///< Power up VCN1; VCN1 is power gated by default
+#define PPSMC_MSG_PowerDownVcn0 0x06 ///< Power down VCN0
+#define PPSMC_MSG_PowerUpVcn0 0x07 ///< Power up VCN0; VCN0 is power gated by default
+#define PPSMC_MSG_SetHardMinVcn0 0x08 ///< For wireless display
+#define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz
+#define PPSMC_MSG_SetHardMinVcn1 0x0A ///< For wireless display
+#define PPSMC_MSG_SetSoftMinVcn1 0x0B ///< Set soft min for VCN1 clocks (VCLK1 and DCLK1)
+#define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload
+#define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer
+#define PPSMC_MSG_SetDriverDramAddrLow 0x0E ///< Set low 32 bits of DRAM address for Driver table transfer
+#define PPSMC_MSG_TransferTableSmu2Dram 0x0F ///< Transfer driver interface table from PMFW SRAM to DRAM
+#define PPSMC_MSG_TransferTableDram2Smu 0x10 ///< Transfer driver interface table from DRAM to PMFW SRAM
+#define PPSMC_MSG_GfxDeviceDriverReset 0x11 ///< Request GFX mode 2 reset
+#define PPSMC_MSG_GetEnabledSmuFeatures 0x12 ///< Get enabled features in PMFW
+#define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK
+#define PPSMC_MSG_SetSoftMinFclk                0x14  ///< Set soft min for FCLK
+#define PPSMC_MSG_SetSoftMinVcn0 0x15 ///< Set soft min for VCN0 clocks (VCLK0 and DCLK0)
+#define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU
+#define PPSMC_MSG_spare_0x17 0x17 ///< Get GFX clock frequency
+#define PPSMC_MSG_spare_0x18 0x18 ///< Get FCLK frequency
+#define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry
+#define PPSMC_MSG_DisallowGfxOff 0x1A ///< Inform PMFW of disallowing GFXOFF entry
+#define PPSMC_MSG_SetSoftMaxGfxClk 0x1B ///< Set soft max for GFX CLK
+#define PPSMC_MSG_SetHardMinGfxClk 0x1C ///< Set hard min for GFX CLK
+#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x1D ///< Set soft max for SOC CLK
+#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x1E ///< Set soft max for FCLK
+#define PPSMC_MSG_SetSoftMaxVcn0 0x1F ///< Set soft max for VCN0 clocks (VCLK0 and DCLK0)
+#define PPSMC_MSG_spare_0x20 0x20 ///< Set power limit percentage
+#define PPSMC_MSG_PowerDownJpeg0 0x21 ///< Power down Jpeg of VCN0
+#define PPSMC_MSG_PowerUpJpeg0 0x22 ///< Power up Jpeg of VCN0; VCN0 is power gated by default
+#define PPSMC_MSG_SetHardMinFclkByFreq 0x23 ///< Set hard min for FCLK
+#define PPSMC_MSG_SetSoftMinSocclkByFreq 0x24 ///< Set soft min for SOC CLK
+#define PPSMC_MSG_AllowZstates                  0x25  ///< Inform PMFW of allowing Zstate entry, i.e. no Miracast activity
+#define PPSMC_MSG_PowerDownJpeg1 0x26 ///< Power down Jpeg of VCN1
+#define PPSMC_MSG_PowerUpJpeg1 0x27 ///< Power up Jpeg of VCN1; VCN1 is power gated by default
+#define PPSMC_MSG_SetSoftMaxVcn1 0x28 ///< Set soft max for VCN1 clocks (VCLK1 and DCLK1)
+#define PPSMC_MSG_PowerDownIspByTile 0x29 ///< ISP is power gated by default
+#define PPSMC_MSG_PowerUpIspByTile 0x2A ///< This message is used to power up ISP tiles and enable the ISP DPM
+#define PPSMC_MSG_SetHardMinIspiclkByFreq 0x2B ///< Set HardMin by frequency for ISPICLK
+#define PPSMC_MSG_SetHardMinIspxclkByFreq 0x2C ///< Set HardMin by frequency for ISPXCLK
+#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN0.UMSCH (aka VSCH) scheduler
+#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN0.UMSCH (aka VSCH) scheduler
+#define PPSMC_Message_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis
+#define PPSMC_Message_IspStutterOff_MmhubPgEn   0x30  ///< ISP StutterOff mmHub PgEn
+#define PPSMC_MSG_PowerUpVpe 0x31 ///< Power up VPE
+#define PPSMC_MSG_PowerDownVpe 0x32 ///< Power down VPE
+#define PPSMC_MSG_GetVpeDpmTable 0x33 ///< Get VPE DPM table
+#define PPSMC_MSG_EnableLSdma 0x34 ///< Enable LSDMA
+#define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA
+#define PPSMC_MSG_SetSoftMaxVpe                 0x36  ///< Set soft max for VPE clock
+#define PPSMC_MSG_SetSoftMinVpe                 0x37  ///< Set soft min for VPE clock
+#define PPSMC_MSG_AllocMALLCache 0x38 ///< Allocating MALL Cache
+#define PPSMC_MSG_ReleaseMALLCache 0x39 ///< Releasing MALL Cache
+#define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages
+/** @}*/
+
+/**
+* @defgroup enums Enum Definitions
+* @{
+*/
+
+/** @enum Mode_Reset_e
+* Mode reset type, argument for PPSMC_MSG_GfxDeviceDriverReset
+*/
+//argument for PPSMC_MSG_GfxDeviceDriverReset
+typedef enum {
+ MODE1_RESET = 1, ///< Mode reset type 1
+ MODE2_RESET = 2 ///< Mode reset type 2
+} Mode_Reset_e;
+
+/** @}*/
+
+/** @enum ZStates_e
+* Zstate types, argument for PPSMC_MSG_AllowZstates
+*/
+//Argument for PPSMC_MSG_AllowZstates
+typedef enum {
+ DISALLOW_ZSTATES = 0, ///< Disallow Zstates
+ ALLOW_ZSTATES_Z8 = 8, ///< Allows Z8 only
+ ALLOW_ZSTATES_Z9 = 9, ///< Allows Z9 and Z8
+} ZStates_e;
+
+/** @}*/
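+
+/*
+ * Hedged usage sketch, not part of this header: the helper name below is a
+ * hypothetical stand-in for the driver's SMU messaging layer. A sender
+ * passes one of the message indices above plus an argument, then polls the
+ * response register, retrying on PPSMC_Result_CmdRejectedBusy as the
+ * response-code comments require. For example:
+ *
+ *   resp = smu_send_msg_with_param(smu, PPSMC_MSG_AllowZstates,
+ *                                  ALLOW_ZSTATES_Z8);
+ *   if (resp == PPSMC_Result_CmdRejectedBusy)
+ *       resp = smu_send_msg_with_param(smu, PPSMC_MSG_AllowZstates,
+ *                                      ALLOW_ZSTATES_Z8);
+ */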
+#endif
diff --git a/rr-cache/4d899fd0f8e7209e3b955fcd705fe7027a7d98c9/preimage.1 b/rr-cache/4d899fd0f8e7209e3b955fcd705fe7027a7d98c9/preimage.1
new file mode 100644
index 000000000000..330fd8d2b1bd
--- /dev/null
+++ b/rr-cache/4d899fd0f8e7209e3b955fcd705fe7027a7d98c9/preimage.1
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SMU_V14_0_0_PPSMC_H__
+#define __SMU_V14_0_0_PPSMC_H__
+
+/*! @mainpage PMFW-PPS (PPLib) Message Interface
+ This documentation contains the subsections:\n\n
+ @ref ResponseCodes\n
+ @ref definitions\n
+ @ref enums\n
+*/
+
+/** @def PPS_PMFW_IF_VER
+* PPS (PPLib) to PMFW IF version 1.0
+*/
+#define PPS_PMFW_IF_VER "1.0" ///< Major.Minor
+
+/** @defgroup ResponseCodes PMFW Response Codes
+* @{
+*/
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1 ///< Message Response OK
+#define PPSMC_Result_Failed 0xFF ///< Message Response Failed
+#define PPSMC_Result_UnknownCmd 0xFE ///< Message Response Unknown Command
+#define PPSMC_Result_CmdRejectedPrereq 0xFD ///< Message Response Command Failed Prerequisite
+#define PPSMC_Result_CmdRejectedBusy    0xFC  ///< Message Response Command Rejected because PMFW is busy; sender should retry this message
+/** @}*/
+
+/** @defgroup definitions Message definitions
+* @{
+*/
+// Message Definitions:
+#define PPSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. Requirement specified by PMFW team
+#define PPSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version
+#define PPSMC_MSG_GetDriverIfVersion 0x03 ///< Get PMFW_DRIVER_IF version
+#define PPSMC_MSG_PowerDownVcn1 0x04 ///< Power down VCN1
+#define PPSMC_MSG_PowerUpVcn1 0x05 ///< Power up VCN1; VCN1 is power gated by default
+#define PPSMC_MSG_PowerDownVcn0 0x06 ///< Power down VCN0
+#define PPSMC_MSG_PowerUpVcn0 0x07 ///< Power up VCN0; VCN0 is power gated by default
+#define PPSMC_MSG_SetHardMinVcn0 0x08 ///< For wireless display
+#define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz
+#define PPSMC_MSG_SetHardMinVcn1 0x0A ///< For wireless display
+#define PPSMC_MSG_SetSoftMinVcn1 0x0B ///< Set soft min for VCN1 clocks (VCLK1 and DCLK1)
+#define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload
+#define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer
+#define PPSMC_MSG_SetDriverDramAddrLow 0x0E ///< Set low 32 bits of DRAM address for Driver table transfer
+#define PPSMC_MSG_TransferTableSmu2Dram 0x0F ///< Transfer driver interface table from PMFW SRAM to DRAM
+#define PPSMC_MSG_TransferTableDram2Smu 0x10 ///< Transfer driver interface table from DRAM to PMFW SRAM
+#define PPSMC_MSG_GfxDeviceDriverReset 0x11 ///< Request GFX mode 2 reset
+#define PPSMC_MSG_GetEnabledSmuFeatures 0x12 ///< Get enabled features in PMFW
+#define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK
+#define PPSMC_MSG_SetSoftMinFclk                0x14  ///< Set soft min for FCLK
+#define PPSMC_MSG_SetSoftMinVcn0 0x15 ///< Set soft min for VCN0 clocks (VCLK0 and DCLK0)
+<<<<<<<
+=======
+
+>>>>>>>
+#define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU
+#define PPSMC_MSG_spare_0x17 0x17 ///< Get GFX clock frequency
+#define PPSMC_MSG_spare_0x18 0x18 ///< Get FCLK frequency
+#define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry
+#define PPSMC_MSG_DisallowGfxOff 0x1A ///< Inform PMFW of disallowing GFXOFF entry
+#define PPSMC_MSG_SetSoftMaxGfxClk 0x1B ///< Set soft max for GFX CLK
+#define PPSMC_MSG_SetHardMinGfxClk 0x1C ///< Set hard min for GFX CLK
+#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x1D ///< Set soft max for SOC CLK
+#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x1E ///< Set soft max for FCLK
+#define PPSMC_MSG_SetSoftMaxVcn0 0x1F ///< Set soft max for VCN0 clocks (VCLK0 and DCLK0)
+<<<<<<<
+#define PPSMC_MSG_spare_0x20 0x20
+#define PPSMC_MSG_PowerDownJpeg0 0x21 ///< Power down Jpeg of VCN0
+#define PPSMC_MSG_PowerUpJpeg0 0x22 ///< Power up Jpeg of VCN0; VCN0 is power gated by default
+
+=======
+#define PPSMC_MSG_spare_0x20 0x20 ///< Set power limit percentage
+#define PPSMC_MSG_PowerDownJpeg0 0x21 ///< Power down Jpeg of VCN0
+#define PPSMC_MSG_PowerUpJpeg0 0x22 ///< Power up Jpeg of VCN0; VCN0 is power gated by default
+>>>>>>>
+#define PPSMC_MSG_SetHardMinFclkByFreq 0x23 ///< Set hard min for FCLK
+#define PPSMC_MSG_SetSoftMinSocclkByFreq 0x24 ///< Set soft min for SOC CLK
+#define PPSMC_MSG_AllowZstates                  0x25  ///< Inform PMFW of allowing Zstate entry, i.e. no Miracast activity
+#define PPSMC_MSG_PowerDownJpeg1 0x26 ///< Power down Jpeg of VCN1
+#define PPSMC_MSG_PowerUpJpeg1 0x27 ///< Power up Jpeg of VCN1; VCN1 is power gated by default
+#define PPSMC_MSG_SetSoftMaxVcn1 0x28 ///< Set soft max for VCN1 clocks (VCLK1 and DCLK1)
+#define PPSMC_MSG_PowerDownIspByTile 0x29 ///< ISP is power gated by default
+#define PPSMC_MSG_PowerUpIspByTile 0x2A ///< This message is used to power up ISP tiles and enable the ISP DPM
+#define PPSMC_MSG_SetHardMinIspiclkByFreq 0x2B ///< Set HardMin by frequency for ISPICLK
+#define PPSMC_MSG_SetHardMinIspxclkByFreq 0x2C ///< Set HardMin by frequency for ISPXCLK
+#define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN0.UMSCH (aka VSCH) scheduler
+#define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN0.UMSCH (aka VSCH) scheduler
+#define PPSMC_Message_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis
+#define PPSMC_Message_IspStutterOff_MmhubPgEn   0x30  ///< ISP StutterOff mmHub PgEn
+#define PPSMC_MSG_PowerUpVpe 0x31 ///< Power up VPE
+#define PPSMC_MSG_PowerDownVpe 0x32 ///< Power down VPE
+#define PPSMC_MSG_GetVpeDpmTable 0x33 ///< Get VPE DPM table
+#define PPSMC_MSG_EnableLSdma 0x34 ///< Enable LSDMA
+#define PPSMC_MSG_DisableLSdma 0x35 ///< Disable LSDMA
+#define PPSMC_MSG_SetSoftMaxVpe                 0x36  ///< Set soft max for VPE clock
+#define PPSMC_MSG_SetSoftMinVpe                 0x37  ///< Set soft min for VPE clock
+#define PPSMC_MSG_AllocMALLCache 0x38 ///< Allocating MALL Cache
+#define PPSMC_MSG_ReleaseMALLCache 0x39 ///< Releasing MALL Cache
+#define PPSMC_Message_Count 0x3A ///< Total number of PPSMC messages
+/** @}*/
+
+/**
+* @defgroup enums Enum Definitions
+* @{
+*/
+
+/** @enum Mode_Reset_e
+* Mode reset type, argument for PPSMC_MSG_GfxDeviceDriverReset
+*/
+//argument for PPSMC_MSG_GfxDeviceDriverReset
+typedef enum {
+ MODE1_RESET = 1, ///< Mode reset type 1
+ MODE2_RESET = 2 ///< Mode reset type 2
+} Mode_Reset_e;
+
+/** @}*/
+
+/** @enum ZStates_e
+* Zstate types, argument for PPSMC_MSG_AllowZstates
+*/
+//Argument for PPSMC_MSG_AllowZstates
+typedef enum {
+ DISALLOW_ZSTATES = 0, ///< Disallow Zstates
+ ALLOW_ZSTATES_Z8 = 8, ///< Allows Z8 only
+ ALLOW_ZSTATES_Z9 = 9, ///< Allows Z9 and Z8
+} ZStates_e;
+
+/** @}*/
+#endif
diff --git a/rr-cache/b0f66c584ddead21f2267ca9063b2ea9c387bb2d/postimage.1 b/rr-cache/b0f66c584ddead21f2267ca9063b2ea9c387bb2d/postimage.1
new file mode 100644
index 000000000000..6c9b4e6491a5
--- /dev/null
+++ b/rr-cache/b0f66c584ddead21f2267ca9063b2ea9c387bb2d/postimage.1
@@ -0,0 +1,1166 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dcn35_clk_mgr.h"
+
+#include "dccg.h"
+#include "clk_mgr_internal.h"
+
+// For dce12_get_dp_ref_freq_khz
+#include "dce100/dce_clk_mgr.h"
+
+// For dcn20_update_clocks_update_dpp_dto
+#include "dcn20/dcn20_clk_mgr.h"
+
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dcn35_smu.h"
+#include "dm_helpers.h"
+
+/* TODO: remove this include once we have ported over the remaining clk mgr functions */
+#include "dcn30/dcn30_clk_mgr.h"
+#include "dcn31/dcn31_clk_mgr.h"
+
+#include "dc_dmub_srv.h"
+#include "link.h"
+#include "logger_types.h"
+
+#undef DC_LOGGER
+#define DC_LOGGER \
+ clk_mgr->base.base.ctx->logger
+
+#define regCLK1_CLK_PLL_REQ 0x0237
+#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
+
+#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
+#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
+#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
+#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
+#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
+#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+
+#define regCLK1_CLK2_BYPASS_CNTL 0x029c
+#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0
+
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
+
+#define regCLK5_0_CLK5_spll_field_8 0x464b
+#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0
+
+#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd
+#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+
+#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
+
+#define REG(reg_name) \
+ (ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+
+#define TO_CLK_MGR_DCN35(clk_mgr)\
+ container_of(clk_mgr, struct clk_mgr_dcn35, base)
+
+static int dcn35_get_active_display_cnt_wa(
+ struct dc *dc,
+ struct dc_state *context,
+ int *all_active_disps)
+{
+ int i, display_count = 0;
+ bool tmds_present = false;
+
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_stream_state *stream = context->streams[i];
+
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+ stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
+ stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+ tmds_present = true;
+ }
+
+ for (i = 0; i < dc->link_count; i++) {
+ const struct dc_link *link = dc->links[i];
+
+ /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
+ if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ display_count++;
+ }
+ if (all_active_disps != NULL)
+ *all_active_disps = display_count;
+	/* WA for hang on HDMI after the display is turned off and back on */
+ if (display_count == 0 && tmds_present)
+ display_count = 1;
+
+ return display_count;
+}
+
+static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+ bool safe_to_lower, bool disable)
+{
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+ struct pipe_ctx *pipe = safe_to_lower
+ ? &context->res_ctx.pipe_ctx[i]
+ : &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->top_pipe || pipe->prev_odm_pipe)
+ continue;
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
+ !pipe->stream->link_enc)) {
+ if (disable) {
+ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+
+ reset_sync_context_for_pipe(dc, context, i);
+ } else {
+ pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+ }
+ }
+ }
+}
+
+static void dcn35_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context,
+ int ref_dtbclk_khz)
+{
+ struct dccg *dccg = clk_mgr->dccg;
+ uint32_t tg_mask = 0;
+ int i;
+
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct dtbclk_dto_params dto_params = {0};
+
+ /* use mask to program DTO once per tg */
+ if (pipe_ctx->stream_res.tg &&
+ !(tg_mask & (1 << pipe_ctx->stream_res.tg->inst))) {
+ tg_mask |= (1 << pipe_ctx->stream_res.tg->inst);
+
+ dto_params.otg_inst = pipe_ctx->stream_res.tg->inst;
+ dto_params.ref_dtbclk_khz = ref_dtbclk_khz;
+
+ dccg->funcs->set_dtbclk_dto(clk_mgr->dccg, &dto_params);
+ //dccg->funcs->set_audio_dtbclk_dto(clk_mgr->dccg, &dto_params);
+ }
+ }
+}
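+
+/*
+ * Example, assuming two ODM pipes that share timing generator inst 0: the
+ * first pipe sets bit 0 of tg_mask and programs the DTBCLK DTO; the second
+ * pipe finds the bit already set and is skipped, so each timing generator
+ * is programmed exactly once per call.
+ */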
+
+static void dcn35_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context, bool safe_to_lower)
+{
+ int i;
+ bool dppclk_active[MAX_PIPES] = {0};
+
+
+ clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ int dpp_inst = 0, dppclk_khz, prev_dppclk_khz;
+
+ dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+
+ if (context->res_ctx.pipe_ctx[i].plane_res.dpp)
+ dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
+ else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz == 0) {
+			/* dpp == NULL && dppclk_khz == 0 is valid because of pipe
+			 * harvesting; in this case just continue the loop.
+			 */
+ continue;
+ } else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz > 0) {
+ /* The software state is not valid if dpp resource is NULL and
+ * dppclk_khz > 0.
+ */
+ ASSERT(false);
+ continue;
+ }
+
+ prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];
+
+ if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
+ clk_mgr->dccg->funcs->update_dpp_dto(
+ clk_mgr->dccg, dpp_inst, dppclk_khz);
+ dppclk_active[dpp_inst] = true;
+ }
+ if (safe_to_lower)
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ struct dpp *old_dpp = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.dpp;
+
+ if (old_dpp && !dppclk_active[old_dpp->inst])
+ clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, old_dpp->inst, 0);
+ }
+}
+
+void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ union dmub_rb_cmd cmd;
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ int display_count = 0;
+ bool update_dppclk = false;
+ bool update_dispclk = false;
+ bool dpp_clock_lowered = false;
+ int all_active_disps = 0;
+
+ if (dc->work_arounds.skip_clock_update)
+ return;
+
+ display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
+ if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
+ new_clocks->ref_dtbclk_khz = 600000;
+
+	/*
+	 * If it is safe to lower but we are already in the lower state, there is
+	 * nothing to do; if safe_to_lower is false, we simply go to the higher state.
+	 */
+ if (safe_to_lower) {
+ if (new_clocks->zstate_support != DCN_ZSTATE_SUPPORT_DISALLOW &&
+ new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
+ dcn35_smu_set_zstate_support(clk_mgr, new_clocks->zstate_support);
+ dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, true);
+ clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
+ }
+
+ if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
+ if (clk_mgr->base.ctx->dc->config.allow_0_dtb_clk)
+ dcn35_smu_set_dtbclk(clk_mgr, false);
+ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+ }
+ /* check that we're not already in lower */
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
+ /* if we can go lower, go lower */
+ if (display_count == 0)
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+ }
+ } else {
+ if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
+ new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
+ dcn35_smu_set_zstate_support(clk_mgr, DCN_ZSTATE_SUPPORT_DISALLOW);
+ dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, false);
+ clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
+ }
+
+ if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
+ dcn35_smu_set_dtbclk(clk_mgr, true);
+ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+
+ dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
+ clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
+ }
+
+ /* check that we're not already in D0 */
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
+ union display_idle_optimization_u idle_info = { 0 };
+
+ dcn35_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
+ }
+ }
+ if (dc->debug.force_min_dcfclk_mhz > 0)
+ new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
+ new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
+
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
+ clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+ dcn35_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
+ }
+
+ if (should_set_clock(safe_to_lower,
+ new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
+ clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+ dcn35_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
+ }
+
+	// Workaround: limit dppclk to a 100 MHz minimum to avoid underflow when switching from a lower-mode eDP panel to eDP plus a 4K monitor.
+ if (new_clocks->dppclk_khz < 100000)
+ new_clocks->dppclk_khz = 100000;
+
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
+ if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+ dpp_clock_lowered = true;
+ clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
+ update_dppclk = true;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+ dcn35_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+ dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
+
+ update_dispclk = true;
+ }
+
+ /* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
+ if (!dc->debug.disable_dtb_ref_clk_switch &&
+ should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000,
+ clk_mgr_base->clks.ref_dtbclk_khz / 1000)) {
+ dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
+ clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
+ }
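+	/*
+	 * Worked example of the MHz rounding above, with assumed values: a
+	 * current ref_dtbclk of 599400 kHz and a new request of 599900 kHz
+	 * both divide down to 599 MHz, so no DTO reprogramming is issued.
+	 */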
+
+ if (dpp_clock_lowered) {
+ // increase per DPP DTO before lowering global dppclk
+ dcn35_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn35_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
+ } else {
+ // increase global DPPCLK before lowering per DPP DTO
+ if (update_dppclk || update_dispclk)
+ dcn35_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
+ dcn35_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ }
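+	/*
+	 * Ordering rationale for the branches above: the intent is to never
+	 * leave a pipe with a lower effective DPP clock than it needs during
+	 * the transition, so the per-pipe DTOs are raised before the global
+	 * DPPCLK drops, and the global DPPCLK rises before the DTOs relax.
+	 */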
+
+ // notify DMCUB of latest clocks
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.notify_clocks.header.type = DMUB_CMD__CLK_MGR;
+ cmd.notify_clocks.header.sub_type = DMUB_CMD__CLK_MGR_NOTIFY_CLOCKS;
+ cmd.notify_clocks.clocks.dcfclk_khz = clk_mgr_base->clks.dcfclk_khz;
+ cmd.notify_clocks.clocks.dcfclk_deep_sleep_khz =
+ clk_mgr_base->clks.dcfclk_deep_sleep_khz;
+ cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
+ cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
+{
+ /* get FbMult value */
+ struct fixed31_32 pll_req;
+ unsigned int fbmult_frac_val = 0;
+ unsigned int fbmult_int_val = 0;
+ struct dc_context *ctx = clk_mgr->base.ctx;
+
+	/*
+	 * The register value of fbmult is in 8.16 format; convert it to 31.32
+	 * format to leverage the fixed-point operations available in the driver.
+	 */
+
+ REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/
+ REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */
+
+ pll_req = dc_fixpt_from_int(fbmult_int_val);
+
+	/*
+	 * Since the fractional part is only 16 bits in the register definition
+	 * but 32 bits in our fixed-point definition, shift left by 16 to obtain
+	 * the correct value.
+	 */
+ pll_req.value |= fbmult_frac_val << 16;
+
+ /* multiply by REFCLK period */
+ pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
+
+ /* integer part is now VCO frequency in kHz */
+ return dc_fixpt_floor(pll_req);
+}
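+
+/*
+ * Worked example with assumed register values: FbMult_int = 0x78 (120) and
+ * FbMult_frac = 0x8000 (0.5 in 8.16 format) give pll_req = 120.5 once
+ * shifted into 31.32 format. With dfs_ref_freq_khz = 48000 (as set in
+ * dcn35_clk_mgr_construct below), the VCO frequency is
+ * floor(120.5 * 48000) = 5784000 kHz.
+ */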
+
+static void dcn35_enable_pme_wa(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ dcn35_smu_enable_pme_wa(clk_mgr);
+}
+
+
+bool dcn35_are_clock_states_equal(struct dc_clocks *a,
+ struct dc_clocks *b)
+{
+ if (a->dispclk_khz != b->dispclk_khz)
+ return false;
+ else if (a->dppclk_khz != b->dppclk_khz)
+ return false;
+ else if (a->dcfclk_khz != b->dcfclk_khz)
+ return false;
+ else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
+ return false;
+ else if (a->zstate_support != b->zstate_support)
+ return false;
+ else if (a->dtbclk_en != b->dtbclk_en)
+ return false;
+
+ return true;
+}
+
+static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+ struct clk_mgr_dcn35 *clk_mgr)
+{
+}
+
+static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc_context *ctx = clk_mgr->base.ctx;
+ uint32_t ssc_enable;
+
+ REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable);
+
+ return ssc_enable == 1;
+}
+
+static void init_clk_states(struct clk_mgr *clk_mgr)
+{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+
+ if (clk_mgr_int->smu_ver >= SMU_VER_THRESHOLD)
+ clk_mgr->clks.dtbclk_en = true; // request DTBCLK disable on first commit
+ clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+ clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+}
+
+void dcn35_init_clocks(struct clk_mgr *clk_mgr)
+{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ init_clk_states(clk_mgr);
+
+	// Adjust the dp_dto reference clock if SSC is enabled; otherwise use dprefclk.
+ if (dcn35_is_spll_ssc_enabled(clk_mgr))
+ clk_mgr->dp_dto_source_clock_in_khz =
+ dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
+ else
+ clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+
+}
+static struct clk_bw_params dcn35_bw_params = {
+ .vram_type = Ddr4MemType,
+ .num_channels = 1,
+ .clk_table = {
+ .num_entries = 4,
+ },
+
+};
+
+static struct wm_table ddr5_wm_table = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .valid = true,
+ },
+ }
+};
+
+static struct wm_table lpddr5_wm_table = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .valid = true,
+ },
+ }
+};
+
+static DpmClocks_t_dcn35 dummy_clocks;
+
+static struct dcn35_watermarks dummy_wms = { 0 };
+
+static struct dcn35_ss_info_table ss_info_table = {
+ .ss_divider = 1000,
+ .ss_percentage = {0, 0, 375, 375, 375}
+};
+
+static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
+{
+ struct dc_context *ctx = clk_mgr->base.ctx;
+ uint32_t clock_source;
+
+ REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
+ // If it's DFS mode, clock_source is 0.
+ if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
+ clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
+
+ if (clk_mgr->dprefclk_ss_percentage != 0) {
+ clk_mgr->ss_on_dprefclk = true;
+ clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
+ }
+ }
+}
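+
+/*
+ * Example, assuming CLK2_BYPASS_SEL reads back 2 with SPLL SSC enabled:
+ * index 2 of ss_percentage selects 375 with ss_divider = 1000, which the
+ * DP DTO adjustment treats as a 0.375% spread (assuming the usual
+ * percentage/divider scaling); a DFS selection of 0 reads entry 0 and
+ * leaves the SSC compensation disabled.
+ */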
+
+static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
+{
+ int i, num_valid_sets;
+
+ num_valid_sets = 0;
+
+ for (i = 0; i < WM_SET_COUNT; i++) {
+		/* skip empty entries, the smu array has no holes */
+ if (!bw_params->wm_table.entries[i].valid)
+ continue;
+
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;
+ /* We will not select WM based on fclk, so leave it as unconstrained */
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
+
+ if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) {
+ if (i == 0)
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0;
+ else {
+ /* add 1 to make it non-overlapping with next lvl */
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
+ bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
+ }
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
+ bw_params->clk_table.entries[i].dcfclk_mhz;
+
+ } else {
+ /* unconstrained for memory retraining */
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
+
+ /* Modify previous watermark range to cover up to max */
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
+ }
+ num_valid_sets++;
+ }
+
+ ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */
+
+	/* modify the min and max to make sure we cover the whole range */
+ table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
+ table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
+
+	/* This is for writeback only; it does not matter currently as there is no writeback support */
+ table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
+ table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
+ table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
+ table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
+ table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
+}
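+
+/*
+ * Example with an assumed three-entry clk_table (dcfclk_mhz = {400, 600,
+ * 800}, all WM_TYPE_PSTATE_CHG): the loop above yields WM_A with MinMclk
+ * 0..400, WM_B with 401..600 and WM_C with 601..800, after which the
+ * fix-up widens the first MinMclk to 0 and the last MaxMclk to 0xFFFF so
+ * the whole clock range is covered without overlap.
+ */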
+
+static void dcn35_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct clk_mgr_dcn35 *clk_mgr_dcn35 = TO_CLK_MGR_DCN35(clk_mgr);
+ struct dcn35_watermarks *table = clk_mgr_dcn35->smu_wm_set.wm_set;
+
+ if (!clk_mgr->smu_ver)
+ return;
+
+ if (!table || clk_mgr_dcn35->smu_wm_set.mc_address.quad_part == 0)
+ return;
+
+ memset(table, 0, sizeof(*table));
+
+ dcn35_build_watermark_ranges(clk_mgr_base->bw_params, table);
+
+ dcn35_smu_set_dram_addr_high(clk_mgr,
+ clk_mgr_dcn35->smu_wm_set.mc_address.high_part);
+ dcn35_smu_set_dram_addr_low(clk_mgr,
+ clk_mgr_dcn35->smu_wm_set.mc_address.low_part);
+ dcn35_smu_transfer_wm_table_dram_2_smu(clk_mgr);
+}
+
+static void dcn35_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
+ struct dcn35_smu_dpm_clks *smu_dpm_clks)
+{
+ DpmClocks_t_dcn35 *table = smu_dpm_clks->dpm_clks;
+
+ if (!clk_mgr->smu_ver)
+ return;
+
+ if (!table || smu_dpm_clks->mc_address.quad_part == 0)
+ return;
+
+ memset(table, 0, sizeof(*table));
+
+ dcn35_smu_set_dram_addr_high(clk_mgr,
+ smu_dpm_clks->mc_address.high_part);
+ dcn35_smu_set_dram_addr_low(clk_mgr,
+ smu_dpm_clks->mc_address.low_part);
+ dcn35_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
+}
+
+static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
+{
+ uint32_t max = 0;
+ int i;
+
+ for (i = 0; i < num_clocks; ++i) {
+ if (clocks[i] > max)
+ max = clocks[i];
+ }
+
+ return max;
+}
+
+static inline bool is_valid_clock_value(uint32_t clock_value)
+{
+ return clock_value > 1 && clock_value < 100000;
+}
+
+static unsigned int convert_wck_ratio(uint8_t wck_ratio)
+{
+ switch (wck_ratio) {
+ case WCK_RATIO_1_2:
+ return 2;
+
+ case WCK_RATIO_1_4:
+ return 4;
+ /* Find lowest DPM, FCLK is filled in reverse order*/
+
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+static inline uint32_t calc_dram_speed_mts(const MemPstateTable_t *entry)
+{
+ return entry->UClk * convert_wck_ratio(entry->WckRatio) * 2;
+}
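+
+/*
+ * Worked example with an assumed LPDDR5 P-state entry: UClk = 937 (MHz)
+ * with WckRatio = WCK_RATIO_1_4 gives 937 * 4 * 2 = 7496 MT/s; an entry
+ * whose ratio falls through to the default of 1 would give UClk * 2.
+ */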
+
+static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
+ struct integrated_info *bios_info,
+ DpmClocks_t_dcn35 *clock_table)
+{
+ struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
+ struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
+ uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
+ uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0;
+ uint32_t num_memps, num_fclk, num_dcfclk;
+ int i;
+
+ /* Determine min/max p-state values. */
+ num_memps = (clock_table->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS) ? NUM_MEM_PSTATE_LEVELS :
+ clock_table->NumMemPstatesEnabled;
+ for (i = 0; i < num_memps; i++) {
+ uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
+
+ if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) {
+ max_dram_speed_mts = dram_speed_mts;
+ max_pstate = i;
+ }
+ }
+
+ min_dram_speed_mts = max_dram_speed_mts;
+ min_pstate = max_pstate;
+
+ for (i = 0; i < num_memps; i++) {
+ uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
+
+ if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) {
+ min_dram_speed_mts = dram_speed_mts;
+ min_pstate = i;
+ }
+ }
+
+ /* We expect the table to contain at least one valid P-state entry. */
+ ASSERT(clock_table->NumMemPstatesEnabled &&
+ is_valid_clock_value(max_dram_speed_mts) &&
+ is_valid_clock_value(min_dram_speed_mts));
+
+ /* dispclk and dppclk can be max at any voltage, same number of levels for both */
+ if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
+ clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
+ max_dispclk = find_max_clk_value(clock_table->DispClocks,
+ clock_table->NumDispClkLevelsEnabled);
+ max_dppclk = find_max_clk_value(clock_table->DppClocks,
+ clock_table->NumDispClkLevelsEnabled);
+ } else {
+ /* Invalid number of entries in the table from PMFW. */
+ ASSERT(0);
+ }
+
+ /* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
+ ASSERT(clock_table->NumDcfClkLevelsEnabled > 0);
+
+ num_fclk = (clock_table->NumFclkLevelsEnabled > NUM_FCLK_DPM_LEVELS) ? NUM_FCLK_DPM_LEVELS :
+ clock_table->NumFclkLevelsEnabled;
+ max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk);
+
+ num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
+ clock_table->NumDcfClkLevelsEnabled;
+ for (i = 0; i < num_dcfclk; i++) {
+ int j;
+
+		/* First, for the clocks we don't read, pick defaults from the closest lower-or-equal default dcfclk entry */
+ for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
+ if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
+ break;
+
+ bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;
+
+ /* Now update clocks we do read */
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[min_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[min_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
+ bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio =
+ convert_wck_ratio(clock_table->MemPstateTable[min_pstate].WckRatio);
+
+ /* Dcfclk and Fclk are tied, but at a different ratio */
+ bw_params->clk_table.entries[i].fclk_mhz = min(max_fclk, 2 * clock_table->DcfClocks[i]);
+ }
+
+ /* Make sure to include at least one entry at highest pstate */
+ if (max_pstate != min_pstate || i == 0) {
+ if (i > MAX_NUM_DPM_LVL - 1)
+ i = MAX_NUM_DPM_LVL - 1;
+
+ bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[max_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[max_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz =
+ find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].socclk_mhz =
+ find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
+ clock_table->MemPstateTable[max_pstate].WckRatio);
+ i++;
+ }
+ bw_params->clk_table.num_entries = i--;
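+	/*
+	 * num_entries was captured before the post-decrement, so i now indexes
+	 * the last populated entry; the fix-up below tops that entry up with
+	 * the maximum clock values.
+	 */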
+
+	/* Make sure all highest clocks are included */
+ bw_params->clk_table.entries[i].socclk_mhz =
+ find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz =
+ find_max_clk_value(clock_table->DispClocks, NUM_DISPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dppclk_mhz =
+ find_max_clk_value(clock_table->DppClocks, NUM_DPPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].fclk_mhz =
+ find_max_clk_value(clock_table->FclkClocks_Freq, NUM_FCLK_DPM_LEVELS);
+ ASSERT(clock_table->DcfClocks[i] == find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS));
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+ bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels = clock_table->NumDcfClkLevelsEnabled;
+ bw_params->clk_table.num_entries_per_clk.num_dispclk_levels = clock_table->NumDispClkLevelsEnabled;
+ bw_params->clk_table.num_entries_per_clk.num_dppclk_levels = clock_table->NumDispClkLevelsEnabled;
+ bw_params->clk_table.num_entries_per_clk.num_fclk_levels = clock_table->NumFclkLevelsEnabled;
+ bw_params->clk_table.num_entries_per_clk.num_memclk_levels = clock_table->NumMemPstatesEnabled;
+ bw_params->clk_table.num_entries_per_clk.num_socclk_levels = clock_table->NumSocClkLevelsEnabled;
+
+ /*
+ * Set any 0 clocks to max default setting. Not an issue for
+ * power since we aren't doing switching in such case anyway
+ */
+ for (i = 0; i < bw_params->clk_table.num_entries; i++) {
+ if (!bw_params->clk_table.entries[i].fclk_mhz) {
+ bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
+ bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
+ bw_params->clk_table.entries[i].voltage = def_max.voltage;
+ }
+ if (!bw_params->clk_table.entries[i].dcfclk_mhz)
+ bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
+ if (!bw_params->clk_table.entries[i].socclk_mhz)
+ bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
+ if (!bw_params->clk_table.entries[i].dispclk_mhz)
+ bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
+ if (!bw_params->clk_table.entries[i].dppclk_mhz)
+ bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
+ if (!bw_params->clk_table.entries[i].fclk_mhz)
+ bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_mhz)
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_d18_mhz)
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ if (!bw_params->clk_table.entries[i].dtbclk_mhz)
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+ }
+ ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
+ bw_params->vram_type = bios_info->memory_type;
+ bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
+ bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
+
+ for (i = 0; i < WM_SET_COUNT; i++) {
+ bw_params->wm_table.entries[i].wm_inst = i;
+
+ if (i >= bw_params->clk_table.num_entries) {
+ bw_params->wm_table.entries[i].valid = false;
+ continue;
+ }
+
+ bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
+ bw_params->wm_table.entries[i].valid = true;
+ }
+}
+
+static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base)
+{
+ int display_count;
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ struct dc_state *context = dc->current_state;
+
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
+ display_count = dcn35_get_active_display_cnt_wa(dc, context, NULL);
+ /* if we can go lower, go lower */
+ if (display_count == 0)
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+ }
+}
+
+static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+	/* SMU optimization is performed as part of the low power state exit. */
+	dcn35_smu_exit_low_power_state(clk_mgr);
+}
+
+static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base)
+{
+	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+	return dcn35_smu_get_ips_supported(clk_mgr) ? true : false;
+}
+
+static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr)
+{
+ init_clk_states(clk_mgr);
+
+	/* TODO: Implement the functions and remove the ifndef guard */
+}
+
+static void dcn35_update_clocks_fpga(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ int fclk_adj = new_clocks->fclk_khz;
+
+ /* TODO: remove this after correctly set by DML */
+ new_clocks->dcfclk_khz = 400000;
+ new_clocks->socclk_khz = 400000;
+
+ /* Min fclk = 1.2GHz since all the extra scemi logic seems to run off of it */
+ //int fclk_adj = new_clocks->fclk_khz > 1200000 ? new_clocks->fclk_khz : 1200000;
+ new_clocks->fclk_khz = 4320000;
+
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
+ clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
+ clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+ }
+
+ if (should_set_clock(safe_to_lower,
+ new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
+ clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr->clks.socclk_khz)) {
+ clk_mgr->clks.socclk_khz = new_clocks->socclk_khz;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr->clks.dramclk_khz)) {
+ clk_mgr->clks.dramclk_khz = new_clocks->dramclk_khz;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->clks.dppclk_khz)) {
+ clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
+ }
+
+ if (should_set_clock(safe_to_lower, fclk_adj, clk_mgr->clks.fclk_khz)) {
+ clk_mgr->clks.fclk_khz = fclk_adj;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+ clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
+ }
+
+ /* Both fclk and ref_dppclk run on the same scemi clock.
+ * So take the higher value since the DPP DTO is typically programmed
+ * such that max dppclk is 1:1 with ref_dppclk.
+ */
+ if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz)
+ clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz;
+ if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz)
+ clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz;
+
+ // Both fclk and ref_dppclk run on the same scemi clock.
+ clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz;
+
+ /* TODO: set dtbclk in correct place */
+ clk_mgr->clks.dtbclk_en = true;
+ dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks);
+ dcn35_update_clocks_update_dpp_dto(clk_mgr_int, context, safe_to_lower);
+
+ dcn35_update_clocks_update_dtb_dto(clk_mgr_int, context, clk_mgr->clks.ref_dtbclk_khz);
+}
+
+static struct clk_mgr_funcs dcn35_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
+ .update_clocks = dcn35_update_clocks,
+ .init_clocks = dcn35_init_clocks,
+ .enable_pme_wa = dcn35_enable_pme_wa,
+ .are_clock_states_equal = dcn35_are_clock_states_equal,
+ .notify_wm_ranges = dcn35_notify_wm_ranges,
+ .set_low_power_state = dcn35_set_low_power_state,
+ .exit_low_power_state = dcn35_exit_low_power_state,
+ .is_ips_supported = dcn35_is_ips_supported,
+};
+
+struct clk_mgr_funcs dcn35_fpga_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .update_clocks = dcn35_update_clocks_fpga,
+ .init_clocks = dcn35_init_clocks_fpga,
+ .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
+};
+
+void dcn35_clk_mgr_construct(
+ struct dc_context *ctx,
+ struct clk_mgr_dcn35 *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg)
+{
+ struct dcn35_smu_dpm_clks smu_dpm_clks = { 0 };
+ clk_mgr->base.base.ctx = ctx;
+ clk_mgr->base.base.funcs = &dcn35_funcs;
+
+ clk_mgr->base.pp_smu = pp_smu;
+
+ clk_mgr->base.dccg = dccg;
+ clk_mgr->base.dfs_bypass_disp_clk = 0;
+
+ clk_mgr->base.dprefclk_ss_percentage = 0;
+ clk_mgr->base.dprefclk_ss_divider = 1000;
+ clk_mgr->base.ss_on_dprefclk = false;
+ clk_mgr->base.dfs_ref_freq_khz = 48000;
+
+ clk_mgr->smu_wm_set.wm_set = (struct dcn35_watermarks *)dm_helpers_allocate_gpu_mem(
+ clk_mgr->base.base.ctx,
+ DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ sizeof(struct dcn35_watermarks),
+ &clk_mgr->smu_wm_set.mc_address.quad_part);
+
+ if (!clk_mgr->smu_wm_set.wm_set) {
+ clk_mgr->smu_wm_set.wm_set = &dummy_wms;
+ clk_mgr->smu_wm_set.mc_address.quad_part = 0;
+ }
+ ASSERT(clk_mgr->smu_wm_set.wm_set);
+
+ smu_dpm_clks.dpm_clks = (DpmClocks_t_dcn35 *)dm_helpers_allocate_gpu_mem(
+ clk_mgr->base.base.ctx,
+ DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ sizeof(DpmClocks_t_dcn35),
+ &smu_dpm_clks.mc_address.quad_part);
+
+ if (smu_dpm_clks.dpm_clks == NULL) {
+ smu_dpm_clks.dpm_clks = &dummy_clocks;
+ smu_dpm_clks.mc_address.quad_part = 0;
+ }
+
+ ASSERT(smu_dpm_clks.dpm_clks);
+
+ clk_mgr->base.smu_ver = dcn35_smu_get_smu_version(&clk_mgr->base);
+
+ if (clk_mgr->base.smu_ver)
+ clk_mgr->base.smu_present = true;
+
+ /* TODO: Check we get what we expect during bringup */
+ clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
+
+ if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
+ dcn35_bw_params.wm_table = lpddr5_wm_table;
+ } else {
+ dcn35_bw_params.wm_table = ddr5_wm_table;
+ }
+ /* Saved clocks configured at boot for debug purposes */
+ dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
+
+ clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
+ clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
+
+ dce_clock_read_ss_info(&clk_mgr->base);
+	/* When the clock source is from the FCH, it could have SS (same clock source as the DPREF clock) */
+
+ dcn35_read_ss_info_from_lut(&clk_mgr->base);
+
+ clk_mgr->base.base.bw_params = &dcn35_bw_params;
+
+ if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
+ int i;
+ dcn35_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
+ DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
+ "NumDispClkLevelsEnabled: %d\n"
+ "NumSocClkLevelsEnabled: %d\n"
+ "VcnClkLevelsEnabled: %d\n"
+ "FClkLevelsEnabled: %d\n"
+ "NumMemPstatesEnabled: %d\n"
+ "MinGfxClk: %d\n"
+ "MaxGfxClk: %d\n",
+ smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumFclkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumMemPstatesEnabled,
+ smu_dpm_clks.dpm_clks->MinGfxClk,
+ smu_dpm_clks.dpm_clks->MaxGfxClk);
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n",
+ i,
+ smu_dpm_clks.dpm_clks->DcfClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->DispClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumFclkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->FclkClocks_Freq[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->FclkClocks_Freq[i]);
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->FclkClocks_Voltage[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->FclkClocks_Voltage[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++)
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocVoltage[i]);
+
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumMemPstatesEnabled; i++) {
+			DC_LOG_SMU("smu_dpm_clks.dpm_clks->MemPstateTable[%d].UClk = %d\n"
+					   "smu_dpm_clks.dpm_clks->MemPstateTable[%d].MemClk = %d\n"
+					   "smu_dpm_clks.dpm_clks->MemPstateTable[%d].Voltage = %d\n",
+ i, smu_dpm_clks.dpm_clks->MemPstateTable[i].UClk,
+ i, smu_dpm_clks.dpm_clks->MemPstateTable[i].MemClk,
+ i, smu_dpm_clks.dpm_clks->MemPstateTable[i].Voltage);
+ }
+
+ if (ctx->dc_bios && ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) {
+ dcn35_clk_mgr_helper_populate_bw_params(
+ &clk_mgr->base,
+ ctx->dc_bios->integrated_info,
+ smu_dpm_clks.dpm_clks);
+ }
+ }
+
+ if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
+ dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ smu_dpm_clks.dpm_clks);
+
+ if (ctx->dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
+ bool ips_support = false;
+
+		/* avoid calling PMFW at init */
+ ips_support = dcn35_smu_get_ips_supported(&clk_mgr->base);
+ if (ips_support) {
+ ctx->dc->debug.ignore_pg = false;
+ ctx->dc->debug.disable_dpp_power_gate = false;
+ ctx->dc->debug.disable_hubp_power_gate = false;
+ ctx->dc->debug.disable_dsc_power_gate = false;
+ } else {
+			/* reset the config control flag */
+			ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL; /* PMFW does not support IPS, disable it entirely */
+ }
+ }
+}
+
+void dcn35_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int)
+{
+ struct clk_mgr_dcn35 *clk_mgr = TO_CLK_MGR_DCN35(clk_mgr_int);
+
+ if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0)
+ dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ clk_mgr->smu_wm_set.wm_set);
+}
diff --git a/rr-cache/b0f66c584ddead21f2267ca9063b2ea9c387bb2d/preimage.1 b/rr-cache/b0f66c584ddead21f2267ca9063b2ea9c387bb2d/preimage.1
new file mode 100644
index 000000000000..df3b32023045
--- /dev/null
+++ b/rr-cache/b0f66c584ddead21f2267ca9063b2ea9c387bb2d/preimage.1
@@ -0,0 +1,1169 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dcn35_clk_mgr.h"
+
+#include "dccg.h"
+#include "clk_mgr_internal.h"
+
+// For dce12_get_dp_ref_freq_khz
+#include "dce100/dce_clk_mgr.h"
+
+// For dcn20_update_clocks_update_dpp_dto
+#include "dcn20/dcn20_clk_mgr.h"
+
+
+
+
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dcn35_smu.h"
+#include "dm_helpers.h"
+
+/* TODO: remove this include once we ported over remaining clk mgr functions*/
+#include "dcn30/dcn30_clk_mgr.h"
+#include "dcn31/dcn31_clk_mgr.h"
+
+#include "dc_dmub_srv.h"
+#include "link.h"
+#include "logger_types.h"
+
+#undef DC_LOGGER
+#define DC_LOGGER \
+ clk_mgr->base.base.ctx->logger
+
+#define regCLK1_CLK_PLL_REQ 0x0237
+#define regCLK1_CLK_PLL_REQ_BASE_IDX 0
+
+#define CLK1_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
+#define CLK1_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
+#define CLK1_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
+#define CLK1_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
+#define CLK1_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
+#define CLK1_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+
+#define regCLK1_CLK2_BYPASS_CNTL 0x029c
+#define regCLK1_CLK2_BYPASS_CNTL_BASE_IDX 0
+
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
+#define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
+
+<<<<<<<
+=======
+#define regCLK5_0_CLK5_spll_field_8 0x464b
+#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0
+
+#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd
+#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+
+>>>>>>>
+#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
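+/* The version value appears to be packed as (major << 16) | (minor << 8) | patch:
+ * 0x5D = 93, 0x4A = 74, 0x00 = 0, matching the 93.74.0 note above. (assumption) */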
+
+#define REG(reg_name) \
+ (ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
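+/*
+ * For example, REG(CLK1_CLK_PLL_REQ) expands to
+ * ctx->clk_reg_offsets[regCLK1_CLK_PLL_REQ_BASE_IDX] + regCLK1_CLK_PLL_REQ,
+ * i.e. the base offset for index 0 plus 0x0237.
+ */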
+
+#define TO_CLK_MGR_DCN35(clk_mgr)\
+ container_of(clk_mgr, struct clk_mgr_dcn35, base)
+
+static int dcn35_get_active_display_cnt_wa(
+ struct dc *dc,
+ struct dc_state *context,
+ int *all_active_disps)
+{
+ int i, display_count = 0;
+ bool tmds_present = false;
+
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_stream_state *stream = context->streams[i];
+
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
+ stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
+ stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+ tmds_present = true;
+ }
+
+ for (i = 0; i < dc->link_count; i++) {
+ const struct dc_link *link = dc->links[i];
+
+ /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
+ if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ display_count++;
+ }
+ if (all_active_disps != NULL)
+ *all_active_disps = display_count;
+	/* WA for hang on HDMI after the display is turned off and back on */
+ if (display_count == 0 && tmds_present)
+ display_count = 1;
+
+ return display_count;
+}
+
+static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+ bool safe_to_lower, bool disable)
+{
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; ++i) {
+ struct pipe_ctx *pipe = safe_to_lower
+ ? &context->res_ctx.pipe_ctx[i]
+ : &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->top_pipe || pipe->prev_odm_pipe)
+ continue;
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
+ !pipe->stream->link_enc)) {
+ if (disable) {
+ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+
+ reset_sync_context_for_pipe(dc, context, i);
+ } else {
+ pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
+ }
+ }
+ }
+}
+
+static void dcn35_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context,
+ int ref_dtbclk_khz)
+{
+ struct dccg *dccg = clk_mgr->dccg;
+ uint32_t tg_mask = 0;
+ int i;
+
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct dtbclk_dto_params dto_params = {0};
+
+ /* use mask to program DTO once per tg */
+ if (pipe_ctx->stream_res.tg &&
+ !(tg_mask & (1 << pipe_ctx->stream_res.tg->inst))) {
+ tg_mask |= (1 << pipe_ctx->stream_res.tg->inst);
+
+ dto_params.otg_inst = pipe_ctx->stream_res.tg->inst;
+ dto_params.ref_dtbclk_khz = ref_dtbclk_khz;
+
+ dccg->funcs->set_dtbclk_dto(clk_mgr->dccg, &dto_params);
+ //dccg->funcs->set_audio_dtbclk_dto(clk_mgr->dccg, &dto_params);
+ }
+ }
+}
+
+static void dcn35_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context, bool safe_to_lower)
+{
+ int i;
+ bool dppclk_active[MAX_PIPES] = {0};
+
+
+ clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz;
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ int dpp_inst = 0, dppclk_khz, prev_dppclk_khz;
+
+ dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+
+ if (context->res_ctx.pipe_ctx[i].plane_res.dpp)
+ dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst;
+ else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz == 0) {
+ /* dpp == NULL && dppclk_khz == 0 is valid because of pipe harvesting.
+			 * In this case, just continue the loop.
+ */
+ continue;
+ } else if (!context->res_ctx.pipe_ctx[i].plane_res.dpp && dppclk_khz > 0) {
+ /* The software state is not valid if dpp resource is NULL and
+ * dppclk_khz > 0.
+ */
+ ASSERT(false);
+ continue;
+ }
+
+ prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];
+
+ if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
+ clk_mgr->dccg->funcs->update_dpp_dto(
+ clk_mgr->dccg, dpp_inst, dppclk_khz);
+ dppclk_active[dpp_inst] = true;
+ }
+ if (safe_to_lower)
+ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
+ struct dpp *old_dpp = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.dpp;
+
+ if (old_dpp && !dppclk_active[old_dpp->inst])
+ clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, old_dpp->inst, 0);
+ }
+}
+
+void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ union dmub_rb_cmd cmd;
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ int display_count = 0;
+ bool update_dppclk = false;
+ bool update_dispclk = false;
+ bool dpp_clock_lowered = false;
+ int all_active_disps = 0;
+
+ if (dc->work_arounds.skip_clock_update)
+ return;
+
+ display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
+ if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
+ new_clocks->ref_dtbclk_khz = 600000;
+
+ /*
+	 * If it is safe to lower and we are already in the lower state, there is nothing to do.
+	 * Also, if safe_to_lower is false, we simply go to the higher state.
+ */
+ if (safe_to_lower) {
+ if (new_clocks->zstate_support != DCN_ZSTATE_SUPPORT_DISALLOW &&
+ new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
+ dcn35_smu_set_zstate_support(clk_mgr, new_clocks->zstate_support);
+ dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, true);
+ clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
+ }
+
+ if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
+ if (clk_mgr->base.ctx->dc->config.allow_0_dtb_clk)
+ dcn35_smu_set_dtbclk(clk_mgr, false);
+ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+ }
+ /* check that we're not already in lower */
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
+ /* if we can go lower, go lower */
+ if (display_count == 0)
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+ }
+ } else {
+ if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
+ new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
+ dcn35_smu_set_zstate_support(clk_mgr, DCN_ZSTATE_SUPPORT_DISALLOW);
+ dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, false);
+ clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
+ }
+
+ if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
+ dcn35_smu_set_dtbclk(clk_mgr, true);
+ clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+
+ dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
+ clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
+ }
+
+ /* check that we're not already in D0 */
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
+ union display_idle_optimization_u idle_info = { 0 };
+
+ dcn35_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
+ /* update power state */
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
+ }
+ }
+ if (dc->debug.force_min_dcfclk_mhz > 0)
+ new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
+ new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
+
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
+ clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+ dcn35_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
+ }
+
+ if (should_set_clock(safe_to_lower,
+ new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
+ clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+ dcn35_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
+ }
+
+	// Workaround: limit dppclk to 100 MHz to avoid underflow when a lower-resolution eDP panel switches to an eDP-plus-4K-monitor configuration.
+ if (new_clocks->dppclk_khz < 100000)
+ new_clocks->dppclk_khz = 100000;
+
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
+ if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+ dpp_clock_lowered = true;
+ clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
+ update_dppclk = true;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+ dcn35_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
+ dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
+
+ update_dispclk = true;
+ }
+
+ /* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
+ if (!dc->debug.disable_dtb_ref_clk_switch &&
+ should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000,
+ clk_mgr_base->clks.ref_dtbclk_khz / 1000)) {
+ dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
+ clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
+ }
+
+ if (dpp_clock_lowered) {
+ // increase per DPP DTO before lowering global dppclk
+ dcn35_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn35_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
+ } else {
+ // increase global DPPCLK before lowering per DPP DTO
+ if (update_dppclk || update_dispclk)
+ dcn35_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
+ dcn35_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ }
+
+ // notify DMCUB of latest clocks
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.notify_clocks.header.type = DMUB_CMD__CLK_MGR;
+ cmd.notify_clocks.header.sub_type = DMUB_CMD__CLK_MGR_NOTIFY_CLOCKS;
+ cmd.notify_clocks.clocks.dcfclk_khz = clk_mgr_base->clks.dcfclk_khz;
+ cmd.notify_clocks.clocks.dcfclk_deep_sleep_khz =
+ clk_mgr_base->clks.dcfclk_deep_sleep_khz;
+ cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
+ cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;
+
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
+{
+ /* get FbMult value */
+ struct fixed31_32 pll_req;
+ unsigned int fbmult_frac_val = 0;
+ unsigned int fbmult_int_val = 0;
+ struct dc_context *ctx = clk_mgr->base.ctx;
+
+ /*
+	 * The register value of fbmult is in 8.16 format; we convert it to the
+	 * fixed 31.32 format to leverage the fixed-point operations available in the driver
+ */
+
+ REG_GET(CLK1_CLK_PLL_REQ, FbMult_frac, &fbmult_frac_val); /* 16 bit fractional part*/
+ REG_GET(CLK1_CLK_PLL_REQ, FbMult_int, &fbmult_int_val); /* 8 bit integer part */
+
+ pll_req = dc_fixpt_from_int(fbmult_int_val);
+
+ /*
+	 * since the fractional part is only 16 bits in the register definition but 32 bits
+	 * in our fixed-point definition, we need to shift left by 16 to obtain the correct value
+ */
+ pll_req.value |= fbmult_frac_val << 16;
+
+	/* multiply by the REFCLK frequency (in kHz) */
+ pll_req = dc_fixpt_mul_int(pll_req, clk_mgr->dfs_ref_freq_khz);
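+	/*
+	 * Worked example with hypothetical register values: FbMult_int = 100,
+	 * FbMult_frac = 0x8000 (0.5 in 8.16 format) and dfs_ref_freq_khz = 48000
+	 * give pll_req = 100.5 * 48000 = 4824000, i.e. a ~4.82 GHz VCO.
+	 */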
+
+ /* integer part is now VCO frequency in kHz */
+ return dc_fixpt_floor(pll_req);
+}
+
+static void dcn35_enable_pme_wa(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+ dcn35_smu_enable_pme_wa(clk_mgr);
+}
+
+
+bool dcn35_are_clock_states_equal(struct dc_clocks *a,
+ struct dc_clocks *b)
+{
+ if (a->dispclk_khz != b->dispclk_khz)
+ return false;
+ else if (a->dppclk_khz != b->dppclk_khz)
+ return false;
+ else if (a->dcfclk_khz != b->dcfclk_khz)
+ return false;
+ else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz)
+ return false;
+ else if (a->zstate_support != b->zstate_support)
+ return false;
+ else if (a->dtbclk_en != b->dtbclk_en)
+ return false;
+
+ return true;
+}
+
+static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
+ struct clk_mgr_dcn35 *clk_mgr)
+{
+}
+
+static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc_context *ctx = clk_mgr->base.ctx;
+ uint32_t ssc_enable;
+
+ REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable);
+
+ return ssc_enable == 1;
+}
+
+static void init_clk_states(struct clk_mgr *clk_mgr)
+{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+
+ if (clk_mgr_int->smu_ver >= SMU_VER_THRESHOLD)
+ clk_mgr->clks.dtbclk_en = true; // request DTBCLK disable on first commit
+ clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+ clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+}
+
+void dcn35_init_clocks(struct clk_mgr *clk_mgr)
+{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ init_clk_states(clk_mgr);
+
+	// Adjust the dp_dto reference clock if SSC is enabled; otherwise use dprefclk.
+ if (dcn35_is_spll_ssc_enabled(clk_mgr))
+ clk_mgr->dp_dto_source_clock_in_khz =
+ dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
+ else
+ clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+
+}
+static struct clk_bw_params dcn35_bw_params = {
+ .vram_type = Ddr4MemType,
+ .num_channels = 1,
+ .clk_table = {
+ .num_entries = 4,
+ },
+
+};
+
+static struct wm_table ddr5_wm_table = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.72,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .valid = true,
+ },
+ }
+};
+
+static struct wm_table lpddr5_wm_table = {
+ .entries = {
+ {
+ .wm_inst = WM_A,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_B,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_C,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .valid = true,
+ },
+ {
+ .wm_inst = WM_D,
+ .wm_type = WM_TYPE_PSTATE_CHG,
+ .pstate_latency_us = 11.65333,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .valid = true,
+ },
+ }
+};
+
+static DpmClocks_t_dcn35 dummy_clocks;
+
+static struct dcn35_watermarks dummy_wms = { 0 };
+
+static struct dcn35_ss_info_table ss_info_table = {
+ .ss_divider = 1000,
+ .ss_percentage = {0, 0, 375, 375, 375}
+};
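+/*
+ * Illustrative reading (assumption about the encoding): with ss_divider = 1000,
+ * an ss_percentage entry of 375 corresponds to a 0.375% spread, indexed by the
+ * CLK2_BYPASS_SEL clock source read in dcn35_read_ss_info_from_lut() below.
+ */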
+
+static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
+{
+ struct dc_context *ctx = clk_mgr->base.ctx;
+ uint32_t clock_source;
+
+ REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
+ // If it's DFS mode, clock_source is 0.
+ if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
+ clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
+
+ if (clk_mgr->dprefclk_ss_percentage != 0) {
+ clk_mgr->ss_on_dprefclk = true;
+ clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
+ }
+ }
+}
+
+static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
+{
+ int i, num_valid_sets;
+
+ num_valid_sets = 0;
+
+ for (i = 0; i < WM_SET_COUNT; i++) {
+		/* skip empty entries, the SMU array has no holes */
+ if (!bw_params->wm_table.entries[i].valid)
+ continue;
+
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;
+ /* We will not select WM based on fclk, so leave it as unconstrained */
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
+
+ if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) {
+ if (i == 0)
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0;
+ else {
+ /* add 1 to make it non-overlapping with next lvl */
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
+ bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
+ }
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
+ bw_params->clk_table.entries[i].dcfclk_mhz;
+
+ } else {
+ /* unconstrained for memory retraining */
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;
+
+ /* Modify previous watermark range to cover up to max */
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
+ }
+ num_valid_sets++;
+ }
+
+ ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */
+
+	/* modify the min and max to make sure we cover the whole range */
+ table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
+ table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
+ table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
+
+	/* This is for writeback only; it does not matter currently as there is no writeback support */
+ table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
+ table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
+ table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
+ table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
+ table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
+}
+
+static void dcn35_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct clk_mgr_dcn35 *clk_mgr_dcn35 = TO_CLK_MGR_DCN35(clk_mgr);
+ struct dcn35_watermarks *table = clk_mgr_dcn35->smu_wm_set.wm_set;
+
+ if (!clk_mgr->smu_ver)
+ return;
+
+ if (!table || clk_mgr_dcn35->smu_wm_set.mc_address.quad_part == 0)
+ return;
+
+ memset(table, 0, sizeof(*table));
+
+ dcn35_build_watermark_ranges(clk_mgr_base->bw_params, table);
+
+ dcn35_smu_set_dram_addr_high(clk_mgr,
+ clk_mgr_dcn35->smu_wm_set.mc_address.high_part);
+ dcn35_smu_set_dram_addr_low(clk_mgr,
+ clk_mgr_dcn35->smu_wm_set.mc_address.low_part);
+ dcn35_smu_transfer_wm_table_dram_2_smu(clk_mgr);
+}
+
+static void dcn35_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
+ struct dcn35_smu_dpm_clks *smu_dpm_clks)
+{
+ DpmClocks_t_dcn35 *table = smu_dpm_clks->dpm_clks;
+
+ if (!clk_mgr->smu_ver)
+ return;
+
+ if (!table || smu_dpm_clks->mc_address.quad_part == 0)
+ return;
+
+ memset(table, 0, sizeof(*table));
+
+ dcn35_smu_set_dram_addr_high(clk_mgr,
+ smu_dpm_clks->mc_address.high_part);
+ dcn35_smu_set_dram_addr_low(clk_mgr,
+ smu_dpm_clks->mc_address.low_part);
+ dcn35_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
+}
+
+static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
+{
+ uint32_t max = 0;
+ int i;
+
+ for (i = 0; i < num_clocks; ++i) {
+ if (clocks[i] > max)
+ max = clocks[i];
+ }
+
+ return max;
+}
+
+static inline bool is_valid_clock_value(uint32_t clock_value)
+{
+ return clock_value > 1 && clock_value < 100000;
+}
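+/* This rejects zero/garbage SMU table entries: values outside (1, 100000) are treated as invalid. */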
+
+static unsigned int convert_wck_ratio(uint8_t wck_ratio)
+{
+ switch (wck_ratio) {
+ case WCK_RATIO_1_2:
+ return 2;
+
+ case WCK_RATIO_1_4:
+ return 4;
+ /* Find lowest DPM, FCLK is filled in reverse order*/
+
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+static inline uint32_t calc_dram_speed_mts(const MemPstateTable_t *entry)
+{
+ return entry->UClk * convert_wck_ratio(entry->WckRatio) * 2;
+}
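+/*
+ * Example with hypothetical values: UClk = 1000 MHz at WCK_RATIO_1_4
+ * yields 1000 * 4 * 2 = 8000 MT/s.
+ */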
+
+static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
+ struct integrated_info *bios_info,
+ DpmClocks_t_dcn35 *clock_table)
+{
+ struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
+ struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
+ uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
+ uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0;
+ uint32_t num_memps, num_fclk, num_dcfclk;
+ int i;
+
+ /* Determine min/max p-state values. */
+ num_memps = (clock_table->NumMemPstatesEnabled > NUM_MEM_PSTATE_LEVELS) ? NUM_MEM_PSTATE_LEVELS :
+ clock_table->NumMemPstatesEnabled;
+ for (i = 0; i < num_memps; i++) {
+ uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
+
+ if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) {
+ max_dram_speed_mts = dram_speed_mts;
+ max_pstate = i;
+ }
+ }
+
+ min_dram_speed_mts = max_dram_speed_mts;
+ min_pstate = max_pstate;
+
+ for (i = 0; i < num_memps; i++) {
+ uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
+
+ if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) {
+ min_dram_speed_mts = dram_speed_mts;
+ min_pstate = i;
+ }
+ }
+
+ /* We expect the table to contain at least one valid P-state entry. */
+ ASSERT(clock_table->NumMemPstatesEnabled &&
+ is_valid_clock_value(max_dram_speed_mts) &&
+ is_valid_clock_value(min_dram_speed_mts));
+
+ /* dispclk and dppclk can be max at any voltage, same number of levels for both */
+ if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
+ clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
+ max_dispclk = find_max_clk_value(clock_table->DispClocks,
+ clock_table->NumDispClkLevelsEnabled);
+ max_dppclk = find_max_clk_value(clock_table->DppClocks,
+ clock_table->NumDispClkLevelsEnabled);
+ } else {
+ /* Invalid number of entries in the table from PMFW. */
+ ASSERT(0);
+ }
+
+ /* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
+ ASSERT(clock_table->NumDcfClkLevelsEnabled > 0);
+
+ num_fclk = (clock_table->NumFclkLevelsEnabled > NUM_FCLK_DPM_LEVELS) ? NUM_FCLK_DPM_LEVELS :
+ clock_table->NumFclkLevelsEnabled;
+ max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk);
+
+ num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
+ clock_table->NumDcfClkLevelsEnabled;
+ for (i = 0; i < num_dcfclk; i++) {
+ int j;
+
+ /* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
+ for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
+ if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
+ break;
+
+ bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;
+
+ /* Now update clocks we do read */
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[min_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[min_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
+ bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio =
+ convert_wck_ratio(clock_table->MemPstateTable[min_pstate].WckRatio);
+
+ /* Dcfclk and Fclk are tied, but at a different ratio */
+ bw_params->clk_table.entries[i].fclk_mhz = min(max_fclk, 2 * clock_table->DcfClocks[i]);
+ }
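+	/*
+	 * Illustrative values: with DcfClocks[i] = 600 MHz and max_fclk = 1800 MHz,
+	 * the derived fclk entry above is min(1800, 2 * 600) = 1200 MHz.
+	 */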
+
+ /* Make sure to include at least one entry at highest pstate */
+ if (max_pstate != min_pstate || i == 0) {
+ if (i > MAX_NUM_DPM_LVL - 1)
+ i = MAX_NUM_DPM_LVL - 1;
+
+ bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
+ bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[max_pstate].MemClk;
+ bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[max_pstate].Voltage;
+ bw_params->clk_table.entries[i].dcfclk_mhz =
+ find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].socclk_mhz =
+ find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
+ bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
+ bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
+ clock_table->MemPstateTable[max_pstate].WckRatio);
+ i++;
+ }
+ bw_params->clk_table.num_entries = i--;
+
+	/* Make sure all highest clocks are included */
+ bw_params->clk_table.entries[i].socclk_mhz =
+ find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dispclk_mhz =
+ find_max_clk_value(clock_table->DispClocks, NUM_DISPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].dppclk_mhz =
+ find_max_clk_value(clock_table->DppClocks, NUM_DPPCLK_DPM_LEVELS);
+ bw_params->clk_table.entries[i].fclk_mhz =
+ find_max_clk_value(clock_table->FclkClocks_Freq, NUM_FCLK_DPM_LEVELS);
+ ASSERT(clock_table->DcfClocks[i] == find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS));
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+ bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels = clock_table->NumDcfClkLevelsEnabled;
+ bw_params->clk_table.num_entries_per_clk.num_dispclk_levels = clock_table->NumDispClkLevelsEnabled;
+ bw_params->clk_table.num_entries_per_clk.num_dppclk_levels = clock_table->NumDispClkLevelsEnabled;
+ bw_params->clk_table.num_entries_per_clk.num_fclk_levels = clock_table->NumFclkLevelsEnabled;
+ bw_params->clk_table.num_entries_per_clk.num_memclk_levels = clock_table->NumMemPstatesEnabled;
+ bw_params->clk_table.num_entries_per_clk.num_socclk_levels = clock_table->NumSocClkLevelsEnabled;
+
+ /*
+ * Set any 0 clocks to max default setting. Not an issue for
+ * power since we aren't doing switching in such case anyway
+ */
+ for (i = 0; i < bw_params->clk_table.num_entries; i++) {
+ if (!bw_params->clk_table.entries[i].fclk_mhz) {
+ bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
+ bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
+ bw_params->clk_table.entries[i].voltage = def_max.voltage;
+ }
+ if (!bw_params->clk_table.entries[i].dcfclk_mhz)
+ bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
+ if (!bw_params->clk_table.entries[i].socclk_mhz)
+ bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
+ if (!bw_params->clk_table.entries[i].dispclk_mhz)
+ bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
+ if (!bw_params->clk_table.entries[i].dppclk_mhz)
+ bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
+ if (!bw_params->clk_table.entries[i].fclk_mhz)
+ bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_mhz)
+ bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
+ if (!bw_params->clk_table.entries[i].phyclk_d18_mhz)
+ bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
+ if (!bw_params->clk_table.entries[i].dtbclk_mhz)
+ bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
+ }
+ ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
+ bw_params->vram_type = bios_info->memory_type;
+ bw_params->dram_channel_width_bytes = bios_info->memory_type == 0x22 ? 8 : 4;
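+	/* The 0x22 check above presumably corresponds to the LpDdr5MemType check used
+	 * elsewhere in this file; LPDDR5 gets 8-byte-wide channels here, everything
+	 * else 4. (assumption) */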
+ bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;
+
+ for (i = 0; i < WM_SET_COUNT; i++) {
+ bw_params->wm_table.entries[i].wm_inst = i;
+
+ if (i >= bw_params->clk_table.num_entries) {
+ bw_params->wm_table.entries[i].valid = false;
+ continue;
+ }
+
+ bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
+ bw_params->wm_table.entries[i].valid = true;
+ }
+}
+
+static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base)
+{
+ int display_count;
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ struct dc_state *context = dc->current_state;
+
+ if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
+ display_count = dcn35_get_active_display_cnt_wa(dc, context, NULL);
+ /* if we can go lower, go lower */
+ if (display_count == 0)
+ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
+ }
+}
+
+static void dcn35_exit_low_power_state(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+
+	//SMU optimization is performed as part of low power state exit.
+ dcn35_smu_exit_low_power_state(clk_mgr);
+
+}
+
+static bool dcn35_is_ips_supported(struct clk_mgr *clk_mgr_base)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ bool ips_supported = true;
+
+ ips_supported = dcn35_smu_get_ips_supported(clk_mgr) ? true : false;
+
+ return ips_supported;
+}
+
+static void dcn35_init_clocks_fpga(struct clk_mgr *clk_mgr)
+{
+ init_clk_states(clk_mgr);
+
+/* TODO: Implement the functions and remove the ifndef guard */
+}
+
+static void dcn35_update_clocks_fpga(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ int fclk_adj = new_clocks->fclk_khz;
+
+ /* TODO: remove this after correctly set by DML */
+ new_clocks->dcfclk_khz = 400000;
+ new_clocks->socclk_khz = 400000;
+
+ /* Min fclk = 1.2GHz since all the extra scemi logic seems to run off of it */
+ //int fclk_adj = new_clocks->fclk_khz > 1200000 ? new_clocks->fclk_khz : 1200000;
+ new_clocks->fclk_khz = 4320000;
+
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
+ clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
+ clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+ }
+
+ if (should_set_clock(safe_to_lower,
+ new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
+ clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr->clks.socclk_khz)) {
+ clk_mgr->clks.socclk_khz = new_clocks->socclk_khz;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr->clks.dramclk_khz)) {
+ clk_mgr->clks.dramclk_khz = new_clocks->dramclk_khz;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->clks.dppclk_khz)) {
+ clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
+ }
+
+ if (should_set_clock(safe_to_lower, fclk_adj, clk_mgr->clks.fclk_khz)) {
+ clk_mgr->clks.fclk_khz = fclk_adj;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+ clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
+ }
+
+ /* Both fclk and ref_dppclk run on the same scemi clock.
+ * So take the higher value since the DPP DTO is typically programmed
+ * such that max dppclk is 1:1 with ref_dppclk.
+ */
+ if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz)
+ clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz;
+ if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz)
+ clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz;
+
+ // Both fclk and ref_dppclk run on the same scemi clock.
+ clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz;
+
+ /* TODO: set dtbclk in correct place */
+ clk_mgr->clks.dtbclk_en = true;
+ dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks);
+ dcn35_update_clocks_update_dpp_dto(clk_mgr_int, context, safe_to_lower);
+
+ dcn35_update_clocks_update_dtb_dto(clk_mgr_int, context, clk_mgr->clks.ref_dtbclk_khz);
+}
+
+static struct clk_mgr_funcs dcn35_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
+ .update_clocks = dcn35_update_clocks,
+ .init_clocks = dcn35_init_clocks,
+ .enable_pme_wa = dcn35_enable_pme_wa,
+ .are_clock_states_equal = dcn35_are_clock_states_equal,
+ .notify_wm_ranges = dcn35_notify_wm_ranges,
+ .set_low_power_state = dcn35_set_low_power_state,
+ .exit_low_power_state = dcn35_exit_low_power_state,
+ .is_ips_supported = dcn35_is_ips_supported,
+};
+
+struct clk_mgr_funcs dcn35_fpga_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .update_clocks = dcn35_update_clocks_fpga,
+ .init_clocks = dcn35_init_clocks_fpga,
+ .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
+};
+
+void dcn35_clk_mgr_construct(
+ struct dc_context *ctx,
+ struct clk_mgr_dcn35 *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg)
+{
+ struct dcn35_smu_dpm_clks smu_dpm_clks = { 0 };
+ clk_mgr->base.base.ctx = ctx;
+ clk_mgr->base.base.funcs = &dcn35_funcs;
+
+ clk_mgr->base.pp_smu = pp_smu;
+
+ clk_mgr->base.dccg = dccg;
+ clk_mgr->base.dfs_bypass_disp_clk = 0;
+
+ clk_mgr->base.dprefclk_ss_percentage = 0;
+ clk_mgr->base.dprefclk_ss_divider = 1000;
+ clk_mgr->base.ss_on_dprefclk = false;
+ clk_mgr->base.dfs_ref_freq_khz = 48000;
+
+ clk_mgr->smu_wm_set.wm_set = (struct dcn35_watermarks *)dm_helpers_allocate_gpu_mem(
+ clk_mgr->base.base.ctx,
+ DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ sizeof(struct dcn35_watermarks),
+ &clk_mgr->smu_wm_set.mc_address.quad_part);
+
+ if (!clk_mgr->smu_wm_set.wm_set) {
+ clk_mgr->smu_wm_set.wm_set = &dummy_wms;
+ clk_mgr->smu_wm_set.mc_address.quad_part = 0;
+ }
+ ASSERT(clk_mgr->smu_wm_set.wm_set);
+
+ smu_dpm_clks.dpm_clks = (DpmClocks_t_dcn35 *)dm_helpers_allocate_gpu_mem(
+ clk_mgr->base.base.ctx,
+ DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ sizeof(DpmClocks_t_dcn35),
+ &smu_dpm_clks.mc_address.quad_part);
+
+ if (smu_dpm_clks.dpm_clks == NULL) {
+ smu_dpm_clks.dpm_clks = &dummy_clocks;
+ smu_dpm_clks.mc_address.quad_part = 0;
+ }
+
+ ASSERT(smu_dpm_clks.dpm_clks);
+
+ clk_mgr->base.smu_ver = dcn35_smu_get_smu_version(&clk_mgr->base);
+
+ if (clk_mgr->base.smu_ver)
+ clk_mgr->base.smu_present = true;
+
+ /* TODO: Check we get what we expect during bringup */
+ clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);
+
+ if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
+ dcn35_bw_params.wm_table = lpddr5_wm_table;
+ } else {
+ dcn35_bw_params.wm_table = ddr5_wm_table;
+ }
+ /* Saved clocks configured at boot for debug purposes */
+ dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
+
+ clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
+ clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
+
+ dce_clock_read_ss_info(&clk_mgr->base);
+	/* when the clock source is from the FCH it could have SS, same clock source as the DPREF clock */
+
+ dcn35_read_ss_info_from_lut(&clk_mgr->base);
+
+ clk_mgr->base.base.bw_params = &dcn35_bw_params;
+
+ if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
+ int i;
+ dcn35_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);
+ DC_LOG_SMU("NumDcfClkLevelsEnabled: %d\n"
+ "NumDispClkLevelsEnabled: %d\n"
+ "NumSocClkLevelsEnabled: %d\n"
+ "VcnClkLevelsEnabled: %d\n"
+ "FClkLevelsEnabled: %d\n"
+ "NumMemPstatesEnabled: %d\n"
+ "MinGfxClk: %d\n"
+ "MaxGfxClk: %d\n",
+ smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->VcnClkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumFclkLevelsEnabled,
+ smu_dpm_clks.dpm_clks->NumMemPstatesEnabled,
+ smu_dpm_clks.dpm_clks->MinGfxClk,
+ smu_dpm_clks.dpm_clks->MaxGfxClk);
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDcfClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DcfClocks[%d] = %d\n",
+ i,
+ smu_dpm_clks.dpm_clks->DcfClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumDispClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->DispClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->DispClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocClocks[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocClocks[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumFclkLevelsEnabled; i++) {
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->FclkClocks_Freq[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->FclkClocks_Freq[i]);
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->FclkClocks_Voltage[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->FclkClocks_Voltage[i]);
+ }
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumSocClkLevelsEnabled; i++)
+ DC_LOG_SMU("smu_dpm_clks.dpm_clks->SocVoltage[%d] = %d\n",
+ i, smu_dpm_clks.dpm_clks->SocVoltage[i]);
+
+ for (i = 0; i < smu_dpm_clks.dpm_clks->NumMemPstatesEnabled; i++) {
+			DC_LOG_SMU("smu_dpm_clks.dpm_clks->MemPstateTable[%d].UClk = %d\n"
+					"smu_dpm_clks.dpm_clks->MemPstateTable[%d].MemClk = %d\n"
+					"smu_dpm_clks.dpm_clks->MemPstateTable[%d].Voltage = %d\n",
+ i, smu_dpm_clks.dpm_clks->MemPstateTable[i].UClk,
+ i, smu_dpm_clks.dpm_clks->MemPstateTable[i].MemClk,
+ i, smu_dpm_clks.dpm_clks->MemPstateTable[i].Voltage);
+ }
+
+ if (ctx->dc_bios && ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) {
+ dcn35_clk_mgr_helper_populate_bw_params(
+ &clk_mgr->base,
+ ctx->dc_bios->integrated_info,
+ smu_dpm_clks.dpm_clks);
+ }
+ }
+
+ if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
+ dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ smu_dpm_clks.dpm_clks);
+
+ if (ctx->dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
+ bool ips_support = false;
+
+		/* avoid calling PMFW at init */
+ ips_support = dcn35_smu_get_ips_supported(&clk_mgr->base);
+ if (ips_support) {
+ ctx->dc->debug.ignore_pg = false;
+ ctx->dc->debug.disable_dpp_power_gate = false;
+ ctx->dc->debug.disable_hubp_power_gate = false;
+ ctx->dc->debug.disable_dsc_power_gate = false;
+ } else {
+			/* reset the config control flag */
+			ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL; /* PMFW does not support IPS, disable it entirely */
+ }
+ }
+}
+
+void dcn35_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int)
+{
+ struct clk_mgr_dcn35 *clk_mgr = TO_CLK_MGR_DCN35(clk_mgr_int);
+
+ if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0)
+ dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ clk_mgr->smu_wm_set.wm_set);
+}
diff --git a/rr-cache/d1b95d32568e36081f0d9fa2b5ec12cc7cb2ca0a/postimage b/rr-cache/d1b95d32568e36081f0d9fa2b5ec12cc7cb2ca0a/postimage
new file mode 100644
index 000000000000..4d9a76446df8
--- /dev/null
+++ b/rr-cache/d1b95d32568e36081f0d9fa2b5ec12cc7cb2ca0a/postimage
@@ -0,0 +1,11567 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* The caprices of the preprocessor require that this be declared right here */
+#define CREATE_TRACE_POINTS
+
+#include "dm_services_types.h"
+#include "dc.h"
+#include "link_enc_cfg.h"
+#include "dc/inc/core_types.h"
+#include "dal_asic_id.h"
+#include "dmub/dmub_srv.h"
+#include "dc/inc/hw/dmcu.h"
+#include "dc/inc/hw/abm.h"
+#include "dc/dc_dmub_srv.h"
+#include "dc/dc_edid_parser.h"
+#include "dc/dc_stat.h"
+#include "dc/dc_state.h"
+#include "amdgpu_dm_trace.h"
+#include "dpcd_defs.h"
+#include "link/protocols/link_dpcd.h"
+#include "link_service_types.h"
+#include "link/protocols/link_dp_capability.h"
+#include "link/protocols/link_ddc.h"
+
+#include "vid.h"
+#include "amdgpu.h"
+#include "amdgpu_display.h"
+#include "amdgpu_ucode.h"
+#include "atom.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_plane.h"
+#include "amdgpu_dm_crtc.h"
+#include "amdgpu_dm_hdcp.h"
+#include <drm/display/drm_hdcp_helper.h>
+#include "amdgpu_dm_wb.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_atombios.h"
+
+#include "amd_shared.h"
+#include "amdgpu_dm_irq.h"
+#include "dm_helpers.h"
+#include "amdgpu_dm_mst_types.h"
+#if defined(CONFIG_DEBUG_FS)
+#include "amdgpu_dm_debugfs.h"
+#endif
+#include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/pm_runtime.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/component.h>
+#include <linux/dmi.h>
+
+#include <drm/display/drm_dp_mst_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_fixed.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_audio_component.h>
+#include <drm/drm_gem_atomic_helper.h>
+
+#include <acpi/video.h>
+
+#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
+
+#include "dcn/dcn_1_0_offset.h"
+#include "dcn/dcn_1_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "soc15_common.h"
+#include "vega10_ip_offset.h"
+
+#include "gc/gc_11_0_0_offset.h"
+#include "gc/gc_11_0_0_sh_mask.h"
+
+#include "modules/inc/mod_freesync.h"
+#include "modules/power/power_helpers.h"
+
+#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
+#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
+#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
+#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
+#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
+#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
+#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
+#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
+#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
+#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
+#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
+
+#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
+#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
+
+#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
+MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
+
+#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
+MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
+
+#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
+
+#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
+
+/* Number of bytes in PSP header for firmware. */
+#define PSP_HEADER_BYTES 0x100
+
+/* Number of bytes in PSP footer for firmware. */
+#define PSP_FOOTER_BYTES 0x100
+
+/**
+ * DOC: overview
+ *
+ * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
+ * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
+ * requests into DC requests, and DC responses into DRM responses.
+ *
+ * The root control structure is &struct amdgpu_display_manager.
+ */
+
+/* basic init/fini API */
+static int amdgpu_dm_init(struct amdgpu_device *adev);
+static void amdgpu_dm_fini(struct amdgpu_device *adev);
+static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
+
+static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
+{
+ switch (link->dpcd_caps.dongle_type) {
+ case DISPLAY_DONGLE_NONE:
+ return DRM_MODE_SUBCONNECTOR_Native;
+ case DISPLAY_DONGLE_DP_VGA_CONVERTER:
+ return DRM_MODE_SUBCONNECTOR_VGA;
+ case DISPLAY_DONGLE_DP_DVI_CONVERTER:
+ case DISPLAY_DONGLE_DP_DVI_DONGLE:
+ return DRM_MODE_SUBCONNECTOR_DVID;
+ case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
+ case DISPLAY_DONGLE_DP_HDMI_DONGLE:
+ return DRM_MODE_SUBCONNECTOR_HDMIA;
+ case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
+ default:
+ return DRM_MODE_SUBCONNECTOR_Unknown;
+ }
+}
+
+static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_link *link = aconnector->dc_link;
+ struct drm_connector *connector = &aconnector->base;
+ enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return;
+
+ if (aconnector->dc_sink)
+ subconnector = get_subconnector_type(link);
+
+ drm_object_property_set_value(&connector->base,
+ connector->dev->mode_config.dp_subconnector_property,
+ subconnector);
+}
+
+/*
+ * initializes drm_device display related structures, based on the information
+ * provided by DAL. The drm structures are: drm_crtc, drm_connector,
+ * drm_encoder, drm_mode_config
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
+/* removes and deallocates the drm structures, created by the above function */
+static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
+
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *amdgpu_dm_connector,
+ u32 link_index,
+ struct amdgpu_encoder *amdgpu_encoder);
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index);
+
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
+
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
+
+static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
+static void handle_hpd_rx_irq(void *param);
+
+static bool
+is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state);
+/*
+ * dm_vblank_get_counter
+ *
+ * @brief
+ * Get counter for number of vertical blanks
+ *
+ * @param
+ * struct amdgpu_device *adev - [in] desired amdgpu device
+ * int disp_idx - [in] which CRTC to get the counter from
+ *
+ * @return
+ * Counter for vertical blanks
+ */
+static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+ struct amdgpu_crtc *acrtc = NULL;
+
+ if (crtc >= adev->mode_info.num_crtc)
+ return 0;
+
+ acrtc = adev->mode_info.crtcs[crtc];
+
+ if (!acrtc->dm_irq_params.stream) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
+ }
+
+ return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
+}
+
+static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ u32 *vbl, u32 *position)
+{
+ u32 v_blank_start, v_blank_end, h_position, v_position;
+ struct amdgpu_crtc *acrtc = NULL;
+ struct dc *dc = adev->dm.dc;
+
+ if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+ return -EINVAL;
+
+ acrtc = adev->mode_info.crtcs[crtc];
+
+ if (!acrtc->dm_irq_params.stream) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
+ }
+
+ if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
+ dc_allow_idle_optimizations(dc, false);
+
+ /*
+	 * TODO: rework the base driver to use values directly.
+	 * For now, parse it back into reg-format.
+ */
+ dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
+ &v_blank_start,
+ &v_blank_end,
+ &h_position,
+ &v_position);
+
+ *position = v_position | (h_position << 16);
+ *vbl = v_blank_start | (v_blank_end << 16);
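+	/*
+	 * Packing example (hypothetical values): h_position = 0x00A0 and
+	 * v_position = 0x0050 yield *position = 0x00A00050, with the horizontal
+	 * position in the upper 16 bits.
+	 */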
+
+ return 0;
+}
+
+static bool dm_is_idle(void *handle)
+{
+ /* XXX todo */
+ return true;
+}
+
+static int dm_wait_for_idle(void *handle)
+{
+ /* XXX todo */
+ return 0;
+}
+
+static bool dm_check_soft_reset(void *handle)
+{
+ return false;
+}
+
+static int dm_soft_reset(void *handle)
+{
+ /* XXX todo */
+ return 0;
+}
+
+static struct amdgpu_crtc *
+get_crtc_by_otg_inst(struct amdgpu_device *adev,
+ int otg_inst)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_crtc *crtc;
+ struct amdgpu_crtc *amdgpu_crtc;
+
+ if (WARN_ON(otg_inst == -1))
+ return adev->mode_info.crtcs[0];
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (amdgpu_crtc->otg_inst == otg_inst)
+ return amdgpu_crtc;
+ }
+
+ return NULL;
+}
+
+static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
+ struct dm_crtc_state *new_state)
+{
+ if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
+ return true;
+ else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
+ return true;
+ else
+ return false;
+}
+
+static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
+ int planes_count)
+{
+ int i, j;
+
+ for (i = 0, j = planes_count - 1; i < j; i++, j--)
+ swap(array_of_surface_update[i], array_of_surface_update[j]);
+}
+
+/**
+ * update_planes_and_stream_adapter() - Send planes to be updated in DC
+ *
+ * DC has a generic way to update planes and stream via
+ * dc_update_planes_and_stream function; however, DM might need some
+ * adjustments and preparation before calling it. This function is a wrapper
+ * for the dc_update_planes_and_stream that does any required configuration
+ * before passing control to DC.
+ *
+ * @dc: Display Core control structure
+ * @update_type: specify whether it is FULL/MEDIUM/FAST update
+ * @planes_count: planes count to update
+ * @stream: stream state
+ * @stream_update: stream update
+ * @array_of_surface_update: dc surface update pointer
+ *
+ */
+static inline bool update_planes_and_stream_adapter(struct dc *dc,
+ int update_type,
+ int planes_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_surface_update *array_of_surface_update)
+{
+ reverse_planes_order(array_of_surface_update, planes_count);
+
+ /*
+ * Previous frame finished and HW is ready for optimization.
+ */
+ if (update_type == UPDATE_TYPE_FAST)
+ dc_post_update_surfaces_to_stream(dc);
+
+ return dc_update_planes_and_stream(dc,
+ array_of_surface_update,
+ planes_count,
+ stream,
+ stream_update);
+}
+
+/**
+ * dm_pflip_high_irq() - Handle pageflip interrupt
+ * @interrupt_params: ignored
+ *
+ * Handles the pageflip interrupt by notifying all interested parties
+ * that the pageflip has been completed.
+ */
+static void dm_pflip_high_irq(void *interrupt_params)
+{
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct drm_device *dev = adev_to_drm(adev);
+ unsigned long flags;
+ struct drm_pending_vblank_event *e;
+ u32 vpos, hpos, v_blank_start, v_blank_end;
+ bool vrr_active;
+
+ amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
+
+ /* IRQ could occur when in initial stage */
+ /* TODO work and BO cleanup */
+ if (amdgpu_crtc == NULL) {
+ drm_dbg_state(dev, "CRTC is null, returning.\n");
+ return;
+ }
+
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+ drm_dbg_state(dev,
+ "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
+ amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED,
+ amdgpu_crtc->crtc_id, amdgpu_crtc);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+ return;
+ }
+
+ /* page flip completed. */
+ e = amdgpu_crtc->event;
+ amdgpu_crtc->event = NULL;
+
+ WARN_ON(!e);
+
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);
+
+ /* Fixed refresh rate, or VRR scanout position outside front-porch? */
+ if (!vrr_active ||
+ !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
+ &v_blank_end, &hpos, &vpos) ||
+ (vpos < v_blank_start)) {
+		/* Update to correct count and vblank timestamp if racing with
+		 * vblank irq. This also updates to the correct vblank timestamp
+		 * even in VRR mode, as scanout is past the front-porch at this
+		 * point.
+		 */
+ drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
+
+ /* Wake up userspace by sending the pageflip event with proper
+ * count and timestamp of vblank of flip completion.
+ */
+ if (e) {
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
+
+ /* Event sent, so done with vblank for this flip */
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
+ }
+ } else if (e) {
+ /* VRR active and inside front-porch: vblank count and
+ * timestamp for pageflip event will only be up to date after
+ * drm_crtc_handle_vblank() has been executed from late vblank
+ * irq handler after start of back-porch (vline 0). We queue the
+ * pageflip event for send-out by drm_crtc_handle_vblank() with
+ * updated timestamp and count, once it runs after us.
+ *
+ * We need to open-code this instead of using the helper
+ * drm_crtc_arm_vblank_event(), as that helper would
+ * call drm_crtc_accurate_vblank_count(), which we must
+ * not call in VRR mode while we are in front-porch!
+ */
+
+ /* sequence will be replaced by real count during send-out. */
+ e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
+ e->pipe = amdgpu_crtc->crtc_id;
+
+ list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
+ e = NULL;
+ }
+
+	/* Keep track of vblank of this flip for flip throttling. We use the
+	 * cooked hw counter, as that one was incremented at the start of this
+	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
+	 * count for queueing new pageflips if vsync + VRR is enabled.
+	 */
+ amdgpu_crtc->dm_irq_params.last_flip_vblank =
+ amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
+
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+
+ drm_dbg_state(dev,
+ "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
+ amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
+}
+
+static void dm_vupdate_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_crtc *acrtc;
+ struct drm_device *drm_dev;
+ struct drm_vblank_crtc *vblank;
+ ktime_t frame_duration_ns, previous_timestamp;
+ unsigned long flags;
+ int vrr_active;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
+
+ if (acrtc) {
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
+ drm_dev = acrtc->base.dev;
+ vblank = &drm_dev->vblank[acrtc->base.index];
+ previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
+ frame_duration_ns = vblank->time - previous_timestamp;
+
+ if (frame_duration_ns > 0) {
+ trace_amdgpu_refresh_rate_track(acrtc->base.index,
+ frame_duration_ns,
+ ktime_divns(NSEC_PER_SEC, frame_duration_ns));
+ atomic64_set(&irq_params->previous_timestamp, vblank->time);
+ }
+
+ drm_dbg_vbl(drm_dev,
+ "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
+ vrr_active);
+
+		/* Core vblank handling is done here after the end of front-porch in
+		 * vrr mode, as vblank timestamping will only give valid results
+		 * once scanout is past the front-porch. This also delivers any
+		 * page-flip completion events that were queued to us while a
+		 * pageflip happened inside front-porch.
+		 */
+ if (vrr_active) {
+ amdgpu_dm_crtc_handle_vblank(acrtc);
+
+ /* BTR processing for pre-DCE12 ASICs */
+ if (acrtc->dm_irq_params.stream &&
+ adev->family < AMDGPU_FAMILY_AI) {
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ mod_freesync_handle_v_update(
+ adev->dm.freesync_module,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params);
+
+ dc_stream_adjust_vmin_vmax(
+ adev->dm.dc,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+ }
+ }
+ }
+}
+
+/**
+ * dm_crtc_high_irq() - Handles CRTC interrupt
+ * @interrupt_params: used for determining the CRTC instance
+ *
+ * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
+ * event handler.
+ */
+static void dm_crtc_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct drm_writeback_job *job;
+ struct amdgpu_crtc *acrtc;
+ unsigned long flags;
+ int vrr_active;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
+ if (!acrtc)
+ return;
+
+ if (acrtc->wb_pending) {
+ if (acrtc->wb_conn) {
+ spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);
+ job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
+ struct drm_writeback_job,
+ list_entry);
+ spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
+
+ if (job) {
+ unsigned int v_total, refresh_hz;
+ struct dc_stream_state *stream = acrtc->dm_irq_params.stream;
+
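+				/* Estimate the refresh rate from the stream timing
+				 * and wait roughly one frame so the writeback job
+				 * can drain before signaling completion.
+				 */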
+ v_total = stream->adjust.v_total_max ?
+ stream->adjust.v_total_max : stream->timing.v_total;
+ refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz *
+ 100LL, (v_total * stream->timing.h_total));
+ mdelay(1000 / refresh_hz);
+
+ drm_writeback_signal_completion(acrtc->wb_conn, 0);
+ dc_stream_fc_disable_writeback(adev->dm.dc,
+ acrtc->dm_irq_params.stream, 0);
+ }
+		} else {
+			DRM_ERROR("%s: no amdgpu_crtc wb_conn\n", __func__);
+		}
+ acrtc->wb_pending = false;
+ }
+
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
+
+ drm_dbg_vbl(adev_to_drm(adev),
+ "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
+ vrr_active, acrtc->dm_irq_params.active_planes);
+
+	/*
+	 * Core vblank handling at start of front-porch is only possible
+	 * in non-vrr mode, as only then will vblank timestamping give
+	 * valid results while done in front-porch. Otherwise defer it
+	 * to dm_vupdate_high_irq after end of front-porch.
+	 */
+ if (!vrr_active)
+ amdgpu_dm_crtc_handle_vblank(acrtc);
+
+	/*
+	 * The following must happen at the start of vblank, for crc
+	 * computation and below-the-range btr support in vrr mode.
+	 */
+ amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
+
+ /* BTR updates need to happen before VUPDATE on Vega and above. */
+ if (adev->family < AMDGPU_FAMILY_AI)
+ return;
+
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+
+ if (acrtc->dm_irq_params.stream &&
+ acrtc->dm_irq_params.vrr_params.supported &&
+ acrtc->dm_irq_params.freesync_config.state ==
+ VRR_STATE_ACTIVE_VARIABLE) {
+ mod_freesync_handle_v_update(adev->dm.freesync_module,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params);
+
+ dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
+
+ /*
+ * If there aren't any active_planes then DCH HUBP may be clock-gated.
+ * In that case, pageflip completion interrupts won't fire and pageflip
+ * completion events won't get delivered. Prevent this by sending
+ * pending pageflip events from here if a flip is still pending.
+ *
+ * If any planes are enabled, use dm_pflip_high_irq() instead, to
+ * avoid race conditions between flip programming and completion,
+ * which could cause too early flip completion events.
+ */
+ if (adev->family >= AMDGPU_FAMILY_RV &&
+ acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
+ acrtc->dm_irq_params.active_planes == 0) {
+ if (acrtc->event) {
+ drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
+ acrtc->event = NULL;
+ drm_crtc_vblank_put(&acrtc->base);
+ }
+ acrtc->pflip_status = AMDGPU_FLIP_NONE;
+ }
+
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+}
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+/**
+ * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
+ * DCN generation ASICs
+ * @interrupt_params: interrupt parameters
+ *
+ * Used to set crc window/read out crc value at vertical line 0 position
+ */
+static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_crtc *acrtc;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
+
+ if (!acrtc)
+ return;
+
+ amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
+}
+#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
+
+/**
+ * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * DMUB AUX or SET_CONFIG command completion processing callback.
+ * Copies the dmub notification to DM, to be read by the AUX-command-
+ * issuing thread, and signals the event to wake up that thread.
+ */
+static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
+{
+ if (adev->dm.dmub_notify)
+ memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
+ if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
+ complete(&adev->dm.dmub_aux_transfer_done);
+}
+
+/**
+ * dmub_hpd_callback - DMUB HPD interrupt processing callback.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * DMUB HPD interrupt processing callback. Gets the display index through
+ * the link index and calls a helper to do the processing.
+ */
+static void dmub_hpd_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct amdgpu_dm_connector *hpd_aconnector = NULL;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct dc_link *link;
+ u8 link_index = 0;
+ struct drm_device *dev;
+
+ if (adev == NULL)
+ return;
+
+ if (notify == NULL) {
+ DRM_ERROR("DMUB HPD callback notification was NULL");
+ return;
+ }
+
+	if (notify->link_index >= adev->dm.dc->link_count) {
+		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
+ return;
+ }
+
+ link_index = notify->link_index;
+ link = adev->dm.dc->links[link_index];
+ dev = adev->dm.ddev;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (link && aconnector->dc_link == link) {
+ if (notify->type == DMUB_NOTIFICATION_HPD)
+ DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
+ else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+ DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+ else
+ DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
+ notify->type, link_index);
+
+ hpd_aconnector = aconnector;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+
+ if (hpd_aconnector) {
+ if (notify->type == DMUB_NOTIFICATION_HPD)
+ handle_hpd_irq_helper(hpd_aconnector);
+ else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+ handle_hpd_rx_irq(hpd_aconnector);
+ }
+}
+
+/**
+ * register_dmub_notify_callback - Sets callback for DMUB notify
+ * @adev: amdgpu_device pointer
+ * @type: Type of dmub notification
+ * @callback: Dmub interrupt callback function
+ * @dmub_int_thread_offload: offload indicator
+ *
+ * API to register a dmub callback handler for a dmub notification.
+ * Also sets an indicator for whether callback processing is to be offloaded
+ * to the dmub interrupt handling thread.
+ * Return: true if successfully registered, false if the callback is NULL or
+ * the notification type is out of range
+ */
+static bool register_dmub_notify_callback(struct amdgpu_device *adev,
+ enum dmub_notification_type type,
+ dmub_notify_interrupt_callback_t callback,
+ bool dmub_int_thread_offload)
+{
+ if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
+ adev->dm.dmub_callback[type] = callback;
+ adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
+	} else {
+		return false;
+	}
+
+ return true;
+}
+
+static void dm_handle_hpd_work(struct work_struct *work)
+{
+ struct dmub_hpd_work *dmub_hpd_wrk;
+
+ dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
+
+ if (!dmub_hpd_wrk->dmub_notify) {
+ DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
+ return;
+ }
+
+ if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
+ dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
+ dmub_hpd_wrk->dmub_notify);
+ }
+
+ kfree(dmub_hpd_wrk->dmub_notify);
+ kfree(dmub_hpd_wrk);
+}
+
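+/* Upper bound on DMCUB trace buffer entries drained per outbox interrupt. */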
+#define DMUB_TRACE_MAX_READ 64
+/**
+ * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
+ * @interrupt_params: used for determining the Outbox instance
+ *
+ * Handles the Outbox interrupt by dispatching DMUB notifications and
+ * draining the trace buffer.
+ */
+static void dm_dmub_outbox1_low_irq(void *interrupt_params)
+{
+ struct dmub_notification notify;
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dmcub_trace_buf_entry entry = { 0 };
+ u32 count = 0;
+ struct dmub_hpd_work *dmub_hpd_wrk;
+ struct dc_link *plink = NULL;
+
+ if (dc_enable_dmub_notifications(adev->dm.dc) &&
+ irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
+
+ do {
+ dc_stat_get_dmub_notification(adev->dm.dc, &notify);
+ if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
+ DRM_ERROR("DM: notify type %d invalid!", notify.type);
+ continue;
+ }
+ if (!dm->dmub_callback[notify.type]) {
+ DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
+ continue;
+ }
+ if (dm->dmub_thread_offload[notify.type] == true) {
+ dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
+ if (!dmub_hpd_wrk) {
+ DRM_ERROR("Failed to allocate dmub_hpd_wrk");
+ return;
+ }
+ dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
+ GFP_ATOMIC);
+ if (!dmub_hpd_wrk->dmub_notify) {
+ kfree(dmub_hpd_wrk);
+ DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
+ return;
+ }
+ INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
+ dmub_hpd_wrk->adev = adev;
+ if (notify.type == DMUB_NOTIFICATION_HPD) {
+ plink = adev->dm.dc->links[notify.link_index];
+ if (plink) {
+ plink->hpd_status =
+ notify.hpd_status == DP_HPD_PLUG;
+ }
+ }
+ queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
+ } else {
+ dm->dmub_callback[notify.type](adev, &notify);
+ }
+ } while (notify.pending_notification);
+ }
+
+ do {
+ if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
+ trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
+ entry.param0, entry.param1);
+
+ DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+ entry.trace_code, entry.tick_count, entry.param0, entry.param1);
+		} else {
+			break;
+		}
+
+ count++;
+
+ } while (count <= DMUB_TRACE_MAX_READ);
+
+ if (count > DMUB_TRACE_MAX_READ)
+		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
+}
+
+static int dm_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int dm_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+/* Prototypes of private functions */
+static int dm_early_init(void *handle);
+
+/* Allocate memory for FBC compressed data */
+static void amdgpu_dm_fbc_init(struct drm_connector *connector)
+{
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
+ struct dm_compressor_info *compressor = &adev->dm.compressor;
+ struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
+ struct drm_display_mode *mode;
+ unsigned long max_size = 0;
+
+ if (adev->dm.dc->fbc_compressor == NULL)
+ return;
+
+ if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
+ return;
+
+ if (compressor->bo_ptr)
+ return;
+
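+	/* Size the compression buffer for the largest mode exposed by the connector. */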
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (max_size < mode->htotal * mode->vtotal)
+ max_size = mode->htotal * mode->vtotal;
+ }
+
+ if (max_size) {
+ int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
+ &compressor->gpu_addr, &compressor->cpu_addr);
+
+		if (r) {
+			DRM_ERROR("DM: Failed to initialize FBC\n");
+		} else {
+			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
+			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
+		}
+	}
+}
+
+static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
+ int pipe, bool *enabled,
+ unsigned char *buf, int max_bytes)
+{
+ struct drm_device *dev = dev_get_drvdata(kdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct amdgpu_dm_connector *aconnector;
+ int ret = 0;
+
+ *enabled = false;
+
+ mutex_lock(&adev->dm.audio_lock);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->audio_inst != port)
+ continue;
+
+ *enabled = true;
+ ret = drm_eld_size(connector->eld);
+ memcpy(buf, connector->eld, min(max_bytes, ret));
+
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ mutex_unlock(&adev->dm.audio_lock);
+
+ DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
+
+ return ret;
+}
+
+static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
+ .get_eld = amdgpu_dm_audio_component_get_eld,
+};
+
+static int amdgpu_dm_audio_component_bind(struct device *kdev,
+ struct device *hda_kdev, void *data)
+{
+ struct drm_device *dev = dev_get_drvdata(kdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct drm_audio_component *acomp = data;
+
+ acomp->ops = &amdgpu_dm_audio_component_ops;
+ acomp->dev = kdev;
+ adev->dm.audio_component = acomp;
+
+ return 0;
+}
+
+static void amdgpu_dm_audio_component_unbind(struct device *kdev,
+ struct device *hda_kdev, void *data)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev));
+ struct drm_audio_component *acomp = data;
+
+ acomp->ops = NULL;
+ acomp->dev = NULL;
+ adev->dm.audio_component = NULL;
+}
+
+static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
+ .bind = amdgpu_dm_audio_component_bind,
+ .unbind = amdgpu_dm_audio_component_unbind,
+};
+
+static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
+{
+ int i, ret;
+
+ if (!amdgpu_audio)
+ return 0;
+
+ adev->mode_info.audio.enabled = true;
+
+ adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ adev->mode_info.audio.pin[i].channels = -1;
+ adev->mode_info.audio.pin[i].rate = -1;
+ adev->mode_info.audio.pin[i].bits_per_sample = -1;
+ adev->mode_info.audio.pin[i].status_bits = 0;
+ adev->mode_info.audio.pin[i].category_code = 0;
+ adev->mode_info.audio.pin[i].connected = false;
+ adev->mode_info.audio.pin[i].id =
+ adev->dm.dc->res_pool->audios[i]->inst;
+ adev->mode_info.audio.pin[i].offset = 0;
+ }
+
+ ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
+ if (ret < 0)
+ return ret;
+
+ adev->dm.audio_registered = true;
+
+ return 0;
+}
+
+static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_audio)
+ return;
+
+ if (!adev->mode_info.audio.enabled)
+ return;
+
+ if (adev->dm.audio_registered) {
+ component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
+ adev->dm.audio_registered = false;
+ }
+
+ /* TODO: Disable audio? */
+
+ adev->mode_info.audio.enabled = false;
+}
+
+static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
+{
+ struct drm_audio_component *acomp = adev->dm.audio_component;
+
+ if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
+ DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
+
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ pin, -1);
+ }
+}
+
+static int dm_dmub_hw_init(struct amdgpu_device *adev)
+{
+ const struct dmcub_firmware_header_v1_0 *hdr;
+ struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+ struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+ const struct firmware *dmub_fw = adev->dm.dmub_fw;
+ struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
+ struct abm *abm = adev->dm.dc->res_pool->abm;
+ struct dc_context *ctx = adev->dm.dc->ctx;
+ struct dmub_srv_hw_params hw_params;
+ enum dmub_status status;
+ const unsigned char *fw_inst_const, *fw_bss_data;
+ u32 i, fw_inst_const_size, fw_bss_data_size;
+ bool has_hw_support;
+
+ if (!dmub_srv)
+ /* DMUB isn't supported on the ASIC. */
+ return 0;
+
+ if (!fb_info) {
+ DRM_ERROR("No framebuffer info for DMUB service.\n");
+ return -EINVAL;
+ }
+
+ if (!dmub_fw) {
+ /* Firmware required for DMUB support. */
+ DRM_ERROR("No firmware provided for DMUB.\n");
+ return -EINVAL;
+ }
+
+ /* initialize register offsets for ASICs with runtime initialization available */
+ if (dmub_srv->hw_funcs.init_reg_offsets)
+ dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx);
+
+ status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
+ return -EINVAL;
+ }
+
+ if (!has_hw_support) {
+ DRM_INFO("DMUB unsupported on ASIC\n");
+ return 0;
+ }
+
+ /* Reset DMCUB if it was previously running - before we overwrite its memory. */
+ status = dmub_srv_hw_reset(dmub_srv);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Error resetting DMUB HW: %d\n", status);
+
+ hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
+
+ fw_inst_const = dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ PSP_HEADER_BYTES;
+
+ fw_bss_data = dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ le32_to_cpu(hdr->inst_const_bytes);
+
+ /* Copy firmware and bios info into FB memory. */
+ fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+ PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+
+ fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+
+	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
+	 * amdgpu_ucode_init_single_fw will load the dmub firmware's
+	 * fw_inst_const part to cw0; otherwise, the firmware backdoor load
+	 * will be done by dm_dmub_hw_init.
+	 */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
+ fw_inst_const_size);
+ }
+
+ if (fw_bss_data_size)
+ memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
+ fw_bss_data, fw_bss_data_size);
+
+ /* Copy firmware bios info into FB memory. */
+ memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
+ adev->bios_size);
+
+ /* Reset regions that need to be reset. */
+ memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
+
+ memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
+
+ memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
+
+ /* Initialize hardware. */
+ memset(&hw_params, 0, sizeof(hw_params));
+ hw_params.fb_base = adev->gmc.fb_start;
+ hw_params.fb_offset = adev->vm_manager.vram_base_offset;
+
+ /* backdoor load firmware and trigger dmub running */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ hw_params.load_inst_const = true;
+
+ if (dmcu)
+ hw_params.psp_version = dmcu->psp_version;
+
+ for (i = 0; i < fb_info->num_fb; ++i)
+ hw_params.fb[i] = &fb_info->fb[i];
+
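+	/* DPIA (USB4 DP tunneling) is only supported on select DCN 3.1+ ASICs. */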
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ hw_params.dpia_supported = true;
+ hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
+ break;
+ default:
+ break;
+ }
+
+ status = dmub_srv_hw_init(dmub_srv, &hw_params);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error initializing DMUB HW: %d\n", status);
+ return -EINVAL;
+ }
+
+ /* Wait for firmware load to finish. */
+ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+
+ /* Init DMCU and ABM if available. */
+ if (dmcu && abm) {
+ dmcu->funcs->dmcu_init(dmcu);
+ abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
+ }
+
+ if (!adev->dm.dc->ctx->dmub_srv)
+ adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+ if (!adev->dm.dc->ctx->dmub_srv) {
+ DRM_ERROR("Couldn't allocate DC DMUB server!\n");
+ return -ENOMEM;
+ }
+
+ DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
+ adev->dm.dmcub_fw_version);
+
+ return 0;
+}
+
+static void dm_dmub_hw_resume(struct amdgpu_device *adev)
+{
+ struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+ enum dmub_status status;
+ bool init;
+
+ if (!dmub_srv) {
+ /* DMUB isn't supported on the ASIC. */
+ return;
+ }
+
+ status = dmub_srv_is_hw_init(dmub_srv, &init);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("DMUB hardware init check failed: %d\n", status);
+
+ if (status == DMUB_STATUS_OK && init) {
+ /* Wait for firmware load to finish. */
+ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ } else {
+ /* Perform the full hardware initialization. */
+ dm_dmub_hw_init(adev);
+ }
+}
+
+static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
+{
+ u64 pt_base;
+ u32 logical_addr_low;
+ u32 logical_addr_high;
+ u32 agp_base, agp_bot, agp_top;
+ PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
+
+ memset(pa_config, 0, sizeof(*pa_config));
+
+ agp_base = 0;
+ agp_bot = adev->gmc.agp_start >> 24;
+ agp_top = adev->gmc.agp_end >> 24;
+
+ /* AGP aperture is disabled */
+ if (agp_bot > agp_top) {
+ logical_addr_low = adev->gmc.fb_start >> 18;
+ if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+ AMD_APU_IS_RENOIR |
+ AMD_APU_IS_GREEN_SARDINE))
+			/*
+			 * Raven2 has a HW issue where it cannot use vram that
+			 * lies beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. The
+			 * workaround is to increase the system aperture high
+			 * address (by 1) to get rid of the VM fault and
+			 * hardware hang.
+			 */
+ logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
+ else
+ logical_addr_high = adev->gmc.fb_end >> 18;
+ } else {
+ logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
+ if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+ AMD_APU_IS_RENOIR |
+ AMD_APU_IS_GREEN_SARDINE))
+			/*
+			 * Raven2 has a HW issue where it cannot use vram that
+			 * lies beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. The
+			 * workaround is to increase the system aperture high
+			 * address (by 1) to get rid of the VM fault and
+			 * hardware hang.
+			 */
+ logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
+ else
+ logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
+ }
+
+ pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_base.high_part = upper_32_bits(pt_base);
+ page_table_base.low_part = lower_32_bits(pt_base);
+
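+	/* System aperture addresses are programmed in 256KB units (<< 18), the
+	 * AGP aperture in 16MB units (<< 24), and GART page-table addresses in
+	 * 4KB pages (<< 12).
+	 */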
+ pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
+ pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
+
+ pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
+ pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
+ pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
+
+ pa_config->system_aperture.fb_base = adev->gmc.fb_start;
+ pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
+ pa_config->system_aperture.fb_top = adev->gmc.fb_end;
+
+ pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
+ pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
+ pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
+
+ pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
+}
+
+static void force_connector_state(
+ struct amdgpu_dm_connector *aconnector,
+ enum drm_connector_force force_state)
+{
+ struct drm_connector *connector = &aconnector->base;
+
+ mutex_lock(&connector->dev->mode_config.mutex);
+ aconnector->base.force = force_state;
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
+ mutex_lock(&aconnector->hpd_lock);
+ drm_kms_helper_connector_hotplug_event(connector);
+ mutex_unlock(&aconnector->hpd_lock);
+}
+
+static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
+{
+ struct hpd_rx_irq_offload_work *offload_work;
+ struct amdgpu_dm_connector *aconnector;
+ struct dc_link *dc_link;
+ struct amdgpu_device *adev;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ unsigned long flags;
+ union test_response test_response;
+
+ memset(&test_response, 0, sizeof(test_response));
+
+ offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
+ aconnector = offload_work->offload_wq->aconnector;
+
+ if (!aconnector) {
+ DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
+ goto skip;
+ }
+
+ adev = drm_to_adev(aconnector->base.dev);
+ dc_link = aconnector->dc_link;
+
+ mutex_lock(&aconnector->hpd_lock);
+ if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+ mutex_unlock(&aconnector->hpd_lock);
+
+ if (new_connection_type == dc_connection_none)
+ goto skip;
+
+ if (amdgpu_in_reset(adev))
+ goto skip;
+
+ if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+ offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
+ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+ offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
+ spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+ goto skip;
+ }
+
+ mutex_lock(&adev->dm.dc_lock);
+ if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+ dc_link_dp_handle_automated_test(dc_link);
+
+ if (aconnector->timing_changed) {
+ /* force connector disconnect and reconnect */
+ force_connector_state(aconnector, DRM_FORCE_OFF);
+ msleep(100);
+ force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
+ }
+
+ test_response.bits.ACK = 1;
+
+ core_link_write_dpcd(
+ dc_link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+ } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+ dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
+ dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+		/* offload_work->data comes from handle_hpd_rx_irq->
+		 * schedule_hpd_rx_offload_work. This is deferred handling
+		 * for an hpd short pulse. By this point the link status may
+		 * have changed, so get the latest link status from the dpcd
+		 * registers. If the link status is good, skip running link
+		 * training again.
+		 */
+ union hpd_irq_data irq_data;
+
+ memset(&irq_data, 0, sizeof(irq_data));
+
+		/* Before dc_link_dp_handle_link_loss, allow a new link-loss handling
+		 * request to be added to the work queue in case the link is lost again
+		 * at the end of dc_link_dp_handle_link_loss.
+		 */
+ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+ offload_work->offload_wq->is_handling_link_loss = false;
+ spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+
+ if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
+ dc_link_check_link_loss_status(dc_link, &irq_data))
+ dc_link_dp_handle_link_loss(dc_link);
+ }
+ mutex_unlock(&adev->dm.dc_lock);
+
+skip:
+ kfree(offload_work);
+}
+
+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
+{
+ int max_caps = dc->caps.max_links;
+ int i = 0;
+ struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
+
+ hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
+
+ if (!hpd_rx_offload_wq)
+ return NULL;
+
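+	/* One single-threaded workqueue per link, so per-link hpd_rx events are serialized. */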
+ for (i = 0; i < max_caps; i++) {
+ hpd_rx_offload_wq[i].wq =
+ create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
+
+ if (hpd_rx_offload_wq[i].wq == NULL) {
+ DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
+ goto out_err;
+ }
+
+ spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
+ }
+
+ return hpd_rx_offload_wq;
+
+out_err:
+ for (i = 0; i < max_caps; i++) {
+ if (hpd_rx_offload_wq[i].wq)
+ destroy_workqueue(hpd_rx_offload_wq[i].wq);
+ }
+ kfree(hpd_rx_offload_wq);
+ return NULL;
+}
+
+struct amdgpu_stutter_quirk {
+ u16 chip_vendor;
+ u16 chip_device;
+ u16 subsys_vendor;
+ u16 subsys_device;
+ u8 revision;
+};
+
+static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
+ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+ { 0, 0, 0, 0, 0 },
+};
+
+static bool dm_should_disable_stutter(struct pci_dev *pdev)
+{
+ const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
+
+ while (p && p->chip_device != 0) {
+ if (pdev->vendor == p->chip_vendor &&
+ pdev->device == p->chip_device &&
+ pdev->subsystem_vendor == p->subsys_vendor &&
+ pdev->subsystem_device == p->subsys_device &&
+ pdev->revision == p->revision) {
+ return true;
+ }
+ ++p;
+ }
+ return false;
+}
+
+static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
+ },
+ },
+ {}
+ /* TODO: refactor this from a fixed table to a dynamic option */
+};
+
+static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+{
+ const struct dmi_system_id *dmi_id;
+
+ dm->aux_hpd_discon_quirk = false;
+
+ dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
+ if (dmi_id) {
+ dm->aux_hpd_discon_quirk = true;
+ DRM_INFO("aux_hpd_discon_quirk attached\n");
+ }
+}
+
+static int amdgpu_dm_init(struct amdgpu_device *adev)
+{
+ struct dc_init_data init_data;
+ struct dc_callback_init init_params;
+ int r;
+
+ adev->dm.ddev = adev_to_drm(adev);
+ adev->dm.adev = adev;
+
+ /* Zero all the fields */
+ memset(&init_data, 0, sizeof(init_data));
+ memset(&init_params, 0, sizeof(init_params));
+
+ mutex_init(&adev->dm.dpia_aux_lock);
+ mutex_init(&adev->dm.dc_lock);
+ mutex_init(&adev->dm.audio_lock);
+
+ if (amdgpu_dm_irq_init(adev)) {
+ DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+ goto error;
+ }
+
+ init_data.asic_id.chip_family = adev->family;
+
+ init_data.asic_id.pci_revision_id = adev->pdev->revision;
+ init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+ init_data.asic_id.chip_id = adev->pdev->device;
+
+ init_data.asic_id.vram_width = adev->gmc.vram_width;
+ /* TODO: initialize init_data.asic_id.vram_type here!!!! */
+ init_data.asic_id.atombios_base_address =
+ adev->mode_info.atom_context->bios;
+
+ init_data.driver = adev;
+
+ adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
+
+ if (!adev->dm.cgs_device) {
+ DRM_ERROR("amdgpu: failed to create cgs device.\n");
+ goto error;
+ }
+
+ init_data.cgs_device = adev->dm.cgs_device;
+
+ init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 1, 0):
+ switch (adev->dm.dmcub_fw_version) {
+ case 0: /* development */
+ case 0x1: /* linux-firmware.git hash 6d9f399 */
+ case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
+ init_data.flags.disable_dmcu = false;
+ break;
+ default:
+ init_data.flags.disable_dmcu = true;
+ }
+ break;
+ case IP_VERSION(2, 0, 3):
+ init_data.flags.disable_dmcu = true;
+ break;
+ default:
+ break;
+ }
+
+	/* APUs support S/G display by default, except:
+	 * ASICs before Carrizo,
+	 * RAVEN1 (users reported stability issues)
+	 */
+
+ if (adev->asic_type < CHIP_CARRIZO) {
+ init_data.flags.gpu_vm_support = false;
+ } else if (adev->asic_type == CHIP_RAVEN) {
+ if (adev->apu_flags & AMD_APU_IS_RAVEN)
+ init_data.flags.gpu_vm_support = false;
+ else
+ init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
+ } else {
+ init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
+ }
+
+ adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;
+
+ if (amdgpu_dc_feature_mask & DC_FBC_MASK)
+ init_data.flags.fbc_support = true;
+
+ if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
+ init_data.flags.multi_mon_pp_mclk_switch = true;
+
+ if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
+ init_data.flags.disable_fractional_pwm = true;
+
+ if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
+ init_data.flags.edp_no_power_sequencing = true;
+
+ if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
+ init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
+ if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
+ init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
+
+ init_data.flags.seamless_boot_edp_requested = false;
+
+ if (amdgpu_device_seamless_boot_supported(adev)) {
+ init_data.flags.seamless_boot_edp_requested = true;
+ init_data.flags.allow_seamless_boot_optimization = true;
+ DRM_INFO("Seamless boot condition check passed\n");
+ }
+
+ init_data.flags.enable_mipi_converter_optimization = true;
+
+ init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
+ init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
+ init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
+ init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+ else
+ init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+
+ init_data.flags.disable_ips_in_vpb = 0;
+
+ /* Enable DWB for tested platforms only */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
+ init_data.num_virtual_links = 1;
+
+ INIT_LIST_HEAD(&adev->dm.da_list);
+
+ retrieve_dmi_info(&adev->dm);
+
+ /* Display Core create. */
+ adev->dm.dc = dc_create(&init_data);
+
+ if (adev->dm.dc) {
+ DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
+ dce_version_to_string(adev->dm.dc->ctx->dce_version));
+ } else {
+ DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
+ goto error;
+ }
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
+ adev->dm.dc->debug.force_single_disp_pipe_split = false;
+ adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+ }
+
+ if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
+ adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+ if (dm_should_disable_stutter(adev->pdev))
+ adev->dm.dc->debug.disable_stutter = true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
+ adev->dm.dc->debug.disable_stutter = true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
+ adev->dm.dc->debug.disable_dsc = true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
+ adev->dm.dc->debug.disable_clock_gate = true;
+
+ if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
+ adev->dm.dc->debug.force_subvp_mclk_switch = true;
+
+ if (amdgpu_dc_debug_mask & DC_ENABLE_DML2)
+ adev->dm.dc->debug.using_dml2 = true;
+
+ adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
+
+ /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
+ adev->dm.dc->debug.ignore_cable_id = true;
+
+ if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
+ DRM_INFO("DP-HDMI FRL PCON supported\n");
+
+ r = dm_dmub_hw_init(adev);
+ if (r) {
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ goto error;
+ }
+
+ dc_hardware_init(adev->dm.dc);
+
+ adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
+ if (!adev->dm.hpd_rx_offload_wq) {
+ DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
+ goto error;
+ }
+
+ if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
+ struct dc_phy_addr_space_config pa_config;
+
+ mmhub_read_system_context(adev, &pa_config);
+
+		/* Call the DC init_memory func */
+ dc_setup_system_context(adev->dm.dc, &pa_config);
+ }
+
+	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
+	if (!adev->dm.freesync_module) {
+		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
+	} else {
+		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
+				 adev->dm.freesync_module);
+	}
+
+ amdgpu_dm_init_color_mod();
+
+ if (adev->dm.dc->caps.max_links > 0) {
+ adev->dm.vblank_control_workqueue =
+ create_singlethread_workqueue("dm_vblank_control_workqueue");
+ if (!adev->dm.vblank_control_workqueue)
+ DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
+ }
+
+ if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
+ adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
+
+ if (!adev->dm.hdcp_workqueue)
+ DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
+ else
+ DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
+
+ dc_init_callbacks(adev->dm.dc, &init_params);
+ }
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ init_completion(&adev->dm.dmub_aux_transfer_done);
+ adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
+ if (!adev->dm.dmub_notify) {
+			DRM_ERROR("amdgpu: failed to allocate adev->dm.dmub_notify");
+ goto error;
+ }
+
+ adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
+ if (!adev->dm.delayed_hpd_wq) {
+ DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
+ goto error;
+ }
+
+ amdgpu_dm_outbox_init(adev);
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
+ dmub_aux_setconfig_callback, false)) {
+			DRM_ERROR("amdgpu: failed to register dmub aux callback");
+ goto error;
+ }
+		/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+		 * It is expected that DMUB will resend any pending notifications at this point. Note
+		 * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to
+		 * align with the legacy interface initialization sequence. Connection status will be
+		 * proactively detected once in amdgpu_dm_initialize_drm_device.
+		 */
+ dc_enable_dmub_outbox(adev->dm.dc);
+
+ /* DPIA trace goes to dmesg logs only if outbox is enabled */
+ if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE)
+ dc_dmub_srv_enable_dpia_trace(adev->dm.dc);
+ }
+
+ if (amdgpu_dm_initialize_drm_device(adev)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
+ /* create fake encoders for MST */
+ dm_dp_create_fake_mst_encoders(adev);
+
+ /* TODO: Add_display_info? */
+
+ /* TODO use dynamic cursor width */
+ adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
+ adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
+
+ if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
+		DRM_ERROR("amdgpu: failed to initialize vblank for display support.\n");
+ goto error;
+ }
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
+ if (!adev->dm.secure_display_ctxs)
+ DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+#endif
+
+ DRM_DEBUG_DRIVER("KMS initialized.\n");
+
+ return 0;
+error:
+ amdgpu_dm_fini(adev);
+
+ return -EINVAL;
+}
+
+static int amdgpu_dm_early_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_dm_audio_fini(adev);
+
+ return 0;
+}
+
+static void amdgpu_dm_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (adev->dm.vblank_control_workqueue) {
+ destroy_workqueue(adev->dm.vblank_control_workqueue);
+ adev->dm.vblank_control_workqueue = NULL;
+ }
+
+ amdgpu_dm_destroy_drm_device(&adev->dm);
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (adev->dm.secure_display_ctxs) {
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (adev->dm.secure_display_ctxs[i].crtc) {
+ flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
+ flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+ }
+ }
+ kfree(adev->dm.secure_display_ctxs);
+ adev->dm.secure_display_ctxs = NULL;
+ }
+#endif
+ if (adev->dm.hdcp_workqueue) {
+ hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
+ adev->dm.hdcp_workqueue = NULL;
+ }
+
+ if (adev->dm.dc) {
+ dc_deinit_callbacks(adev->dm.dc);
+ dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
+ if (dc_enable_dmub_notifications(adev->dm.dc)) {
+ kfree(adev->dm.dmub_notify);
+ adev->dm.dmub_notify = NULL;
+ destroy_workqueue(adev->dm.delayed_hpd_wq);
+ adev->dm.delayed_hpd_wq = NULL;
+ }
+ }
+
+ if (adev->dm.dmub_bo)
+ amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
+ &adev->dm.dmub_bo_gpu_addr,
+ &adev->dm.dmub_bo_cpu_addr);
+
+ if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) {
+ for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+ if (adev->dm.hpd_rx_offload_wq[i].wq) {
+ destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
+ adev->dm.hpd_rx_offload_wq[i].wq = NULL;
+ }
+ }
+
+ kfree(adev->dm.hpd_rx_offload_wq);
+ adev->dm.hpd_rx_offload_wq = NULL;
+ }
+
+ /* DC Destroy TODO: Replace destroy DAL */
+ if (adev->dm.dc)
+ dc_destroy(&adev->dm.dc);
+ /*
+ * TODO: pageflip, vlank interrupt
+ *
+ * amdgpu_dm_irq_fini(adev);
+ */
+
+ if (adev->dm.cgs_device) {
+ amdgpu_cgs_destroy_device(adev->dm.cgs_device);
+ adev->dm.cgs_device = NULL;
+ }
+ if (adev->dm.freesync_module) {
+ mod_freesync_destroy(adev->dm.freesync_module);
+ adev->dm.freesync_module = NULL;
+ }
+
+ mutex_destroy(&adev->dm.audio_lock);
+ mutex_destroy(&adev->dm.dc_lock);
+ mutex_destroy(&adev->dm.dpia_aux_lock);
+}
+
+static int load_dmcu_fw(struct amdgpu_device *adev)
+{
+ const char *fw_name_dmcu = NULL;
+ int r;
+ const struct dmcu_firmware_header_v1_0 *hdr;
+
+ switch (adev->asic_type) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+#endif
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ return 0;
+ case CHIP_NAVI12:
+ fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
+ break;
+ case CHIP_RAVEN:
+ if (ASICREV_IS_PICASSO(adev->external_rev_id))
+ fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
+ else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
+ fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
+ else
+ return 0;
+ break;
+ default:
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ return 0;
+ default:
+ break;
+ }
+ DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
+ return -EINVAL;
+ }
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
+ return 0;
+ }
+
+ r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
+ if (r == -ENODEV) {
+ /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
+ DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
+ adev->dm.fw_dmcu = NULL;
+ return 0;
+ }
+ if (r) {
+ dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
+ fw_name_dmcu);
+ amdgpu_ucode_release(&adev->dm.fw_dmcu);
+ return r;
+ }
+
+ hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
+
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
+
+ adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
+
+ return 0;
+}
+
+static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
+{
+ struct amdgpu_device *adev = ctx;
+
+ return dm_read_reg(adev->dm.dc->ctx, address);
+}
+
+static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
+ uint32_t value)
+{
+ struct amdgpu_device *adev = ctx;
+
+ return dm_write_reg(adev->dm.dc->ctx, address, value);
+}
+
+static int dm_dmub_sw_init(struct amdgpu_device *adev)
+{
+ struct dmub_srv_create_params create_params;
+ struct dmub_srv_region_params region_params;
+ struct dmub_srv_region_info region_info;
+ struct dmub_srv_memory_params memory_params;
+ struct dmub_srv_fb_info *fb_info;
+ struct dmub_srv *dmub_srv;
+ const struct dmcub_firmware_header_v1_0 *hdr;
+ enum dmub_asic dmub_asic;
+ enum dmub_status status;
+ static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = {
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_0_INST_CONST
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_1_STACK
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_2_BSS_DATA
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_3_VBIOS
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_4_MAILBOX
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE
+ };
+ int r;
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 1, 0):
+ dmub_asic = DMUB_ASIC_DCN21;
+ break;
+ case IP_VERSION(3, 0, 0):
+ dmub_asic = DMUB_ASIC_DCN30;
+ break;
+ case IP_VERSION(3, 0, 1):
+ dmub_asic = DMUB_ASIC_DCN301;
+ break;
+ case IP_VERSION(3, 0, 2):
+ dmub_asic = DMUB_ASIC_DCN302;
+ break;
+ case IP_VERSION(3, 0, 3):
+ dmub_asic = DMUB_ASIC_DCN303;
+ break;
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
+ break;
+ case IP_VERSION(3, 1, 4):
+ dmub_asic = DMUB_ASIC_DCN314;
+ break;
+ case IP_VERSION(3, 1, 5):
+ dmub_asic = DMUB_ASIC_DCN315;
+ break;
+ case IP_VERSION(3, 1, 6):
+ dmub_asic = DMUB_ASIC_DCN316;
+ break;
+ case IP_VERSION(3, 2, 0):
+ dmub_asic = DMUB_ASIC_DCN32;
+ break;
+ case IP_VERSION(3, 2, 1):
+ dmub_asic = DMUB_ASIC_DCN321;
+ break;
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ dmub_asic = DMUB_ASIC_DCN35;
+ break;
+ default:
+ /* ASIC doesn't support DMUB. */
+ return 0;
+ }
+
+ hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
+ adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
+ AMDGPU_UCODE_ID_DMCUB;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
+ adev->dm.dmub_fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
+
+ DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+ adev->dm.dmcub_fw_version);
+ }
+
+ adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
+ dmub_srv = adev->dm.dmub_srv;
+
+ if (!dmub_srv) {
+ DRM_ERROR("Failed to allocate DMUB service!\n");
+ return -ENOMEM;
+ }
+
+ memset(&create_params, 0, sizeof(create_params));
+ create_params.user_ctx = adev;
+ create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
+ create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
+ create_params.asic = dmub_asic;
+
+ /* Create the DMUB service. */
+ status = dmub_srv_create(dmub_srv, &create_params);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error creating DMUB service: %d\n", status);
+ return -EINVAL;
+ }
+
+ /* Calculate the size of all the regions for the DMUB service. */
+ memset(&region_params, 0, sizeof(region_params));
+
+ region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+ PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+ region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+ region_params.vbios_size = adev->bios_size;
+ region_params.fw_bss_data = region_params.bss_data_size ?
+ adev->dm.dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ le32_to_cpu(hdr->inst_const_bytes) : NULL;
+ region_params.fw_inst_const =
+ adev->dm.dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ PSP_HEADER_BYTES;
+ region_params.window_memory_type = window_memory_type;
+
+ status = dmub_srv_calc_region_info(dmub_srv, &region_params,
+ &region_info);
+
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error calculating DMUB region info: %d\n", status);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate a framebuffer based on the total size of all the regions.
+ * TODO: Move this into GART.
+ */
+ r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->dm.dmub_bo,
+ &adev->dm.dmub_bo_gpu_addr,
+ &adev->dm.dmub_bo_cpu_addr);
+ if (r)
+ return r;
+
+ /* Rebase the regions on the framebuffer address. */
+ memset(&memory_params, 0, sizeof(memory_params));
+ memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
+ memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
+ memory_params.region_info = &region_info;
+ memory_params.window_memory_type = window_memory_type;
+
+ adev->dm.dmub_fb_info =
+ kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
+ fb_info = adev->dm.dmub_fb_info;
+
+ if (!fb_info) {
+ DRM_ERROR(
+ "Failed to allocate framebuffer info for DMUB service!\n");
+ return -ENOMEM;
+ }
+
+ status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dm_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = dm_dmub_sw_init(adev);
+ if (r)
+ return r;
+
+ return load_dmcu_fw(adev);
+}
+
+static int dm_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ kfree(adev->dm.dmub_fb_info);
+ adev->dm.dmub_fb_info = NULL;
+
+ if (adev->dm.dmub_srv) {
+ dmub_srv_destroy(adev->dm.dmub_srv);
+ kfree(adev->dm.dmub_srv);
+ adev->dm.dmub_srv = NULL;
+ }
+
+ amdgpu_ucode_release(&adev->dm.dmub_fw);
+ amdgpu_ucode_release(&adev->dm.fw_dmcu);
+
+ return 0;
+}
+
+static int detect_mst_link_for_all_connectors(struct drm_device *dev)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ int ret = 0;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type == dc_connection_mst_branch &&
+ aconnector->mst_mgr.aux) {
+ DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+ aconnector,
+ aconnector->base.base.id);
+
+ ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+ if (ret < 0) {
+ DRM_ERROR("DM_MST: Failed to start MST\n");
+ aconnector->dc_link->type =
+ dc_connection_single;
+ ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link);
+ break;
+ }
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+
+ return ret;
+}
+
+static int dm_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct dmcu_iram_parameters params;
+ unsigned int linear_lut[16];
+ int i;
+ struct dmcu *dmcu = NULL;
+
+ dmcu = adev->dm.dc->res_pool->dmcu;
+
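+	/* Build a 16-entry linear LUT spanning the full 0..0xFFFF backlight range. */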
+ for (i = 0; i < 16; i++)
+ linear_lut[i] = 0xFFFF * i / 15;
+
+ params.set = 0;
+ params.backlight_ramping_override = false;
+ params.backlight_ramping_start = 0xCCCC;
+ params.backlight_ramping_reduction = 0xCCCCCCCC;
+ params.backlight_lut_array_size = 16;
+ params.backlight_lut_array = linear_lut;
+
+	/* Minimum backlight level after ABM reduction; don't allow it below 1%:
+	 * 0xFFFF * 0.01 = 0x28F
+	 */
+ params.min_abm_backlight = 0x28F;
+	/* When ABM is implemented on DMCUB, the dmcu object will be NULL.
+	 * ABM 2.4 and up are implemented on DMCUB.
+	 */
+ if (dmcu) {
+ if (!dmcu_load_iram(dmcu, params))
+ return -EINVAL;
+ } else if (adev->dm.dc->ctx->dmub_srv) {
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ int edp_num;
+
+ dc_get_edp_links(adev->dm.dc, edp_links, &edp_num);
+ for (i = 0; i < edp_num; i++) {
+ if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
+ return -EINVAL;
+ }
+ }
+
+ return detect_mst_link_for_all_connectors(adev_to_drm(adev));
+}
+
+static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
+{
+ int ret;
+ u8 guid[16];
+ u64 tmp64;
+
+ mutex_lock(&mgr->lock);
+ if (!mgr->mst_primary)
+ goto out_fail;
+
+ if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
+ drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+
+ /* Some hubs forget their guids after they resume */
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (ret != 16) {
+ drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+
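+	/* An all-zero GUID is invalid; synthesize one from the jiffies counter. */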
+ if (memchr_inv(guid, 0, 16) == NULL) {
+ tmp64 = get_jiffies_64();
+ memcpy(&guid[0], &tmp64, sizeof(u64));
+ memcpy(&guid[8], &tmp64, sizeof(u64));
+
+ ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
+
+ if (ret != 16) {
+ drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+ }
+
+ memcpy(mgr->mst_primary->guid, guid, 16);
+
+out_fail:
+ mutex_unlock(&mgr->lock);
+}
+
+static void s3_handle_mst(struct drm_device *dev, bool suspend)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct drm_dp_mst_topology_mgr *mgr;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type != dc_connection_mst_branch ||
+ aconnector->mst_root)
+ continue;
+
+ mgr = &aconnector->mst_mgr;
+
+ if (suspend) {
+ drm_dp_mst_topology_mgr_suspend(mgr);
+ } else {
+			/* If extended timeout is supported in hardware, default to the
+			 * LTTPR timeout (3.2ms) first as a workaround for the DP link layer
+			 * CTS 4.2.1.1 regression introduced by the CTS spec requirement update.
+			 */
+ try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
+ if (!dp_is_lttpr_present(aconnector->dc_link))
+ try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
+
+			/* TODO: move resume_mst_branch_status() back into drm mst resume
+			 * once topology probing is pulled out of mst resume into a second
+			 * mst resume step. That second step should run after the old state
+			 * has been restored (i.e. after drm_atomic_helper_resume()).
+			 */
+ resume_mst_branch_status(mgr);
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+}
+
+static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
+	 * depends on the Windows driver dc implementation.
+	 * For Navi1x, the clock settings of the dcn watermarks are fixed and
+	 * should be passed to smu during boot up and on resume from s3.
+	 * boot up: dc calculates the dcn watermark clock settings within
+	 * dc_create, dcn20_resource_construct, then calls the pplib functions
+	 * below to pass the settings to smu:
+	 * smu_set_watermarks_for_clock_ranges
+	 * smu_set_watermarks_table
+	 * navi10_set_watermarks_table
+	 * smu_write_watermarks_table
+	 *
+	 * For Renoir, the clock settings of the dcn watermarks are also fixed
+	 * values. dc has implemented a different flow for the Windows driver:
+	 * dc_hardware_init / dc_set_power_state
+	 * dcn10_init_hw
+	 * notify_wm_ranges
+	 * set_wm_ranges
+	 * -- Linux
+	 * smu_set_watermarks_for_clock_ranges
+	 * renoir_set_watermarks_table
+	 * smu_write_watermarks_table
+	 *
+	 * For Linux,
+	 * dc_hardware_init -> amdgpu_dm_init
+	 * dc_set_power_state --> dm_resume
+	 *
+	 * Therefore, this function applies to navi10/12/14 but not to Renoir.
+	 */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 0):
+ break;
+ default:
+ return 0;
+ }
+
+ ret = amdgpu_dpm_write_watermarks_table(adev);
+ if (ret) {
+ DRM_ERROR("Failed to update WMTABLE!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * dm_hw_init() - Initialize DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Initialize the &struct amdgpu_display_manager device. This involves calling
+ * the initializers of each DM component, then populating the struct with them.
+ *
+ * Although the function implies hardware initialization, both hardware and
+ * software are initialized here. Splitting them out to their relevant init
+ * hooks is a future TODO item.
+ *
+ * Some notable things that are initialized here:
+ *
+ * - Display Core, both software and hardware
+ * - DC modules that we need (freesync and color management)
+ * - DRM software states
+ * - Interrupt sources and handlers
+ * - Vblank support
+ * - Debug FS entries, if enabled
+ */
+static int dm_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	/* Create DAL display manager */
+ amdgpu_dm_init(adev);
+ amdgpu_dm_hpd_init(adev);
+
+ return 0;
+}
+
+/**
+ * dm_hw_fini() - Teardown DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Teardown components within &struct amdgpu_display_manager that require
+ * cleanup. This involves cleaning up the DRM device, DC, and any modules that
+ * were loaded. Also flush IRQ workqueues and disable them.
+ */
+static int dm_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_dm_hpd_fini(adev);
+
+ amdgpu_dm_irq_fini(adev);
+ amdgpu_dm_fini(adev);
+ return 0;
+}
+
+static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
+ struct dc_state *state, bool enable)
+{
+ enum dc_irq_source irq_source;
+ struct amdgpu_crtc *acrtc;
+ int rc = -EBUSY;
+ int i = 0;
+
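+	/* Toggle pflip, vupdate, and vblank IRQ sources for each stream with active planes. */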
+ for (i = 0; i < state->stream_count; i++) {
+ acrtc = get_crtc_by_otg_inst(
+ adev, state->stream_status[i].primary_otg_inst);
+
+ if (acrtc && state->stream_status[i].plane_count != 0) {
+ irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
+ rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+ if (rc)
+ DRM_WARN("Failed to %s pflip interrupts\n",
+ enable ? "enable" : "disable");
+
+ if (enable) {
+ if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
+ } else
+ rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
+
+ if (rc)
+ DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
+
+ irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+ /* During gpu-reset we disable and then enable vblank irq, so
+ * don't use amdgpu_irq_get/put() to avoid refcount change.
+ */
+ if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+ DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
+ }
+ }
+
+
+static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
+{
+ struct dc_state *context = NULL;
+ enum dc_status res = DC_ERROR_UNEXPECTED;
+ int i;
+ struct dc_stream_state *del_streams[MAX_PIPES];
+ int del_streams_count = 0;
+ struct dc_commit_streams_params params = {};
+
+ memset(del_streams, 0, sizeof(del_streams));
+
+ context = dc_state_create_current_copy(dc);
+ if (context == NULL)
+ goto context_alloc_fail;
+
+ /* First remove from context all streams */
+ for (i = 0; i < context->stream_count; i++) {
+ struct dc_stream_state *stream = context->streams[i];
+
+ del_streams[del_streams_count++] = stream;
+ }
+
+ /* Remove all planes for removed streams and then remove the streams */
+ for (i = 0; i < del_streams_count; i++) {
+ if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+ res = DC_FAIL_DETACH_SURFACES;
+ goto fail;
+ }
+
+ res = dc_state_remove_stream(dc, context, del_streams[i]);
+ if (res != DC_OK)
+ goto fail;
+ }
+
+ params.streams = context->streams;
+ params.stream_count = context->stream_count;
+ res = dc_commit_streams(dc, &params);
+
+fail:
+ dc_state_release(context);
+
+context_alloc_fail:
+ return res;
+}
+
+static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
+{
+ int i;
+
+ if (dm->hpd_rx_offload_wq) {
+ for (i = 0; i < dm->dc->caps.max_links; i++)
+ flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
+ }
+}
+
+static int dm_suspend(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ int ret = 0;
+
+ if (amdgpu_in_reset(adev)) {
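+		/* dc_lock stays held across the GPU reset; the matching unlock is in dm_resume(). */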
+ mutex_lock(&dm->dc_lock);
+
+ dc_allow_idle_optimizations(adev->dm.dc, false);
+
+ dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
+
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
+
+ amdgpu_dm_commit_zero_streams(dm->dc);
+
+ amdgpu_dm_irq_suspend(adev);
+
+ hpd_rx_irq_work_suspend(dm);
+
+ return ret;
+ }
+
+ WARN_ON(adev->dm.cached_state);
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
+ if (IS_ERR(adev->dm.cached_state))
+ return PTR_ERR(adev->dm.cached_state);
+
+ s3_handle_mst(adev_to_drm(adev), true);
+
+ amdgpu_dm_irq_suspend(adev);
+
+ hpd_rx_irq_work_suspend(dm);
+
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
+ dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
+
+ return 0;
+}
+
+struct drm_connector *
+amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ u32 i;
+ struct drm_connector_state *new_con_state;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc_from_state;
+
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ crtc_from_state = new_con_state->crtc;
+
+ if (crtc_from_state == crtc)
+ return connector;
+ }
+
+ return NULL;
+}
+
+static void emulated_link_detect(struct dc_link *link)
+{
+ struct dc_sink_init_data sink_init_data = { 0 };
+ struct display_sink_capability sink_caps = { 0 };
+ enum dc_edid_status edid_status;
+ struct dc_context *dc_ctx = link->ctx;
+ struct drm_device *dev = adev_to_drm(dc_ctx->driver_context);
+ struct dc_sink *sink = NULL;
+ struct dc_sink *prev_sink = NULL;
+
+ link->type = dc_connection_none;
+ prev_sink = link->local_sink;
+
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_DUAL_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_LVDS: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_LVDS;
+ break;
+ }
+
+ case SIGNAL_TYPE_EDP: {
+ sink_caps.transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_EDP;
+ break;
+ }
+
+ case SIGNAL_TYPE_DISPLAY_PORT: {
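+		/* Emulated DP sinks are created with a virtual signal type. */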
+ sink_caps.transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
+ break;
+ }
+
+ default:
+ drm_err(dev, "Invalid connector type! signal:%d\n",
+ link->connector_signal);
+ return;
+ }
+
+ sink_init_data.link = link;
+ sink_init_data.sink_signal = sink_caps.signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ drm_err(dev, "Failed to create sink!\n");
+ return;
+ }
+
+ /* dc_sink_create returns a new reference */
+ link->local_sink = sink;
+
+ edid_status = dm_helpers_read_local_edid(
+ link->ctx,
+ link,
+ sink);
+
+ if (edid_status != EDID_OK)
+ drm_err(dev, "Failed to read EDID\n");
+}
+
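+/* Replay the cached streams and planes with full updates after a GPU reset. */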
+static void dm_gpureset_commit_state(struct dc_state *dc_state,
+ struct amdgpu_display_manager *dm)
+{
+ struct {
+ struct dc_surface_update surface_updates[MAX_SURFACES];
+ struct dc_plane_info plane_infos[MAX_SURFACES];
+ struct dc_scaling_info scaling_infos[MAX_SURFACES];
+ struct dc_flip_addrs flip_addrs[MAX_SURFACES];
+ struct dc_stream_update stream_update;
+ } *bundle;
+ int k, m;
+
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+
+ if (!bundle) {
+ drm_err(dm->ddev, "Failed to allocate update bundle\n");
+ goto cleanup;
+ }
+
+ for (k = 0; k < dc_state->stream_count; k++) {
+ bundle->stream_update.stream = dc_state->streams[k];
+
+ for (m = 0; m < dc_state->stream_status->plane_count; m++) {
+ bundle->surface_updates[m].surface =
+ dc_state->stream_status->plane_states[m];
+ bundle->surface_updates[m].surface->force_full_update =
+ true;
+ }
+
+ update_planes_and_stream_adapter(dm->dc,
+ UPDATE_TYPE_FULL,
+ dc_state->stream_status->plane_count,
+ dc_state->streams[k],
+ &bundle->stream_update,
+ bundle->surface_updates);
+ }
+
+cleanup:
+ kfree(bundle);
+}
+
+static int dm_resume(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state;
+ struct dm_plane_state *dm_new_plane_state;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ struct dc_state *dc_state;
+ int i, r, j, ret;
+ bool need_hotplug = false;
+ struct dc_commit_streams_params commit_params = {};
+
+	if (dm->dc->caps.ips_support)
+		dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
+
+ if (amdgpu_in_reset(adev)) {
+ dc_state = dm->cached_dc_state;
+
+ /*
+ * The dc->current_state is backed up into dm->cached_dc_state
+ * before we commit 0 streams.
+ *
+ * DC will clear link encoder assignments on the real state
+ * but the changes won't propagate over to the copy we made
+ * before the 0 streams commit.
+ *
+ * DC expects that link encoder assignments are *not* valid
+ * when committing a state, so as a workaround we can copy
+ * off of the current state.
+ *
+ * We lose the previous assignments, but we had already
+	 * committed 0 streams anyway.
+ */
+ link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
+
+ r = dm_dmub_hw_init(adev);
+ if (r)
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+
+ dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
+ dc_resume(dm->dc);
+
+ amdgpu_dm_irq_resume_early(adev);
+
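+		/* Mark every cached stream and plane dirty so the commit below reprograms them fully. */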
+ for (i = 0; i < dc_state->stream_count; i++) {
+ dc_state->streams[i]->mode_changed = true;
+ for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
+ dc_state->stream_status[i].plane_states[j]->update_flags.raw
+ = 0xffffffff;
+ }
+ }
+
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ amdgpu_dm_outbox_init(adev);
+ dc_enable_dmub_outbox(adev->dm.dc);
+ }
+
+ commit_params.streams = dc_state->streams;
+ commit_params.stream_count = dc_state->stream_count;
+ WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
+
+ dm_gpureset_commit_state(dm->cached_dc_state, dm);
+
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
+
+ dc_state_release(dm->cached_dc_state);
+ dm->cached_dc_state = NULL;
+
+ amdgpu_dm_irq_resume_late(adev);
+
+ mutex_unlock(&dm->dc_lock);
+
+ return 0;
+ }
+ /* Recreate dc_state - DC invalidates it when setting power state to S3. */
+ dc_state_release(dm_state->context);
+ dm_state->context = dc_state_create(dm->dc, NULL);
+ /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
+
+ /* Before powering on DC we need to re-initialize DMUB. */
+ dm_dmub_hw_resume(adev);
+
+ /* Re-enable outbox interrupts for DPIA. */
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ amdgpu_dm_outbox_init(adev);
+ dc_enable_dmub_outbox(adev->dm.dc);
+ }
+
+ /* power on hardware */
+ dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
+ /* program HPD filter */
+ dc_resume(dm->dc);
+
+	/*
+	 * Enable the HPD Rx IRQ early; this must happen before set mode, since
+	 * short-pulse interrupts are used for MST.
+	 */
+ amdgpu_dm_irq_resume_early(adev);
+
+	/* On resume we need to rewrite the MSTM control bits to enable MST */
+ s3_handle_mst(ddev, false);
+
+	/* Do detection */
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!aconnector->dc_link)
+ continue;
+
+		/*
+		 * This is the case when traversing through already-created
+		 * end-sink MST connectors; they should be skipped.
+		 */
+ if (aconnector && aconnector->mst_root)
+ continue;
+
+ mutex_lock(&aconnector->hpd_lock);
+ if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(aconnector->dc_link);
+ } else {
+ mutex_lock(&dm->dc_lock);
+ dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ mutex_unlock(&dm->dc_lock);
+ }
+
+ if (aconnector->fake_enable && aconnector->dc_link->local_sink)
+ aconnector->fake_enable = false;
+
+ if (aconnector->dc_sink)
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ mutex_unlock(&aconnector->hpd_lock);
+ }
+ drm_connector_list_iter_end(&iter);
+
+ /* Force mode set in atomic commit */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
+ new_crtc_state->active_changed = true;
+
+ /*
+ * atomic_check is expected to create the dc states. We need to release
+ * them here, since they were duplicated as part of the suspend
+ * procedure.
+ */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->stream) {
+ WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
+ dc_stream_release(dm_new_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+ }
+ }
+
+ for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ if (dm_new_plane_state->dc_state) {
+ WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
+ dc_plane_state_release(dm_new_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
+ }
+ }
+
+ drm_atomic_helper_resume(ddev, dm->cached_state);
+
+ dm->cached_state = NULL;
+
+	/* Do MST topology probing after resuming the cached state */
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type != dc_connection_mst_branch ||
+ aconnector->mst_root)
+ continue;
+
+ ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
+
+ if (ret < 0) {
+ dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link);
+ need_hotplug = true;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+
+ if (need_hotplug)
+ drm_kms_helper_hotplug_event(ddev);
+
+ amdgpu_dm_irq_resume_late(adev);
+
+ amdgpu_dm_smu_write_watermarks_table(adev);
+
+ return 0;
+}
+
+/**
+ * DOC: DM Lifecycle
+ *
+ * DM (and consequently DC) is registered in the amdgpu base driver as a IP
+ * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
+ * the base driver's device list to be initialized and torn down accordingly.
+ *
+ * The functions to do so are provided as hooks in &struct amd_ip_funcs.
+ */
+
+static const struct amd_ip_funcs amdgpu_dm_funcs = {
+ .name = "dm",
+ .early_init = dm_early_init,
+ .late_init = dm_late_init,
+ .sw_init = dm_sw_init,
+ .sw_fini = dm_sw_fini,
+ .early_fini = amdgpu_dm_early_fini,
+ .hw_init = dm_hw_init,
+ .hw_fini = dm_hw_fini,
+ .suspend = dm_suspend,
+ .resume = dm_resume,
+ .is_idle = dm_is_idle,
+ .wait_for_idle = dm_wait_for_idle,
+ .check_soft_reset = dm_check_soft_reset,
+ .soft_reset = dm_soft_reset,
+ .set_clockgating_state = dm_set_clockgating_state,
+ .set_powergating_state = dm_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version dm_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_dm_funcs,
+};
+
+/**
+ * DOC: atomic
+ *
+ * *WIP*
+ */
+
+static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
+ .fb_create = amdgpu_display_user_framebuffer_create,
+ .get_format_info = amdgpu_dm_plane_get_format_info,
+ .atomic_check = amdgpu_dm_atomic_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
+ .atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
+ .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
+};
+
+static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
+{
+ struct amdgpu_dm_backlight_caps *caps;
+ struct drm_connector *conn_base;
+ struct amdgpu_device *adev;
+ struct drm_luminance_range_info *luminance_range;
+
+ if (aconnector->bl_idx == -1 ||
+ aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
+ return;
+
+ conn_base = &aconnector->base;
+ adev = drm_to_adev(conn_base->dev);
+
+ caps = &adev->dm.backlight_caps[aconnector->bl_idx];
+ caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
+ caps->aux_support = false;
+
+ if (caps->ext_caps->bits.oled == 1
+ /*
+ * ||
+ * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+ * caps->ext_caps->bits.hdr_aux_backlight_control == 1
+ */)
+ caps->aux_support = true;
+
+ if (amdgpu_backlight == 0)
+ caps->aux_support = false;
+ else if (amdgpu_backlight == 1)
+ caps->aux_support = true;
+
+ luminance_range = &conn_base->display_info.luminance_range;
+
+ if (luminance_range->max_luminance) {
+ caps->aux_min_input_signal = luminance_range->min_luminance;
+ caps->aux_max_input_signal = luminance_range->max_luminance;
+ } else {
+ caps->aux_min_input_signal = 0;
+ caps->aux_max_input_signal = 512;
+ }
+}
+
+void amdgpu_dm_update_connector_after_detect(
+ struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct dc_sink *sink;
+
+ /* MST handled by drm_mst framework */
+ if (aconnector->mst_mgr.mst_state == true)
+ return;
+
+ sink = aconnector->dc_link->local_sink;
+ if (sink)
+ dc_sink_retain(sink);
+
+	/*
+	 * An EDID-managed connector gets its first update only in the mode_valid
+	 * hook, and then the connector sink is set to either a fake or a physical
+	 * sink depending on link status. Skip if this was already done during boot.
+	 */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
+ && aconnector->dc_em_sink) {
+
+		/*
+		 * For headless S3 resume, use dc_em_sink to fake a stream because
+		 * connector->sink is set to NULL on resume.
+		 */
+ mutex_lock(&dev->mode_config.mutex);
+
+ if (sink) {
+ if (aconnector->dc_sink) {
+ amdgpu_dm_update_freesync_caps(connector, NULL);
+				/*
+				 * The retain and release below bump the sink refcount because
+				 * the link no longer points to it after disconnect; otherwise
+				 * the next crtc-to-connector reshuffle by the UMD would trigger
+				 * an unwanted dc_sink release.
+				 */
+ dc_sink_release(aconnector->dc_sink);
+ }
+ aconnector->dc_sink = sink;
+ dc_sink_retain(aconnector->dc_sink);
+ amdgpu_dm_update_freesync_caps(connector,
+ aconnector->edid);
+ } else {
+ amdgpu_dm_update_freesync_caps(connector, NULL);
+ if (!aconnector->dc_sink) {
+ aconnector->dc_sink = aconnector->dc_em_sink;
+ dc_sink_retain(aconnector->dc_sink);
+ }
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (sink)
+ dc_sink_release(sink);
+ return;
+ }
+
+	/*
+	 * TODO: temporary guard until a proper fix is found.
+	 * If this sink is an MST sink, we should not do anything.
+	 */
+ if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ dc_sink_release(sink);
+ return;
+ }
+
+ if (aconnector->dc_sink == sink) {
+ /*
+ * We got a DP short pulse (Link Loss, DP CTS, etc...).
+ * Do nothing!!
+ */
+ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
+ aconnector->connector_id);
+ if (sink)
+ dc_sink_release(sink);
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
+ aconnector->connector_id, aconnector->dc_sink, sink);
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ /*
+ * 1. Update status of the drm connector
+ * 2. Send an event and let userspace tell us what to do
+ */
+ if (sink) {
+ /*
+ * TODO: check if we still need the S3 mode update workaround.
+ * If yes, put it here.
+ */
+ if (aconnector->dc_sink) {
+ amdgpu_dm_update_freesync_caps(connector, NULL);
+ dc_sink_release(aconnector->dc_sink);
+ }
+
+ aconnector->dc_sink = sink;
+ dc_sink_retain(aconnector->dc_sink);
+ if (sink->dc_edid.length == 0) {
+ aconnector->edid = NULL;
+ if (aconnector->dc_link->aux_mode) {
+ drm_dp_cec_unset_edid(
+ &aconnector->dm_dp_aux.aux);
+ }
+ } else {
+ aconnector->edid =
+ (struct edid *)sink->dc_edid.raw_edid;
+
+ if (aconnector->dc_link->aux_mode)
+ drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
+ aconnector->edid);
+ }
+
+ if (!aconnector->timing_requested) {
+ aconnector->timing_requested =
+ kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
+ if (!aconnector->timing_requested)
+ drm_err(dev,
+				"failed to create aconnector->timing_requested\n");
+ }
+
+ drm_connector_update_edid_property(connector, aconnector->edid);
+ amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
+ update_connector_ext_caps(aconnector);
+ } else {
+ drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
+ amdgpu_dm_update_freesync_caps(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
+ aconnector->num_modes = 0;
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+ aconnector->edid = NULL;
+ kfree(aconnector->timing_requested);
+ aconnector->timing_requested = NULL;
+		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
+ if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ update_subconnector_property(aconnector);
+
+ if (sink)
+ dc_sink_release(sink);
+}
+
+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
+ bool ret = false;
+
+ if (adev->dm.disable_hpd_irq)
+ return;
+
+	/*
+	 * In case of failure or MST, there is no need to update the connector
+	 * status or notify the OS, since (in the MST case) MST handles this in
+	 * its own context.
+	 */
+ mutex_lock(&aconnector->hpd_lock);
+
+ if (adev->dm.hdcp_workqueue) {
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+ dm_con_state->update_hdcp = true;
+ }
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ aconnector->timing_changed = false;
+
+ if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(aconnector->dc_link);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_connector_hotplug_event(connector);
+ } else {
+ mutex_lock(&adev->dm.dc_lock);
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ mutex_unlock(&adev->dm.dc_lock);
+ if (ret) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_connector_hotplug_event(connector);
+ }
+ }
+ mutex_unlock(&aconnector->hpd_lock);
+}
+
+static void handle_hpd_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+
+ handle_hpd_irq_helper(aconnector);
+}
+
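+/* Package the HPD RX IRQ data and hand it off to the per-link offload workqueue. */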
+static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+ union hpd_irq_data hpd_irq_data)
+{
+ struct hpd_rx_irq_offload_work *offload_work =
+ kzalloc(sizeof(*offload_work), GFP_KERNEL);
+
+ if (!offload_work) {
+ DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
+ return;
+ }
+
+ INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
+ offload_work->data = hpd_irq_data;
+ offload_work->offload_wq = offload_wq;
+
+ queue_work(offload_wq->wq, &offload_work->work);
+ DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
+}
+
+static void handle_hpd_rx_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct dc_link *dc_link = aconnector->dc_link;
+ bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+ bool result = false;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ union hpd_irq_data hpd_irq_data;
+ bool link_loss = false;
+ bool has_left_work = false;
+ int idx = dc_link->link_index;
+ struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
+
+ memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
+
+ if (adev->dm.disable_hpd_irq)
+ return;
+
+	/*
+	 * TODO: Temporarily add a mutex to protect the HPD interrupt from GPIO
+	 * conflicts; once an i2c helper is implemented, this mutex should be
+	 * retired.
+	 */
+ mutex_lock(&aconnector->hpd_lock);
+
+ result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
+ &link_loss, true, &has_left_work);
+
+ if (!has_left_work)
+ goto out;
+
+ if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ goto out;
+ }
+
+ if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+ if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+ hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ bool skip = false;
+
+ /*
+ * DOWN_REP_MSG_RDY is also handled by polling method
+ * mgr->cbs->poll_hpd_irq()
+ */
+ spin_lock(&offload_wq->offload_lock);
+ skip = offload_wq->is_handling_mst_msg_rdy_event;
+
+ if (!skip)
+ offload_wq->is_handling_mst_msg_rdy_event = true;
+
+ spin_unlock(&offload_wq->offload_lock);
+
+ if (!skip)
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
+ goto out;
+ }
+
+ if (link_loss) {
+ bool skip = false;
+
+ spin_lock(&offload_wq->offload_lock);
+ skip = offload_wq->is_handling_link_loss;
+
+ if (!skip)
+ offload_wq->is_handling_link_loss = true;
+
+ spin_unlock(&offload_wq->offload_lock);
+
+ if (!skip)
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
+ goto out;
+ }
+ }
+
+out:
+ if (result && !is_mst_root_connector) {
+ /* Downstream Port status changed. */
+ if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(dc_link);
+
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ drm_kms_helper_connector_hotplug_event(connector);
+ } else {
+ bool ret = false;
+
+ mutex_lock(&adev->dm.dc_lock);
+ ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
+ mutex_unlock(&adev->dm.dc_lock);
+
+ if (ret) {
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ drm_kms_helper_connector_hotplug_event(connector);
+ }
+ }
+ }
+ if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
+ if (adev->dm.hdcp_workqueue)
+ hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
+ }
+
+ if (dc_link->type != dc_connection_mst_branch)
+ drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
+
+ mutex_unlock(&aconnector->hpd_lock);
+}
+
+static void register_hpd_handlers(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_connector *connector;
+ struct amdgpu_dm_connector *aconnector;
+ const struct dc_link *dc_link;
+ struct dc_interrupt_params int_params = {0};
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true))
+			DRM_ERROR("amdgpu: failed to register dmub hpd callback");
+
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true))
+			DRM_ERROR("amdgpu: failed to register dmub hpd irq callback");
+ }
+
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, head) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ dc_link = aconnector->dc_link;
+
+ if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source = dc_link->irq_source_hpd;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ handle_hpd_irq,
+ (void *) aconnector);
+ }
+
+ if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
+
+ /* Also register for DP short pulse (hpd_rx). */
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source = dc_link->irq_source_hpd_rx;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ handle_hpd_rx_irq,
+ (void *) aconnector);
+ }
+ }
+}
+
+#if defined(CONFIG_DRM_AMD_DC_SI)
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dce60_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+ unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /*
+ * Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling.
+ */
+
+ /* Use VBLANK interrupt */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i + 1, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
+ i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+ }
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, client_id,
+ VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+#endif
+
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dce110_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+ unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+
+ if (adev->family >= AMDGPU_FAMILY_AI)
+ client_id = SOC15_IH_CLIENTID_DCE;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /*
+ * Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling.
+ */
+
+ /* Use VBLANK interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use VUPDATE interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
+ if (r) {
+ DRM_ERROR("Failed to add vupdate irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_vupdate_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
+ i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+ }
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, client_id,
+ VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ static const unsigned int vrtl_int_srcid[] = {
+ DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
+ };
+#endif
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /*
+ * Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling.
+ */
+
+ /* Use VSTARTUP interrupt */
+ for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
+ i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(
+ adev, &int_params, dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use otg vertical line interrupt */
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
+ vrtl_int_srcid[i], &adev->vline0_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add vline0 irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
+
+ if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
+ DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
+ break;
+ }
+
+ c_irq_params = &adev->dm.vline0_params[int_params.irq_source
+ - DC_IRQ_SOURCE_DC1_VLINE0];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
+ }
+#endif
+
+ /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
+ * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
+ * to trigger at end of each vblank, regardless of state of the lock,
+ * matching DCE behaviour.
+ */
+ for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
+ i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add vupdate irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_vupdate_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
+ i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+ }
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
+ &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+
+/* Register Outbox IRQ sources and initialize IRQ callbacks */
+static int register_outbox_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r, i;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
+ &adev->dmub_outbox_irq);
+ if (r) {
+ DRM_ERROR("Failed to add outbox irq id!\n");
+ return r;
+ }
+
+ if (dc->ctx->dmub_srv) {
+ i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.dmub_outbox_params[0];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_dmub_outbox1_low_irq, c_irq_params);
+ }
+
+ return 0;
+}
+
+/*
+ * Acquires the lock for the atomic state object and returns
+ * the new atomic state.
+ *
+ * This should only be called during atomic check.
+ */
+int dm_atomic_get_state(struct drm_atomic_state *state,
+ struct dm_atomic_state **dm_state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_private_state *priv_state;
+
+ if (*dm_state)
+ return 0;
+
+ priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
+ if (IS_ERR(priv_state))
+ return PTR_ERR(priv_state);
+
+ *dm_state = to_dm_atomic_state(priv_state);
+
+ return 0;
+}
+
+static struct dm_atomic_state *
+dm_atomic_get_new_state(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_private_obj *obj;
+ struct drm_private_state *new_obj_state;
+ int i;
+
+ for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
+ if (obj->funcs == dm->atomic_obj.funcs)
+ return to_dm_atomic_state(new_obj_state);
+ }
+
+ return NULL;
+}
+
+static struct drm_private_state *
+dm_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct dm_atomic_state *old_state, *new_state;
+
+ new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
+ if (!new_state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
+
+ old_state = to_dm_atomic_state(obj->state);
+
+ if (old_state && old_state->context)
+ new_state->context = dc_state_create_copy(old_state->context);
+
+ if (!new_state->context) {
+ kfree(new_state);
+ return NULL;
+ }
+
+ return &new_state->base;
+}
+
+static void dm_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+
+ if (dm_state && dm_state->context)
+ dc_state_release(dm_state->context);
+
+ kfree(dm_state);
+}
+
+static struct drm_private_state_funcs dm_atomic_state_funcs = {
+ .atomic_duplicate_state = dm_atomic_duplicate_state,
+ .atomic_destroy_state = dm_atomic_destroy_state,
+};
+
+static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
+{
+ struct dm_atomic_state *state;
+ int r;
+
+ adev->mode_info.mode_config_initialized = true;
+
+ adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
+ adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
+
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
+
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
+ if (adev->asic_type == CHIP_HAWAII)
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ else
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+ /* indicates support for immediate flip */
+ adev_to_drm(adev)->mode_config.async_page_flip = true;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->context = dc_state_create_current_copy(adev->dm.dc);
+ if (!state->context) {
+ kfree(state);
+ return -ENOMEM;
+ }
+
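+	/* Track the dc_state copy as a DRM private object so it is duplicated
+	 * and released along with the atomic state.
+	 */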
+ drm_atomic_private_obj_init(adev_to_drm(adev),
+ &adev->dm.atomic_obj,
+ &state->base,
+ &dm_atomic_state_funcs);
+
+ r = amdgpu_display_modeset_create_props(adev);
+ if (r) {
+ dc_state_release(state->context);
+ kfree(state);
+ return r;
+ }
+
+#ifdef AMD_PRIVATE_COLOR
+	if (amdgpu_dm_create_color_properties(adev)) {
+		dc_state_release(state->context);
+		kfree(state);
+		return -ENOMEM;
+	}
+#endif
+
+ r = amdgpu_dm_audio_init(adev);
+ if (r) {
+ dc_state_release(state->context);
+ kfree(state);
+ return r;
+ }
+
+ return 0;
+}
+
+#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
+#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
+#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
+
+static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
+ int bl_idx)
+{
+#if defined(CONFIG_ACPI)
+ struct amdgpu_dm_backlight_caps caps;
+
+ memset(&caps, 0, sizeof(caps));
+
+ if (dm->backlight_caps[bl_idx].caps_valid)
+ return;
+
+ amdgpu_acpi_get_backlight_caps(&caps);
+ if (caps.caps_valid) {
+ dm->backlight_caps[bl_idx].caps_valid = true;
+ if (caps.aux_support)
+ return;
+ dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
+ dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
+ } else {
+ dm->backlight_caps[bl_idx].min_input_signal =
+ AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ dm->backlight_caps[bl_idx].max_input_signal =
+ AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ }
+#else
+ if (dm->backlight_caps[bl_idx].aux_support)
+ return;
+
+ dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+#endif
+}
+
+static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
+ unsigned int *min, unsigned int *max)
+{
+ if (!caps)
+ return 0;
+
+ if (caps->aux_support) {
+ // Firmware limits are in nits, DC API wants millinits.
+ *max = 1000 * caps->aux_max_input_signal;
+ *min = 1000 * caps->aux_min_input_signal;
+ } else {
+ // Firmware limits are 8-bit, PWM control is 16-bit.
+ *max = 0x101 * caps->max_input_signal;
+ *min = 0x101 * caps->min_input_signal;
+ }
+ return 1;
+}
+
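+/*
+ * Rescale a user brightness value in 0..AMDGPU_MAX_BL_LEVEL onto the panel
+ * range from get_brightness_range(). With the PWM defaults (min_input_signal
+ * = 12, max_input_signal = 255), the range is 0x101 * 12 = 3084 to
+ * 0x101 * 255 = 65535, so user level 0 maps to 3084 and the maximum user
+ * level maps to 65535.
+ */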
+static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
+ uint32_t brightness)
+{
+ unsigned int min, max;
+
+ if (!get_brightness_range(caps, &min, &max))
+ return brightness;
+
+ // Rescale 0..255 to min..max
+ return min + DIV_ROUND_CLOSEST((max - min) * brightness,
+ AMDGPU_MAX_BL_LEVEL);
+}
+
+static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
+ uint32_t brightness)
+{
+ unsigned int min, max;
+
+ if (!get_brightness_range(caps, &min, &max))
+ return brightness;
+
+ if (brightness < min)
+ return 0;
+ // Rescale min..max to 0..255
+ return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
+ max - min);
+}
+
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+ int bl_idx,
+ u32 user_brightness)
+{
+ struct amdgpu_dm_backlight_caps caps;
+ struct dc_link *link;
+ u32 brightness;
+ bool rc;
+
+ amdgpu_dm_update_backlight_caps(dm, bl_idx);
+ caps = dm->backlight_caps[bl_idx];
+
+ dm->brightness[bl_idx] = user_brightness;
+ /* update scratch register */
+ if (bl_idx == 0)
+ amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
+ brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
+ link = (struct dc_link *)dm->backlight_link[bl_idx];
+
+ /* Change brightness based on AUX property */
+ if (caps.aux_support) {
+ rc = dc_link_set_backlight_level_nits(link, true, brightness,
+ AUX_BL_DEFAULT_TRANSITION_TIME_MS);
+ if (!rc)
+ DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
+ } else {
+ rc = dc_link_set_backlight_level(link, brightness, 0);
+ if (!rc)
+ DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
+ }
+
+ if (rc)
+ dm->actual_brightness[bl_idx] = user_brightness;
+}
+
+static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+{
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+ int i;
+
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (bd == dm->backlight_dev[i])
+ break;
+ }
+ if (i >= AMDGPU_DM_MAX_NUM_EDP)
+ i = 0;
+ amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
+
+ return 0;
+}
+
+static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
+ int bl_idx)
+{
+ int ret;
+ struct amdgpu_dm_backlight_caps caps;
+ struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
+
+ amdgpu_dm_update_backlight_caps(dm, bl_idx);
+ caps = dm->backlight_caps[bl_idx];
+
+ if (caps.aux_support) {
+ u32 avg, peak;
+ bool rc;
+
+ rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
+ if (!rc)
+ return dm->brightness[bl_idx];
+ return convert_brightness_to_user(&caps, avg);
+ }
+
+ ret = dc_link_get_backlight_level(link);
+
+ if (ret == DC_ERROR_UNEXPECTED)
+ return dm->brightness[bl_idx];
+
+ return convert_brightness_to_user(&caps, ret);
+}
+
+static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+{
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+ int i;
+
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (bd == dm->backlight_dev[i])
+ break;
+ }
+ if (i >= AMDGPU_DM_MAX_NUM_EDP)
+ i = 0;
+ return amdgpu_dm_backlight_get_level(dm, i);
+}
+
+static const struct backlight_ops amdgpu_dm_backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = amdgpu_dm_backlight_get_brightness,
+ .update_status = amdgpu_dm_backlight_update_status,
+};
+
+static void
+amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_device *drm = aconnector->base.dev;
+ struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
+ struct backlight_properties props = { 0 };
+ char bl_name[16];
+
+ if (aconnector->bl_idx == -1)
+ return;
+
+ if (!acpi_video_backlight_use_native()) {
+ drm_info(drm, "Skipping amdgpu DM backlight registration\n");
+ /* Try registering an ACPI video backlight device instead. */
+ acpi_video_register_backlight();
+ return;
+ }
+
+ props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+ props.brightness = AMDGPU_MAX_BL_LEVEL;
+ props.type = BACKLIGHT_RAW;
+
+ snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
+ drm->primary->index + aconnector->bl_idx);
+
+ dm->backlight_dev[aconnector->bl_idx] =
+ backlight_device_register(bl_name, aconnector->base.kdev, dm,
+ &amdgpu_dm_backlight_ops, &props);
+
+ if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
+ DRM_ERROR("DM: Backlight registration failed!\n");
+ dm->backlight_dev[aconnector->bl_idx] = NULL;
+ } else
+ DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
+}
+
+static int initialize_plane(struct amdgpu_display_manager *dm,
+ struct amdgpu_mode_info *mode_info, int plane_id,
+ enum drm_plane_type plane_type,
+ const struct dc_plane_cap *plane_cap)
+{
+ struct drm_plane *plane;
+ unsigned long possible_crtcs;
+ int ret = 0;
+
+ plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
+ if (!plane) {
+ DRM_ERROR("KMS: Failed to allocate plane\n");
+ return -ENOMEM;
+ }
+ plane->type = plane_type;
+
+	/*
+	 * HACK: IGT tests expect that the primary plane for a CRTC
+	 * can only have one possible CRTC. Only expose support for
+	 * all CRTCs on planes that won't be used as the primary plane
+	 * for a CRTC - i.e. overlay or underlay planes.
+	 */
+ possible_crtcs = 1 << plane_id;
+ if (plane_id >= dm->dc->caps.max_streams)
+ possible_crtcs = 0xff;
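+	/*
+	 * Example: with max_streams == 4, primary plane 2 advertises only
+	 * CRTC 2 (mask 0x4), while plane 5 (an overlay) advertises mask
+	 * 0xff, i.e. every CRTC.
+	 */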
+
+ ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
+
+ if (ret) {
+ DRM_ERROR("KMS: Failed to initialize plane\n");
+ kfree(plane);
+ return ret;
+ }
+
+ if (mode_info)
+ mode_info->planes[plane_id] = plane;
+
+ return ret;
+}
+
+
+static void setup_backlight_device(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_link *link = aconnector->dc_link;
+ int bl_idx = dm->num_of_edps;
+
+ if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
+ link->type == dc_connection_none)
+ return;
+
+ if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
+		drm_warn(adev_to_drm(dm->adev), "Too many eDP connections; skipping backlight setup for additional eDPs\n");
+ return;
+ }
+
+ aconnector->bl_idx = bl_idx;
+
+ amdgpu_dm_update_backlight_caps(dm, bl_idx);
+ dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
+ dm->backlight_link[bl_idx] = link;
+ dm->num_of_edps++;
+
+ update_connector_ext_caps(aconnector);
+}
+
+static void amdgpu_set_panel_orientation(struct drm_connector *connector);
+
+/*
+ * In this architecture, the association
+ * connector -> encoder -> crtc
+ * is not really required. The crtc and connector will hold the
+ * display_index as an abstraction to use with the DAL component.
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+{
+ struct amdgpu_display_manager *dm = &adev->dm;
+ s32 i;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct amdgpu_encoder *aencoder = NULL;
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ u32 link_cnt;
+ s32 primary_planes;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ const struct dc_plane_cap *plane;
+ bool psr_feature_enabled = false;
+ bool replay_feature_enabled = false;
+ int max_overlay = dm->dc->caps.max_slave_planes;
+
+ dm->display_indexes_num = dm->dc->caps.max_streams;
+ /* Update the actual used number of crtc */
+ adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+
+ amdgpu_dm_set_irq_funcs(adev);
+
+ link_cnt = dm->dc->caps.max_links;
+ if (amdgpu_dm_mode_config_init(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize mode config\n");
+ return -EINVAL;
+ }
+
+ /* There is one primary plane per CRTC */
+ primary_planes = dm->dc->caps.max_streams;
+ ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
+
+ /*
+	 * Initialize primary planes, implicit planes for legacy IOCTLs.
+ * Order is reversed to match iteration order in atomic check.
+ */
+ for (i = (primary_planes - 1); i >= 0; i--) {
+ plane = &dm->dc->caps.planes[i];
+
+ if (initialize_plane(dm, mode_info, i,
+ DRM_PLANE_TYPE_PRIMARY, plane)) {
+ DRM_ERROR("KMS: Failed to initialize primary plane\n");
+ goto fail;
+ }
+ }
+
+ /*
+ * Initialize overlay planes, index starting after primary planes.
+ * These planes have a higher DRM index than the primary planes since
+ * they should be considered as having a higher z-order.
+ * Order is reversed to match iteration order in atomic check.
+ *
+ * Only support DCN for now, and only expose one so we don't encourage
+ * userspace to use up all the pipes.
+ */
+ for (i = 0; i < dm->dc->caps.max_planes; ++i) {
+ struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
+
+ /* Do not create overlay if MPO disabled */
+ if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
+ break;
+
+ if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
+ continue;
+
+ if (!plane->pixel_format_support.argb8888)
+ continue;
+
+ if (max_overlay-- == 0)
+ break;
+
+ if (initialize_plane(dm, NULL, primary_planes + i,
+ DRM_PLANE_TYPE_OVERLAY, plane)) {
+ DRM_ERROR("KMS: Failed to initialize overlay plane\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < dm->dc->caps.max_streams; i++)
+ if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
+ DRM_ERROR("KMS: Failed to initialize crtc\n");
+ goto fail;
+ }
+
+ /* Use Outbox interrupt */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ if (register_outbox_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+ default:
+ DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
+ amdgpu_ip_version(adev, DCE_HWIP, 0));
+ }
+
+ /* Determine whether to enable PSR support by default. */
+ if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ psr_feature_enabled = true;
+ break;
+ default:
+ psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
+ break;
+ }
+ }
+
+ /* Determine whether to enable Replay support by default. */
+ if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ replay_feature_enabled = true;
+ break;
+ default:
+ replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
+ break;
+ }
+ }
+
+ /* loops over all connectors on the board */
+ for (i = 0; i < link_cnt; i++) {
+ struct dc_link *link = NULL;
+
+ if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
+ DRM_ERROR(
+ "KMS: Cannot support more than %d display indexes\n",
+ AMDGPU_DM_MAX_DISPLAY_INDEX);
+ continue;
+ }
+
+ link = dc_get_link_at_index(dm->dc, i);
+
+ if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) {
+ struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
+
+ if (!wbcon) {
+ DRM_ERROR("KMS: Failed to allocate writeback connector\n");
+ continue;
+ }
+
+ if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
+ DRM_ERROR("KMS: Failed to initialize writeback connector\n");
+ kfree(wbcon);
+ continue;
+ }
+
+ link->psr_settings.psr_feature_enabled = false;
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
+ continue;
+ }
+
+ aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
+ if (!aconnector)
+ goto fail;
+
+ aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
+ if (!aencoder)
+ goto fail;
+
+ if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
+ DRM_ERROR("KMS: Failed to initialize encoder\n");
+ goto fail;
+ }
+
+ if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
+ DRM_ERROR("KMS: Failed to initialize connector\n");
+ goto fail;
+ }
+
+ if (dm->hpd_rx_offload_wq)
+ dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
+ aconnector;
+
+ if (!dc_link_detect_connection_type(link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(link);
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ } else {
+ bool ret = false;
+
+ mutex_lock(&dm->dc_lock);
+ ret = dc_link_detect(link, DETECT_REASON_BOOT);
+ mutex_unlock(&dm->dc_lock);
+
+ if (ret) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ setup_backlight_device(dm, aconnector);
+
+ /* Disable PSR if Replay can be enabled */
+ if (replay_feature_enabled)
+ if (amdgpu_dm_set_replay_caps(link, aconnector))
+ psr_feature_enabled = false;
+
+ if (psr_feature_enabled)
+ amdgpu_dm_set_psr_caps(link);
+
+ /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
+ * PSR is also supported.
+ */
+ if (link->psr_settings.psr_feature_enabled)
+ adev_to_drm(adev)->vblank_disable_immediate = false;
+ }
+ }
+ amdgpu_set_panel_orientation(&aconnector->base);
+ }
+
+ /* Software is initialized. Now we can register interrupt handlers. */
+ switch (adev->asic_type) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ if (dce60_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+#endif
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ if (dce110_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+ default:
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ if (dcn10_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+ default:
+ DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
+ amdgpu_ip_version(adev, DCE_HWIP, 0));
+ goto fail;
+ }
+ break;
+ }
+
+ return 0;
+fail:
+ kfree(aencoder);
+ kfree(aconnector);
+
+ return -EINVAL;
+}
+
+static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
+{
+ drm_atomic_private_obj_fini(&dm->atomic_obj);
+}
+
+/******************************************************************************
+ * amdgpu_display_funcs functions
+ *****************************************************************************/
+
+/*
+ * dm_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line buffer allocation.
+ */
+static void dm_bandwidth_update(struct amdgpu_device *adev)
+{
+ /* TODO: implement later */
+}
+
+static const struct amdgpu_display_funcs dm_display_funcs = {
+ .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
+ .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
+ .backlight_set_level = NULL, /* never called for DC */
+ .backlight_get_level = NULL, /* never called for DC */
+ .hpd_sense = NULL,/* called unconditionally */
+ .hpd_set_polarity = NULL, /* called unconditionally */
+ .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
+ .page_flip_get_scanoutpos =
+ dm_crtc_get_scanoutpos,/* called unconditionally */
+ .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
+ .add_connector = NULL, /* VBIOS parsing. DAL does it. */
+};
+
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+
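+/*
+ * Debug-only sysfs hook: writing a non-zero value resumes DM and fires a
+ * hotplug event, while writing 0 suspends it. Illustrative usage (the
+ * exact sysfs path depends on the device):
+ *
+ *   echo 0 > /sys/class/drm/card0/device/s3_debug
+ */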
+static ssize_t s3_debug_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret;
+ int s3_state;
+ struct drm_device *drm_dev = dev_get_drvdata(device);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ ret = kstrtoint(buf, 0, &s3_state);
+
+ if (ret == 0) {
+ if (s3_state) {
+ dm_resume(adev);
+ drm_kms_helper_hotplug_event(adev_to_drm(adev));
+ } else
+ dm_suspend(adev);
+ }
+
+ return ret == 0 ? count : 0;
+}
+
+DEVICE_ATTR_WO(s3_debug);
+
+#endif
+
+static int dm_init_microcode(struct amdgpu_device *adev)
+{
+ char *fw_name_dmub;
+ int r;
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 1, 0):
+ fw_name_dmub = FIRMWARE_RENOIR_DMUB;
+ if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
+ fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
+ break;
+ case IP_VERSION(3, 0, 0):
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))
+ fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
+ else
+ fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
+ break;
+ case IP_VERSION(3, 0, 1):
+ fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
+ break;
+ case IP_VERSION(3, 0, 2):
+ fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
+ break;
+ case IP_VERSION(3, 0, 3):
+ fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
+ break;
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
+ break;
+ case IP_VERSION(3, 1, 4):
+ fw_name_dmub = FIRMWARE_DCN_314_DMUB;
+ break;
+ case IP_VERSION(3, 1, 5):
+ fw_name_dmub = FIRMWARE_DCN_315_DMUB;
+ break;
+ case IP_VERSION(3, 1, 6):
+ fw_name_dmub = FIRMWARE_DCN316_DMUB;
+ break;
+ case IP_VERSION(3, 2, 0):
+ fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
+ break;
+ case IP_VERSION(3, 2, 1):
+ fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
+ break;
+ case IP_VERSION(3, 5, 0):
+ fw_name_dmub = FIRMWARE_DCN_35_DMUB;
+ break;
+ case IP_VERSION(3, 5, 1):
+ fw_name_dmub = FIRMWARE_DCN_351_DMUB;
+ break;
+ default:
+ /* ASIC doesn't support DMUB. */
+ return 0;
+ }
+ r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
+ return r;
+}
+
+static int dm_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct atom_context *ctx = mode_info->atom_context;
+ int index = GetIndexIntoMasterTable(DATA, Object_Header);
+ u16 data_offset;
+
+ /* if there is no object header, skip DM */
+ if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ dev_info(adev->dev, "No object header, skipping DM\n");
+ return -ENOENT;
+ }
+
+ switch (adev->asic_type) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_OLAND:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 2;
+ adev->mode_info.num_dig = 2;
+ break;
+#endif
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_KAVERI:
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 7;
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 7;
+ break;
+ case CHIP_CARRIZO:
+ adev->mode_info.num_crtc = 3;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 9;
+ break;
+ case CHIP_STONEY:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 9;
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ adev->mode_info.num_crtc = 5;
+ adev->mode_info.num_hpd = 5;
+ adev->mode_info.num_dig = 5;
+ break;
+ case CHIP_POLARIS10:
+ case CHIP_VEGAM:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ default:
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(3, 0, 0):
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ adev->mode_info.num_crtc = 5;
+ adev->mode_info.num_hpd = 5;
+ adev->mode_info.num_dig = 5;
+ break;
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(3, 0, 3):
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 2;
+ adev->mode_info.num_dig = 2;
+ break;
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 4;
+ adev->mode_info.num_dig = 4;
+ break;
+ default:
+ DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
+ amdgpu_ip_version(adev, DCE_HWIP, 0));
+ return -EINVAL;
+ }
+ break;
+ }
+
+ if (adev->mode_info.funcs == NULL)
+ adev->mode_info.funcs = &dm_display_funcs;
+
+ /*
+ * Note: Do NOT change adev->audio_endpt_rreg and
+ * adev->audio_endpt_wreg because they are initialised in
+ * amdgpu_device_init()
+ */
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+ device_create_file(
+ adev_to_drm(adev)->dev,
+ &dev_attr_s3_debug);
+#endif
+ adev->dc_enabled = true;
+
+ return dm_init_microcode(adev);
+}
+
+static bool modereset_required(struct drm_crtc_state *crtc_state)
+{
+ return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
+}
+
+static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+ .destroy = amdgpu_dm_encoder_destroy,
+};
+
+static int
+fill_plane_color_attributes(const struct drm_plane_state *plane_state,
+ const enum surface_pixel_format format,
+ enum dc_color_space *color_space)
+{
+ bool full_range;
+
+ *color_space = COLOR_SPACE_SRGB;
+
+ /* DRM color properties only affect non-RGB formats. */
+ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return 0;
+
+ full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
+
+ switch (plane_state->color_encoding) {
+ case DRM_COLOR_YCBCR_BT601:
+ if (full_range)
+ *color_space = COLOR_SPACE_YCBCR601;
+ else
+ *color_space = COLOR_SPACE_YCBCR601_LIMITED;
+ break;
+
+ case DRM_COLOR_YCBCR_BT709:
+ if (full_range)
+ *color_space = COLOR_SPACE_YCBCR709;
+ else
+ *color_space = COLOR_SPACE_YCBCR709_LIMITED;
+ break;
+
+ case DRM_COLOR_YCBCR_BT2020:
+ if (full_range)
+ *color_space = COLOR_SPACE_2020_YCBCR;
+ else
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
+ const struct drm_plane_state *plane_state,
+ const u64 tiling_flags,
+ struct dc_plane_info *plane_info,
+ struct dc_plane_address *address,
+ bool tmz_surface,
+ bool force_disable_dcc)
+{
+ const struct drm_framebuffer *fb = plane_state->fb;
+ const struct amdgpu_framebuffer *afb =
+ to_amdgpu_framebuffer(plane_state->fb);
+ int ret;
+
+ memset(plane_info, 0, sizeof(*plane_info));
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ plane_info->format =
+ SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
+ break;
+ case DRM_FORMAT_RGB565:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
+ break;
+ case DRM_FORMAT_NV21:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
+ break;
+ case DRM_FORMAT_NV12:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
+ break;
+ case DRM_FORMAT_P010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
+ break;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
+ break;
+ case DRM_FORMAT_XRGB16161616:
+ case DRM_FORMAT_ARGB16161616:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
+ break;
+ case DRM_FORMAT_XBGR16161616:
+ case DRM_FORMAT_ABGR16161616:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
+ break;
+ default:
+ DRM_ERROR(
+ "Unsupported screen format %p4cc\n",
+ &fb->format->format);
+ return -EINVAL;
+ }
+
+ switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_0:
+ plane_info->rotation = ROTATION_ANGLE_0;
+ break;
+ case DRM_MODE_ROTATE_90:
+ plane_info->rotation = ROTATION_ANGLE_90;
+ break;
+ case DRM_MODE_ROTATE_180:
+ plane_info->rotation = ROTATION_ANGLE_180;
+ break;
+ case DRM_MODE_ROTATE_270:
+ plane_info->rotation = ROTATION_ANGLE_270;
+ break;
+ default:
+ plane_info->rotation = ROTATION_ANGLE_0;
+ break;
+ }
+
+
+ plane_info->visible = true;
+ plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
+
+ plane_info->layer_index = plane_state->normalized_zpos;
+
+ ret = fill_plane_color_attributes(plane_state, plane_info->format,
+ &plane_info->color_space);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format,
+ plane_info->rotation, tiling_flags,
+ &plane_info->tiling_info,
+ &plane_info->plane_size,
+ &plane_info->dcc, address,
+ tmz_surface, force_disable_dcc);
+ if (ret)
+ return ret;
+
+ amdgpu_dm_plane_fill_blending_from_plane_state(
+ plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
+ &plane_info->global_alpha, &plane_info->global_alpha_value);
+
+ return 0;
+}
+
+static int fill_dc_plane_attributes(struct amdgpu_device *adev,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
+ struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
+ struct dc_scaling_info scaling_info;
+ struct dc_plane_info plane_info;
+ int ret;
+ bool force_disable_dcc = false;
+
+ ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
+ if (ret)
+ return ret;
+
+ dc_plane_state->src_rect = scaling_info.src_rect;
+ dc_plane_state->dst_rect = scaling_info.dst_rect;
+ dc_plane_state->clip_rect = scaling_info.clip_rect;
+ dc_plane_state->scaling_quality = scaling_info.scaling_quality;
+
+ force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
+ ret = fill_dc_plane_info_and_addr(adev, plane_state,
+ afb->tiling_flags,
+ &plane_info,
+ &dc_plane_state->address,
+ afb->tmz_surface,
+ force_disable_dcc);
+ if (ret)
+ return ret;
+
+	dc_plane_state->format = plane_info.format;
+	dc_plane_state->color_space = plane_info.color_space;
+ dc_plane_state->plane_size = plane_info.plane_size;
+ dc_plane_state->rotation = plane_info.rotation;
+ dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
+ dc_plane_state->stereo_format = plane_info.stereo_format;
+ dc_plane_state->tiling_info = plane_info.tiling_info;
+ dc_plane_state->visible = plane_info.visible;
+ dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
+ dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
+ dc_plane_state->global_alpha = plane_info.global_alpha;
+ dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
+ dc_plane_state->dcc = plane_info.dcc;
+ dc_plane_state->layer_index = plane_info.layer_index;
+ dc_plane_state->flip_int_enabled = true;
+
+ /*
+ * Always set input transfer function, since plane state is refreshed
+ * every time.
+ */
+ ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state,
+ plane_state,
+ dc_plane_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static inline void fill_dc_dirty_rect(struct drm_plane *plane,
+ struct rect *dirty_rect, int32_t x,
+ s32 y, s32 width, s32 height,
+ int *i, bool ffu)
+{
+ WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
+
+ dirty_rect->x = x;
+ dirty_rect->y = y;
+ dirty_rect->width = width;
+ dirty_rect->height = height;
+
+ if (ffu)
+ drm_dbg(plane->dev,
+ "[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
+ plane->base.id, width, height);
+ else
+ drm_dbg(plane->dev,
+ "[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
+ plane->base.id, x, y, width, height);
+
+ (*i)++;
+}
+
+/**
+ * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
+ *
+ * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
+ * remote fb
+ * @old_plane_state: Old state of @plane
+ * @new_plane_state: New state of @plane
+ * @crtc_state: New state of CRTC connected to the @plane
+ * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
+ * @is_psr_su: Flag indicating whether Panel Self Refresh Selective Update (PSR SU) is enabled.
+ * If PSR SU is enabled and damage clips are available, only the regions of the screen
+ * that have changed will be updated. If PSR SU is not enabled,
+ * or if damage clips are not available, the entire screen will be updated.
+ * @dirty_regions_changed: set to true when the dirty regions change
+ *
+ * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
+ * (referred to as "damage clips" in DRM nomenclature) that require updating on
+ * the eDP remote buffer. The responsibility of specifying the dirty regions is
+ * amdgpu_dm's.
+ *
+ * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
+ * plane with regions that require flushing to the eDP remote buffer. In
+ * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
+ * implicitly provide damage clips without any client support via the plane
+ * bounds.
+ */
+static void fill_dc_dirty_rects(struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state,
+ struct drm_crtc_state *crtc_state,
+ struct dc_flip_addrs *flip_addrs,
+ bool is_psr_su,
+ bool *dirty_regions_changed)
+{
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
+ struct rect *dirty_rects = flip_addrs->dirty_rects;
+ u32 num_clips;
+ struct drm_mode_rect *clips;
+ bool bb_changed;
+ bool fb_changed;
+ u32 i = 0;
+ *dirty_regions_changed = false;
+
+ /*
+	 * Cursor plane has its own dirty rect update interface. See
+ * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
+ */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ return;
+
+ if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
+ goto ffu;
+
+ num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+ clips = drm_plane_get_damage_clips(new_plane_state);
+
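+	/*
+	 * Fall back to a full-frame update when damage clips are explicitly
+	 * disabled (amdgpu_damage_clips == 0) or left at their default
+	 * (negative) value on a PSR-SU panel.
+	 */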
+ if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 &&
+ is_psr_su)))
+ goto ffu;
+
+ if (!dm_crtc_state->mpo_requested) {
+ if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
+ goto ffu;
+
+ for (; flip_addrs->dirty_rect_count < num_clips; clips++)
+ fill_dc_dirty_rect(new_plane_state->plane,
+ &dirty_rects[flip_addrs->dirty_rect_count],
+ clips->x1, clips->y1,
+ clips->x2 - clips->x1, clips->y2 - clips->y1,
+ &flip_addrs->dirty_rect_count,
+ false);
+ return;
+ }
+
+ /*
+ * MPO is requested. Add entire plane bounding box to dirty rects if
+ * flipped to or damaged.
+ *
+ * If plane is moved or resized, also add old bounding box to dirty
+ * rects.
+ */
+ fb_changed = old_plane_state->fb->base.id !=
+ new_plane_state->fb->base.id;
+ bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
+ old_plane_state->crtc_y != new_plane_state->crtc_y ||
+ old_plane_state->crtc_w != new_plane_state->crtc_w ||
+ old_plane_state->crtc_h != new_plane_state->crtc_h);
+
+ drm_dbg(plane->dev,
+ "[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
+ new_plane_state->plane->base.id,
+ bb_changed, fb_changed, num_clips);
+
+ *dirty_regions_changed = bb_changed;
+
+ if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS)
+ goto ffu;
+
+ if (bb_changed) {
+ fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
+ new_plane_state->crtc_x,
+ new_plane_state->crtc_y,
+ new_plane_state->crtc_w,
+ new_plane_state->crtc_h, &i, false);
+
+ /* Add old plane bounding-box if plane is moved or resized */
+ fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
+ old_plane_state->crtc_x,
+ old_plane_state->crtc_y,
+ old_plane_state->crtc_w,
+ old_plane_state->crtc_h, &i, false);
+ }
+
+ if (num_clips) {
+ for (; i < num_clips; clips++)
+ fill_dc_dirty_rect(new_plane_state->plane,
+ &dirty_rects[i], clips->x1,
+ clips->y1, clips->x2 - clips->x1,
+ clips->y2 - clips->y1, &i, false);
+ } else if (fb_changed && !bb_changed) {
+ fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
+ new_plane_state->crtc_x,
+ new_plane_state->crtc_y,
+ new_plane_state->crtc_w,
+ new_plane_state->crtc_h, &i, false);
+ }
+
+ flip_addrs->dirty_rect_count = i;
+ return;
+
+ffu:
+ fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
+ dm_crtc_state->base.mode.crtc_hdisplay,
+ dm_crtc_state->base.mode.crtc_vdisplay,
+ &flip_addrs->dirty_rect_count, true);
+}
+
+static void update_stream_scaling_settings(const struct drm_display_mode *mode,
+ const struct dm_connector_state *dm_state,
+ struct dc_stream_state *stream)
+{
+ enum amdgpu_rmx_type rmx_type;
+
+	struct rect src = { 0 }; /* viewport in composition space */
+ struct rect dst = { 0 }; /* stream addressable area */
+
+ /* no mode. nothing to be done */
+ if (!mode)
+ return;
+
+ /* Full screen scaling by default */
+ src.width = mode->hdisplay;
+ src.height = mode->vdisplay;
+ dst.width = stream->timing.h_addressable;
+ dst.height = stream->timing.v_addressable;
+
+ if (dm_state) {
+ rmx_type = dm_state->scaling;
+ if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
+ if (src.width * dst.height <
+ src.height * dst.width) {
+ /* height needs less upscaling/more downscaling */
+ dst.width = src.width *
+ dst.height / src.height;
+ } else {
+ /* width needs less upscaling/more downscaling */
+ dst.height = src.height *
+ dst.width / src.width;
+ }
+ } else if (rmx_type == RMX_CENTER) {
+ dst = src;
+ }
+
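+		/*
+		 * Worked example (RMX_ASPECT): a 1920x1080 source on a
+		 * 1280x1024 stream shrinks the destination to 1280x720 to
+		 * preserve the aspect ratio; the centering below then gives
+		 * dst.x = 0 and dst.y = 152, i.e. letterboxing.
+		 */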
+ dst.x = (stream->timing.h_addressable - dst.width) / 2;
+ dst.y = (stream->timing.v_addressable - dst.height) / 2;
+
+ if (dm_state->underscan_enable) {
+ dst.x += dm_state->underscan_hborder / 2;
+ dst.y += dm_state->underscan_vborder / 2;
+ dst.width -= dm_state->underscan_hborder;
+ dst.height -= dm_state->underscan_vborder;
+ }
+ }
+
+ stream->src = src;
+ stream->dst = dst;
+
+ DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
+ dst.x, dst.y, dst.width, dst.height);
+
+}
+
+static enum dc_color_depth
+convert_color_depth_from_display_info(const struct drm_connector *connector,
+ bool is_y420, int requested_bpc)
+{
+ u8 bpc;
+
+ if (is_y420) {
+ bpc = 8;
+
+ /* Cap display bpc based on HDMI 2.0 HF-VSDB */
+ if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
+ bpc = 16;
+ else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
+ bpc = 12;
+ else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
+ bpc = 10;
+ } else {
+ bpc = (uint8_t)connector->display_info.bpc;
+ /* Assume 8 bpc by default if no bpc is specified. */
+ bpc = bpc ? bpc : 8;
+ }
+
+ if (requested_bpc > 0) {
+ /*
+ * Cap display bpc based on the user requested value.
+ *
+		 * The value for state->max_bpc may not be correctly updated
+ * depending on when the connector gets added to the state
+ * or if this was called outside of atomic check, so it
+ * can't be used directly.
+ */
+ bpc = min_t(u8, bpc, requested_bpc);
+
+ /* Round down to the nearest even number. */
+ bpc = bpc - (bpc & 1);
+ }
+
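+	/*
+	 * Example: a 12 bpc panel with requested_bpc == 11 is capped to 11
+	 * and rounded down to 10, selecting COLOR_DEPTH_101010 below.
+	 */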
+ switch (bpc) {
+ case 0:
+ /*
+		 * Temporary workaround: DRM doesn't parse color depth for
+		 * EDID revisions before 1.4
+ * TODO: Fix edid parsing
+ */
+ return COLOR_DEPTH_888;
+ case 6:
+ return COLOR_DEPTH_666;
+ case 8:
+ return COLOR_DEPTH_888;
+ case 10:
+ return COLOR_DEPTH_101010;
+ case 12:
+ return COLOR_DEPTH_121212;
+ case 14:
+ return COLOR_DEPTH_141414;
+ case 16:
+ return COLOR_DEPTH_161616;
+ default:
+ return COLOR_DEPTH_UNDEFINED;
+ }
+}
+
+static enum dc_aspect_ratio
+get_aspect_ratio(const struct drm_display_mode *mode_in)
+{
+ /* 1-1 mapping, since both enums follow the HDMI spec. */
+ return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
+}
+
+static enum dc_color_space
+get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
+ const struct drm_connector_state *connector_state)
+{
+ enum dc_color_space color_space = COLOR_SPACE_SRGB;
+
+ switch (connector_state->colorspace) {
+ case DRM_MODE_COLORIMETRY_BT601_YCC:
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space = COLOR_SPACE_YCBCR601_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR601;
+ break;
+ case DRM_MODE_COLORIMETRY_BT709_YCC:
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space = COLOR_SPACE_YCBCR709_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR709;
+ break;
+ case DRM_MODE_COLORIMETRY_OPRGB:
+ color_space = COLOR_SPACE_ADOBERGB;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_RGB:
+ case DRM_MODE_COLORIMETRY_BT2020_YCC:
+ if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
+ color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
+ else
+ color_space = COLOR_SPACE_2020_YCBCR;
+ break;
+ case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
+ default:
+ if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
+ color_space = COLOR_SPACE_SRGB;
+ /*
+			 * 27030 kHz is the separation point between HDTV and SDTV
+			 * according to the HDMI spec; we use YCbCr709 and YCbCr601
+			 * respectively
+ */
+ } else if (dc_crtc_timing->pix_clk_100hz > 270300) {
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space =
+ COLOR_SPACE_YCBCR709_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR709;
+ } else {
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space =
+ COLOR_SPACE_YCBCR601_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR601;
+ }
+ break;
+ }
+
+ return color_space;
+}
+
+static enum display_content_type
+get_output_content_type(const struct drm_connector_state *connector_state)
+{
+ switch (connector_state->content_type) {
+ default:
+ case DRM_MODE_CONTENT_TYPE_NO_DATA:
+ return DISPLAY_CONTENT_TYPE_NO_DATA;
+ case DRM_MODE_CONTENT_TYPE_GRAPHICS:
+ return DISPLAY_CONTENT_TYPE_GRAPHICS;
+ case DRM_MODE_CONTENT_TYPE_PHOTO:
+ return DISPLAY_CONTENT_TYPE_PHOTO;
+ case DRM_MODE_CONTENT_TYPE_CINEMA:
+ return DISPLAY_CONTENT_TYPE_CINEMA;
+ case DRM_MODE_CONTENT_TYPE_GAME:
+ return DISPLAY_CONTENT_TYPE_GAME;
+ }
+}
+
+static bool adjust_colour_depth_from_display_info(
+ struct dc_crtc_timing *timing_out,
+ const struct drm_display_info *info)
+{
+ enum dc_color_depth depth = timing_out->display_color_depth;
+ int normalized_clk;
+
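+	/*
+	 * Walk down from the current depth until the depth-scaled pixel
+	 * clock fits the sink's TMDS limit. Example (assuming a 600000 kHz
+	 * max_tmds_clock and 3840x2160@60 at 594000 kHz): 12 bpc needs
+	 * 891000 kHz and 10 bpc needs 742500 kHz, both too high, so the
+	 * loop settles on 8 bpc (COLOR_DEPTH_888).
+	 */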
+ do {
+ normalized_clk = timing_out->pix_clk_100hz / 10;
+ /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
+ if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ normalized_clk /= 2;
+ /* Adjusting pix clock following on HDMI spec based on colour depth */
+ switch (depth) {
+ case COLOR_DEPTH_888:
+ break;
+ case COLOR_DEPTH_101010:
+ normalized_clk = (normalized_clk * 30) / 24;
+ break;
+ case COLOR_DEPTH_121212:
+ normalized_clk = (normalized_clk * 36) / 24;
+ break;
+ case COLOR_DEPTH_161616:
+ normalized_clk = (normalized_clk * 48) / 24;
+ break;
+ default:
+ /* The above depths are the only ones valid for HDMI. */
+ return false;
+ }
+ if (normalized_clk <= info->max_tmds_clock) {
+ timing_out->display_color_depth = depth;
+ return true;
+ }
+ } while (--depth > COLOR_DEPTH_666);
+ return false;
+}
+
+static void fill_stream_properties_from_drm_display_mode(
+ struct dc_stream_state *stream,
+ const struct drm_display_mode *mode_in,
+ const struct drm_connector *connector,
+ const struct drm_connector_state *connector_state,
+ const struct dc_stream_state *old_stream,
+ int requested_bpc)
+{
+ struct dc_crtc_timing *timing_out = &stream->timing;
+ const struct drm_display_info *info = &connector->display_info;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct hdmi_vendor_infoframe hv_frame;
+ struct hdmi_avi_infoframe avi_frame;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ memset(&hv_frame, 0, sizeof(hv_frame));
+ memset(&avi_frame, 0, sizeof(avi_frame));
+
+ timing_out->h_border_left = 0;
+ timing_out->h_border_right = 0;
+ timing_out->v_border_top = 0;
+ timing_out->v_border_bottom = 0;
+ /* TODO: un-hardcode */
+ if (drm_mode_is_420_only(info, mode_in)
+ && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if (drm_mode_is_420_also(info, mode_in)
+ && aconnector
+ && aconnector->force_yuv420_output)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
+ && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
+ else
+ timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
+
+ timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
+ timing_out->display_color_depth = convert_color_depth_from_display_info(
+ connector,
+ (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
+ requested_bpc);
+ timing_out->scan_type = SCANNING_TYPE_NODATA;
+ timing_out->hdmi_vic = 0;
+
+ if (old_stream) {
+ timing_out->vic = old_stream->timing.vic;
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
+ } else {
+ timing_out->vic = drm_match_cea_mode(mode_in);
+ if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
+ if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
+ }
+
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
+ timing_out->vic = avi_frame.video_code;
+ drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
+ timing_out->hdmi_vic = hv_frame.vic;
+ }
+
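+	/*
+	 * For FreeSync video modes, take the timings from the base mode
+	 * fields rather than the crtc_* values, so that variants that only
+	 * extend the front porch share the same base timing.
+	 */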
+ if (aconnector && is_freesync_video_mode(mode_in, aconnector)) {
+ timing_out->h_addressable = mode_in->hdisplay;
+ timing_out->h_total = mode_in->htotal;
+ timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
+ timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
+ timing_out->v_total = mode_in->vtotal;
+ timing_out->v_addressable = mode_in->vdisplay;
+ timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
+ timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
+ timing_out->pix_clk_100hz = mode_in->clock * 10;
+ } else {
+ timing_out->h_addressable = mode_in->crtc_hdisplay;
+ timing_out->h_total = mode_in->crtc_htotal;
+ timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
+ timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
+ timing_out->v_total = mode_in->crtc_vtotal;
+ timing_out->v_addressable = mode_in->crtc_vdisplay;
+ timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
+ timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
+ timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
+ }
+
+ timing_out->aspect_ratio = get_aspect_ratio(mode_in);
+
+ stream->out_transfer_func.type = TF_TYPE_PREDEFINED;
+ stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ if (!adjust_colour_depth_from_display_info(timing_out, info) &&
+ drm_mode_is_420_also(info, mode_in) &&
+ timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ adjust_colour_depth_from_display_info(timing_out, info);
+ }
+ }
+
+ stream->output_color_space = get_output_color_space(timing_out, connector_state);
+ stream->content_type = get_output_content_type(connector_state);
+}
+
+static void fill_audio_info(struct audio_info *audio_info,
+ const struct drm_connector *drm_connector,
+ const struct dc_sink *dc_sink)
+{
+ int i = 0;
+ int cea_revision = 0;
+ const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
+
+ audio_info->manufacture_id = edid_caps->manufacturer_id;
+ audio_info->product_id = edid_caps->product_id;
+
+ cea_revision = drm_connector->display_info.cea_rev;
+
+ strscpy(audio_info->display_name,
+ edid_caps->display_name,
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
+
+ if (cea_revision >= 3) {
+ audio_info->mode_count = edid_caps->audio_mode_count;
+
+ for (i = 0; i < audio_info->mode_count; ++i) {
+ audio_info->modes[i].format_code =
+ (enum audio_format_code)
+ (edid_caps->audio_modes[i].format_code);
+ audio_info->modes[i].channel_count =
+ edid_caps->audio_modes[i].channel_count;
+ audio_info->modes[i].sample_rates.all =
+ edid_caps->audio_modes[i].sample_rate;
+ audio_info->modes[i].sample_size =
+ edid_caps->audio_modes[i].sample_size;
+ }
+ }
+
+ audio_info->flags.all = edid_caps->speaker_flags;
+
+	/* TODO: We only check progressive mode; check interlaced mode too */
+ if (drm_connector->latency_present[0]) {
+ audio_info->video_latency = drm_connector->video_latency[0];
+ audio_info->audio_latency = drm_connector->audio_latency[0];
+ }
+
+ /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
+
+}
+
+static void
+copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
+ struct drm_display_mode *dst_mode)
+{
+ dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
+ dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
+ dst_mode->crtc_clock = src_mode->crtc_clock;
+ dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
+ dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
+ dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
+ dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
+ dst_mode->crtc_htotal = src_mode->crtc_htotal;
+ dst_mode->crtc_hskew = src_mode->crtc_hskew;
+ dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
+ dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
+ dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
+ dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
+ dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
+}
+
+static void
+decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
+ const struct drm_display_mode *native_mode,
+ bool scale_enabled)
+{
+ if (scale_enabled) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else if (native_mode->clock == drm_mode->clock &&
+ native_mode->htotal == drm_mode->htotal &&
+ native_mode->vtotal == drm_mode->vtotal) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else {
+		/* no scaling and not an amdgpu-inserted mode; nothing to patch */
+ }
+}
+
+static struct dc_sink *
+create_fake_sink(struct dc_link *link)
+{
+ struct dc_sink_init_data sink_init_data = { 0 };
+ struct dc_sink *sink = NULL;
+
+ sink_init_data.link = link;
+ sink_init_data.sink_signal = link->connector_signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ DRM_ERROR("Failed to create sink!\n");
+ return NULL;
+ }
+ sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
+
+ return sink;
+}
+
+static void set_multisync_trigger_params(
+ struct dc_stream_state *stream)
+{
+ struct dc_stream_state *master = NULL;
+
+ if (stream->triggered_crtc_reset.enabled) {
+ master = stream->triggered_crtc_reset.event_source;
+ stream->triggered_crtc_reset.event =
+ master->timing.flags.VSYNC_POSITIVE_POLARITY ?
+ CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
+ stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
+ }
+}
+
+static void set_master_stream(struct dc_stream_state *stream_set[],
+ int stream_count)
+{
+ int j, highest_rfr = 0, master_stream = 0;
+
+ for (j = 0; j < stream_count; j++) {
+ if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
+ int refresh_rate = 0;
+
+ refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
+ (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
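+			/*
+			 * e.g. 1920x1080@60 (148.5 MHz): 1485000 * 100 /
+			 * (2200 * 1125) == 60
+			 */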
+ if (refresh_rate > highest_rfr) {
+ highest_rfr = refresh_rate;
+ master_stream = j;
+ }
+ }
+ }
+ for (j = 0; j < stream_count; j++) {
+ if (stream_set[j])
+ stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
+ }
+}
+
+static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
+{
+ int i = 0;
+ struct dc_stream_state *stream;
+
+ if (context->stream_count < 2)
+ return;
+ for (i = 0; i < context->stream_count ; i++) {
+ if (!context->streams[i])
+ continue;
+ /*
+ * TODO: add a function to read AMD VSDB bits and set
+ * crtc_sync_master.multi_sync_enabled flag
+ * For now it's set to false
+ */
+ }
+
+ set_master_stream(context->streams, context->stream_count);
+
+ for (i = 0; i < context->stream_count ; i++) {
+ stream = context->streams[i];
+
+ if (!stream)
+ continue;
+
+ set_multisync_trigger_params(stream);
+ }
+}
+
+/**
+ * DOC: FreeSync Video
+ *
+ * When a userspace application wants to play a video, the content follows a
+ * standard format definition that usually specifies the FPS for that format.
+ * The list below illustrates some video formats and their expected FPS:
+ *
+ * - TV/NTSC (23.976 FPS)
+ * - Cinema (24 FPS)
+ * - TV/PAL (25 FPS)
+ * - TV/NTSC (29.97 FPS)
+ * - TV/NTSC (30 FPS)
+ * - Cinema HFR (48 FPS)
+ * - TV/PAL (50 FPS)
+ * - Commonly used (60 FPS)
+ * - Multiples of 24 (48,72,96 FPS)
+ *
+ * The list of standard video formats is not huge and can be added to the
+ * connector modeset list beforehand. With that, userspace can leverage
+ * FreeSync to extend the front porch in order to attain the target refresh
+ * rate. Such a switch will happen seamlessly, without screen blanking or
+ * reprogramming of the output in any other way. If userspace requests a
+ * modesetting change compatible with FreeSync modes that only differ in
+ * refresh rate, DC will skip the full update and avoid a blink during the
+ * transition. For example, the video player can change the modesetting from
+ * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
+ * causing any display blink. The same concept applies to any such mode
+ * setting change.
+ */
+static struct drm_display_mode *
+get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
+ bool use_probed_modes)
+{
+ struct drm_display_mode *m, *m_pref = NULL;
+ u16 current_refresh, highest_refresh;
+ struct list_head *list_head = use_probed_modes ?
+ &aconnector->base.probed_modes :
+ &aconnector->base.modes;
+
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return NULL;
+
+ if (aconnector->freesync_vid_base.clock != 0)
+ return &aconnector->freesync_vid_base;
+
+ /* Find the preferred mode */
+ list_for_each_entry(m, list_head, head) {
+ if (m->type & DRM_MODE_TYPE_PREFERRED) {
+ m_pref = m;
+ break;
+ }
+ }
+
+ if (!m_pref) {
+		/* Probably an EDID with no preferred mode. Fall back to the first entry */
+ m_pref = list_first_entry_or_null(
+ &aconnector->base.modes, struct drm_display_mode, head);
+ if (!m_pref) {
+ DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
+ return NULL;
+ }
+ }
+
+ highest_refresh = drm_mode_vrefresh(m_pref);
+
+ /*
+	 * Find the mode with the highest refresh rate at the same resolution.
+	 * For some monitors, the preferred mode is not the mode with the
+	 * highest supported refresh rate.
+ */
+ list_for_each_entry(m, list_head, head) {
+ current_refresh = drm_mode_vrefresh(m);
+
+ if (m->hdisplay == m_pref->hdisplay &&
+ m->vdisplay == m_pref->vdisplay &&
+ highest_refresh < current_refresh) {
+ highest_refresh = current_refresh;
+ m_pref = m;
+ }
+ }
+
+ drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
+ return m_pref;
+}
+
+static bool is_freesync_video_mode(const struct drm_display_mode *mode,
+ struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_display_mode *high_mode;
+ int timing_diff;
+
+ high_mode = get_highest_refresh_rate_mode(aconnector, false);
+ if (!high_mode || !mode)
+ return false;
+
+ timing_diff = high_mode->vtotal - mode->vtotal;
+
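+	/*
+	 * A FreeSync video variant must match the base mode in everything
+	 * except vtotal: the extra lines go entirely into the vertical
+	 * front porch, so vsync_start and vsync_end shift by exactly
+	 * timing_diff.
+	 */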
+ if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
+ high_mode->hdisplay != mode->hdisplay ||
+ high_mode->vdisplay != mode->vdisplay ||
+ high_mode->hsync_start != mode->hsync_start ||
+ high_mode->hsync_end != mode->hsync_end ||
+ high_mode->htotal != mode->htotal ||
+ high_mode->hskew != mode->hskew ||
+ high_mode->vscan != mode->vscan ||
+ high_mode->vsync_start - mode->vsync_start != timing_diff ||
+ high_mode->vsync_end - mode->vsync_end != timing_diff)
+ return false;
+ else
+ return true;
+}
+
+static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
+ struct dc_sink *sink, struct dc_stream_state *stream,
+ struct dsc_dec_dpcd_caps *dsc_caps)
+{
+ stream->timing.flags.DSC = 0;
+ dsc_caps->is_dsc_supported = false;
+
+ if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ sink->sink_signal == SIGNAL_TYPE_EDP)) {
+ if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
+ sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+ dsc_caps);
+ }
+}
+
+
+static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
+ struct dc_sink *sink, struct dc_stream_state *stream,
+ struct dsc_dec_dpcd_caps *dsc_caps,
+ uint32_t max_dsc_target_bpp_limit_override)
+{
+ const struct dc_link_settings *verified_link_cap = NULL;
+ u32 link_bw_in_kbps;
+ u32 edp_min_bpp_x16, edp_max_bpp_x16;
+ struct dc *dc = sink->ctx->dc;
+ struct dc_dsc_bw_range bw_range = {0};
+ struct dc_dsc_config dsc_cfg = {0};
+ struct dc_dsc_config_options dsc_options = {0};
+
+ dc_dsc_get_default_config_option(dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
+
+ verified_link_cap = dc_link_get_link_cap(stream->link);
+ link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
+ edp_min_bpp_x16 = 8 * 16;
+ edp_max_bpp_x16 = 8 * 16;
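+	/* bpp limits are in 1/16 bpp units; both start at a fixed 8 bpp */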
+
+ if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
+ edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
+
+ if (edp_max_bpp_x16 < edp_min_bpp_x16)
+ edp_min_bpp_x16 = edp_max_bpp_x16;
+
+ if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
+ dc->debug.dsc_min_slice_height_override,
+ edp_min_bpp_x16, edp_max_bpp_x16,
+ dsc_caps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &bw_range)) {
+
+ if (bw_range.max_kbps < link_bw_in_kbps) {
+ if (dc_dsc_compute_config(dc->res_pool->dscs[0],
+ dsc_caps,
+ &dsc_options,
+ 0,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &dsc_cfg)) {
+ stream->timing.dsc_cfg = dsc_cfg;
+ stream->timing.flags.DSC = 1;
+ stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
+ }
+ return;
+ }
+ }
+
+ if (dc_dsc_compute_config(dc->res_pool->dscs[0],
+ dsc_caps,
+ &dsc_options,
+ link_bw_in_kbps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &dsc_cfg)) {
+ stream->timing.dsc_cfg = dsc_cfg;
+ stream->timing.flags.DSC = 1;
+ }
+}
+
+
+static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
+ struct dc_sink *sink, struct dc_stream_state *stream,
+ struct dsc_dec_dpcd_caps *dsc_caps)
+{
+ struct drm_connector *drm_connector = &aconnector->base;
+ u32 link_bandwidth_kbps;
+ struct dc *dc = sink->ctx->dc;
+ u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
+ u32 dsc_max_supported_bw_in_kbps;
+ u32 max_dsc_target_bpp_limit_override =
+ drm_connector->display_info.max_dsc_bpp;
+ struct dc_dsc_config_options dsc_options = {0};
+
+ dc_dsc_get_default_config_option(dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
+
+ link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
+ dc_link_get_link_cap(aconnector->dc_link));
+
+ /* Set DSC policy according to dsc_clock_en */
+ dc_dsc_policy_set_enable_dsc_when_not_needed(
+ aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
+
+ if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
+ !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
+ dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
+
+ apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
+
+ } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+ dsc_caps,
+ &dsc_options,
+ link_bandwidth_kbps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &stream->timing.dsc_cfg)) {
+ stream->timing.flags.DSC = 1;
+ DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
+ }
+ } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
+ timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link));
+ max_supported_bw_in_kbps = link_bandwidth_kbps;
+ dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
+
+ if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
+ max_supported_bw_in_kbps > 0 &&
+ dsc_max_supported_bw_in_kbps > 0)
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+ dsc_caps,
+ &dsc_options,
+ dsc_max_supported_bw_in_kbps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &stream->timing.dsc_cfg)) {
+ stream->timing.flags.DSC = 1;
+ DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
+ __func__, drm_connector->name);
+ }
+ }
+ }
+
+ /* Overwrite the stream flag if DSC is enabled through debugfs */
+ if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
+ stream->timing.flags.DSC = 1;
+
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
+ stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
+
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
+ stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
+
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
+ stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
+}
+
+static struct dc_stream_state *
+create_stream_for_sink(struct drm_connector *connector,
+ const struct drm_display_mode *drm_mode,
+ const struct dm_connector_state *dm_state,
+ const struct dc_stream_state *old_stream,
+ int requested_bpc)
+{
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct drm_display_mode *preferred_mode = NULL;
+ const struct drm_connector_state *con_state = &dm_state->base;
+ struct dc_stream_state *stream = NULL;
+ struct drm_display_mode mode;
+ struct drm_display_mode saved_mode;
+ struct drm_display_mode *freesync_mode = NULL;
+ bool native_mode_found = false;
+ bool recalculate_timing = false;
+ bool scale = dm_state->scaling != RMX_OFF;
+ int mode_refresh;
+ int preferred_refresh = 0;
+ enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
+ struct dsc_dec_dpcd_caps dsc_caps;
+
+ struct dc_link *link = NULL;
+ struct dc_sink *sink = NULL;
+
+ drm_mode_init(&mode, drm_mode);
+ memset(&saved_mode, 0, sizeof(saved_mode));
+
+ if (connector == NULL) {
+ DRM_ERROR("connector is NULL!\n");
+ return stream;
+ }
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
+ aconnector = NULL;
+ aconnector = to_amdgpu_dm_connector(connector);
+ link = aconnector->dc_link;
+ } else {
+ struct drm_writeback_connector *wbcon = NULL;
+ struct amdgpu_dm_wb_connector *dm_wbcon = NULL;
+
+ wbcon = drm_connector_to_writeback(connector);
+ dm_wbcon = to_amdgpu_dm_wb_connector(wbcon);
+ link = dm_wbcon->link;
+ }
+
+ if (!aconnector || !aconnector->dc_sink) {
+ sink = create_fake_sink(link);
+ if (!sink)
+ return stream;
+
+ } else {
+ sink = aconnector->dc_sink;
+ dc_sink_retain(sink);
+ }
+
+ stream = dc_create_stream_for_sink(sink);
+
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ goto finish;
+ }
+
+ /* We leave this NULL for writeback connectors */
+ stream->dm_stream_context = aconnector;
+
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE =
+ connector->display_info.hdmi.scdc.scrambling.low_rates;
+
+ list_for_each_entry(preferred_mode, &connector->modes, head) {
+ /* Search for preferred mode */
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
+ native_mode_found = true;
+ break;
+ }
+ }
+ if (!native_mode_found)
+ preferred_mode = list_first_entry_or_null(
+ &connector->modes,
+ struct drm_display_mode,
+ head);
+
+ mode_refresh = drm_mode_vrefresh(&mode);
+
+ if (preferred_mode == NULL) {
+ /*
+		 * This may not be an error: the use case is when we have no
+		 * usermode calls to reset and set the mode upon hotplug. In this
+		 * case, we call set mode ourselves to restore the previous mode,
+		 * and the mode list may not be filled in time.
+ */
+ DRM_DEBUG_DRIVER("No preferred mode found\n");
+ } else if (aconnector) {
+ recalculate_timing = amdgpu_freesync_vid_mode &&
+ is_freesync_video_mode(&mode, aconnector);
+ if (recalculate_timing) {
+ freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
+ drm_mode_copy(&saved_mode, &mode);
+ saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio;
+ drm_mode_copy(&mode, freesync_mode);
+ mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio;
+ } else {
+ decide_crtc_timing_for_drm_display_mode(
+ &mode, preferred_mode, scale);
+
+ preferred_refresh = drm_mode_vrefresh(preferred_mode);
+ }
+ }
+
+ if (recalculate_timing)
+ drm_mode_set_crtcinfo(&saved_mode, 0);
+
+ /*
+	 * If scaling is enabled and the refresh rate didn't change,
+	 * we copy the vic and polarities of the old timings.
+ */
+ if (!scale || mode_refresh != preferred_refresh)
+ fill_stream_properties_from_drm_display_mode(
+ stream, &mode, connector, con_state, NULL,
+ requested_bpc);
+ else
+ fill_stream_properties_from_drm_display_mode(
+ stream, &mode, connector, con_state, old_stream,
+ requested_bpc);
+
+ /* The rest isn't needed for writeback connectors */
+ if (!aconnector)
+ goto finish;
+
+ if (aconnector->timing_changed) {
+ drm_dbg(aconnector->base.dev,
+ "overriding timing for automated test, bpc %d, changing to %d\n",
+ stream->timing.display_color_depth,
+ aconnector->timing_requested->display_color_depth);
+ stream->timing = *aconnector->timing_requested;
+ }
+
+ /* SST DSC determination policy */
+ update_dsc_caps(aconnector, sink, stream, &dsc_caps);
+ if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
+ apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
+
+ update_stream_scaling_settings(&mode, dm_state, stream);
+
+ fill_audio_info(
+ &stream->audio_info,
+ connector,
+ sink);
+
+ update_stream_signal(stream, sink);
+
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ stream->signal == SIGNAL_TYPE_EDP) {
+		/*
+		 * Decide whether the stream supports VSC SDP colorimetry
+		 * before building the VSC info packet.
+		 */
+ stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
+ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
+
+ if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
+ tf = TRANSFER_FUNC_GAMMA_22;
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
+ aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+
+ }
+finish:
+ dc_sink_release(sink);
+
+ return stream;
+}
+
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+ bool connected;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ /*
+ * Notes:
+	 * 1. This interface is NOT called in the context of the HPD irq.
+	 * 2. This interface *is called* in the context of a user-mode ioctl,
+	 *    which makes it a bad place for *any* MST-related activity.
+ */
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
+ !aconnector->fake_enable)
+ connected = (aconnector->dc_sink != NULL);
+ else
+ connected = (aconnector->base.force == DRM_FORCE_ON ||
+ aconnector->base.force == DRM_FORCE_ON_DIGITAL);
+
+ update_subconnector_property(aconnector);
+
+ return (connected ? connector_status_connected :
+ connector_status_disconnected);
+}
+
+int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *connector_state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dm_connector_state *dm_old_state =
+ to_dm_connector_state(connector->state);
+ struct dm_connector_state *dm_new_state =
+ to_dm_connector_state(connector_state);
+
+ int ret = -EINVAL;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ enum amdgpu_rmx_type rmx_type;
+
+ switch (val) {
+ case DRM_MODE_SCALE_CENTER:
+ rmx_type = RMX_CENTER;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ rmx_type = RMX_ASPECT;
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ rmx_type = RMX_FULL;
+ break;
+ case DRM_MODE_SCALE_NONE:
+ default:
+ rmx_type = RMX_OFF;
+ break;
+ }
+
+ if (dm_old_state->scaling == rmx_type)
+ return 0;
+
+ dm_new_state->scaling = rmx_type;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_hborder_property) {
+ dm_new_state->underscan_hborder = val;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_vborder_property) {
+ dm_new_state->underscan_vborder = val;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_property) {
+ dm_new_state->underscan_enable = val;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dm_connector_state *dm_state =
+ to_dm_connector_state(state);
+ int ret = -EINVAL;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ switch (dm_state->scaling) {
+ case RMX_CENTER:
+ *val = DRM_MODE_SCALE_CENTER;
+ break;
+ case RMX_ASPECT:
+ *val = DRM_MODE_SCALE_ASPECT;
+ break;
+ case RMX_FULL:
+ *val = DRM_MODE_SCALE_FULLSCREEN;
+ break;
+ case RMX_OFF:
+ default:
+ *val = DRM_MODE_SCALE_NONE;
+ break;
+ }
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_hborder_property) {
+ *val = dm_state->underscan_hborder;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_vborder_property) {
+ *val = dm_state->underscan_vborder;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_property) {
+ *val = dm_state->underscan_enable;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * DOC: panel power savings
+ *
+ * The display manager allows you to set your desired **panel power savings**
+ * level (between 0-4, with 0 representing off), e.g. using the following::
+ *
+ * # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings
+ *
+ * Modifying this value can have implications on color accuracy, so tread
+ * carefully.
+ */
+
+static ssize_t panel_power_savings_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_connector *connector = dev_get_drvdata(device);
+ struct drm_device *dev = connector->dev;
+ u8 val;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ val = to_dm_connector_state(connector->state)->abm_level ==
+ ABM_LEVEL_IMMEDIATE_DISABLE ? 0 :
+ to_dm_connector_state(connector->state)->abm_level;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t panel_power_savings_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_connector *connector = dev_get_drvdata(device);
+ struct drm_device *dev = connector->dev;
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 0, &val);
+
+ if (ret)
+ return ret;
+
+ if (val < 0 || val > 4)
+ return -EINVAL;
+
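+	/* 0 maps to ABM_LEVEL_IMMEDIATE_DISABLE; 1-4 pass through as ABM levels. */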
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ to_dm_connector_state(connector->state)->abm_level = val ?:
+ ABM_LEVEL_IMMEDIATE_DISABLE;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ drm_kms_helper_hotplug_event(dev);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(panel_power_savings);
+
+static struct attribute *amdgpu_attrs[] = {
+ &dev_attr_panel_power_savings.attr,
+ NULL
+};
+
+static const struct attribute_group amdgpu_group = {
+ .name = "amdgpu",
+ .attrs = amdgpu_attrs
+};
+
+static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+ amdgpu_dm_abm_level < 0)
+ sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group);
+
+ drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
+}
+
+static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+
+ /*
+	 * Call this only if mst_mgr was initialized earlier, since it's not
+	 * done for all connector types.
+ */
+ if (aconnector->mst_mgr.dev)
+ drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
+
+ if (aconnector->bl_idx != -1) {
+ backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
+ dm->backlight_dev[aconnector->bl_idx] = NULL;
+ }
+
+ if (aconnector->dc_em_sink)
+ dc_sink_release(aconnector->dc_em_sink);
+ aconnector->dc_em_sink = NULL;
+ if (aconnector->dc_sink)
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+
+ drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ if (aconnector->i2c) {
+ i2c_del_adapter(&aconnector->i2c->base);
+ kfree(aconnector->i2c);
+ }
+ kfree(aconnector->dm_dp_aux.aux.name);
+
+ kfree(connector);
+}
+
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
+{
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+
+ kfree(state);
+
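+	/* Allocate a fresh, zeroed state; DM defaults are seeded below. */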
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (state) {
+ state->scaling = RMX_OFF;
+ state->underscan_enable = false;
+ state->underscan_hborder = 0;
+ state->underscan_vborder = 0;
+ state->base.max_requested_bpc = 8;
+ state->vcpi_slots = 0;
+ state->pbn = 0;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (amdgpu_dm_abm_level <= 0)
+ state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE;
+ else
+ state->abm_level = amdgpu_dm_abm_level;
+ }
+
+ __drm_atomic_helper_connector_reset(connector, &state->base);
+ }
+}
+
+struct drm_connector_state *
+amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
+{
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
+ struct dm_connector_state *new_state =
+ kmemdup(state, sizeof(*state), GFP_KERNEL);
+
+ if (!new_state)
+ return NULL;
+
+ __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
+
+ new_state->freesync_capable = state->freesync_capable;
+ new_state->abm_level = state->abm_level;
+ new_state->scaling = state->scaling;
+ new_state->underscan_enable = state->underscan_enable;
+ new_state->underscan_hborder = state->underscan_hborder;
+ new_state->underscan_vborder = state->underscan_vborder;
+ new_state->vcpi_slots = state->vcpi_slots;
+ new_state->pbn = state->pbn;
+ return &new_state->base;
+}
+
+static int
+amdgpu_dm_connector_late_register(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ int r;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+ amdgpu_dm_abm_level < 0) {
+ r = sysfs_create_group(&connector->kdev->kobj,
+ &amdgpu_group);
+ if (r)
+ return r;
+ }
+
+ amdgpu_dm_register_backlight_device(amdgpu_dm_connector);
+
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
+ r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
+ if (r)
+ return r;
+ }
+
+#if defined(CONFIG_DEBUG_FS)
+ connector_debugfs_init(amdgpu_dm_connector);
+#endif
+
+ return 0;
+}
+
+static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dc_link *dc_link = aconnector->dc_link;
+ struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
+ struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link && dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
+
+ /*
+ * Note: drm_get_edid gets edid in the following order:
+ * 1) override EDID if set via edid_override debugfs,
+	 * 2) firmware EDID if set via edid_firmware module parameter,
+ * 3) regular DDC read.
+ */
+ edid = drm_get_edid(connector, ddc);
+ if (!edid) {
+ DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ return;
+ }
+
+ aconnector->edid = edid;
+
+ /* Update emulated (virtual) sink's EDID */
+ if (dc_em_sink && dc_link) {
+ memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
+ memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH);
+ dm_helpers_parse_edid_caps(
+ dc_link,
+ &dc_em_sink->dc_edid,
+ &dc_em_sink->edid_caps);
+ }
+}
+
+static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
+ .reset = amdgpu_dm_connector_funcs_reset,
+ .detect = amdgpu_dm_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = amdgpu_dm_connector_destroy,
+ .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
+ .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
+ .late_register = amdgpu_dm_connector_late_register,
+ .early_unregister = amdgpu_dm_connector_unregister,
+ .force = amdgpu_dm_connector_funcs_force
+};
+
+static int get_modes(struct drm_connector *connector)
+{
+ return amdgpu_dm_connector_get_modes(connector);
+}
+
+static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct dc_link *dc_link = aconnector->dc_link;
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_VIRTUAL
+ };
+ struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
+
+ /*
+ * Note: drm_get_edid gets edid in the following order:
+ * 1) override EDID if set via edid_override debugfs,
+	 * 2) firmware EDID if set via edid_firmware module parameter,
+ * 3) regular DDC read.
+ */
+ edid = drm_get_edid(connector, ddc);
+ if (!edid) {
+ DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ return;
+ }
+
+ if (drm_detect_hdmi_monitor(edid))
+ init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
+
+ aconnector->edid = edid;
+
+ aconnector->dc_em_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ (uint8_t *)edid,
+ (edid->extensions + 1) * EDID_LENGTH,
+ &init_params);
+
+ if (aconnector->base.force == DRM_FORCE_ON) {
+ aconnector->dc_sink = aconnector->dc_link->local_sink ?
+ aconnector->dc_link->local_sink :
+ aconnector->dc_em_sink;
+ dc_sink_retain(aconnector->dc_sink);
+ }
+}
+
+static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_link *link = (struct dc_link *)aconnector->dc_link;
+
+ /*
+	 * In case of a headless boot with force on for a DP managed connector,
+	 * these settings have to be != 0 to get the initial modeset.
+ */
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
+ link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
+ }
+
+ create_eml_sink(aconnector);
+}
+
+static enum dc_status dm_validate_stream_and_context(struct dc *dc,
+ struct dc_stream_state *stream)
+{
+ enum dc_status dc_result = DC_ERROR_UNEXPECTED;
+ struct dc_plane_state *dc_plane_state = NULL;
+ struct dc_state *dc_state = NULL;
+
+ if (!stream)
+ goto cleanup;
+
+ dc_plane_state = dc_create_plane_state(dc);
+ if (!dc_plane_state)
+ goto cleanup;
+
+ dc_state = dc_state_create(dc, NULL);
+ if (!dc_state)
+ goto cleanup;
+
+	/* Populate the plane from the stream's source dimensions. */
+ dc_plane_state->src_rect.height = stream->src.height;
+ dc_plane_state->src_rect.width = stream->src.width;
+ dc_plane_state->dst_rect.height = stream->src.height;
+ dc_plane_state->dst_rect.width = stream->src.width;
+ dc_plane_state->clip_rect.height = stream->src.height;
+ dc_plane_state->clip_rect.width = stream->src.width;
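+	/* Align the surface pitch up to a multiple of 256 pixels. */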
+ dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
+ dc_plane_state->plane_size.surface_size.height = stream->src.height;
+ dc_plane_state->plane_size.surface_size.width = stream->src.width;
+ dc_plane_state->plane_size.chroma_size.height = stream->src.height;
+ dc_plane_state->plane_size.chroma_size.width = stream->src.width;
+ dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+ dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
+ dc_plane_state->rotation = ROTATION_ANGLE_0;
+ dc_plane_state->is_tiling_rotated = false;
+ dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;
+
+ dc_result = dc_validate_stream(dc, stream);
+ if (dc_result == DC_OK)
+ dc_result = dc_validate_plane(dc, dc_plane_state);
+
+ if (dc_result == DC_OK)
+ dc_result = dc_state_add_stream(dc, dc_state, stream);
+
+ if (dc_result == DC_OK && !dc_state_add_plane(
+ dc,
+ stream,
+ dc_plane_state,
+ dc_state))
+ dc_result = DC_FAIL_ATTACH_SURFACES;
+
+ if (dc_result == DC_OK)
+ dc_result = dc_validate_global_state(dc, dc_state, true);
+
+cleanup:
+ if (dc_state)
+ dc_state_release(dc_state);
+
+ if (dc_plane_state)
+ dc_plane_state_release(dc_plane_state);
+
+ return dc_result;
+}
+
+struct dc_stream_state *
+create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ const struct drm_display_mode *drm_mode,
+ const struct dm_connector_state *dm_state,
+ const struct dc_stream_state *old_stream)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
+ struct dc_stream_state *stream;
+ const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
+ int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
+ enum dc_status dc_result = DC_OK;
+
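+	/* Retry validation, lowering the bpc by 2 each time (down to 6), until DC accepts the stream. */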
+ do {
+ stream = create_stream_for_sink(connector, drm_mode,
+ dm_state, old_stream,
+ requested_bpc);
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ break;
+ }
+
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return stream;
+
+ dc_result = dc_validate_stream(adev->dm.dc, stream);
+ if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
+
+ if (dc_result == DC_OK)
+ dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
+
+ if (dc_result != DC_OK) {
+ DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
+ drm_mode->hdisplay,
+ drm_mode->vdisplay,
+ drm_mode->clock,
+ dc_result,
+ dc_status_to_str(dc_result));
+
+ dc_stream_release(stream);
+ stream = NULL;
+ requested_bpc -= 2; /* lower bpc to retry validation */
+ }
+
+ } while (stream == NULL && requested_bpc >= 6);
+
+ if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
+ DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
+
+ aconnector->force_yuv420_output = true;
+ stream = create_validate_stream_for_sink(aconnector, drm_mode,
+ dm_state, old_stream);
+ aconnector->force_yuv420_output = false;
+ }
+
+ return stream;
+}
+
+enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int result = MODE_ERROR;
+ struct dc_sink *dc_sink;
+ /* TODO: Unhardcode stream count */
+ struct dc_stream_state *stream;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
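+	/* Interlaced and doublescan modes are rejected outright. */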
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN))
+ return result;
+
+ /*
+	 * Only run this the first time mode_valid is called, to initialize
+	 * EDID mgmt.
+ */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
+ !aconnector->dc_em_sink)
+ handle_edid_mgmt(aconnector);
+
+ dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
+
+ if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
+ aconnector->base.force != DRM_FORCE_ON) {
+ DRM_ERROR("dc_sink is NULL!\n");
+ goto fail;
+ }
+
+ drm_mode_set_crtcinfo(mode, 0);
+
+ stream = create_validate_stream_for_sink(aconnector, mode,
+ to_dm_connector_state(connector->state),
+ NULL);
+ if (stream) {
+ dc_stream_release(stream);
+ result = MODE_OK;
+ }
+
+fail:
+	/* TODO: error handling */
+ return result;
+}
+
+static int fill_hdr_info_packet(const struct drm_connector_state *state,
+ struct dc_info_packet *out)
+{
+ struct hdmi_drm_infoframe frame;
+ unsigned char buf[30]; /* 26 + 4 */
+ ssize_t len;
+ int ret, i;
+
+ memset(out, 0, sizeof(*out));
+
+ if (!state->hdr_output_metadata)
+ return 0;
+
+ ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
+ if (ret)
+ return ret;
+
+ len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
+ if (len < 0)
+ return (int)len;
+
+ /* Static metadata is a fixed 26 bytes + 4 byte header. */
+ if (len != 30)
+ return -EINVAL;
+
+ /* Prepare the infopacket for DC. */
+ switch (state->connector->connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ out->hb0 = 0x87; /* type */
+ out->hb1 = 0x01; /* version */
+ out->hb2 = 0x1A; /* length */
+ out->sb[0] = buf[3]; /* checksum */
+ i = 1;
+ break;
+
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ out->hb0 = 0x00; /* sdp id, zero */
+ out->hb1 = 0x87; /* type */
+ out->hb2 = 0x1D; /* payload len - 1 */
+ out->hb3 = (0x13 << 2); /* sdp version */
+ out->sb[0] = 0x01; /* version */
+ out->sb[1] = 0x1A; /* length */
+ i = 2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(&out->sb[i], &buf[4], 26);
+ out->valid = true;
+
+ print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
+ sizeof(out->sb), false);
+
+ return 0;
+}
+
+static int
+amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *new_con_state =
+ drm_atomic_get_new_connector_state(state, conn);
+ struct drm_connector_state *old_con_state =
+ drm_atomic_get_old_connector_state(state, conn);
+ struct drm_crtc *crtc = new_con_state->crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
+ int ret;
+
+ trace_amdgpu_dm_connector_atomic_check(new_con_state);
+
+ if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!crtc)
+ return 0;
+
+ if (new_con_state->colorspace != old_con_state->colorspace) {
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ new_crtc_state->mode_changed = true;
+ }
+
+ if (new_con_state->content_type != old_con_state->content_type) {
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ new_crtc_state->mode_changed = true;
+ }
+
+ if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
+ struct dc_info_packet hdr_infopacket;
+
+ ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
+ if (ret)
+ return ret;
+
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ /*
+ * DC considers the stream backends changed if the
+ * static metadata changes. Forcing the modeset also
+ * gives a simple way for userspace to switch from
+ * 8bpc to 10bpc when setting the metadata to enter
+ * or exit HDR.
+ *
+ * Changing the static metadata after it's been
+ * set is permissible, however. So only force a
+ * modeset if we're entering or exiting HDR.
+ */
+ new_crtc_state->mode_changed = new_crtc_state->mode_changed ||
+ !old_con_state->hdr_output_metadata ||
+ !new_con_state->hdr_output_metadata;
+ }
+
+ return 0;
+}
+
+static const struct drm_connector_helper_funcs
+amdgpu_dm_connector_helper_funcs = {
+ /*
+	 * If hotplugging a second, bigger display in FB Con mode, bigger resolution
+	 * modes will be filtered out by drm_mode_validate_size(), and those modes
+	 * are missing after the user starts lightdm. So we need to renew the modes
+	 * list in the get_modes callback, not just return the modes count.
+ */
+ .get_modes = get_modes,
+ .mode_valid = amdgpu_dm_connector_mode_valid,
+ .atomic_check = amdgpu_dm_connector_atomic_check,
+};
+
+static void dm_encoder_helper_disable(struct drm_encoder *encoder)
+{
+
+}
+
+int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
+{
+ switch (display_color_depth) {
+ case COLOR_DEPTH_666:
+ return 6;
+ case COLOR_DEPTH_888:
+ return 8;
+ case COLOR_DEPTH_101010:
+ return 10;
+ case COLOR_DEPTH_121212:
+ return 12;
+ case COLOR_DEPTH_141414:
+ return 14;
+ case COLOR_DEPTH_161616:
+ return 16;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_atomic_state *state = crtc_state->state;
+ struct drm_connector *connector = conn_state->connector;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct drm_dp_mst_port *mst_port;
+ struct drm_dp_mst_topology_state *mst_state;
+ enum dc_color_depth color_depth;
+ int clock, bpp = 0;
+ bool is_y420 = false;
+
+ if (!aconnector->mst_output_port)
+ return 0;
+
+ mst_port = aconnector->mst_output_port;
+ mst_mgr = &aconnector->mst_root->mst_mgr;
+
+ if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
+ return 0;
+
+ mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
+
+ if (!state->duplicated) {
+ int max_bpc = conn_state->max_requested_bpc;
+
+ is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
+ aconnector->force_yuv420_output;
+ color_depth = convert_color_depth_from_display_info(connector,
+ is_y420,
+ max_bpc);
+ bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
+ clock = adjusted_mode->clock;
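+		/* drm_dp_calc_pbn_mode() takes bpp in 1/16th-of-a-bit units, hence bpp << 4. */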
+ dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
+ }
+
+ dm_new_connector_state->vcpi_slots =
+ drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
+ dm_new_connector_state->pbn);
+ if (dm_new_connector_state->vcpi_slots < 0) {
+ DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
+ return dm_new_connector_state->vcpi_slots;
+ }
+ return 0;
+}
+
+const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
+ .disable = dm_encoder_helper_disable,
+ .atomic_check = dm_encoder_helper_atomic_check
+};
+
+static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars)
+{
+ struct dc_stream_state *stream = NULL;
+ struct drm_connector *connector;
+ struct drm_connector_state *new_con_state;
+ struct amdgpu_dm_connector *aconnector;
+ struct dm_connector_state *dm_conn_state;
+ int i, j, ret;
+ int vcpi, pbn_div, pbn, slot_num = 0;
+
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!aconnector->mst_output_port)
+ continue;
+
+ if (!new_con_state || !new_con_state->crtc)
+ continue;
+
+ dm_conn_state = to_dm_connector_state(new_con_state);
+
+ for (j = 0; j < dc_state->stream_count; j++) {
+ stream = dc_state->streams[j];
+ if (!stream)
+ continue;
+
+ if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
+ break;
+
+ stream = NULL;
+ }
+
+ if (!stream)
+ continue;
+
+ pbn_div = dm_mst_get_pbn_divider(stream->link);
+		/* pbn is calculated by compute_mst_dsc_configs_for_state() */
+ for (j = 0; j < dc_state->stream_count; j++) {
+ if (vars[j].aconnector == aconnector) {
+ pbn = vars[j].pbn;
+ break;
+ }
+ }
+
+ if (j == dc_state->stream_count)
+ continue;
+
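+		/* pbn_div is the PBN capacity of one time slot; round up to get the slot count. */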
+ slot_num = DIV_ROUND_UP(pbn, pbn_div);
+
+ if (stream->timing.flags.DSC != 1) {
+ dm_conn_state->pbn = pbn;
+ dm_conn_state->vcpi_slots = slot_num;
+
+ ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port,
+ dm_conn_state->pbn, false);
+ if (ret < 0)
+ return ret;
+
+ continue;
+ }
+
+ vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true);
+ if (vcpi < 0)
+ return vcpi;
+
+ dm_conn_state->pbn = pbn;
+ dm_conn_state->vcpi_slots = vcpi;
+ }
+ return 0;
+}
+
+static int to_drm_connector_type(enum signal_type st)
+{
+ switch (st) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return DRM_MODE_CONNECTOR_HDMIA;
+ case SIGNAL_TYPE_EDP:
+ return DRM_MODE_CONNECTOR_eDP;
+ case SIGNAL_TYPE_LVDS:
+ return DRM_MODE_CONNECTOR_LVDS;
+ case SIGNAL_TYPE_RGB:
+ return DRM_MODE_CONNECTOR_VGA;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ return DRM_MODE_CONNECTOR_DisplayPort;
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ return DRM_MODE_CONNECTOR_DVID;
+ case SIGNAL_TYPE_VIRTUAL:
+ return DRM_MODE_CONNECTOR_VIRTUAL;
+
+ default:
+ return DRM_MODE_CONNECTOR_Unknown;
+ }
+}
+
+static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+
+ /* There is only one encoder per connector */
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
+
+ return NULL;
+}
+
+static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+
+ encoder = amdgpu_dm_connector_to_encoder(connector);
+
+ if (encoder == NULL)
+ return;
+
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ amdgpu_encoder->native_mode.clock = 0;
+
+ if (!list_empty(&connector->probed_modes)) {
+ struct drm_display_mode *preferred_mode = NULL;
+
+ list_for_each_entry(preferred_mode,
+ &connector->probed_modes,
+ head) {
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
+ amdgpu_encoder->native_mode = *preferred_mode;
+
+ break;
+ }
+
+ }
+}
+
+static struct drm_display_mode *
+amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
+ char *name,
+ int hdisplay, int vdisplay)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+
+ mode = drm_mode_duplicate(dev, native_mode);
+
+ if (mode == NULL)
+ return NULL;
+
+ mode->hdisplay = hdisplay;
+ mode->vdisplay = vdisplay;
+ mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+ strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+
+ return mode;
+
+}
+
+static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ int i;
+ int n;
+ struct mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
+ int w;
+ int h;
+ } common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
+ };
+
+ n = ARRAY_SIZE(common_modes);
+
+ for (i = 0; i < n; i++) {
+ struct drm_display_mode *curmode = NULL;
+ bool mode_existed = false;
+
+ if (common_modes[i].w > native_mode->hdisplay ||
+ common_modes[i].h > native_mode->vdisplay ||
+ (common_modes[i].w == native_mode->hdisplay &&
+ common_modes[i].h == native_mode->vdisplay))
+ continue;
+
+ list_for_each_entry(curmode, &connector->probed_modes, head) {
+ if (common_modes[i].w == curmode->hdisplay &&
+ common_modes[i].h == curmode->vdisplay) {
+ mode_existed = true;
+ break;
+ }
+ }
+
+ if (mode_existed)
+ continue;
+
+ mode = amdgpu_dm_create_common_mode(encoder,
+ common_modes[i].name, common_modes[i].w,
+ common_modes[i].h);
+ if (!mode)
+ continue;
+
+ drm_mode_probed_add(connector, mode);
+ amdgpu_dm_connector->num_modes++;
+ }
+}
+
+static void amdgpu_set_panel_orientation(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+ const struct drm_display_mode *native_mode;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+ connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+ return;
+
+ mutex_lock(&connector->dev->mode_config.mutex);
+ amdgpu_dm_connector_get_modes(connector);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
+ encoder = amdgpu_dm_connector_to_encoder(connector);
+ if (!encoder)
+ return;
+
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ native_mode = &amdgpu_encoder->native_mode;
+ if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
+ return;
+
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ native_mode->hdisplay,
+ native_mode->vdisplay);
+}
+
+static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ if (edid) {
+ /* empty probed_modes */
+ INIT_LIST_HEAD(&connector->probed_modes);
+ amdgpu_dm_connector->num_modes =
+ drm_add_edid_modes(connector, edid);
+
+		/* Sort the probed modes before calling
+		 * amdgpu_dm_get_native_mode(), since an EDID can have
+		 * more than one preferred mode. Modes later in the
+		 * probed mode list could be of a higher, preferred
+		 * resolution: for example, a 3840x2160 preferred timing
+		 * in the base EDID and a 4096x2160 preferred resolution
+		 * in a DID extension block later.
+		 */
+ drm_mode_sort(&connector->probed_modes);
+ amdgpu_dm_get_native_mode(connector);
+
+ /* Freesync capabilities are reset by calling
+ * drm_add_edid_modes() and need to be
+ * restored here.
+ */
+ amdgpu_dm_update_freesync_caps(connector, edid);
+ } else {
+ amdgpu_dm_connector->num_modes = 0;
+ }
+}
+
+static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
+ struct drm_display_mode *mode)
+{
+ struct drm_display_mode *m;
+
+ list_for_each_entry(m, &aconnector->base.probed_modes, head) {
+ if (drm_mode_equal(m, mode))
+ return true;
+ }
+
+ return false;
+}
+
+static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
+{
+ const struct drm_display_mode *m;
+ struct drm_display_mode *new_mode;
+ uint i;
+ u32 new_modes_count = 0;
+
+ /* Standard FPS values
+ *
+ * 23.976 - TV/NTSC
+ * 24 - Cinema
+ * 25 - TV/PAL
+ * 29.97 - TV/NTSC
+ * 30 - TV/NTSC
+ * 48 - Cinema HFR
+ * 50 - TV/PAL
+ * 60 - Commonly used
+ * 48,72,96,120 - Multiples of 24
+ */
+ static const u32 common_rates[] = {
+ 23976, 24000, 25000, 29970, 30000,
+ 48000, 50000, 60000, 72000, 96000, 120000
+ };
+
+ /*
+ * Find mode with highest refresh rate with the same resolution
+ * as the preferred mode. Some monitors report a preferred mode
+ * with lower resolution than the highest refresh rate supported.
+ */
+
+ m = get_highest_refresh_rate_mode(aconnector, true);
+ if (!m)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
+ u64 target_vtotal, target_vtotal_diff;
+ u64 num, den;
+
+ if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
+ continue;
+
+ if (common_rates[i] < aconnector->min_vfreq * 1000 ||
+ common_rates[i] > aconnector->max_vfreq * 1000)
+ continue;
+
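+		/*
+		 * target_vtotal = pixel clock / (refresh rate * htotal);
+		 * m->clock is in kHz and common_rates[] in mHz, hence the
+		 * factor of 1000 * 1000 in the numerator.
+		 */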
+ num = (unsigned long long)m->clock * 1000 * 1000;
+ den = common_rates[i] * (unsigned long long)m->htotal;
+ target_vtotal = div_u64(num, den);
+ target_vtotal_diff = target_vtotal - m->vtotal;
+
+ /* Check for illegal modes */
+ if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
+ m->vsync_end + target_vtotal_diff < m->vsync_start ||
+ m->vtotal + target_vtotal_diff < m->vsync_end)
+ continue;
+
+ new_mode = drm_mode_duplicate(aconnector->base.dev, m);
+ if (!new_mode)
+ goto out;
+
+ new_mode->vtotal += (u16)target_vtotal_diff;
+ new_mode->vsync_start += (u16)target_vtotal_diff;
+ new_mode->vsync_end += (u16)target_vtotal_diff;
+ new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+ new_mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ if (!is_duplicate_mode(aconnector, new_mode)) {
+ drm_mode_probed_add(&aconnector->base, new_mode);
+ new_modes_count += 1;
+ } else
+ drm_mode_destroy(aconnector->base.dev, new_mode);
+ }
+ out:
+ return new_modes_count;
+}
+
+static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ if (!(amdgpu_freesync_vid_mode && edid))
+ return;
+
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ amdgpu_dm_connector->num_modes +=
+ add_fs_modes(amdgpu_dm_connector);
+}
+
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ struct drm_encoder *encoder;
+ struct edid *edid = amdgpu_dm_connector->edid;
+ struct dc_link_settings *verified_link_cap =
+ &amdgpu_dm_connector->dc_link->verified_link_cap;
+ const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
+
+ encoder = amdgpu_dm_connector_to_encoder(connector);
+
+ if (!drm_edid_is_valid(edid)) {
+ amdgpu_dm_connector->num_modes =
+ drm_add_modes_noedid(connector, 640, 480);
+ if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
+ amdgpu_dm_connector->num_modes +=
+ drm_add_modes_noedid(connector, 1920, 1080);
+ } else {
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+ amdgpu_dm_connector_add_freesync_modes(connector, edid);
+ }
+ amdgpu_dm_fbc_init(connector);
+
+ return amdgpu_dm_connector->num_modes;
+}
+
+static const u32 supported_colorspaces =
+ BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
+ BIT(DRM_MODE_COLORIMETRY_OPRGB) |
+ BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
+ BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
+
+void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ int connector_type,
+ struct dc_link *link,
+ int link_index)
+{
+ struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+
+ /*
+ * Some of the properties below require access to state, like bpc.
+ * Allocate some default initial connector state with our reset helper.
+ */
+ if (aconnector->base.funcs->reset)
+ aconnector->base.funcs->reset(&aconnector->base);
+
+ aconnector->connector_id = link_index;
+ aconnector->bl_idx = -1;
+ aconnector->dc_link = link;
+ aconnector->base.interlace_allowed = false;
+ aconnector->base.doublescan_allowed = false;
+ aconnector->base.stereo_allowed = false;
+ aconnector->base.dpms = DRM_MODE_DPMS_OFF;
+ aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
+ aconnector->audio_inst = -1;
+ aconnector->pack_sdp_v1_3 = false;
+ aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
+ memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
+ mutex_init(&aconnector->hpd_lock);
+ mutex_init(&aconnector->handle_mst_msg_ready);
+
+ /*
+	 * Configure HPD hot plug support. connector->polled defaults to 0,
+	 * which means HPD hot plug is not supported.
+ */
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ link->link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link->link_enc);
+ if (link->link_enc)
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.dp_ycbcr420_supported ? true : false;
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ default:
+ break;
+ }
+
+ drm_object_attach_property(&aconnector->base.base,
+ dm->ddev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
+
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_property,
+ UNDERSCAN_OFF);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_hborder_property,
+ 0);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_vborder_property,
+ 0);
+
+ if (!aconnector->mst_root)
+ drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
+
+ aconnector->base.state->max_bpc = 16;
+ aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
+
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ /* Content Type is currently only implemented for HDMI. */
+ drm_connector_attach_content_type_property(&aconnector->base);
+ }
+
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces))
+ drm_connector_attach_colorspace_property(&aconnector->base);
+ } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) ||
+ connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces))
+ drm_connector_attach_colorspace_property(&aconnector->base);
+ }
+
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_eDP) {
+ drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
+
+ if (!aconnector->mst_root)
+ drm_connector_attach_vrr_capable_property(&aconnector->base);
+
+ if (adev->dm.hdcp_workqueue)
+ drm_connector_attach_content_protection_property(&aconnector->base, true);
+ }
+}
+
+static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
+ struct ddc_service *ddc_service = i2c->ddc_service;
+ struct i2c_command cmd;
+ int i;
+ int result = -EIO;
+
+ if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
+ return result;
+
+ cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
+
+ if (!cmd.payloads)
+ return result;
+
+ cmd.number_of_payloads = num;
+ cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
+ cmd.speed = 100;
+
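+	/* Translate each struct i2c_msg into a DC i2c_payload. */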
+ for (i = 0; i < num; i++) {
+ cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
+ cmd.payloads[i].address = msgs[i].addr;
+ cmd.payloads[i].length = msgs[i].len;
+ cmd.payloads[i].data = msgs[i].buf;
+ }
+
+ if (dc_submit_i2c(
+ ddc_service->ctx->dc,
+ ddc_service->link->link_index,
+ &cmd))
+ result = num;
+
+ kfree(cmd.payloads);
+ return result;
+}
+
+static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
+ .master_xfer = amdgpu_dm_i2c_xfer,
+ .functionality = amdgpu_dm_i2c_func,
+};
+
+static struct amdgpu_i2c_adapter *
+create_i2c(struct ddc_service *ddc_service,
+ int link_index,
+ int *res)
+{
+ struct amdgpu_device *adev = ddc_service->ctx->driver_context;
+ struct amdgpu_i2c_adapter *i2c;
+
+ i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+ i2c->base.owner = THIS_MODULE;
+ i2c->base.dev.parent = &adev->pdev->dev;
+ i2c->base.algo = &amdgpu_dm_i2c_algo;
+ snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
+ i2c_set_adapdata(&i2c->base, i2c);
+ i2c->ddc_service = ddc_service;
+
+ return i2c;
+}
+
+
+/*
+ * Note: this function assumes that dc_link_detect() was called for the
+ * dc_link which will be represented by this aconnector.
+ */
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ u32 link_index,
+ struct amdgpu_encoder *aencoder)
+{
+ int res = 0;
+ int connector_type;
+ struct dc *dc = dm->dc;
+ struct dc_link *link = dc_get_link_at_index(dc, link_index);
+ struct amdgpu_i2c_adapter *i2c;
+
+ /* Not needed for writeback connector */
+ link->priv = aconnector;
+
+
+ i2c = create_i2c(link->ddc, link->link_index, &res);
+ if (!i2c) {
+ DRM_ERROR("Failed to create i2c adapter data\n");
+ return -ENOMEM;
+ }
+
+ aconnector->i2c = i2c;
+ res = i2c_add_adapter(&i2c->base);
+
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
+ goto out_free;
+ }
+
+ connector_type = to_drm_connector_type(link->connector_signal);
+
+ res = drm_connector_init_with_ddc(
+ dm->ddev,
+ &aconnector->base,
+ &amdgpu_dm_connector_funcs,
+ connector_type,
+ &i2c->base);
+
+ if (res) {
+ DRM_ERROR("connector_init failed\n");
+ aconnector->connector_id = -1;
+ goto out_free;
+ }
+
+ drm_connector_helper_add(
+ &aconnector->base,
+ &amdgpu_dm_connector_helper_funcs);
+
+ amdgpu_dm_connector_init_helper(
+ dm,
+ aconnector,
+ connector_type,
+ link,
+ link_index);
+
+ drm_connector_attach_encoder(
+ &aconnector->base, &aencoder->base);
+
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
+ || connector_type == DRM_MODE_CONNECTOR_eDP)
+ amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
+
+out_free:
+ if (res) {
+ kfree(i2c);
+ aconnector->i2c = NULL;
+ }
+ return res;
+}
+
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
+{
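+	/* One bit per CRTC: (1 << num_crtc) - 1, capped at six CRTCs. */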
+ switch (adev->mode_info.num_crtc) {
+ case 1:
+ return 0x1;
+ case 2:
+ return 0x3;
+ case 3:
+ return 0x7;
+ case 4:
+ return 0xf;
+ case 5:
+ return 0x1f;
+ case 6:
+ default:
+ return 0x3f;
+ }
+}
+
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ int res = drm_encoder_init(dev,
+ &aencoder->base,
+ &amdgpu_dm_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS,
+ NULL);
+
+ aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+
+ if (!res)
+ aencoder->encoder_id = link_index;
+ else
+ aencoder->encoder_id = -1;
+
+ drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
+
+ return res;
+}
+
+static void manage_dm_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ bool enable)
+{
+ /*
+ * We have no guarantee that the frontend index maps to the same
+ * backend index - some even map to more than one.
+ *
+ * TODO: Use a different interrupt or check DC itself for the mapping.
+ */
+ int irq_type =
+ amdgpu_display_crtc_idx_to_irq_type(
+ adev,
+ acrtc->crtc_id);
+
+ if (enable) {
+ drm_crtc_vblank_on(&acrtc->base);
+ amdgpu_irq_get(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ amdgpu_irq_get(
+ adev,
+ &adev->vline0_irq,
+ irq_type);
+#endif
+ } else {
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ amdgpu_irq_put(
+ adev,
+ &adev->vline0_irq,
+ irq_type);
+#endif
+ amdgpu_irq_put(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+ drm_crtc_vblank_off(&acrtc->base);
+ }
+}
+
+static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc)
+{
+ int irq_type =
+ amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
+
+	/*
+	 * This reads the current state for the IRQ and forces a reapply of
+	 * the setting to the hardware.
+	 */
+ amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
+}
+
+static bool
+is_scaling_state_different(const struct dm_connector_state *dm_state,
+ const struct dm_connector_state *old_dm_state)
+{
+ if (dm_state->scaling != old_dm_state->scaling)
+ return true;
+ if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
+ if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
+ return true;
+ } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
+ if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
+ return true;
+ } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
+ dm_state->underscan_vborder != old_dm_state->underscan_vborder)
+ return true;
+ return false;
+}
+
+static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
+ struct drm_crtc_state *old_crtc_state,
+ struct drm_connector_state *new_conn_state,
+ struct drm_connector_state *old_conn_state,
+ const struct drm_connector *connector,
+ struct hdcp_workqueue *hdcp_w)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
+
+ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ connector->index, connector->status, connector->dpms);
+ pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ old_conn_state->content_protection, new_conn_state->content_protection);
+
+ if (old_crtc_state)
+ pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ old_crtc_state->enable,
+ old_crtc_state->active,
+ old_crtc_state->mode_changed,
+ old_crtc_state->active_changed,
+ old_crtc_state->connectors_changed);
+
+ if (new_crtc_state)
+ pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* hdcp content type change */
+ if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
+ new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
+ return true;
+ }
+
+	/* CP is being re-enabled, ignore this */
+ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (new_crtc_state && new_crtc_state->mode_changed) {
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
+ return true;
+ }
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
+ return false;
+ }
+
+	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
+ *
+ * Handles: UNDESIRED -> ENABLED
+ */
+ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+
+ /* Stream removed and re-enabled
+ *
+ * Can sometimes overlap with the HPD case,
+ * thus set update_hdcp to false to avoid
+ * setting HDCP multiple times.
+ *
+ * Handles: DESIRED -> DESIRED (Special case)
+ */
+ if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
+ new_conn_state->crtc && new_conn_state->crtc->enabled &&
+ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ dm_con_state->update_hdcp = false;
+ pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
+ __func__);
+ return true;
+ }
+
+ /* Hot-plug, headless s3, dpms
+ *
+ * Only start HDCP if the display is connected/enabled.
+ * update_hdcp flag will be set to false until the next
+ * HPD comes in.
+ *
+ * Handles: DESIRED -> DESIRED (Special case)
+ */
+ if (dm_con_state->update_hdcp &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+ connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
+ dm_con_state->update_hdcp = false;
+ pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
+ __func__);
+ return true;
+ }
+
+ if (old_conn_state->content_protection == new_conn_state->content_protection) {
+ if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (new_crtc_state && new_crtc_state->mode_changed) {
+ pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
+ __func__);
+ return true;
+ }
+ pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
+ __func__);
+ return false;
+ }
+
+ pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
+ return false;
+ }
+
+ if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
+ __func__);
+ return true;
+ }
+
+ pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
+ return false;
+}
+
+static void remove_stream(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ struct dc_stream_state *stream)
+{
+ /* this is the update mode case */
+
+ acrtc->otg_inst = -1;
+ acrtc->enabled = false;
+}
+
+static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
+{
+
+ assert_spin_locked(&acrtc->base.dev->event_lock);
+ WARN_ON(acrtc->event);
+
+ acrtc->event = acrtc->base.state->event;
+
+ /* Set the flip status */
+ acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+
+ /* Mark this event as consumed */
+ acrtc->base.state->event = NULL;
+
+ drm_dbg_state(acrtc->base.dev,
+ "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
+ acrtc->crtc_id);
+}
+
+static void update_freesync_state_on_stream(
+ struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *new_crtc_state,
+ struct dc_stream_state *new_stream,
+ struct dc_plane_state *surface,
+ u32 flip_timestamp_in_us)
+{
+ struct mod_vrr_params vrr_params;
+ struct dc_info_packet vrr_infopacket = {0};
+ struct amdgpu_device *adev = dm->adev;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
+ unsigned long flags;
+ bool pack_sdp_v1_3 = false;
+ struct amdgpu_dm_connector *aconn;
+ enum vrr_packet_type packet_type = PACKET_TYPE_VRR;
+
+ if (!new_stream)
+ return;
+
+ /*
+ * TODO: Determine why min/max totals and vrefresh can be 0 here.
+ * For now it's sufficient to just guard against these conditions.
+ */
+
+ if (!new_stream->timing.h_total || !new_stream->timing.v_total)
+ return;
+
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ vrr_params = acrtc->dm_irq_params.vrr_params;
+
+ if (surface) {
+ mod_freesync_handle_preflip(
+ dm->freesync_module,
+ surface,
+ new_stream,
+ flip_timestamp_in_us,
+ &vrr_params);
+
+ if (adev->family < AMDGPU_FAMILY_AI &&
+ amdgpu_dm_crtc_vrr_active(new_crtc_state)) {
+ mod_freesync_handle_v_update(dm->freesync_module,
+ new_stream, &vrr_params);
+
+ /* Need to call this before the frame ends. */
+ dc_stream_adjust_vmin_vmax(dm->dc,
+ new_crtc_state->stream,
+ &vrr_params.adjust);
+ }
+ }
+
+ aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context;
+
+ if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) {
+ pack_sdp_v1_3 = aconn->pack_sdp_v1_3;
+
+ if (aconn->vsdb_info.amd_vsdb_version == 1)
+ packet_type = PACKET_TYPE_FS_V1;
+ else if (aconn->vsdb_info.amd_vsdb_version == 2)
+ packet_type = PACKET_TYPE_FS_V2;
+ else if (aconn->vsdb_info.amd_vsdb_version == 3)
+ packet_type = PACKET_TYPE_FS_V3;
+
+ mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL,
+ &new_stream->adaptive_sync_infopacket);
+ }
+
+ mod_freesync_build_vrr_infopacket(
+ dm->freesync_module,
+ new_stream,
+ &vrr_params,
+ packet_type,
+ TRANSFER_FUNC_UNKNOWN,
+ &vrr_infopacket,
+ pack_sdp_v1_3);
+
+ new_crtc_state->freesync_vrr_info_changed |=
+ (memcmp(&new_crtc_state->vrr_infopacket,
+ &vrr_infopacket,
+ sizeof(vrr_infopacket)) != 0);
+
+ acrtc->dm_irq_params.vrr_params = vrr_params;
+ new_crtc_state->vrr_infopacket = vrr_infopacket;
+
+ new_stream->vrr_infopacket = vrr_infopacket;
+ new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params);
+
+ if (new_crtc_state->freesync_vrr_info_changed)
+ DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
+ new_crtc_state->base.crtc->base.id,
+ (int)new_crtc_state->base.vrr_enabled,
+ (int)vrr_params.state);
+
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+}
+
+static void update_stream_irq_parameters(
+ struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *new_crtc_state)
+{
+ struct dc_stream_state *new_stream = new_crtc_state->stream;
+ struct mod_vrr_params vrr_params;
+ struct mod_freesync_config config = new_crtc_state->freesync_config;
+ struct amdgpu_device *adev = dm->adev;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
+ unsigned long flags;
+
+ if (!new_stream)
+ return;
+
+ /*
+ * TODO: Determine why min/max totals and vrefresh can be 0 here.
+ * For now it's sufficient to just guard against these conditions.
+ */
+ if (!new_stream->timing.h_total || !new_stream->timing.v_total)
+ return;
+
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ vrr_params = acrtc->dm_irq_params.vrr_params;
+
+ if (new_crtc_state->vrr_supported &&
+ config.min_refresh_in_uhz &&
+ config.max_refresh_in_uhz) {
+		/*
+		 * If a freesync compatible mode was set, config.state will
+		 * already have been set in atomic check.
+		 */
+ if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
+ (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
+ new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
+ vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
+ vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
+ vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
+ vrr_params.state = VRR_STATE_ACTIVE_FIXED;
+ } else {
+ config.state = new_crtc_state->base.vrr_enabled ?
+ VRR_STATE_ACTIVE_VARIABLE :
+ VRR_STATE_INACTIVE;
+ }
+ } else {
+ config.state = VRR_STATE_UNSUPPORTED;
+ }
+
+ mod_freesync_build_vrr_params(dm->freesync_module,
+ new_stream,
+ &config, &vrr_params);
+
+ new_crtc_state->freesync_config = config;
+ /* Copy state for access from DM IRQ handler */
+ acrtc->dm_irq_params.freesync_config = config;
+ acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
+ acrtc->dm_irq_params.vrr_params = vrr_params;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+}
+
+static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
+ struct dm_crtc_state *new_state)
+{
+ bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state);
+ bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state);
+
+ if (!old_vrr_active && new_vrr_active) {
+		/* Transition VRR inactive -> active:
+		 * While VRR is active, we must not disable the vblank irq, as a
+		 * reenable after a disable would compute bogus vblank/pflip
+		 * timestamps if it happened inside the display front porch.
+		 *
+		 * We also need the vupdate irq for the actual core vblank handling
+		 * at the end of vblank.
+		 */
+ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
+ WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
+ DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
+ __func__, new_state->base.crtc->base.id);
+ } else if (old_vrr_active && !new_vrr_active) {
+ /* Transition VRR active -> inactive:
+ * Allow vblank irq disable again for fixed refresh rate.
+ */
+ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
+ drm_crtc_vblank_put(new_state->base.crtc);
+ DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
+ __func__, new_state->base.crtc->base.id);
+ }
+}
+
+static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state;
+ int i;
+
+ /*
+ * TODO: Make this per-stream so we don't issue redundant updates for
+ * commits with multiple streams.
+ */
+ for_each_old_plane_in_state(state, plane, old_plane_state, i)
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state);
+}
+
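+/*
+ * Return the TTM memory type (e.g. VRAM or GTT) backing the framebuffer's
+ * buffer object, or 0 if no resource is attached.
+ */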
+static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
+{
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
+
+ return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
+}
+
+static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ struct drm_device *dev,
+ struct amdgpu_display_manager *dm,
+ struct drm_crtc *pcrtc,
+ bool wait_for_vblank)
+{
+ u32 i;
+ u64 timestamp_ns = ktime_get_ns();
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
+ struct drm_crtc_state *new_pcrtc_state =
+ drm_atomic_get_new_crtc_state(state, pcrtc);
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
+ struct dm_crtc_state *dm_old_crtc_state =
+ to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
+ int planes_count = 0, vpos, hpos;
+ unsigned long flags;
+ u32 target_vblank, last_flip_vblank;
+ bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
+ bool cursor_update = false;
+ bool pflip_present = false;
+ bool dirty_rects_changed = false;
+ struct {
+ struct dc_surface_update surface_updates[MAX_SURFACES];
+ struct dc_plane_info plane_infos[MAX_SURFACES];
+ struct dc_scaling_info scaling_infos[MAX_SURFACES];
+ struct dc_flip_addrs flip_addrs[MAX_SURFACES];
+ struct dc_stream_update stream_update;
+ } *bundle;
+
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+
+ if (!bundle) {
+ drm_err(dev, "Failed to allocate update bundle\n");
+ goto cleanup;
+ }
+
+ /*
+ * Disable the cursor first if we're disabling all the planes.
+ * It'll remain on the screen after the planes are re-enabled
+ * if we don't.
+ */
+ if (acrtc_state->active_planes == 0)
+ amdgpu_dm_commit_cursors(state);
+
+ /* update planes when needed */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_framebuffer *fb = new_plane_state->fb;
+ struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
+ bool plane_needs_flip;
+ struct dc_plane_state *dc_plane;
+ struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
+
+ /* Cursor plane is handled after stream updates */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ if ((fb && crtc == pcrtc) ||
+ (old_plane_state->fb && old_plane_state->crtc == pcrtc))
+ cursor_update = true;
+
+ continue;
+ }
+
+ if (!fb || !crtc || pcrtc != crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!new_crtc_state->active)
+ continue;
+
+ dc_plane = dm_new_plane_state->dc_state;
+ if (!dc_plane)
+ continue;
+
+ bundle->surface_updates[planes_count].surface = dc_plane;
+ if (new_pcrtc_state->color_mgmt_changed) {
+ bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction;
+ bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func;
+ bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
+ bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult;
+ bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func;
+ bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func;
+ bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf;
+ }
+
+ amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
+ &bundle->scaling_infos[planes_count]);
+
+ bundle->surface_updates[planes_count].scaling_info =
+ &bundle->scaling_infos[planes_count];
+
+ plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
+
+ pflip_present = pflip_present || plane_needs_flip;
+
+ if (!plane_needs_flip) {
+ planes_count += 1;
+ continue;
+ }
+
+ fill_dc_plane_info_and_addr(
+ dm->adev, new_plane_state,
+ afb->tiling_flags,
+ &bundle->plane_infos[planes_count],
+ &bundle->flip_addrs[planes_count].address,
+ afb->tmz_surface, false);
+
+ drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
+ new_plane_state->plane->index,
+ bundle->plane_infos[planes_count].dcc.enable);
+
+ bundle->surface_updates[planes_count].plane_info =
+ &bundle->plane_infos[planes_count];
+
+ if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
+ acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
+ fill_dc_dirty_rects(plane, old_plane_state,
+ new_plane_state, new_crtc_state,
+ &bundle->flip_addrs[planes_count],
+ acrtc_state->stream->link->psr_settings.psr_version ==
+ DC_PSR_VERSION_SU_1,
+ &dirty_rects_changed);
+
+			/*
+			 * If the dirty regions changed, PSR-SU needs to be disabled
+			 * temporarily and re-enabled after the dirty regions become
+			 * stable, to avoid video glitches. PSR-SU will be re-enabled
+			 * in vblank_control_worker() if the user pauses the video
+			 * while PSR-SU is disabled.
+			 */
+ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+ acrtc_attach->dm_irq_params.allow_psr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
+ dirty_rects_changed) {
+ mutex_lock(&dm->dc_lock);
+ acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
+ timestamp_ns;
+ if (acrtc_state->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream);
+ mutex_unlock(&dm->dc_lock);
+ }
+ }
+
+ /*
+ * Only allow immediate flips for fast updates that don't
+ * change memory domain, FB pitch, DCC state, rotation or
+ * mirroring.
+ *
+ * dm_crtc_helper_atomic_check() only accepts async flips with
+ * fast updates.
+ */
+ if (crtc->state->async_flip &&
+ (acrtc_state->update_type != UPDATE_TYPE_FAST ||
+ get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
+ drm_warn_once(state->dev,
+ "[PLANE:%d:%s] async flip with non-fast update\n",
+ plane->base.id, plane->name);
+
+ bundle->flip_addrs[planes_count].flip_immediate =
+ crtc->state->async_flip &&
+ acrtc_state->update_type == UPDATE_TYPE_FAST &&
+ get_mem_type(old_plane_state->fb) == get_mem_type(fb);
+
+ timestamp_ns = ktime_get_ns();
+ bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
+ bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
+ bundle->surface_updates[planes_count].surface = dc_plane;
+
+ if (!bundle->surface_updates[planes_count].surface) {
+ DRM_ERROR("No surface for CRTC: id=%d\n",
+ acrtc_attach->crtc_id);
+ continue;
+ }
+
+ if (plane == pcrtc->primary)
+ update_freesync_state_on_stream(
+ dm,
+ acrtc_state,
+ acrtc_state->stream,
+ dc_plane,
+ bundle->flip_addrs[planes_count].flip_timestamp_in_us);
+
+ drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
+ __func__,
+ bundle->flip_addrs[planes_count].address.grph.addr.high_part,
+ bundle->flip_addrs[planes_count].address.grph.addr.low_part);
+
+ planes_count += 1;
+
+ }
+
+ if (pflip_present) {
+ if (!vrr_active) {
+ /* Use old throttling in non-vrr fixed refresh rate mode
+ * to keep flip scheduling based on target vblank counts
+ * working in a backwards compatible way, e.g., for
+ * clients using the GLX_OML_sync_control extension or
+ * DRI3/Present extension with defined target_msc.
+ */
+ last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
+ } else {
+ /* For variable refresh rate mode only:
+ * Get vblank of last completed flip to avoid > 1 vrr
+ * flips per video frame by use of throttling, but allow
+ * flip programming anywhere in the possibly large
+ * variable vrr vblank interval for fine-grained flip
+ * timing control and more opportunity to avoid stutter
+ * on late submission of flips.
+ */
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+ last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
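+		/* wait_for_vblank is a bool used as 0/1: throttle to the vblank
+		 * after the last flip only when waiting was requested.
+		 */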
+ target_vblank = last_flip_vblank + wait_for_vblank;
+
+ /*
+ * Wait until we're out of the vertical blank period before the one
+ * targeted by the flip
+ */
+ while ((acrtc_attach->enabled &&
+ (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
+ 0, &vpos, &hpos, NULL,
+ NULL, &pcrtc->hwmode)
+ & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+ (int)(target_vblank -
+ amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
+ usleep_range(1000, 1100);
+ }
+
+		/**
+		 * Prepare the flip event for the pageflip interrupt to handle.
+		 *
+		 * This only works in the case where we've already turned on the
+		 * appropriate hardware blocks (e.g. HUBP), so in the transition
+		 * from 0 -> n planes we have to skip a hardware generated event
+		 * and rely on sending it from software.
+		 */
+ if (acrtc_attach->base.state->event &&
+ acrtc_state->active_planes > 0) {
+ drm_crtc_vblank_get(pcrtc);
+
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+
+ WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
+ prepare_flip_isr(acrtc_attach);
+
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
+ if (acrtc_state->stream) {
+ if (acrtc_state->freesync_vrr_info_changed)
+ bundle->stream_update.vrr_infopacket =
+ &acrtc_state->stream->vrr_infopacket;
+ }
+ } else if (cursor_update && acrtc_state->active_planes > 0 &&
+ acrtc_attach->base.state->event) {
+ drm_crtc_vblank_get(pcrtc);
+
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+
+ acrtc_attach->event = acrtc_attach->base.state->event;
+ acrtc_attach->base.state->event = NULL;
+
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
+ /* Update the planes if changed or disable if we don't have any. */
+ if ((planes_count || acrtc_state->active_planes == 0) &&
+ acrtc_state->stream) {
+ /*
+ * If PSR or idle optimizations are enabled then flush out
+ * any pending work before hardware programming.
+ */
+ if (dm->vblank_control_workqueue)
+ flush_workqueue(dm->vblank_control_workqueue);
+
+ bundle->stream_update.stream = acrtc_state->stream;
+ if (new_pcrtc_state->mode_changed) {
+ bundle->stream_update.src = acrtc_state->stream->src;
+ bundle->stream_update.dst = acrtc_state->stream->dst;
+ }
+
+ if (new_pcrtc_state->color_mgmt_changed) {
+ /*
+ * TODO: This isn't fully correct since we've actually
+ * already modified the stream in place.
+ */
+ bundle->stream_update.gamut_remap =
+ &acrtc_state->stream->gamut_remap_matrix;
+ bundle->stream_update.output_csc_transform =
+ &acrtc_state->stream->csc_color_matrix;
+ bundle->stream_update.out_transfer_func =
+ &acrtc_state->stream->out_transfer_func;
+ bundle->stream_update.lut3d_func =
+ (struct dc_3dlut *) acrtc_state->stream->lut3d_func;
+ bundle->stream_update.func_shaper =
+ (struct dc_transfer_func *) acrtc_state->stream->func_shaper;
+ }
+
+ acrtc_state->stream->abm_level = acrtc_state->abm_level;
+ if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
+ bundle->stream_update.abm_level = &acrtc_state->abm_level;
+
+ mutex_lock(&dm->dc_lock);
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+ acrtc_state->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream);
+ mutex_unlock(&dm->dc_lock);
+
+ /*
+ * If FreeSync state on the stream has changed then we need to
+ * re-adjust the min/max bounds now that DC doesn't handle this
+ * as part of commit.
+ */
+ if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+ dc_stream_adjust_vmin_vmax(
+ dm->dc, acrtc_state->stream,
+ &acrtc_attach->dm_irq_params.vrr_params.adjust);
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+ mutex_lock(&dm->dc_lock);
+ update_planes_and_stream_adapter(dm->dc,
+ acrtc_state->update_type,
+ planes_count,
+ acrtc_state->stream,
+ &bundle->stream_update,
+ bundle->surface_updates);
+
+		/**
+		 * Enable or disable the interrupts on the backend.
+		 *
+		 * Most pipes are put into power gating when unused.
+		 *
+		 * When power gating is enabled on a pipe, we lose its
+		 * interrupt enablement state by the time power gating is
+		 * disabled again.
+		 *
+		 * So we need to update the IRQ control state in hardware
+		 * whenever the pipe turns on (since it could previously have
+		 * been power gated) or off (since some pipes can't be power
+		 * gated on some ASICs).
+		 */
+ if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
+ dm_update_pflip_irq_state(drm_to_adev(dev),
+ acrtc_attach);
+
+ if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+ if (acrtc_state->stream->link->replay_settings.config.replay_supported &&
+ !acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
+ struct amdgpu_dm_connector *aconn =
+ (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+ amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
+ } else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+ !acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+
+ struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
+ acrtc_state->stream->dm_stream_context;
+
+ if (!aconn->disallow_edp_enter_psr)
+ amdgpu_dm_link_setup_psr(acrtc_state->stream);
+ }
+ }
+
+ /* Decrement skip count when PSR is enabled and we're doing fast updates. */
+ if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
+ acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+ struct amdgpu_dm_connector *aconn =
+ (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+
+ if (aconn->psr_skip_count > 0)
+ aconn->psr_skip_count--;
+
+ /* Allow PSR when skip count is 0. */
+ acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
+
+			/*
+			 * If the sink supports PSR SU, there is no need to rely on
+			 * a vblank event disable request to enable PSR. PSR SU
+			 * can be enabled immediately once the OS demonstrates an
+			 * adequate number of fast atomic commits to notify KMD
+			 * of update events. See `vblank_control_worker()`.
+			 */
+ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+ acrtc_attach->dm_irq_params.allow_psr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
+ !acrtc_state->stream->link->psr_settings.psr_allow_active &&
+ !aconn->disallow_edp_enter_psr &&
+ (timestamp_ns -
+ acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
+ 500000000)
+ amdgpu_dm_psr_enable(acrtc_state->stream);
+ } else {
+ acrtc_attach->dm_irq_params.allow_psr_entry = false;
+ }
+
+ mutex_unlock(&dm->dc_lock);
+ }
+
+ /*
+ * Update cursor state *after* programming all the planes.
+ * This avoids redundant programming in the case where we're going
+ * to be disabling a single plane - those pipes are being disabled.
+ */
+ if (acrtc_state->active_planes)
+ amdgpu_dm_commit_cursors(state);
+
+cleanup:
+ kfree(bundle);
+}
+
+static void amdgpu_dm_commit_audio(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct dm_crtc_state *new_dm_crtc_state;
+ const struct dc_stream_status *status;
+ int i, inst;
+
+ /* Notify device removals. */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ if (old_con_state->crtc != new_con_state->crtc) {
+ /* CRTC changes require notification. */
+ goto notify;
+ }
+
+ if (!new_con_state->crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(
+ state, new_con_state->crtc);
+
+ if (!new_crtc_state)
+ continue;
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+notify:
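+		/* Reached via the goto above on a CRTC change, or by falling
+		 * through when the connector's CRTC needs a modeset.
+		 */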
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ mutex_lock(&adev->dm.audio_lock);
+ inst = aconnector->audio_inst;
+ aconnector->audio_inst = -1;
+ mutex_unlock(&adev->dm.audio_lock);
+
+ amdgpu_dm_audio_eld_notify(adev, inst);
+ }
+
+ /* Notify audio device additions. */
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ if (!new_con_state->crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(
+ state, new_con_state->crtc);
+
+ if (!new_crtc_state)
+ continue;
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (!new_dm_crtc_state->stream)
+ continue;
+
+ status = dc_stream_get_status(new_dm_crtc_state->stream);
+ if (!status)
+ continue;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ mutex_lock(&adev->dm.audio_lock);
+ inst = status->audio_inst;
+ aconnector->audio_inst = inst;
+ mutex_unlock(&adev->dm.audio_lock);
+
+ amdgpu_dm_audio_eld_notify(adev, inst);
+ }
+}
+
+/*
+ * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
+ * @crtc_state: the DRM CRTC state
+ * @stream_state: the DC stream state.
+ *
+ * Copy the mirrored transient state flags from DRM to DC. It is used to bring
+ * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
+ */
+static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
+ struct dc_stream_state *stream_state)
+{
+ stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
+}
+
+static void dm_clear_writeback(struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *crtc_state)
+{
+ dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0);
+}
+
+static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
+ struct dc_state *dc_state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct drm_connector_state *old_con_state;
+ struct drm_connector *connector;
+ bool mode_set_reset_required = false;
+ u32 i;
+ struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count};
+
+ /* Disable writeback */
+ for_each_old_connector_in_state(state, connector, old_con_state, i) {
+ struct dm_connector_state *dm_old_con_state;
+ struct amdgpu_crtc *acrtc;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ old_crtc_state = NULL;
+
+ dm_old_con_state = to_dm_connector_state(old_con_state);
+ if (!dm_old_con_state->base.crtc)
+ continue;
+
+ acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc);
+ if (acrtc)
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+
+ if (!acrtc->wb_enabled)
+ continue;
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ dm_clear_writeback(dm, dm_old_crtc_state);
+ acrtc->wb_enabled = false;
+ }
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ if (old_crtc_state->active &&
+ (!new_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+ manage_dm_interrupts(adev, acrtc, false);
+ dc_stream_release(dm_old_crtc_state->stream);
+ }
+ }
+
+ drm_atomic_helper_calc_timestamping_constants(state);
+
+ /* update changed items */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ drm_dbg_state(state->dev,
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->planes_changed,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* Disable cursor if disabling crtc */
+ if (old_crtc_state->active && !new_crtc_state->active) {
+ struct dc_cursor_position position;
+
+ memset(&position, 0, sizeof(position));
+ mutex_lock(&dm->dc_lock);
+ dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
+ mutex_unlock(&dm->dc_lock);
+ }
+
+ /* Copy all transient state flags into dc state */
+ if (dm_new_crtc_state->stream) {
+ amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
+ dm_new_crtc_state->stream);
+ }
+
+		/* Handles the headless hotplug case, updating new_state and
+		 * aconnector as needed.
+		 */
+
+ if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
+
+ DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
+
+ if (!dm_new_crtc_state->stream) {
+			/*
+			 * This could happen because of issues with delivery
+			 * of userspace notifications: userspace tries to set
+			 * a mode on a display which is in fact disconnected,
+			 * so dc_sink is NULL on the aconnector. We expect a
+			 * reset mode to come soon.
+			 *
+			 * This can also happen when an unplug is done during
+			 * the resume sequence.
+			 *
+			 * In this case, we want to pretend we still have a
+			 * sink to keep the pipe running, so that hw state is
+			 * consistent with the sw state.
+			 */
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ continue;
+ }
+
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+
+ pm_runtime_get_noresume(dev->dev);
+
+ acrtc->enabled = true;
+ acrtc->hw_mode = new_crtc_state->mode;
+ crtc->hwmode = new_crtc_state->mode;
+ mode_set_reset_required = true;
+ } else if (modereset_required(new_crtc_state)) {
+ DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
+ /* i.e. reset mode */
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+
+ mode_set_reset_required = true;
+ }
+ } /* for_each_crtc_in_state() */
+
+	/* If there was a mode set or reset, disable eDP PSR and Replay */
+ if (mode_set_reset_required) {
+ if (dm->vblank_control_workqueue)
+ flush_workqueue(dm->vblank_control_workqueue);
+
+ amdgpu_dm_replay_disable_all(dm);
+ amdgpu_dm_psr_disable_all(dm);
+ }
+
+ dm_enable_per_frame_crtc_master_sync(dc_state);
+ mutex_lock(&dm->dc_lock);
+ WARN_ON(!dc_commit_streams(dm->dc, &params));
+
+ /* Allow idle optimization when vblank count is 0 for display off */
+ if (dm->active_vblank_irq_count == 0)
+ dc_allow_idle_optimizations(dm->dc, true);
+ mutex_unlock(&dm->dc_lock);
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state->stream != NULL) {
+ const struct dc_stream_status *status =
+ dc_stream_get_status(dm_new_crtc_state->stream);
+
+ if (!status)
+ status = dc_state_get_stream_status(dc_state,
+ dm_new_crtc_state->stream);
+ if (!status)
+ drm_err(dev,
+ "got no status for stream %p on acrtc%p\n",
+ dm_new_crtc_state->stream, acrtc);
+ else
+ acrtc->otg_inst = status->primary_otg_inst;
+ }
+ }
+}
+
+static void dm_set_writeback(struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *crtc_state,
+ struct drm_connector *connector,
+ struct drm_connector_state *new_con_state)
+{
+ struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector);
+ struct amdgpu_device *adev = dm->adev;
+ struct amdgpu_crtc *acrtc;
+ struct dc_writeback_info *wb_info;
+ struct pipe_ctx *pipe = NULL;
+ struct amdgpu_framebuffer *afb;
+ int i = 0;
+
+ wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
+ if (!wb_info) {
+ DRM_ERROR("Failed to allocate wb_info\n");
+ return;
+ }
+
+ acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
+ if (!acrtc) {
+ DRM_ERROR("no amdgpu_crtc found\n");
+ kfree(wb_info);
+ return;
+ }
+
+ afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
+ if (!afb) {
+ DRM_ERROR("No amdgpu_framebuffer found\n");
+ kfree(wb_info);
+ return;
+ }
+
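+	/* Find the hardware pipe currently driving this CRTC's stream. */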
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) {
+ pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i];
+ break;
+ }
+	}
+
+	/* Bail out if no hardware pipe drives this stream; otherwise the
+	 * pipe->plane_state dereference below would be a NULL pointer access.
+	 */
+	if (!pipe) {
+		DRM_ERROR("no pipe found for writeback stream\n");
+		kfree(wb_info);
+		return;
+	}
+
+ /* fill in wb_info */
+ wb_info->wb_enabled = true;
+
+ wb_info->dwb_pipe_inst = 0;
+ wb_info->dwb_params.dwbscl_black_color = 0;
+ wb_info->dwb_params.hdr_mult = 0x1F000;
+ wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS;
+ wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13;
+ wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC;
+ wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC;
+
+ /* width & height from crtc */
+ wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay;
+ wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay;
+ wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay;
+ wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay;
+
+ wb_info->dwb_params.cnv_params.crop_en = false;
+ wb_info->dwb_params.stereo_params.stereo_enabled = false;
+
+ wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff; // 10 bits
+ wb_info->dwb_params.cnv_params.out_min_pix_val = 0;
+ wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB;
+ wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS;
+
+ wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444;
+
+ wb_info->dwb_params.capture_rate = dwb_capture_rate_0;
+
+ wb_info->dwb_params.scaler_taps.h_taps = 4;
+ wb_info->dwb_params.scaler_taps.v_taps = 4;
+ wb_info->dwb_params.scaler_taps.h_taps_c = 2;
+ wb_info->dwb_params.scaler_taps.v_taps_c = 2;
+ wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING;
+
+ wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0];
+ wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1];
+
+ for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) {
+ wb_info->mcif_buf_params.luma_address[i] = afb->address;
+ wb_info->mcif_buf_params.chroma_address[i] = 0;
+ }
+
+ wb_info->mcif_buf_params.p_vmid = 1;
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) {
+ wb_info->mcif_warmup_params.start_address.quad_part = afb->address;
+ wb_info->mcif_warmup_params.region_size =
+ wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height;
+ }
+ wb_info->mcif_warmup_params.p_vmid = 1;
+ wb_info->writeback_source_plane = pipe->plane_state;
+
+ dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info);
+
+ acrtc->wb_pending = true;
+ acrtc->wb_conn = wb_conn;
+ drm_writeback_queue_job(wb_conn, new_con_state);
+}
+
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware failure, since
+ * atomic check should have filtered anything non-kosher.
+ */
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ struct dc_state *dc_state = NULL;
+ u32 i, j;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int crtc_disable_count = 0;
+
+ trace_amdgpu_dm_atomic_commit_tail_begin(state);
+
+ if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed)
+ dc_allow_idle_optimizations(dm->dc, false);
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
+
+ dm_state = dm_atomic_get_new_state(state);
+ if (dm_state && dm_state->context) {
+ dc_state = dm_state->context;
+ amdgpu_dm_commit_streams(state, dc_state);
+ }
+
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct amdgpu_dm_connector *aconnector;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!adev->dm.hdcp_workqueue)
+ continue;
+
+ pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
+
+ if (!connector)
+ continue;
+
+ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ connector->index, connector->status, connector->dpms);
+ pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ old_con_state->content_protection, new_con_state->content_protection);
+
+ if (aconnector->dc_sink) {
+ if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
+ aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
+ pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+ aconnector->dc_sink->edid_caps.display_name);
+ }
+ }
+
+ new_crtc_state = NULL;
+ old_crtc_state = NULL;
+
+ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
+
+ if (old_crtc_state)
+ pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ old_crtc_state->enable,
+ old_crtc_state->active,
+ old_crtc_state->mode_changed,
+ old_crtc_state->active_changed,
+ old_crtc_state->connectors_changed);
+
+ if (new_crtc_state)
+ pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+ }
+
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!adev->dm.hdcp_workqueue)
+ continue;
+
+ new_crtc_state = NULL;
+ old_crtc_state = NULL;
+
+ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
+ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+ new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ dm_new_con_state->update_hdcp = true;
+ continue;
+ }
+
+ if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
+ old_con_state, connector, adev->dm.hdcp_workqueue)) {
+			/* When a display is unplugged from an mst hub, the
+			 * connector will be destroyed within
+			 * dm_dp_mst_connector_destroy. The connector hdcp
+			 * properties, like type, undesired, desired and
+			 * enabled, will be lost. So, save the hdcp properties
+			 * into hdcp_work within amdgpu_dm_atomic_commit_tail.
+			 * If the same display is plugged back with the same
+			 * display index, its hdcp properties will be retrieved
+			 * from hdcp_work within dm_dp_mst_get_modes.
+			 */
+
+ bool enable_encryption = false;
+
+ if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ enable_encryption = true;
+
+ if (aconnector->dc_link && aconnector->dc_sink &&
+ aconnector->dc_link->type == dc_connection_mst_branch) {
+ struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+ struct hdcp_workqueue *hdcp_w =
+ &hdcp_work[aconnector->dc_link->link_index];
+
+ hdcp_w->hdcp_content_type[connector->index] =
+ new_con_state->hdcp_content_type;
+ hdcp_w->content_protection[connector->index] =
+ new_con_state->content_protection;
+ }
+
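+			/* A mode change with protection still desired or enabled
+			 * also requires encryption to be re-enabled.
+			 */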
+ if (new_crtc_state && new_crtc_state->mode_changed &&
+ new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ enable_encryption = true;
+
+ DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+
+ hdcp_update_display(
+ adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
+ new_con_state->hdcp_content_type, enable_encryption);
+ }
+ }
+
+ /* Handle connector state changes */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct dc_surface_update *dummy_updates;
+ struct dc_stream_update stream_update;
+ struct dc_info_packet hdr_packet;
+ struct dc_stream_status *status = NULL;
+ bool abm_changed, hdr_changed, scaling_changed;
+
+ memset(&stream_update, 0, sizeof(stream_update));
+
+ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ scaling_changed = is_scaling_state_different(dm_new_con_state,
+ dm_old_con_state);
+
+ abm_changed = dm_new_crtc_state->abm_level !=
+ dm_old_crtc_state->abm_level;
+
+ hdr_changed =
+ !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
+
+ if (!scaling_changed && !abm_changed && !hdr_changed)
+ continue;
+
+ stream_update.stream = dm_new_crtc_state->stream;
+ if (scaling_changed) {
+ update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
+ dm_new_con_state, dm_new_crtc_state->stream);
+
+ stream_update.src = dm_new_crtc_state->stream->src;
+ stream_update.dst = dm_new_crtc_state->stream->dst;
+ }
+
+ if (abm_changed) {
+ dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
+
+ stream_update.abm_level = &dm_new_crtc_state->abm_level;
+ }
+
+ if (hdr_changed) {
+ fill_hdr_info_packet(new_con_state, &hdr_packet);
+ stream_update.hdr_static_metadata = &hdr_packet;
+ }
+
+ status = dc_stream_get_status(dm_new_crtc_state->stream);
+
+ if (WARN_ON(!status))
+ continue;
+
+ WARN_ON(!status->plane_count);
+
+ /*
+ * TODO: DC refuses to perform stream updates without a dc_surface_update.
+ * Here we create an empty update on each plane.
+ * To fix this, DC should permit updating only stream properties.
+ */
+		dummy_updates = kcalloc(MAX_SURFACES, sizeof(struct dc_surface_update), GFP_ATOMIC);
+ if (!dummy_updates) {
+ DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
+ continue;
+ }
+ for (j = 0; j < status->plane_count; j++)
+ dummy_updates[j].surface = status->plane_states[0];
+
+ mutex_lock(&dm->dc_lock);
+ dc_update_planes_and_stream(dm->dc,
+ dummy_updates,
+ status->plane_count,
+ dm_new_crtc_state->stream,
+ &stream_update);
+ mutex_unlock(&dm->dc_lock);
+ kfree(dummy_updates);
+ }
+
+	/**
+	 * Enable interrupts for CRTCs that are newly enabled or went through
+	 * a modeset. This is intentionally deferred until after the front end
+	 * state has been modified, so that the OTG is on before the IRQ
+	 * handlers run and they don't access stale or invalid state.
+	 */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+#ifdef CONFIG_DEBUG_FS
+ enum amdgpu_dm_pipe_crc_source cur_crc_src;
+#endif
+ /* Count number of newly disabled CRTCs for dropping PM refs later. */
+ if (old_crtc_state->active && !new_crtc_state->active)
+ crtc_disable_count++;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ /* For freesync config update on crtc state and params for irq */
+ update_stream_irq_parameters(dm, dm_new_crtc_state);
+
+#ifdef CONFIG_DEBUG_FS
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ cur_crc_src = acrtc->dm_irq_params.crc_src;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+#endif
+
+ if (new_crtc_state->active &&
+ (!old_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+ dc_stream_retain(dm_new_crtc_state->stream);
+ acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
+ manage_dm_interrupts(adev, acrtc, true);
+ }
+ /* Handle vrr on->off / off->on transitions */
+ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
+
+#ifdef CONFIG_DEBUG_FS
+ if (new_crtc_state->active &&
+ (!old_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+			/**
+			 * The frontend may have changed, so reapply the CRC
+			 * capture settings for the stream.
+			 */
+ if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (amdgpu_dm_crc_window_is_activated(crtc)) {
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ acrtc->dm_irq_params.window_param.update_win = true;
+
+					/**
+					 * It takes 2 frames for HW to stably generate CRC when
+					 * resuming from suspend, so we set skip_frame_cnt to 2.
+					 */
+ acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+ }
+#endif
+ if (amdgpu_dm_crtc_configure_crc_source(
+ crtc, dm_new_crtc_state, cur_crc_src))
+ DRM_DEBUG_DRIVER("Failed to configure crc source");
+ }
+ }
+#endif
+ }
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
+ if (new_crtc_state->async_flip)
+ wait_for_vblank = false;
+
+	/* update planes when needed per crtc */
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state->stream)
+ amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
+ }
+
+ /* Enable writeback */
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ if (!new_con_state->writeback_job)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+
+ if (!new_crtc_state)
+ continue;
+
+ if (acrtc->wb_enabled)
+ continue;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state);
+ acrtc->wb_enabled = true;
+ }
+
+ /* Update audio instances for each connector. */
+ amdgpu_dm_commit_audio(dev, state);
+
+ /* restore the backlight level */
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (dm->backlight_dev[i] &&
+ (dm->actual_brightness[i] != dm->brightness[i]))
+ amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
+ }
+
+	/*
+	 * Send a vblank event for all events not handled in flip, and
+	 * mark the event consumed for drm_atomic_helper_commit_hw_done.
+	 */
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+
+ if (new_crtc_state->event)
+ drm_send_event_locked(dev, &new_crtc_state->event->base);
+
+ new_crtc_state->event = NULL;
+ }
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+
+ /* Signal HW programming completion */
+ drm_atomic_helper_commit_hw_done(state);
+
+ if (wait_for_vblank)
+ drm_atomic_helper_wait_for_flip_done(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ /* Don't free the memory if we are hitting this as part of suspend.
+ * This way we don't free any memory during suspend; see
+ * amdgpu_bo_free_kernel(). The memory will be freed in the first
+ * non-suspend modeset or when the driver is torn down.
+ */
+ if (!adev->in_suspend) {
+ /* return the stolen vga memory back to VRAM */
+ if (!adev->mman.keep_stolen_vga_memory)
+ amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
+ }
+
+ /*
+ * Finally, drop a runtime PM reference for each newly disabled CRTC,
+ * so we can put the GPU into runtime suspend if we're not driving any
+	 * displays anymore.
+ */
+ for (i = 0; i < crtc_disable_count; i++)
+ pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
+}
+
+static int dm_force_atomic_commit(struct drm_connector *connector)
+{
+ int ret = 0;
+ struct drm_device *ddev = connector->dev;
+ struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
+ struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ struct drm_plane *plane = disconnected_acrtc->base.primary;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ddev->mode_config.acquire_ctx;
+
+ /* Construct an atomic state to restore previous display setting */
+
+ /*
+ * Attach connectors to drm_atomic_state
+ */
+ conn_state = drm_atomic_get_connector_state(state, connector);
+
+ ret = PTR_ERR_OR_ZERO(conn_state);
+ if (ret)
+ goto out;
+
+	/* Attach crtc to drm_atomic_state */
+ crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
+
+ ret = PTR_ERR_OR_ZERO(crtc_state);
+ if (ret)
+ goto out;
+
+ /* force a restore */
+ crtc_state->mode_changed = true;
+
+ /* Attach plane to drm_atomic_state */
+ plane_state = drm_atomic_get_plane_state(state, plane);
+
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (ret)
+ goto out;
+
+ /* Call commit internally with the state we just constructed */
+ ret = drm_atomic_commit(state);
+
+out:
+ drm_atomic_state_put(state);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
+
+ return ret;
+}
+
+/*
+ * This function handles all cases when a set mode does not come upon hotplug.
+ * This includes when a display is unplugged and then plugged back into the
+ * same port, and when running without usermode desktop manager support.
+ */
+void dm_restore_drm_connector_state(struct drm_device *dev,
+ struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct amdgpu_crtc *disconnected_acrtc;
+ struct dm_crtc_state *acrtc_state;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!aconnector->dc_sink || !connector->state || !connector->encoder)
+ return;
+
+ disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ if (!disconnected_acrtc)
+ return;
+
+ acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+ if (!acrtc_state->stream)
+ return;
+
+	/*
+	 * If the previous sink is not released and is different from the
+	 * current one, we deduce we are in a state where we cannot rely on a
+	 * usermode call to turn on the display, so we do it here.
+	 */
+ if (acrtc_state->stream->sink != aconnector->dc_sink)
+ dm_force_atomic_commit(&aconnector->base);
+}
+
+/*
+ * Grabs all modesetting locks to serialize against any blocking commits and
+ * waits for completion of all non-blocking commits.
+ */
+static int do_aquire_global_lock(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_commit *commit;
+ long ret;
+
+	/*
+	 * Adding all modeset locks to acquire_ctx will
+	 * ensure that when the framework releases it, the
+	 * extra locks we are taking here will get released too.
+	 */
+ ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
+ if (ret)
+ return ret;
+
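+	/*
+	 * Take a reference on the newest commit so it can still be waited
+	 * on after commit_lock is dropped.
+	 */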
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit, commit_entry);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->commit_lock);
+
+ if (!commit)
+ continue;
+
+		/*
+		 * Make sure all pending HW programming has completed and
+		 * all page flips are done.
+		 */
+ ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
+
+ if (ret > 0)
+ ret = wait_for_completion_interruptible_timeout(
+ &commit->flip_done, 10*HZ);
+
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+ crtc->base.id, crtc->name);
+
+ drm_crtc_commit_put(commit);
+ }
+
+ return ret < 0 ? ret : 0;
+}
+
+static void get_freesync_config_for_crtc(
+ struct dm_crtc_state *new_crtc_state,
+ struct dm_connector_state *new_con_state)
+{
+ struct mod_freesync_config config = {0};
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_display_mode *mode = &new_crtc_state->base.mode;
+ int vrefresh = drm_mode_vrefresh(mode);
+ bool fs_vid_mode = false;
+
+ if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return;
+
+ aconnector = to_amdgpu_dm_connector(new_con_state->base.connector);
+
+ new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
+ vrefresh >= aconnector->min_vfreq &&
+ vrefresh <= aconnector->max_vfreq;
+
+ if (new_crtc_state->vrr_supported) {
+ new_crtc_state->stream->ignore_msa_timing_param = true;
+ fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
+
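+		/* min/max_vfreq are in Hz; the freesync config is expressed in uHz. */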
+ config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
+ config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
+ config.vsif_supported = true;
+ config.btr = true;
+
+ if (fs_vid_mode) {
+ config.state = VRR_STATE_ACTIVE_FIXED;
+ config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
+ goto out;
+ } else if (new_crtc_state->base.vrr_enabled) {
+ config.state = VRR_STATE_ACTIVE_VARIABLE;
+ } else {
+ config.state = VRR_STATE_INACTIVE;
+ }
+ }
+out:
+ new_crtc_state->freesync_config = config;
+}
+
+static void reset_freesync_config_for_crtc(
+ struct dm_crtc_state *new_crtc_state)
+{
+ new_crtc_state->vrr_supported = false;
+
+ memset(&new_crtc_state->vrr_infopacket, 0,
+ sizeof(new_crtc_state->vrr_infopacket));
+}
+
+static bool
+is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state)
+{
+ const struct drm_display_mode *old_mode, *new_mode;
+
+ if (!old_crtc_state || !new_crtc_state)
+ return false;
+
+ old_mode = &old_crtc_state->mode;
+ new_mode = &new_crtc_state->mode;
+
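+	/*
+	 * A freesync video mode keeps the pixel clock and horizontal timing
+	 * unchanged and varies only the vertical blanking: vtotal and the
+	 * vsync position differ while the vsync pulse width stays the same.
+	 */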
+ if (old_mode->clock == new_mode->clock &&
+ old_mode->hdisplay == new_mode->hdisplay &&
+ old_mode->vdisplay == new_mode->vdisplay &&
+ old_mode->htotal == new_mode->htotal &&
+ old_mode->vtotal != new_mode->vtotal &&
+ old_mode->hsync_start == new_mode->hsync_start &&
+ old_mode->vsync_start != new_mode->vsync_start &&
+ old_mode->hsync_end == new_mode->hsync_end &&
+ old_mode->vsync_end != new_mode->vsync_end &&
+ old_mode->hskew == new_mode->hskew &&
+ old_mode->vscan == new_mode->vscan &&
+ (old_mode->vsync_end - old_mode->vsync_start) ==
+ (new_mode->vsync_end - new_mode->vsync_start))
+ return true;
+
+ return false;
+}
+
+static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
+{
+ u64 num, den, res;
+ struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
+
+ dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
+
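+	/*
+	 * mode.clock is in kHz: multiplying by 1000 gives Hz and by a further
+	 * 1000000 gives uHz; dividing by the pixels per frame
+	 * (htotal * vtotal) yields the fixed refresh rate in uHz.
+	 */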
+ num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
+ den = (unsigned long long)new_crtc_state->mode.htotal *
+ (unsigned long long)new_crtc_state->mode.vtotal;
+
+ res = div_u64(num, den);
+ dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
+}
+
+static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state,
+ bool enable,
+ bool *lock_and_validation_needed)
+{
+ struct dm_atomic_state *dm_state = NULL;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct dc_stream_state *new_stream;
+ int ret = 0;
+
+ /*
+ * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
+ * update changed items
+ */
+ struct amdgpu_crtc *acrtc = NULL;
+ struct drm_connector *connector = NULL;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
+ struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
+
+ new_stream = NULL;
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ acrtc = to_amdgpu_crtc(crtc);
+ connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
+ if (connector)
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ /* TODO This hack should go away */
+ if (connector && enable) {
+ /* Make sure fake sink is created in plug-in scenario */
+ drm_new_conn_state = drm_atomic_get_new_connector_state(state,
+ connector);
+ drm_old_conn_state = drm_atomic_get_old_connector_state(state,
+ connector);
+
+ if (IS_ERR(drm_new_conn_state)) {
+ ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
+ goto fail;
+ }
+
+ dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
+ dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ goto skip_modeset;
+
+ new_stream = create_validate_stream_for_sink(aconnector,
+ &new_crtc_state->mode,
+ dm_new_conn_state,
+ dm_old_crtc_state->stream);
+
+		/*
+		 * We can have no stream on ACTION_SET if a display
+		 * was disconnected during S3; in this case it is not an
+		 * error, the OS will be updated after detection and
+		 * will do the right thing on the next atomic commit.
+		 */
+
+ if (!new_stream) {
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /*
+ * TODO: Check VSDB bits to decide whether this should
+ * be enabled or not.
+ */
+ new_stream->triggered_crtc_reset.enabled =
+ dm->force_timing_sync;
+
+ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
+
+ ret = fill_hdr_info_packet(drm_new_conn_state,
+ &new_stream->hdr_static_metadata);
+ if (ret)
+ goto fail;
+
+ /*
+ * If we already removed the old stream from the context
+ * (and set the new stream to NULL) then we can't reuse
+ * the old stream even if the stream and scaling are unchanged.
+ * We'll hit the BUG_ON and black screen.
+ *
+ * TODO: Refactor this function to allow this check to work
+ * in all conditions.
+ */
+ if (amdgpu_freesync_vid_mode &&
+ dm_new_crtc_state->stream &&
+ is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
+ goto skip_modeset;
+
+ if (dm_new_crtc_state->stream &&
+ dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
+ new_crtc_state->mode_changed = false;
+ DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
+ new_crtc_state->mode_changed);
+ }
+ }
+
+ /* mode_changed flag may get updated above, need to check again */
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ goto skip_modeset;
+
+ drm_dbg_state(state->dev,
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->planes_changed,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* Remove stream for any changed/disabled CRTC */
+ if (!enable) {
+
+ if (!dm_old_crtc_state->stream)
+ goto skip_modeset;
+
+ /* Unset freesync video if it was active before */
+ if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
+ dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
+ dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
+ }
+
+ /* Now check if we should set freesync video mode */
+ if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
+ dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ is_timing_unchanged_for_freesync(new_crtc_state,
+ old_crtc_state)) {
+ new_crtc_state->mode_changed = false;
+			DRM_DEBUG_DRIVER(
+				"Mode change not required for front porch change, setting mode_changed to %d\n",
+				new_crtc_state->mode_changed);
+
+ set_freesync_fixed_config(dm_new_crtc_state);
+
+ goto skip_modeset;
+ } else if (amdgpu_freesync_vid_mode && aconnector &&
+ is_freesync_video_mode(&new_crtc_state->mode,
+ aconnector)) {
+ struct drm_display_mode *high_mode;
+
+ high_mode = get_highest_refresh_rate_mode(aconnector, false);
+ if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
+ set_freesync_fixed_config(dm_new_crtc_state);
+ }
+
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
+
+ DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
+ crtc->base.id);
+
+ /* i.e. reset mode */
+ if (dc_state_remove_stream(
+ dm->dc,
+ dm_state->context,
+ dm_old_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dc_stream_release(dm_old_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+
+ reset_freesync_config_for_crtc(dm_new_crtc_state);
+
+ *lock_and_validation_needed = true;
+
+	} else { /* Add stream for any updated/enabled CRTC */
+		/*
+		 * Quick fix to prevent a NULL pointer dereference on
+		 * new_stream when added MST connectors are not found in the
+		 * existing crtc_state in chained mode.
+		 * TODO: dig out the root cause of this.
+		 */
+ if (!connector)
+ goto skip_modeset;
+
+ if (modereset_required(new_crtc_state))
+ goto skip_modeset;
+
+ if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
+ dm_old_crtc_state->stream)) {
+
+ WARN_ON(dm_new_crtc_state->stream);
+
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
+
+ dm_new_crtc_state->stream = new_stream;
+
+ dc_stream_retain(new_stream);
+
+ DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
+ crtc->base.id);
+
+ if (dc_state_add_stream(
+ dm->dc,
+ dm_state->context,
+ dm_new_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ *lock_and_validation_needed = true;
+ }
+ }
+
+skip_modeset:
+ /* Release extra reference */
+ if (new_stream)
+ dc_stream_release(new_stream);
+
+ /*
+ * We want to do dc stream updates that do not require a
+ * full modeset below.
+ */
+ if (!(enable && connector && new_crtc_state->active))
+ return 0;
+ /*
+ * Given above conditions, the dc state cannot be NULL because:
+ * 1. We're in the process of enabling CRTCs (just been added
+ * to the dc context, or already is on the context)
+ * 2. Has a valid connector attached, and
+ * 3. Is currently active and enabled.
+ * => The dc stream state currently exists.
+ */
+ BUG_ON(dm_new_crtc_state->stream == NULL);
+
+ /* Scaling or underscan settings */
+ if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))
+ update_stream_scaling_settings(
+ &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
+
+ /* ABM settings */
+ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
+
+ /*
+ * Color management settings. We also update color properties
+ * when a modeset is needed, to ensure it gets reprogrammed.
+ */
+ if (dm_new_crtc_state->base.color_mgmt_changed ||
+ dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
+ if (ret)
+ goto fail;
+ }
+
+ /* Update Freesync settings. */
+ get_freesync_config_for_crtc(dm_new_crtc_state,
+ dm_new_conn_state);
+
+ return ret;
+
+fail:
+ if (new_stream)
+ dc_stream_release(new_stream);
+ return ret;
+}
+
+static bool should_reset_plane(struct drm_atomic_state *state,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state)
+{
+ struct drm_plane *other;
+ struct drm_plane_state *old_other_state, *new_other_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
+ int i;
+
+ /*
+ * TODO: Remove this hack for all asics once it proves that the
+ * fast updates works fine on DCN3.2+.
+ */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) &&
+ state->allow_modeset)
+ return true;
+
+ /* Exit early if we know that we're adding or removing the plane. */
+ if (old_plane_state->crtc != new_plane_state->crtc)
+ return true;
+
+ /* old crtc == new_crtc == NULL, plane not in context. */
+ if (!new_plane_state->crtc)
+ return false;
+
+ new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+
+ if (!new_crtc_state)
+ return true;
+
+ /* CRTC Degamma changes currently require us to recreate planes. */
+ if (new_crtc_state->color_mgmt_changed)
+ return true;
+
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state))
+ return true;
+
+ /*
+ * If there are any new primary or overlay planes being added or
+ * removed then the z-order can potentially change. To ensure
+ * correct z-order and pipe acquisition the current DC architecture
+ * requires us to remove and recreate all existing planes.
+ *
+ * TODO: Come up with a more elegant solution for this.
+ */
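+	/*
+	 * Concretely (illustrative example): enabling an overlay plane on a
+	 * CRTC that already has an enabled primary plane also resets the
+	 * primary, so DC can re-evaluate pipe acquisition for the new
+	 * z-order.
+	 */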
+ for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
+ struct amdgpu_framebuffer *old_afb, *new_afb;
+ struct dm_plane_state *dm_new_other_state, *dm_old_other_state;
+
+ dm_new_other_state = to_dm_plane_state(new_other_state);
+ dm_old_other_state = to_dm_plane_state(old_other_state);
+
+ if (other->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (old_other_state->crtc != new_plane_state->crtc &&
+ new_other_state->crtc != new_plane_state->crtc)
+ continue;
+
+ if (old_other_state->crtc != new_other_state->crtc)
+ return true;
+
+ /* Src/dst size and scaling updates. */
+ if (old_other_state->src_w != new_other_state->src_w ||
+ old_other_state->src_h != new_other_state->src_h ||
+ old_other_state->crtc_w != new_other_state->crtc_w ||
+ old_other_state->crtc_h != new_other_state->crtc_h)
+ return true;
+
+ /* Rotation / mirroring updates. */
+ if (old_other_state->rotation != new_other_state->rotation)
+ return true;
+
+ /* Blending updates. */
+ if (old_other_state->pixel_blend_mode !=
+ new_other_state->pixel_blend_mode)
+ return true;
+
+ /* Alpha updates. */
+ if (old_other_state->alpha != new_other_state->alpha)
+ return true;
+
+ /* Colorspace changes. */
+ if (old_other_state->color_range != new_other_state->color_range ||
+ old_other_state->color_encoding != new_other_state->color_encoding)
+ return true;
+
+ /* HDR/Transfer Function changes. */
+ if (dm_old_other_state->degamma_tf != dm_new_other_state->degamma_tf ||
+ dm_old_other_state->degamma_lut != dm_new_other_state->degamma_lut ||
+ dm_old_other_state->hdr_mult != dm_new_other_state->hdr_mult ||
+ dm_old_other_state->ctm != dm_new_other_state->ctm ||
+ dm_old_other_state->shaper_lut != dm_new_other_state->shaper_lut ||
+ dm_old_other_state->shaper_tf != dm_new_other_state->shaper_tf ||
+ dm_old_other_state->lut3d != dm_new_other_state->lut3d ||
+ dm_old_other_state->blend_lut != dm_new_other_state->blend_lut ||
+ dm_old_other_state->blend_tf != dm_new_other_state->blend_tf)
+ return true;
+
+ /* Framebuffer checks fall at the end. */
+ if (!old_other_state->fb || !new_other_state->fb)
+ continue;
+
+ /* Pixel format changes can require bandwidth updates. */
+ if (old_other_state->fb->format != new_other_state->fb->format)
+ return true;
+
+ old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
+ new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
+
+ /* Tiling and DCC changes also require bandwidth updates. */
+ if (old_afb->tiling_flags != new_afb->tiling_flags ||
+ old_afb->base.modifier != new_afb->base.modifier)
+ return true;
+ }
+
+ return false;
+}
+
+static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
+ struct drm_plane_state *new_plane_state,
+ struct drm_framebuffer *fb)
+{
+ struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
+ struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
+ unsigned int pitch;
+ bool linear;
+
+ if (fb->width > new_acrtc->max_cursor_width ||
+ fb->height > new_acrtc->max_cursor_height) {
+ DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
+ new_plane_state->fb->width,
+ new_plane_state->fb->height);
+ return -EINVAL;
+ }
+ if (new_plane_state->src_w != fb->width << 16 ||
+ new_plane_state->src_h != fb->height << 16) {
+ DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
+ return -EINVAL;
+ }
+
+ /* Pitch in pixels */
+ pitch = fb->pitches[0] / fb->format->cpp[0];
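+	/*
+	 * Illustrative example (not driver code): a 64-pixel-wide XRGB8888
+	 * cursor FB (cpp[0] == 4) has pitches[0] == 256 bytes, so the pitch
+	 * works out to 256 / 4 == 64 pixels, matching the FB width and
+	 * landing in the supported 64/128/256 set checked below.
+	 */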
+
+ if (fb->width != pitch) {
+		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
+				 fb->width, pitch);
+ return -EINVAL;
+ }
+
+ switch (pitch) {
+ case 64:
+ case 128:
+ case 256:
+ /* FB pitch is supported by cursor plane */
+ break;
+ default:
+ DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
+ return -EINVAL;
+ }
+
+ /* Core DRM takes care of checking FB modifiers, so we only need to
+ * check tiling flags when the FB doesn't have a modifier.
+ */
+ if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
+ if (adev->family < AMDGPU_FAMILY_AI) {
+ linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
+ AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
+ AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
+ } else {
+ linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
+ }
+ if (!linear) {
+ DRM_DEBUG_ATOMIC("Cursor FB not linear");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int dm_update_plane_state(struct dc *dc,
+ struct drm_atomic_state *state,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state,
+ bool enable,
+ bool *lock_and_validation_needed,
+ bool *is_top_most_overlay)
+{
+
+ struct dm_atomic_state *dm_state = NULL;
+ struct drm_crtc *new_plane_crtc, *old_plane_crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
+ struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
+ struct amdgpu_crtc *new_acrtc;
+ bool needs_reset;
+ int ret = 0;
+
+ new_plane_crtc = new_plane_state->crtc;
+ old_plane_crtc = old_plane_state->crtc;
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ dm_old_plane_state = to_dm_plane_state(old_plane_state);
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ if (!enable || !new_plane_crtc ||
+ drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
+
+ new_acrtc = to_amdgpu_crtc(new_plane_crtc);
+
+ if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
+ DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
+ return -EINVAL;
+ }
+
+ if (new_plane_state->fb) {
+ ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
+ new_plane_state->fb);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ needs_reset = should_reset_plane(state, plane, old_plane_state,
+ new_plane_state);
+
+ /* Remove any changed/removed planes */
+ if (!enable) {
+ if (!needs_reset)
+ return 0;
+
+ if (!old_plane_crtc)
+ return 0;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(
+ state, old_plane_crtc);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ if (!dm_old_crtc_state->stream)
+ return 0;
+
+ DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, old_plane_crtc->base.id);
+
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ return ret;
+
+ if (!dc_state_remove_plane(
+ dc,
+ dm_old_crtc_state->stream,
+ dm_old_plane_state->dc_state,
+ dm_state->context)) {
+
+ return -EINVAL;
+ }
+
+ if (dm_old_plane_state->dc_state)
+ dc_plane_state_release(dm_old_plane_state->dc_state);
+
+ dm_new_plane_state->dc_state = NULL;
+
+ *lock_and_validation_needed = true;
+
+ } else { /* Add new planes */
+ struct dc_plane_state *dc_new_plane_state;
+
+ if (drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
+
+ if (!new_plane_crtc)
+ return 0;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (!dm_new_crtc_state->stream)
+ return 0;
+
+ if (!needs_reset)
+ return 0;
+
+ ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
+ if (ret)
+ return ret;
+
+ WARN_ON(dm_new_plane_state->dc_state);
+
+ dc_new_plane_state = dc_create_plane_state(dc);
+ if (!dc_new_plane_state)
+ return -ENOMEM;
+
+ /* Block top most plane from being a video plane */
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+ if (amdgpu_dm_plane_is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
+ return -EINVAL;
+
+ *is_top_most_overlay = false;
+ }
+
+ DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, new_plane_crtc->base.id);
+
+ ret = fill_dc_plane_attributes(
+ drm_to_adev(new_plane_crtc->dev),
+ dc_new_plane_state,
+ new_plane_state,
+ new_crtc_state);
+ if (ret) {
+ dc_plane_state_release(dc_new_plane_state);
+ return ret;
+ }
+
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret) {
+ dc_plane_state_release(dc_new_plane_state);
+ return ret;
+ }
+
+ /*
+ * Any atomic check errors that occur after this will
+ * not need a release. The plane state will be attached
+ * to the stream, and therefore part of the atomic
+ * state. It'll be released when the atomic state is
+ * cleaned.
+ */
+ if (!dc_state_add_plane(
+ dc,
+ dm_new_crtc_state->stream,
+ dc_new_plane_state,
+ dm_state->context)) {
+
+ dc_plane_state_release(dc_new_plane_state);
+ return -EINVAL;
+ }
+
+ dm_new_plane_state->dc_state = dc_new_plane_state;
+
+ dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
+
+ /* Tell DC to do a full surface update every time there
+ * is a plane change. Inefficient, but works for now.
+ */
+ dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
+
+ *lock_and_validation_needed = true;
+ }
+
+ return ret;
+}
+
+static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
+ int *src_w, int *src_h)
+{
+ switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_90:
+ case DRM_MODE_ROTATE_270:
+ *src_w = plane_state->src_h >> 16;
+ *src_h = plane_state->src_w >> 16;
+ break;
+ case DRM_MODE_ROTATE_0:
+ case DRM_MODE_ROTATE_180:
+ default:
+ *src_w = plane_state->src_w >> 16;
+ *src_h = plane_state->src_h >> 16;
+ break;
+ }
+}
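+/*
+ * E.g. (illustrative only): a plane with src_w == 1920 << 16 and
+ * src_h == 1080 << 16 rotated by DRM_MODE_ROTATE_90 reports an oriented
+ * source size of 1080x1920, since the 90/270 cases swap width and height.
+ */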
+
+static void
+dm_get_plane_scale(struct drm_plane_state *plane_state,
+ int *out_plane_scale_w, int *out_plane_scale_h)
+{
+ int plane_src_w, plane_src_h;
+
+ dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
+ *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
+ *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
+}
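+/*
+ * Worked example (illustrative only): a 1920x1080 oriented source scaled
+ * onto a 960x540 CRTC rectangle yields out_plane_scale_w ==
+ * 960 * 1000 / 1920 == 500 and out_plane_scale_h ==
+ * 540 * 1000 / 1080 == 500, i.e. a 0.5x downscale in thousandths.
+ */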
+
+static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state)
+{
+ struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane_state *new_cursor_state, *new_underlying_state;
+ int i;
+ int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
+ bool any_relevant_change = false;
+
+	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
+	 * cursor per pipe, but it inherits the scaling and positioning from
+	 * the underlying pipe. Check that the cursor plane's scaling matches
+	 * the underlying planes'.
+	 */
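+	/*
+	 * For example (illustrative only): if an underlying plane is scaled
+	 * 2x (scale factor 2000 in thousandths) while the cursor plane is
+	 * unscaled (1000), the hardware cursor would be drawn at the wrong
+	 * size, so such a configuration is rejected below with -EINVAL.
+	 */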
+
+	/* If no plane was enabled or changed its scaling, there is no need to check again */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
+
+ if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
+ continue;
+
+ if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
+ any_relevant_change = true;
+ break;
+ }
+
+ if (new_plane_state->fb == old_plane_state->fb &&
+ new_plane_state->crtc_w == old_plane_state->crtc_w &&
+ new_plane_state->crtc_h == old_plane_state->crtc_h)
+ continue;
+
+ dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
+ dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
+
+ if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
+ any_relevant_change = true;
+ break;
+ }
+ }
+
+ if (!any_relevant_change)
+ return 0;
+
+ new_cursor_state = drm_atomic_get_plane_state(state, cursor);
+ if (IS_ERR(new_cursor_state))
+ return PTR_ERR(new_cursor_state);
+
+ if (!new_cursor_state->fb)
+ return 0;
+
+ dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
+
+ /* Need to check all enabled planes, even if this commit doesn't change
+ * their state
+ */
+ i = drm_atomic_add_affected_planes(state, crtc);
+ if (i)
+ return i;
+
+ for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
+ /* Narrow down to non-cursor planes on the same CRTC as the cursor */
+ if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
+ continue;
+
+ /* Ignore disabled planes */
+ if (!new_underlying_state->fb)
+ continue;
+
+ dm_get_plane_scale(new_underlying_state,
+ &underlying_scale_w, &underlying_scale_h);
+
+ if (cursor_scale_w != underlying_scale_w ||
+ cursor_scale_h != underlying_scale_h) {
+ drm_dbg_atomic(crtc->dev,
+ "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
+ cursor->base.id, cursor->name, underlying->base.id, underlying->name);
+ return -EINVAL;
+ }
+
+ /* If this plane covers the whole CRTC, no need to check planes underneath */
+ if (new_underlying_state->crtc_x <= 0 &&
+ new_underlying_state->crtc_y <= 0 &&
+ new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
+ new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
+ break;
+ }
+
+ return 0;
+}
+
+static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state, *old_conn_state;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ int i;
+
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
+ if (!conn_state->crtc)
+ conn_state = old_conn_state;
+
+ if (conn_state->crtc != crtc)
+ continue;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (!aconnector->mst_output_port || !aconnector->mst_root)
+ aconnector = NULL;
+ else
+ break;
+ }
+
+ if (!aconnector)
+ return 0;
+
+ return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
+}
+
+/**
+ * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
+ *
+ * @dev: The DRM device
+ * @state: The atomic state to commit
+ *
+ * Validate that the given atomic state is programmable by DC into hardware.
+ * This involves constructing a &struct dc_state reflecting the new hardware
+ * state we wish to commit, then querying DC to see if it is programmable. It's
+ * important not to modify the existing DC state. Otherwise, atomic_check
+ * may unexpectedly commit hardware changes.
+ *
+ * When validating the DC state, it's important that the right locks are
+ * acquired. For a full update, which removes/adds/updates streams on one
+ * CRTC while flipping on another, acquiring the global lock guarantees
+ * that the commit will wait for completion of any outstanding flips,
+ * using DRM's synchronization events.
+ *
+ * Note that DM adds the affected connectors for all CRTCs in state, even when
+ * that might not seem necessary. This is because DC stream creation requires the
+ * DC sink, which is tied to the DRM connector state. Cleaning this up should
+ * be possible but non-trivial - a possible TODO item.
+ *
+ * Return: 0 on success, or a negative error code if validation failed.
+ */
+static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dm_atomic_state *dm_state = NULL;
+ struct dc *dc = adev->dm.dc;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ enum dc_status status;
+ int ret, i;
+ bool lock_and_validation_needed = false;
+ bool is_top_most_overlay = true;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_topology_state *mst_state;
+ struct dsc_mst_fairness_vars vars[MAX_PIPES];
+
+ trace_amdgpu_dm_atomic_check_begin(state);
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
+ goto fail;
+ }
+
+ /* Check connector changes */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+
+		/* Skip connectors that are disabled or part of a modeset already. */
+ if (!new_con_state->crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
+ if (IS_ERR(new_crtc_state)) {
+ DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
+ ret = PTR_ERR(new_crtc_state);
+ goto fail;
+ }
+
+ if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
+ dm_old_con_state->scaling != dm_new_con_state->scaling)
+ new_crtc_state->connectors_changed = true;
+ }
+
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ ret = add_affected_mst_dsc_crtcs(state, crtc);
+ if (ret) {
+ DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
+ goto fail;
+ }
+ }
+ }
+ }
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+ !new_crtc_state->color_mgmt_changed &&
+ old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
+ dm_old_crtc_state->dsc_force_changed == false)
+ continue;
+
+ ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
+ goto fail;
+ }
+
+ if (!new_crtc_state->enable)
+ continue;
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
+ goto fail;
+ }
+
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
+ goto fail;
+ }
+
+ if (dm_old_crtc_state->dsc_force_changed)
+ new_crtc_state->mode_changed = true;
+ }
+
+ /*
+ * Add all primary and overlay planes on the CRTC to the state
+ * whenever a plane is enabled to maintain correct z-ordering
+ * and to enable fast surface updates.
+ */
+ drm_for_each_crtc(crtc, dev) {
+ bool modified = false;
+
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (new_plane_state->crtc == crtc ||
+ old_plane_state->crtc == crtc) {
+ modified = true;
+ break;
+ }
+ }
+
+ if (!modified)
+ continue;
+
+ drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ new_plane_state =
+ drm_atomic_get_plane_state(state, plane);
+
+ if (IS_ERR(new_plane_state)) {
+ ret = PTR_ERR(new_plane_state);
+ DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
+ goto fail;
+ }
+ }
+ }
+
+ /*
+ * DC consults the zpos (layer_index in DC terminology) to determine the
+ * hw plane on which to enable the hw cursor (see
+ * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
+ * atomic state, so call drm helper to normalize zpos.
+ */
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret) {
+ drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
+ goto fail;
+ }
+
+	/* Remove existing planes if they are modified */
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ if (old_plane_state->fb && new_plane_state->fb &&
+ get_mem_type(old_plane_state->fb) !=
+ get_mem_type(new_plane_state->fb))
+ lock_and_validation_needed = true;
+
+ ret = dm_update_plane_state(dc, state, plane,
+ old_plane_state,
+ new_plane_state,
+ false,
+ &lock_and_validation_needed,
+ &is_top_most_overlay);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
+ goto fail;
+ }
+ }
+
+ /* Disable all crtcs which require disable */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ ret = dm_update_crtc_state(&adev->dm, state, crtc,
+ old_crtc_state,
+ new_crtc_state,
+ false,
+ &lock_and_validation_needed);
+ if (ret) {
+ DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
+ goto fail;
+ }
+ }
+
+ /* Enable all crtcs which require enable */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ ret = dm_update_crtc_state(&adev->dm, state, crtc,
+ old_crtc_state,
+ new_crtc_state,
+ true,
+ &lock_and_validation_needed);
+ if (ret) {
+ DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
+ goto fail;
+ }
+ }
+
+ /* Add new/modified planes */
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ ret = dm_update_plane_state(dc, state, plane,
+ old_plane_state,
+ new_plane_state,
+ true,
+ &lock_and_validation_needed,
+ &is_top_most_overlay);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
+ goto fail;
+ }
+ }
+
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ ret = pre_validate_dsc(state, &dm_state, vars);
+ if (ret != 0)
+ goto fail;
+ }
+
+ /* Run this here since we want to validate the streams we created */
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
+ goto fail;
+ }
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->mpo_requested)
+ DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
+ }
+
+ /* Check cursor planes scaling */
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
+ goto fail;
+ }
+ }
+
+ if (state->legacy_cursor_update) {
+ /*
+ * This is a fast cursor update coming from the plane update
+ * helper, check if it can be done asynchronously for better
+ * performance.
+ */
+ state->async_update =
+ !drm_atomic_helper_async_check(dev, state);
+
+ /*
+ * Skip the remaining global validation if this is an async
+ * update. Cursor updates can be done without affecting
+ * state or bandwidth calcs and this avoids the performance
+ * penalty of locking the private state object and
+ * allocating a new dc_state.
+ */
+ if (state->async_update)
+ return 0;
+ }
+
+	/* Check scaling and underscan changes */
+	/*
+	 * TODO: Scaling-change validation was removed because a new stream
+	 * cannot be committed into the context w/o causing a full reset.
+	 * Need to decide how to handle this.
+	 */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(
+ drm_atomic_get_new_crtc_state(state, &acrtc->base)))
+ continue;
+
+		/* Skip anything that is not a scaling or underscan change */
+ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
+ continue;
+
+ lock_and_validation_needed = true;
+ }
+
+ /* set the slot info for each mst_state based on the link encoding format */
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ u8 link_coding_cap;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ if (connector->index == mst_state->mgr->conn_base_id) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
+ drm_dp_mst_update_slots(mst_state, link_coding_cap);
+
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+ }
+
+	/*
+ * Streams and planes are reset when there are changes that affect
+ * bandwidth. Anything that affects bandwidth needs to go through
+ * DC global validation to ensure that the configuration can be applied
+ * to hardware.
+ *
+ * We have to currently stall out here in atomic_check for outstanding
+ * commits to finish in this case because our IRQ handlers reference
+ * DRM state directly - we can end up disabling interrupts too early
+ * if we don't.
+ *
+ * TODO: Remove this stall and drop DM state private objects.
+ */
+ if (lock_and_validation_needed) {
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
+ goto fail;
+ }
+
+ ret = do_aquire_global_lock(dev, state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
+ goto fail;
+ }
+
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+ if (ret) {
+ DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
+ goto fail;
+ }
+
+ /*
+ * Perform validation of MST topology in the state:
+ * We need to perform MST atomic check before calling
+ * dc_validate_global_state(), or there is a chance
+ * to get stuck in an infinite loop and hang eventually.
+ */
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
+ goto fail;
+ }
+ status = dc_validate_global_state(dc, dm_state->context, true);
+ if (status != DC_OK) {
+			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
+					 dc_status_to_str(status), status);
+ ret = -EINVAL;
+ goto fail;
+ }
+ } else {
+ /*
+		 * The commit is a fast update. Fast updates shouldn't change
+		 * the DC context or affect global validation, and their commit
+		 * work can be done in parallel with other commits not touching
+		 * the same resource. If we have a new DC context as part of
+ * the DM atomic state from validation we need to free it and
+ * retain the existing one instead.
+ *
+ * Furthermore, since the DM atomic state only contains the DC
+ * context and can safely be annulled, we can free the state
+ * and clear the associated private object now to free
+ * some memory and avoid a possible use-after-free later.
+ */
+
+ for (i = 0; i < state->num_private_objs; i++) {
+ struct drm_private_obj *obj = state->private_objs[i].ptr;
+
+ if (obj->funcs == adev->dm.atomic_obj.funcs) {
+ int j = state->num_private_objs-1;
+
+ dm_atomic_destroy_state(obj,
+ state->private_objs[i].state);
+
+ /* If i is not at the end of the array then the
+ * last element needs to be moved to where i was
+ * before the array can safely be truncated.
+ */
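+				/*
+				 * E.g. with num_private_objs == 3 and i == 0,
+				 * element 2 is copied into slot 0 and the
+				 * count drops to 2.
+				 */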
+ if (i != j)
+ state->private_objs[i] =
+ state->private_objs[j];
+
+ state->private_objs[j].ptr = NULL;
+ state->private_objs[j].state = NULL;
+ state->private_objs[j].old_state = NULL;
+ state->private_objs[j].new_state = NULL;
+
+ state->num_private_objs = j;
+ break;
+ }
+ }
+ }
+
+ /* Store the overall update type for use later in atomic check. */
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct dm_crtc_state *dm_new_crtc_state =
+ to_dm_crtc_state(new_crtc_state);
+
+ /*
+ * Only allow async flips for fast updates that don't change
+ * the FB pitch, the DCC state, rotation, etc.
+ */
+ if (new_crtc_state->async_flip && lock_and_validation_needed) {
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] async flips are only supported for fast updates\n",
+ crtc->base.id, crtc->name);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dm_new_crtc_state->update_type = lock_and_validation_needed ?
+ UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
+ }
+
+ /* Must be success */
+ WARN_ON(ret);
+
+ trace_amdgpu_dm_atomic_check_finish(state, ret);
+
+ return ret;
+
+fail:
+ if (ret == -EDEADLK)
+ DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
+ else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
+ DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
+ else
+ DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
+
+ trace_amdgpu_dm_atomic_check_finish(state, ret);
+
+ return ret;
+}
+
+static bool is_dp_capable_without_timing_msa(struct dc *dc,
+ struct amdgpu_dm_connector *amdgpu_dm_connector)
+{
+ u8 dpcd_data;
+ bool capable = false;
+
+ if (amdgpu_dm_connector->dc_link &&
+ dm_helpers_dp_read_dpcd(
+ NULL,
+ amdgpu_dm_connector->dc_link,
+ DP_DOWN_STREAM_PORT_COUNT,
+ &dpcd_data,
+ sizeof(dpcd_data))) {
+		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
+ }
+
+ return capable;
+}
+
+static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
+ unsigned int offset,
+ unsigned int total_length,
+ u8 *data,
+ unsigned int length,
+ struct amdgpu_hdmi_vsdb_info *vsdb)
+{
+ bool res;
+ union dmub_rb_cmd cmd;
+ struct dmub_cmd_send_edid_cea *input;
+ struct dmub_cmd_edid_cea_output *output;
+
+ if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
+ return false;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ input = &cmd.edid_cea.data.input;
+
+ cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
+ cmd.edid_cea.header.sub_type = 0;
+ cmd.edid_cea.header.payload_bytes =
+ sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
+ input->offset = offset;
+ input->length = length;
+ input->cea_total_length = total_length;
+ memcpy(input->payload, data, length);
+
+ res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
+ if (!res) {
+ DRM_ERROR("EDID CEA parser failed\n");
+ return false;
+ }
+
+ output = &cmd.edid_cea.data.output;
+
+ if (output->type == DMUB_CMD__EDID_CEA_ACK) {
+ if (!output->ack.success) {
+ DRM_ERROR("EDID CEA ack failed at offset %d\n",
+ output->ack.offset);
+ }
+ } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
+ if (!output->amd_vsdb.vsdb_found)
+ return false;
+
+ vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
+ vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
+ vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
+ vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
+ } else {
+ DRM_WARN("Unknown EDID CEA parser results\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
+ u8 *edid_ext, int len,
+ struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ int i;
+
+ /* send extension block to DMCU for parsing */
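+	/*
+	 * For example, a 128-byte CEA extension block is streamed in sixteen
+	 * 8-byte chunks; the parser acks each chunk, and the AMD VSDB result
+	 * (if any) is only expected after the final chunk has been sent.
+	 */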
+ for (i = 0; i < len; i += 8) {
+ bool res;
+ int offset;
+
+		/* send 8 bytes at a time */
+ if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
+ return false;
+
+		if (i + 8 == len) {
+			/* EDID block fully sent, expect the result */
+ int version, min_rate, max_rate;
+
+ res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
+ if (res) {
+ /* amd vsdb found */
+ vsdb_info->freesync_supported = 1;
+ vsdb_info->amd_vsdb_version = version;
+ vsdb_info->min_refresh_rate_hz = min_rate;
+ vsdb_info->max_refresh_rate_hz = max_rate;
+ return true;
+ }
+ /* not amd vsdb */
+ return false;
+ }
+
+		/* check for ack */
+ res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
+ if (!res)
+ return false;
+ }
+
+ return false;
+}
+
+static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
+ u8 *edid_ext, int len,
+ struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ int i;
+
+	/* send extension block to DMUB for parsing */
+ for (i = 0; i < len; i += 8) {
+		/* send 8 bytes at a time */
+ if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
+ return false;
+ }
+
+ return vsdb_info->freesync_supported;
+}
+
+static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
+ u8 *edid_ext, int len,
+ struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
+ bool ret;
+
+ mutex_lock(&adev->dm.dc_lock);
+ if (adev->dm.dmub_srv)
+ ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
+ else
+ ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
+ mutex_unlock(&adev->dm.dc_lock);
+ return ret;
+}
+
+static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
+ struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ u8 *edid_ext = NULL;
+ int i;
+ int j = 0;
+
+ if (edid == NULL || edid->extensions == 0)
+ return -ENODEV;
+
+ /* Find DisplayID extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (void *)(edid + (i + 1));
+ if (edid_ext[0] == DISPLAYID_EXT)
+ break;
+ }
+
+ while (j < EDID_LENGTH) {
+ struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
+ unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);
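+		/*
+		 * ieee_id is stored little-endian: e.g. the byte sequence
+		 * {0x1A, 0x00, 0x00} assembles to ieeeId == 0x00001A
+		 * (assuming that is the OUI the registration-ID constant
+		 * below encodes).
+		 */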
+
+ if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID &&
+ amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) {
+ vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ? true : false;
+ vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3;
+ DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode);
+
+ return true;
+ }
+ j++;
+ }
+
+ return false;
+}
+
+static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
+ struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ u8 *edid_ext = NULL;
+ int i;
+ bool valid_vsdb_found = false;
+
+ /*----- drm_find_cea_extension() -----*/
+ /* No EDID or EDID extensions */
+ if (edid == NULL || edid->extensions == 0)
+ return -ENODEV;
+
+ /* Find CEA extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
+ if (edid_ext[0] == CEA_EXT)
+ break;
+ }
+
+ if (i == edid->extensions)
+ return -ENODEV;
+
+ /*----- cea_db_offsets() -----*/
+ if (edid_ext[0] != CEA_EXT)
+ return -ENODEV;
+
+ valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
+
+ return valid_vsdb_found ? i : -ENODEV;
+}
+
+/**
+ * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
+ *
+ * @connector: Connector to query.
+ * @edid: EDID from monitor
+ *
+ * amdgpu supports FreeSync on DP and HDMI displays, and it needs to keep
+ * track of some of the display information in the internal data struct used by
+ * amdgpu_dm. This function checks which type of connector we need to set the
+ * FreeSync parameters for.
+ */
+void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
+ struct edid *edid)
+{
+ int i = 0;
+ struct detailed_timing *timing;
+ struct detailed_non_pixel *data;
+ struct detailed_data_monitor_range *range;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ struct dm_connector_state *dm_con_state = NULL;
+ struct dc_sink *sink;
+
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
+ struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
+ bool freesync_capable = false;
+ enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
+
+ if (!connector->state) {
+ DRM_ERROR("%s - Connector has no state", __func__);
+ goto update;
+ }
+
+ sink = amdgpu_dm_connector->dc_sink ?
+ amdgpu_dm_connector->dc_sink :
+ amdgpu_dm_connector->dc_em_sink;
+
+ if (!edid || !sink) {
+ dm_con_state = to_dm_connector_state(connector->state);
+
+ amdgpu_dm_connector->min_vfreq = 0;
+ amdgpu_dm_connector->max_vfreq = 0;
+ amdgpu_dm_connector->pixel_clock_mhz = 0;
+ connector->display_info.monitor_range.min_vfreq = 0;
+ connector->display_info.monitor_range.max_vfreq = 0;
+ freesync_capable = false;
+
+ goto update;
+ }
+
+ dm_con_state = to_dm_connector_state(connector->state);
+
+ if (!adev->dm.freesync_module)
+ goto update;
+
+ if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ sink->sink_signal == SIGNAL_TYPE_EDP)) {
+ bool edid_check_required = false;
+
+ if (is_dp_capable_without_timing_msa(adev->dm.dc,
+ amdgpu_dm_connector)) {
+ if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
+ freesync_capable = true;
+ amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+ } else {
+ edid_check_required = edid->version > 1 ||
+ (edid->version == 1 &&
+ edid->revision > 1);
+ }
+ }
+
+ if (edid_check_required) {
+ for (i = 0; i < 4; i++) {
+
+ timing = &edid->detailed_timings[i];
+ data = &timing->data.other_data;
+ range = &data->data.range;
+ /*
+ * Check if monitor has continuous frequency mode
+ */
+ if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ continue;
+				/*
+				 * Check for the range-limits flag only. If flags == 1,
+				 * no additional timing information is provided.
+				 * Default GTF, GTF secondary curve and CVT are not
+				 * supported.
+				 */
+ if (range->flags != 1)
+ continue;
+
+ connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
+ connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
+
+ if (edid->revision >= 4) {
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
+ connector->display_info.monitor_range.min_vfreq += 255;
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
+ connector->display_info.monitor_range.max_vfreq += 255;
+ }
+
+ amdgpu_dm_connector->min_vfreq =
+ connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq =
+ connector->display_info.monitor_range.max_vfreq;
+ amdgpu_dm_connector->pixel_clock_mhz =
+ range->pixel_clock_mhz * 10;
+
+ break;
+ }
+
+ if (amdgpu_dm_connector->max_vfreq -
+ amdgpu_dm_connector->min_vfreq > 10) {
+
+ freesync_capable = true;
+ }
+ }
+ parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+
+ if (vsdb_info.replay_mode) {
+ amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode;
+ amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version;
+ amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP;
+ }
+
+ } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+ if (i >= 0 && vsdb_info.freesync_supported) {
+ timing = &edid->detailed_timings[i];
+ data = &timing->data.other_data;
+
+ amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
+ amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ freesync_capable = true;
+
+ connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
+ connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
+ }
+ }
+
+ as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);
+
+ if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
+ i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+ if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
+
+ amdgpu_dm_connector->pack_sdp_v1_3 = true;
+ amdgpu_dm_connector->as_type = as_type;
+ amdgpu_dm_connector->vsdb_info = vsdb_info;
+
+ amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
+ amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ freesync_capable = true;
+
+ connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
+ connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
+ }
+ }
+
+update:
+ if (dm_con_state)
+ dm_con_state->freesync_capable = freesync_capable;
+
+ if (connector->vrr_capable_property)
+ drm_connector_set_vrr_capable_property(connector,
+ freesync_capable);
+}
+
+void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = adev->dm.dc;
+ int i;
+
+ mutex_lock(&adev->dm.dc_lock);
+ if (dc->current_state) {
+ for (i = 0; i < dc->current_state->stream_count; ++i)
+ dc->current_state->streams[i]
+ ->triggered_crtc_reset.enabled =
+ adev->dm.force_timing_sync;
+
+ dm_enable_per_frame_crtc_master_sync(dc->current_state);
+ dc_trigger_sync(dc, dc->current_state);
+ }
+ mutex_unlock(&adev->dm.dc_lock);
+}
+
+void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
+ u32 value, const char *func_name)
+{
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ drm_err(adev_to_drm(ctx->driver_context),
+			"invalid register write. address = 0\n");
+ return;
+ }
+#endif
+ cgs_write_register(ctx->cgs_device, address, value);
+ trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
+}
+
+uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
+ const char *func_name)
+{
+ u32 value;
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ drm_err(adev_to_drm(ctx->driver_context),
+ "invalid register read; address = 0\n");
+ return 0;
+ }
+#endif
+
+ if (ctx->dmub_srv &&
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
+ !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
+ ASSERT(false);
+ return 0;
+ }
+
+ value = cgs_read_register(ctx->cgs_device, address);
+
+ trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
+
+ return value;
+}
+
+int amdgpu_dm_process_dmub_aux_transfer_sync(
+ struct dc_context *ctx,
+ unsigned int link_index,
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct dmub_notification *p_notify = adev->dm.dmub_notify;
+ int ret = -1;
+
+ mutex_lock(&adev->dm.dpia_aux_lock);
+ if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
+ *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
+		DRM_ERROR("wait_for_completion_timeout timed out!\n");
+ *operation_result = AUX_RET_ERROR_TIMEOUT;
+ goto out;
+ }
+
+ if (p_notify->result != AUX_RET_SUCCESS) {
+ /*
+ * Transient states before tunneling is enabled could
+ * lead to this error. We can ignore this for now.
+ */
+ if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
+ DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
+ payload->address, payload->length,
+ p_notify->result);
+ }
+ *operation_result = AUX_RET_ERROR_INVALID_REPLY;
+ goto out;
+ }
+
+ payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
+ if (!payload->write && p_notify->aux_reply.length &&
+ (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
+
+ if (payload->length != p_notify->aux_reply.length) {
+ DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
+ p_notify->aux_reply.length,
+ payload->address, payload->length);
+ *operation_result = AUX_RET_ERROR_INVALID_REPLY;
+ goto out;
+ }
+
+ memcpy(payload->data, p_notify->aux_reply.data,
+ p_notify->aux_reply.length);
+ }
+
+ /* success */
+ ret = p_notify->aux_reply.length;
+ *operation_result = p_notify->result;
+out:
+ reinit_completion(&adev->dm.dmub_aux_transfer_done);
+ mutex_unlock(&adev->dm.dpia_aux_lock);
+ return ret;
+}
+
+int amdgpu_dm_process_dmub_set_config_sync(
+ struct dc_context *ctx,
+ unsigned int link_index,
+ struct set_config_cmd_payload *payload,
+ enum set_config_status *operation_result)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ bool is_cmd_complete;
+ int ret;
+
+ mutex_lock(&adev->dm.dpia_aux_lock);
+ is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
+ link_index, payload, adev->dm.dmub_notify);
+
+ if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
+ ret = 0;
+ *operation_result = adev->dm.dmub_notify->sc_status;
+ } else {
+		DRM_ERROR("wait_for_completion_timeout timed out!\n");
+ ret = -1;
+ *operation_result = SET_CONFIG_UNKNOWN_ERROR;
+ }
+
+ if (!is_cmd_complete)
+ reinit_completion(&adev->dm.dmub_aux_transfer_done);
+ mutex_unlock(&adev->dm.dpia_aux_lock);
+ return ret;
+}
+
+bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+{
+ return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
+}
+
+bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+{
+ return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
+}
diff --git a/rr-cache/d1b95d32568e36081f0d9fa2b5ec12cc7cb2ca0a/preimage b/rr-cache/d1b95d32568e36081f0d9fa2b5ec12cc7cb2ca0a/preimage
new file mode 100644
index 000000000000..fa44944538bd
--- /dev/null
+++ b/rr-cache/d1b95d32568e36081f0d9fa2b5ec12cc7cb2ca0a/preimage
@@ -0,0 +1,11571 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* The caprices of the preprocessor require that this be declared right here */
+#define CREATE_TRACE_POINTS
+
+#include "dm_services_types.h"
+#include "dc.h"
+#include "link_enc_cfg.h"
+#include "dc/inc/core_types.h"
+#include "dal_asic_id.h"
+#include "dmub/dmub_srv.h"
+#include "dc/inc/hw/dmcu.h"
+#include "dc/inc/hw/abm.h"
+#include "dc/dc_dmub_srv.h"
+#include "dc/dc_edid_parser.h"
+#include "dc/dc_stat.h"
+#include "dc/dc_state.h"
+#include "amdgpu_dm_trace.h"
+#include "dpcd_defs.h"
+#include "link/protocols/link_dpcd.h"
+#include "link_service_types.h"
+#include "link/protocols/link_dp_capability.h"
+#include "link/protocols/link_ddc.h"
+
+#include "vid.h"
+#include "amdgpu.h"
+#include "amdgpu_display.h"
+#include "amdgpu_ucode.h"
+#include "atom.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_plane.h"
+#include "amdgpu_dm_crtc.h"
+#include "amdgpu_dm_hdcp.h"
+#include <drm/display/drm_hdcp_helper.h>
+#include "amdgpu_dm_wb.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_atombios.h"
+
+#include "amd_shared.h"
+#include "amdgpu_dm_irq.h"
+#include "dm_helpers.h"
+#include "amdgpu_dm_mst_types.h"
+#if defined(CONFIG_DEBUG_FS)
+#include "amdgpu_dm_debugfs.h"
+#endif
+#include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/pm_runtime.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/component.h>
+#include <linux/dmi.h>
+
+#include <drm/display/drm_dp_mst_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_fixed.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_audio_component.h>
+#include <drm/drm_gem_atomic_helper.h>
+
+#include <acpi/video.h>
+
+#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
+
+#include "dcn/dcn_1_0_offset.h"
+#include "dcn/dcn_1_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "soc15_common.h"
+#include "vega10_ip_offset.h"
+
+#include "gc/gc_11_0_0_offset.h"
+#include "gc/gc_11_0_0_sh_mask.h"
+
+#include "modules/inc/mod_freesync.h"
+#include "modules/power/power_helpers.h"
+
+#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
+#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
+#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
+#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
+#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
+#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
+#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
+#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
+#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
+#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
+#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
+
+#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
+#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
+
+#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
+MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
+
+#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
+MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
+
+#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
+
+#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
+
+/* Number of bytes in PSP header for firmware. */
+#define PSP_HEADER_BYTES 0x100
+
+/* Number of bytes in PSP footer for firmware. */
+#define PSP_FOOTER_BYTES 0x100
+
+/**
+ * DOC: overview
+ *
+ * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
+ * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
+ * requests into DC requests, and DC responses into DRM responses.
+ *
+ * The root control structure is &struct amdgpu_display_manager.
+ */
+
+/* basic init/fini API */
+static int amdgpu_dm_init(struct amdgpu_device *adev);
+static void amdgpu_dm_fini(struct amdgpu_device *adev);
+static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
+
+static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
+{
+ switch (link->dpcd_caps.dongle_type) {
+ case DISPLAY_DONGLE_NONE:
+ return DRM_MODE_SUBCONNECTOR_Native;
+ case DISPLAY_DONGLE_DP_VGA_CONVERTER:
+ return DRM_MODE_SUBCONNECTOR_VGA;
+ case DISPLAY_DONGLE_DP_DVI_CONVERTER:
+ case DISPLAY_DONGLE_DP_DVI_DONGLE:
+ return DRM_MODE_SUBCONNECTOR_DVID;
+ case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
+ case DISPLAY_DONGLE_DP_HDMI_DONGLE:
+ return DRM_MODE_SUBCONNECTOR_HDMIA;
+ case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
+ default:
+ return DRM_MODE_SUBCONNECTOR_Unknown;
+ }
+}
+
+static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_link *link = aconnector->dc_link;
+ struct drm_connector *connector = &aconnector->base;
+ enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return;
+
+ if (aconnector->dc_sink)
+ subconnector = get_subconnector_type(link);
+
+ drm_object_property_set_value(&connector->base,
+ connector->dev->mode_config.dp_subconnector_property,
+ subconnector);
+}
+
+/*
+ * Initializes drm_device display-related structures, based on the information
+ * provided by DAL. The drm structures are: drm_crtc, drm_connector,
+ * drm_encoder and drm_mode_config.
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
+/* Removes and deallocates the drm structures created by the above function */
+static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
+
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *amdgpu_dm_connector,
+ u32 link_index,
+ struct amdgpu_encoder *amdgpu_encoder);
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index);
+
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
+
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
+
+static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
+static void handle_hpd_rx_irq(void *param);
+
+static bool
+is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state);
+/**
+ * dm_vblank_get_counter() - Get the number of vertical blanks for a CRTC
+ * @adev: desired amdgpu device
+ * @crtc: index of the CRTC to get the counter from
+ *
+ * Return: the vertical blank counter, or 0 if the CRTC has no stream
+ */
+static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+ struct amdgpu_crtc *acrtc = NULL;
+
+ if (crtc >= adev->mode_info.num_crtc)
+ return 0;
+
+ acrtc = adev->mode_info.crtcs[crtc];
+
+ if (!acrtc->dm_irq_params.stream) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
+ }
+
+ return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
+}
+
+static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ u32 *vbl, u32 *position)
+{
+ u32 v_blank_start, v_blank_end, h_position, v_position;
+ struct amdgpu_crtc *acrtc = NULL;
+ struct dc *dc = adev->dm.dc;
+
+ if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+ return -EINVAL;
+
+ acrtc = adev->mode_info.crtcs[crtc];
+
+ if (!acrtc->dm_irq_params.stream) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
+ }
+
+ if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
+ dc_allow_idle_optimizations(dc, false);
+
+ /*
+ * TODO rework base driver to use values directly.
+ * for now parse it back into reg-format
+ */
+ dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
+ &v_blank_start,
+ &v_blank_end,
+ &h_position,
+ &v_position);
+
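+
+	/* Vertical position/vblank start goes in the low 16 bits and
+	 * horizontal position/vblank end in the high 16 bits, per the
+	 * reg-format noted above.
+	 */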
+ *position = v_position | (h_position << 16);
+ *vbl = v_blank_start | (v_blank_end << 16);
+
+ return 0;
+}
+
+static bool dm_is_idle(void *handle)
+{
+ /* XXX todo */
+ return true;
+}
+
+static int dm_wait_for_idle(void *handle)
+{
+ /* XXX todo */
+ return 0;
+}
+
+static bool dm_check_soft_reset(void *handle)
+{
+ return false;
+}
+
+static int dm_soft_reset(void *handle)
+{
+ /* XXX todo */
+ return 0;
+}
+
+static struct amdgpu_crtc *
+get_crtc_by_otg_inst(struct amdgpu_device *adev,
+ int otg_inst)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_crtc *crtc;
+ struct amdgpu_crtc *amdgpu_crtc;
+
+ if (WARN_ON(otg_inst == -1))
+ return adev->mode_info.crtcs[0];
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (amdgpu_crtc->otg_inst == otg_inst)
+ return amdgpu_crtc;
+ }
+
+ return NULL;
+}
+
+static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
+ struct dm_crtc_state *new_state)
+{
+ if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
+ return true;
+ else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
+ return true;
+ else
+ return false;
+}
+
+static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
+ int planes_count)
+{
+ int i, j;
+
+ for (i = 0, j = planes_count - 1; i < j; i++, j--)
+ swap(array_of_surface_update[i], array_of_surface_update[j]);
+}
+
+/**
+ * update_planes_and_stream_adapter() - Send planes to be updated in DC
+ *
+ * DC has a generic way to update planes and stream via
+ * dc_update_planes_and_stream function; however, DM might need some
+ * adjustments and preparation before calling it. This function is a wrapper
+ * for the dc_update_planes_and_stream that does any required configuration
+ * before passing control to DC.
+ *
+ * @dc: Display Core control structure
+ * @update_type: specify whether it is FULL/MEDIUM/FAST update
+ * @planes_count: planes count to update
+ * @stream: stream state
+ * @stream_update: stream update
+ * @array_of_surface_update: dc surface update pointer
+ *
+ * Return: result of dc_update_planes_and_stream()
+ */
+static inline bool update_planes_and_stream_adapter(struct dc *dc,
+ int update_type,
+ int planes_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_surface_update *array_of_surface_update)
+{
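+	/*
+	 * DM builds array_of_surface_update in DRM plane order; reverse it
+	 * in place first, as DC appears to expect the opposite stacking
+	 * order.
+	 */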
+ reverse_planes_order(array_of_surface_update, planes_count);
+
+ /*
+ * Previous frame finished and HW is ready for optimization.
+ */
+ if (update_type == UPDATE_TYPE_FAST)
+ dc_post_update_surfaces_to_stream(dc);
+
+ return dc_update_planes_and_stream(dc,
+ array_of_surface_update,
+ planes_count,
+ stream,
+ stream_update);
+}
+
+/**
+ * dm_pflip_high_irq() - Handle pageflip interrupt
+ * @interrupt_params: common_irq_params carrying the device and IRQ source
+ *
+ * Handles the pageflip interrupt by notifying all interested parties
+ * that the pageflip has been completed.
+ */
+static void dm_pflip_high_irq(void *interrupt_params)
+{
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct drm_device *dev = adev_to_drm(adev);
+ unsigned long flags;
+ struct drm_pending_vblank_event *e;
+ u32 vpos, hpos, v_blank_start, v_blank_end;
+ bool vrr_active;
+
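+	/* Pageflip IRQ sources are enumerated per OTG, so subtracting the
+	 * base IRQ type yields the OTG instance that completed the flip.
+	 */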
+ amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
+
+ /* IRQ could occur when in initial stage */
+ /* TODO work and BO cleanup */
+ if (amdgpu_crtc == NULL) {
+ drm_dbg_state(dev, "CRTC is null, returning.\n");
+ return;
+ }
+
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+ drm_dbg_state(dev,
+ "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
+ amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED,
+ amdgpu_crtc->crtc_id, amdgpu_crtc);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+ return;
+ }
+
+ /* page flip completed. */
+ e = amdgpu_crtc->event;
+ amdgpu_crtc->event = NULL;
+
+ WARN_ON(!e);
+
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);
+
+ /* Fixed refresh rate, or VRR scanout position outside front-porch? */
+ if (!vrr_active ||
+ !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
+ &v_blank_end, &hpos, &vpos) ||
+ (vpos < v_blank_start)) {
+ /* Update to correct count and vblank timestamp if racing with
+ * vblank irq. This also updates to the correct vblank timestamp
+ * even in VRR mode, as scanout is past the front-porch atm.
+ */
+ drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
+
+ /* Wake up userspace by sending the pageflip event with proper
+ * count and timestamp of vblank of flip completion.
+ */
+ if (e) {
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
+
+ /* Event sent, so done with vblank for this flip */
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
+ }
+ } else if (e) {
+ /* VRR active and inside front-porch: vblank count and
+ * timestamp for pageflip event will only be up to date after
+ * drm_crtc_handle_vblank() has been executed from late vblank
+ * irq handler after start of back-porch (vline 0). We queue the
+ * pageflip event for send-out by drm_crtc_handle_vblank() with
+ * updated timestamp and count, once it runs after us.
+ *
+ * We need to open-code this instead of using the helper
+ * drm_crtc_arm_vblank_event(), as that helper would
+ * call drm_crtc_accurate_vblank_count(), which we must
+ * not call in VRR mode while we are in front-porch!
+ */
+
+ /* sequence will be replaced by real count during send-out. */
+ e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
+ e->pipe = amdgpu_crtc->crtc_id;
+
+ list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
+ e = NULL;
+ }
+
+ /* Keep track of vblank of this flip for flip throttling. We use the
+ * cooked hw counter, as that one incremented at start of this vblank
+ * of pageflip completion, so last_flip_vblank is the forbidden count
+ * for queueing new pageflips if vsync + VRR is enabled.
+ */
+ amdgpu_crtc->dm_irq_params.last_flip_vblank =
+ amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
+
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+
+ drm_dbg_state(dev,
+ "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
+ amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
+}
+
+static void dm_vupdate_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_crtc *acrtc;
+ struct drm_device *drm_dev;
+ struct drm_vblank_crtc *vblank;
+ ktime_t frame_duration_ns, previous_timestamp;
+ unsigned long flags;
+ int vrr_active;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
+
+ if (acrtc) {
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
+ drm_dev = acrtc->base.dev;
+ vblank = &drm_dev->vblank[acrtc->base.index];
+ previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
+ frame_duration_ns = vblank->time - previous_timestamp;
+
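+		/* Trace the measured frame duration and the instantaneous
+		 * refresh rate (NSEC_PER_SEC / frame_duration_ns) in Hz.
+		 */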
+ if (frame_duration_ns > 0) {
+ trace_amdgpu_refresh_rate_track(acrtc->base.index,
+ frame_duration_ns,
+ ktime_divns(NSEC_PER_SEC, frame_duration_ns));
+ atomic64_set(&irq_params->previous_timestamp, vblank->time);
+ }
+
+ drm_dbg_vbl(drm_dev,
+ "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
+ vrr_active);
+
+ /* Core vblank handling is done here after end of front-porch in
+ * vrr mode, as vblank timestamping will give valid results
+ * while now done after front-porch. This will also deliver
+ * page-flip completion events that have been queued to us
+ * if a pageflip happened inside front-porch.
+ */
+ if (vrr_active) {
+ amdgpu_dm_crtc_handle_vblank(acrtc);
+
+ /* BTR processing for pre-DCE12 ASICs */
+ if (acrtc->dm_irq_params.stream &&
+ adev->family < AMDGPU_FAMILY_AI) {
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ mod_freesync_handle_v_update(
+ adev->dm.freesync_module,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params);
+
+ dc_stream_adjust_vmin_vmax(
+ adev->dm.dc,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+ }
+ }
+ }
+}
+
+/**
+ * dm_crtc_high_irq() - Handles CRTC interrupt
+ * @interrupt_params: used for determining the CRTC instance
+ *
+ * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
+ * event handler.
+ */
+static void dm_crtc_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct drm_writeback_job *job;
+ struct amdgpu_crtc *acrtc;
+ unsigned long flags;
+ int vrr_active;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
+ if (!acrtc)
+ return;
+
+ if (acrtc->wb_pending) {
+ if (acrtc->wb_conn) {
+ spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);
+ job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
+ struct drm_writeback_job,
+ list_entry);
+ spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
+
+ if (job) {
+ unsigned int v_total, refresh_hz;
+ struct dc_stream_state *stream = acrtc->dm_irq_params.stream;
+
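+				/* Derive the refresh rate from the stream
+				 * timing and wait roughly one frame so the
+				 * writeback transfer can finish before it is
+				 * signaled and disabled.
+				 */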
+ v_total = stream->adjust.v_total_max ?
+ stream->adjust.v_total_max : stream->timing.v_total;
+ refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz *
+ 100LL, (v_total * stream->timing.h_total));
+ mdelay(1000 / refresh_hz);
+
+ drm_writeback_signal_completion(acrtc->wb_conn, 0);
+ dc_stream_fc_disable_writeback(adev->dm.dc,
+ acrtc->dm_irq_params.stream, 0);
+ }
+		} else {
+			DRM_ERROR("%s: no amdgpu_crtc wb_conn\n", __func__);
+		}
+ acrtc->wb_pending = false;
+ }
+
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
+
+ drm_dbg_vbl(adev_to_drm(adev),
+ "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
+ vrr_active, acrtc->dm_irq_params.active_planes);
+
+	/*
+ * Core vblank handling at start of front-porch is only possible
+ * in non-vrr mode, as only there vblank timestamping will give
+ * valid results while done in front-porch. Otherwise defer it
+ * to dm_vupdate_high_irq after end of front-porch.
+ */
+ if (!vrr_active)
+ amdgpu_dm_crtc_handle_vblank(acrtc);
+
+	/*
+ * Following stuff must happen at start of vblank, for crc
+ * computation and below-the-range btr support in vrr mode.
+ */
+ amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
+
+ /* BTR updates need to happen before VUPDATE on Vega and above. */
+ if (adev->family < AMDGPU_FAMILY_AI)
+ return;
+
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+
+ if (acrtc->dm_irq_params.stream &&
+ acrtc->dm_irq_params.vrr_params.supported &&
+ acrtc->dm_irq_params.freesync_config.state ==
+ VRR_STATE_ACTIVE_VARIABLE) {
+ mod_freesync_handle_v_update(adev->dm.freesync_module,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params);
+
+ dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
+
+ /*
+ * If there aren't any active_planes then DCH HUBP may be clock-gated.
+ * In that case, pageflip completion interrupts won't fire and pageflip
+ * completion events won't get delivered. Prevent this by sending
+ * pending pageflip events from here if a flip is still pending.
+ *
+ * If any planes are enabled, use dm_pflip_high_irq() instead, to
+ * avoid race conditions between flip programming and completion,
+ * which could cause too early flip completion events.
+ */
+ if (adev->family >= AMDGPU_FAMILY_RV &&
+ acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
+ acrtc->dm_irq_params.active_planes == 0) {
+ if (acrtc->event) {
+ drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
+ acrtc->event = NULL;
+ drm_crtc_vblank_put(&acrtc->base);
+ }
+ acrtc->pflip_status = AMDGPU_FLIP_NONE;
+ }
+
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+}
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+/**
+ * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
+ * DCN generation ASICs
+ * @interrupt_params: interrupt parameters
+ *
+ * Used to set crc window/read out crc value at vertical line 0 position
+ */
+static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_crtc *acrtc;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
+
+ if (!acrtc)
+ return;
+
+ amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
+}
+#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
+
+/**
+ * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * DMUB AUX or SET_CONFIG command completion processing callback.
+ * Copies the dmub notification to DM so it can be read by the AUX-command-
+ * issuing thread, and signals the event to wake up that thread.
+ */
+static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
+{
+ if (adev->dm.dmub_notify)
+ memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
+ if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
+ complete(&adev->dm.dmub_aux_transfer_done);
+}
+
+/**
+ * dmub_hpd_callback - DMUB HPD interrupt processing callback.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * DMUB HPD interrupt processing callback. Looks up the connector through
+ * the link index and calls a helper to do the processing.
+ */
+static void dmub_hpd_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct amdgpu_dm_connector *hpd_aconnector = NULL;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct dc_link *link;
+ u8 link_index = 0;
+ struct drm_device *dev;
+
+ if (adev == NULL)
+ return;
+
+ if (notify == NULL) {
+ DRM_ERROR("DMUB HPD callback notification was NULL");
+ return;
+ }
+
+	if (notify->link_index >= adev->dm.dc->link_count) {
+		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
+ return;
+ }
+
+ link_index = notify->link_index;
+ link = adev->dm.dc->links[link_index];
+ dev = adev->dm.ddev;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (link && aconnector->dc_link == link) {
+ if (notify->type == DMUB_NOTIFICATION_HPD)
+ DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
+ else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+ DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+ else
+ DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
+ notify->type, link_index);
+
+ hpd_aconnector = aconnector;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+
+ if (hpd_aconnector) {
+ if (notify->type == DMUB_NOTIFICATION_HPD)
+ handle_hpd_irq_helper(hpd_aconnector);
+ else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+ handle_hpd_rx_irq(hpd_aconnector);
+ }
+}
+
+/**
+ * register_dmub_notify_callback - Sets callback for DMUB notify
+ * @adev: amdgpu_device pointer
+ * @type: Type of dmub notification
+ * @callback: Dmub interrupt callback function
+ * @dmub_int_thread_offload: offload indicator
+ *
+ * API to register a dmub callback handler for a dmub notification.
+ * Also sets an indicator for whether the callback processing should be
+ * offloaded to the dmub interrupt handling thread.
+ * Return: true if successfully registered, false if the callback is NULL
+ * or the notification type is out of range
+ */
+static bool register_dmub_notify_callback(struct amdgpu_device *adev,
+ enum dmub_notification_type type,
+ dmub_notify_interrupt_callback_t callback,
+ bool dmub_int_thread_offload)
+{
+ if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
+ adev->dm.dmub_callback[type] = callback;
+ adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
+	} else {
+		return false;
+	}
+
+ return true;
+}
+
+static void dm_handle_hpd_work(struct work_struct *work)
+{
+ struct dmub_hpd_work *dmub_hpd_wrk;
+
+ dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
+
+ if (!dmub_hpd_wrk->dmub_notify) {
+ DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
+ return;
+ }
+
+ if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
+ dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
+ dmub_hpd_wrk->dmub_notify);
+ }
+
+ kfree(dmub_hpd_wrk->dmub_notify);
+ kfree(dmub_hpd_wrk);
+}
+
+#define DMUB_TRACE_MAX_READ 64
+/**
+ * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
+ * @interrupt_params: used for determining the Outbox instance
+ *
+ * Handles the Outbox interrupt by processing pending DMUB notifications
+ * and draining the DMUB trace buffer.
+ */
+static void dm_dmub_outbox1_low_irq(void *interrupt_params)
+{
+ struct dmub_notification notify;
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dmcub_trace_buf_entry entry = { 0 };
+ u32 count = 0;
+ struct dmub_hpd_work *dmub_hpd_wrk;
+ struct dc_link *plink = NULL;
+
+ if (dc_enable_dmub_notifications(adev->dm.dc) &&
+ irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
+
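+		/* Drain notifications until DMUB reports none pending,
+		 * dispatching each to its registered callback either inline
+		 * or via the delayed HPD workqueue.
+		 */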
+ do {
+ dc_stat_get_dmub_notification(adev->dm.dc, &notify);
+ if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
+ DRM_ERROR("DM: notify type %d invalid!", notify.type);
+ continue;
+ }
+ if (!dm->dmub_callback[notify.type]) {
+ DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
+ continue;
+ }
+			if (dm->dmub_thread_offload[notify.type]) {
+ dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
+ if (!dmub_hpd_wrk) {
+ DRM_ERROR("Failed to allocate dmub_hpd_wrk");
+ return;
+ }
+ dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
+ GFP_ATOMIC);
+ if (!dmub_hpd_wrk->dmub_notify) {
+ kfree(dmub_hpd_wrk);
+ DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
+ return;
+ }
+ INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
+ dmub_hpd_wrk->adev = adev;
+ if (notify.type == DMUB_NOTIFICATION_HPD) {
+ plink = adev->dm.dc->links[notify.link_index];
+ if (plink) {
+ plink->hpd_status =
+ notify.hpd_status == DP_HPD_PLUG;
+ }
+ }
+ queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
+ } else {
+ dm->dmub_callback[notify.type](adev, &notify);
+ }
+ } while (notify.pending_notification);
+ }
+
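+	/* Independently of the notifications above, drain the DMUB trace
+	 * buffer, bounded by DMUB_TRACE_MAX_READ entries per interrupt.
+	 */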
+ do {
+ if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
+ trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
+ entry.param0, entry.param1);
+
+ DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+ entry.trace_code, entry.tick_count, entry.param0, entry.param1);
+		} else {
+			break;
+		}
+
+ count++;
+
+ } while (count <= DMUB_TRACE_MAX_READ);
+
+ if (count > DMUB_TRACE_MAX_READ)
+		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
+}
+
+static int dm_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int dm_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+/* Prototypes of private functions */
+static int dm_early_init(void *handle);
+
+/* Allocate memory for FBC compressed data */
+static void amdgpu_dm_fbc_init(struct drm_connector *connector)
+{
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
+ struct dm_compressor_info *compressor = &adev->dm.compressor;
+ struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
+ struct drm_display_mode *mode;
+ unsigned long max_size = 0;
+
+ if (adev->dm.dc->fbc_compressor == NULL)
+ return;
+
+ if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
+ return;
+
+ if (compressor->bo_ptr)
+ return;
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (max_size < mode->htotal * mode->vtotal)
+ max_size = mode->htotal * mode->vtotal;
+ }
+
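+	/* Size the compressed buffer for the largest listed mode, at 4 bytes
+	 * per pixel.
+	 */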
+ if (max_size) {
+ int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
+ &compressor->gpu_addr, &compressor->cpu_addr);
+
+		if (r) {
+			DRM_ERROR("DM: Failed to initialize FBC\n");
+		} else {
+			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
+			DRM_INFO("DM: FBC alloc %lu\n", max_size * 4);
+		}
+	}
+}
+
+static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
+ int pipe, bool *enabled,
+ unsigned char *buf, int max_bytes)
+{
+ struct drm_device *dev = dev_get_drvdata(kdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct amdgpu_dm_connector *aconnector;
+ int ret = 0;
+
+ *enabled = false;
+
+ mutex_lock(&adev->dm.audio_lock);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->audio_inst != port)
+ continue;
+
+ *enabled = true;
+ ret = drm_eld_size(connector->eld);
+ memcpy(buf, connector->eld, min(max_bytes, ret));
+
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ mutex_unlock(&adev->dm.audio_lock);
+
+ DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
+
+ return ret;
+}
+
+static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
+ .get_eld = amdgpu_dm_audio_component_get_eld,
+};
+
+static int amdgpu_dm_audio_component_bind(struct device *kdev,
+ struct device *hda_kdev, void *data)
+{
+ struct drm_device *dev = dev_get_drvdata(kdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct drm_audio_component *acomp = data;
+
+ acomp->ops = &amdgpu_dm_audio_component_ops;
+ acomp->dev = kdev;
+ adev->dm.audio_component = acomp;
+
+ return 0;
+}
+
+static void amdgpu_dm_audio_component_unbind(struct device *kdev,
+ struct device *hda_kdev, void *data)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev));
+ struct drm_audio_component *acomp = data;
+
+ acomp->ops = NULL;
+ acomp->dev = NULL;
+ adev->dm.audio_component = NULL;
+}
+
+static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
+ .bind = amdgpu_dm_audio_component_bind,
+ .unbind = amdgpu_dm_audio_component_unbind,
+};
+
+static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
+{
+ int i, ret;
+
+ if (!amdgpu_audio)
+ return 0;
+
+ adev->mode_info.audio.enabled = true;
+
+ adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ adev->mode_info.audio.pin[i].channels = -1;
+ adev->mode_info.audio.pin[i].rate = -1;
+ adev->mode_info.audio.pin[i].bits_per_sample = -1;
+ adev->mode_info.audio.pin[i].status_bits = 0;
+ adev->mode_info.audio.pin[i].category_code = 0;
+ adev->mode_info.audio.pin[i].connected = false;
+ adev->mode_info.audio.pin[i].id =
+ adev->dm.dc->res_pool->audios[i]->inst;
+ adev->mode_info.audio.pin[i].offset = 0;
+ }
+
+ ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
+ if (ret < 0)
+ return ret;
+
+ adev->dm.audio_registered = true;
+
+ return 0;
+}
+
+static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_audio)
+ return;
+
+ if (!adev->mode_info.audio.enabled)
+ return;
+
+ if (adev->dm.audio_registered) {
+ component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
+ adev->dm.audio_registered = false;
+ }
+
+ /* TODO: Disable audio? */
+
+ adev->mode_info.audio.enabled = false;
+}
+
+static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
+{
+ struct drm_audio_component *acomp = adev->dm.audio_component;
+
+ if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
+ DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
+
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ pin, -1);
+ }
+}
+
+static int dm_dmub_hw_init(struct amdgpu_device *adev)
+{
+ const struct dmcub_firmware_header_v1_0 *hdr;
+ struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+ struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+ const struct firmware *dmub_fw = adev->dm.dmub_fw;
+ struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
+ struct abm *abm = adev->dm.dc->res_pool->abm;
+ struct dc_context *ctx = adev->dm.dc->ctx;
+ struct dmub_srv_hw_params hw_params;
+ enum dmub_status status;
+ const unsigned char *fw_inst_const, *fw_bss_data;
+ u32 i, fw_inst_const_size, fw_bss_data_size;
+ bool has_hw_support;
+
+ if (!dmub_srv)
+ /* DMUB isn't supported on the ASIC. */
+ return 0;
+
+ if (!fb_info) {
+ DRM_ERROR("No framebuffer info for DMUB service.\n");
+ return -EINVAL;
+ }
+
+ if (!dmub_fw) {
+ /* Firmware required for DMUB support. */
+ DRM_ERROR("No firmware provided for DMUB.\n");
+ return -EINVAL;
+ }
+
+ /* initialize register offsets for ASICs with runtime initialization available */
+ if (dmub_srv->hw_funcs.init_reg_offsets)
+ dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx);
+
+ status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
+ return -EINVAL;
+ }
+
+ if (!has_hw_support) {
+ DRM_INFO("DMUB unsupported on ASIC\n");
+ return 0;
+ }
+
+ /* Reset DMCUB if it was previously running - before we overwrite its memory. */
+ status = dmub_srv_hw_reset(dmub_srv);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Error resetting DMUB HW: %d\n", status);
+
+ hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
+
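+	/* The ucode image wraps the instruction/constant payload in a PSP
+	 * header and footer; skip the header to reach the payload and
+	 * subtract both when computing its size below.
+	 */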
+ fw_inst_const = dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ PSP_HEADER_BYTES;
+
+ fw_bss_data = dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ le32_to_cpu(hdr->inst_const_bytes);
+
+ /* Copy firmware and bios info into FB memory. */
+ fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+ PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+
+ fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+
+ /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
+ * amdgpu_ucode_init_single_fw will load dmub firmware
+ * fw_inst_const part to cw0; otherwise, the firmware back door load
+ * will be done by dm_dmub_hw_init
+ */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
+ fw_inst_const_size);
+ }
+
+ if (fw_bss_data_size)
+ memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
+ fw_bss_data, fw_bss_data_size);
+
+ /* Copy firmware bios info into FB memory. */
+ memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
+ adev->bios_size);
+
+ /* Reset regions that need to be reset. */
+ memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
+
+ memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
+
+ memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
+
+ /* Initialize hardware. */
+ memset(&hw_params, 0, sizeof(hw_params));
+ hw_params.fb_base = adev->gmc.fb_start;
+ hw_params.fb_offset = adev->vm_manager.vram_base_offset;
+
+ /* backdoor load firmware and trigger dmub running */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ hw_params.load_inst_const = true;
+
+ if (dmcu)
+ hw_params.psp_version = dmcu->psp_version;
+
+ for (i = 0; i < fb_info->num_fb; ++i)
+ hw_params.fb[i] = &fb_info->fb[i];
+
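+	/* DPIA (USB4 DP tunneling) only exists on a subset of DCN versions;
+	 * advertise it there and honor the debug bit that can disable it.
+	 */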
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ hw_params.dpia_supported = true;
+ hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
+ break;
+ default:
+ break;
+ }
+
+ status = dmub_srv_hw_init(dmub_srv, &hw_params);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error initializing DMUB HW: %d\n", status);
+ return -EINVAL;
+ }
+
+ /* Wait for firmware load to finish. */
+ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+
+ /* Init DMCU and ABM if available. */
+ if (dmcu && abm) {
+ dmcu->funcs->dmcu_init(dmcu);
+ abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
+ }
+
+ if (!adev->dm.dc->ctx->dmub_srv)
+ adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+ if (!adev->dm.dc->ctx->dmub_srv) {
+ DRM_ERROR("Couldn't allocate DC DMUB server!\n");
+ return -ENOMEM;
+ }
+
+ DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
+ adev->dm.dmcub_fw_version);
+
+ return 0;
+}
+
+static void dm_dmub_hw_resume(struct amdgpu_device *adev)
+{
+ struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+ enum dmub_status status;
+ bool init;
+
+ if (!dmub_srv) {
+ /* DMUB isn't supported on the ASIC. */
+ return;
+ }
+
+ status = dmub_srv_is_hw_init(dmub_srv, &init);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("DMUB hardware init check failed: %d\n", status);
+
+ if (status == DMUB_STATUS_OK && init) {
+ /* Wait for firmware load to finish. */
+ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ } else {
+ /* Perform the full hardware initialization. */
+ dm_dmub_hw_init(adev);
+ }
+}
+
+static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
+{
+ u64 pt_base;
+ u32 logical_addr_low;
+ u32 logical_addr_high;
+ u32 agp_base, agp_bot, agp_top;
+ PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
+
+ memset(pa_config, 0, sizeof(*pa_config));
+
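+	/* The AGP aperture is programmed in 16 MB (1 << 24) units and the
+	 * system aperture in 256 KB (1 << 18) units, hence the shifts below.
+	 */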
+ agp_base = 0;
+ agp_bot = adev->gmc.agp_start >> 24;
+ agp_top = adev->gmc.agp_end >> 24;
+
+ /* AGP aperture is disabled */
+ if (agp_bot > agp_top) {
+ logical_addr_low = adev->gmc.fb_start >> 18;
+ if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+ AMD_APU_IS_RENOIR |
+ AMD_APU_IS_GREEN_SARDINE))
+ /*
+			 * Raven2 has a HW issue preventing it from using VRAM
+			 * beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. Work around
+			 * it by raising the system aperture high address by
+			 * one to avoid the VM fault and hardware hang.
+ */
+ logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
+ else
+ logical_addr_high = adev->gmc.fb_end >> 18;
+ } else {
+ logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
+ if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+ AMD_APU_IS_RENOIR |
+ AMD_APU_IS_GREEN_SARDINE))
+ /*
+			 * Raven2 has a HW issue preventing it from using VRAM
+			 * beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. Work around
+			 * it by raising the system aperture high address by
+			 * one to avoid the VM fault and hardware hang.
+ */
+ logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
+ else
+ logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
+ }
+
+ pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_base.high_part = upper_32_bits(pt_base);
+ page_table_base.low_part = lower_32_bits(pt_base);
+
+ pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
+ pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
+
+ pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
+ pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
+ pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
+
+ pa_config->system_aperture.fb_base = adev->gmc.fb_start;
+ pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
+ pa_config->system_aperture.fb_top = adev->gmc.fb_end;
+
+ pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
+ pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
+ pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
+
+ pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
+}
+
+static void force_connector_state(
+ struct amdgpu_dm_connector *aconnector,
+ enum drm_connector_force force_state)
+{
+ struct drm_connector *connector = &aconnector->base;
+
+ mutex_lock(&connector->dev->mode_config.mutex);
+ aconnector->base.force = force_state;
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
+ mutex_lock(&aconnector->hpd_lock);
+ drm_kms_helper_connector_hotplug_event(connector);
+ mutex_unlock(&aconnector->hpd_lock);
+}
+
+static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
+{
+ struct hpd_rx_irq_offload_work *offload_work;
+ struct amdgpu_dm_connector *aconnector;
+ struct dc_link *dc_link;
+ struct amdgpu_device *adev;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ unsigned long flags;
+ union test_response test_response;
+
+ memset(&test_response, 0, sizeof(test_response));
+
+ offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
+ aconnector = offload_work->offload_wq->aconnector;
+
+ if (!aconnector) {
+ DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
+ goto skip;
+ }
+
+ adev = drm_to_adev(aconnector->base.dev);
+ dc_link = aconnector->dc_link;
+
+ mutex_lock(&aconnector->hpd_lock);
+ if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+ mutex_unlock(&aconnector->hpd_lock);
+
+ if (new_connection_type == dc_connection_none)
+ goto skip;
+
+ if (amdgpu_in_reset(adev))
+ goto skip;
+
+ if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+ offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
+ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+ offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
+ spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+ goto skip;
+ }
+
+ mutex_lock(&adev->dm.dc_lock);
+ if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+ dc_link_dp_handle_automated_test(dc_link);
+
+ if (aconnector->timing_changed) {
+ /* force connector disconnect and reconnect */
+ force_connector_state(aconnector, DRM_FORCE_OFF);
+ msleep(100);
+ force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
+ }
+
+ test_response.bits.ACK = 1;
+
+ core_link_write_dpcd(
+ dc_link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+ } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+ dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
+ dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+		/* offload_work->data comes from handle_hpd_rx_irq->
+		 * schedule_hpd_rx_offload_work, i.e. deferred handling of an
+		 * hpd short pulse. By now the link status may have changed,
+		 * so read the latest link status from the dpcd registers; if
+		 * the link is still good, skip re-running link training.
+		 */
+ union hpd_irq_data irq_data;
+
+ memset(&irq_data, 0, sizeof(irq_data));
+
+		/* Clear the flag before dc_link_dp_handle_link_loss so that a
+		 * new link-loss request can be queued if the link drops again
+		 * at the end of dc_link_dp_handle_link_loss.
+		 */
+ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+ offload_work->offload_wq->is_handling_link_loss = false;
+ spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+
+ if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
+ dc_link_check_link_loss_status(dc_link, &irq_data))
+ dc_link_dp_handle_link_loss(dc_link);
+ }
+ mutex_unlock(&adev->dm.dc_lock);
+
+skip:
+ kfree(offload_work);
+}
+
+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
+{
+ int max_caps = dc->caps.max_links;
+ int i = 0;
+ struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
+
+ hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
+
+ if (!hpd_rx_offload_wq)
+ return NULL;
+
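+	/* One single-threaded workqueue per link keeps HPD RX offload work
+	 * for a given link serialized while links remain independent.
+	 */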
+ for (i = 0; i < max_caps; i++) {
+ hpd_rx_offload_wq[i].wq =
+ create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
+
+ if (hpd_rx_offload_wq[i].wq == NULL) {
+			DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq!");
+ goto out_err;
+ }
+
+ spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
+ }
+
+ return hpd_rx_offload_wq;
+
+out_err:
+ for (i = 0; i < max_caps; i++) {
+ if (hpd_rx_offload_wq[i].wq)
+ destroy_workqueue(hpd_rx_offload_wq[i].wq);
+ }
+ kfree(hpd_rx_offload_wq);
+ return NULL;
+}
+
+struct amdgpu_stutter_quirk {
+ u16 chip_vendor;
+ u16 chip_device;
+ u16 subsys_vendor;
+ u16 subsys_device;
+ u8 revision;
+};
+
+static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
+ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+ { 0, 0, 0, 0, 0 },
+};
+
+static bool dm_should_disable_stutter(struct pci_dev *pdev)
+{
+ const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
+
+ while (p && p->chip_device != 0) {
+ if (pdev->vendor == p->chip_vendor &&
+ pdev->device == p->chip_device &&
+ pdev->subsystem_vendor == p->subsys_vendor &&
+ pdev->subsystem_device == p->subsys_device &&
+ pdev->revision == p->revision) {
+ return true;
+ }
+ ++p;
+ }
+ return false;
+}
+
+static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
+ },
+ },
+ {}
+ /* TODO: refactor this from a fixed table to a dynamic option */
+};
+
+static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+{
+ const struct dmi_system_id *dmi_id;
+
+ dm->aux_hpd_discon_quirk = false;
+
+ dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
+ if (dmi_id) {
+ dm->aux_hpd_discon_quirk = true;
+ DRM_INFO("aux_hpd_discon_quirk attached\n");
+ }
+}
+
+static int amdgpu_dm_init(struct amdgpu_device *adev)
+{
+ struct dc_init_data init_data;
+ struct dc_callback_init init_params;
+ int r;
+
+ adev->dm.ddev = adev_to_drm(adev);
+ adev->dm.adev = adev;
+
+ /* Zero all the fields */
+ memset(&init_data, 0, sizeof(init_data));
+ memset(&init_params, 0, sizeof(init_params));
+
+ mutex_init(&adev->dm.dpia_aux_lock);
+ mutex_init(&adev->dm.dc_lock);
+ mutex_init(&adev->dm.audio_lock);
+
+ if (amdgpu_dm_irq_init(adev)) {
+ DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+ goto error;
+ }
+
+ init_data.asic_id.chip_family = adev->family;
+
+ init_data.asic_id.pci_revision_id = adev->pdev->revision;
+ init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+ init_data.asic_id.chip_id = adev->pdev->device;
+
+ init_data.asic_id.vram_width = adev->gmc.vram_width;
+ /* TODO: initialize init_data.asic_id.vram_type here!!!! */
+ init_data.asic_id.atombios_base_address =
+ adev->mode_info.atom_context->bios;
+
+ init_data.driver = adev;
+
+ adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
+
+ if (!adev->dm.cgs_device) {
+ DRM_ERROR("amdgpu: failed to create cgs device.\n");
+ goto error;
+ }
+
+ init_data.cgs_device = adev->dm.cgs_device;
+
+ init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 1, 0):
+ switch (adev->dm.dmcub_fw_version) {
+ case 0: /* development */
+ case 0x1: /* linux-firmware.git hash 6d9f399 */
+ case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
+ init_data.flags.disable_dmcu = false;
+ break;
+ default:
+ init_data.flags.disable_dmcu = true;
+ }
+ break;
+ case IP_VERSION(2, 0, 3):
+ init_data.flags.disable_dmcu = true;
+ break;
+ default:
+ break;
+ }
+
+	/* APUs support S/G display by default, except:
+	 * ASICs before Carrizo,
+	 * RAVEN1 (users reported stability issues)
+ */
+
+ if (adev->asic_type < CHIP_CARRIZO) {
+ init_data.flags.gpu_vm_support = false;
+ } else if (adev->asic_type == CHIP_RAVEN) {
+ if (adev->apu_flags & AMD_APU_IS_RAVEN)
+ init_data.flags.gpu_vm_support = false;
+ else
+ init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
+ } else {
+ init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
+ }
+
+ adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;
+
+ if (amdgpu_dc_feature_mask & DC_FBC_MASK)
+ init_data.flags.fbc_support = true;
+
+ if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
+ init_data.flags.multi_mon_pp_mclk_switch = true;
+
+ if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
+ init_data.flags.disable_fractional_pwm = true;
+
+ if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
+ init_data.flags.edp_no_power_sequencing = true;
+
+ if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
+ init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
+ if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
+ init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
+
+ init_data.flags.seamless_boot_edp_requested = false;
+
+ if (amdgpu_device_seamless_boot_supported(adev)) {
+ init_data.flags.seamless_boot_edp_requested = true;
+ init_data.flags.allow_seamless_boot_optimization = true;
+ DRM_INFO("Seamless boot condition check passed\n");
+ }
+
+ init_data.flags.enable_mipi_converter_optimization = true;
+
+ init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
+ init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
+ init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
+ init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+ else
+ init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+
+ init_data.flags.disable_ips_in_vpb = 0;
+
+ /* Enable DWB for tested platforms only */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
+ init_data.num_virtual_links = 1;
+
+ INIT_LIST_HEAD(&adev->dm.da_list);
+
+ retrieve_dmi_info(&adev->dm);
+
+ /* Display Core create. */
+ adev->dm.dc = dc_create(&init_data);
+
+ if (adev->dm.dc) {
+ DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
+ dce_version_to_string(adev->dm.dc->ctx->dce_version));
+ } else {
+ DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
+ goto error;
+ }
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
+ adev->dm.dc->debug.force_single_disp_pipe_split = false;
+ adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+ }
+
+ if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
+ adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+ if (dm_should_disable_stutter(adev->pdev))
+ adev->dm.dc->debug.disable_stutter = true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
+ adev->dm.dc->debug.disable_stutter = true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
+ adev->dm.dc->debug.disable_dsc = true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
+ adev->dm.dc->debug.disable_clock_gate = true;
+
+ if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
+ adev->dm.dc->debug.force_subvp_mclk_switch = true;
+
+ if (amdgpu_dc_debug_mask & DC_ENABLE_DML2)
+ adev->dm.dc->debug.using_dml2 = true;
+
+ adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
+
+ /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
+ adev->dm.dc->debug.ignore_cable_id = true;
+
+ if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
+ DRM_INFO("DP-HDMI FRL PCON supported\n");
+
+ r = dm_dmub_hw_init(adev);
+ if (r) {
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ goto error;
+ }
+
+ dc_hardware_init(adev->dm.dc);
+
+ adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
+ if (!adev->dm.hpd_rx_offload_wq) {
+ DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
+ goto error;
+ }
+
+ if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
+ struct dc_phy_addr_space_config pa_config;
+
+ mmhub_read_system_context(adev, &pa_config);
+
+ // Call the DC init_memory func
+ dc_setup_system_context(adev->dm.dc, &pa_config);
+ }
+
+ adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
+ if (!adev->dm.freesync_module) {
+		DRM_ERROR("amdgpu: failed to initialize freesync_module.\n");
+	} else {
+		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
+				 adev->dm.freesync_module);
+	}
+
+ amdgpu_dm_init_color_mod();
+
+ if (adev->dm.dc->caps.max_links > 0) {
+ adev->dm.vblank_control_workqueue =
+ create_singlethread_workqueue("dm_vblank_control_workqueue");
+ if (!adev->dm.vblank_control_workqueue)
+ DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
+ }
+
+ if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
+ adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
+
+ if (!adev->dm.hdcp_workqueue)
+ DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
+ else
+ DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
+
+ dc_init_callbacks(adev->dm.dc, &init_params);
+ }
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ init_completion(&adev->dm.dmub_aux_transfer_done);
+ adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
+ if (!adev->dm.dmub_notify) {
+			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
+ goto error;
+ }
+
+ adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
+ if (!adev->dm.delayed_hpd_wq) {
+ DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
+ goto error;
+ }
+
+ amdgpu_dm_outbox_init(adev);
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
+ dmub_aux_setconfig_callback, false)) {
+			DRM_ERROR("amdgpu: failed to register dmub aux callback");
+ goto error;
+ }
+ /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+ * It is expected that DMUB will resend any pending notifications at this point. Note
+		 * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to
+		 * align with the legacy interface initialization sequence. Connection status will be
+		 * proactively detected once in amdgpu_dm_initialize_drm_device.
+ */
+ dc_enable_dmub_outbox(adev->dm.dc);
+
+ /* DPIA trace goes to dmesg logs only if outbox is enabled */
+ if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE)
+ dc_dmub_srv_enable_dpia_trace(adev->dm.dc);
+ }
+
+ if (amdgpu_dm_initialize_drm_device(adev)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
+ /* create fake encoders for MST */
+ dm_dp_create_fake_mst_encoders(adev);
+
+ /* TODO: Add_display_info? */
+
+ /* TODO use dynamic cursor width */
+ adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
+ adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
+
+ if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
+ if (!adev->dm.secure_display_ctxs)
+ DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+#endif
+
+ DRM_DEBUG_DRIVER("KMS initialized.\n");
+
+ return 0;
+error:
+ amdgpu_dm_fini(adev);
+
+ return -EINVAL;
+}
+
+static int amdgpu_dm_early_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_dm_audio_fini(adev);
+
+ return 0;
+}
+
+static void amdgpu_dm_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (adev->dm.vblank_control_workqueue) {
+ destroy_workqueue(adev->dm.vblank_control_workqueue);
+ adev->dm.vblank_control_workqueue = NULL;
+ }
+
+ amdgpu_dm_destroy_drm_device(&adev->dm);
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (adev->dm.secure_display_ctxs) {
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (adev->dm.secure_display_ctxs[i].crtc) {
+ flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
+ flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+ }
+ }
+ kfree(adev->dm.secure_display_ctxs);
+ adev->dm.secure_display_ctxs = NULL;
+ }
+#endif
+ if (adev->dm.hdcp_workqueue) {
+ hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
+ adev->dm.hdcp_workqueue = NULL;
+ }
+
+ if (adev->dm.dc) {
+ dc_deinit_callbacks(adev->dm.dc);
+ dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
+ if (dc_enable_dmub_notifications(adev->dm.dc)) {
+ kfree(adev->dm.dmub_notify);
+ adev->dm.dmub_notify = NULL;
+ destroy_workqueue(adev->dm.delayed_hpd_wq);
+ adev->dm.delayed_hpd_wq = NULL;
+ }
+ }
+
+ if (adev->dm.dmub_bo)
+ amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
+ &adev->dm.dmub_bo_gpu_addr,
+ &adev->dm.dmub_bo_cpu_addr);
+
+ if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) {
+ for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+ if (adev->dm.hpd_rx_offload_wq[i].wq) {
+ destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
+ adev->dm.hpd_rx_offload_wq[i].wq = NULL;
+ }
+ }
+
+ kfree(adev->dm.hpd_rx_offload_wq);
+ adev->dm.hpd_rx_offload_wq = NULL;
+ }
+
+ /* DC Destroy TODO: Replace destroy DAL */
+ if (adev->dm.dc)
+ dc_destroy(&adev->dm.dc);
+ /*
+ * TODO: pageflip, vlank interrupt
+ *
+ * amdgpu_dm_irq_fini(adev);
+ */
+
+ if (adev->dm.cgs_device) {
+ amdgpu_cgs_destroy_device(adev->dm.cgs_device);
+ adev->dm.cgs_device = NULL;
+ }
+ if (adev->dm.freesync_module) {
+ mod_freesync_destroy(adev->dm.freesync_module);
+ adev->dm.freesync_module = NULL;
+ }
+
+ mutex_destroy(&adev->dm.audio_lock);
+ mutex_destroy(&adev->dm.dc_lock);
+ mutex_destroy(&adev->dm.dpia_aux_lock);
+}
+
+static int load_dmcu_fw(struct amdgpu_device *adev)
+{
+ const char *fw_name_dmcu = NULL;
+ int r;
+ const struct dmcu_firmware_header_v1_0 *hdr;
+
+ switch (adev->asic_type) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+#endif
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ return 0;
+ case CHIP_NAVI12:
+ fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
+ break;
+ case CHIP_RAVEN:
+ if (ASICREV_IS_PICASSO(adev->external_rev_id))
+ fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
+ else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
+ fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
+ else
+ return 0;
+ break;
+ default:
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ return 0;
+ default:
+ break;
+ }
+ DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
+ return -EINVAL;
+ }
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
+ return 0;
+ }
+
+ r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
+ if (r == -ENODEV) {
+ /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
+ DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
+ adev->dm.fw_dmcu = NULL;
+ return 0;
+ }
+ if (r) {
+ dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
+ fw_name_dmcu);
+ amdgpu_ucode_release(&adev->dm.fw_dmcu);
+ return r;
+ }
+
+ hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
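+	/* The DMCU image carries two ucode sections, the ERAM payload and
+	 * the interrupt vectors (INTV); register both for PSP loading.
+	 */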
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
+
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
+
+ adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
+
+ return 0;
+}
+
+static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
+{
+ struct amdgpu_device *adev = ctx;
+
+ return dm_read_reg(adev->dm.dc->ctx, address);
+}
+
+static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
+ uint32_t value)
+{
+ struct amdgpu_device *adev = ctx;
+
+ return dm_write_reg(adev->dm.dc->ctx, address, value);
+}
+
+static int dm_dmub_sw_init(struct amdgpu_device *adev)
+{
+ struct dmub_srv_create_params create_params;
+ struct dmub_srv_region_params region_params;
+ struct dmub_srv_region_info region_info;
+ struct dmub_srv_memory_params memory_params;
+ struct dmub_srv_fb_info *fb_info;
+ struct dmub_srv *dmub_srv;
+ const struct dmcub_firmware_header_v1_0 *hdr;
+ enum dmub_asic dmub_asic;
+ enum dmub_status status;
+ static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = {
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_0_INST_CONST
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_1_STACK
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_2_BSS_DATA
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_3_VBIOS
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_4_MAILBOX
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE
+ };
+ int r;
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 1, 0):
+ dmub_asic = DMUB_ASIC_DCN21;
+ break;
+ case IP_VERSION(3, 0, 0):
+ dmub_asic = DMUB_ASIC_DCN30;
+ break;
+ case IP_VERSION(3, 0, 1):
+ dmub_asic = DMUB_ASIC_DCN301;
+ break;
+ case IP_VERSION(3, 0, 2):
+ dmub_asic = DMUB_ASIC_DCN302;
+ break;
+ case IP_VERSION(3, 0, 3):
+ dmub_asic = DMUB_ASIC_DCN303;
+ break;
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
+ break;
+ case IP_VERSION(3, 1, 4):
+ dmub_asic = DMUB_ASIC_DCN314;
+ break;
+ case IP_VERSION(3, 1, 5):
+ dmub_asic = DMUB_ASIC_DCN315;
+ break;
+ case IP_VERSION(3, 1, 6):
+ dmub_asic = DMUB_ASIC_DCN316;
+ break;
+ case IP_VERSION(3, 2, 0):
+ dmub_asic = DMUB_ASIC_DCN32;
+ break;
+ case IP_VERSION(3, 2, 1):
+ dmub_asic = DMUB_ASIC_DCN321;
+ break;
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ dmub_asic = DMUB_ASIC_DCN35;
+ break;
+ default:
+ /* ASIC doesn't support DMUB. */
+ return 0;
+ }
+
+ hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
+ adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
+ AMDGPU_UCODE_ID_DMCUB;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
+ adev->dm.dmub_fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
+
+ DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+ adev->dm.dmcub_fw_version);
+ }
+
+ adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
+ dmub_srv = adev->dm.dmub_srv;
+
+ if (!dmub_srv) {
+ DRM_ERROR("Failed to allocate DMUB service!\n");
+ return -ENOMEM;
+ }
+
+ memset(&create_params, 0, sizeof(create_params));
+ create_params.user_ctx = adev;
+ create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
+ create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
+ create_params.asic = dmub_asic;
+
+ /* Create the DMUB service. */
+ status = dmub_srv_create(dmub_srv, &create_params);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error creating DMUB service: %d\n", status);
+ return -EINVAL;
+ }
+
+ /* Calculate the size of all the regions for the DMUB service. */
+ memset(&region_params, 0, sizeof(region_params));
+
+ region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+ PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+ region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+ region_params.vbios_size = adev->bios_size;
+ region_params.fw_bss_data = region_params.bss_data_size ?
+ adev->dm.dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ le32_to_cpu(hdr->inst_const_bytes) : NULL;
+ region_params.fw_inst_const =
+ adev->dm.dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ PSP_HEADER_BYTES;
+ region_params.window_memory_type = window_memory_type;
+
+ status = dmub_srv_calc_region_info(dmub_srv, &region_params,
+ &region_info);
+
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error calculating DMUB region info: %d\n", status);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate a framebuffer based on the total size of all the regions.
+ * TODO: Move this into GART.
+ */
+ r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->dm.dmub_bo,
+ &adev->dm.dmub_bo_gpu_addr,
+ &adev->dm.dmub_bo_cpu_addr);
+ if (r)
+ return r;
+
+ /* Rebase the regions on the framebuffer address. */
+ memset(&memory_params, 0, sizeof(memory_params));
+ memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
+ memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
+ memory_params.region_info = &region_info;
+ memory_params.window_memory_type = window_memory_type;
+
+ adev->dm.dmub_fb_info =
+ kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
+ fb_info = adev->dm.dmub_fb_info;
+
+ if (!fb_info) {
+ DRM_ERROR(
+ "Failed to allocate framebuffer info for DMUB service!\n");
+ return -ENOMEM;
+ }
+
+ status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
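+
+/*
+ * A minimal sketch of the three-step DMUB memory handshake performed in
+ * dm_dmub_sw_init() above, with error handling elided (illustrative only;
+ * `bo`, `gpu_addr` and `cpu_addr` are stand-ins for the adev->dm.dmub_bo*
+ * fields):
+ *
+ *	dmub_srv_calc_region_info(dmub_srv, &region_params, &region_info);
+ *	amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
+ *				AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT,
+ *				&bo, &gpu_addr, &cpu_addr);
+ *	dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
+ *
+ * Regions are sized first, then backed by a single BO, and finally rebased
+ * onto that BO's CPU/GPU addresses.
+ */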
+
+static int dm_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = dm_dmub_sw_init(adev);
+ if (r)
+ return r;
+
+ return load_dmcu_fw(adev);
+}
+
+static int dm_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ kfree(adev->dm.dmub_fb_info);
+ adev->dm.dmub_fb_info = NULL;
+
+ if (adev->dm.dmub_srv) {
+ dmub_srv_destroy(adev->dm.dmub_srv);
+ kfree(adev->dm.dmub_srv);
+ adev->dm.dmub_srv = NULL;
+ }
+
+ amdgpu_ucode_release(&adev->dm.dmub_fw);
+ amdgpu_ucode_release(&adev->dm.fw_dmcu);
+
+ return 0;
+}
+
+static int detect_mst_link_for_all_connectors(struct drm_device *dev)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ int ret = 0;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type == dc_connection_mst_branch &&
+ aconnector->mst_mgr.aux) {
+ DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+ aconnector,
+ aconnector->base.base.id);
+
+ ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+ if (ret < 0) {
+ DRM_ERROR("DM_MST: Failed to start MST\n");
+ aconnector->dc_link->type =
+ dc_connection_single;
+ ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link);
+ break;
+ }
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+
+ return ret;
+}
+
+static int dm_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ struct dmcu_iram_parameters params;
+ unsigned int linear_lut[16];
+ int i;
+ struct dmcu *dmcu = NULL;
+
+ dmcu = adev->dm.dc->res_pool->dmcu;
+
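+	/* Build a 16-point linear (identity) LUT: 0xFFFF * i / 15 steps in
+	 * increments of 0x1111, so entry 1 = 0x1111 and entry 15 = 0xFFFF.
+	 */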
+ for (i = 0; i < 16; i++)
+ linear_lut[i] = 0xFFFF * i / 15;
+
+ params.set = 0;
+ params.backlight_ramping_override = false;
+ params.backlight_ramping_start = 0xCCCC;
+ params.backlight_ramping_reduction = 0xCCCCCCCC;
+ params.backlight_lut_array_size = 16;
+ params.backlight_lut_array = linear_lut;
+
+	/* Minimum backlight level after ABM reduction; don't allow it to go
+	 * below 1%: 0xFFFF * 0.01 = 0x28F.
+	 */
+ params.min_abm_backlight = 0x28F;
+	/* When ABM is implemented in DMCUB, the dmcu object will be NULL.
+	 * ABM 2.4 and newer are implemented in DMCUB.
+	 */
+ if (dmcu) {
+ if (!dmcu_load_iram(dmcu, params))
+ return -EINVAL;
+ } else if (adev->dm.dc->ctx->dmub_srv) {
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ int edp_num;
+
+ dc_get_edp_links(adev->dm.dc, edp_links, &edp_num);
+ for (i = 0; i < edp_num; i++) {
+ if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
+ return -EINVAL;
+ }
+ }
+
+ return detect_mst_link_for_all_connectors(adev_to_drm(adev));
+}
+
+static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
+{
+ int ret;
+ u8 guid[16];
+ u64 tmp64;
+
+ mutex_lock(&mgr->lock);
+ if (!mgr->mst_primary)
+ goto out_fail;
+
+ if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
+ drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+
+ /* Some hubs forget their guids after they resume */
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (ret != 16) {
+ drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+
+ if (memchr_inv(guid, 0, 16) == NULL) {
+ tmp64 = get_jiffies_64();
+ memcpy(&guid[0], &tmp64, sizeof(u64));
+ memcpy(&guid[8], &tmp64, sizeof(u64));
+
+ ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
+
+ if (ret != 16) {
+ drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+ }
+
+ memcpy(mgr->mst_primary->guid, guid, 16);
+
+out_fail:
+ mutex_unlock(&mgr->lock);
+}
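+
+/*
+ * Note on the GUID fallback above: some hubs report an all-zero GUID after
+ * resume, so a 16-byte GUID is synthesized by copying the 64-bit jiffies
+ * value into both halves of the buffer before writing it back via DP_GUID.
+ */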
+
+static void s3_handle_mst(struct drm_device *dev, bool suspend)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct drm_dp_mst_topology_mgr *mgr;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type != dc_connection_mst_branch ||
+ aconnector->mst_root)
+ continue;
+
+ mgr = &aconnector->mst_mgr;
+
+ if (suspend) {
+ drm_dp_mst_topology_mgr_suspend(mgr);
+ } else {
+			/* If extended timeout is supported by the hardware,
+			 * default to the LTTPR timeout (3.2 ms) first, as a W/A
+			 * for the DP link-layer CTS 4.2.1.1 regression
+			 * introduced by the CTS spec requirement update.
+			 */
+ try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
+ if (!dp_is_lttpr_present(aconnector->dc_link))
+ try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
+
+			/* TODO: move resume_mst_branch_status() back into DRM
+			 * MST resume once topology probing is pulled out of MST
+			 * resume into a second resume step. That second step
+			 * should run after the old state has been restored
+			 * (i.e. after drm_atomic_helper_resume()).
+			 */
+ resume_mst_branch_status(mgr);
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+}
+
+static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
+	 * depends on the Windows driver's DC implementation.
+	 * For Navi1x, the clock settings of the DCN watermarks are fixed and
+	 * should be passed to the SMU during boot-up and on resume from S3.
+	 * Boot-up: DC calculates the DCN watermark clock settings within
+	 * dc_create (dcn20_resource_construct), then calls the pplib
+	 * functions below to pass the settings to the SMU:
+	 *	smu_set_watermarks_for_clock_ranges
+	 *	smu_set_watermarks_table
+	 *	navi10_set_watermarks_table
+	 *	smu_write_watermarks_table
+	 *
+	 * For Renoir, the DCN watermark clock settings are also fixed values.
+	 * DC implements a different flow for the Windows driver:
+	 *	dc_hardware_init / dc_set_power_state
+	 *	dcn10_init_hw
+	 *	notify_wm_ranges
+	 *	set_wm_ranges
+	 *	-- Linux
+	 *	smu_set_watermarks_for_clock_ranges
+	 *	renoir_set_watermarks_table
+	 *	smu_write_watermarks_table
+	 *
+	 * For Linux,
+	 *	dc_hardware_init -> amdgpu_dm_init
+	 *	dc_set_power_state -> dm_resume
+	 *
+	 * Therefore, this function applies to Navi10/12/14 but not to Renoir.
+	 */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 0):
+ break;
+ default:
+ return 0;
+ }
+
+ ret = amdgpu_dpm_write_watermarks_table(adev);
+ if (ret) {
+ DRM_ERROR("Failed to update WMTABLE!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * dm_hw_init() - Initialize DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Initialize the &struct amdgpu_display_manager device. This involves calling
+ * the initializers of each DM component, then populating the struct with them.
+ *
+ * Although the function implies hardware initialization, both hardware and
+ * software are initialized here. Splitting them out to their relevant init
+ * hooks is a future TODO item.
+ *
+ * Some notable things that are initialized here:
+ *
+ * - Display Core, both software and hardware
+ * - DC modules that we need (freesync and color management)
+ * - DRM software states
+ * - Interrupt sources and handlers
+ * - Vblank support
+ * - Debug FS entries, if enabled
+ */
+static int dm_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* Create DAL display manager */
+ amdgpu_dm_init(adev);
+ amdgpu_dm_hpd_init(adev);
+
+ return 0;
+}
+
+/**
+ * dm_hw_fini() - Teardown DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Teardown components within &struct amdgpu_display_manager that require
+ * cleanup. This involves cleaning up the DRM device, DC, and any modules that
+ * were loaded. Also flush IRQ workqueues and disable them.
+ */
+static int dm_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_dm_hpd_fini(adev);
+
+ amdgpu_dm_irq_fini(adev);
+ amdgpu_dm_fini(adev);
+ return 0;
+}
+
+static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
+ struct dc_state *state, bool enable)
+{
+ enum dc_irq_source irq_source;
+ struct amdgpu_crtc *acrtc;
+ int rc = -EBUSY;
+ int i = 0;
+
+ for (i = 0; i < state->stream_count; i++) {
+ acrtc = get_crtc_by_otg_inst(
+ adev, state->stream_status[i].primary_otg_inst);
+
+ if (acrtc && state->stream_status[i].plane_count != 0) {
+ irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
+ rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+ if (rc)
+ DRM_WARN("Failed to %s pflip interrupts\n",
+ enable ? "enable" : "disable");
+
+ if (enable) {
+ if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
+ } else
+ rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
+
+ if (rc)
+ DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
+
+ irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+ /* During gpu-reset we disable and then enable vblank irq, so
+ * don't use amdgpu_irq_get/put() to avoid refcount change.
+ */
+ if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+ DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
+ }
+ }
+}
+
+static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
+{
+ struct dc_state *context = NULL;
+ enum dc_status res = DC_ERROR_UNEXPECTED;
+ int i;
+ struct dc_stream_state *del_streams[MAX_PIPES];
+ int del_streams_count = 0;
+ struct dc_commit_streams_params params = {};
+
+ memset(del_streams, 0, sizeof(del_streams));
+
+ context = dc_state_create_current_copy(dc);
+ if (context == NULL)
+ goto context_alloc_fail;
+
+ /* First remove from context all streams */
+ for (i = 0; i < context->stream_count; i++) {
+ struct dc_stream_state *stream = context->streams[i];
+
+ del_streams[del_streams_count++] = stream;
+ }
+
+ /* Remove all planes for removed streams and then remove the streams */
+ for (i = 0; i < del_streams_count; i++) {
+ if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+ res = DC_FAIL_DETACH_SURFACES;
+ goto fail;
+ }
+
+ res = dc_state_remove_stream(dc, context, del_streams[i]);
+ if (res != DC_OK)
+ goto fail;
+ }
+
+ params.streams = context->streams;
+ params.stream_count = context->stream_count;
+ res = dc_commit_streams(dc, &params);
+
+fail:
+ dc_state_release(context);
+
+context_alloc_fail:
+ return res;
+}
+
+static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
+{
+ int i;
+
+ if (dm->hpd_rx_offload_wq) {
+ for (i = 0; i < dm->dc->caps.max_links; i++)
+ flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
+ }
+}
+
+static int dm_suspend(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ int ret = 0;
+
+ if (amdgpu_in_reset(adev)) {
+ mutex_lock(&dm->dc_lock);
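+		/* dc_lock is held across the GPU reset; dm_resume() releases it. */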
+
+ dc_allow_idle_optimizations(adev->dm.dc, false);
+
+ dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
+
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
+
+ amdgpu_dm_commit_zero_streams(dm->dc);
+
+ amdgpu_dm_irq_suspend(adev);
+
+ hpd_rx_irq_work_suspend(dm);
+
+ return ret;
+ }
+
+ WARN_ON(adev->dm.cached_state);
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
+ if (IS_ERR(adev->dm.cached_state))
+ return PTR_ERR(adev->dm.cached_state);
+
+ s3_handle_mst(adev_to_drm(adev), true);
+
+ amdgpu_dm_irq_suspend(adev);
+
+ hpd_rx_irq_work_suspend(dm);
+
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
+ dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
+
+ return 0;
+}
+
+struct drm_connector *
+amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ u32 i;
+ struct drm_connector_state *new_con_state;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc_from_state;
+
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ crtc_from_state = new_con_state->crtc;
+
+ if (crtc_from_state == crtc)
+ return connector;
+ }
+
+ return NULL;
+}
+
+static void emulated_link_detect(struct dc_link *link)
+{
+ struct dc_sink_init_data sink_init_data = { 0 };
+ struct display_sink_capability sink_caps = { 0 };
+ enum dc_edid_status edid_status;
+ struct dc_context *dc_ctx = link->ctx;
+ struct drm_device *dev = adev_to_drm(dc_ctx->driver_context);
+ struct dc_sink *sink = NULL;
+ struct dc_sink *prev_sink = NULL;
+
+ link->type = dc_connection_none;
+ prev_sink = link->local_sink;
+
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_DUAL_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_LVDS: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_LVDS;
+ break;
+ }
+
+ case SIGNAL_TYPE_EDP: {
+ sink_caps.transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_EDP;
+ break;
+ }
+
+ case SIGNAL_TYPE_DISPLAY_PORT: {
+ sink_caps.transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
+ break;
+ }
+
+ default:
+ drm_err(dev, "Invalid connector type! signal:%d\n",
+ link->connector_signal);
+ return;
+ }
+
+ sink_init_data.link = link;
+ sink_init_data.sink_signal = sink_caps.signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ drm_err(dev, "Failed to create sink!\n");
+ return;
+ }
+
+ /* dc_sink_create returns a new reference */
+ link->local_sink = sink;
+
+ edid_status = dm_helpers_read_local_edid(
+ link->ctx,
+ link,
+ sink);
+
+ if (edid_status != EDID_OK)
+ drm_err(dev, "Failed to read EDID\n");
+}
+
+static void dm_gpureset_commit_state(struct dc_state *dc_state,
+ struct amdgpu_display_manager *dm)
+{
+ struct {
+ struct dc_surface_update surface_updates[MAX_SURFACES];
+ struct dc_plane_info plane_infos[MAX_SURFACES];
+ struct dc_scaling_info scaling_infos[MAX_SURFACES];
+ struct dc_flip_addrs flip_addrs[MAX_SURFACES];
+ struct dc_stream_update stream_update;
+ } *bundle;
+ int k, m;
+
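+	/* The bundle's arrays of MAX_SURFACES updates are too large for the
+	 * stack, so allocate it on the heap.
+	 */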
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+
+ if (!bundle) {
+ drm_err(dm->ddev, "Failed to allocate update bundle\n");
+ goto cleanup;
+ }
+
+ for (k = 0; k < dc_state->stream_count; k++) {
+ bundle->stream_update.stream = dc_state->streams[k];
+
+ for (m = 0; m < dc_state->stream_status->plane_count; m++) {
+ bundle->surface_updates[m].surface =
+ dc_state->stream_status->plane_states[m];
+ bundle->surface_updates[m].surface->force_full_update =
+ true;
+ }
+
+ update_planes_and_stream_adapter(dm->dc,
+ UPDATE_TYPE_FULL,
+ dc_state->stream_status->plane_count,
+ dc_state->streams[k],
+ &bundle->stream_update,
+ bundle->surface_updates);
+ }
+
+cleanup:
+ kfree(bundle);
+}
+
+static int dm_resume(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state;
+ struct dm_plane_state *dm_new_plane_state;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ struct dc_state *dc_state;
+ int i, r, j, ret;
+ bool need_hotplug = false;
+ struct dc_commit_streams_params commit_params = {};
+
+	if (dm->dc->caps.ips_support)
+		dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
+
+ if (amdgpu_in_reset(adev)) {
+ dc_state = dm->cached_dc_state;
+
+ /*
+ * The dc->current_state is backed up into dm->cached_dc_state
+ * before we commit 0 streams.
+ *
+ * DC will clear link encoder assignments on the real state
+ * but the changes won't propagate over to the copy we made
+ * before the 0 streams commit.
+ *
+ * DC expects that link encoder assignments are *not* valid
+ * when committing a state, so as a workaround we can copy
+ * off of the current state.
+ *
+		 * We lose the previous assignments, but we had already
+		 * committed 0 streams anyway.
+ */
+ link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
+
+ r = dm_dmub_hw_init(adev);
+ if (r)
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+
+ dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
+ dc_resume(dm->dc);
+
+ amdgpu_dm_irq_resume_early(adev);
+
+ for (i = 0; i < dc_state->stream_count; i++) {
+ dc_state->streams[i]->mode_changed = true;
+ for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
+ dc_state->stream_status[i].plane_states[j]->update_flags.raw
+ = 0xffffffff;
+ }
+ }
+
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ amdgpu_dm_outbox_init(adev);
+ dc_enable_dmub_outbox(adev->dm.dc);
+ }
+
+ commit_params.streams = dc_state->streams;
+ commit_params.stream_count = dc_state->stream_count;
+ WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
+
+ dm_gpureset_commit_state(dm->cached_dc_state, dm);
+
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
+
+ dc_state_release(dm->cached_dc_state);
+ dm->cached_dc_state = NULL;
+
+ amdgpu_dm_irq_resume_late(adev);
+
+ mutex_unlock(&dm->dc_lock);
+
+ return 0;
+ }
+ /* Recreate dc_state - DC invalidates it when setting power state to S3. */
+ dc_state_release(dm_state->context);
+ dm_state->context = dc_state_create(dm->dc, NULL);
+ /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
+
+ /* Before powering on DC we need to re-initialize DMUB. */
+ dm_dmub_hw_resume(adev);
+
+ /* Re-enable outbox interrupts for DPIA. */
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ amdgpu_dm_outbox_init(adev);
+ dc_enable_dmub_outbox(adev->dm.dc);
+ }
+
+ /* power on hardware */
+ dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
+ /* program HPD filter */
+ dc_resume(dm->dc);
+
+	/*
+	 * Enable the HPD RX IRQ early; this must happen before the mode is
+	 * set, since short-pulse interrupts are used for MST.
+	 */
+ amdgpu_dm_irq_resume_early(adev);
+
+	/* On resume we need to rewrite the MSTM control bits to enable MST */
+ s3_handle_mst(ddev, false);
+
+	/* Do detection */
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!aconnector->dc_link)
+ continue;
+
+		/*
+		 * Skip end-sink MST connectors that have already been created;
+		 * they are managed by the MST topology manager.
+		 */
+		if (aconnector->mst_root)
+			continue;
+
+ mutex_lock(&aconnector->hpd_lock);
+ if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(aconnector->dc_link);
+ } else {
+ mutex_lock(&dm->dc_lock);
+ dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ mutex_unlock(&dm->dc_lock);
+ }
+
+ if (aconnector->fake_enable && aconnector->dc_link->local_sink)
+ aconnector->fake_enable = false;
+
+ if (aconnector->dc_sink)
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ mutex_unlock(&aconnector->hpd_lock);
+ }
+ drm_connector_list_iter_end(&iter);
+
+ /* Force mode set in atomic commit */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
+ new_crtc_state->active_changed = true;
+
+ /*
+ * atomic_check is expected to create the dc states. We need to release
+ * them here, since they were duplicated as part of the suspend
+ * procedure.
+ */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->stream) {
+ WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
+ dc_stream_release(dm_new_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+ }
+ }
+
+ for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ if (dm_new_plane_state->dc_state) {
+ WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
+ dc_plane_state_release(dm_new_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
+ }
+ }
+
+ drm_atomic_helper_resume(ddev, dm->cached_state);
+
+ dm->cached_state = NULL;
+
+	/* Do MST topology probing after resuming the cached state */
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type != dc_connection_mst_branch ||
+ aconnector->mst_root)
+ continue;
+
+ ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
+
+ if (ret < 0) {
+ dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link);
+ need_hotplug = true;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+
+ if (need_hotplug)
+ drm_kms_helper_hotplug_event(ddev);
+
+ amdgpu_dm_irq_resume_late(adev);
+
+ amdgpu_dm_smu_write_watermarks_table(adev);
+
+ return 0;
+}
+
+/**
+ * DOC: DM Lifecycle
+ *
+ * DM (and consequently DC) is registered in the amdgpu base driver as a IP
+ * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
+ * the base driver's device list to be initialized and torn down accordingly.
+ *
+ * The functions to do so are provided as hooks in &struct amd_ip_funcs.
+ */
+
+static const struct amd_ip_funcs amdgpu_dm_funcs = {
+ .name = "dm",
+ .early_init = dm_early_init,
+ .late_init = dm_late_init,
+ .sw_init = dm_sw_init,
+ .sw_fini = dm_sw_fini,
+ .early_fini = amdgpu_dm_early_fini,
+ .hw_init = dm_hw_init,
+ .hw_fini = dm_hw_fini,
+ .suspend = dm_suspend,
+ .resume = dm_resume,
+ .is_idle = dm_is_idle,
+ .wait_for_idle = dm_wait_for_idle,
+ .check_soft_reset = dm_check_soft_reset,
+ .soft_reset = dm_soft_reset,
+ .set_clockgating_state = dm_set_clockgating_state,
+ .set_powergating_state = dm_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version dm_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_dm_funcs,
+};
+
+/**
+ * DOC: atomic
+ *
+ * *WIP*
+ */
+
+static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
+ .fb_create = amdgpu_display_user_framebuffer_create,
+ .get_format_info = amdgpu_dm_plane_get_format_info,
+ .atomic_check = amdgpu_dm_atomic_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
+ .atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
+ .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
+};
+
+static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
+{
+ struct amdgpu_dm_backlight_caps *caps;
+ struct drm_connector *conn_base;
+ struct amdgpu_device *adev;
+ struct drm_luminance_range_info *luminance_range;
+
+ if (aconnector->bl_idx == -1 ||
+ aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
+ return;
+
+ conn_base = &aconnector->base;
+ adev = drm_to_adev(conn_base->dev);
+
+ caps = &adev->dm.backlight_caps[aconnector->bl_idx];
+ caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
+ caps->aux_support = false;
+
+ if (caps->ext_caps->bits.oled == 1
+ /*
+ * ||
+ * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+ * caps->ext_caps->bits.hdr_aux_backlight_control == 1
+ */)
+ caps->aux_support = true;
+
+ if (amdgpu_backlight == 0)
+ caps->aux_support = false;
+ else if (amdgpu_backlight == 1)
+ caps->aux_support = true;
+
+ luminance_range = &conn_base->display_info.luminance_range;
+
+ if (luminance_range->max_luminance) {
+ caps->aux_min_input_signal = luminance_range->min_luminance;
+ caps->aux_max_input_signal = luminance_range->max_luminance;
+ } else {
+ caps->aux_min_input_signal = 0;
+ caps->aux_max_input_signal = 512;
+ }
+}
+
+void amdgpu_dm_update_connector_after_detect(
+ struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct dc_sink *sink;
+
+ /* MST handled by drm_mst framework */
+	if (aconnector->mst_mgr.mst_state)
+ return;
+
+ sink = aconnector->dc_link->local_sink;
+ if (sink)
+ dc_sink_retain(sink);
+
+	/*
+	 * An EDID-managed connector gets its first update only in the
+	 * mode_valid hook; after that the connector sink is set to either a
+	 * fake or a physical sink, depending on link status.
+	 * Skip if this was already done during boot.
+	 */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
+ && aconnector->dc_em_sink) {
+
+		/*
+		 * For headless S3 resume, use the dc_em_sink to fake a stream,
+		 * because connector->sink is set to NULL on resume.
+		 */
+ mutex_lock(&dev->mode_config.mutex);
+
+ if (sink) {
+ if (aconnector->dc_sink) {
+ amdgpu_dm_update_freesync_caps(connector, NULL);
+				/*
+				 * The retain and release below bump up the
+				 * sink's refcount, because the link no longer
+				 * points to it after disconnect; otherwise the
+				 * next CRTC-to-connector reshuffle by the UMD
+				 * would run into an unwanted dc_sink release.
+				 */
+ dc_sink_release(aconnector->dc_sink);
+ }
+ aconnector->dc_sink = sink;
+ dc_sink_retain(aconnector->dc_sink);
+ amdgpu_dm_update_freesync_caps(connector,
+ aconnector->edid);
+ } else {
+ amdgpu_dm_update_freesync_caps(connector, NULL);
+ if (!aconnector->dc_sink) {
+ aconnector->dc_sink = aconnector->dc_em_sink;
+ dc_sink_retain(aconnector->dc_sink);
+ }
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (sink)
+ dc_sink_release(sink);
+ return;
+ }
+
+	/*
+	 * TODO: temporary guard until a proper fix is found.
+	 * If this sink is an MST sink, we should not do anything.
+	 */
+ if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ dc_sink_release(sink);
+ return;
+ }
+
+ if (aconnector->dc_sink == sink) {
+		/*
+		 * We got a DP short pulse (link loss, DP CTS, etc.).
+		 * Do nothing.
+		 */
+ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
+ aconnector->connector_id);
+ if (sink)
+ dc_sink_release(sink);
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
+ aconnector->connector_id, aconnector->dc_sink, sink);
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ /*
+ * 1. Update status of the drm connector
+ * 2. Send an event and let userspace tell us what to do
+ */
+ if (sink) {
+ /*
+ * TODO: check if we still need the S3 mode update workaround.
+ * If yes, put it here.
+ */
+ if (aconnector->dc_sink) {
+ amdgpu_dm_update_freesync_caps(connector, NULL);
+ dc_sink_release(aconnector->dc_sink);
+ }
+
+ aconnector->dc_sink = sink;
+ dc_sink_retain(aconnector->dc_sink);
+ if (sink->dc_edid.length == 0) {
+ aconnector->edid = NULL;
+ if (aconnector->dc_link->aux_mode) {
+ drm_dp_cec_unset_edid(
+ &aconnector->dm_dp_aux.aux);
+ }
+ } else {
+ aconnector->edid =
+ (struct edid *)sink->dc_edid.raw_edid;
+
+ if (aconnector->dc_link->aux_mode)
+ drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
+ aconnector->edid);
+ }
+
+ if (!aconnector->timing_requested) {
+ aconnector->timing_requested =
+ kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
+ if (!aconnector->timing_requested)
+ drm_err(dev,
+				"failed to create aconnector->timing_requested\n");
+ }
+
+ drm_connector_update_edid_property(connector, aconnector->edid);
+ amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
+ update_connector_ext_caps(aconnector);
+ } else {
+ drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
+ amdgpu_dm_update_freesync_caps(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
+ aconnector->num_modes = 0;
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+ aconnector->edid = NULL;
+ kfree(aconnector->timing_requested);
+ aconnector->timing_requested = NULL;
+ /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
+ if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ update_subconnector_property(aconnector);
+
+ if (sink)
+ dc_sink_release(sink);
+}
+
+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
+ bool ret = false;
+
+ if (adev->dm.disable_hpd_irq)
+ return;
+
+	/*
+	 * In case of failure or for MST, there is no need to update the
+	 * connector status or notify the OS, since (in the MST case) MST
+	 * handles this in its own context.
+	 */
+ mutex_lock(&aconnector->hpd_lock);
+
+ if (adev->dm.hdcp_workqueue) {
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+ dm_con_state->update_hdcp = true;
+ }
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ aconnector->timing_changed = false;
+
+ if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(aconnector->dc_link);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_connector_hotplug_event(connector);
+ } else {
+ mutex_lock(&adev->dm.dc_lock);
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ mutex_unlock(&adev->dm.dc_lock);
+ if (ret) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_connector_hotplug_event(connector);
+ }
+ }
+ mutex_unlock(&aconnector->hpd_lock);
+}
+
+static void handle_hpd_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+
+ handle_hpd_irq_helper(aconnector);
+}
+
+static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+ union hpd_irq_data hpd_irq_data)
+{
+ struct hpd_rx_irq_offload_work *offload_work =
+ kzalloc(sizeof(*offload_work), GFP_KERNEL);
+
+ if (!offload_work) {
+ DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
+ return;
+ }
+
+ INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
+ offload_work->data = hpd_irq_data;
+ offload_work->offload_wq = offload_wq;
+
+ queue_work(offload_wq->wq, &offload_work->work);
+	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
+}
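+
+/*
+ * Each HPD RX event gets its own heap-allocated work item so the IRQ path
+ * never blocks; handle_hpd_rx_irq() below uses the offload_lock-protected
+ * is_handling_* flags to keep at most one link-loss or MST-message handler
+ * in flight per link.
+ */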
+
+static void handle_hpd_rx_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct dc_link *dc_link = aconnector->dc_link;
+ bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+ bool result = false;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ union hpd_irq_data hpd_irq_data;
+ bool link_loss = false;
+ bool has_left_work = false;
+ int idx = dc_link->link_index;
+ struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
+
+ memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
+
+ if (adev->dm.disable_hpd_irq)
+ return;
+
+	/*
+	 * TODO: temporarily serialize with a mutex so the HPD interrupt does
+	 * not run into a GPIO conflict; once an i2c helper is implemented,
+	 * this mutex should be retired.
+	 */
+ mutex_lock(&aconnector->hpd_lock);
+
+ result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
+ &link_loss, true, &has_left_work);
+
+ if (!has_left_work)
+ goto out;
+
+ if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ goto out;
+ }
+
+ if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+ if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+ hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ bool skip = false;
+
+			/*
+			 * DOWN_REP_MSG_RDY is also handled by the polling
+			 * method mgr->cbs->poll_hpd_irq().
+			 */
+ spin_lock(&offload_wq->offload_lock);
+ skip = offload_wq->is_handling_mst_msg_rdy_event;
+
+ if (!skip)
+ offload_wq->is_handling_mst_msg_rdy_event = true;
+
+ spin_unlock(&offload_wq->offload_lock);
+
+ if (!skip)
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
+ goto out;
+ }
+
+ if (link_loss) {
+ bool skip = false;
+
+ spin_lock(&offload_wq->offload_lock);
+ skip = offload_wq->is_handling_link_loss;
+
+ if (!skip)
+ offload_wq->is_handling_link_loss = true;
+
+ spin_unlock(&offload_wq->offload_lock);
+
+ if (!skip)
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
+ goto out;
+ }
+ }
+
+out:
+ if (result && !is_mst_root_connector) {
+ /* Downstream Port status changed. */
+ if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(dc_link);
+
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ drm_kms_helper_connector_hotplug_event(connector);
+ } else {
+ bool ret = false;
+
+ mutex_lock(&adev->dm.dc_lock);
+ ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
+ mutex_unlock(&adev->dm.dc_lock);
+
+ if (ret) {
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ drm_kms_helper_connector_hotplug_event(connector);
+ }
+ }
+ }
+ if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
+ if (adev->dm.hdcp_workqueue)
+ hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
+ }
+
+ if (dc_link->type != dc_connection_mst_branch)
+ drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
+
+ mutex_unlock(&aconnector->hpd_lock);
+}
+
+static void register_hpd_handlers(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_connector *connector;
+ struct amdgpu_dm_connector *aconnector;
+ const struct dc_link *dc_link;
+ struct dc_interrupt_params int_params = {0};
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true))
+			DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
+
+		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true))
+			DRM_ERROR("amdgpu: failed to register dmub hpd irq callback\n");
+ }
+
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, head) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ dc_link = aconnector->dc_link;
+
+ if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source = dc_link->irq_source_hpd;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ handle_hpd_irq,
+ (void *) aconnector);
+ }
+
+ if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
+ /* Also register for DP short pulse (hpd_rx). */
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source = dc_link->irq_source_hpd_rx;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ handle_hpd_rx_irq,
+ (void *) aconnector);
+ }
+ }
+}
+
+#if defined(CONFIG_DRM_AMD_DC_SI)
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dce60_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+ unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /*
+ * Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling.
+ */
+
+ /* Use VBLANK interrupt */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i + 1, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
+ i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+	}
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, client_id,
+ VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+#endif
+
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dce110_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+ unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+
+ if (adev->family >= AMDGPU_FAMILY_AI)
+ client_id = SOC15_IH_CLIENTID_DCE;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /*
+ * Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling.
+ */
+
+ /* Use VBLANK interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use VUPDATE interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
+ if (r) {
+ DRM_ERROR("Failed to add vupdate irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_vupdate_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
+ i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+	}
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, client_id,
+ VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ static const unsigned int vrtl_int_srcid[] = {
+ DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
+ };
+#endif
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /*
+ * Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling.
+ */
+
+ /* Use VSTARTUP interrupt */
+ for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
+ i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(
+ adev, &int_params, dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use otg vertical line interrupt */
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
+ vrtl_int_srcid[i], &adev->vline0_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add vline0 irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
+
+ if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
+ DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
+ break;
+ }
+
+ c_irq_params = &adev->dm.vline0_params[int_params.irq_source
+ - DC_IRQ_SOURCE_DC1_VLINE0];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
+ }
+#endif
+
+ /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
+ * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
+ * to trigger at end of each vblank, regardless of state of the lock,
+ * matching DCE behaviour.
+ */
+ for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
+ i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add vupdate irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_vupdate_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
+ i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+	}
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
+ &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+
+/* Register Outbox IRQ sources and initialize IRQ callbacks */
+static int register_outbox_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r, i;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
+ &adev->dmub_outbox_irq);
+ if (r) {
+ DRM_ERROR("Failed to add outbox irq id!\n");
+ return r;
+ }
+
+ if (dc->ctx->dmub_srv) {
+ i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.dmub_outbox_params[0];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_dmub_outbox1_low_irq, c_irq_params);
+ }
+
+ return 0;
+}
+
+/*
+ * Acquires the lock for the atomic state object and returns
+ * the new atomic state.
+ *
+ * This should only be called during atomic check.
+ */
+int dm_atomic_get_state(struct drm_atomic_state *state,
+ struct dm_atomic_state **dm_state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_private_state *priv_state;
+
+ if (*dm_state)
+ return 0;
+
+ priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
+ if (IS_ERR(priv_state))
+ return PTR_ERR(priv_state);
+
+ *dm_state = to_dm_atomic_state(priv_state);
+
+ return 0;
+}
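+
+/*
+ * Typical use from an atomic-check path (an illustrative sketch; `state` is
+ * the caller's drm_atomic_state and the surrounding function is hypothetical):
+ *
+ *	struct dm_atomic_state *dm_state = NULL;
+ *	int ret = dm_atomic_get_state(state, &dm_state);
+ *
+ *	if (ret)
+ *		return ret;
+ *
+ * On success, dm_state->context can be inspected under the acquired
+ * private-object lock.
+ */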
+
+static struct dm_atomic_state *
+dm_atomic_get_new_state(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_private_obj *obj;
+ struct drm_private_state *new_obj_state;
+ int i;
+
+ for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
+ if (obj->funcs == dm->atomic_obj.funcs)
+ return to_dm_atomic_state(new_obj_state);
+ }
+
+ return NULL;
+}
+
+static struct drm_private_state *
+dm_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct dm_atomic_state *old_state, *new_state;
+
+ new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
+ if (!new_state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
+
+ old_state = to_dm_atomic_state(obj->state);
+
+ if (old_state && old_state->context)
+ new_state->context = dc_state_create_copy(old_state->context);
+
+ if (!new_state->context) {
+ kfree(new_state);
+ return NULL;
+ }
+
+ return &new_state->base;
+}
+
+static void dm_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+
+ if (dm_state && dm_state->context)
+ dc_state_release(dm_state->context);
+
+ kfree(dm_state);
+}
+
+static struct drm_private_state_funcs dm_atomic_state_funcs = {
+ .atomic_duplicate_state = dm_atomic_duplicate_state,
+ .atomic_destroy_state = dm_atomic_destroy_state,
+};
+
+static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
+{
+ struct dm_atomic_state *state;
+ int r;
+
+ adev->mode_info.mode_config_initialized = true;
+
+ adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
+ adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
+
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
+
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
+ if (adev->asic_type == CHIP_HAWAII)
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ else
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+ /* indicates support for immediate flip */
+ adev_to_drm(adev)->mode_config.async_page_flip = true;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->context = dc_state_create_current_copy(adev->dm.dc);
+ if (!state->context) {
+ kfree(state);
+ return -ENOMEM;
+ }
+
+ drm_atomic_private_obj_init(adev_to_drm(adev),
+ &adev->dm.atomic_obj,
+ &state->base,
+ &dm_atomic_state_funcs);
+
+ r = amdgpu_display_modeset_create_props(adev);
+ if (r) {
+ dc_state_release(state->context);
+ kfree(state);
+ return r;
+ }
+
+#ifdef AMD_PRIVATE_COLOR
+	if (amdgpu_dm_create_color_properties(adev)) {
+		dc_state_release(state->context);
+		kfree(state);
+		return -ENOMEM;
+	}
+#endif
+
+ r = amdgpu_dm_audio_init(adev);
+ if (r) {
+ dc_state_release(state->context);
+ kfree(state);
+ return r;
+ }
+
+ return 0;
+}
+
+#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
+#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
+#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
+
+static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
+ int bl_idx)
+{
+#if defined(CONFIG_ACPI)
+ struct amdgpu_dm_backlight_caps caps;
+
+ memset(&caps, 0, sizeof(caps));
+
+ if (dm->backlight_caps[bl_idx].caps_valid)
+ return;
+
+ amdgpu_acpi_get_backlight_caps(&caps);
+ if (caps.caps_valid) {
+ dm->backlight_caps[bl_idx].caps_valid = true;
+ if (caps.aux_support)
+ return;
+ dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
+ dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
+ } else {
+ dm->backlight_caps[bl_idx].min_input_signal =
+ AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ dm->backlight_caps[bl_idx].max_input_signal =
+ AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ }
+#else
+ if (dm->backlight_caps[bl_idx].aux_support)
+ return;
+
+ dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+#endif
+}
+
+static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
+ unsigned int *min, unsigned int *max)
+{
+ if (!caps)
+ return 0;
+
+ if (caps->aux_support) {
+ // Firmware limits are in nits, DC API wants millinits.
+ *max = 1000 * caps->aux_max_input_signal;
+ *min = 1000 * caps->aux_min_input_signal;
+ } else {
+ // Firmware limits are 8-bit, PWM control is 16-bit.
+ *max = 0x101 * caps->max_input_signal;
+ *min = 0x101 * caps->min_input_signal;
+ }
+ return 1;
+}
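+
+/*
+ * Worked example for the PWM path above (values are illustrative): the
+ * 0x101 multiplier expands an 8-bit firmware limit to the full 16-bit
+ * PWM range exactly, since 0x101 * 0xff = 0xffff. With the default caps
+ * of min 12 and max 255 this yields a PWM range of 3084..65535.
+ */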
+
+static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
+ uint32_t brightness)
+{
+ unsigned int min, max;
+
+ if (!get_brightness_range(caps, &min, &max))
+ return brightness;
+
+ // Rescale 0..255 to min..max
+ return min + DIV_ROUND_CLOSEST((max - min) * brightness,
+ AMDGPU_MAX_BL_LEVEL);
+}
+
+static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
+ uint32_t brightness)
+{
+ unsigned int min, max;
+
+ if (!get_brightness_range(caps, &min, &max))
+ return brightness;
+
+ if (brightness < min)
+ return 0;
+ // Rescale min..max to 0..255
+ return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
+ max - min);
+}
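+
+/*
+ * Worked example for the two conversions above (illustrative numbers):
+ * with min = 3084 and max = 65535, a user brightness of 128 maps to
+ * 3084 + DIV_ROUND_CLOSEST(62451 * 128, 255) = 3084 + 31349 = 34433,
+ * and converting 34433 back gives DIV_ROUND_CLOSEST(255 * 31349, 62451)
+ * = 128, recovering the original user value.
+ */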
+
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+ int bl_idx,
+ u32 user_brightness)
+{
+ struct amdgpu_dm_backlight_caps caps;
+ struct dc_link *link;
+ u32 brightness;
+ bool rc;
+
+ amdgpu_dm_update_backlight_caps(dm, bl_idx);
+ caps = dm->backlight_caps[bl_idx];
+
+ dm->brightness[bl_idx] = user_brightness;
+ /* update scratch register */
+ if (bl_idx == 0)
+ amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
+ brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
+ link = (struct dc_link *)dm->backlight_link[bl_idx];
+
+ /* Change brightness based on AUX property */
+ if (caps.aux_support) {
+ rc = dc_link_set_backlight_level_nits(link, true, brightness,
+ AUX_BL_DEFAULT_TRANSITION_TIME_MS);
+ if (!rc)
+ DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
+ } else {
+ rc = dc_link_set_backlight_level(link, brightness, 0);
+ if (!rc)
+ DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
+ }
+
+ if (rc)
+ dm->actual_brightness[bl_idx] = user_brightness;
+}
+
+static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+{
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+ int i;
+
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (bd == dm->backlight_dev[i])
+ break;
+ }
+ if (i >= AMDGPU_DM_MAX_NUM_EDP)
+ i = 0;
+ amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
+
+ return 0;
+}
+
+static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
+ int bl_idx)
+{
+ int ret;
+ struct amdgpu_dm_backlight_caps caps;
+ struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
+
+ amdgpu_dm_update_backlight_caps(dm, bl_idx);
+ caps = dm->backlight_caps[bl_idx];
+
+ if (caps.aux_support) {
+ u32 avg, peak;
+ bool rc;
+
+ rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
+ if (!rc)
+ return dm->brightness[bl_idx];
+ return convert_brightness_to_user(&caps, avg);
+ }
+
+ ret = dc_link_get_backlight_level(link);
+
+ if (ret == DC_ERROR_UNEXPECTED)
+ return dm->brightness[bl_idx];
+
+ return convert_brightness_to_user(&caps, ret);
+}
+
+static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+{
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+ int i;
+
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (bd == dm->backlight_dev[i])
+ break;
+ }
+ if (i >= AMDGPU_DM_MAX_NUM_EDP)
+ i = 0;
+ return amdgpu_dm_backlight_get_level(dm, i);
+}
+
+static const struct backlight_ops amdgpu_dm_backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = amdgpu_dm_backlight_get_brightness,
+ .update_status = amdgpu_dm_backlight_update_status,
+};
+
+static void
+amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_device *drm = aconnector->base.dev;
+ struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
+ struct backlight_properties props = { 0 };
+ char bl_name[16];
+
+ if (aconnector->bl_idx == -1)
+ return;
+
+ if (!acpi_video_backlight_use_native()) {
+ drm_info(drm, "Skipping amdgpu DM backlight registration\n");
+ /* Try registering an ACPI video backlight device instead. */
+ acpi_video_register_backlight();
+ return;
+ }
+
+ props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+ props.brightness = AMDGPU_MAX_BL_LEVEL;
+ props.type = BACKLIGHT_RAW;
+
+ snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
+ drm->primary->index + aconnector->bl_idx);
+
+ dm->backlight_dev[aconnector->bl_idx] =
+ backlight_device_register(bl_name, aconnector->base.kdev, dm,
+ &amdgpu_dm_backlight_ops, &props);
+
+ if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
+ DRM_ERROR("DM: Backlight registration failed!\n");
+ dm->backlight_dev[aconnector->bl_idx] = NULL;
+ } else
+ DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
+}
+
+static int initialize_plane(struct amdgpu_display_manager *dm,
+ struct amdgpu_mode_info *mode_info, int plane_id,
+ enum drm_plane_type plane_type,
+ const struct dc_plane_cap *plane_cap)
+{
+ struct drm_plane *plane;
+ unsigned long possible_crtcs;
+ int ret = 0;
+
+ plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
+ if (!plane) {
+ DRM_ERROR("KMS: Failed to allocate plane\n");
+ return -ENOMEM;
+ }
+ plane->type = plane_type;
+
+ /*
+ * HACK: IGT tests expect that the primary plane for a CRTC
+	 * can only have one possible CRTC. Only expose support for
+	 * any CRTC on planes that won't be used as a primary plane
+	 * for a CRTC - like overlay or underlay planes.
+ */
+ possible_crtcs = 1 << plane_id;
+ if (plane_id >= dm->dc->caps.max_streams)
+ possible_crtcs = 0xff;
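+	/*
+	 * For example, with max_streams == 4, plane_id 2 yields
+	 * possible_crtcs == 0x4 (primary plane bound to CRTC 2 only),
+	 * while plane_id 5 (an overlay) yields 0xff (any CRTC).
+	 */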
+
+ ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
+
+ if (ret) {
+ DRM_ERROR("KMS: Failed to initialize plane\n");
+ kfree(plane);
+ return ret;
+ }
+
+ if (mode_info)
+ mode_info->planes[plane_id] = plane;
+
+ return ret;
+}
+
+static void setup_backlight_device(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_link *link = aconnector->dc_link;
+ int bl_idx = dm->num_of_edps;
+
+ if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
+ link->type == dc_connection_none)
+ return;
+
+ if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
+		drm_warn(adev_to_drm(dm->adev), "Too many eDP connections, skipping backlight setup for additional eDPs\n");
+ return;
+ }
+
+ aconnector->bl_idx = bl_idx;
+
+ amdgpu_dm_update_backlight_caps(dm, bl_idx);
+ dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
+ dm->backlight_link[bl_idx] = link;
+ dm->num_of_edps++;
+
+ update_connector_ext_caps(aconnector);
+}
+
+static void amdgpu_set_panel_orientation(struct drm_connector *connector);
+
+/*
+ * In this architecture, the association
+ * connector -> encoder -> crtc
+ * is not really required. The crtc and connector will hold the
+ * display_index as an abstraction to use with the DAL component.
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+{
+ struct amdgpu_display_manager *dm = &adev->dm;
+ s32 i;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct amdgpu_encoder *aencoder = NULL;
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ u32 link_cnt;
+ s32 primary_planes;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ const struct dc_plane_cap *plane;
+ bool psr_feature_enabled = false;
+ bool replay_feature_enabled = false;
+ int max_overlay = dm->dc->caps.max_slave_planes;
+
+ dm->display_indexes_num = dm->dc->caps.max_streams;
+ /* Update the actual used number of crtc */
+ adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+
+ amdgpu_dm_set_irq_funcs(adev);
+
+ link_cnt = dm->dc->caps.max_links;
+ if (amdgpu_dm_mode_config_init(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize mode config\n");
+ return -EINVAL;
+ }
+
+ /* There is one primary plane per CRTC */
+ primary_planes = dm->dc->caps.max_streams;
+ ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
+
+ /*
+	 * Initialize primary planes, implicit planes for legacy IOCTLs.
+ * Order is reversed to match iteration order in atomic check.
+ */
+ for (i = (primary_planes - 1); i >= 0; i--) {
+ plane = &dm->dc->caps.planes[i];
+
+ if (initialize_plane(dm, mode_info, i,
+ DRM_PLANE_TYPE_PRIMARY, plane)) {
+ DRM_ERROR("KMS: Failed to initialize primary plane\n");
+ goto fail;
+ }
+ }
+
+ /*
+ * Initialize overlay planes, index starting after primary planes.
+ * These planes have a higher DRM index than the primary planes since
+ * they should be considered as having a higher z-order.
+ * Order is reversed to match iteration order in atomic check.
+ *
+ * Only support DCN for now, and only expose one so we don't encourage
+ * userspace to use up all the pipes.
+ */
+ for (i = 0; i < dm->dc->caps.max_planes; ++i) {
+ struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
+
+ /* Do not create overlay if MPO disabled */
+ if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
+ break;
+
+ if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
+ continue;
+
+ if (!plane->pixel_format_support.argb8888)
+ continue;
+
+ if (max_overlay-- == 0)
+ break;
+
+ if (initialize_plane(dm, NULL, primary_planes + i,
+ DRM_PLANE_TYPE_OVERLAY, plane)) {
+ DRM_ERROR("KMS: Failed to initialize overlay plane\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < dm->dc->caps.max_streams; i++)
+ if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
+ DRM_ERROR("KMS: Failed to initialize crtc\n");
+ goto fail;
+ }
+
+ /* Use Outbox interrupt */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ if (register_outbox_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+ default:
+ DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
+ amdgpu_ip_version(adev, DCE_HWIP, 0));
+ }
+
+ /* Determine whether to enable PSR support by default. */
+ if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ psr_feature_enabled = true;
+ break;
+ default:
+ psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
+ break;
+ }
+ }
+
+ /* Determine whether to enable Replay support by default. */
+ if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ replay_feature_enabled = true;
+ break;
+ default:
+ replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
+ break;
+ }
+ }
+
+ /* loops over all connectors on the board */
+ for (i = 0; i < link_cnt; i++) {
+ struct dc_link *link = NULL;
+
+ if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
+ DRM_ERROR(
+ "KMS: Cannot support more than %d display indexes\n",
+ AMDGPU_DM_MAX_DISPLAY_INDEX);
+ continue;
+ }
+
+ link = dc_get_link_at_index(dm->dc, i);
+
+ if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) {
+ struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
+
+ if (!wbcon) {
+ DRM_ERROR("KMS: Failed to allocate writeback connector\n");
+ continue;
+ }
+
+ if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
+ DRM_ERROR("KMS: Failed to initialize writeback connector\n");
+ kfree(wbcon);
+ continue;
+ }
+
+ link->psr_settings.psr_feature_enabled = false;
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
+ continue;
+ }
+
+ aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
+ if (!aconnector)
+ goto fail;
+
+ aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
+ if (!aencoder)
+ goto fail;
+
+ if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
+ DRM_ERROR("KMS: Failed to initialize encoder\n");
+ goto fail;
+ }
+
+ if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
+ DRM_ERROR("KMS: Failed to initialize connector\n");
+ goto fail;
+ }
+
+ if (dm->hpd_rx_offload_wq)
+ dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
+ aconnector;
+
+ if (!dc_link_detect_connection_type(link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(link);
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ } else {
+ bool ret = false;
+
+ mutex_lock(&dm->dc_lock);
+ ret = dc_link_detect(link, DETECT_REASON_BOOT);
+ mutex_unlock(&dm->dc_lock);
+
+ if (ret) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ setup_backlight_device(dm, aconnector);
+
+ /* Disable PSR if Replay can be enabled */
+ if (replay_feature_enabled)
+ if (amdgpu_dm_set_replay_caps(link, aconnector))
+ psr_feature_enabled = false;
+
+ if (psr_feature_enabled)
+ amdgpu_dm_set_psr_caps(link);
+
+ /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
+ * PSR is also supported.
+ */
+ if (link->psr_settings.psr_feature_enabled)
+ adev_to_drm(adev)->vblank_disable_immediate = false;
+ }
+ }
+ amdgpu_set_panel_orientation(&aconnector->base);
+ }
+
+ /* Software is initialized. Now we can register interrupt handlers. */
+ switch (adev->asic_type) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ if (dce60_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+#endif
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ if (dce110_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+ default:
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ if (dcn10_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+ default:
+ DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
+ amdgpu_ip_version(adev, DCE_HWIP, 0));
+ goto fail;
+ }
+ break;
+ }
+
+ return 0;
+fail:
+ kfree(aencoder);
+ kfree(aconnector);
+
+ return -EINVAL;
+}
+
+static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
+{
+ drm_atomic_private_obj_fini(&dm->atomic_obj);
+}
+
+/******************************************************************************
+ * amdgpu_display_funcs functions
+ *****************************************************************************/
+
+/*
+ * dm_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line buffer allocation.
+ */
+static void dm_bandwidth_update(struct amdgpu_device *adev)
+{
+ /* TODO: implement later */
+}
+
+static const struct amdgpu_display_funcs dm_display_funcs = {
+ .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
+ .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
+ .backlight_set_level = NULL, /* never called for DC */
+ .backlight_get_level = NULL, /* never called for DC */
+ .hpd_sense = NULL,/* called unconditionally */
+ .hpd_set_polarity = NULL, /* called unconditionally */
+ .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
+ .page_flip_get_scanoutpos =
+ dm_crtc_get_scanoutpos,/* called unconditionally */
+ .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
+ .add_connector = NULL, /* VBIOS parsing. DAL does it. */
+};
+
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+
+static ssize_t s3_debug_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret;
+ int s3_state;
+ struct drm_device *drm_dev = dev_get_drvdata(device);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ ret = kstrtoint(buf, 0, &s3_state);
+
+ if (ret == 0) {
+ if (s3_state) {
+ dm_resume(adev);
+ drm_kms_helper_hotplug_event(adev_to_drm(adev));
+ } else
+ dm_suspend(adev);
+ }
+
+ return ret == 0 ? count : 0;
+}
+
+DEVICE_ATTR_WO(s3_debug);
+
+#endif
+
+static int dm_init_microcode(struct amdgpu_device *adev)
+{
+ char *fw_name_dmub;
+ int r;
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 1, 0):
+ fw_name_dmub = FIRMWARE_RENOIR_DMUB;
+ if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
+ fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
+ break;
+ case IP_VERSION(3, 0, 0):
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))
+ fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
+ else
+ fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
+ break;
+ case IP_VERSION(3, 0, 1):
+ fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
+ break;
+ case IP_VERSION(3, 0, 2):
+ fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
+ break;
+ case IP_VERSION(3, 0, 3):
+ fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
+ break;
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
+ break;
+ case IP_VERSION(3, 1, 4):
+ fw_name_dmub = FIRMWARE_DCN_314_DMUB;
+ break;
+ case IP_VERSION(3, 1, 5):
+ fw_name_dmub = FIRMWARE_DCN_315_DMUB;
+ break;
+ case IP_VERSION(3, 1, 6):
+ fw_name_dmub = FIRMWARE_DCN316_DMUB;
+ break;
+ case IP_VERSION(3, 2, 0):
+ fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
+ break;
+ case IP_VERSION(3, 2, 1):
+ fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
+ break;
+ case IP_VERSION(3, 5, 0):
+ fw_name_dmub = FIRMWARE_DCN_35_DMUB;
+ break;
+ case IP_VERSION(3, 5, 1):
+ fw_name_dmub = FIRMWARE_DCN_351_DMUB;
+ break;
+ default:
+ /* ASIC doesn't support DMUB. */
+ return 0;
+ }
+ r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
+ return r;
+}
+
+static int dm_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct atom_context *ctx = mode_info->atom_context;
+ int index = GetIndexIntoMasterTable(DATA, Object_Header);
+ u16 data_offset;
+
+ /* if there is no object header, skip DM */
+ if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ dev_info(adev->dev, "No object header, skipping DM\n");
+ return -ENOENT;
+ }
+
+ switch (adev->asic_type) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_OLAND:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 2;
+ adev->mode_info.num_dig = 2;
+ break;
+#endif
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_KAVERI:
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 7;
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 7;
+ break;
+ case CHIP_CARRIZO:
+ adev->mode_info.num_crtc = 3;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 9;
+ break;
+ case CHIP_STONEY:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 9;
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ adev->mode_info.num_crtc = 5;
+ adev->mode_info.num_hpd = 5;
+ adev->mode_info.num_dig = 5;
+ break;
+ case CHIP_POLARIS10:
+ case CHIP_VEGAM:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ default:
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(3, 0, 0):
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ adev->mode_info.num_crtc = 5;
+ adev->mode_info.num_hpd = 5;
+ adev->mode_info.num_dig = 5;
+ break;
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(3, 0, 3):
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 2;
+ adev->mode_info.num_dig = 2;
+ break;
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 4;
+ adev->mode_info.num_dig = 4;
+ break;
+ default:
+ DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
+ amdgpu_ip_version(adev, DCE_HWIP, 0));
+ return -EINVAL;
+ }
+ break;
+ }
+
+ if (adev->mode_info.funcs == NULL)
+ adev->mode_info.funcs = &dm_display_funcs;
+
+ /*
+ * Note: Do NOT change adev->audio_endpt_rreg and
+ * adev->audio_endpt_wreg because they are initialised in
+ * amdgpu_device_init()
+ */
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+ device_create_file(
+ adev_to_drm(adev)->dev,
+ &dev_attr_s3_debug);
+#endif
+ adev->dc_enabled = true;
+
+ return dm_init_microcode(adev);
+}
+
+static bool modereset_required(struct drm_crtc_state *crtc_state)
+{
+ return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
+}
+
+static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+ .destroy = amdgpu_dm_encoder_destroy,
+};
+
+static int
+fill_plane_color_attributes(const struct drm_plane_state *plane_state,
+ const enum surface_pixel_format format,
+ enum dc_color_space *color_space)
+{
+ bool full_range;
+
+ *color_space = COLOR_SPACE_SRGB;
+
+ /* DRM color properties only affect non-RGB formats. */
+ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return 0;
+
+ full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
+
+ switch (plane_state->color_encoding) {
+ case DRM_COLOR_YCBCR_BT601:
+ if (full_range)
+ *color_space = COLOR_SPACE_YCBCR601;
+ else
+ *color_space = COLOR_SPACE_YCBCR601_LIMITED;
+ break;
+
+ case DRM_COLOR_YCBCR_BT709:
+ if (full_range)
+ *color_space = COLOR_SPACE_YCBCR709;
+ else
+ *color_space = COLOR_SPACE_YCBCR709_LIMITED;
+ break;
+
+ case DRM_COLOR_YCBCR_BT2020:
+ if (full_range)
+ *color_space = COLOR_SPACE_2020_YCBCR;
+ else
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
+ const struct drm_plane_state *plane_state,
+ const u64 tiling_flags,
+ struct dc_plane_info *plane_info,
+ struct dc_plane_address *address,
+ bool tmz_surface,
+ bool force_disable_dcc)
+{
+ const struct drm_framebuffer *fb = plane_state->fb;
+ const struct amdgpu_framebuffer *afb =
+ to_amdgpu_framebuffer(plane_state->fb);
+ int ret;
+
+ memset(plane_info, 0, sizeof(*plane_info));
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ plane_info->format =
+ SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
+ break;
+ case DRM_FORMAT_RGB565:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
+ break;
+ case DRM_FORMAT_NV21:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
+ break;
+ case DRM_FORMAT_NV12:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
+ break;
+ case DRM_FORMAT_P010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
+ break;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
+ break;
+ case DRM_FORMAT_XRGB16161616:
+ case DRM_FORMAT_ARGB16161616:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
+ break;
+ case DRM_FORMAT_XBGR16161616:
+ case DRM_FORMAT_ABGR16161616:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
+ break;
+ default:
+ DRM_ERROR(
+ "Unsupported screen format %p4cc\n",
+ &fb->format->format);
+ return -EINVAL;
+ }
+
+ switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_0:
+ plane_info->rotation = ROTATION_ANGLE_0;
+ break;
+ case DRM_MODE_ROTATE_90:
+ plane_info->rotation = ROTATION_ANGLE_90;
+ break;
+ case DRM_MODE_ROTATE_180:
+ plane_info->rotation = ROTATION_ANGLE_180;
+ break;
+ case DRM_MODE_ROTATE_270:
+ plane_info->rotation = ROTATION_ANGLE_270;
+ break;
+ default:
+ plane_info->rotation = ROTATION_ANGLE_0;
+ break;
+ }
+
+ plane_info->visible = true;
+ plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
+
+ plane_info->layer_index = plane_state->normalized_zpos;
+
+ ret = fill_plane_color_attributes(plane_state, plane_info->format,
+ &plane_info->color_space);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format,
+ plane_info->rotation, tiling_flags,
+ &plane_info->tiling_info,
+ &plane_info->plane_size,
+ &plane_info->dcc, address,
+ tmz_surface, force_disable_dcc);
+ if (ret)
+ return ret;
+
+ amdgpu_dm_plane_fill_blending_from_plane_state(
+ plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
+ &plane_info->global_alpha, &plane_info->global_alpha_value);
+
+ return 0;
+}
+
+static int fill_dc_plane_attributes(struct amdgpu_device *adev,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
+ struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
+ struct dc_scaling_info scaling_info;
+ struct dc_plane_info plane_info;
+ int ret;
+ bool force_disable_dcc = false;
+
+ ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
+ if (ret)
+ return ret;
+
+ dc_plane_state->src_rect = scaling_info.src_rect;
+ dc_plane_state->dst_rect = scaling_info.dst_rect;
+ dc_plane_state->clip_rect = scaling_info.clip_rect;
+ dc_plane_state->scaling_quality = scaling_info.scaling_quality;
+
+ force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
+ ret = fill_dc_plane_info_and_addr(adev, plane_state,
+ afb->tiling_flags,
+ &plane_info,
+ &dc_plane_state->address,
+ afb->tmz_surface,
+ force_disable_dcc);
+ if (ret)
+ return ret;
+
+ dc_plane_state->format = plane_info.format;
+ dc_plane_state->color_space = plane_info.color_space;
+ dc_plane_state->plane_size = plane_info.plane_size;
+ dc_plane_state->rotation = plane_info.rotation;
+ dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
+ dc_plane_state->stereo_format = plane_info.stereo_format;
+ dc_plane_state->tiling_info = plane_info.tiling_info;
+ dc_plane_state->visible = plane_info.visible;
+ dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
+ dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
+ dc_plane_state->global_alpha = plane_info.global_alpha;
+ dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
+ dc_plane_state->dcc = plane_info.dcc;
+ dc_plane_state->layer_index = plane_info.layer_index;
+ dc_plane_state->flip_int_enabled = true;
+
+ /*
+ * Always set input transfer function, since plane state is refreshed
+ * every time.
+ */
+ ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state,
+ plane_state,
+ dc_plane_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static inline void fill_dc_dirty_rect(struct drm_plane *plane,
+ struct rect *dirty_rect, int32_t x,
+ s32 y, s32 width, s32 height,
+ int *i, bool ffu)
+{
+ WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
+
+ dirty_rect->x = x;
+ dirty_rect->y = y;
+ dirty_rect->width = width;
+ dirty_rect->height = height;
+
+ if (ffu)
+ drm_dbg(plane->dev,
+ "[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
+ plane->base.id, width, height);
+ else
+ drm_dbg(plane->dev,
+ "[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
+ plane->base.id, x, y, width, height);
+
+ (*i)++;
+}
+
+/**
+ * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
+ *
+ * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
+ * remote fb
+ * @old_plane_state: Old state of @plane
+ * @new_plane_state: New state of @plane
+ * @crtc_state: New state of CRTC connected to the @plane
+ * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
+ * @is_psr_su: Flag indicating whether Panel Self Refresh Selective Update (PSR SU) is enabled.
+ * If PSR SU is enabled and damage clips are available, only the regions of the screen
+ * that have changed will be updated. If PSR SU is not enabled,
+ * or if damage clips are not available, the entire screen will be updated.
+ * @dirty_regions_changed: dirty regions changed
+ *
+ * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
+ * (referred to as "damage clips" in DRM nomenclature) that require updating on
+ * the eDP remote buffer. The responsibility of specifying the dirty regions is
+ * amdgpu_dm's.
+ *
+ * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
+ * plane with regions that require flushing to the eDP remote buffer. In
+ * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
+ * implicitly provide damage clips without any client support via the plane
+ * bounds.
+ */
+static void fill_dc_dirty_rects(struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state,
+ struct drm_crtc_state *crtc_state,
+ struct dc_flip_addrs *flip_addrs,
+ bool is_psr_su,
+ bool *dirty_regions_changed)
+{
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
+ struct rect *dirty_rects = flip_addrs->dirty_rects;
+ u32 num_clips;
+ struct drm_mode_rect *clips;
+ bool bb_changed;
+ bool fb_changed;
+ u32 i = 0;
+ *dirty_regions_changed = false;
+
+ /*
+	 * Cursor plane has its own dirty rect update interface. See
+ * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
+ */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ return;
+
+ if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
+ goto ffu;
+
+ num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+ clips = drm_plane_get_damage_clips(new_plane_state);
+
+ if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 &&
+ is_psr_su)))
+ goto ffu;
+
+ if (!dm_crtc_state->mpo_requested) {
+ if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
+ goto ffu;
+
+ for (; flip_addrs->dirty_rect_count < num_clips; clips++)
+ fill_dc_dirty_rect(new_plane_state->plane,
+ &dirty_rects[flip_addrs->dirty_rect_count],
+ clips->x1, clips->y1,
+ clips->x2 - clips->x1, clips->y2 - clips->y1,
+ &flip_addrs->dirty_rect_count,
+ false);
+ return;
+ }
+
+ /*
+ * MPO is requested. Add entire plane bounding box to dirty rects if
+ * flipped to or damaged.
+ *
+ * If plane is moved or resized, also add old bounding box to dirty
+ * rects.
+ */
+ fb_changed = old_plane_state->fb->base.id !=
+ new_plane_state->fb->base.id;
+ bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
+ old_plane_state->crtc_y != new_plane_state->crtc_y ||
+ old_plane_state->crtc_w != new_plane_state->crtc_w ||
+ old_plane_state->crtc_h != new_plane_state->crtc_h);
+
+ drm_dbg(plane->dev,
+ "[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
+ new_plane_state->plane->base.id,
+ bb_changed, fb_changed, num_clips);
+
+ *dirty_regions_changed = bb_changed;
+
+ if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS)
+ goto ffu;
+
+ if (bb_changed) {
+ fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
+ new_plane_state->crtc_x,
+ new_plane_state->crtc_y,
+ new_plane_state->crtc_w,
+ new_plane_state->crtc_h, &i, false);
+
+ /* Add old plane bounding-box if plane is moved or resized */
+ fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
+ old_plane_state->crtc_x,
+ old_plane_state->crtc_y,
+ old_plane_state->crtc_w,
+ old_plane_state->crtc_h, &i, false);
+ }
+
+ if (num_clips) {
+ for (; i < num_clips; clips++)
+ fill_dc_dirty_rect(new_plane_state->plane,
+ &dirty_rects[i], clips->x1,
+ clips->y1, clips->x2 - clips->x1,
+ clips->y2 - clips->y1, &i, false);
+ } else if (fb_changed && !bb_changed) {
+ fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
+ new_plane_state->crtc_x,
+ new_plane_state->crtc_y,
+ new_plane_state->crtc_w,
+ new_plane_state->crtc_h, &i, false);
+ }
+
+ flip_addrs->dirty_rect_count = i;
+ return;
+
+ffu:
+ fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
+ dm_crtc_state->base.mode.crtc_hdisplay,
+ dm_crtc_state->base.mode.crtc_vdisplay,
+ &flip_addrs->dirty_rect_count, true);
+}
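+
+/*
+ * For example (illustrative values): a single damage clip of
+ * (x1, y1, x2, y2) = (100, 100, 200, 150) on a non-MPO plane becomes one
+ * dirty rect with x = 100, y = 100, width = 100, height = 50, while a
+ * plane moved from (0, 0) to (64, 0) with no clips takes the bb_changed
+ * path above and produces two bounding-box rects, one for the old and
+ * one for the new position.
+ */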
+
+static void update_stream_scaling_settings(const struct drm_display_mode *mode,
+ const struct dm_connector_state *dm_state,
+ struct dc_stream_state *stream)
+{
+ enum amdgpu_rmx_type rmx_type;
+
+	struct rect src = { 0 }; /* viewport in composition space */
+ struct rect dst = { 0 }; /* stream addressable area */
+
+ /* no mode. nothing to be done */
+ if (!mode)
+ return;
+
+ /* Full screen scaling by default */
+ src.width = mode->hdisplay;
+ src.height = mode->vdisplay;
+ dst.width = stream->timing.h_addressable;
+ dst.height = stream->timing.v_addressable;
+
+ if (dm_state) {
+ rmx_type = dm_state->scaling;
+ if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
+ if (src.width * dst.height <
+ src.height * dst.width) {
+ /* height needs less upscaling/more downscaling */
+ dst.width = src.width *
+ dst.height / src.height;
+ } else {
+ /* width needs less upscaling/more downscaling */
+ dst.height = src.height *
+ dst.width / src.width;
+ }
+ } else if (rmx_type == RMX_CENTER) {
+ dst = src;
+ }
+
+ dst.x = (stream->timing.h_addressable - dst.width) / 2;
+ dst.y = (stream->timing.v_addressable - dst.height) / 2;
+
+ if (dm_state->underscan_enable) {
+ dst.x += dm_state->underscan_hborder / 2;
+ dst.y += dm_state->underscan_vborder / 2;
+ dst.width -= dm_state->underscan_hborder;
+ dst.height -= dm_state->underscan_vborder;
+ }
+ }
+
+ stream->src = src;
+ stream->dst = dst;
+
+ DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
+ dst.x, dst.y, dst.width, dst.height);
+}
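+
+/*
+ * Worked example for RMX_ASPECT above (illustrative): a 1920x1080 source
+ * on a 1680x1050 native timing compares 1920 * 1050 (2016000) against
+ * 1080 * 1680 (1814400); the width needs more downscaling, so dst
+ * becomes 1680 x (1080 * 1680 / 1920) = 1680x945, centered with
+ * dst.y = (1050 - 945) / 2 = 52.
+ */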
+
+static enum dc_color_depth
+convert_color_depth_from_display_info(const struct drm_connector *connector,
+ bool is_y420, int requested_bpc)
+{
+ u8 bpc;
+
+ if (is_y420) {
+ bpc = 8;
+
+ /* Cap display bpc based on HDMI 2.0 HF-VSDB */
+ if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
+ bpc = 16;
+ else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
+ bpc = 12;
+ else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
+ bpc = 10;
+ } else {
+ bpc = (uint8_t)connector->display_info.bpc;
+ /* Assume 8 bpc by default if no bpc is specified. */
+ bpc = bpc ? bpc : 8;
+ }
+
+ if (requested_bpc > 0) {
+ /*
+ * Cap display bpc based on the user requested value.
+ *
+		 * The value for state->max_bpc may not be correctly updated
+ * depending on when the connector gets added to the state
+ * or if this was called outside of atomic check, so it
+ * can't be used directly.
+ */
+ bpc = min_t(u8, bpc, requested_bpc);
+
+ /* Round down to the nearest even number. */
+ bpc = bpc - (bpc & 1);
+ }
+
+ switch (bpc) {
+ case 0:
+ /*
+		 * Temporary workaround: DRM doesn't parse color depth for
+		 * EDID revisions before 1.4.
+ * TODO: Fix edid parsing
+ */
+ return COLOR_DEPTH_888;
+ case 6:
+ return COLOR_DEPTH_666;
+ case 8:
+ return COLOR_DEPTH_888;
+ case 10:
+ return COLOR_DEPTH_101010;
+ case 12:
+ return COLOR_DEPTH_121212;
+ case 14:
+ return COLOR_DEPTH_141414;
+ case 16:
+ return COLOR_DEPTH_161616;
+ default:
+ return COLOR_DEPTH_UNDEFINED;
+ }
+}
+
+static enum dc_aspect_ratio
+get_aspect_ratio(const struct drm_display_mode *mode_in)
+{
+ /* 1-1 mapping, since both enums follow the HDMI spec. */
+ return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
+}
+
+static enum dc_color_space
+get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
+ const struct drm_connector_state *connector_state)
+{
+ enum dc_color_space color_space = COLOR_SPACE_SRGB;
+
+ switch (connector_state->colorspace) {
+ case DRM_MODE_COLORIMETRY_BT601_YCC:
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space = COLOR_SPACE_YCBCR601_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR601;
+ break;
+ case DRM_MODE_COLORIMETRY_BT709_YCC:
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space = COLOR_SPACE_YCBCR709_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR709;
+ break;
+ case DRM_MODE_COLORIMETRY_OPRGB:
+ color_space = COLOR_SPACE_ADOBERGB;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_RGB:
+ case DRM_MODE_COLORIMETRY_BT2020_YCC:
+ if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
+ color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
+ else
+ color_space = COLOR_SPACE_2020_YCBCR;
+ break;
+ case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
+ default:
+ if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
+ color_space = COLOR_SPACE_SRGB;
+ /*
+			 * 27030 kHz is the separation point between HDTV and SDTV
+			 * according to the HDMI spec; we use YCbCr709 and YCbCr601
+			 * respectively.
+ */
+ } else if (dc_crtc_timing->pix_clk_100hz > 270300) {
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space =
+ COLOR_SPACE_YCBCR709_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR709;
+ } else {
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space =
+ COLOR_SPACE_YCBCR601_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR601;
+ }
+ break;
+ }
+
+ return color_space;
+}
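+
+/*
+ * For the default-colorimetry YCbCr branch above: 480p (27 MHz, i.e.
+ * pix_clk_100hz == 270000) falls at or below the 270300 threshold and is
+ * treated as SDTV (YCbCr601), while 1080p (148.5 MHz, pix_clk_100hz ==
+ * 1485000) exceeds it and is treated as HDTV (YCbCr709).
+ */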
+
+static enum display_content_type
+get_output_content_type(const struct drm_connector_state *connector_state)
+{
+ switch (connector_state->content_type) {
+ default:
+ case DRM_MODE_CONTENT_TYPE_NO_DATA:
+ return DISPLAY_CONTENT_TYPE_NO_DATA;
+ case DRM_MODE_CONTENT_TYPE_GRAPHICS:
+ return DISPLAY_CONTENT_TYPE_GRAPHICS;
+ case DRM_MODE_CONTENT_TYPE_PHOTO:
+ return DISPLAY_CONTENT_TYPE_PHOTO;
+ case DRM_MODE_CONTENT_TYPE_CINEMA:
+ return DISPLAY_CONTENT_TYPE_CINEMA;
+ case DRM_MODE_CONTENT_TYPE_GAME:
+ return DISPLAY_CONTENT_TYPE_GAME;
+ }
+}
+
+static bool adjust_colour_depth_from_display_info(
+ struct dc_crtc_timing *timing_out,
+ const struct drm_display_info *info)
+{
+ enum dc_color_depth depth = timing_out->display_color_depth;
+ int normalized_clk;
+
+ do {
+ normalized_clk = timing_out->pix_clk_100hz / 10;
+ /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
+ if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ normalized_clk /= 2;
+ /* Adjusting pix clock following on HDMI spec based on colour depth */
+ switch (depth) {
+ case COLOR_DEPTH_888:
+ break;
+ case COLOR_DEPTH_101010:
+ normalized_clk = (normalized_clk * 30) / 24;
+ break;
+ case COLOR_DEPTH_121212:
+ normalized_clk = (normalized_clk * 36) / 24;
+ break;
+ case COLOR_DEPTH_161616:
+ normalized_clk = (normalized_clk * 48) / 24;
+ break;
+ default:
+ /* The above depths are the only ones valid for HDMI. */
+ return false;
+ }
+ if (normalized_clk <= info->max_tmds_clock) {
+ timing_out->display_color_depth = depth;
+ return true;
+ }
+ } while (--depth > COLOR_DEPTH_666);
+ return false;
+}
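+
+/*
+ * Worked example for the loop above (the sink limit is illustrative):
+ * 4k60 RGB has normalized_clk = 5940000 / 10 = 594000 kHz. At 10 bpc
+ * this scales to 594000 * 30 / 24 = 742500 kHz, which exceeds a 600000
+ * kHz max_tmds_clock, so the loop steps down one depth and accepts
+ * COLOR_DEPTH_888 at 594000 kHz.
+ */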
+
+static void fill_stream_properties_from_drm_display_mode(
+ struct dc_stream_state *stream,
+ const struct drm_display_mode *mode_in,
+ const struct drm_connector *connector,
+ const struct drm_connector_state *connector_state,
+ const struct dc_stream_state *old_stream,
+ int requested_bpc)
+{
+ struct dc_crtc_timing *timing_out = &stream->timing;
+ const struct drm_display_info *info = &connector->display_info;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct hdmi_vendor_infoframe hv_frame;
+ struct hdmi_avi_infoframe avi_frame;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ memset(&hv_frame, 0, sizeof(hv_frame));
+ memset(&avi_frame, 0, sizeof(avi_frame));
+
+ timing_out->h_border_left = 0;
+ timing_out->h_border_right = 0;
+ timing_out->v_border_top = 0;
+ timing_out->v_border_bottom = 0;
+ /* TODO: un-hardcode */
+ if (drm_mode_is_420_only(info, mode_in)
+ && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if (drm_mode_is_420_also(info, mode_in)
+ && aconnector
+ && aconnector->force_yuv420_output)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
+ && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
+ else
+ timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
+
+ timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
+ timing_out->display_color_depth = convert_color_depth_from_display_info(
+ connector,
+ (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
+ requested_bpc);
+ timing_out->scan_type = SCANNING_TYPE_NODATA;
+ timing_out->hdmi_vic = 0;
+
+ if (old_stream) {
+ timing_out->vic = old_stream->timing.vic;
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
+ } else {
+ timing_out->vic = drm_match_cea_mode(mode_in);
+ if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
+ if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
+ }
+
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
+ timing_out->vic = avi_frame.video_code;
+ drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
+ timing_out->hdmi_vic = hv_frame.vic;
+ }
+
+ if (aconnector && is_freesync_video_mode(mode_in, aconnector)) {
+ timing_out->h_addressable = mode_in->hdisplay;
+ timing_out->h_total = mode_in->htotal;
+ timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
+ timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
+ timing_out->v_total = mode_in->vtotal;
+ timing_out->v_addressable = mode_in->vdisplay;
+ timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
+ timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
+ timing_out->pix_clk_100hz = mode_in->clock * 10;
+ } else {
+ timing_out->h_addressable = mode_in->crtc_hdisplay;
+ timing_out->h_total = mode_in->crtc_htotal;
+ timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
+ timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
+ timing_out->v_total = mode_in->crtc_vtotal;
+ timing_out->v_addressable = mode_in->crtc_vdisplay;
+ timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
+ timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
+ timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
+ }
+
+ timing_out->aspect_ratio = get_aspect_ratio(mode_in);
+
+ stream->out_transfer_func.type = TF_TYPE_PREDEFINED;
+ stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ if (!adjust_colour_depth_from_display_info(timing_out, info) &&
+ drm_mode_is_420_also(info, mode_in) &&
+ timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ adjust_colour_depth_from_display_info(timing_out, info);
+ }
+ }
+
+ stream->output_color_space = get_output_color_space(timing_out, connector_state);
+ stream->content_type = get_output_content_type(connector_state);
+}
+
+static void fill_audio_info(struct audio_info *audio_info,
+ const struct drm_connector *drm_connector,
+ const struct dc_sink *dc_sink)
+{
+ int i = 0;
+ int cea_revision = 0;
+ const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
+
+ audio_info->manufacture_id = edid_caps->manufacturer_id;
+ audio_info->product_id = edid_caps->product_id;
+
+ cea_revision = drm_connector->display_info.cea_rev;
+
+ strscpy(audio_info->display_name,
+ edid_caps->display_name,
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
+
+ if (cea_revision >= 3) {
+ audio_info->mode_count = edid_caps->audio_mode_count;
+
+ for (i = 0; i < audio_info->mode_count; ++i) {
+ audio_info->modes[i].format_code =
+ (enum audio_format_code)
+ (edid_caps->audio_modes[i].format_code);
+ audio_info->modes[i].channel_count =
+ edid_caps->audio_modes[i].channel_count;
+ audio_info->modes[i].sample_rates.all =
+ edid_caps->audio_modes[i].sample_rate;
+ audio_info->modes[i].sample_size =
+ edid_caps->audio_modes[i].sample_size;
+ }
+ }
+
+ audio_info->flags.all = edid_caps->speaker_flags;
+
+ /* TODO: We only check for the progressive mode, check for interlace mode too */
+ if (drm_connector->latency_present[0]) {
+ audio_info->video_latency = drm_connector->video_latency[0];
+ audio_info->audio_latency = drm_connector->audio_latency[0];
+ }
+
+ /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
+}
+
+static void
+copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
+ struct drm_display_mode *dst_mode)
+{
+ dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
+ dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
+ dst_mode->crtc_clock = src_mode->crtc_clock;
+ dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
+ dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
+ dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
+ dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
+ dst_mode->crtc_htotal = src_mode->crtc_htotal;
+ dst_mode->crtc_hskew = src_mode->crtc_hskew;
+ dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
+ dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
+ dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
+ dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
+ dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
+}
+
+static void
+decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
+ const struct drm_display_mode *native_mode,
+ bool scale_enabled)
+{
+ if (scale_enabled) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else if (native_mode->clock == drm_mode->clock &&
+ native_mode->htotal == drm_mode->htotal &&
+ native_mode->vtotal == drm_mode->vtotal) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else {
+		/* no scaling and no amdgpu-inserted mode, no need to patch */
+ }
+}
+
+static struct dc_sink *
+create_fake_sink(struct dc_link *link)
+{
+ struct dc_sink_init_data sink_init_data = { 0 };
+ struct dc_sink *sink = NULL;
+
+ sink_init_data.link = link;
+ sink_init_data.sink_signal = link->connector_signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ DRM_ERROR("Failed to create sink!\n");
+ return NULL;
+ }
+ sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
+
+ return sink;
+}
+
+static void set_multisync_trigger_params(
+ struct dc_stream_state *stream)
+{
+ struct dc_stream_state *master = NULL;
+
+ if (stream->triggered_crtc_reset.enabled) {
+ master = stream->triggered_crtc_reset.event_source;
+ stream->triggered_crtc_reset.event =
+ master->timing.flags.VSYNC_POSITIVE_POLARITY ?
+ CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
+ stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
+ }
+}
+
+static void set_master_stream(struct dc_stream_state *stream_set[],
+ int stream_count)
+{
+ int j, highest_rfr = 0, master_stream = 0;
+
+ for (j = 0; j < stream_count; j++) {
+ if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
+ int refresh_rate = 0;
+
+ refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
+ (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
+ if (refresh_rate > highest_rfr) {
+ highest_rfr = refresh_rate;
+ master_stream = j;
+ }
+ }
+ }
+ for (j = 0; j < stream_count; j++) {
+ if (stream_set[j])
+ stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
+ }
+}
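+
+/*
+ * The refresh-rate computation above works in whole Hz: a 1080p60 stream
+ * with pix_clk_100hz = 1485000, h_total = 2200 and v_total = 1125 gives
+ * (1485000 * 100) / (2200 * 1125) = 60, so among multisync-enabled
+ * streams the one with the highest such value becomes the event source.
+ */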
+
+static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
+{
+ int i = 0;
+ struct dc_stream_state *stream;
+
+ if (context->stream_count < 2)
+ return;
+ for (i = 0; i < context->stream_count ; i++) {
+ if (!context->streams[i])
+ continue;
+ /*
+ * TODO: add a function to read AMD VSDB bits and set
+ * crtc_sync_master.multi_sync_enabled flag
+ * For now it's set to false
+ */
+ }
+
+ set_master_stream(context->streams, context->stream_count);
+
+ for (i = 0; i < context->stream_count ; i++) {
+ stream = context->streams[i];
+
+ if (!stream)
+ continue;
+
+ set_multisync_trigger_params(stream);
+ }
+}
+
+/**
+ * DOC: FreeSync Video
+ *
+ * When a userspace application wants to play a video, the content follows a
+ * standard format definition that usually specifies the FPS for that format.
+ * The list below illustrates some video formats and their expected FPS,
+ * respectively:
+ *
+ * - TV/NTSC (23.976 FPS)
+ * - Cinema (24 FPS)
+ * - TV/PAL (25 FPS)
+ * - TV/NTSC (29.97 FPS)
+ * - TV/NTSC (30 FPS)
+ * - Cinema HFR (48 FPS)
+ * - TV/PAL (50 FPS)
+ * - Commonly used (60 FPS)
+ * - Multiples of 24 (48,72,96 FPS)
+ *
+ * The list of standard video formats is not huge and can be added to the
+ * connector modeset list beforehand. With that, userspace can leverage
+ * FreeSync to extend the front porch in order to attain the target refresh
+ * rate. Such a switch will happen seamlessly, without screen blanking or
+ * reprogramming of the output in any other way. If the userspace requests a
+ * modesetting change compatible with FreeSync modes that only differ in the
+ * refresh rate, DC will skip the full update and avoid blink during the
+ * transition. For example, the video player can change the modesetting from
+ * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
+ * causing any display blink. This same concept can be applied to a mode
+ * setting change.
+ */
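+
+/*
+ * For example (illustrative timing): a 1920x1080@60 base mode with
+ * htotal 2200, vtotal 1125 and a 148.5 MHz pixel clock can present 50 Hz
+ * content by stretching the vertical front porch until vtotal reaches
+ * 148500000 / (2200 * 50) = 1350 lines, with no other timing change and
+ * hence no mode-set blank.
+ */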
+static struct drm_display_mode *
+get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
+ bool use_probed_modes)
+{
+ struct drm_display_mode *m, *m_pref = NULL;
+ u16 current_refresh, highest_refresh;
+ struct list_head *list_head = use_probed_modes ?
+ &aconnector->base.probed_modes :
+ &aconnector->base.modes;
+
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return NULL;
+
+ if (aconnector->freesync_vid_base.clock != 0)
+ return &aconnector->freesync_vid_base;
+
+ /* Find the preferred mode */
+ list_for_each_entry(m, list_head, head) {
+ if (m->type & DRM_MODE_TYPE_PREFERRED) {
+ m_pref = m;
+ break;
+ }
+ }
+
+ if (!m_pref) {
+		/* Probably an EDID with no preferred mode. Fall back to the first entry */
+ m_pref = list_first_entry_or_null(
+ &aconnector->base.modes, struct drm_display_mode, head);
+ if (!m_pref) {
+ DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
+ return NULL;
+ }
+ }
+
+ highest_refresh = drm_mode_vrefresh(m_pref);
+
+ /*
+ * Find the mode with highest refresh rate with same resolution.
+ * For some monitors, preferred mode is not the mode with highest
+ * supported refresh rate.
+ */
+ list_for_each_entry(m, list_head, head) {
+ current_refresh = drm_mode_vrefresh(m);
+
+ if (m->hdisplay == m_pref->hdisplay &&
+ m->vdisplay == m_pref->vdisplay &&
+ highest_refresh < current_refresh) {
+ highest_refresh = current_refresh;
+ m_pref = m;
+ }
+ }
+
+ drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
+ return m_pref;
+}
+
+static bool is_freesync_video_mode(const struct drm_display_mode *mode,
+ struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_display_mode *high_mode;
+ int timing_diff;
+
+ high_mode = get_highest_refresh_rate_mode(aconnector, false);
+ if (!high_mode || !mode)
+ return false;
+
+ timing_diff = high_mode->vtotal - mode->vtotal;
+
+ if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
+ high_mode->hdisplay != mode->hdisplay ||
+ high_mode->vdisplay != mode->vdisplay ||
+ high_mode->hsync_start != mode->hsync_start ||
+ high_mode->hsync_end != mode->hsync_end ||
+ high_mode->htotal != mode->htotal ||
+ high_mode->hskew != mode->hskew ||
+ high_mode->vscan != mode->vscan ||
+ high_mode->vsync_start - mode->vsync_start != timing_diff ||
+ high_mode->vsync_end - mode->vsync_end != timing_diff)
+ return false;
+ else
+ return true;
+}
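+
+/*
+ * Continuing the example above: against a 60 Hz base mode with vtotal
+ * 1125, a 50 Hz variant with vtotal 1350 gives timing_diff = -225, and
+ * it qualifies as a freesync video mode only when its vsync_start and
+ * vsync_end are each 225 lines later than the base mode's and every
+ * other field matches, i.e. only the front porch differs.
+ */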
+
+static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
+ struct dc_sink *sink, struct dc_stream_state *stream,
+ struct dsc_dec_dpcd_caps *dsc_caps)
+{
+ stream->timing.flags.DSC = 0;
+ dsc_caps->is_dsc_supported = false;
+
+ if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ sink->sink_signal == SIGNAL_TYPE_EDP)) {
+ if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
+ sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+ dsc_caps);
+ }
+}
+
+static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
+ struct dc_sink *sink, struct dc_stream_state *stream,
+ struct dsc_dec_dpcd_caps *dsc_caps,
+ uint32_t max_dsc_target_bpp_limit_override)
+{
+ const struct dc_link_settings *verified_link_cap = NULL;
+ u32 link_bw_in_kbps;
+ u32 edp_min_bpp_x16, edp_max_bpp_x16;
+ struct dc *dc = sink->ctx->dc;
+ struct dc_dsc_bw_range bw_range = {0};
+ struct dc_dsc_config dsc_cfg = {0};
+ struct dc_dsc_config_options dsc_options = {0};
+
+ dc_dsc_get_default_config_option(dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
+
+ verified_link_cap = dc_link_get_link_cap(stream->link);
+ link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
+ edp_min_bpp_x16 = 8 * 16;
+ edp_max_bpp_x16 = 8 * 16;
+
+ if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
+ edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
+
+ if (edp_max_bpp_x16 < edp_min_bpp_x16)
+ edp_min_bpp_x16 = edp_max_bpp_x16;
+
+ if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
+ dc->debug.dsc_min_slice_height_override,
+ edp_min_bpp_x16, edp_max_bpp_x16,
+ dsc_caps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &bw_range)) {
+
+ if (bw_range.max_kbps < link_bw_in_kbps) {
+ if (dc_dsc_compute_config(dc->res_pool->dscs[0],
+ dsc_caps,
+ &dsc_options,
+ 0,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &dsc_cfg)) {
+ stream->timing.dsc_cfg = dsc_cfg;
+ stream->timing.flags.DSC = 1;
+ stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
+ }
+ return;
+ }
+ }
+
+ if (dc_dsc_compute_config(dc->res_pool->dscs[0],
+ dsc_caps,
+ &dsc_options,
+ link_bw_in_kbps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &dsc_cfg)) {
+ stream->timing.dsc_cfg = dsc_cfg;
+ stream->timing.flags.DSC = 1;
+ }
+}
+
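+/*
+ * SST DSC policy: enable DSC for eDP via the dedicated eDP path, for DP SST
+ * sinks when needed, and for DP-to-HDMI PCONs when the timing exceeds the
+ * link bandwidth. debugfs overrides for force-enable, slice counts and bpp
+ * are applied last.
+ */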
+static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
+ struct dc_sink *sink, struct dc_stream_state *stream,
+ struct dsc_dec_dpcd_caps *dsc_caps)
+{
+ struct drm_connector *drm_connector = &aconnector->base;
+ u32 link_bandwidth_kbps;
+ struct dc *dc = sink->ctx->dc;
+ u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
+ u32 dsc_max_supported_bw_in_kbps;
+ u32 max_dsc_target_bpp_limit_override =
+ drm_connector->display_info.max_dsc_bpp;
+ struct dc_dsc_config_options dsc_options = {0};
+
+ dc_dsc_get_default_config_option(dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
+
+ link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
+ dc_link_get_link_cap(aconnector->dc_link));
+
+ /* Set DSC policy according to dsc_clock_en */
+ dc_dsc_policy_set_enable_dsc_when_not_needed(
+ aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
+
+ if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
+ !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
+ dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
+
+ apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
+
+ } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+ dsc_caps,
+ &dsc_options,
+ link_bandwidth_kbps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &stream->timing.dsc_cfg)) {
+ stream->timing.flags.DSC = 1;
+ DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
+ }
+ } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
+ timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link));
+ max_supported_bw_in_kbps = link_bandwidth_kbps;
+ dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
+
+ if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
+ max_supported_bw_in_kbps > 0 &&
+ dsc_max_supported_bw_in_kbps > 0)
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+ dsc_caps,
+ &dsc_options,
+ dsc_max_supported_bw_in_kbps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &stream->timing.dsc_cfg)) {
+ stream->timing.flags.DSC = 1;
+ DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
+ __func__, drm_connector->name);
+ }
+ }
+ }
+
+ /* Overwrite the stream flag if DSC is enabled through debugfs */
+ if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
+ stream->timing.flags.DSC = 1;
+
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
+ stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
+
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
+ stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
+
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
+ stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
+}
+
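+/*
+ * Build a dc_stream_state for the given connector and mode. Handles both
+ * real and writeback connectors, optionally recalculating the timing for
+ * FreeSync video modes, and applies scaling, DSC and infopacket setup.
+ */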
+static struct dc_stream_state *
+create_stream_for_sink(struct drm_connector *connector,
+ const struct drm_display_mode *drm_mode,
+ const struct dm_connector_state *dm_state,
+ const struct dc_stream_state *old_stream,
+ int requested_bpc)
+{
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct drm_display_mode *preferred_mode = NULL;
+ const struct drm_connector_state *con_state = &dm_state->base;
+ struct dc_stream_state *stream = NULL;
+ struct drm_display_mode mode;
+ struct drm_display_mode saved_mode;
+ struct drm_display_mode *freesync_mode = NULL;
+ bool native_mode_found = false;
+ bool recalculate_timing = false;
+ bool scale = dm_state->scaling != RMX_OFF;
+ int mode_refresh;
+ int preferred_refresh = 0;
+ enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
+ struct dsc_dec_dpcd_caps dsc_caps;
+
+ struct dc_link *link = NULL;
+ struct dc_sink *sink = NULL;
+
+ drm_mode_init(&mode, drm_mode);
+ memset(&saved_mode, 0, sizeof(saved_mode));
+
+ if (connector == NULL) {
+ DRM_ERROR("connector is NULL!\n");
+ return stream;
+ }
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
+		aconnector = to_amdgpu_dm_connector(connector);
+ link = aconnector->dc_link;
+ } else {
+ struct drm_writeback_connector *wbcon = NULL;
+ struct amdgpu_dm_wb_connector *dm_wbcon = NULL;
+
+ wbcon = drm_connector_to_writeback(connector);
+ dm_wbcon = to_amdgpu_dm_wb_connector(wbcon);
+ link = dm_wbcon->link;
+ }
+
+ if (!aconnector || !aconnector->dc_sink) {
+ sink = create_fake_sink(link);
+ if (!sink)
+ return stream;
+
+ } else {
+ sink = aconnector->dc_sink;
+ dc_sink_retain(sink);
+ }
+
+ stream = dc_create_stream_for_sink(sink);
+
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ goto finish;
+ }
+
+ /* We leave this NULL for writeback connectors */
+ stream->dm_stream_context = aconnector;
+
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE =
+ connector->display_info.hdmi.scdc.scrambling.low_rates;
+
+ list_for_each_entry(preferred_mode, &connector->modes, head) {
+ /* Search for preferred mode */
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
+ native_mode_found = true;
+ break;
+ }
+ }
+ if (!native_mode_found)
+ preferred_mode = list_first_entry_or_null(
+ &connector->modes,
+ struct drm_display_mode,
+ head);
+
+ mode_refresh = drm_mode_vrefresh(&mode);
+
+ if (preferred_mode == NULL) {
+		/*
+		 * This may not be an error: the use case is a hotplug with no
+		 * usermode call to reset and set the mode. In that case we set
+		 * the mode ourselves to restore the previous mode, and the
+		 * mode list may not be filled in time.
+		 */
+ DRM_DEBUG_DRIVER("No preferred mode found\n");
+ } else if (aconnector) {
+ recalculate_timing = amdgpu_freesync_vid_mode &&
+ is_freesync_video_mode(&mode, aconnector);
+ if (recalculate_timing) {
+ freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
+ drm_mode_copy(&saved_mode, &mode);
+ saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio;
+ drm_mode_copy(&mode, freesync_mode);
+ mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio;
+ } else {
+ decide_crtc_timing_for_drm_display_mode(
+ &mode, preferred_mode, scale);
+
+ preferred_refresh = drm_mode_vrefresh(preferred_mode);
+ }
+ }
+
+ if (recalculate_timing)
+ drm_mode_set_crtcinfo(&saved_mode, 0);
+
+	/*
+	 * If scaling is enabled and the refresh rate didn't change,
+	 * copy the VIC and polarities from the old timings.
+	 */
+ if (!scale || mode_refresh != preferred_refresh)
+ fill_stream_properties_from_drm_display_mode(
+ stream, &mode, connector, con_state, NULL,
+ requested_bpc);
+ else
+ fill_stream_properties_from_drm_display_mode(
+ stream, &mode, connector, con_state, old_stream,
+ requested_bpc);
+
+ /* The rest isn't needed for writeback connectors */
+ if (!aconnector)
+ goto finish;
+
+ if (aconnector->timing_changed) {
+ drm_dbg(aconnector->base.dev,
+ "overriding timing for automated test, bpc %d, changing to %d\n",
+ stream->timing.display_color_depth,
+ aconnector->timing_requested->display_color_depth);
+ stream->timing = *aconnector->timing_requested;
+ }
+
+ /* SST DSC determination policy */
+ update_dsc_caps(aconnector, sink, stream, &dsc_caps);
+ if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
+ apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
+
+ update_stream_scaling_settings(&mode, dm_state, stream);
+
+ fill_audio_info(
+ &stream->audio_info,
+ connector,
+ sink);
+
+ update_stream_signal(stream, sink);
+
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ stream->signal == SIGNAL_TYPE_EDP) {
+		/*
+		 * Decide whether the stream supports VSC SDP colorimetry
+		 * before building the VSC infopacket.
+		 */
+ stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
+ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
+
+		if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
+ tf = TRANSFER_FUNC_GAMMA_22;
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
+ aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+
+ }
+finish:
+ dc_sink_release(sink);
+
+ return stream;
+}
+
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+ bool connected;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+	/*
+	 * Notes:
+	 * 1. This interface is NOT called in the context of an HPD irq.
+	 * 2. This interface *is* called in the context of a user-mode ioctl,
+	 *    which makes it a bad place for *any* MST-related activity.
+	 */
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
+ !aconnector->fake_enable)
+ connected = (aconnector->dc_sink != NULL);
+ else
+ connected = (aconnector->base.force == DRM_FORCE_ON ||
+ aconnector->base.force == DRM_FORCE_ON_DIGITAL);
+
+ update_subconnector_property(aconnector);
+
+ return (connected ? connector_status_connected :
+ connector_status_disconnected);
+}
+
+int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *connector_state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dm_connector_state *dm_old_state =
+ to_dm_connector_state(connector->state);
+ struct dm_connector_state *dm_new_state =
+ to_dm_connector_state(connector_state);
+
+ int ret = -EINVAL;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ enum amdgpu_rmx_type rmx_type;
+
+ switch (val) {
+ case DRM_MODE_SCALE_CENTER:
+ rmx_type = RMX_CENTER;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ rmx_type = RMX_ASPECT;
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ rmx_type = RMX_FULL;
+ break;
+ case DRM_MODE_SCALE_NONE:
+ default:
+ rmx_type = RMX_OFF;
+ break;
+ }
+
+ if (dm_old_state->scaling == rmx_type)
+ return 0;
+
+ dm_new_state->scaling = rmx_type;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_hborder_property) {
+ dm_new_state->underscan_hborder = val;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_vborder_property) {
+ dm_new_state->underscan_vborder = val;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_property) {
+ dm_new_state->underscan_enable = val;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dm_connector_state *dm_state =
+ to_dm_connector_state(state);
+ int ret = -EINVAL;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ switch (dm_state->scaling) {
+ case RMX_CENTER:
+ *val = DRM_MODE_SCALE_CENTER;
+ break;
+ case RMX_ASPECT:
+ *val = DRM_MODE_SCALE_ASPECT;
+ break;
+ case RMX_FULL:
+ *val = DRM_MODE_SCALE_FULLSCREEN;
+ break;
+ case RMX_OFF:
+ default:
+ *val = DRM_MODE_SCALE_NONE;
+ break;
+ }
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_hborder_property) {
+ *val = dm_state->underscan_hborder;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_vborder_property) {
+ *val = dm_state->underscan_vborder;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_property) {
+ *val = dm_state->underscan_enable;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * DOC: panel power savings
+ *
+ * The display manager allows you to set your desired **panel power savings**
+ * level (from 0 to 4, with 0 representing off), e.g. using the following::
+ *
+ * # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings
+ *
+ * Modifying this value can have implications on color accuracy, so tread
+ * carefully.
+ */
+
+static ssize_t panel_power_savings_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_connector *connector = dev_get_drvdata(device);
+ struct drm_device *dev = connector->dev;
+ u8 val;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ val = to_dm_connector_state(connector->state)->abm_level ==
+ ABM_LEVEL_IMMEDIATE_DISABLE ? 0 :
+ to_dm_connector_state(connector->state)->abm_level;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t panel_power_savings_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_connector *connector = dev_get_drvdata(device);
+ struct drm_device *dev = connector->dev;
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 0, &val);
+
+ if (ret)
+ return ret;
+
+ if (val < 0 || val > 4)
+ return -EINVAL;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ to_dm_connector_state(connector->state)->abm_level = val ?:
+ ABM_LEVEL_IMMEDIATE_DISABLE;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ drm_kms_helper_hotplug_event(dev);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(panel_power_savings);
+
+static struct attribute *amdgpu_attrs[] = {
+ &dev_attr_panel_power_savings.attr,
+ NULL
+};
+
+static const struct attribute_group amdgpu_group = {
+ .name = "amdgpu",
+ .attrs = amdgpu_attrs
+};
+
+static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+ amdgpu_dm_abm_level < 0)
+ sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group);
+
+ drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
+}
+
+static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+
+ /*
+ * Call only if mst_mgr was initialized before since it's not done
+ * for all connector types.
+ */
+ if (aconnector->mst_mgr.dev)
+ drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
+
+ if (aconnector->bl_idx != -1) {
+ backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
+ dm->backlight_dev[aconnector->bl_idx] = NULL;
+ }
+
+ if (aconnector->dc_em_sink)
+ dc_sink_release(aconnector->dc_em_sink);
+ aconnector->dc_em_sink = NULL;
+ if (aconnector->dc_sink)
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+
+ drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ if (aconnector->i2c) {
+ i2c_del_adapter(&aconnector->i2c->base);
+ kfree(aconnector->i2c);
+ }
+ kfree(aconnector->dm_dp_aux.aux.name);
+
+ kfree(connector);
+}
+
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
+{
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+
+ kfree(state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (state) {
+ state->scaling = RMX_OFF;
+ state->underscan_enable = false;
+ state->underscan_hborder = 0;
+ state->underscan_vborder = 0;
+ state->base.max_requested_bpc = 8;
+ state->vcpi_slots = 0;
+ state->pbn = 0;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (amdgpu_dm_abm_level <= 0)
+ state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE;
+ else
+ state->abm_level = amdgpu_dm_abm_level;
+ }
+
+ __drm_atomic_helper_connector_reset(connector, &state->base);
+ }
+}
+
+struct drm_connector_state *
+amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
+{
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
+ struct dm_connector_state *new_state =
+ kmemdup(state, sizeof(*state), GFP_KERNEL);
+
+ if (!new_state)
+ return NULL;
+
+ __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
+
+ new_state->freesync_capable = state->freesync_capable;
+ new_state->abm_level = state->abm_level;
+ new_state->scaling = state->scaling;
+ new_state->underscan_enable = state->underscan_enable;
+ new_state->underscan_hborder = state->underscan_hborder;
+ new_state->underscan_vborder = state->underscan_vborder;
+ new_state->vcpi_slots = state->vcpi_slots;
+ new_state->pbn = state->pbn;
+ return &new_state->base;
+}
+
+static int
+amdgpu_dm_connector_late_register(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ int r;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+ amdgpu_dm_abm_level < 0) {
+ r = sysfs_create_group(&connector->kdev->kobj,
+ &amdgpu_group);
+ if (r)
+ return r;
+ }
+
+ amdgpu_dm_register_backlight_device(amdgpu_dm_connector);
+
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
+ r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
+ if (r)
+ return r;
+ }
+
+#if defined(CONFIG_DEBUG_FS)
+ connector_debugfs_init(amdgpu_dm_connector);
+#endif
+
+ return 0;
+}
+
+static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dc_link *dc_link = aconnector->dc_link;
+ struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
+ struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link && dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
+
+	/*
+	 * Note: drm_get_edid gets the EDID in the following order:
+	 * 1) override EDID, if set via the edid_override debugfs,
+	 * 2) firmware EDID, if set via the edid_firmware module parameter,
+	 * 3) regular DDC read.
+	 */
+ edid = drm_get_edid(connector, ddc);
+ if (!edid) {
+ DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ return;
+ }
+
+ aconnector->edid = edid;
+
+ /* Update emulated (virtual) sink's EDID */
+ if (dc_em_sink && dc_link) {
+ memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
+ memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH);
+ dm_helpers_parse_edid_caps(
+ dc_link,
+ &dc_em_sink->dc_edid,
+ &dc_em_sink->edid_caps);
+ }
+}
+
+static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
+ .reset = amdgpu_dm_connector_funcs_reset,
+ .detect = amdgpu_dm_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = amdgpu_dm_connector_destroy,
+ .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
+ .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
+ .late_register = amdgpu_dm_connector_late_register,
+ .early_unregister = amdgpu_dm_connector_unregister,
+ .force = amdgpu_dm_connector_funcs_force
+};
+
+static int get_modes(struct drm_connector *connector)
+{
+ return amdgpu_dm_connector_get_modes(connector);
+}
+
+static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct dc_link *dc_link = aconnector->dc_link;
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_VIRTUAL
+ };
+ struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
+
+	/*
+	 * Note: drm_get_edid gets the EDID in the following order:
+	 * 1) override EDID, if set via the edid_override debugfs,
+	 * 2) firmware EDID, if set via the edid_firmware module parameter,
+	 * 3) regular DDC read.
+	 */
+ edid = drm_get_edid(connector, ddc);
+ if (!edid) {
+ DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ return;
+ }
+
+ if (drm_detect_hdmi_monitor(edid))
+ init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
+
+ aconnector->edid = edid;
+
+ aconnector->dc_em_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ (uint8_t *)edid,
+ (edid->extensions + 1) * EDID_LENGTH,
+ &init_params);
+
+ if (aconnector->base.force == DRM_FORCE_ON) {
+ aconnector->dc_sink = aconnector->dc_link->local_sink ?
+ aconnector->dc_link->local_sink :
+ aconnector->dc_em_sink;
+ dc_sink_retain(aconnector->dc_sink);
+ }
+}
+
+static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_link *link = (struct dc_link *)aconnector->dc_link;
+
+	/*
+	 * In case of a headless boot with force-on for a DP managed
+	 * connector, these settings have to be != 0 to get an initial
+	 * modeset.
+	 */
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
+ link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
+ }
+
+ create_eml_sink(aconnector);
+}
+
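+/*
+ * Validate the stream against a minimal dc_state containing a single
+ * full-screen ARGB8888 plane, so that global (stream + plane) constraints
+ * are checked, not just the stream timing.
+ */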
+static enum dc_status dm_validate_stream_and_context(struct dc *dc,
+ struct dc_stream_state *stream)
+{
+ enum dc_status dc_result = DC_ERROR_UNEXPECTED;
+ struct dc_plane_state *dc_plane_state = NULL;
+ struct dc_state *dc_state = NULL;
+
+ if (!stream)
+ goto cleanup;
+
+ dc_plane_state = dc_create_plane_state(dc);
+ if (!dc_plane_state)
+ goto cleanup;
+
+ dc_state = dc_state_create(dc, NULL);
+ if (!dc_state)
+ goto cleanup;
+
+	/* Populate the plane with the stream's dimensions */
+ dc_plane_state->src_rect.height = stream->src.height;
+ dc_plane_state->src_rect.width = stream->src.width;
+ dc_plane_state->dst_rect.height = stream->src.height;
+ dc_plane_state->dst_rect.width = stream->src.width;
+ dc_plane_state->clip_rect.height = stream->src.height;
+ dc_plane_state->clip_rect.width = stream->src.width;
+ dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
+ dc_plane_state->plane_size.surface_size.height = stream->src.height;
+ dc_plane_state->plane_size.surface_size.width = stream->src.width;
+ dc_plane_state->plane_size.chroma_size.height = stream->src.height;
+ dc_plane_state->plane_size.chroma_size.width = stream->src.width;
+ dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+ dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
+ dc_plane_state->rotation = ROTATION_ANGLE_0;
+ dc_plane_state->is_tiling_rotated = false;
+ dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;
+
+ dc_result = dc_validate_stream(dc, stream);
+ if (dc_result == DC_OK)
+ dc_result = dc_validate_plane(dc, dc_plane_state);
+
+ if (dc_result == DC_OK)
+ dc_result = dc_state_add_stream(dc, dc_state, stream);
+
+ if (dc_result == DC_OK && !dc_state_add_plane(
+ dc,
+ stream,
+ dc_plane_state,
+ dc_state))
+ dc_result = DC_FAIL_ATTACH_SURFACES;
+
+ if (dc_result == DC_OK)
+ dc_result = dc_validate_global_state(dc, dc_state, true);
+
+cleanup:
+ if (dc_state)
+ dc_state_release(dc_state);
+
+ if (dc_plane_state)
+ dc_plane_state_release(dc_plane_state);
+
+ return dc_result;
+}
+
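+/*
+ * Create a stream and validate it with DC, retrying at progressively lower
+ * bpc (down to 6) on validation failure. If encoder validation still fails,
+ * retry once with YCbCr420 output forced.
+ */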
+struct dc_stream_state *
+create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ const struct drm_display_mode *drm_mode,
+ const struct dm_connector_state *dm_state,
+ const struct dc_stream_state *old_stream)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
+ struct dc_stream_state *stream;
+ const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
+ int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
+ enum dc_status dc_result = DC_OK;
+
+ do {
+ stream = create_stream_for_sink(connector, drm_mode,
+ dm_state, old_stream,
+ requested_bpc);
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ break;
+ }
+
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return stream;
+
+ dc_result = dc_validate_stream(adev->dm.dc, stream);
+ if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
+
+ if (dc_result == DC_OK)
+ dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
+
+ if (dc_result != DC_OK) {
+ DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
+ drm_mode->hdisplay,
+ drm_mode->vdisplay,
+ drm_mode->clock,
+ dc_result,
+ dc_status_to_str(dc_result));
+
+ dc_stream_release(stream);
+ stream = NULL;
+ requested_bpc -= 2; /* lower bpc to retry validation */
+ }
+
+ } while (stream == NULL && requested_bpc >= 6);
+
+ if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
+ DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
+
+ aconnector->force_yuv420_output = true;
+ stream = create_validate_stream_for_sink(aconnector, drm_mode,
+ dm_state, old_stream);
+ aconnector->force_yuv420_output = false;
+ }
+
+ return stream;
+}
+
+enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int result = MODE_ERROR;
+ struct dc_sink *dc_sink;
+ /* TODO: Unhardcode stream count */
+ struct dc_stream_state *stream;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN))
+ return result;
+
+	/*
+	 * Only run this the first time mode_valid is called, to initialize
+	 * EDID management.
+	 */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
+ !aconnector->dc_em_sink)
+ handle_edid_mgmt(aconnector);
+
+ dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
+
+ if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
+ aconnector->base.force != DRM_FORCE_ON) {
+ DRM_ERROR("dc_sink is NULL!\n");
+ goto fail;
+ }
+
+ drm_mode_set_crtcinfo(mode, 0);
+
+ stream = create_validate_stream_for_sink(aconnector, mode,
+ to_dm_connector_state(connector->state),
+ NULL);
+ if (stream) {
+ dc_stream_release(stream);
+ result = MODE_OK;
+ }
+
+fail:
+	/* TODO: error handling */
+ return result;
+}
+
+static int fill_hdr_info_packet(const struct drm_connector_state *state,
+ struct dc_info_packet *out)
+{
+ struct hdmi_drm_infoframe frame;
+ unsigned char buf[30]; /* 26 + 4 */
+ ssize_t len;
+ int ret, i;
+
+ memset(out, 0, sizeof(*out));
+
+ if (!state->hdr_output_metadata)
+ return 0;
+
+ ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
+ if (ret)
+ return ret;
+
+ len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
+ if (len < 0)
+ return (int)len;
+
+ /* Static metadata is a fixed 26 bytes + 4 byte header. */
+ if (len != 30)
+ return -EINVAL;
+
+ /* Prepare the infopacket for DC. */
+ switch (state->connector->connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ out->hb0 = 0x87; /* type */
+ out->hb1 = 0x01; /* version */
+ out->hb2 = 0x1A; /* length */
+ out->sb[0] = buf[3]; /* checksum */
+ i = 1;
+ break;
+
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ out->hb0 = 0x00; /* sdp id, zero */
+ out->hb1 = 0x87; /* type */
+ out->hb2 = 0x1D; /* payload len - 1 */
+ out->hb3 = (0x13 << 2); /* sdp version */
+ out->sb[0] = 0x01; /* version */
+ out->sb[1] = 0x1A; /* length */
+ i = 2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(&out->sb[i], &buf[4], 26);
+ out->valid = true;
+
+ print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
+ sizeof(out->sb), false);
+
+ return 0;
+}
+
+static int
+amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *new_con_state =
+ drm_atomic_get_new_connector_state(state, conn);
+ struct drm_connector_state *old_con_state =
+ drm_atomic_get_old_connector_state(state, conn);
+ struct drm_crtc *crtc = new_con_state->crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
+ int ret;
+
+ trace_amdgpu_dm_connector_atomic_check(new_con_state);
+
+ if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!crtc)
+ return 0;
+
+ if (new_con_state->colorspace != old_con_state->colorspace) {
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ new_crtc_state->mode_changed = true;
+ }
+
+ if (new_con_state->content_type != old_con_state->content_type) {
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ new_crtc_state->mode_changed = true;
+ }
+
+ if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
+ struct dc_info_packet hdr_infopacket;
+
+ ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
+ if (ret)
+ return ret;
+
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ /*
+ * DC considers the stream backends changed if the
+ * static metadata changes. Forcing the modeset also
+ * gives a simple way for userspace to switch from
+ * 8bpc to 10bpc when setting the metadata to enter
+ * or exit HDR.
+ *
+ * Changing the static metadata after it's been
+ * set is permissible, however. So only force a
+ * modeset if we're entering or exiting HDR.
+ */
+ new_crtc_state->mode_changed = new_crtc_state->mode_changed ||
+ !old_con_state->hdr_output_metadata ||
+ !new_con_state->hdr_output_metadata;
+ }
+
+ return 0;
+}
+
+static const struct drm_connector_helper_funcs
+amdgpu_dm_connector_helper_funcs = {
+	/*
+	 * If a second, larger display is hotplugged in fbcon mode, its larger
+	 * resolution modes will be filtered out by drm_mode_validate_size()
+	 * and will be missing once the user starts lightdm. We therefore need
+	 * to rebuild the mode list in the get_modes callback, not just return
+	 * the mode count.
+	 */
+ .get_modes = get_modes,
+ .mode_valid = amdgpu_dm_connector_mode_valid,
+ .atomic_check = amdgpu_dm_connector_atomic_check,
+};
+
+static void dm_encoder_helper_disable(struct drm_encoder *encoder)
+{
+}
+
+int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
+{
+ switch (display_color_depth) {
+ case COLOR_DEPTH_666:
+ return 6;
+ case COLOR_DEPTH_888:
+ return 8;
+ case COLOR_DEPTH_101010:
+ return 10;
+ case COLOR_DEPTH_121212:
+ return 12;
+ case COLOR_DEPTH_141414:
+ return 14;
+ case COLOR_DEPTH_161616:
+ return 16;
+ default:
+ break;
+ }
+ return 0;
+}
+
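+/*
+ * For MST connectors, compute the stream's PBN from the adjusted mode's
+ * clock and effective bpp, and atomically reserve the matching number of
+ * time slots on the MST topology.
+ */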
+static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_atomic_state *state = crtc_state->state;
+ struct drm_connector *connector = conn_state->connector;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct drm_dp_mst_port *mst_port;
+ struct drm_dp_mst_topology_state *mst_state;
+ enum dc_color_depth color_depth;
+ int clock, bpp = 0;
+ bool is_y420 = false;
+
+ if (!aconnector->mst_output_port)
+ return 0;
+
+ mst_port = aconnector->mst_output_port;
+ mst_mgr = &aconnector->mst_root->mst_mgr;
+
+ if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
+ return 0;
+
+ mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
+
+ if (!state->duplicated) {
+ int max_bpc = conn_state->max_requested_bpc;
+
+ is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
+ aconnector->force_yuv420_output;
+ color_depth = convert_color_depth_from_display_info(connector,
+ is_y420,
+ max_bpc);
+ bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
+ clock = adjusted_mode->clock;
+ dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
+ }
+
+ dm_new_connector_state->vcpi_slots =
+ drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
+ dm_new_connector_state->pbn);
+ if (dm_new_connector_state->vcpi_slots < 0) {
+ DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
+ return dm_new_connector_state->vcpi_slots;
+ }
+ return 0;
+}
+
+const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
+ .disable = dm_encoder_helper_disable,
+ .atomic_check = dm_encoder_helper_atomic_check
+};
+
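+/*
+ * Propagate the PBN/time-slot values computed by the MST DSC fairness code
+ * into each connector's state, enabling or disabling DSC on the MST port
+ * to match the stream's DSC flag.
+ */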
+static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars)
+{
+ struct dc_stream_state *stream = NULL;
+ struct drm_connector *connector;
+ struct drm_connector_state *new_con_state;
+ struct amdgpu_dm_connector *aconnector;
+ struct dm_connector_state *dm_conn_state;
+ int i, j, ret;
+ int vcpi, pbn_div, pbn, slot_num = 0;
+
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!aconnector->mst_output_port)
+ continue;
+
+ if (!new_con_state || !new_con_state->crtc)
+ continue;
+
+ dm_conn_state = to_dm_connector_state(new_con_state);
+
+ for (j = 0; j < dc_state->stream_count; j++) {
+ stream = dc_state->streams[j];
+ if (!stream)
+ continue;
+
+ if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
+ break;
+
+ stream = NULL;
+ }
+
+ if (!stream)
+ continue;
+
+ pbn_div = dm_mst_get_pbn_divider(stream->link);
+		/* pbn is calculated by compute_mst_dsc_configs_for_state() */
+ for (j = 0; j < dc_state->stream_count; j++) {
+ if (vars[j].aconnector == aconnector) {
+ pbn = vars[j].pbn;
+ break;
+ }
+ }
+
+ if (j == dc_state->stream_count)
+ continue;
+
+ slot_num = DIV_ROUND_UP(pbn, pbn_div);
+
+ if (stream->timing.flags.DSC != 1) {
+ dm_conn_state->pbn = pbn;
+ dm_conn_state->vcpi_slots = slot_num;
+
+ ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port,
+ dm_conn_state->pbn, false);
+ if (ret < 0)
+ return ret;
+
+ continue;
+ }
+
+ vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true);
+ if (vcpi < 0)
+ return vcpi;
+
+ dm_conn_state->pbn = pbn;
+ dm_conn_state->vcpi_slots = vcpi;
+ }
+ return 0;
+}
+
+static int to_drm_connector_type(enum signal_type st)
+{
+ switch (st) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return DRM_MODE_CONNECTOR_HDMIA;
+ case SIGNAL_TYPE_EDP:
+ return DRM_MODE_CONNECTOR_eDP;
+ case SIGNAL_TYPE_LVDS:
+ return DRM_MODE_CONNECTOR_LVDS;
+ case SIGNAL_TYPE_RGB:
+ return DRM_MODE_CONNECTOR_VGA;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ return DRM_MODE_CONNECTOR_DisplayPort;
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ return DRM_MODE_CONNECTOR_DVID;
+ case SIGNAL_TYPE_VIRTUAL:
+ return DRM_MODE_CONNECTOR_VIRTUAL;
+
+ default:
+ return DRM_MODE_CONNECTOR_Unknown;
+ }
+}
+
+static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+
+ /* There is only one encoder per connector */
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
+
+ return NULL;
+}
+
+static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+
+ encoder = amdgpu_dm_connector_to_encoder(connector);
+
+ if (encoder == NULL)
+ return;
+
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ amdgpu_encoder->native_mode.clock = 0;
+
+ if (!list_empty(&connector->probed_modes)) {
+ struct drm_display_mode *preferred_mode = NULL;
+
+ list_for_each_entry(preferred_mode,
+ &connector->probed_modes,
+ head) {
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
+ amdgpu_encoder->native_mode = *preferred_mode;
+
+ break;
+ }
+	}
+}
+
+static struct drm_display_mode *
+amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
+ char *name,
+ int hdisplay, int vdisplay)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+
+ mode = drm_mode_duplicate(dev, native_mode);
+
+ if (mode == NULL)
+ return NULL;
+
+ mode->hdisplay = hdisplay;
+ mode->vdisplay = vdisplay;
+ mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+ strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+
+ return mode;
+}
+
+static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ int i;
+ int n;
+ struct mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
+ int w;
+ int h;
+ } common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
+ };
+
+ n = ARRAY_SIZE(common_modes);
+
+ for (i = 0; i < n; i++) {
+ struct drm_display_mode *curmode = NULL;
+ bool mode_existed = false;
+
+ if (common_modes[i].w > native_mode->hdisplay ||
+ common_modes[i].h > native_mode->vdisplay ||
+ (common_modes[i].w == native_mode->hdisplay &&
+ common_modes[i].h == native_mode->vdisplay))
+ continue;
+
+ list_for_each_entry(curmode, &connector->probed_modes, head) {
+ if (common_modes[i].w == curmode->hdisplay &&
+ common_modes[i].h == curmode->vdisplay) {
+ mode_existed = true;
+ break;
+ }
+ }
+
+ if (mode_existed)
+ continue;
+
+ mode = amdgpu_dm_create_common_mode(encoder,
+ common_modes[i].name, common_modes[i].w,
+ common_modes[i].h);
+ if (!mode)
+ continue;
+
+ drm_mode_probed_add(connector, mode);
+ amdgpu_dm_connector->num_modes++;
+ }
+}
+
+static void amdgpu_set_panel_orientation(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+ const struct drm_display_mode *native_mode;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+ connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+ return;
+
+ mutex_lock(&connector->dev->mode_config.mutex);
+ amdgpu_dm_connector_get_modes(connector);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
+ encoder = amdgpu_dm_connector_to_encoder(connector);
+ if (!encoder)
+ return;
+
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ native_mode = &amdgpu_encoder->native_mode;
+ if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
+ return;
+
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ native_mode->hdisplay,
+ native_mode->vdisplay);
+}
+
+static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ if (edid) {
+ /* empty probed_modes */
+ INIT_LIST_HEAD(&connector->probed_modes);
+ amdgpu_dm_connector->num_modes =
+ drm_add_edid_modes(connector, edid);
+
+		/*
+		 * Sort the probed modes before calling
+		 * amdgpu_dm_get_native_mode(), since an EDID can have more
+		 * than one preferred mode. Modes later in the probed mode
+		 * list could have a higher, preferred resolution: for
+		 * example, 3840x2160 in the base EDID preferred timing and
+		 * 4096x2160 as the preferred resolution in a DID extension
+		 * block.
+		 */
+ drm_mode_sort(&connector->probed_modes);
+ amdgpu_dm_get_native_mode(connector);
+
+ /* Freesync capabilities are reset by calling
+ * drm_add_edid_modes() and need to be
+ * restored here.
+ */
+ amdgpu_dm_update_freesync_caps(connector, edid);
+ } else {
+ amdgpu_dm_connector->num_modes = 0;
+ }
+}
+
+static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
+ struct drm_display_mode *mode)
+{
+ struct drm_display_mode *m;
+
+ list_for_each_entry(m, &aconnector->base.probed_modes, head) {
+ if (drm_mode_equal(m, mode))
+ return true;
+ }
+
+ return false;
+}
+
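+/*
+ * Synthesize fixed-refresh variants of the highest-refresh probed mode for
+ * common video rates within the panel's FreeSync range, by stretching vtotal
+ * while keeping the pixel clock.
+ */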
+static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
+{
+ const struct drm_display_mode *m;
+ struct drm_display_mode *new_mode;
+ uint i;
+ u32 new_modes_count = 0;
+
+ /* Standard FPS values
+ *
+ * 23.976 - TV/NTSC
+ * 24 - Cinema
+ * 25 - TV/PAL
+ * 29.97 - TV/NTSC
+ * 30 - TV/NTSC
+ * 48 - Cinema HFR
+ * 50 - TV/PAL
+ * 60 - Commonly used
+ * 48,72,96,120 - Multiples of 24
+ */
+ static const u32 common_rates[] = {
+ 23976, 24000, 25000, 29970, 30000,
+ 48000, 50000, 60000, 72000, 96000, 120000
+ };
+
+	/*
+	 * Find the mode with the highest refresh rate at the same resolution
+	 * as the preferred mode. Some monitors report a preferred mode whose
+	 * refresh rate is lower than the highest one supported.
+	 */
+
+ m = get_highest_refresh_rate_mode(aconnector, true);
+ if (!m)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
+ u64 target_vtotal, target_vtotal_diff;
+ u64 num, den;
+
+ if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
+ continue;
+
+ if (common_rates[i] < aconnector->min_vfreq * 1000 ||
+ common_rates[i] > aconnector->max_vfreq * 1000)
+ continue;
+
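+		/*
+		 * vtotal = clock * 1000 / (htotal * refresh); with clock in
+		 * kHz and the target rate in mHz this becomes
+		 * clock * 1e6 / (rate * htotal).
+		 */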
+ num = (unsigned long long)m->clock * 1000 * 1000;
+ den = common_rates[i] * (unsigned long long)m->htotal;
+ target_vtotal = div_u64(num, den);
+ target_vtotal_diff = target_vtotal - m->vtotal;
+
+ /* Check for illegal modes */
+ if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
+ m->vsync_end + target_vtotal_diff < m->vsync_start ||
+ m->vtotal + target_vtotal_diff < m->vsync_end)
+ continue;
+
+ new_mode = drm_mode_duplicate(aconnector->base.dev, m);
+ if (!new_mode)
+ goto out;
+
+ new_mode->vtotal += (u16)target_vtotal_diff;
+ new_mode->vsync_start += (u16)target_vtotal_diff;
+ new_mode->vsync_end += (u16)target_vtotal_diff;
+ new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+ new_mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ if (!is_duplicate_mode(aconnector, new_mode)) {
+ drm_mode_probed_add(&aconnector->base, new_mode);
+ new_modes_count += 1;
+ } else
+ drm_mode_destroy(aconnector->base.dev, new_mode);
+ }
+ out:
+ return new_modes_count;
+}
+
+static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ if (!(amdgpu_freesync_vid_mode && edid))
+ return;
+
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ amdgpu_dm_connector->num_modes +=
+ add_fs_modes(amdgpu_dm_connector);
+}
+
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ struct drm_encoder *encoder;
+ struct edid *edid = amdgpu_dm_connector->edid;
+ struct dc_link_settings *verified_link_cap =
+ &amdgpu_dm_connector->dc_link->verified_link_cap;
+ const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
+
+ encoder = amdgpu_dm_connector_to_encoder(connector);
+
+ if (!drm_edid_is_valid(edid)) {
+ amdgpu_dm_connector->num_modes =
+ drm_add_modes_noedid(connector, 640, 480);
+ if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
+ amdgpu_dm_connector->num_modes +=
+ drm_add_modes_noedid(connector, 1920, 1080);
+ } else {
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+ amdgpu_dm_connector_add_freesync_modes(connector, edid);
+ }
+ amdgpu_dm_fbc_init(connector);
+
+ return amdgpu_dm_connector->num_modes;
+}
+
+static const u32 supported_colorspaces =
+ BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
+ BIT(DRM_MODE_COLORIMETRY_OPRGB) |
+ BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
+ BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
+
+void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ int connector_type,
+ struct dc_link *link,
+ int link_index)
+{
+ struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+
+ /*
+ * Some of the properties below require access to state, like bpc.
+ * Allocate some default initial connector state with our reset helper.
+ */
+ if (aconnector->base.funcs->reset)
+ aconnector->base.funcs->reset(&aconnector->base);
+
+ aconnector->connector_id = link_index;
+ aconnector->bl_idx = -1;
+ aconnector->dc_link = link;
+ aconnector->base.interlace_allowed = false;
+ aconnector->base.doublescan_allowed = false;
+ aconnector->base.stereo_allowed = false;
+ aconnector->base.dpms = DRM_MODE_DPMS_OFF;
+ aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
+ aconnector->audio_inst = -1;
+ aconnector->pack_sdp_v1_3 = false;
+ aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
+ memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
+ mutex_init(&aconnector->hpd_lock);
+ mutex_init(&aconnector->handle_mst_msg_ready);
+
+	/*
+	 * Configure HPD hot-plug support. The default connector->polled
+	 * value is 0, which means HPD hot plug is not supported.
+	 */
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ link->link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link->link_enc);
+ if (link->link_enc)
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.dp_ycbcr420_supported ? true : false;
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ default:
+ break;
+ }
+
+ drm_object_attach_property(&aconnector->base.base,
+ dm->ddev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
+
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_property,
+ UNDERSCAN_OFF);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_hborder_property,
+ 0);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_vborder_property,
+ 0);
+
+ if (!aconnector->mst_root)
+ drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
+
+ aconnector->base.state->max_bpc = 16;
+ aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
+
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ /* Content Type is currently only implemented for HDMI. */
+ drm_connector_attach_content_type_property(&aconnector->base);
+ }
+
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces))
+ drm_connector_attach_colorspace_property(&aconnector->base);
+ } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) ||
+ connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces))
+ drm_connector_attach_colorspace_property(&aconnector->base);
+ }
+
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_eDP) {
+ drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
+
+ if (!aconnector->mst_root)
+ drm_connector_attach_vrr_capable_property(&aconnector->base);
+
+ if (adev->dm.hdcp_workqueue)
+ drm_connector_attach_content_protection_property(&aconnector->base, true);
+ }
+}
+
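+/*
+ * Translate a Linux i2c_msg transaction into a DC i2c_command and submit it
+ * over the link's DDC engine.
+ */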
+static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
+ struct ddc_service *ddc_service = i2c->ddc_service;
+ struct i2c_command cmd;
+ int i;
+ int result = -EIO;
+
+ if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
+ return result;
+
+ cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
+
+ if (!cmd.payloads)
+ return result;
+
+ cmd.number_of_payloads = num;
+ cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
+ cmd.speed = 100;
+
+ for (i = 0; i < num; i++) {
+ cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
+ cmd.payloads[i].address = msgs[i].addr;
+ cmd.payloads[i].length = msgs[i].len;
+ cmd.payloads[i].data = msgs[i].buf;
+ }
+
+ if (dc_submit_i2c(
+ ddc_service->ctx->dc,
+ ddc_service->link->link_index,
+ &cmd))
+ result = num;
+
+ kfree(cmd.payloads);
+ return result;
+}
+
+static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
+ .master_xfer = amdgpu_dm_i2c_xfer,
+ .functionality = amdgpu_dm_i2c_func,
+};
+
+static struct amdgpu_i2c_adapter *
+create_i2c(struct ddc_service *ddc_service,
+ int link_index,
+ int *res)
+{
+ struct amdgpu_device *adev = ddc_service->ctx->driver_context;
+ struct amdgpu_i2c_adapter *i2c;
+
+ i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+ i2c->base.owner = THIS_MODULE;
+ i2c->base.dev.parent = &adev->pdev->dev;
+ i2c->base.algo = &amdgpu_dm_i2c_algo;
+ snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
+ i2c_set_adapdata(&i2c->base, i2c);
+ i2c->ddc_service = ddc_service;
+
+ return i2c;
+}
+
+/*
+ * Note: this function assumes that dc_link_detect() was called for the
+ * dc_link which will be represented by this aconnector.
+ */
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ u32 link_index,
+ struct amdgpu_encoder *aencoder)
+{
+ int res = 0;
+ int connector_type;
+ struct dc *dc = dm->dc;
+ struct dc_link *link = dc_get_link_at_index(dc, link_index);
+ struct amdgpu_i2c_adapter *i2c;
+
+ /* Not needed for writeback connector */
+ link->priv = aconnector;
+
+ i2c = create_i2c(link->ddc, link->link_index, &res);
+ if (!i2c) {
+ DRM_ERROR("Failed to create i2c adapter data\n");
+ return -ENOMEM;
+ }
+
+ aconnector->i2c = i2c;
+ res = i2c_add_adapter(&i2c->base);
+
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
+ goto out_free;
+ }
+
+ connector_type = to_drm_connector_type(link->connector_signal);
+
+ res = drm_connector_init_with_ddc(
+ dm->ddev,
+ &aconnector->base,
+ &amdgpu_dm_connector_funcs,
+ connector_type,
+ &i2c->base);
+
+ if (res) {
+ DRM_ERROR("connector_init failed\n");
+ aconnector->connector_id = -1;
+ goto out_free;
+ }
+
+ drm_connector_helper_add(
+ &aconnector->base,
+ &amdgpu_dm_connector_helper_funcs);
+
+ amdgpu_dm_connector_init_helper(
+ dm,
+ aconnector,
+ connector_type,
+ link,
+ link_index);
+
+ drm_connector_attach_encoder(
+ &aconnector->base, &aencoder->base);
+
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
+ || connector_type == DRM_MODE_CONNECTOR_eDP)
+ amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
+
+out_free:
+ if (res) {
+ kfree(i2c);
+ aconnector->i2c = NULL;
+ }
+ return res;
+}
+
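+/*
+ * Return a bitmask with one bit set per CRTC, i.e. (1 << num_crtc) - 1,
+ * capped at six CRTCs.
+ */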
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
+{
+ switch (adev->mode_info.num_crtc) {
+ case 1:
+ return 0x1;
+ case 2:
+ return 0x3;
+ case 3:
+ return 0x7;
+ case 4:
+ return 0xf;
+ case 5:
+ return 0x1f;
+ case 6:
+ default:
+ return 0x3f;
+ }
+}
+
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ int res = drm_encoder_init(dev,
+ &aencoder->base,
+ &amdgpu_dm_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS,
+ NULL);
+
+ aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+
+ if (!res)
+ aencoder->encoder_id = link_index;
+ else
+ aencoder->encoder_id = -1;
+
+ drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
+
+ return res;
+}
+
+static void manage_dm_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ bool enable)
+{
+ /*
+ * We have no guarantee that the frontend index maps to the same
+ * backend index - some even map to more than one.
+ *
+ * TODO: Use a different interrupt or check DC itself for the mapping.
+ */
+ int irq_type =
+ amdgpu_display_crtc_idx_to_irq_type(
+ adev,
+ acrtc->crtc_id);
+
+ if (enable) {
+ drm_crtc_vblank_on(&acrtc->base);
+ amdgpu_irq_get(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ amdgpu_irq_get(
+ adev,
+ &adev->vline0_irq,
+ irq_type);
+#endif
+ } else {
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ amdgpu_irq_put(
+ adev,
+ &adev->vline0_irq,
+ irq_type);
+#endif
+ amdgpu_irq_put(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+ drm_crtc_vblank_off(&acrtc->base);
+ }
+}
+
+static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc)
+{
+ int irq_type =
+ amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
+
+	/*
+	 * Read the current IRQ state and force-reapply the setting to the
+	 * hardware.
+	 */
+ amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
+}
+
+static bool
+is_scaling_state_different(const struct dm_connector_state *dm_state,
+ const struct dm_connector_state *old_dm_state)
+{
+ if (dm_state->scaling != old_dm_state->scaling)
+ return true;
+ if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
+ if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
+ return true;
+ } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
+ if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
+ return true;
+ } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
+ dm_state->underscan_vborder != old_dm_state->underscan_vborder)
+ return true;
+ return false;
+}
+
+static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
+ struct drm_crtc_state *old_crtc_state,
+ struct drm_connector_state *new_conn_state,
+ struct drm_connector_state *old_conn_state,
+ const struct drm_connector *connector,
+ struct hdcp_workqueue *hdcp_w)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
+
+ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ connector->index, connector->status, connector->dpms);
+ pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ old_conn_state->content_protection, new_conn_state->content_protection);
+
+ if (old_crtc_state)
+ pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ old_crtc_state->enable,
+ old_crtc_state->active,
+ old_crtc_state->mode_changed,
+ old_crtc_state->active_changed,
+ old_crtc_state->connectors_changed);
+
+ if (new_crtc_state)
+ pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* hdcp content type change */
+ if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
+ new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
+ return true;
+ }
+
+	/* CP is being re-enabled; ignore this. */
+ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (new_crtc_state && new_crtc_state->mode_changed) {
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
+ return true;
+ }
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
+ return false;
+ }
+
+	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the
+	 * restored state will be ENABLED.
+	 *
+	 * Handles: UNDESIRED -> ENABLED
+	 */
+ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+
+ /* Stream removed and re-enabled
+ *
+ * Can sometimes overlap with the HPD case,
+ * thus set update_hdcp to false to avoid
+ * setting HDCP multiple times.
+ *
+ * Handles: DESIRED -> DESIRED (Special case)
+ */
+ if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
+ new_conn_state->crtc && new_conn_state->crtc->enabled &&
+ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ dm_con_state->update_hdcp = false;
+ pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
+ __func__);
+ return true;
+ }
+
+ /* Hot-plug, headless s3, dpms
+ *
+ * Only start HDCP if the display is connected/enabled.
+ * update_hdcp flag will be set to false until the next
+ * HPD comes in.
+ *
+ * Handles: DESIRED -> DESIRED (Special case)
+ */
+ if (dm_con_state->update_hdcp &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+ connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
+ dm_con_state->update_hdcp = false;
+ pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
+ __func__);
+ return true;
+ }
+
+ if (old_conn_state->content_protection == new_conn_state->content_protection) {
+ if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (new_crtc_state && new_crtc_state->mode_changed) {
+ pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
+ __func__);
+ return true;
+ }
+ pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
+ __func__);
+ return false;
+ }
+
+ pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
+ return false;
+ }
+
+ if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
+ __func__);
+ return true;
+ }
+
+ pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
+ return false;
+}
+
+static void remove_stream(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ struct dc_stream_state *stream)
+{
+ /* this is the update mode case */
+
+ acrtc->otg_inst = -1;
+ acrtc->enabled = false;
+}
+
+static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
+{
+
+ assert_spin_locked(&acrtc->base.dev->event_lock);
+ WARN_ON(acrtc->event);
+
+ acrtc->event = acrtc->base.state->event;
+
+ /* Set the flip status */
+ acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+
+ /* Mark this event as consumed */
+ acrtc->base.state->event = NULL;
+
+ drm_dbg_state(acrtc->base.dev,
+ "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
+ acrtc->crtc_id);
+}
+
+static void update_freesync_state_on_stream(
+ struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *new_crtc_state,
+ struct dc_stream_state *new_stream,
+ struct dc_plane_state *surface,
+ u32 flip_timestamp_in_us)
+{
+ struct mod_vrr_params vrr_params;
+ struct dc_info_packet vrr_infopacket = {0};
+ struct amdgpu_device *adev = dm->adev;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
+ unsigned long flags;
+ bool pack_sdp_v1_3 = false;
+ struct amdgpu_dm_connector *aconn;
+ enum vrr_packet_type packet_type = PACKET_TYPE_VRR;
+
+ if (!new_stream)
+ return;
+
+ /*
+ * TODO: Determine why min/max totals and vrefresh can be 0 here.
+ * For now it's sufficient to just guard against these conditions.
+ */
+
+ if (!new_stream->timing.h_total || !new_stream->timing.v_total)
+ return;
+
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ vrr_params = acrtc->dm_irq_params.vrr_params;
+
+ if (surface) {
+ mod_freesync_handle_preflip(
+ dm->freesync_module,
+ surface,
+ new_stream,
+ flip_timestamp_in_us,
+ &vrr_params);
+
+ if (adev->family < AMDGPU_FAMILY_AI &&
+ amdgpu_dm_crtc_vrr_active(new_crtc_state)) {
+ mod_freesync_handle_v_update(dm->freesync_module,
+ new_stream, &vrr_params);
+
+ /* Need to call this before the frame ends. */
+ dc_stream_adjust_vmin_vmax(dm->dc,
+ new_crtc_state->stream,
+ &vrr_params.adjust);
+ }
+ }
+
+ aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context;
+
+ if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) {
+ pack_sdp_v1_3 = aconn->pack_sdp_v1_3;
+
+ if (aconn->vsdb_info.amd_vsdb_version == 1)
+ packet_type = PACKET_TYPE_FS_V1;
+ else if (aconn->vsdb_info.amd_vsdb_version == 2)
+ packet_type = PACKET_TYPE_FS_V2;
+ else if (aconn->vsdb_info.amd_vsdb_version == 3)
+ packet_type = PACKET_TYPE_FS_V3;
+
+ mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL,
+ &new_stream->adaptive_sync_infopacket);
+ }
+
+ mod_freesync_build_vrr_infopacket(
+ dm->freesync_module,
+ new_stream,
+ &vrr_params,
+ packet_type,
+ TRANSFER_FUNC_UNKNOWN,
+ &vrr_infopacket,
+ pack_sdp_v1_3);
+
+ new_crtc_state->freesync_vrr_info_changed |=
+ (memcmp(&new_crtc_state->vrr_infopacket,
+ &vrr_infopacket,
+ sizeof(vrr_infopacket)) != 0);
+
+ acrtc->dm_irq_params.vrr_params = vrr_params;
+ new_crtc_state->vrr_infopacket = vrr_infopacket;
+
+ new_stream->vrr_infopacket = vrr_infopacket;
+ new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params);
+
+ if (new_crtc_state->freesync_vrr_info_changed)
+ DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
+ new_crtc_state->base.crtc->base.id,
+ (int)new_crtc_state->base.vrr_enabled,
+ (int)vrr_params.state);
+
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+}
+
+static void update_stream_irq_parameters(
+ struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *new_crtc_state)
+{
+ struct dc_stream_state *new_stream = new_crtc_state->stream;
+ struct mod_vrr_params vrr_params;
+ struct mod_freesync_config config = new_crtc_state->freesync_config;
+ struct amdgpu_device *adev = dm->adev;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
+ unsigned long flags;
+
+ if (!new_stream)
+ return;
+
+ /*
+ * TODO: Determine why min/max totals and vrefresh can be 0 here.
+ * For now it's sufficient to just guard against these conditions.
+ */
+ if (!new_stream->timing.h_total || !new_stream->timing.v_total)
+ return;
+
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ vrr_params = acrtc->dm_irq_params.vrr_params;
+
+ if (new_crtc_state->vrr_supported &&
+ config.min_refresh_in_uhz &&
+ config.max_refresh_in_uhz) {
+		/*
+		 * If a freesync-compatible mode was set, config.state will
+		 * already have been set in atomic check.
+		 */
+ if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
+ (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
+ new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
+ vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
+ vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
+ vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
+ vrr_params.state = VRR_STATE_ACTIVE_FIXED;
+ } else {
+ config.state = new_crtc_state->base.vrr_enabled ?
+ VRR_STATE_ACTIVE_VARIABLE :
+ VRR_STATE_INACTIVE;
+ }
+ } else {
+ config.state = VRR_STATE_UNSUPPORTED;
+ }
+
+ mod_freesync_build_vrr_params(dm->freesync_module,
+ new_stream,
+ &config, &vrr_params);
+
+ new_crtc_state->freesync_config = config;
+ /* Copy state for access from DM IRQ handler */
+ acrtc->dm_irq_params.freesync_config = config;
+ acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
+ acrtc->dm_irq_params.vrr_params = vrr_params;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+}
+
+static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
+ struct dm_crtc_state *new_state)
+{
+ bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state);
+ bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state);
+
+ if (!old_vrr_active && new_vrr_active) {
+		/* Transition VRR inactive -> active:
+		 * While VRR is active, we must not disable the vblank irq, as a
+		 * re-enable after disable would compute bogus vblank/pflip
+		 * timestamps if it happened inside the display front porch.
+		 *
+		 * We also need the vupdate irq for the actual core vblank
+		 * handling at end of vblank.
+		 */
+ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
+ WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
+ DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
+ __func__, new_state->base.crtc->base.id);
+ } else if (old_vrr_active && !new_vrr_active) {
+ /* Transition VRR active -> inactive:
+ * Allow vblank irq disable again for fixed refresh rate.
+ */
+ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
+ drm_crtc_vblank_put(new_state->base.crtc);
+ DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
+ __func__, new_state->base.crtc->base.id);
+ }
+}
+
+static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state;
+ int i;
+
+ /*
+ * TODO: Make this per-stream so we don't issue redundant updates for
+ * commits with multiple streams.
+ */
+ for_each_old_plane_in_state(state, plane, old_plane_state, i)
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state);
+}
+
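+/*
+ * TTM memory type (e.g. VRAM vs GTT) currently backing the framebuffer's
+ * buffer object, or 0 if no resource is attached.
+ */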
+static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
+{
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
+
+ return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
+}
+
+static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ struct drm_device *dev,
+ struct amdgpu_display_manager *dm,
+ struct drm_crtc *pcrtc,
+ bool wait_for_vblank)
+{
+ u32 i;
+ u64 timestamp_ns = ktime_get_ns();
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
+ struct drm_crtc_state *new_pcrtc_state =
+ drm_atomic_get_new_crtc_state(state, pcrtc);
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
+ struct dm_crtc_state *dm_old_crtc_state =
+ to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
+ int planes_count = 0, vpos, hpos;
+ unsigned long flags;
+ u32 target_vblank, last_flip_vblank;
+ bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
+ bool cursor_update = false;
+ bool pflip_present = false;
+ bool dirty_rects_changed = false;
+ struct {
+ struct dc_surface_update surface_updates[MAX_SURFACES];
+ struct dc_plane_info plane_infos[MAX_SURFACES];
+ struct dc_scaling_info scaling_infos[MAX_SURFACES];
+ struct dc_flip_addrs flip_addrs[MAX_SURFACES];
+ struct dc_stream_update stream_update;
+ } *bundle;
+
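+	/* Keep the large update bundle off the stack to avoid an oversized
+	 * stack frame; allocate it on the heap instead.
+	 */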
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+
+ if (!bundle) {
+ drm_err(dev, "Failed to allocate update bundle\n");
+ goto cleanup;
+ }
+
+ /*
+ * Disable the cursor first if we're disabling all the planes.
+ * It'll remain on the screen after the planes are re-enabled
+ * if we don't.
+ */
+ if (acrtc_state->active_planes == 0)
+ amdgpu_dm_commit_cursors(state);
+
+ /* update planes when needed */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_framebuffer *fb = new_plane_state->fb;
+ struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
+ bool plane_needs_flip;
+ struct dc_plane_state *dc_plane;
+ struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
+
+ /* Cursor plane is handled after stream updates */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ if ((fb && crtc == pcrtc) ||
+ (old_plane_state->fb && old_plane_state->crtc == pcrtc))
+ cursor_update = true;
+
+ continue;
+ }
+
+ if (!fb || !crtc || pcrtc != crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!new_crtc_state->active)
+ continue;
+
+ dc_plane = dm_new_plane_state->dc_state;
+ if (!dc_plane)
+ continue;
+
+ bundle->surface_updates[planes_count].surface = dc_plane;
+ if (new_pcrtc_state->color_mgmt_changed) {
+ bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction;
+ bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func;
+ bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
+ bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult;
+ bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func;
+ bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func;
+ bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf;
+ }
+
+ amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
+ &bundle->scaling_infos[planes_count]);
+
+ bundle->surface_updates[planes_count].scaling_info =
+ &bundle->scaling_infos[planes_count];
+
+ plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
+
+ pflip_present = pflip_present || plane_needs_flip;
+
+ if (!plane_needs_flip) {
+ planes_count += 1;
+ continue;
+ }
+
+ fill_dc_plane_info_and_addr(
+ dm->adev, new_plane_state,
+ afb->tiling_flags,
+ &bundle->plane_infos[planes_count],
+ &bundle->flip_addrs[planes_count].address,
+ afb->tmz_surface, false);
+
+ drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
+ new_plane_state->plane->index,
+ bundle->plane_infos[planes_count].dcc.enable);
+
+ bundle->surface_updates[planes_count].plane_info =
+ &bundle->plane_infos[planes_count];
+
+ if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
+ acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
+ fill_dc_dirty_rects(plane, old_plane_state,
+ new_plane_state, new_crtc_state,
+ &bundle->flip_addrs[planes_count],
+ acrtc_state->stream->link->psr_settings.psr_version ==
+ DC_PSR_VERSION_SU_1,
+ &dirty_rects_changed);
+
+			/*
+			 * If the dirty regions changed, PSR-SU needs to be disabled
+			 * temporarily and re-enabled once the dirty regions are stable
+			 * to avoid video glitches. PSR-SU will be re-enabled in
+			 * vblank_control_worker() if the user pauses the video while
+			 * PSR-SU is disabled.
+			 */
+ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+ acrtc_attach->dm_irq_params.allow_psr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
+ dirty_rects_changed) {
+ mutex_lock(&dm->dc_lock);
+ acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
+ timestamp_ns;
+ if (acrtc_state->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream);
+ mutex_unlock(&dm->dc_lock);
+ }
+ }
+
+ /*
+ * Only allow immediate flips for fast updates that don't
+ * change memory domain, FB pitch, DCC state, rotation or
+ * mirroring.
+ *
+ * dm_crtc_helper_atomic_check() only accepts async flips with
+ * fast updates.
+ */
+ if (crtc->state->async_flip &&
+ (acrtc_state->update_type != UPDATE_TYPE_FAST ||
+ get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
+ drm_warn_once(state->dev,
+ "[PLANE:%d:%s] async flip with non-fast update\n",
+ plane->base.id, plane->name);
+
+ bundle->flip_addrs[planes_count].flip_immediate =
+ crtc->state->async_flip &&
+ acrtc_state->update_type == UPDATE_TYPE_FAST &&
+ get_mem_type(old_plane_state->fb) == get_mem_type(fb);
+
+ timestamp_ns = ktime_get_ns();
+ bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
+ bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
+ bundle->surface_updates[planes_count].surface = dc_plane;
+
+ if (!bundle->surface_updates[planes_count].surface) {
+ DRM_ERROR("No surface for CRTC: id=%d\n",
+ acrtc_attach->crtc_id);
+ continue;
+ }
+
+ if (plane == pcrtc->primary)
+ update_freesync_state_on_stream(
+ dm,
+ acrtc_state,
+ acrtc_state->stream,
+ dc_plane,
+ bundle->flip_addrs[planes_count].flip_timestamp_in_us);
+
+ drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
+ __func__,
+ bundle->flip_addrs[planes_count].address.grph.addr.high_part,
+ bundle->flip_addrs[planes_count].address.grph.addr.low_part);
+
+ planes_count += 1;
+
+ }
+
+ if (pflip_present) {
+ if (!vrr_active) {
+ /* Use old throttling in non-vrr fixed refresh rate mode
+ * to keep flip scheduling based on target vblank counts
+ * working in a backwards compatible way, e.g., for
+ * clients using the GLX_OML_sync_control extension or
+ * DRI3/Present extension with defined target_msc.
+ */
+ last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
+ } else {
+ /* For variable refresh rate mode only:
+ * Get vblank of last completed flip to avoid > 1 vrr
+ * flips per video frame by use of throttling, but allow
+ * flip programming anywhere in the possibly large
+ * variable vrr vblank interval for fine-grained flip
+ * timing control and more opportunity to avoid stutter
+ * on late submission of flips.
+ */
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+ last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
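+		/*
+		 * wait_for_vblank is a bool, so this targets either the vblank
+		 * of the last flip (no throttling, for async flips) or the one
+		 * after it.
+		 */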
+ target_vblank = last_flip_vblank + wait_for_vblank;
+
+ /*
+ * Wait until we're out of the vertical blank period before the one
+ * targeted by the flip
+ */
+ while ((acrtc_attach->enabled &&
+ (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
+ 0, &vpos, &hpos, NULL,
+ NULL, &pcrtc->hwmode)
+ & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+ (int)(target_vblank -
+ amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
+ usleep_range(1000, 1100);
+ }
+
+		/*
+		 * Prepare the flip event for the pageflip interrupt to handle.
+		 *
+		 * This only works when we've already turned on the appropriate
+		 * hardware blocks (e.g. HUBP), so in the transition from 0 -> n
+		 * planes we have to skip the hardware-generated event and rely
+		 * on sending it from software.
+		 */
+ if (acrtc_attach->base.state->event &&
+ acrtc_state->active_planes > 0) {
+ drm_crtc_vblank_get(pcrtc);
+
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+
+ WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
+ prepare_flip_isr(acrtc_attach);
+
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
+ if (acrtc_state->stream) {
+ if (acrtc_state->freesync_vrr_info_changed)
+ bundle->stream_update.vrr_infopacket =
+ &acrtc_state->stream->vrr_infopacket;
+ }
+ } else if (cursor_update && acrtc_state->active_planes > 0 &&
+ acrtc_attach->base.state->event) {
+ drm_crtc_vblank_get(pcrtc);
+
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+
+ acrtc_attach->event = acrtc_attach->base.state->event;
+ acrtc_attach->base.state->event = NULL;
+
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
+ /* Update the planes if changed or disable if we don't have any. */
+ if ((planes_count || acrtc_state->active_planes == 0) &&
+ acrtc_state->stream) {
+ /*
+ * If PSR or idle optimizations are enabled then flush out
+ * any pending work before hardware programming.
+ */
+ if (dm->vblank_control_workqueue)
+ flush_workqueue(dm->vblank_control_workqueue);
+
+ bundle->stream_update.stream = acrtc_state->stream;
+ if (new_pcrtc_state->mode_changed) {
+ bundle->stream_update.src = acrtc_state->stream->src;
+ bundle->stream_update.dst = acrtc_state->stream->dst;
+ }
+
+ if (new_pcrtc_state->color_mgmt_changed) {
+ /*
+ * TODO: This isn't fully correct since we've actually
+ * already modified the stream in place.
+ */
+ bundle->stream_update.gamut_remap =
+ &acrtc_state->stream->gamut_remap_matrix;
+ bundle->stream_update.output_csc_transform =
+ &acrtc_state->stream->csc_color_matrix;
+ bundle->stream_update.out_transfer_func =
+ &acrtc_state->stream->out_transfer_func;
+ bundle->stream_update.lut3d_func =
+ (struct dc_3dlut *) acrtc_state->stream->lut3d_func;
+ bundle->stream_update.func_shaper =
+ (struct dc_transfer_func *) acrtc_state->stream->func_shaper;
+ }
+
+ acrtc_state->stream->abm_level = acrtc_state->abm_level;
+ if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
+ bundle->stream_update.abm_level = &acrtc_state->abm_level;
+
+ mutex_lock(&dm->dc_lock);
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+ acrtc_state->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream);
+ mutex_unlock(&dm->dc_lock);
+
+ /*
+ * If FreeSync state on the stream has changed then we need to
+ * re-adjust the min/max bounds now that DC doesn't handle this
+ * as part of commit.
+ */
+ if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+ dc_stream_adjust_vmin_vmax(
+ dm->dc, acrtc_state->stream,
+ &acrtc_attach->dm_irq_params.vrr_params.adjust);
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+ mutex_lock(&dm->dc_lock);
+ update_planes_and_stream_adapter(dm->dc,
+ acrtc_state->update_type,
+ planes_count,
+ acrtc_state->stream,
+ &bundle->stream_update,
+ bundle->surface_updates);
+
+		/*
+		 * Enable or disable the interrupts on the backend.
+		 *
+		 * Most pipes are put into power gating when unused.
+		 *
+		 * A pipe loses its interrupt enablement state while it is
+		 * power gated, so we need to update the IRQ control state in
+		 * hardware whenever the pipe turns on (since it could have
+		 * been power gated before) or off (since some pipes can't be
+		 * power gated on some ASICs).
+		 */
+ if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
+ dm_update_pflip_irq_state(drm_to_adev(dev),
+ acrtc_attach);
+
+ if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+ if (acrtc_state->stream->link->replay_settings.config.replay_supported &&
+ !acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
+ struct amdgpu_dm_connector *aconn =
+ (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+ amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
+ } else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+ !acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+
+ struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
+ acrtc_state->stream->dm_stream_context;
+
+ if (!aconn->disallow_edp_enter_psr)
+ amdgpu_dm_link_setup_psr(acrtc_state->stream);
+ }
+ }
+
+ /* Decrement skip count when PSR is enabled and we're doing fast updates. */
+ if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
+ acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+ struct amdgpu_dm_connector *aconn =
+ (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+
+ if (aconn->psr_skip_count > 0)
+ aconn->psr_skip_count--;
+
+ /* Allow PSR when skip count is 0. */
+ acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
+
+ /*
+ * If sink supports PSR SU, there is no need to rely on
+ * a vblank event disable request to enable PSR. PSR SU
+ * can be enabled immediately once OS demonstrates an
+ * adequate number of fast atomic commits to notify KMD
+ * of update events. See `vblank_control_worker()`.
+ */
+ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+ acrtc_attach->dm_irq_params.allow_psr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
+ !acrtc_state->stream->link->psr_settings.psr_allow_active &&
+ !aconn->disallow_edp_enter_psr &&
+ (timestamp_ns -
+ acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
+ 500000000)
+ amdgpu_dm_psr_enable(acrtc_state->stream);
+ } else {
+ acrtc_attach->dm_irq_params.allow_psr_entry = false;
+ }
+
+ mutex_unlock(&dm->dc_lock);
+ }
+
+ /*
+ * Update cursor state *after* programming all the planes.
+ * This avoids redundant programming in the case where we're going
+ * to be disabling a single plane - those pipes are being disabled.
+ */
+ if (acrtc_state->active_planes)
+ amdgpu_dm_commit_cursors(state);
+
+cleanup:
+ kfree(bundle);
+}
+
+static void amdgpu_dm_commit_audio(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct dm_crtc_state *new_dm_crtc_state;
+ const struct dc_stream_status *status;
+ int i, inst;
+
+ /* Notify device removals. */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ if (old_con_state->crtc != new_con_state->crtc) {
+ /* CRTC changes require notification. */
+ goto notify;
+ }
+
+ if (!new_con_state->crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(
+ state, new_con_state->crtc);
+
+ if (!new_crtc_state)
+ continue;
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+notify:
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ mutex_lock(&adev->dm.audio_lock);
+ inst = aconnector->audio_inst;
+ aconnector->audio_inst = -1;
+ mutex_unlock(&adev->dm.audio_lock);
+
+ amdgpu_dm_audio_eld_notify(adev, inst);
+ }
+
+ /* Notify audio device additions. */
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ if (!new_con_state->crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(
+ state, new_con_state->crtc);
+
+ if (!new_crtc_state)
+ continue;
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (!new_dm_crtc_state->stream)
+ continue;
+
+ status = dc_stream_get_status(new_dm_crtc_state->stream);
+ if (!status)
+ continue;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ mutex_lock(&adev->dm.audio_lock);
+ inst = status->audio_inst;
+ aconnector->audio_inst = inst;
+ mutex_unlock(&adev->dm.audio_lock);
+
+ amdgpu_dm_audio_eld_notify(adev, inst);
+ }
+}
+
+/*
+ * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
+ * @crtc_state: the DRM CRTC state
+ * @stream_state: the DC stream state
+ *
+ * Copy the mirrored transient state flags from DRM to DC. This is used to
+ * bring a dc_stream_state's flags in sync with a drm_crtc_state's flags.
+ */
+static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
+ struct dc_stream_state *stream_state)
+{
+ stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
+}
+
+static void dm_clear_writeback(struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *crtc_state)
+{
+ dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0);
+}
+
+static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
+ struct dc_state *dc_state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct drm_connector_state *old_con_state;
+ struct drm_connector *connector;
+ bool mode_set_reset_required = false;
+ u32 i;
+ struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count};
+
+ /* Disable writeback */
+ for_each_old_connector_in_state(state, connector, old_con_state, i) {
+ struct dm_connector_state *dm_old_con_state;
+ struct amdgpu_crtc *acrtc;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ old_crtc_state = NULL;
+
+ dm_old_con_state = to_dm_connector_state(old_con_state);
+ if (!dm_old_con_state->base.crtc)
+ continue;
+
+ acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc);
+ if (acrtc)
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+
+ if (!acrtc->wb_enabled)
+ continue;
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ dm_clear_writeback(dm, dm_old_crtc_state);
+ acrtc->wb_enabled = false;
+ }
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ if (old_crtc_state->active &&
+ (!new_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+ manage_dm_interrupts(adev, acrtc, false);
+ dc_stream_release(dm_old_crtc_state->stream);
+ }
+ }
+
+ drm_atomic_helper_calc_timestamping_constants(state);
+
+ /* update changed items */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ drm_dbg_state(state->dev,
+			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d, active_changed:%d, connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->planes_changed,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* Disable cursor if disabling crtc */
+ if (old_crtc_state->active && !new_crtc_state->active) {
+ struct dc_cursor_position position;
+
+ memset(&position, 0, sizeof(position));
+ mutex_lock(&dm->dc_lock);
+ dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
+ mutex_unlock(&dm->dc_lock);
+ }
+
+ /* Copy all transient state flags into dc state */
+ if (dm_new_crtc_state->stream) {
+ amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
+ dm_new_crtc_state->stream);
+ }
+
+		/* Handles the headless hotplug case, updating new_state and
+		 * aconnector as needed.
+		 */
+
+ if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
+
+ DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
+
+ if (!dm_new_crtc_state->stream) {
+				/*
+				 * This could happen because of issues with
+				 * userspace notification delivery: userspace
+				 * tries to set a mode on a display that is in
+				 * fact disconnected, so the aconnector's
+				 * dc_sink is NULL. We expect a mode reset to
+				 * come soon.
+				 *
+				 * This can also happen when an unplug occurs
+				 * while the resume sequence is still running.
+				 *
+				 * In either case, we want to pretend we still
+				 * have a sink to keep the pipe running so that
+				 * hw state stays consistent with the sw state.
+				 */
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ continue;
+ }
+
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+
+ pm_runtime_get_noresume(dev->dev);
+
+ acrtc->enabled = true;
+ acrtc->hw_mode = new_crtc_state->mode;
+ crtc->hwmode = new_crtc_state->mode;
+ mode_set_reset_required = true;
+ } else if (modereset_required(new_crtc_state)) {
+ DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
+ /* i.e. reset mode */
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+
+ mode_set_reset_required = true;
+ }
+ } /* for_each_crtc_in_state() */
+
+	/* If there was a mode set or reset, disable eDP PSR and Replay. */
+ if (mode_set_reset_required) {
+ if (dm->vblank_control_workqueue)
+ flush_workqueue(dm->vblank_control_workqueue);
+
+ amdgpu_dm_replay_disable_all(dm);
+ amdgpu_dm_psr_disable_all(dm);
+ }
+
+ dm_enable_per_frame_crtc_master_sync(dc_state);
+ mutex_lock(&dm->dc_lock);
+ WARN_ON(!dc_commit_streams(dm->dc, &params));
+
+ /* Allow idle optimization when vblank count is 0 for display off */
+ if (dm->active_vblank_irq_count == 0)
+ dc_allow_idle_optimizations(dm->dc, true);
+ mutex_unlock(&dm->dc_lock);
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state->stream != NULL) {
+ const struct dc_stream_status *status =
+ dc_stream_get_status(dm_new_crtc_state->stream);
+
+ if (!status)
+ status = dc_state_get_stream_status(dc_state,
+ dm_new_crtc_state->stream);
+ if (!status)
+ drm_err(dev,
+ "got no status for stream %p on acrtc%p\n",
+ dm_new_crtc_state->stream, acrtc);
+ else
+ acrtc->otg_inst = status->primary_otg_inst;
+ }
+ }
+}
+
+static void dm_set_writeback(struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *crtc_state,
+ struct drm_connector *connector,
+ struct drm_connector_state *new_con_state)
+{
+ struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector);
+ struct amdgpu_device *adev = dm->adev;
+ struct amdgpu_crtc *acrtc;
+ struct dc_writeback_info *wb_info;
+ struct pipe_ctx *pipe = NULL;
+ struct amdgpu_framebuffer *afb;
+ int i = 0;
+
+ wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
+ if (!wb_info) {
+ DRM_ERROR("Failed to allocate wb_info\n");
+ return;
+ }
+
+ acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
+ if (!acrtc) {
+ DRM_ERROR("no amdgpu_crtc found\n");
+ kfree(wb_info);
+ return;
+ }
+
+ afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
+ if (!afb) {
+ DRM_ERROR("No amdgpu_framebuffer found\n");
+ kfree(wb_info);
+ return;
+ }
+
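+	/* Find the pipe in the current DC state that drives this CRTC's stream. */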
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) {
+ pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i];
+ break;
+ }
+ }
+
+ /* fill in wb_info */
+ wb_info->wb_enabled = true;
+
+ wb_info->dwb_pipe_inst = 0;
+ wb_info->dwb_params.dwbscl_black_color = 0;
+ wb_info->dwb_params.hdr_mult = 0x1F000;
+ wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS;
+ wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13;
+ wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC;
+ wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC;
+
+ /* width & height from crtc */
+ wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay;
+ wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay;
+ wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay;
+ wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay;
+
+ wb_info->dwb_params.cnv_params.crop_en = false;
+ wb_info->dwb_params.stereo_params.stereo_enabled = false;
+
+ wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff; // 10 bits
+ wb_info->dwb_params.cnv_params.out_min_pix_val = 0;
+ wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB;
+ wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS;
+
+ wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444;
+
+ wb_info->dwb_params.capture_rate = dwb_capture_rate_0;
+
+ wb_info->dwb_params.scaler_taps.h_taps = 4;
+ wb_info->dwb_params.scaler_taps.v_taps = 4;
+ wb_info->dwb_params.scaler_taps.h_taps_c = 2;
+ wb_info->dwb_params.scaler_taps.v_taps_c = 2;
+ wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING;
+
+ wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0];
+ wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1];
+
+ for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) {
+ wb_info->mcif_buf_params.luma_address[i] = afb->address;
+ wb_info->mcif_buf_params.chroma_address[i] = 0;
+ }
+
+ wb_info->mcif_buf_params.p_vmid = 1;
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) {
+ wb_info->mcif_warmup_params.start_address.quad_part = afb->address;
+ wb_info->mcif_warmup_params.region_size =
+ wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height;
+ }
+ wb_info->mcif_warmup_params.p_vmid = 1;
+ wb_info->writeback_source_plane = pipe->plane_state;
+
+ dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info);
+
+ acrtc->wb_pending = true;
+ acrtc->wb_conn = wb_conn;
+ drm_writeback_queue_job(wb_conn, new_con_state);
+}
+
+/**
+ * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware failure, since
+ * atomic check should have filtered anything non-kosher.
+ */
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ struct dc_state *dc_state = NULL;
+ u32 i, j;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int crtc_disable_count = 0;
+
+ trace_amdgpu_dm_atomic_commit_tail_begin(state);
+
+ if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed)
+ dc_allow_idle_optimizations(dm->dc, false);
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
+
+ dm_state = dm_atomic_get_new_state(state);
+ if (dm_state && dm_state->context) {
+ dc_state = dm_state->context;
+ amdgpu_dm_commit_streams(state, dc_state);
+ }
+
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct amdgpu_dm_connector *aconnector;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!adev->dm.hdcp_workqueue)
+ continue;
+
+ pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
+
+ if (!connector)
+ continue;
+
+ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ connector->index, connector->status, connector->dpms);
+ pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ old_con_state->content_protection, new_con_state->content_protection);
+
+ if (aconnector->dc_sink) {
+ if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
+ aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
+ pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+ aconnector->dc_sink->edid_caps.display_name);
+ }
+ }
+
+ new_crtc_state = NULL;
+ old_crtc_state = NULL;
+
+ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
+
+ if (old_crtc_state)
+ pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ old_crtc_state->enable,
+ old_crtc_state->active,
+ old_crtc_state->mode_changed,
+ old_crtc_state->active_changed,
+ old_crtc_state->connectors_changed);
+
+ if (new_crtc_state)
+ pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+ }
+
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!adev->dm.hdcp_workqueue)
+ continue;
+
+ new_crtc_state = NULL;
+ old_crtc_state = NULL;
+
+ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
+ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+ new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ dm_new_con_state->update_hdcp = true;
+ continue;
+ }
+
+ if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
+ old_con_state, connector, adev->dm.hdcp_workqueue)) {
+			/* When a display is unplugged from an MST hub, the connector
+			 * will be destroyed in dm_dp_mst_connector_destroy and its
+			 * HDCP properties (type, undesired/desired/enabled) will be
+			 * lost. So save the HDCP properties into hdcp_work within
+			 * amdgpu_dm_atomic_commit_tail; if the same display is
+			 * plugged back in with the same display index, its HDCP
+			 * properties will be retrieved from hdcp_work within
+			 * dm_dp_mst_get_modes.
+			 */
+
+ bool enable_encryption = false;
+
+ if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ enable_encryption = true;
+
+ if (aconnector->dc_link && aconnector->dc_sink &&
+ aconnector->dc_link->type == dc_connection_mst_branch) {
+ struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+ struct hdcp_workqueue *hdcp_w =
+ &hdcp_work[aconnector->dc_link->link_index];
+
+ hdcp_w->hdcp_content_type[connector->index] =
+ new_con_state->hdcp_content_type;
+ hdcp_w->content_protection[connector->index] =
+ new_con_state->content_protection;
+ }
+
+ if (new_crtc_state && new_crtc_state->mode_changed &&
+ new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ enable_encryption = true;
+
+ DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+
+ hdcp_update_display(
+ adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
+ new_con_state->hdcp_content_type, enable_encryption);
+ }
+ }
+
+ /* Handle connector state changes */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct dc_surface_update *dummy_updates;
+ struct dc_stream_update stream_update;
+ struct dc_info_packet hdr_packet;
+ struct dc_stream_status *status = NULL;
+ bool abm_changed, hdr_changed, scaling_changed;
+
+ memset(&stream_update, 0, sizeof(stream_update));
+
+ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ scaling_changed = is_scaling_state_different(dm_new_con_state,
+ dm_old_con_state);
+
+ abm_changed = dm_new_crtc_state->abm_level !=
+ dm_old_crtc_state->abm_level;
+
+ hdr_changed =
+ !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
+
+ if (!scaling_changed && !abm_changed && !hdr_changed)
+ continue;
+
+ stream_update.stream = dm_new_crtc_state->stream;
+ if (scaling_changed) {
+ update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
+ dm_new_con_state, dm_new_crtc_state->stream);
+
+ stream_update.src = dm_new_crtc_state->stream->src;
+ stream_update.dst = dm_new_crtc_state->stream->dst;
+ }
+
+ if (abm_changed) {
+ dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
+
+ stream_update.abm_level = &dm_new_crtc_state->abm_level;
+ }
+
+ if (hdr_changed) {
+ fill_hdr_info_packet(new_con_state, &hdr_packet);
+ stream_update.hdr_static_metadata = &hdr_packet;
+ }
+
+ status = dc_stream_get_status(dm_new_crtc_state->stream);
+
+ if (WARN_ON(!status))
+ continue;
+
+ WARN_ON(!status->plane_count);
+
+ /*
+ * TODO: DC refuses to perform stream updates without a dc_surface_update.
+ * Here we create an empty update on each plane.
+ * To fix this, DC should permit updating only stream properties.
+ */
+ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
+ if (!dummy_updates) {
+ DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
+ continue;
+ }
+ for (j = 0; j < status->plane_count; j++)
+ dummy_updates[j].surface = status->plane_states[0];
+
+
+ mutex_lock(&dm->dc_lock);
+ dc_update_planes_and_stream(dm->dc,
+ dummy_updates,
+ status->plane_count,
+ dm_new_crtc_state->stream,
+ &stream_update);
+ mutex_unlock(&dm->dc_lock);
+ kfree(dummy_updates);
+ }
+
+	/*
+	 * Enable interrupts for CRTCs that are newly enabled or went through
+	 * a modeset. This is intentionally deferred until after the front-end
+	 * state has been modified so that the OTG is on and the IRQ handlers
+	 * do not access stale or invalid state.
+	 */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+#ifdef CONFIG_DEBUG_FS
+ enum amdgpu_dm_pipe_crc_source cur_crc_src;
+#endif
+ /* Count number of newly disabled CRTCs for dropping PM refs later. */
+ if (old_crtc_state->active && !new_crtc_state->active)
+ crtc_disable_count++;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+		/* Update the freesync config on the CRTC state and the params for the IRQ handler. */
+ update_stream_irq_parameters(dm, dm_new_crtc_state);
+
+#ifdef CONFIG_DEBUG_FS
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ cur_crc_src = acrtc->dm_irq_params.crc_src;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+#endif
+
+ if (new_crtc_state->active &&
+ (!old_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+ dc_stream_retain(dm_new_crtc_state->stream);
+ acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
+ manage_dm_interrupts(adev, acrtc, true);
+ }
+ /* Handle vrr on->off / off->on transitions */
+ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
+
+#ifdef CONFIG_DEBUG_FS
+ if (new_crtc_state->active &&
+ (!old_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+			/*
+			 * The frontend may have changed, so reapply the CRC
+			 * capture settings for the stream.
+			 */
+ if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (amdgpu_dm_crc_window_is_activated(crtc)) {
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ acrtc->dm_irq_params.window_param.update_win = true;
+
+					/*
+					 * It takes 2 frames for the HW to stably generate a CRC
+					 * when resuming from suspend, so set skip_frame_cnt to 2.
+					 */
+ acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+ }
+#endif
+ if (amdgpu_dm_crtc_configure_crc_source(
+ crtc, dm_new_crtc_state, cur_crc_src))
+ DRM_DEBUG_DRIVER("Failed to configure crc source");
+ }
+ }
+#endif
+ }
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
+ if (new_crtc_state->async_flip)
+ wait_for_vblank = false;
+
+	/* Update planes when needed, per CRTC. */
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state->stream)
+ amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
+ }
+
+ /* Enable writeback */
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ if (!new_con_state->writeback_job)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+
+ if (!new_crtc_state)
+ continue;
+
+ if (acrtc->wb_enabled)
+ continue;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state);
+ acrtc->wb_enabled = true;
+ }
+
+ /* Update audio instances for each connector. */
+ amdgpu_dm_commit_audio(dev, state);
+
+ /* restore the backlight level */
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (dm->backlight_dev[i] &&
+ (dm->actual_brightness[i] != dm->brightness[i]))
+ amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
+ }
+
+	/*
+	 * Send a vblank event for every event not handled in the flip path,
+	 * and mark each consumed event for drm_atomic_helper_commit_hw_done().
+	 */
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+
+ if (new_crtc_state->event)
+ drm_send_event_locked(dev, &new_crtc_state->event->base);
+
+ new_crtc_state->event = NULL;
+ }
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+
+ /* Signal HW programming completion */
+ drm_atomic_helper_commit_hw_done(state);
+
+ if (wait_for_vblank)
+ drm_atomic_helper_wait_for_flip_done(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ /* Don't free the memory if we are hitting this as part of suspend.
+ * This way we don't free any memory during suspend; see
+ * amdgpu_bo_free_kernel(). The memory will be freed in the first
+ * non-suspend modeset or when the driver is torn down.
+ */
+ if (!adev->in_suspend) {
+ /* return the stolen vga memory back to VRAM */
+ if (!adev->mman.keep_stolen_vga_memory)
+ amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
+ }
+
+ /*
+ * Finally, drop a runtime PM reference for each newly disabled CRTC,
+ * so we can put the GPU into runtime suspend if we're not driving any
+ * displays anymore
+ */
+ for (i = 0; i < crtc_disable_count; i++)
+ pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
+}
+
+static int dm_force_atomic_commit(struct drm_connector *connector)
+{
+ int ret = 0;
+ struct drm_device *ddev = connector->dev;
+ struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
+ struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ struct drm_plane *plane = disconnected_acrtc->base.primary;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ddev->mode_config.acquire_ctx;
+
+	/* Construct an atomic state to restore the previous display settings */
+
+ /*
+ * Attach connectors to drm_atomic_state
+ */
+ conn_state = drm_atomic_get_connector_state(state, connector);
+
+ ret = PTR_ERR_OR_ZERO(conn_state);
+ if (ret)
+ goto out;
+
+	/* Attach CRTC to drm_atomic_state */
+ crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
+
+ ret = PTR_ERR_OR_ZERO(crtc_state);
+ if (ret)
+ goto out;
+
+ /* force a restore */
+ crtc_state->mode_changed = true;
+
+ /* Attach plane to drm_atomic_state */
+ plane_state = drm_atomic_get_plane_state(state, plane);
+
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (ret)
+ goto out;
+
+ /* Call commit internally with the state we just constructed */
+ ret = drm_atomic_commit(state);
+
+out:
+ drm_atomic_state_put(state);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
+
+ return ret;
+}
+
+/*
+ * This function handles all cases where a set mode does not come upon
+ * hotplug. This includes a display being unplugged and then plugged back
+ * into the same port, and running without usermode desktop manager support.
+ */
+void dm_restore_drm_connector_state(struct drm_device *dev,
+ struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct amdgpu_crtc *disconnected_acrtc;
+ struct dm_crtc_state *acrtc_state;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!aconnector->dc_sink || !connector->state || !connector->encoder)
+ return;
+
+ disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ if (!disconnected_acrtc)
+ return;
+
+ acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+ if (!acrtc_state->stream)
+ return;
+
+	/*
+	 * If the previous sink is not released and differs from the current
+	 * one, we deduce that we cannot rely on a usermode call to turn on
+	 * the display, so we do it here.
+	 */
+ if (acrtc_state->stream->sink != aconnector->dc_sink)
+ dm_force_atomic_commit(&aconnector->base);
+}
+
+/*
+ * Grabs all modesetting locks to serialize against any blocking commits,
+ * and waits for completion of all non-blocking commits.
+ */
+static int do_aquire_global_lock(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_commit *commit;
+ long ret;
+
+	/*
+	 * Adding all modeset locks to the acquire_ctx ensures that when the
+	 * framework releases it, the extra locks we take here will be
+	 * released too.
+	 */
+ ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit, commit_entry);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->commit_lock);
+
+ if (!commit)
+ continue;
+
+		/*
+		 * Make sure all pending HW programming has completed and all
+		 * page flips are done.
+		 */
+ ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
+
+ if (ret > 0)
+ ret = wait_for_completion_interruptible_timeout(
+ &commit->flip_done, 10*HZ);
+
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+ crtc->base.id, crtc->name);
+
+ drm_crtc_commit_put(commit);
+ }
+
+ return ret < 0 ? ret : 0;
+}
+
+static void get_freesync_config_for_crtc(
+ struct dm_crtc_state *new_crtc_state,
+ struct dm_connector_state *new_con_state)
+{
+ struct mod_freesync_config config = {0};
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_display_mode *mode = &new_crtc_state->base.mode;
+ int vrefresh = drm_mode_vrefresh(mode);
+ bool fs_vid_mode = false;
+
+ if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return;
+
+ aconnector = to_amdgpu_dm_connector(new_con_state->base.connector);
+
+ new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
+ vrefresh >= aconnector->min_vfreq &&
+ vrefresh <= aconnector->max_vfreq;
+
+ if (new_crtc_state->vrr_supported) {
+ new_crtc_state->stream->ignore_msa_timing_param = true;
+ fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
+
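+		/* min/max_vfreq are in Hz; the VRR parameters are in uHz. */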
+ config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
+ config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
+ config.vsif_supported = true;
+ config.btr = true;
+
+ if (fs_vid_mode) {
+ config.state = VRR_STATE_ACTIVE_FIXED;
+ config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
+ goto out;
+ } else if (new_crtc_state->base.vrr_enabled) {
+ config.state = VRR_STATE_ACTIVE_VARIABLE;
+ } else {
+ config.state = VRR_STATE_INACTIVE;
+ }
+ }
+out:
+ new_crtc_state->freesync_config = config;
+}
+
+static void reset_freesync_config_for_crtc(
+ struct dm_crtc_state *new_crtc_state)
+{
+ new_crtc_state->vrr_supported = false;
+
+ memset(&new_crtc_state->vrr_infopacket, 0,
+ sizeof(new_crtc_state->vrr_infopacket));
+}
+
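+/*
+ * Returns true when only the vertical front porch differs between the two
+ * modes: vtotal and the vsync position change while the sync pulse width and
+ * all horizontal timing parameters stay the same. This is how freesync video
+ * modes derived from the same base timing are detected.
+ */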
+static bool
+is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state)
+{
+ const struct drm_display_mode *old_mode, *new_mode;
+
+ if (!old_crtc_state || !new_crtc_state)
+ return false;
+
+ old_mode = &old_crtc_state->mode;
+ new_mode = &new_crtc_state->mode;
+
+ if (old_mode->clock == new_mode->clock &&
+ old_mode->hdisplay == new_mode->hdisplay &&
+ old_mode->vdisplay == new_mode->vdisplay &&
+ old_mode->htotal == new_mode->htotal &&
+ old_mode->vtotal != new_mode->vtotal &&
+ old_mode->hsync_start == new_mode->hsync_start &&
+ old_mode->vsync_start != new_mode->vsync_start &&
+ old_mode->hsync_end == new_mode->hsync_end &&
+ old_mode->vsync_end != new_mode->vsync_end &&
+ old_mode->hskew == new_mode->hskew &&
+ old_mode->vscan == new_mode->vscan &&
+ (old_mode->vsync_end - old_mode->vsync_start) ==
+ (new_mode->vsync_end - new_mode->vsync_start))
+ return true;
+
+ return false;
+}
+
+static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
+{
+ u64 num, den, res;
+ struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
+
+ dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
+
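+	/*
+	 * Fixed refresh in uHz = pixel clock (kHz) * 1000 * 1000000 /
+	 * (htotal * vtotal), e.g. a 148500 kHz mode with a 2200x1125 total
+	 * timing yields 60,000,000 uHz (60 Hz).
+	 */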
+ num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
+ den = (unsigned long long)new_crtc_state->mode.htotal *
+ (unsigned long long)new_crtc_state->mode.vtotal;
+
+ res = div_u64(num, den);
+ dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
+}
+
+static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state,
+ bool enable,
+ bool *lock_and_validation_needed)
+{
+ struct dm_atomic_state *dm_state = NULL;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct dc_stream_state *new_stream;
+ int ret = 0;
+
+ /*
+ * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
+ * update changed items
+ */
+ struct amdgpu_crtc *acrtc = NULL;
+ struct drm_connector *connector = NULL;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
+ struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
+
+ new_stream = NULL;
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ acrtc = to_amdgpu_crtc(crtc);
+ connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
+ if (connector)
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ /* TODO This hack should go away */
+ if (connector && enable) {
+ /* Make sure fake sink is created in plug-in scenario */
+ drm_new_conn_state = drm_atomic_get_new_connector_state(state,
+ connector);
+ drm_old_conn_state = drm_atomic_get_old_connector_state(state,
+ connector);
+
+ if (IS_ERR(drm_new_conn_state)) {
+ ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
+ goto fail;
+ }
+
+ dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
+ dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ goto skip_modeset;
+
+ new_stream = create_validate_stream_for_sink(aconnector,
+ &new_crtc_state->mode,
+ dm_new_conn_state,
+ dm_old_crtc_state->stream);
+
+ /*
+		 * We can have no stream on ACTION_SET if a display
+		 * was disconnected during S3; in this case it is not an
+		 * error, the OS will be updated after detection and
+		 * will do the right thing on the next atomic commit
+ */
+
+ if (!new_stream) {
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /*
+ * TODO: Check VSDB bits to decide whether this should
+ * be enabled or not.
+ */
+ new_stream->triggered_crtc_reset.enabled =
+ dm->force_timing_sync;
+
+ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
+
+ ret = fill_hdr_info_packet(drm_new_conn_state,
+ &new_stream->hdr_static_metadata);
+ if (ret)
+ goto fail;
+
+ /*
+ * If we already removed the old stream from the context
+ * (and set the new stream to NULL) then we can't reuse
+ * the old stream even if the stream and scaling are unchanged.
+ * We'll hit the BUG_ON and black screen.
+ *
+ * TODO: Refactor this function to allow this check to work
+ * in all conditions.
+ */
+ if (amdgpu_freesync_vid_mode &&
+ dm_new_crtc_state->stream &&
+ is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
+ goto skip_modeset;
+
+ if (dm_new_crtc_state->stream &&
+ dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
+ new_crtc_state->mode_changed = false;
+			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
+ new_crtc_state->mode_changed);
+ }
+ }
+
+ /* mode_changed flag may get updated above, need to check again */
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ goto skip_modeset;
+
+ drm_dbg_state(state->dev,
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->planes_changed,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* Remove stream for any changed/disabled CRTC */
+ if (!enable) {
+
+ if (!dm_old_crtc_state->stream)
+ goto skip_modeset;
+
+ /* Unset freesync video if it was active before */
+ if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
+ dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
+ dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
+ }
+
+ /* Now check if we should set freesync video mode */
+ if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
+ dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ is_timing_unchanged_for_freesync(new_crtc_state,
+ old_crtc_state)) {
+ new_crtc_state->mode_changed = false;
+ DRM_DEBUG_DRIVER(
+				"Mode change not required for front porch change, setting mode_changed to %d\n",
+ new_crtc_state->mode_changed);
+
+ set_freesync_fixed_config(dm_new_crtc_state);
+
+ goto skip_modeset;
+ } else if (amdgpu_freesync_vid_mode && aconnector &&
+ is_freesync_video_mode(&new_crtc_state->mode,
+ aconnector)) {
+ struct drm_display_mode *high_mode;
+
+ high_mode = get_highest_refresh_rate_mode(aconnector, false);
+ if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
+ set_freesync_fixed_config(dm_new_crtc_state);
+ }
+
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
+
+ DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
+ crtc->base.id);
+
+ /* i.e. reset mode */
+ if (dc_state_remove_stream(
+ dm->dc,
+ dm_state->context,
+ dm_old_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dc_stream_release(dm_old_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+
+ reset_freesync_config_for_crtc(dm_new_crtc_state);
+
+ *lock_and_validation_needed = true;
+
+ } else {/* Add stream for any updated/enabled CRTC */
+ /*
+		 * Quick fix to prevent a NULL pointer dereference on new_stream
+		 * when newly added MST connectors are not found in the existing
+		 * crtc_state in chained mode.
+		 * TODO: need to dig out the root cause of that
+ */
+ if (!connector)
+ goto skip_modeset;
+
+ if (modereset_required(new_crtc_state))
+ goto skip_modeset;
+
+ if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
+ dm_old_crtc_state->stream)) {
+
+ WARN_ON(dm_new_crtc_state->stream);
+
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
+
+ dm_new_crtc_state->stream = new_stream;
+
+ dc_stream_retain(new_stream);
+
+ DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
+ crtc->base.id);
+
+ if (dc_state_add_stream(
+ dm->dc,
+ dm_state->context,
+ dm_new_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ *lock_and_validation_needed = true;
+ }
+ }
+
+skip_modeset:
+ /* Release extra reference */
+ if (new_stream)
+ dc_stream_release(new_stream);
+
+ /*
+ * We want to do dc stream updates that do not require a
+ * full modeset below.
+ */
+ if (!(enable && connector && new_crtc_state->active))
+ return 0;
+ /*
+	 * Given the above conditions, the dc state cannot be NULL because:
+	 * 1. We're in the process of enabling CRTCs (the stream has just been
+	 *    added to the dc context, or is already in the context)
+	 * 2. The CRTC has a valid connector attached, and
+	 * 3. The CRTC is currently active and enabled.
+	 * => The dc stream state currently exists.
+ */
+ BUG_ON(dm_new_crtc_state->stream == NULL);
+
+ /* Scaling or underscan settings */
+ if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))
+ update_stream_scaling_settings(
+ &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
+
+ /* ABM settings */
+ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
+
+ /*
+ * Color management settings. We also update color properties
+ * when a modeset is needed, to ensure it gets reprogrammed.
+ */
+ if (dm_new_crtc_state->base.color_mgmt_changed ||
+ dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
+ if (ret)
+ goto fail;
+ }
+
+ /* Update Freesync settings. */
+ get_freesync_config_for_crtc(dm_new_crtc_state,
+ dm_new_conn_state);
+
+ return ret;
+
+fail:
+ if (new_stream)
+ dc_stream_release(new_stream);
+ return ret;
+}
+
+static bool should_reset_plane(struct drm_atomic_state *state,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state)
+{
+ struct drm_plane *other;
+ struct drm_plane_state *old_other_state, *new_other_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
+ int i;
+
+ /*
+ * TODO: Remove this hack for all asics once it proves that the
+ * fast updates works fine on DCN3.2+.
+ */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) &&
+ state->allow_modeset)
+ return true;
+
+ /* Exit early if we know that we're adding or removing the plane. */
+ if (old_plane_state->crtc != new_plane_state->crtc)
+ return true;
+
+ /* old crtc == new_crtc == NULL, plane not in context. */
+ if (!new_plane_state->crtc)
+ return false;
+
+ new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+
+ if (!new_crtc_state)
+ return true;
+
+ /* CRTC Degamma changes currently require us to recreate planes. */
+ if (new_crtc_state->color_mgmt_changed)
+ return true;
+
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state))
+ return true;
+
+ /*
+ * If there are any new primary or overlay planes being added or
+ * removed then the z-order can potentially change. To ensure
+ * correct z-order and pipe acquisition the current DC architecture
+ * requires us to remove and recreate all existing planes.
+ *
+ * TODO: Come up with a more elegant solution for this.
+ */
+ for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
+ struct amdgpu_framebuffer *old_afb, *new_afb;
+ struct dm_plane_state *dm_new_other_state, *dm_old_other_state;
+
+ dm_new_other_state = to_dm_plane_state(new_other_state);
+ dm_old_other_state = to_dm_plane_state(old_other_state);
+
+ if (other->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (old_other_state->crtc != new_plane_state->crtc &&
+ new_other_state->crtc != new_plane_state->crtc)
+ continue;
+
+ if (old_other_state->crtc != new_other_state->crtc)
+ return true;
+
+ /* Src/dst size and scaling updates. */
+ if (old_other_state->src_w != new_other_state->src_w ||
+ old_other_state->src_h != new_other_state->src_h ||
+ old_other_state->crtc_w != new_other_state->crtc_w ||
+ old_other_state->crtc_h != new_other_state->crtc_h)
+ return true;
+
+ /* Rotation / mirroring updates. */
+ if (old_other_state->rotation != new_other_state->rotation)
+ return true;
+
+ /* Blending updates. */
+ if (old_other_state->pixel_blend_mode !=
+ new_other_state->pixel_blend_mode)
+ return true;
+
+ /* Alpha updates. */
+ if (old_other_state->alpha != new_other_state->alpha)
+ return true;
+
+ /* Colorspace changes. */
+ if (old_other_state->color_range != new_other_state->color_range ||
+ old_other_state->color_encoding != new_other_state->color_encoding)
+ return true;
+
+ /* HDR/Transfer Function changes. */
+ if (dm_old_other_state->degamma_tf != dm_new_other_state->degamma_tf ||
+ dm_old_other_state->degamma_lut != dm_new_other_state->degamma_lut ||
+ dm_old_other_state->hdr_mult != dm_new_other_state->hdr_mult ||
+ dm_old_other_state->ctm != dm_new_other_state->ctm ||
+ dm_old_other_state->shaper_lut != dm_new_other_state->shaper_lut ||
+ dm_old_other_state->shaper_tf != dm_new_other_state->shaper_tf ||
+ dm_old_other_state->lut3d != dm_new_other_state->lut3d ||
+ dm_old_other_state->blend_lut != dm_new_other_state->blend_lut ||
+ dm_old_other_state->blend_tf != dm_new_other_state->blend_tf)
+ return true;
+
+ /* Framebuffer checks fall at the end. */
+ if (!old_other_state->fb || !new_other_state->fb)
+ continue;
+
+ /* Pixel format changes can require bandwidth updates. */
+ if (old_other_state->fb->format != new_other_state->fb->format)
+ return true;
+
+ old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
+ new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
+
+ /* Tiling and DCC changes also require bandwidth updates. */
+ if (old_afb->tiling_flags != new_afb->tiling_flags ||
+ old_afb->base.modifier != new_afb->base.modifier)
+ return true;
+ }
+
+ return false;
+}
+
+static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
+ struct drm_plane_state *new_plane_state,
+ struct drm_framebuffer *fb)
+{
+ struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
+ struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
+ unsigned int pitch;
+ bool linear;
+
+ if (fb->width > new_acrtc->max_cursor_width ||
+ fb->height > new_acrtc->max_cursor_height) {
+ DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
+ new_plane_state->fb->width,
+ new_plane_state->fb->height);
+ return -EINVAL;
+ }
+ if (new_plane_state->src_w != fb->width << 16 ||
+ new_plane_state->src_h != fb->height << 16) {
+ DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
+ return -EINVAL;
+ }
+
+ /* Pitch in pixels */
+ pitch = fb->pitches[0] / fb->format->cpp[0];
+
+ if (fb->width != pitch) {
+		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
+ fb->width, pitch);
+ return -EINVAL;
+ }
+
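+	/*
+	 * Only 64, 128 and 256 pixel pitches are supported by the cursor
+	 * plane, e.g. a 64x64 ARGB8888 cursor FB has pitches[0] = 256 bytes
+	 * and cpp = 4, i.e. a 64 pixel pitch.
+	 */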
+ switch (pitch) {
+ case 64:
+ case 128:
+ case 256:
+ /* FB pitch is supported by cursor plane */
+ break;
+ default:
+ DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
+ return -EINVAL;
+ }
+
+ /* Core DRM takes care of checking FB modifiers, so we only need to
+ * check tiling flags when the FB doesn't have a modifier.
+ */
+ if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
+ if (adev->family < AMDGPU_FAMILY_AI) {
+ linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
+ AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
+ AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
+ } else {
+ linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
+ }
+ if (!linear) {
+			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int dm_update_plane_state(struct dc *dc,
+ struct drm_atomic_state *state,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state,
+ bool enable,
+ bool *lock_and_validation_needed,
+ bool *is_top_most_overlay)
+{
+
+ struct dm_atomic_state *dm_state = NULL;
+ struct drm_crtc *new_plane_crtc, *old_plane_crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
+ struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
+ struct amdgpu_crtc *new_acrtc;
+ bool needs_reset;
+ int ret = 0;
+
+
+ new_plane_crtc = new_plane_state->crtc;
+ old_plane_crtc = old_plane_state->crtc;
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ dm_old_plane_state = to_dm_plane_state(old_plane_state);
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ if (!enable || !new_plane_crtc ||
+ drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
+
+ new_acrtc = to_amdgpu_crtc(new_plane_crtc);
+
+ if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
+ DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
+ return -EINVAL;
+ }
+
+ if (new_plane_state->fb) {
+ ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
+ new_plane_state->fb);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ needs_reset = should_reset_plane(state, plane, old_plane_state,
+ new_plane_state);
+
+ /* Remove any changed/removed planes */
+ if (!enable) {
+ if (!needs_reset)
+ return 0;
+
+ if (!old_plane_crtc)
+ return 0;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(
+ state, old_plane_crtc);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ if (!dm_old_crtc_state->stream)
+ return 0;
+
+ DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, old_plane_crtc->base.id);
+
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ return ret;
+
+ if (!dc_state_remove_plane(
+ dc,
+ dm_old_crtc_state->stream,
+ dm_old_plane_state->dc_state,
+ dm_state->context)) {
+
+ return -EINVAL;
+ }
+
+ if (dm_old_plane_state->dc_state)
+ dc_plane_state_release(dm_old_plane_state->dc_state);
+
+ dm_new_plane_state->dc_state = NULL;
+
+ *lock_and_validation_needed = true;
+
+ } else { /* Add new planes */
+ struct dc_plane_state *dc_new_plane_state;
+
+ if (drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
+
+ if (!new_plane_crtc)
+ return 0;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (!dm_new_crtc_state->stream)
+ return 0;
+
+ if (!needs_reset)
+ return 0;
+
+ ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
+ if (ret)
+ return ret;
+
+ WARN_ON(dm_new_plane_state->dc_state);
+
+ dc_new_plane_state = dc_create_plane_state(dc);
+ if (!dc_new_plane_state)
+ return -ENOMEM;
+
+		/* Block the topmost plane from being a video plane */
+		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+			if (amdgpu_dm_plane_is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) {
+				dc_plane_state_release(dc_new_plane_state);
+				return -EINVAL;
+			}
+
+			*is_top_most_overlay = false;
+		}
+
+ DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, new_plane_crtc->base.id);
+
+ ret = fill_dc_plane_attributes(
+ drm_to_adev(new_plane_crtc->dev),
+ dc_new_plane_state,
+ new_plane_state,
+ new_crtc_state);
+ if (ret) {
+ dc_plane_state_release(dc_new_plane_state);
+ return ret;
+ }
+
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret) {
+ dc_plane_state_release(dc_new_plane_state);
+ return ret;
+ }
+
+ /*
+ * Any atomic check errors that occur after this will
+ * not need a release. The plane state will be attached
+ * to the stream, and therefore part of the atomic
+ * state. It'll be released when the atomic state is
+ * cleaned.
+ */
+ if (!dc_state_add_plane(
+ dc,
+ dm_new_crtc_state->stream,
+ dc_new_plane_state,
+ dm_state->context)) {
+
+ dc_plane_state_release(dc_new_plane_state);
+ return -EINVAL;
+ }
+
+ dm_new_plane_state->dc_state = dc_new_plane_state;
+
+ dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
+
+ /* Tell DC to do a full surface update every time there
+ * is a plane change. Inefficient, but works for now.
+ */
+ dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
+
+ *lock_and_validation_needed = true;
+ }
+
+
+ return ret;
+}
+
+static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
+ int *src_w, int *src_h)
+{
+ switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_90:
+ case DRM_MODE_ROTATE_270:
+ *src_w = plane_state->src_h >> 16;
+ *src_h = plane_state->src_w >> 16;
+ break;
+ case DRM_MODE_ROTATE_0:
+ case DRM_MODE_ROTATE_180:
+ default:
+ *src_w = plane_state->src_w >> 16;
+ *src_h = plane_state->src_h >> 16;
+ break;
+ }
+}
+
+static void
+dm_get_plane_scale(struct drm_plane_state *plane_state,
+ int *out_plane_scale_w, int *out_plane_scale_h)
+{
+ int plane_src_w, plane_src_h;
+
+ dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
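+	/*
+	 * Scale is computed in permille of the source size, e.g. a 1920 px
+	 * wide source in a 960 px wide CRTC rectangle gives a horizontal
+	 * scale of 500 (0.5x). src_w/src_h are 16.16 fixed point, hence the
+	 * >> 16 in dm_get_oriented_plane_size().
+	 */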
+ *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
+ *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
+}
+
+static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state)
+{
+ struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane_state *new_cursor_state, *new_underlying_state;
+ int i;
+ int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
+ bool any_relevant_change = false;
+
+ /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
+ * cursor per pipe but it's going to inherit the scaling and
+	 * positioning from the underlying pipe. Check that the cursor plane's
+	 * scaling matches the underlying planes'.
+ */
+
+ /* If no plane was enabled or changed scaling, no need to check again */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
+
+ if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
+ continue;
+
+ if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
+ any_relevant_change = true;
+ break;
+ }
+
+ if (new_plane_state->fb == old_plane_state->fb &&
+ new_plane_state->crtc_w == old_plane_state->crtc_w &&
+ new_plane_state->crtc_h == old_plane_state->crtc_h)
+ continue;
+
+ dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
+ dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
+
+ if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
+ any_relevant_change = true;
+ break;
+ }
+ }
+
+ if (!any_relevant_change)
+ return 0;
+
+ new_cursor_state = drm_atomic_get_plane_state(state, cursor);
+ if (IS_ERR(new_cursor_state))
+ return PTR_ERR(new_cursor_state);
+
+ if (!new_cursor_state->fb)
+ return 0;
+
+ dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
+
+ /* Need to check all enabled planes, even if this commit doesn't change
+ * their state
+ */
+ i = drm_atomic_add_affected_planes(state, crtc);
+ if (i)
+ return i;
+
+ for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
+ /* Narrow down to non-cursor planes on the same CRTC as the cursor */
+ if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
+ continue;
+
+ /* Ignore disabled planes */
+ if (!new_underlying_state->fb)
+ continue;
+
+ dm_get_plane_scale(new_underlying_state,
+ &underlying_scale_w, &underlying_scale_h);
+
+ if (cursor_scale_w != underlying_scale_w ||
+ cursor_scale_h != underlying_scale_h) {
+ drm_dbg_atomic(crtc->dev,
+ "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
+ cursor->base.id, cursor->name, underlying->base.id, underlying->name);
+ return -EINVAL;
+ }
+
+ /* If this plane covers the whole CRTC, no need to check planes underneath */
+ if (new_underlying_state->crtc_x <= 0 &&
+ new_underlying_state->crtc_y <= 0 &&
+ new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
+ new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
+ break;
+ }
+
+ return 0;
+}
+
+static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state, *old_conn_state;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ int i;
+
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
+ if (!conn_state->crtc)
+ conn_state = old_conn_state;
+
+ if (conn_state->crtc != crtc)
+ continue;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (!aconnector->mst_output_port || !aconnector->mst_root)
+ aconnector = NULL;
+ else
+ break;
+ }
+
+ if (!aconnector)
+ return 0;
+
+ return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
+}
+
+/**
+ * amdgpu_dm_atomic_check() - Atomic check implementation for amdgpu DM.
+ *
+ * @dev: The DRM device
+ * @state: The atomic state to commit
+ *
+ * Validate that the given atomic state is programmable by DC into hardware.
+ * This involves constructing a &struct dc_state reflecting the new hardware
+ * state we wish to commit, then querying DC to see if it is programmable. It's
+ * important not to modify the existing DC state. Otherwise, atomic_check
+ * may unexpectedly commit hardware changes.
+ *
+ * When validating the DC state, it's important that the right locks are
+ * acquired. For the full update case, which removes/adds/updates streams on
+ * one CRTC while flipping on another CRTC, acquiring the global lock
+ * guarantees that any such full update commit will wait for the completion of
+ * any outstanding flip using DRM's synchronization events.
+ *
+ * Note that DM adds the affected connectors for all CRTCs in state, even when
+ * that might not seem necessary. This is because DC stream creation requires
+ * the
+ * DC sink, which is tied to the DRM connector state. Cleaning this up should
+ * be possible but non-trivial - a possible TODO item.
+ *
+ * Return: 0 on success, or a negative error code if validation failed.
+ */
+static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dm_atomic_state *dm_state = NULL;
+ struct dc *dc = adev->dm.dc;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ enum dc_status status;
+ int ret, i;
+ bool lock_and_validation_needed = false;
+ bool is_top_most_overlay = true;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_topology_state *mst_state;
+ struct dsc_mst_fairness_vars vars[MAX_PIPES];
+
+ trace_amdgpu_dm_atomic_check_begin(state);
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
+ goto fail;
+ }
+
+ /* Check connector changes */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+
+ /* Skip connectors that are disabled or part of modeset already. */
+ if (!new_con_state->crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
+ if (IS_ERR(new_crtc_state)) {
+ DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
+ ret = PTR_ERR(new_crtc_state);
+ goto fail;
+ }
+
+ if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
+ dm_old_con_state->scaling != dm_new_con_state->scaling)
+ new_crtc_state->connectors_changed = true;
+ }
+
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ ret = add_affected_mst_dsc_crtcs(state, crtc);
+ if (ret) {
+ DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
+ goto fail;
+ }
+ }
+ }
+ }
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+ !new_crtc_state->color_mgmt_changed &&
+ old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
+ dm_old_crtc_state->dsc_force_changed == false)
+ continue;
+
+ ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
+ goto fail;
+ }
+
+ if (!new_crtc_state->enable)
+ continue;
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
+ goto fail;
+ }
+
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
+ goto fail;
+ }
+
+ if (dm_old_crtc_state->dsc_force_changed)
+ new_crtc_state->mode_changed = true;
+ }
+
+ /*
+ * Add all primary and overlay planes on the CRTC to the state
+ * whenever a plane is enabled to maintain correct z-ordering
+ * and to enable fast surface updates.
+ */
+ drm_for_each_crtc(crtc, dev) {
+ bool modified = false;
+
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (new_plane_state->crtc == crtc ||
+ old_plane_state->crtc == crtc) {
+ modified = true;
+ break;
+ }
+ }
+
+ if (!modified)
+ continue;
+
+ drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ new_plane_state =
+ drm_atomic_get_plane_state(state, plane);
+
+ if (IS_ERR(new_plane_state)) {
+ ret = PTR_ERR(new_plane_state);
+ DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
+ goto fail;
+ }
+ }
+ }
+
+ /*
+ * DC consults the zpos (layer_index in DC terminology) to determine the
+ * hw plane on which to enable the hw cursor (see
+ * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
+ * atomic state, so call drm helper to normalize zpos.
+ */
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret) {
+ drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
+ goto fail;
+ }
+
+	/* Remove existing planes if they are modified */
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ if (old_plane_state->fb && new_plane_state->fb &&
+ get_mem_type(old_plane_state->fb) !=
+ get_mem_type(new_plane_state->fb))
+ lock_and_validation_needed = true;
+
+ ret = dm_update_plane_state(dc, state, plane,
+ old_plane_state,
+ new_plane_state,
+ false,
+ &lock_and_validation_needed,
+ &is_top_most_overlay);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
+ goto fail;
+ }
+ }
+
+ /* Disable all crtcs which require disable */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ ret = dm_update_crtc_state(&adev->dm, state, crtc,
+ old_crtc_state,
+ new_crtc_state,
+ false,
+ &lock_and_validation_needed);
+ if (ret) {
+ DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
+ goto fail;
+ }
+ }
+
+ /* Enable all crtcs which require enable */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ ret = dm_update_crtc_state(&adev->dm, state, crtc,
+ old_crtc_state,
+ new_crtc_state,
+ true,
+ &lock_and_validation_needed);
+ if (ret) {
+ DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
+ goto fail;
+ }
+ }
+
+ /* Add new/modified planes */
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ ret = dm_update_plane_state(dc, state, plane,
+ old_plane_state,
+ new_plane_state,
+ true,
+ &lock_and_validation_needed,
+ &is_top_most_overlay);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
+ goto fail;
+ }
+ }
+
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ ret = pre_validate_dsc(state, &dm_state, vars);
+ if (ret != 0)
+ goto fail;
+ }
+
+ /* Run this here since we want to validate the streams we created */
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
+ goto fail;
+ }
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->mpo_requested)
+ DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
+ }
+
+ /* Check cursor planes scaling */
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
+ goto fail;
+ }
+ }
+
+ if (state->legacy_cursor_update) {
+ /*
+ * This is a fast cursor update coming from the plane update
+ * helper, check if it can be done asynchronously for better
+ * performance.
+ */
+ state->async_update =
+ !drm_atomic_helper_async_check(dev, state);
+
+ /*
+ * Skip the remaining global validation if this is an async
+ * update. Cursor updates can be done without affecting
+ * state or bandwidth calcs and this avoids the performance
+ * penalty of locking the private state object and
+ * allocating a new dc_state.
+ */
+ if (state->async_update)
+ return 0;
+ }
+
+	/* Check scaling and underscan changes */
+	/*
+	 * TODO: Scaling-change validation was removed because a new stream
+	 * cannot be committed into the context without causing a full reset.
+	 * Need to decide how to handle this.
+	 */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(
+ drm_atomic_get_new_crtc_state(state, &acrtc->base)))
+ continue;
+
+		/* Skip anything that is not a scaling or underscan change */
+ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
+ continue;
+
+ lock_and_validation_needed = true;
+ }
+
+ /* set the slot info for each mst_state based on the link encoding format */
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ u8 link_coding_cap;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ if (connector->index == mst_state->mgr->conn_base_id) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
+ drm_dp_mst_update_slots(mst_state, link_coding_cap);
+
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+ }
+
+	/*
+	 * Streams and planes are reset when there are changes that affect
+	 * bandwidth. Anything that affects bandwidth needs to go through
+	 * DC global validation to ensure that the configuration can be applied
+	 * to hardware.
+	 *
+	 * We currently have to stall out here in atomic_check for outstanding
+	 * commits to finish in this case because our IRQ handlers reference
+	 * DRM state directly - we can end up disabling interrupts too early
+	 * if we don't.
+	 *
+	 * TODO: Remove this stall and drop DM state private objects.
+	 */
+ if (lock_and_validation_needed) {
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
+ goto fail;
+ }
+
+ ret = do_aquire_global_lock(dev, state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
+ goto fail;
+ }
+
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+ if (ret) {
+ DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
+ goto fail;
+ }
+
+ /*
+ * Perform validation of MST topology in the state:
+		 * We need to perform an MST atomic check before calling
+		 * dc_validate_global_state(), otherwise we risk getting stuck
+		 * in an infinite loop and eventually hanging.
+ */
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
+ goto fail;
+ }
+ status = dc_validate_global_state(dc, dm_state->context, true);
+ if (status != DC_OK) {
+			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
+ dc_status_to_str(status), status);
+ ret = -EINVAL;
+ goto fail;
+ }
+ } else {
+ /*
+ * The commit is a fast update. Fast updates shouldn't change
+ * the DC context, affect global validation, and can have their
+ * commit work done in parallel with other commits not touching
+ * the same resource. If we have a new DC context as part of
+ * the DM atomic state from validation we need to free it and
+ * retain the existing one instead.
+ *
+ * Furthermore, since the DM atomic state only contains the DC
+ * context and can safely be annulled, we can free the state
+ * and clear the associated private object now to free
+ * some memory and avoid a possible use-after-free later.
+ */
+
+ for (i = 0; i < state->num_private_objs; i++) {
+ struct drm_private_obj *obj = state->private_objs[i].ptr;
+
+ if (obj->funcs == adev->dm.atomic_obj.funcs) {
+ int j = state->num_private_objs-1;
+
+ dm_atomic_destroy_state(obj,
+ state->private_objs[i].state);
+
+ /* If i is not at the end of the array then the
+ * last element needs to be moved to where i was
+ * before the array can safely be truncated.
+ */
+ if (i != j)
+ state->private_objs[i] =
+ state->private_objs[j];
+
+ state->private_objs[j].ptr = NULL;
+ state->private_objs[j].state = NULL;
+ state->private_objs[j].old_state = NULL;
+ state->private_objs[j].new_state = NULL;
+
+ state->num_private_objs = j;
+ break;
+ }
+ }
+ }
+
+ /* Store the overall update type for use later in atomic check. */
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct dm_crtc_state *dm_new_crtc_state =
+ to_dm_crtc_state(new_crtc_state);
+
+ /*
+ * Only allow async flips for fast updates that don't change
+ * the FB pitch, the DCC state, rotation, etc.
+ */
+ if (new_crtc_state->async_flip && lock_and_validation_needed) {
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] async flips are only supported for fast updates\n",
+ crtc->base.id, crtc->name);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dm_new_crtc_state->update_type = lock_and_validation_needed ?
+ UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
+ }
+
+	/* Must be successful at this point */
+ WARN_ON(ret);
+
+ trace_amdgpu_dm_atomic_check_finish(state, ret);
+
+ return ret;
+
+fail:
+ if (ret == -EDEADLK)
+ DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
+ else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
+ DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
+ else
+ DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
+
+ trace_amdgpu_dm_atomic_check_finish(state, ret);
+
+ return ret;
+}
+
+static bool is_dp_capable_without_timing_msa(struct dc *dc,
+ struct amdgpu_dm_connector *amdgpu_dm_connector)
+{
+ u8 dpcd_data;
+ bool capable = false;
+
+ if (amdgpu_dm_connector->dc_link &&
+ dm_helpers_dp_read_dpcd(
+ NULL,
+ amdgpu_dm_connector->dc_link,
+ DP_DOWN_STREAM_PORT_COUNT,
+ &dpcd_data,
+ sizeof(dpcd_data))) {
+		capable = !!(dpcd_data & DP_MSA_TIMING_PAR_IGNORED);
+ }
+
+ return capable;
+}
+
+static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
+ unsigned int offset,
+ unsigned int total_length,
+ u8 *data,
+ unsigned int length,
+ struct amdgpu_hdmi_vsdb_info *vsdb)
+{
+ bool res;
+ union dmub_rb_cmd cmd;
+ struct dmub_cmd_send_edid_cea *input;
+ struct dmub_cmd_edid_cea_output *output;
+
+ if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
+ return false;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ input = &cmd.edid_cea.data.input;
+
+ cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
+ cmd.edid_cea.header.sub_type = 0;
+ cmd.edid_cea.header.payload_bytes =
+ sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
+ input->offset = offset;
+ input->length = length;
+ input->cea_total_length = total_length;
+ memcpy(input->payload, data, length);
+
+ res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
+ if (!res) {
+ DRM_ERROR("EDID CEA parser failed\n");
+ return false;
+ }
+
+ output = &cmd.edid_cea.data.output;
+
+ if (output->type == DMUB_CMD__EDID_CEA_ACK) {
+ if (!output->ack.success) {
+ DRM_ERROR("EDID CEA ack failed at offset %d\n",
+ output->ack.offset);
+ }
+ } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
+ if (!output->amd_vsdb.vsdb_found)
+ return false;
+
+ vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
+ vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
+ vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
+ vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
+ } else {
+ DRM_WARN("Unknown EDID CEA parser results\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
+ u8 *edid_ext, int len,
+ struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ int i;
+
+ /* send extension block to DMCU for parsing */
+ for (i = 0; i < len; i += 8) {
+ bool res;
+ int offset;
+
+		/* send 8 bytes at a time */
+ if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
+ return false;
+
+		if (i + 8 == len) {
+			/* EDID block fully sent, expect the parse result */
+ int version, min_rate, max_rate;
+
+ res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
+ if (res) {
+ /* amd vsdb found */
+ vsdb_info->freesync_supported = 1;
+ vsdb_info->amd_vsdb_version = version;
+ vsdb_info->min_refresh_rate_hz = min_rate;
+ vsdb_info->max_refresh_rate_hz = max_rate;
+ return true;
+ }
+ /* not amd vsdb */
+ return false;
+ }
+
+		/* check for ack */
+ res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
+ if (!res)
+ return false;
+ }
+
+ return false;
+}
+
+static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
+ u8 *edid_ext, int len,
+ struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ int i;
+
+	/* send extension block to DMUB for parsing */
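+	/* e.g. a 128-byte CEA extension block is sent in 16 chunks of 8 bytes */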
+ for (i = 0; i < len; i += 8) {
+		/* send 8 bytes at a time */
+ if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
+ return false;
+ }
+
+ return vsdb_info->freesync_supported;
+}
+
+static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
+ u8 *edid_ext, int len,
+ struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
+ bool ret;
+
+ mutex_lock(&adev->dm.dc_lock);
+ if (adev->dm.dmub_srv)
+ ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
+ else
+ ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
+ mutex_unlock(&adev->dm.dc_lock);
+ return ret;
+}
+
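+/*
+ * Returns true if a version-3 AMD VSDB is found in the EDID extension block,
+ * false if none is found, or -ENODEV if the EDID has no extensions.
+ */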
+static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
+ struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ u8 *edid_ext = NULL;
+ int i;
+ int j = 0;
+
+ if (edid == NULL || edid->extensions == 0)
+ return -ENODEV;
+
+ /* Find DisplayID extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (void *)(edid + (i + 1));
+ if (edid_ext[0] == DISPLAYID_EXT)
+ break;
+ }
+
+ while (j < EDID_LENGTH) {
+ struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
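+		/* assemble the 24-bit IEEE OUI from its little-endian on-wire byte order */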
+ unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);
+
+ if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID &&
+ amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) {
+ vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ? true : false;
+ vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3;
+ DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode);
+
+ return true;
+ }
+ j++;
+ }
+
+ return false;
+}
+
+static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
+ struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ u8 *edid_ext = NULL;
+ int i;
+ bool valid_vsdb_found = false;
+
+ /*----- drm_find_cea_extension() -----*/
+ /* No EDID or EDID extensions */
+ if (edid == NULL || edid->extensions == 0)
+ return -ENODEV;
+
+ /* Find CEA extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
+ if (edid_ext[0] == CEA_EXT)
+ break;
+ }
+
+ if (i == edid->extensions)
+ return -ENODEV;
+
+ /*----- cea_db_offsets() -----*/
+ if (edid_ext[0] != CEA_EXT)
+ return -ENODEV;
+
+ valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
+
+ return valid_vsdb_found ? i : -ENODEV;
+}
+
+/**
+ * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
+ *
+ * @connector: Connector to query.
+ * @edid: EDID from monitor
+ *
+ * Amdgpu supports FreeSync on DP and HDMI displays, and we need to keep track
+ * of some of the display information in the internal data struct used by
+ * amdgpu_dm. This function checks the connector type to decide how to set the
+ * FreeSync parameters.
+ */
+void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
+ struct edid *edid)
+{
+ int i = 0;
+ struct detailed_timing *timing;
+ struct detailed_non_pixel *data;
+ struct detailed_data_monitor_range *range;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ struct dm_connector_state *dm_con_state = NULL;
+ struct dc_sink *sink;
+
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
+ struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
+ bool freesync_capable = false;
+ enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
+
+ if (!connector->state) {
+		DRM_ERROR("%s - Connector has no state\n", __func__);
+ goto update;
+ }
+
+ sink = amdgpu_dm_connector->dc_sink ?
+ amdgpu_dm_connector->dc_sink :
+ amdgpu_dm_connector->dc_em_sink;
+
+ if (!edid || !sink) {
+ dm_con_state = to_dm_connector_state(connector->state);
+
+ amdgpu_dm_connector->min_vfreq = 0;
+ amdgpu_dm_connector->max_vfreq = 0;
+ amdgpu_dm_connector->pixel_clock_mhz = 0;
+ connector->display_info.monitor_range.min_vfreq = 0;
+ connector->display_info.monitor_range.max_vfreq = 0;
+ freesync_capable = false;
+
+ goto update;
+ }
+
+ dm_con_state = to_dm_connector_state(connector->state);
+
+ if (!adev->dm.freesync_module)
+ goto update;
+
+ if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ sink->sink_signal == SIGNAL_TYPE_EDP)) {
+ bool edid_check_required = false;
+
+ if (is_dp_capable_without_timing_msa(adev->dm.dc,
+ amdgpu_dm_connector)) {
+ if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
+ freesync_capable = true;
+ amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+ } else {
+ edid_check_required = edid->version > 1 ||
+ (edid->version == 1 &&
+ edid->revision > 1);
+ }
+ }
+
+ if (edid_check_required) {
+ for (i = 0; i < 4; i++) {
+
+ timing = &edid->detailed_timings[i];
+ data = &timing->data.other_data;
+ range = &data->data.range;
+ /*
+				 * Check if the monitor has a continuous frequency mode
+ */
+ if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ continue;
+ /*
+				 * Check for the "range limits only" flag. If flags == 1
+				 * then no additional timing information is provided.
+				 * Default GTF, GTF secondary curve and CVT are not
+				 * supported
+ */
+ if (range->flags != 1)
+ continue;
+
+ connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
+ connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
+
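+				/*
+				 * EDID 1.4 can extend the range limits by an
+				 * extra 255 Hz when the offset flag bits are set
+				 */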
+ if (edid->revision >= 4) {
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
+ connector->display_info.monitor_range.min_vfreq += 255;
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
+ connector->display_info.monitor_range.max_vfreq += 255;
+ }
+
+ amdgpu_dm_connector->min_vfreq =
+ connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq =
+ connector->display_info.monitor_range.max_vfreq;
+ amdgpu_dm_connector->pixel_clock_mhz =
+ range->pixel_clock_mhz * 10;
+
+ break;
+ }
+
+ if (amdgpu_dm_connector->max_vfreq -
+ amdgpu_dm_connector->min_vfreq > 10) {
+
+ freesync_capable = true;
+ }
+ }
+ parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+
+ if (vsdb_info.replay_mode) {
+ amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode;
+ amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version;
+ amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP;
+ }
+
+ } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+ if (i >= 0 && vsdb_info.freesync_supported) {
+ timing = &edid->detailed_timings[i];
+ data = &timing->data.other_data;
+
+ amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
+ amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ freesync_capable = true;
+
+ connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
+ connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
+ }
+ }
+
+ as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);
+
+ if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
+ i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+ if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
+
+ amdgpu_dm_connector->pack_sdp_v1_3 = true;
+ amdgpu_dm_connector->as_type = as_type;
+ amdgpu_dm_connector->vsdb_info = vsdb_info;
+
+ amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
+ amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ freesync_capable = true;
+
+ connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
+ connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
+ }
+ }
+
+update:
+ if (dm_con_state)
+ dm_con_state->freesync_capable = freesync_capable;
+
+ if (connector->vrr_capable_property)
+ drm_connector_set_vrr_capable_property(connector,
+ freesync_capable);
+}
+
+void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = adev->dm.dc;
+ int i;
+
+ mutex_lock(&adev->dm.dc_lock);
+ if (dc->current_state) {
+ for (i = 0; i < dc->current_state->stream_count; ++i)
+ dc->current_state->streams[i]
+ ->triggered_crtc_reset.enabled =
+ adev->dm.force_timing_sync;
+
+ dm_enable_per_frame_crtc_master_sync(dc->current_state);
+ dc_trigger_sync(dc, dc->current_state);
+ }
+ mutex_unlock(&adev->dm.dc_lock);
+}
+
+void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
+ u32 value, const char *func_name)
+{
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ drm_err(adev_to_drm(ctx->driver_context),
+			"invalid register write. address = 0\n");
+ return;
+ }
+#endif
+ cgs_write_register(ctx->cgs_device, address, value);
+ trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
+}
+
+uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
+ const char *func_name)
+{
+ u32 value;
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ drm_err(adev_to_drm(ctx->driver_context),
+ "invalid register read; address = 0\n");
+ return 0;
+ }
+#endif
+
+ if (ctx->dmub_srv &&
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
+ !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
+ ASSERT(false);
+ return 0;
+ }
+
+ value = cgs_read_register(ctx->cgs_device, address);
+
+ trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
+
+ return value;
+}
+
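+/*
+ * Synchronously execute an AUX transaction over DMUB on a DPIA link.
+ * Returns the reply length on success, or -1 on failure with the detailed
+ * status reported through @operation_result.
+ */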
+int amdgpu_dm_process_dmub_aux_transfer_sync(
+ struct dc_context *ctx,
+ unsigned int link_index,
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct dmub_notification *p_notify = adev->dm.dmub_notify;
+ int ret = -1;
+
+ mutex_lock(&adev->dm.dpia_aux_lock);
+ if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
+ *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
+		DRM_ERROR("wait_for_completion_timeout timed out!\n");
+ *operation_result = AUX_RET_ERROR_TIMEOUT;
+ goto out;
+ }
+
+ if (p_notify->result != AUX_RET_SUCCESS) {
+ /*
+ * Transient states before tunneling is enabled could
+ * lead to this error. We can ignore this for now.
+ */
+ if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
+ DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
+ payload->address, payload->length,
+ p_notify->result);
+ }
+ *operation_result = AUX_RET_ERROR_INVALID_REPLY;
+ goto out;
+ }
+
+
+ payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
+ if (!payload->write && p_notify->aux_reply.length &&
+ (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
+
+ if (payload->length != p_notify->aux_reply.length) {
+ DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
+ p_notify->aux_reply.length,
+ payload->address, payload->length);
+ *operation_result = AUX_RET_ERROR_INVALID_REPLY;
+ goto out;
+ }
+
+ memcpy(payload->data, p_notify->aux_reply.data,
+ p_notify->aux_reply.length);
+ }
+
+ /* success */
+ ret = p_notify->aux_reply.length;
+ *operation_result = p_notify->result;
+out:
+ reinit_completion(&adev->dm.dmub_aux_transfer_done);
+ mutex_unlock(&adev->dm.dpia_aux_lock);
+ return ret;
+}
+
+int amdgpu_dm_process_dmub_set_config_sync(
+ struct dc_context *ctx,
+ unsigned int link_index,
+ struct set_config_cmd_payload *payload,
+ enum set_config_status *operation_result)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ bool is_cmd_complete;
+ int ret;
+
+ mutex_lock(&adev->dm.dpia_aux_lock);
+ is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
+ link_index, payload, adev->dm.dmub_notify);
+
+ if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
+ ret = 0;
+ *operation_result = adev->dm.dmub_notify->sc_status;
+ } else {
+		DRM_ERROR("wait_for_completion_timeout timed out!\n");
+ ret = -1;
+ *operation_result = SET_CONFIG_UNKNOWN_ERROR;
+ }
+
+ if (!is_cmd_complete)
+ reinit_completion(&adev->dm.dmub_aux_transfer_done);
+ mutex_unlock(&adev->dm.dpia_aux_lock);
+ return ret;
+}
+
+bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+{
+ return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
+}
+
+bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+{
+ return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
+}
diff --git a/rr-cache/d636b431e3d21b51122f1141ec3c0f2be3aba9cf/postimage.1 b/rr-cache/d636b431e3d21b51122f1141ec3c0f2be3aba9cf/postimage.1
new file mode 100644
index 000000000000..4d9a76446df8
--- /dev/null
+++ b/rr-cache/d636b431e3d21b51122f1141ec3c0f2be3aba9cf/postimage.1
@@ -0,0 +1,11567 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* The caprices of the preprocessor require that this be declared right here */
+#define CREATE_TRACE_POINTS
+
+#include "dm_services_types.h"
+#include "dc.h"
+#include "link_enc_cfg.h"
+#include "dc/inc/core_types.h"
+#include "dal_asic_id.h"
+#include "dmub/dmub_srv.h"
+#include "dc/inc/hw/dmcu.h"
+#include "dc/inc/hw/abm.h"
+#include "dc/dc_dmub_srv.h"
+#include "dc/dc_edid_parser.h"
+#include "dc/dc_stat.h"
+#include "dc/dc_state.h"
+#include "amdgpu_dm_trace.h"
+#include "dpcd_defs.h"
+#include "link/protocols/link_dpcd.h"
+#include "link_service_types.h"
+#include "link/protocols/link_dp_capability.h"
+#include "link/protocols/link_ddc.h"
+
+#include "vid.h"
+#include "amdgpu.h"
+#include "amdgpu_display.h"
+#include "amdgpu_ucode.h"
+#include "atom.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_plane.h"
+#include "amdgpu_dm_crtc.h"
+#include "amdgpu_dm_hdcp.h"
+#include <drm/display/drm_hdcp_helper.h>
+#include "amdgpu_dm_wb.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_atombios.h"
+
+#include "amd_shared.h"
+#include "amdgpu_dm_irq.h"
+#include "dm_helpers.h"
+#include "amdgpu_dm_mst_types.h"
+#if defined(CONFIG_DEBUG_FS)
+#include "amdgpu_dm_debugfs.h"
+#endif
+#include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/pm_runtime.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/component.h>
+#include <linux/dmi.h>
+
+#include <drm/display/drm_dp_mst_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_fixed.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_audio_component.h>
+#include <drm/drm_gem_atomic_helper.h>
+
+#include <acpi/video.h>
+
+#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
+
+#include "dcn/dcn_1_0_offset.h"
+#include "dcn/dcn_1_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "soc15_common.h"
+#include "vega10_ip_offset.h"
+
+#include "gc/gc_11_0_0_offset.h"
+#include "gc/gc_11_0_0_sh_mask.h"
+
+#include "modules/inc/mod_freesync.h"
+#include "modules/power/power_helpers.h"
+
+#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
+#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
+#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
+#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
+#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
+#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
+#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
+#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
+#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
+#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
+#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
+
+#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
+#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
+
+#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
+MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
+
+#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
+MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
+
+#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
+
+#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
+
+/* Number of bytes in PSP header for firmware. */
+#define PSP_HEADER_BYTES 0x100
+
+/* Number of bytes in PSP footer for firmware. */
+#define PSP_FOOTER_BYTES 0x100
+
+/**
+ * DOC: overview
+ *
+ * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
+ * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
+ * requests into DC requests, and DC responses into DRM responses.
+ *
+ * The root control structure is &struct amdgpu_display_manager.
+ */
+
+/* basic init/fini API */
+static int amdgpu_dm_init(struct amdgpu_device *adev);
+static void amdgpu_dm_fini(struct amdgpu_device *adev);
+static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
+
+static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
+{
+ switch (link->dpcd_caps.dongle_type) {
+ case DISPLAY_DONGLE_NONE:
+ return DRM_MODE_SUBCONNECTOR_Native;
+ case DISPLAY_DONGLE_DP_VGA_CONVERTER:
+ return DRM_MODE_SUBCONNECTOR_VGA;
+ case DISPLAY_DONGLE_DP_DVI_CONVERTER:
+ case DISPLAY_DONGLE_DP_DVI_DONGLE:
+ return DRM_MODE_SUBCONNECTOR_DVID;
+ case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
+ case DISPLAY_DONGLE_DP_HDMI_DONGLE:
+ return DRM_MODE_SUBCONNECTOR_HDMIA;
+ case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
+ default:
+ return DRM_MODE_SUBCONNECTOR_Unknown;
+ }
+}
+
+static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_link *link = aconnector->dc_link;
+ struct drm_connector *connector = &aconnector->base;
+ enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return;
+
+ if (aconnector->dc_sink)
+ subconnector = get_subconnector_type(link);
+
+ drm_object_property_set_value(&connector->base,
+ connector->dev->mode_config.dp_subconnector_property,
+ subconnector);
+}
+
+/*
+ * initializes drm_device display related structures, based on the information
+ * provided by DAL. The drm structures are: drm_crtc, drm_connector,
+ * drm_encoder, drm_mode_config
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
+/* removes and deallocates the drm structures, created by the above function */
+static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
+
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *amdgpu_dm_connector,
+ u32 link_index,
+ struct amdgpu_encoder *amdgpu_encoder);
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index);
+
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
+
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
+
+static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
+static void handle_hpd_rx_irq(void *param);
+
+static bool
+is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state);
+/*
+ * dm_vblank_get_counter
+ *
+ * @brief
+ * Get counter for number of vertical blanks
+ *
+ * @param
+ * struct amdgpu_device *adev - [in] desired amdgpu device
+ * int crtc - [in] which CRTC to get the counter from
+ *
+ * @return
+ * Counter for vertical blanks
+ */
+static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+ struct amdgpu_crtc *acrtc = NULL;
+
+ if (crtc >= adev->mode_info.num_crtc)
+ return 0;
+
+ acrtc = adev->mode_info.crtcs[crtc];
+
+ if (!acrtc->dm_irq_params.stream) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
+ }
+
+ return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
+}
+
+static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ u32 *vbl, u32 *position)
+{
+ u32 v_blank_start, v_blank_end, h_position, v_position;
+ struct amdgpu_crtc *acrtc = NULL;
+ struct dc *dc = adev->dm.dc;
+
+ if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+ return -EINVAL;
+
+ acrtc = adev->mode_info.crtcs[crtc];
+
+ if (!acrtc->dm_irq_params.stream) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
+ }
+
+ if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
+ dc_allow_idle_optimizations(dc, false);
+
+ /*
+ * TODO rework base driver to use values directly.
+ * for now parse it back into reg-format
+ */
+ dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
+ &v_blank_start,
+ &v_blank_end,
+ &h_position,
+ &v_position);
+
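+	/* Pack into the legacy register layout: vertical position in the low
+	 * 16 bits, horizontal position in the high 16 bits, and likewise for
+	 * vblank start/end.
+	 */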
+ *position = v_position | (h_position << 16);
+ *vbl = v_blank_start | (v_blank_end << 16);
+
+ return 0;
+}
+
+static bool dm_is_idle(void *handle)
+{
+ /* XXX todo */
+ return true;
+}
+
+static int dm_wait_for_idle(void *handle)
+{
+ /* XXX todo */
+ return 0;
+}
+
+static bool dm_check_soft_reset(void *handle)
+{
+ return false;
+}
+
+static int dm_soft_reset(void *handle)
+{
+ /* XXX todo */
+ return 0;
+}
+
+static struct amdgpu_crtc *
+get_crtc_by_otg_inst(struct amdgpu_device *adev,
+ int otg_inst)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_crtc *crtc;
+ struct amdgpu_crtc *amdgpu_crtc;
+
+ if (WARN_ON(otg_inst == -1))
+ return adev->mode_info.crtcs[0];
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (amdgpu_crtc->otg_inst == otg_inst)
+ return amdgpu_crtc;
+ }
+
+ return NULL;
+}
+
+static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
+ struct dm_crtc_state *new_state)
+{
+ if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
+ return true;
+ else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
+ return true;
+ else
+ return false;
+}
+
+static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
+ int planes_count)
+{
+ int i, j;
+
+ for (i = 0, j = planes_count - 1; i < j; i++, j--)
+ swap(array_of_surface_update[i], array_of_surface_update[j]);
+}
+
+/**
+ * update_planes_and_stream_adapter() - Send planes to be updated in DC
+ *
+ * DC has a generic way to update planes and stream via
+ * dc_update_planes_and_stream function; however, DM might need some
+ * adjustments and preparation before calling it. This function is a wrapper
+ * for the dc_update_planes_and_stream that does any required configuration
+ * before passing control to DC.
+ *
+ * @dc: Display Core control structure
+ * @update_type: specify whether it is FULL/MEDIUM/FAST update
+ * @planes_count: planes count to update
+ * @stream: stream state
+ * @stream_update: stream update
+ * @array_of_surface_update: dc surface update pointer
+ *
+ */
+static inline bool update_planes_and_stream_adapter(struct dc *dc,
+ int update_type,
+ int planes_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_surface_update *array_of_surface_update)
+{
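+	/* DM assembles the surface updates in the opposite order from what DC
+	 * consumes, so flip the array in place before handing it off.
+	 */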
+ reverse_planes_order(array_of_surface_update, planes_count);
+
+ /*
+ * Previous frame finished and HW is ready for optimization.
+ */
+ if (update_type == UPDATE_TYPE_FAST)
+ dc_post_update_surfaces_to_stream(dc);
+
+ return dc_update_planes_and_stream(dc,
+ array_of_surface_update,
+ planes_count,
+ stream,
+ stream_update);
+}
+
+/**
+ * dm_pflip_high_irq() - Handle pageflip interrupt
+ * @interrupt_params: ignored
+ *
+ * Handles the pageflip interrupt by notifying all interested parties
+ * that the pageflip has been completed.
+ */
+static void dm_pflip_high_irq(void *interrupt_params)
+{
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct drm_device *dev = adev_to_drm(adev);
+ unsigned long flags;
+ struct drm_pending_vblank_event *e;
+ u32 vpos, hpos, v_blank_start, v_blank_end;
+ bool vrr_active;
+
+ amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
+
+ /* IRQ could occur when in initial stage */
+ /* TODO work and BO cleanup */
+ if (amdgpu_crtc == NULL) {
+ drm_dbg_state(dev, "CRTC is null, returning.\n");
+ return;
+ }
+
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+ drm_dbg_state(dev,
+ "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
+ amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED,
+ amdgpu_crtc->crtc_id, amdgpu_crtc);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+ return;
+ }
+
+ /* page flip completed. */
+ e = amdgpu_crtc->event;
+ amdgpu_crtc->event = NULL;
+
+ WARN_ON(!e);
+
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);
+
+ /* Fixed refresh rate, or VRR scanout position outside front-porch? */
+ if (!vrr_active ||
+ !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
+ &v_blank_end, &hpos, &vpos) ||
+ (vpos < v_blank_start)) {
+ /* Update to correct count and vblank timestamp if racing with
+ * vblank irq. This also updates to the correct vblank timestamp
+ * even in VRR mode, as scanout is past the front-porch atm.
+ */
+ drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
+
+ /* Wake up userspace by sending the pageflip event with proper
+ * count and timestamp of vblank of flip completion.
+ */
+ if (e) {
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
+
+ /* Event sent, so done with vblank for this flip */
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
+ }
+ } else if (e) {
+ /* VRR active and inside front-porch: vblank count and
+ * timestamp for pageflip event will only be up to date after
+ * drm_crtc_handle_vblank() has been executed from late vblank
+ * irq handler after start of back-porch (vline 0). We queue the
+ * pageflip event for send-out by drm_crtc_handle_vblank() with
+ * updated timestamp and count, once it runs after us.
+ *
+ * We need to open-code this instead of using the helper
+ * drm_crtc_arm_vblank_event(), as that helper would
+ * call drm_crtc_accurate_vblank_count(), which we must
+ * not call in VRR mode while we are in front-porch!
+ */
+
+ /* sequence will be replaced by real count during send-out. */
+ e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
+ e->pipe = amdgpu_crtc->crtc_id;
+
+ list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
+ e = NULL;
+ }
+
+ /* Keep track of vblank of this flip for flip throttling. We use the
+ * cooked hw counter, as that one incremented at start of this vblank
+ * of pageflip completion, so last_flip_vblank is the forbidden count
+ * for queueing new pageflips if vsync + VRR is enabled.
+ */
+ amdgpu_crtc->dm_irq_params.last_flip_vblank =
+ amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
+
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+
+ drm_dbg_state(dev,
+ "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
+ amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
+}
+
+static void dm_vupdate_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_crtc *acrtc;
+ struct drm_device *drm_dev;
+ struct drm_vblank_crtc *vblank;
+ ktime_t frame_duration_ns, previous_timestamp;
+ unsigned long flags;
+ int vrr_active;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
+
+ if (acrtc) {
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
+ drm_dev = acrtc->base.dev;
+ vblank = &drm_dev->vblank[acrtc->base.index];
+ previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
+ frame_duration_ns = vblank->time - previous_timestamp;
+
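+		/* Track the measured refresh rate: the time elapsed between
+		 * the last two vblank timestamps gives the frame duration.
+		 */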
+ if (frame_duration_ns > 0) {
+ trace_amdgpu_refresh_rate_track(acrtc->base.index,
+ frame_duration_ns,
+ ktime_divns(NSEC_PER_SEC, frame_duration_ns));
+ atomic64_set(&irq_params->previous_timestamp, vblank->time);
+ }
+
+ drm_dbg_vbl(drm_dev,
+ "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
+ vrr_active);
+
+ /* Core vblank handling is done here after end of front-porch in
+ * vrr mode, as vblank timestamping will give valid results
+ * while now done after front-porch. This will also deliver
+ * page-flip completion events that have been queued to us
+ * if a pageflip happened inside front-porch.
+ */
+ if (vrr_active) {
+ amdgpu_dm_crtc_handle_vblank(acrtc);
+
+ /* BTR processing for pre-DCE12 ASICs */
+ if (acrtc->dm_irq_params.stream &&
+ adev->family < AMDGPU_FAMILY_AI) {
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ mod_freesync_handle_v_update(
+ adev->dm.freesync_module,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params);
+
+ dc_stream_adjust_vmin_vmax(
+ adev->dm.dc,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+ }
+ }
+ }
+}
+
+/**
+ * dm_crtc_high_irq() - Handles CRTC interrupt
+ * @interrupt_params: used for determining the CRTC instance
+ *
+ * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
+ * event handler.
+ */
+static void dm_crtc_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct drm_writeback_job *job;
+ struct amdgpu_crtc *acrtc;
+ unsigned long flags;
+ int vrr_active;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
+ if (!acrtc)
+ return;
+
+ if (acrtc->wb_pending) {
+ if (acrtc->wb_conn) {
+ spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);
+ job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
+ struct drm_writeback_job,
+ list_entry);
+ spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
+
+ if (job) {
+ unsigned int v_total, refresh_hz;
+ struct dc_stream_state *stream = acrtc->dm_irq_params.stream;
+
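+				/* Derive the refresh rate from the pixel clock
+				 * and total timing, then wait roughly one frame
+				 * so the writeback completes before it is
+				 * signaled and disabled.
+				 */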
+ v_total = stream->adjust.v_total_max ?
+ stream->adjust.v_total_max : stream->timing.v_total;
+ refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz *
+ 100LL, (v_total * stream->timing.h_total));
+ mdelay(1000 / refresh_hz);
+
+ drm_writeback_signal_completion(acrtc->wb_conn, 0);
+ dc_stream_fc_disable_writeback(adev->dm.dc,
+ acrtc->dm_irq_params.stream, 0);
+ }
+		} else {
+			DRM_ERROR("%s: no amdgpu_crtc wb_conn\n", __func__);
+		}
+ acrtc->wb_pending = false;
+ }
+
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
+
+ drm_dbg_vbl(adev_to_drm(adev),
+ "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
+ vrr_active, acrtc->dm_irq_params.active_planes);
+
+ /**
+ * Core vblank handling at start of front-porch is only possible
+ * in non-vrr mode, as only there vblank timestamping will give
+ * valid results while done in front-porch. Otherwise defer it
+ * to dm_vupdate_high_irq after end of front-porch.
+ */
+ if (!vrr_active)
+ amdgpu_dm_crtc_handle_vblank(acrtc);
+
+ /**
+ * Following stuff must happen at start of vblank, for crc
+ * computation and below-the-range btr support in vrr mode.
+ */
+ amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
+
+ /* BTR updates need to happen before VUPDATE on Vega and above. */
+ if (adev->family < AMDGPU_FAMILY_AI)
+ return;
+
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+
+ if (acrtc->dm_irq_params.stream &&
+ acrtc->dm_irq_params.vrr_params.supported &&
+ acrtc->dm_irq_params.freesync_config.state ==
+ VRR_STATE_ACTIVE_VARIABLE) {
+ mod_freesync_handle_v_update(adev->dm.freesync_module,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params);
+
+ dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
+
+ /*
+ * If there aren't any active_planes then DCH HUBP may be clock-gated.
+ * In that case, pageflip completion interrupts won't fire and pageflip
+ * completion events won't get delivered. Prevent this by sending
+ * pending pageflip events from here if a flip is still pending.
+ *
+ * If any planes are enabled, use dm_pflip_high_irq() instead, to
+ * avoid race conditions between flip programming and completion,
+ * which could cause too early flip completion events.
+ */
+ if (adev->family >= AMDGPU_FAMILY_RV &&
+ acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
+ acrtc->dm_irq_params.active_planes == 0) {
+ if (acrtc->event) {
+ drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
+ acrtc->event = NULL;
+ drm_crtc_vblank_put(&acrtc->base);
+ }
+ acrtc->pflip_status = AMDGPU_FLIP_NONE;
+ }
+
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+}
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+/**
+ * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
+ * DCN generation ASICs
+ * @interrupt_params: interrupt parameters
+ *
+ * Used to set crc window/read out crc value at vertical line 0 position
+ */
+static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_crtc *acrtc;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
+
+ if (!acrtc)
+ return;
+
+ amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
+}
+#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
+
+/**
+ * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * DMUB AUX or SET_CONFIG command completion processing callback.
+ * Copies the dmub notification to DM, where it is read by the AUX command
+ * issuing thread, and signals the event to wake up that thread.
+ */
+static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
+{
+ if (adev->dm.dmub_notify)
+ memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
+ if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
+ complete(&adev->dm.dmub_aux_transfer_done);
+}
+
+/**
+ * dmub_hpd_callback - DMUB HPD interrupt processing callback.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * DMUB HPD interrupt processing callback. Gets the display index through the
+ * link index and calls the helper to do the processing.
+ */
+static void dmub_hpd_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct amdgpu_dm_connector *hpd_aconnector = NULL;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct dc_link *link;
+ u8 link_index = 0;
+ struct drm_device *dev;
+
+ if (adev == NULL)
+ return;
+
+ if (notify == NULL) {
+ DRM_ERROR("DMUB HPD callback notification was NULL");
+ return;
+ }
+
+	if (notify->link_index >= adev->dm.dc->link_count) {
+		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
+ return;
+ }
+
+ link_index = notify->link_index;
+ link = adev->dm.dc->links[link_index];
+ dev = adev->dm.ddev;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (link && aconnector->dc_link == link) {
+ if (notify->type == DMUB_NOTIFICATION_HPD)
+ DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
+ else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+ DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+ else
+ DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
+ notify->type, link_index);
+
+ hpd_aconnector = aconnector;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+
+ if (hpd_aconnector) {
+ if (notify->type == DMUB_NOTIFICATION_HPD)
+ handle_hpd_irq_helper(hpd_aconnector);
+ else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+ handle_hpd_rx_irq(hpd_aconnector);
+ }
+}
+
+/**
+ * register_dmub_notify_callback - Sets callback for DMUB notify
+ * @adev: amdgpu_device pointer
+ * @type: Type of dmub notification
+ * @callback: Dmub interrupt callback function
+ * @dmub_int_thread_offload: offload indicator
+ *
+ * API to register a dmub callback handler for a dmub notification.
+ * Also sets an indicator for whether callback processing is to be offloaded
+ * to the dmub interrupt handling thread.
+ * Return: true if successfully registered, false if the callback is NULL or
+ * the notification type is out of range
+ */
+static bool register_dmub_notify_callback(struct amdgpu_device *adev,
+ enum dmub_notification_type type,
+ dmub_notify_interrupt_callback_t callback,
+ bool dmub_int_thread_offload)
+{
+ if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
+ adev->dm.dmub_callback[type] = callback;
+ adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
+	} else {
+		return false;
+	}
+
+ return true;
+}
+
+static void dm_handle_hpd_work(struct work_struct *work)
+{
+ struct dmub_hpd_work *dmub_hpd_wrk;
+
+ dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
+
+ if (!dmub_hpd_wrk->dmub_notify) {
+ DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
+ return;
+ }
+
+ if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
+ dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
+ dmub_hpd_wrk->dmub_notify);
+ }
+
+ kfree(dmub_hpd_wrk->dmub_notify);
+ kfree(dmub_hpd_wrk);
+
+}
+
+#define DMUB_TRACE_MAX_READ 64
+/**
+ * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
+ * @interrupt_params: used for determining the Outbox instance
+ *
+ * Handles the Outbox interrupt by draining pending DMUB notifications
+ * and trace buffer entries.
+ */
+static void dm_dmub_outbox1_low_irq(void *interrupt_params)
+{
+ struct dmub_notification notify;
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dmcub_trace_buf_entry entry = { 0 };
+ u32 count = 0;
+ struct dmub_hpd_work *dmub_hpd_wrk;
+ struct dc_link *plink = NULL;
+
+ if (dc_enable_dmub_notifications(adev->dm.dc) &&
+ irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
+
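+		/* Drain the notification queue; each call pops one entry and
+		 * pending_notification indicates whether more remain.
+		 */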
+ do {
+ dc_stat_get_dmub_notification(adev->dm.dc, &notify);
+ if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
+ DRM_ERROR("DM: notify type %d invalid!", notify.type);
+ continue;
+ }
+ if (!dm->dmub_callback[notify.type]) {
+ DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
+ continue;
+ }
+ if (dm->dmub_thread_offload[notify.type] == true) {
+ dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
+ if (!dmub_hpd_wrk) {
+ DRM_ERROR("Failed to allocate dmub_hpd_wrk");
+ return;
+ }
+ dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
+ GFP_ATOMIC);
+ if (!dmub_hpd_wrk->dmub_notify) {
+ kfree(dmub_hpd_wrk);
+ DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
+ return;
+ }
+ INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
+ dmub_hpd_wrk->adev = adev;
+ if (notify.type == DMUB_NOTIFICATION_HPD) {
+ plink = adev->dm.dc->links[notify.link_index];
+ if (plink) {
+ plink->hpd_status =
+ notify.hpd_status == DP_HPD_PLUG;
+ }
+ }
+ queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
+ } else {
+ dm->dmub_callback[notify.type](adev, &notify);
+ }
+ } while (notify.pending_notification);
+ }
+
+
+ do {
+ if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
+ trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
+ entry.param0, entry.param1);
+
+ DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+ entry.trace_code, entry.tick_count, entry.param0, entry.param1);
+		} else {
+			break;
+		}
+
+ count++;
+
+ } while (count <= DMUB_TRACE_MAX_READ);
+
+ if (count > DMUB_TRACE_MAX_READ)
+		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
+}
+
+static int dm_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int dm_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+/* Prototypes of private functions */
+static int dm_early_init(void *handle);
+
+/* Allocate memory for FBC compressed data */
+static void amdgpu_dm_fbc_init(struct drm_connector *connector)
+{
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
+ struct dm_compressor_info *compressor = &adev->dm.compressor;
+ struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
+ struct drm_display_mode *mode;
+ unsigned long max_size = 0;
+
+ if (adev->dm.dc->fbc_compressor == NULL)
+ return;
+
+ if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
+ return;
+
+ if (compressor->bo_ptr)
+ return;
+
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (max_size < mode->htotal * mode->vtotal)
+ max_size = mode->htotal * mode->vtotal;
+ }
+
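+	/* Allocate enough for the largest mode at 4 bytes per pixel. */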
+ if (max_size) {
+ int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
+ &compressor->gpu_addr, &compressor->cpu_addr);
+
+ if (r)
+ DRM_ERROR("DM: Failed to initialize FBC\n");
+ else {
+ adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
+ DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
+ }
+
+ }
+
+}
+
+static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
+ int pipe, bool *enabled,
+ unsigned char *buf, int max_bytes)
+{
+ struct drm_device *dev = dev_get_drvdata(kdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct amdgpu_dm_connector *aconnector;
+ int ret = 0;
+
+ *enabled = false;
+
+ mutex_lock(&adev->dm.audio_lock);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->audio_inst != port)
+ continue;
+
+ *enabled = true;
+ ret = drm_eld_size(connector->eld);
+ memcpy(buf, connector->eld, min(max_bytes, ret));
+
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ mutex_unlock(&adev->dm.audio_lock);
+
+ DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
+
+ return ret;
+}
+
+static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
+ .get_eld = amdgpu_dm_audio_component_get_eld,
+};
+
+static int amdgpu_dm_audio_component_bind(struct device *kdev,
+ struct device *hda_kdev, void *data)
+{
+ struct drm_device *dev = dev_get_drvdata(kdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct drm_audio_component *acomp = data;
+
+ acomp->ops = &amdgpu_dm_audio_component_ops;
+ acomp->dev = kdev;
+ adev->dm.audio_component = acomp;
+
+ return 0;
+}
+
+static void amdgpu_dm_audio_component_unbind(struct device *kdev,
+ struct device *hda_kdev, void *data)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev));
+ struct drm_audio_component *acomp = data;
+
+ acomp->ops = NULL;
+ acomp->dev = NULL;
+ adev->dm.audio_component = NULL;
+}
+
+static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
+ .bind = amdgpu_dm_audio_component_bind,
+ .unbind = amdgpu_dm_audio_component_unbind,
+};
+
+static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
+{
+ int i, ret;
+
+ if (!amdgpu_audio)
+ return 0;
+
+ adev->mode_info.audio.enabled = true;
+
+ adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ adev->mode_info.audio.pin[i].channels = -1;
+ adev->mode_info.audio.pin[i].rate = -1;
+ adev->mode_info.audio.pin[i].bits_per_sample = -1;
+ adev->mode_info.audio.pin[i].status_bits = 0;
+ adev->mode_info.audio.pin[i].category_code = 0;
+ adev->mode_info.audio.pin[i].connected = false;
+ adev->mode_info.audio.pin[i].id =
+ adev->dm.dc->res_pool->audios[i]->inst;
+ adev->mode_info.audio.pin[i].offset = 0;
+ }
+
+ ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
+ if (ret < 0)
+ return ret;
+
+ adev->dm.audio_registered = true;
+
+ return 0;
+}
+
+static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_audio)
+ return;
+
+ if (!adev->mode_info.audio.enabled)
+ return;
+
+ if (adev->dm.audio_registered) {
+ component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
+ adev->dm.audio_registered = false;
+ }
+
+ /* TODO: Disable audio? */
+
+ adev->mode_info.audio.enabled = false;
+}
+
+static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
+{
+ struct drm_audio_component *acomp = adev->dm.audio_component;
+
+ if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
+ DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
+
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ pin, -1);
+ }
+}
+
+static int dm_dmub_hw_init(struct amdgpu_device *adev)
+{
+ const struct dmcub_firmware_header_v1_0 *hdr;
+ struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+ struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+ const struct firmware *dmub_fw = adev->dm.dmub_fw;
+ struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
+ struct abm *abm = adev->dm.dc->res_pool->abm;
+ struct dc_context *ctx = adev->dm.dc->ctx;
+ struct dmub_srv_hw_params hw_params;
+ enum dmub_status status;
+ const unsigned char *fw_inst_const, *fw_bss_data;
+ u32 i, fw_inst_const_size, fw_bss_data_size;
+ bool has_hw_support;
+
+ if (!dmub_srv)
+ /* DMUB isn't supported on the ASIC. */
+ return 0;
+
+ if (!fb_info) {
+ DRM_ERROR("No framebuffer info for DMUB service.\n");
+ return -EINVAL;
+ }
+
+ if (!dmub_fw) {
+ /* Firmware required for DMUB support. */
+ DRM_ERROR("No firmware provided for DMUB.\n");
+ return -EINVAL;
+ }
+
+ /* initialize register offsets for ASICs with runtime initialization available */
+ if (dmub_srv->hw_funcs.init_reg_offsets)
+ dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx);
+
+ status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
+ return -EINVAL;
+ }
+
+ if (!has_hw_support) {
+ DRM_INFO("DMUB unsupported on ASIC\n");
+ return 0;
+ }
+
+ /* Reset DMCUB if it was previously running - before we overwrite its memory. */
+ status = dmub_srv_hw_reset(dmub_srv);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Error resetting DMUB HW: %d\n", status);
+
+ hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
+
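+	/* The firmware image wraps the inst_const section in a PSP header and
+	 * footer; skip the header here and subtract both when sizing the copy
+	 * below.
+	 */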
+ fw_inst_const = dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ PSP_HEADER_BYTES;
+
+ fw_bss_data = dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ le32_to_cpu(hdr->inst_const_bytes);
+
+ /* Copy firmware and bios info into FB memory. */
+ fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+ PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+
+ fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+
+ /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
+ * amdgpu_ucode_init_single_fw will load dmub firmware
+ * fw_inst_const part to cw0; otherwise, the firmware back door load
+ * will be done by dm_dmub_hw_init
+ */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
+ fw_inst_const_size);
+ }
+
+ if (fw_bss_data_size)
+ memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
+ fw_bss_data, fw_bss_data_size);
+
+ /* Copy firmware bios info into FB memory. */
+ memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
+ adev->bios_size);
+
+ /* Reset regions that need to be reset. */
+ memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
+
+ memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
+
+ memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
+
+ /* Initialize hardware. */
+ memset(&hw_params, 0, sizeof(hw_params));
+ hw_params.fb_base = adev->gmc.fb_start;
+ hw_params.fb_offset = adev->vm_manager.vram_base_offset;
+
+ /* backdoor load firmware and trigger dmub running */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ hw_params.load_inst_const = true;
+
+ if (dmcu)
+ hw_params.psp_version = dmcu->psp_version;
+
+ for (i = 0; i < fb_info->num_fb; ++i)
+ hw_params.fb[i] = &fb_info->fb[i];
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ hw_params.dpia_supported = true;
+ hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
+ break;
+ default:
+ break;
+ }
+
+ status = dmub_srv_hw_init(dmub_srv, &hw_params);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error initializing DMUB HW: %d\n", status);
+ return -EINVAL;
+ }
+
+ /* Wait for firmware load to finish. */
+ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+
+ /* Init DMCU and ABM if available. */
+ if (dmcu && abm) {
+ dmcu->funcs->dmcu_init(dmcu);
+ abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
+ }
+
+ if (!adev->dm.dc->ctx->dmub_srv)
+ adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+ if (!adev->dm.dc->ctx->dmub_srv) {
+ DRM_ERROR("Couldn't allocate DC DMUB server!\n");
+ return -ENOMEM;
+ }
+
+ DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
+ adev->dm.dmcub_fw_version);
+
+ return 0;
+}
+
+static void dm_dmub_hw_resume(struct amdgpu_device *adev)
+{
+ struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+ enum dmub_status status;
+ bool init;
+
+ if (!dmub_srv) {
+ /* DMUB isn't supported on the ASIC. */
+ return;
+ }
+
+ status = dmub_srv_is_hw_init(dmub_srv, &init);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("DMUB hardware init check failed: %d\n", status);
+
+ if (status == DMUB_STATUS_OK && init) {
+ /* Wait for firmware load to finish. */
+ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ } else {
+ /* Perform the full hardware initialization. */
+ dm_dmub_hw_init(adev);
+ }
+}
+
+static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
+{
+ u64 pt_base;
+ u32 logical_addr_low;
+ u32 logical_addr_high;
+ u32 agp_base, agp_bot, agp_top;
+ PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
+
+ memset(pa_config, 0, sizeof(*pa_config));
+
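+	/* AGP aperture values are tracked at 16 MiB granularity (>> 24) and
+	 * the system/logical aperture at 256 KiB granularity (>> 18); both
+	 * are shifted back up when pa_config is filled in below.
+	 */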
+ agp_base = 0;
+ agp_bot = adev->gmc.agp_start >> 24;
+ agp_top = adev->gmc.agp_end >> 24;
+
+ /* AGP aperture is disabled */
+ if (agp_bot > agp_top) {
+ logical_addr_low = adev->gmc.fb_start >> 18;
+ if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+ AMD_APU_IS_RENOIR |
+ AMD_APU_IS_GREEN_SARDINE))
+			/*
+			 * Raven2 has a HW issue preventing it from using the
+			 * vram that lies beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
+			 * Work around it by increasing the system aperture high
+			 * address (add 1) to avoid the VM fault and hardware
+			 * hang.
+			 */
+ logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
+ else
+ logical_addr_high = adev->gmc.fb_end >> 18;
+ } else {
+ logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
+ if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+ AMD_APU_IS_RENOIR |
+ AMD_APU_IS_GREEN_SARDINE))
+			/*
+			 * Raven2 has a HW issue preventing it from using the
+			 * vram that lies beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
+			 * Work around it by increasing the system aperture high
+			 * address (add 1) to avoid the VM fault and hardware
+			 * hang.
+			 */
+ logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
+ else
+ logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
+ }
+
+ pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_base.high_part = upper_32_bits(pt_base);
+ page_table_base.low_part = lower_32_bits(pt_base);
+
+ pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
+ pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
+
+ pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
+ pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
+ pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
+
+ pa_config->system_aperture.fb_base = adev->gmc.fb_start;
+ pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
+ pa_config->system_aperture.fb_top = adev->gmc.fb_end;
+
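+	/* page_table_{start,end} were computed in GPU page units (4 KiB);
+	 * shift left by 12 to hand DC byte addresses.
+	 */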
+ pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
+ pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
+ pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
+
+ pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
+
+}
+
+static void force_connector_state(
+ struct amdgpu_dm_connector *aconnector,
+ enum drm_connector_force force_state)
+{
+ struct drm_connector *connector = &aconnector->base;
+
+ mutex_lock(&connector->dev->mode_config.mutex);
+ aconnector->base.force = force_state;
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
+ mutex_lock(&aconnector->hpd_lock);
+ drm_kms_helper_connector_hotplug_event(connector);
+ mutex_unlock(&aconnector->hpd_lock);
+}
+
+static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
+{
+ struct hpd_rx_irq_offload_work *offload_work;
+ struct amdgpu_dm_connector *aconnector;
+ struct dc_link *dc_link;
+ struct amdgpu_device *adev;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ unsigned long flags;
+ union test_response test_response;
+
+ memset(&test_response, 0, sizeof(test_response));
+
+ offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
+ aconnector = offload_work->offload_wq->aconnector;
+
+ if (!aconnector) {
+ DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
+ goto skip;
+ }
+
+ adev = drm_to_adev(aconnector->base.dev);
+ dc_link = aconnector->dc_link;
+
+ mutex_lock(&aconnector->hpd_lock);
+ if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+ mutex_unlock(&aconnector->hpd_lock);
+
+ if (new_connection_type == dc_connection_none)
+ goto skip;
+
+ if (amdgpu_in_reset(adev))
+ goto skip;
+
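+	/* MST sideband message ready events are handled here and short-circuit
+	 * the remaining HPD RX processing.
+	 */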
+ if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+ offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
+ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+ offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
+ spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+ goto skip;
+ }
+
+ mutex_lock(&adev->dm.dc_lock);
+ if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+ dc_link_dp_handle_automated_test(dc_link);
+
+ if (aconnector->timing_changed) {
+ /* force connector disconnect and reconnect */
+ force_connector_state(aconnector, DRM_FORCE_OFF);
+ msleep(100);
+ force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
+ }
+
+ test_response.bits.ACK = 1;
+
+ core_link_write_dpcd(
+ dc_link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+ } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+ dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
+ dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+		/* offload_work->data comes from handle_hpd_rx_irq->
+		 * schedule_hpd_rx_offload_work; this is the deferred handler
+		 * for an hpd short pulse. By this point the link status may
+		 * have changed, so read the latest link status from the dpcd
+		 * registers. If the link status is good, skip running link
+		 * training again.
+		 */
+ union hpd_irq_data irq_data;
+
+ memset(&irq_data, 0, sizeof(irq_data));
+
+		/* Before dc_link_dp_handle_link_loss, allow a new link-loss
+		 * handling request to be queued in case the link is lost again
+		 * at the end of dc_link_dp_handle_link_loss.
+		 */
+ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+ offload_work->offload_wq->is_handling_link_loss = false;
+ spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+
+ if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
+ dc_link_check_link_loss_status(dc_link, &irq_data))
+ dc_link_dp_handle_link_loss(dc_link);
+ }
+ mutex_unlock(&adev->dm.dc_lock);
+
+skip:
+ kfree(offload_work);
+
+}
+
+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
+{
+ int max_caps = dc->caps.max_links;
+ int i = 0;
+ struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
+
+ hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
+
+ if (!hpd_rx_offload_wq)
+ return NULL;
+
+
+ for (i = 0; i < max_caps; i++) {
+ hpd_rx_offload_wq[i].wq =
+ create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
+
+ if (hpd_rx_offload_wq[i].wq == NULL) {
+			DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq!");
+ goto out_err;
+ }
+
+ spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
+ }
+
+ return hpd_rx_offload_wq;
+
+out_err:
+ for (i = 0; i < max_caps; i++) {
+ if (hpd_rx_offload_wq[i].wq)
+ destroy_workqueue(hpd_rx_offload_wq[i].wq);
+ }
+ kfree(hpd_rx_offload_wq);
+ return NULL;
+}
+
+struct amdgpu_stutter_quirk {
+ u16 chip_vendor;
+ u16 chip_device;
+ u16 subsys_vendor;
+ u16 subsys_device;
+ u8 revision;
+};
+
+static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
+ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
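+	/* sentinel: terminates the list walk in dm_should_disable_stutter() */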
+ { 0, 0, 0, 0, 0 },
+};
+
+static bool dm_should_disable_stutter(struct pci_dev *pdev)
+{
+ const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
+
+ while (p && p->chip_device != 0) {
+ if (pdev->vendor == p->chip_vendor &&
+ pdev->device == p->chip_device &&
+ pdev->subsystem_vendor == p->subsys_vendor &&
+ pdev->subsystem_device == p->subsys_device &&
+ pdev->revision == p->revision) {
+ return true;
+ }
+ ++p;
+ }
+ return false;
+}
+
+static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
+ },
+ },
+ {}
+ /* TODO: refactor this from a fixed table to a dynamic option */
+};
+
+static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+{
+ const struct dmi_system_id *dmi_id;
+
+ dm->aux_hpd_discon_quirk = false;
+
+ dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
+ if (dmi_id) {
+ dm->aux_hpd_discon_quirk = true;
+ DRM_INFO("aux_hpd_discon_quirk attached\n");
+ }
+}
+
+static int amdgpu_dm_init(struct amdgpu_device *adev)
+{
+ struct dc_init_data init_data;
+ struct dc_callback_init init_params;
+ int r;
+
+ adev->dm.ddev = adev_to_drm(adev);
+ adev->dm.adev = adev;
+
+ /* Zero all the fields */
+ memset(&init_data, 0, sizeof(init_data));
+ memset(&init_params, 0, sizeof(init_params));
+
+ mutex_init(&adev->dm.dpia_aux_lock);
+ mutex_init(&adev->dm.dc_lock);
+ mutex_init(&adev->dm.audio_lock);
+
+ if (amdgpu_dm_irq_init(adev)) {
+ DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+ goto error;
+ }
+
+ init_data.asic_id.chip_family = adev->family;
+
+ init_data.asic_id.pci_revision_id = adev->pdev->revision;
+ init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+ init_data.asic_id.chip_id = adev->pdev->device;
+
+ init_data.asic_id.vram_width = adev->gmc.vram_width;
+ /* TODO: initialize init_data.asic_id.vram_type here!!!! */
+ init_data.asic_id.atombios_base_address =
+ adev->mode_info.atom_context->bios;
+
+ init_data.driver = adev;
+
+ adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
+
+ if (!adev->dm.cgs_device) {
+ DRM_ERROR("amdgpu: failed to create cgs device.\n");
+ goto error;
+ }
+
+ init_data.cgs_device = adev->dm.cgs_device;
+
+ init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 1, 0):
+ switch (adev->dm.dmcub_fw_version) {
+ case 0: /* development */
+ case 0x1: /* linux-firmware.git hash 6d9f399 */
+ case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
+ init_data.flags.disable_dmcu = false;
+ break;
+ default:
+ init_data.flags.disable_dmcu = true;
+ }
+ break;
+ case IP_VERSION(2, 0, 3):
+ init_data.flags.disable_dmcu = true;
+ break;
+ default:
+ break;
+ }
+
+	/* APUs support S/G display by default, except:
+	 * ASICs before Carrizo, and
+	 * RAVEN1 (users reported stability issues)
+	 */
+
+ if (adev->asic_type < CHIP_CARRIZO) {
+ init_data.flags.gpu_vm_support = false;
+ } else if (adev->asic_type == CHIP_RAVEN) {
+ if (adev->apu_flags & AMD_APU_IS_RAVEN)
+ init_data.flags.gpu_vm_support = false;
+ else
+ init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
+ } else {
+ init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
+ }
+
+ adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;
+
+ if (amdgpu_dc_feature_mask & DC_FBC_MASK)
+ init_data.flags.fbc_support = true;
+
+ if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
+ init_data.flags.multi_mon_pp_mclk_switch = true;
+
+ if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
+ init_data.flags.disable_fractional_pwm = true;
+
+ if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
+ init_data.flags.edp_no_power_sequencing = true;
+
+ if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
+ init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
+ if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
+ init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
+
+ init_data.flags.seamless_boot_edp_requested = false;
+
+ if (amdgpu_device_seamless_boot_supported(adev)) {
+ init_data.flags.seamless_boot_edp_requested = true;
+ init_data.flags.allow_seamless_boot_optimization = true;
+ DRM_INFO("Seamless boot condition check passed\n");
+ }
+
+ init_data.flags.enable_mipi_converter_optimization = true;
+
+ init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
+ init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
+ init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
+ init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+ else
+ init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+
+ init_data.flags.disable_ips_in_vpb = 0;
+
+ /* Enable DWB for tested platforms only */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
+ init_data.num_virtual_links = 1;
+
+ INIT_LIST_HEAD(&adev->dm.da_list);
+
+ retrieve_dmi_info(&adev->dm);
+
+ /* Display Core create. */
+ adev->dm.dc = dc_create(&init_data);
+
+ if (adev->dm.dc) {
+ DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
+ dce_version_to_string(adev->dm.dc->ctx->dce_version));
+ } else {
+ DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
+ goto error;
+ }
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
+ adev->dm.dc->debug.force_single_disp_pipe_split = false;
+ adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+ }
+
+ if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
+		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
+ if (dm_should_disable_stutter(adev->pdev))
+ adev->dm.dc->debug.disable_stutter = true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
+ adev->dm.dc->debug.disable_stutter = true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
+ adev->dm.dc->debug.disable_dsc = true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
+ adev->dm.dc->debug.disable_clock_gate = true;
+
+ if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
+ adev->dm.dc->debug.force_subvp_mclk_switch = true;
+
+ if (amdgpu_dc_debug_mask & DC_ENABLE_DML2)
+ adev->dm.dc->debug.using_dml2 = true;
+
+ adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
+
+ /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
+ adev->dm.dc->debug.ignore_cable_id = true;
+
+ if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
+ DRM_INFO("DP-HDMI FRL PCON supported\n");
+
+ r = dm_dmub_hw_init(adev);
+ if (r) {
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ goto error;
+ }
+
+ dc_hardware_init(adev->dm.dc);
+
+ adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
+ if (!adev->dm.hpd_rx_offload_wq) {
+ DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
+ goto error;
+ }
+
+ if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
+ struct dc_phy_addr_space_config pa_config;
+
+ mmhub_read_system_context(adev, &pa_config);
+
+		/* Call the DC init_memory func */
+ dc_setup_system_context(adev->dm.dc, &pa_config);
+ }
+
+ adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
+ if (!adev->dm.freesync_module) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize freesync_module.\n");
+	} else {
+		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
+				 adev->dm.freesync_module);
+	}
+
+ amdgpu_dm_init_color_mod();
+
+ if (adev->dm.dc->caps.max_links > 0) {
+ adev->dm.vblank_control_workqueue =
+ create_singlethread_workqueue("dm_vblank_control_workqueue");
+ if (!adev->dm.vblank_control_workqueue)
+ DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
+ }
+
+ if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
+ adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
+
+ if (!adev->dm.hdcp_workqueue)
+ DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
+ else
+ DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
+
+ dc_init_callbacks(adev->dm.dc, &init_params);
+ }
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ init_completion(&adev->dm.dmub_aux_transfer_done);
+ adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
+ if (!adev->dm.dmub_notify) {
+			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify");
+ goto error;
+ }
+
+ adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
+ if (!adev->dm.delayed_hpd_wq) {
+ DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
+ goto error;
+ }
+
+ amdgpu_dm_outbox_init(adev);
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
+ dmub_aux_setconfig_callback, false)) {
+			DRM_ERROR("amdgpu: failed to register dmub aux callback");
+ goto error;
+ }
+		/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+		 * It is expected that DMUB will resend any pending notifications at this point. Note
+		 * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to
+		 * align with the legacy interface initialization sequence. Connection status will be
+		 * proactively detected once in amdgpu_dm_initialize_drm_device.
+		 */
+ dc_enable_dmub_outbox(adev->dm.dc);
+
+ /* DPIA trace goes to dmesg logs only if outbox is enabled */
+ if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE)
+ dc_dmub_srv_enable_dpia_trace(adev->dm.dc);
+ }
+
+ if (amdgpu_dm_initialize_drm_device(adev)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
+ /* create fake encoders for MST */
+ dm_dp_create_fake_mst_encoders(adev);
+
+ /* TODO: Add_display_info? */
+
+ /* TODO use dynamic cursor width */
+ adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
+ adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
+
+ if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
+ if (!adev->dm.secure_display_ctxs)
+ DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+#endif
+
+ DRM_DEBUG_DRIVER("KMS initialized.\n");
+
+ return 0;
+error:
+ amdgpu_dm_fini(adev);
+
+ return -EINVAL;
+}
+
+static int amdgpu_dm_early_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_dm_audio_fini(adev);
+
+ return 0;
+}
+
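+/*
+ * Tear down the display manager: destroy the workqueues, the DRM device
+ * state, the HDCP and DMUB resources, the DMUB framebuffer BO, and
+ * finally the DC instance itself, roughly in the reverse order of their
+ * creation in amdgpu_dm_init().
+ */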
+static void amdgpu_dm_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (adev->dm.vblank_control_workqueue) {
+ destroy_workqueue(adev->dm.vblank_control_workqueue);
+ adev->dm.vblank_control_workqueue = NULL;
+ }
+
+ amdgpu_dm_destroy_drm_device(&adev->dm);
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (adev->dm.secure_display_ctxs) {
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (adev->dm.secure_display_ctxs[i].crtc) {
+ flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
+ flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+ }
+ }
+ kfree(adev->dm.secure_display_ctxs);
+ adev->dm.secure_display_ctxs = NULL;
+ }
+#endif
+ if (adev->dm.hdcp_workqueue) {
+ hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
+ adev->dm.hdcp_workqueue = NULL;
+ }
+
+ if (adev->dm.dc) {
+ dc_deinit_callbacks(adev->dm.dc);
+ dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
+ if (dc_enable_dmub_notifications(adev->dm.dc)) {
+ kfree(adev->dm.dmub_notify);
+ adev->dm.dmub_notify = NULL;
+ destroy_workqueue(adev->dm.delayed_hpd_wq);
+ adev->dm.delayed_hpd_wq = NULL;
+ }
+ }
+
+ if (adev->dm.dmub_bo)
+ amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
+ &adev->dm.dmub_bo_gpu_addr,
+ &adev->dm.dmub_bo_cpu_addr);
+
+ if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) {
+ for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+ if (adev->dm.hpd_rx_offload_wq[i].wq) {
+ destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
+ adev->dm.hpd_rx_offload_wq[i].wq = NULL;
+ }
+ }
+
+ kfree(adev->dm.hpd_rx_offload_wq);
+ adev->dm.hpd_rx_offload_wq = NULL;
+ }
+
+ /* DC Destroy TODO: Replace destroy DAL */
+ if (adev->dm.dc)
+ dc_destroy(&adev->dm.dc);
+ /*
+ * TODO: pageflip, vblank interrupt
+ *
+ * amdgpu_dm_irq_fini(adev);
+ */
+
+ if (adev->dm.cgs_device) {
+ amdgpu_cgs_destroy_device(adev->dm.cgs_device);
+ adev->dm.cgs_device = NULL;
+ }
+ if (adev->dm.freesync_module) {
+ mod_freesync_destroy(adev->dm.freesync_module);
+ adev->dm.freesync_module = NULL;
+ }
+
+ mutex_destroy(&adev->dm.audio_lock);
+ mutex_destroy(&adev->dm.dc_lock);
+ mutex_destroy(&adev->dm.dpia_aux_lock);
+}
+
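+/*
+ * Request the external DMCU firmware image where one is required (Raven
+ * Picasso/Raven2 and Navi12 here); for the other listed ASICs this
+ * returns 0 without loading anything. The image is split into ERAM and
+ * INTV ucode entries and handed to PSP for loading.
+ */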
+static int load_dmcu_fw(struct amdgpu_device *adev)
+{
+ const char *fw_name_dmcu = NULL;
+ int r;
+ const struct dmcu_firmware_header_v1_0 *hdr;
+
+ switch (adev->asic_type) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+#endif
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ return 0;
+ case CHIP_NAVI12:
+ fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
+ break;
+ case CHIP_RAVEN:
+ if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
+ ASICREV_IS_RAVEN2(adev->external_rev_id))
+ fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
+ else
+ return 0;
+ break;
+ default:
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ return 0;
+ default:
+ break;
+ }
+ DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
+ return -EINVAL;
+ }
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
+ return 0;
+ }
+
+ r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
+ if (r == -ENODEV) {
+ /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
+ DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
+ adev->dm.fw_dmcu = NULL;
+ return 0;
+ }
+ if (r) {
+ dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
+ fw_name_dmcu);
+ amdgpu_ucode_release(&adev->dm.fw_dmcu);
+ return r;
+ }
+
+ hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
+
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
+
+ adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
+
+ return 0;
+}
+
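+/*
+ * Register access helpers handed to the DMUB service so that it can
+ * perform MMIO reads and writes through DC's register interface.
+ */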
+static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
+{
+ struct amdgpu_device *adev = ctx;
+
+ return dm_read_reg(adev->dm.dc->ctx, address);
+}
+
+static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
+ uint32_t value)
+{
+ struct amdgpu_device *adev = ctx;
+
+ dm_write_reg(adev->dm.dc->ctx, address, value);
+}
+
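+/*
+ * Software init for the DMUB service: map the DCE IP version to a DMUB
+ * ASIC id, parse the firmware header (handing the instruction constants
+ * to PSP when PSP loading is used), create the service, size its memory
+ * regions, back them with a single kernel BO, and compute the
+ * per-window framebuffer info.
+ */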
+static int dm_dmub_sw_init(struct amdgpu_device *adev)
+{
+ struct dmub_srv_create_params create_params;
+ struct dmub_srv_region_params region_params;
+ struct dmub_srv_region_info region_info;
+ struct dmub_srv_memory_params memory_params;
+ struct dmub_srv_fb_info *fb_info;
+ struct dmub_srv *dmub_srv;
+ const struct dmcub_firmware_header_v1_0 *hdr;
+ enum dmub_asic dmub_asic;
+ enum dmub_status status;
+ static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = {
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_0_INST_CONST
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_1_STACK
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_2_BSS_DATA
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_3_VBIOS
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_4_MAILBOX
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE
+ };
+ int r;
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 1, 0):
+ dmub_asic = DMUB_ASIC_DCN21;
+ break;
+ case IP_VERSION(3, 0, 0):
+ dmub_asic = DMUB_ASIC_DCN30;
+ break;
+ case IP_VERSION(3, 0, 1):
+ dmub_asic = DMUB_ASIC_DCN301;
+ break;
+ case IP_VERSION(3, 0, 2):
+ dmub_asic = DMUB_ASIC_DCN302;
+ break;
+ case IP_VERSION(3, 0, 3):
+ dmub_asic = DMUB_ASIC_DCN303;
+ break;
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
+ break;
+ case IP_VERSION(3, 1, 4):
+ dmub_asic = DMUB_ASIC_DCN314;
+ break;
+ case IP_VERSION(3, 1, 5):
+ dmub_asic = DMUB_ASIC_DCN315;
+ break;
+ case IP_VERSION(3, 1, 6):
+ dmub_asic = DMUB_ASIC_DCN316;
+ break;
+ case IP_VERSION(3, 2, 0):
+ dmub_asic = DMUB_ASIC_DCN32;
+ break;
+ case IP_VERSION(3, 2, 1):
+ dmub_asic = DMUB_ASIC_DCN321;
+ break;
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ dmub_asic = DMUB_ASIC_DCN35;
+ break;
+ default:
+ /* ASIC doesn't support DMUB. */
+ return 0;
+ }
+
+ hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
+ adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
+ AMDGPU_UCODE_ID_DMCUB;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
+ adev->dm.dmub_fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
+
+ DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+ adev->dm.dmcub_fw_version);
+ }
+
+ adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
+ dmub_srv = adev->dm.dmub_srv;
+
+ if (!dmub_srv) {
+ DRM_ERROR("Failed to allocate DMUB service!\n");
+ return -ENOMEM;
+ }
+
+ memset(&create_params, 0, sizeof(create_params));
+ create_params.user_ctx = adev;
+ create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
+ create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
+ create_params.asic = dmub_asic;
+
+ /* Create the DMUB service. */
+ status = dmub_srv_create(dmub_srv, &create_params);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error creating DMUB service: %d\n", status);
+ return -EINVAL;
+ }
+
+ /* Calculate the size of all the regions for the DMUB service. */
+ memset(&region_params, 0, sizeof(region_params));
+
+ region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+ PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+ region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+ region_params.vbios_size = adev->bios_size;
+ region_params.fw_bss_data = region_params.bss_data_size ?
+ adev->dm.dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ le32_to_cpu(hdr->inst_const_bytes) : NULL;
+ region_params.fw_inst_const =
+ adev->dm.dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ PSP_HEADER_BYTES;
+ region_params.window_memory_type = window_memory_type;
+
+ status = dmub_srv_calc_region_info(dmub_srv, &region_params,
+ &region_info);
+
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error calculating DMUB region info: %d\n", status);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate a framebuffer based on the total size of all the regions.
+ * TODO: Move this into GART.
+ */
+ r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->dm.dmub_bo,
+ &adev->dm.dmub_bo_gpu_addr,
+ &adev->dm.dmub_bo_cpu_addr);
+ if (r)
+ return r;
+
+ /* Rebase the regions on the framebuffer address. */
+ memset(&memory_params, 0, sizeof(memory_params));
+ memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
+ memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
+ memory_params.region_info = &region_info;
+ memory_params.window_memory_type = window_memory_type;
+
+ adev->dm.dmub_fb_info =
+ kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
+ fb_info = adev->dm.dmub_fb_info;
+
+ if (!fb_info) {
+ DRM_ERROR(
+ "Failed to allocate framebuffer info for DMUB service!\n");
+ return -ENOMEM;
+ }
+
+ status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dm_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = dm_dmub_sw_init(adev);
+ if (r)
+ return r;
+
+ return load_dmcu_fw(adev);
+}
+
+static int dm_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ kfree(adev->dm.dmub_fb_info);
+ adev->dm.dmub_fb_info = NULL;
+
+ if (adev->dm.dmub_srv) {
+ dmub_srv_destroy(adev->dm.dmub_srv);
+ kfree(adev->dm.dmub_srv);
+ adev->dm.dmub_srv = NULL;
+ }
+
+ amdgpu_ucode_release(&adev->dm.dmub_fw);
+ amdgpu_ucode_release(&adev->dm.fw_dmcu);
+
+ return 0;
+}
+
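+/*
+ * Start topology management on every connector whose link was detected
+ * as an MST branch; on failure the link is downgraded to a single (SST)
+ * connection and the topology manager is stopped.
+ */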
+static int detect_mst_link_for_all_connectors(struct drm_device *dev)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ int ret = 0;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type == dc_connection_mst_branch &&
+ aconnector->mst_mgr.aux) {
+ DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+ aconnector,
+ aconnector->base.base.id);
+
+ ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+ if (ret < 0) {
+ DRM_ERROR("DM_MST: Failed to start MST\n");
+ aconnector->dc_link->type =
+ dc_connection_single;
+ ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link);
+ break;
+ }
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+
+ return ret;
+}
+
+static int dm_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ struct dmcu_iram_parameters params;
+ unsigned int linear_lut[16];
+ int i;
+ struct dmcu *dmcu = NULL;
+
+ dmcu = adev->dm.dc->res_pool->dmcu;
+
+ for (i = 0; i < 16; i++)
+ linear_lut[i] = 0xFFFF * i / 15;
+
+ params.set = 0;
+ params.backlight_ramping_override = false;
+ params.backlight_ramping_start = 0xCCCC;
+ params.backlight_ramping_reduction = 0xCCCCCCCC;
+ params.backlight_lut_array_size = 16;
+ params.backlight_lut_array = linear_lut;
+
+ /* Minimum backlight level after ABM reduction; don't allow below 1%:
+ * 0xFFFF * 0.01 = 0x28F
+ */
+ params.min_abm_backlight = 0x28F;
+ /* In the case where ABM is implemented in DMCUB, the dmcu object
+ * will be NULL; ABM 2.4 and up are implemented in DMCUB.
+ */
+ if (dmcu) {
+ if (!dmcu_load_iram(dmcu, params))
+ return -EINVAL;
+ } else if (adev->dm.dc->ctx->dmub_srv) {
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ int edp_num;
+
+ dc_get_edp_links(adev->dm.dc, edp_links, &edp_num);
+ for (i = 0; i < edp_num; i++) {
+ if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
+ return -EINVAL;
+ }
+ }
+
+ return detect_mst_link_for_all_connectors(adev_to_drm(adev));
+}
+
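+/*
+ * Re-arm the MST primary branch after suspend: re-read the DPCD caps,
+ * re-enable MST and up-request handling, and restore the branch GUID
+ * (synthesizing one from jiffies if the hub lost it).
+ */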
+static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
+{
+ int ret;
+ u8 guid[16];
+ u64 tmp64;
+
+ mutex_lock(&mgr->lock);
+ if (!mgr->mst_primary)
+ goto out_fail;
+
+ if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
+ drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+
+ /* Some hubs forget their GUIDs after they resume */
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (ret != 16) {
+ drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+
+ if (memchr_inv(guid, 0, 16) == NULL) {
+ tmp64 = get_jiffies_64();
+ memcpy(&guid[0], &tmp64, sizeof(u64));
+ memcpy(&guid[8], &tmp64, sizeof(u64));
+
+ ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
+
+ if (ret != 16) {
+ drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+ }
+
+ memcpy(mgr->mst_primary->guid, guid, 16);
+
+out_fail:
+ mutex_unlock(&mgr->lock);
+}
+
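+/*
+ * Suspend or resume the topology manager of every MST root connector
+ * around an S3 transition.
+ */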
+static void s3_handle_mst(struct drm_device *dev, bool suspend)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct drm_dp_mst_topology_mgr *mgr;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type != dc_connection_mst_branch ||
+ aconnector->mst_root)
+ continue;
+
+ mgr = &aconnector->mst_mgr;
+
+ if (suspend) {
+ drm_dp_mst_topology_mgr_suspend(mgr);
+ } else {
+ /* If extended timeout is supported in hardware, default to the
+ * LTTPR timeout (3.2 ms) first as a workaround for the DP link layer
+ * CTS 4.2.1.1 regression introduced by the CTS spec requirement
+ * update.
+ */
+ try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
+ if (!dp_is_lttpr_present(aconnector->dc_link))
+ try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
+
+ /* TODO: move resume_mst_branch_status() back into drm mst resume
+ * once topology probing is pulled out of mst resume into a second
+ * resume step. That second step should be called after the old
+ * state has been restored (i.e. after drm_atomic_helper_resume()).
+ */
+ resume_mst_branch_status(mgr);
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+}
+
+static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+ /* This interface is for dGPU Navi1x. The Linux dc-pplib interface
+ * depends on the Windows driver dc implementation.
+ * For Navi1x, the dcn watermark clock settings are fixed and should be
+ * passed to smu during boot up and on resume from s3.
+ * Boot up: dc calculates the dcn watermark clock settings within
+ * dc_create, dcn20_resource_construct, then calls the pplib functions
+ * below to pass the settings to smu:
+ * smu_set_watermarks_for_clock_ranges
+ * smu_set_watermarks_table
+ * navi10_set_watermarks_table
+ * smu_write_watermarks_table
+ *
+ * For Renoir, the dcn watermark clock settings are also fixed values.
+ * dc has implemented a different flow for the Windows driver:
+ * dc_hardware_init / dc_set_power_state
+ * dcn10_init_hw
+ * notify_wm_ranges
+ * set_wm_ranges
+ * -- Linux
+ * smu_set_watermarks_for_clock_ranges
+ * renoir_set_watermarks_table
+ * smu_write_watermarks_table
+ *
+ * For Linux,
+ * dc_hardware_init -> amdgpu_dm_init
+ * dc_set_power_state --> dm_resume
+ *
+ * Therefore, this function applies to navi10/12/14 but not Renoir.
+ */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 0):
+ break;
+ default:
+ return 0;
+ }
+
+ ret = amdgpu_dpm_write_watermarks_table(adev);
+ if (ret) {
+ DRM_ERROR("Failed to update WMTABLE!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * dm_hw_init() - Initialize DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Initialize the &struct amdgpu_display_manager device. This involves calling
+ * the initializers of each DM component, then populating the struct with them.
+ *
+ * Although the function implies hardware initialization, both hardware and
+ * software are initialized here. Splitting them out to their relevant init
+ * hooks is a future TODO item.
+ *
+ * Some notable things that are initialized here:
+ *
+ * - Display Core, both software and hardware
+ * - DC modules that we need (freesync and color management)
+ * - DRM software states
+ * - Interrupt sources and handlers
+ * - Vblank support
+ * - Debug FS entries, if enabled
+ */
+static int dm_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ /* Create DAL display manager */
+ r = amdgpu_dm_init(adev);
+ if (r)
+ return r;
+ amdgpu_dm_hpd_init(adev);
+
+ return 0;
+}
+
+/**
+ * dm_hw_fini() - Teardown DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Teardown components within &struct amdgpu_display_manager that require
+ * cleanup. This involves cleaning up the DRM device, DC, and any modules that
+ * were loaded. Also flush IRQ workqueues and disable them.
+ */
+static int dm_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_dm_hpd_fini(adev);
+
+ amdgpu_dm_irq_fini(adev);
+ amdgpu_dm_fini(adev);
+ return 0;
+}
+
+
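+/*
+ * Enable or disable the pflip, vupdate and vblank interrupts for every
+ * stream with active planes while a GPU reset is in progress. For
+ * vblank, dc_interrupt_set() is used directly so that IRQ refcounts are
+ * left untouched across the reset.
+ */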
+static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
+ struct dc_state *state, bool enable)
+{
+ enum dc_irq_source irq_source;
+ struct amdgpu_crtc *acrtc;
+ int rc = -EBUSY;
+ int i = 0;
+
+ for (i = 0; i < state->stream_count; i++) {
+ acrtc = get_crtc_by_otg_inst(
+ adev, state->stream_status[i].primary_otg_inst);
+
+ if (acrtc && state->stream_status[i].plane_count != 0) {
+ irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
+ rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+ if (rc)
+ DRM_WARN("Failed to %s pflip interrupts\n",
+ enable ? "enable" : "disable");
+
+ if (enable) {
+ if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
+ } else
+ rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
+
+ if (rc)
+ DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
+
+ irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+ /* During gpu-reset we disable and then enable vblank irq, so
+ * don't use amdgpu_irq_get/put() to avoid refcount change.
+ */
+ if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+ DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
+ }
+ }
+}
+
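+/*
+ * Commit an empty configuration: copy the current DC state, strip all
+ * planes and streams from it, and commit the result. Used on the
+ * suspend path during a GPU reset to quiesce the hardware.
+ */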
+static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
+{
+ struct dc_state *context = NULL;
+ enum dc_status res = DC_ERROR_UNEXPECTED;
+ int i;
+ struct dc_stream_state *del_streams[MAX_PIPES];
+ int del_streams_count = 0;
+ struct dc_commit_streams_params params = {};
+
+ memset(del_streams, 0, sizeof(del_streams));
+
+ context = dc_state_create_current_copy(dc);
+ if (context == NULL)
+ goto context_alloc_fail;
+
+ /* First remove from context all streams */
+ for (i = 0; i < context->stream_count; i++) {
+ struct dc_stream_state *stream = context->streams[i];
+
+ del_streams[del_streams_count++] = stream;
+ }
+
+ /* Remove all planes for removed streams and then remove the streams */
+ for (i = 0; i < del_streams_count; i++) {
+ if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+ res = DC_FAIL_DETACH_SURFACES;
+ goto fail;
+ }
+
+ res = dc_state_remove_stream(dc, context, del_streams[i]);
+ if (res != DC_OK)
+ goto fail;
+ }
+
+ params.streams = context->streams;
+ params.stream_count = context->stream_count;
+ res = dc_commit_streams(dc, &params);
+
+fail:
+ dc_state_release(context);
+
+context_alloc_fail:
+ return res;
+}
+
+static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
+{
+ int i;
+
+ if (dm->hpd_rx_offload_wq) {
+ for (i = 0; i < dm->dc->caps.max_links; i++)
+ flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
+ }
+}
+
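+/*
+ * Suspend path. During a GPU reset the current DC state is cached and
+ * zero streams are committed; otherwise the DRM atomic state is saved,
+ * MST is suspended, and DC/DMUB are put into D3.
+ */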
+static int dm_suspend(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ int ret = 0;
+
+ if (amdgpu_in_reset(adev)) {
+ mutex_lock(&dm->dc_lock);
+
+ dc_allow_idle_optimizations(adev->dm.dc, false);
+
+ dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
+
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
+
+ amdgpu_dm_commit_zero_streams(dm->dc);
+
+ amdgpu_dm_irq_suspend(adev);
+
+ hpd_rx_irq_work_suspend(dm);
+
+ return ret;
+ }
+
+ WARN_ON(adev->dm.cached_state);
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
+ if (IS_ERR(adev->dm.cached_state))
+ return PTR_ERR(adev->dm.cached_state);
+
+ s3_handle_mst(adev_to_drm(adev), true);
+
+ amdgpu_dm_irq_suspend(adev);
+
+ hpd_rx_irq_work_suspend(dm);
+
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
+ dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
+
+ return 0;
+}
+
+struct drm_connector *
+amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ u32 i;
+ struct drm_connector_state *new_con_state;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc_from_state;
+
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ crtc_from_state = new_con_state->crtc;
+
+ if (crtc_from_state == crtc)
+ return connector;
+ }
+
+ return NULL;
+}
+
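+/*
+ * Fake a link detection for forced connectors: build sink caps from the
+ * connector signal type, create a sink, and read the local EDID.
+ */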
+static void emulated_link_detect(struct dc_link *link)
+{
+ struct dc_sink_init_data sink_init_data = { 0 };
+ struct display_sink_capability sink_caps = { 0 };
+ enum dc_edid_status edid_status;
+ struct dc_context *dc_ctx = link->ctx;
+ struct drm_device *dev = adev_to_drm(dc_ctx->driver_context);
+ struct dc_sink *sink = NULL;
+ struct dc_sink *prev_sink = NULL;
+
+ link->type = dc_connection_none;
+ prev_sink = link->local_sink;
+
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_DUAL_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_LVDS: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_LVDS;
+ break;
+ }
+
+ case SIGNAL_TYPE_EDP: {
+ sink_caps.transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_EDP;
+ break;
+ }
+
+ case SIGNAL_TYPE_DISPLAY_PORT: {
+ sink_caps.transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
+ break;
+ }
+
+ default:
+ drm_err(dev, "Invalid connector type! signal:%d\n",
+ link->connector_signal);
+ return;
+ }
+
+ sink_init_data.link = link;
+ sink_init_data.sink_signal = sink_caps.signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ drm_err(dev, "Failed to create sink!\n");
+ return;
+ }
+
+ /* dc_sink_create returns a new reference */
+ link->local_sink = sink;
+
+ edid_status = dm_helpers_read_local_edid(
+ link->ctx,
+ link,
+ sink);
+
+ if (edid_status != EDID_OK)
+ drm_err(dev, "Failed to read EDID\n");
+}
+
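+/*
+ * Replay a DC state after GPU reset: force a full update on every
+ * surface and issue a full-type update for each stream so the hardware
+ * is reprogrammed from scratch.
+ */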
+static void dm_gpureset_commit_state(struct dc_state *dc_state,
+ struct amdgpu_display_manager *dm)
+{
+ struct {
+ struct dc_surface_update surface_updates[MAX_SURFACES];
+ struct dc_plane_info plane_infos[MAX_SURFACES];
+ struct dc_scaling_info scaling_infos[MAX_SURFACES];
+ struct dc_flip_addrs flip_addrs[MAX_SURFACES];
+ struct dc_stream_update stream_update;
+ } *bundle;
+ int k, m;
+
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+
+ if (!bundle) {
+ drm_err(dm->ddev, "Failed to allocate update bundle\n");
+ goto cleanup;
+ }
+
+ for (k = 0; k < dc_state->stream_count; k++) {
+ bundle->stream_update.stream = dc_state->streams[k];
+
+ for (m = 0; m < dc_state->stream_status->plane_count; m++) {
+ bundle->surface_updates[m].surface =
+ dc_state->stream_status->plane_states[m];
+ bundle->surface_updates[m].surface->force_full_update =
+ true;
+ }
+
+ update_planes_and_stream_adapter(dm->dc,
+ UPDATE_TYPE_FULL,
+ dc_state->stream_status->plane_count,
+ dc_state->streams[k],
+ &bundle->stream_update,
+ bundle->surface_updates);
+ }
+
+cleanup:
+ kfree(bundle);
+}
+
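+/*
+ * Resume path. During a GPU reset the cached DC state is replayed;
+ * otherwise DMUB is re-initialized, DC is powered to D0, links are
+ * re-detected, the saved atomic state is restored, and MST topology
+ * probing is kicked off again.
+ */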
+static int dm_resume(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state;
+ struct dm_plane_state *dm_new_plane_state;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ struct dc_state *dc_state;
+ int i, r, j, ret;
+ bool need_hotplug = false;
+ struct dc_commit_streams_params commit_params = {};
+
+ if (dm->dc->caps.ips_support)
+ dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
+
+ if (amdgpu_in_reset(adev)) {
+ dc_state = dm->cached_dc_state;
+
+ /*
+ * The dc->current_state is backed up into dm->cached_dc_state
+ * before we commit 0 streams.
+ *
+ * DC will clear link encoder assignments on the real state
+ * but the changes won't propagate over to the copy we made
+ * before the 0 streams commit.
+ *
+ * DC expects that link encoder assignments are *not* valid
+ * when committing a state, so as a workaround we can copy
+ * off of the current state.
+ *
+ * We lose the previous assignments, but we had already
+ * commit 0 streams anyway.
+ */
+ link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
+
+ r = dm_dmub_hw_init(adev);
+ if (r)
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+
+ dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
+ dc_resume(dm->dc);
+
+ amdgpu_dm_irq_resume_early(adev);
+
+ for (i = 0; i < dc_state->stream_count; i++) {
+ dc_state->streams[i]->mode_changed = true;
+ for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
+ dc_state->stream_status[i].plane_states[j]->update_flags.raw
+ = 0xffffffff;
+ }
+ }
+
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ amdgpu_dm_outbox_init(adev);
+ dc_enable_dmub_outbox(adev->dm.dc);
+ }
+
+ commit_params.streams = dc_state->streams;
+ commit_params.stream_count = dc_state->stream_count;
+ WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
+
+ dm_gpureset_commit_state(dm->cached_dc_state, dm);
+
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
+
+ dc_state_release(dm->cached_dc_state);
+ dm->cached_dc_state = NULL;
+
+ amdgpu_dm_irq_resume_late(adev);
+
+ mutex_unlock(&dm->dc_lock);
+
+ return 0;
+ }
+ /* Recreate dc_state - DC invalidates it when setting power state to S3. */
+ dc_state_release(dm_state->context);
+ dm_state->context = dc_state_create(dm->dc, NULL);
+ /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
+
+ /* Before powering on DC we need to re-initialize DMUB. */
+ dm_dmub_hw_resume(adev);
+
+ /* Re-enable outbox interrupts for DPIA. */
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ amdgpu_dm_outbox_init(adev);
+ dc_enable_dmub_outbox(adev->dm.dc);
+ }
+
+ /* power on hardware */
+ dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
+ /* program HPD filter */
+ dc_resume(dm->dc);
+
+ /*
+ * early enable HPD Rx IRQ, should be done before set mode as short
+ * pulse interrupts are used for MST
+ */
+ amdgpu_dm_irq_resume_early(adev);
+
+ /* On resume we need to rewrite the MSTM control bits to enable MST */
+ s3_handle_mst(ddev, false);
+
+ /* Do detection */
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!aconnector->dc_link)
+ continue;
+
+ /*
+ * This is the case when traversing through already created end sink
+ * MST connectors; they should be skipped.
+ */
+ if (aconnector->mst_root)
+ continue;
+
+ mutex_lock(&aconnector->hpd_lock);
+ if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(aconnector->dc_link);
+ } else {
+ mutex_lock(&dm->dc_lock);
+ dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ mutex_unlock(&dm->dc_lock);
+ }
+
+ if (aconnector->fake_enable && aconnector->dc_link->local_sink)
+ aconnector->fake_enable = false;
+
+ if (aconnector->dc_sink)
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ mutex_unlock(&aconnector->hpd_lock);
+ }
+ drm_connector_list_iter_end(&iter);
+
+ /* Force mode set in atomic commit */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
+ new_crtc_state->active_changed = true;
+
+ /*
+ * atomic_check is expected to create the dc states. We need to release
+ * them here, since they were duplicated as part of the suspend
+ * procedure.
+ */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->stream) {
+ WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
+ dc_stream_release(dm_new_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+ }
+ }
+
+ for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ if (dm_new_plane_state->dc_state) {
+ WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
+ dc_plane_state_release(dm_new_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
+ }
+ }
+
+ drm_atomic_helper_resume(ddev, dm->cached_state);
+
+ dm->cached_state = NULL;
+
+ /* Do mst topology probing after resuming cached state */
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type != dc_connection_mst_branch ||
+ aconnector->mst_root)
+ continue;
+
+ ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
+
+ if (ret < 0) {
+ dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link);
+ need_hotplug = true;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+
+ if (need_hotplug)
+ drm_kms_helper_hotplug_event(ddev);
+
+ amdgpu_dm_irq_resume_late(adev);
+
+ amdgpu_dm_smu_write_watermarks_table(adev);
+
+ return 0;
+}
+
+/**
+ * DOC: DM Lifecycle
+ *
+ * DM (and consequently DC) is registered in the amdgpu base driver as an IP
+ * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
+ * the base driver's device list to be initialized and torn down accordingly.
+ *
+ * The functions to do so are provided as hooks in &struct amd_ip_funcs.
+ */
+
+static const struct amd_ip_funcs amdgpu_dm_funcs = {
+ .name = "dm",
+ .early_init = dm_early_init,
+ .late_init = dm_late_init,
+ .sw_init = dm_sw_init,
+ .sw_fini = dm_sw_fini,
+ .early_fini = amdgpu_dm_early_fini,
+ .hw_init = dm_hw_init,
+ .hw_fini = dm_hw_fini,
+ .suspend = dm_suspend,
+ .resume = dm_resume,
+ .is_idle = dm_is_idle,
+ .wait_for_idle = dm_wait_for_idle,
+ .check_soft_reset = dm_check_soft_reset,
+ .soft_reset = dm_soft_reset,
+ .set_clockgating_state = dm_set_clockgating_state,
+ .set_powergating_state = dm_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version dm_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_dm_funcs,
+};
+
+/**
+ * DOC: atomic
+ *
+ * *WIP*
+ */
+
+static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
+ .fb_create = amdgpu_display_user_framebuffer_create,
+ .get_format_info = amdgpu_dm_plane_get_format_info,
+ .atomic_check = amdgpu_dm_atomic_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
+ .atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
+ .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
+};
+
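+/*
+ * Refresh the eDP backlight caps for a connector from its DPCD extended
+ * caps and panel luminance range; AUX backlight control is only used
+ * for OLED panels unless overridden by the amdgpu_backlight parameter.
+ */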
+static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
+{
+ struct amdgpu_dm_backlight_caps *caps;
+ struct drm_connector *conn_base;
+ struct amdgpu_device *adev;
+ struct drm_luminance_range_info *luminance_range;
+
+ if (aconnector->bl_idx == -1 ||
+ aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
+ return;
+
+ conn_base = &aconnector->base;
+ adev = drm_to_adev(conn_base->dev);
+
+ caps = &adev->dm.backlight_caps[aconnector->bl_idx];
+ caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
+ caps->aux_support = false;
+
+ if (caps->ext_caps->bits.oled == 1
+ /*
+ * ||
+ * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+ * caps->ext_caps->bits.hdr_aux_backlight_control == 1
+ */)
+ caps->aux_support = true;
+
+ if (amdgpu_backlight == 0)
+ caps->aux_support = false;
+ else if (amdgpu_backlight == 1)
+ caps->aux_support = true;
+
+ luminance_range = &conn_base->display_info.luminance_range;
+
+ if (luminance_range->max_luminance) {
+ caps->aux_min_input_signal = luminance_range->min_luminance;
+ caps->aux_max_input_signal = luminance_range->max_luminance;
+ } else {
+ caps->aux_min_input_signal = 0;
+ caps->aux_max_input_signal = 512;
+ }
+}
+
+void amdgpu_dm_update_connector_after_detect(
+ struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct dc_sink *sink;
+
+ /* MST handled by drm_mst framework */
+ if (aconnector->mst_mgr.mst_state == true)
+ return;
+
+ sink = aconnector->dc_link->local_sink;
+ if (sink)
+ dc_sink_retain(sink);
+
+ /*
+ * The EDID mgmt connector gets its first update only in the mode_valid
+ * hook, and then the connector sink is set to either a fake or a
+ * physical sink depending on the link status.
+ * Skip if this was already done during boot.
+ */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
+ && aconnector->dc_em_sink) {
+
+ /*
+ * For headless S3 resume, use the emulated sink (dc_em_sink) to fake
+ * a stream, because on resume connector->sink is set to NULL.
+ */
+ mutex_lock(&dev->mode_config.mutex);
+
+ if (sink) {
+ if (aconnector->dc_sink) {
+ amdgpu_dm_update_freesync_caps(connector, NULL);
+ /*
+ * The retain and release below bump up the refcount for the sink
+ * because the link no longer points to it after disconnect, so the
+ * next crtc-to-connector reshuffle by UMD would otherwise trigger an
+ * unwanted dc_sink release.
+ */
+ dc_sink_release(aconnector->dc_sink);
+ }
+ aconnector->dc_sink = sink;
+ dc_sink_retain(aconnector->dc_sink);
+ amdgpu_dm_update_freesync_caps(connector,
+ aconnector->edid);
+ } else {
+ amdgpu_dm_update_freesync_caps(connector, NULL);
+ if (!aconnector->dc_sink) {
+ aconnector->dc_sink = aconnector->dc_em_sink;
+ dc_sink_retain(aconnector->dc_sink);
+ }
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (sink)
+ dc_sink_release(sink);
+ return;
+ }
+
+ /*
+ * TODO: temporary guard until a proper fix is found.
+ * If this sink is an MST sink, we should not do anything.
+ */
+ if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ dc_sink_release(sink);
+ return;
+ }
+
+ if (aconnector->dc_sink == sink) {
+ /*
+ * We got a DP short pulse (Link Loss, DP CTS, etc...).
+ * Do nothing.
+ */
+ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
+ aconnector->connector_id);
+ if (sink)
+ dc_sink_release(sink);
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
+ aconnector->connector_id, aconnector->dc_sink, sink);
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ /*
+ * 1. Update status of the drm connector
+ * 2. Send an event and let userspace tell us what to do
+ */
+ if (sink) {
+ /*
+ * TODO: check if we still need the S3 mode update workaround.
+ * If yes, put it here.
+ */
+ if (aconnector->dc_sink) {
+ amdgpu_dm_update_freesync_caps(connector, NULL);
+ dc_sink_release(aconnector->dc_sink);
+ }
+
+ aconnector->dc_sink = sink;
+ dc_sink_retain(aconnector->dc_sink);
+ if (sink->dc_edid.length == 0) {
+ aconnector->edid = NULL;
+ if (aconnector->dc_link->aux_mode) {
+ drm_dp_cec_unset_edid(
+ &aconnector->dm_dp_aux.aux);
+ }
+ } else {
+ aconnector->edid =
+ (struct edid *)sink->dc_edid.raw_edid;
+
+ if (aconnector->dc_link->aux_mode)
+ drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
+ aconnector->edid);
+ }
+
+ if (!aconnector->timing_requested) {
+ aconnector->timing_requested =
+ kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
+ if (!aconnector->timing_requested)
+ drm_err(dev,
+ "failed to allocate aconnector->timing_requested\n");
+ }
+
+ drm_connector_update_edid_property(connector, aconnector->edid);
+ amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
+ update_connector_ext_caps(aconnector);
+ } else {
+ drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
+ amdgpu_dm_update_freesync_caps(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
+ aconnector->num_modes = 0;
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+ aconnector->edid = NULL;
+ kfree(aconnector->timing_requested);
+ aconnector->timing_requested = NULL;
+ /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
+ if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ update_subconnector_property(aconnector);
+
+ if (sink)
+ dc_sink_release(sink);
+}
+
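+/*
+ * Long-pulse HPD handling: reset HDCP for the link, run (or emulate)
+ * link detection, restore the connector state, and notify userspace
+ * with a hotplug event.
+ */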
+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
+ bool ret = false;
+
+ if (adev->dm.disable_hpd_irq)
+ return;
+
+ /*
+ * In case of failure or MST there is no need to update the connector
+ * status or notify the OS, since (in the MST case) MST handles this
+ * in its own context.
+ */
+ mutex_lock(&aconnector->hpd_lock);
+
+ if (adev->dm.hdcp_workqueue) {
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+ dm_con_state->update_hdcp = true;
+ }
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ aconnector->timing_changed = false;
+
+ if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(aconnector->dc_link);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_connector_hotplug_event(connector);
+ } else {
+ mutex_lock(&adev->dm.dc_lock);
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ mutex_unlock(&adev->dm.dc_lock);
+ if (ret) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_connector_hotplug_event(connector);
+ }
+ }
+ mutex_unlock(&aconnector->hpd_lock);
+}
+
+static void handle_hpd_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+
+ handle_hpd_irq_helper(aconnector);
+}
+
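+/*
+ * Queue a copy of the HPD RX IRQ data onto the per-link offload
+ * workqueue so the heavier handling runs outside IRQ context.
+ */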
+static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+ union hpd_irq_data hpd_irq_data)
+{
+ struct hpd_rx_irq_offload_work *offload_work =
+ kzalloc(sizeof(*offload_work), GFP_KERNEL);
+
+ if (!offload_work) {
+ DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
+ return;
+ }
+
+ INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
+ offload_work->data = hpd_irq_data;
+ offload_work->offload_wq = offload_wq;
+
+ queue_work(offload_wq->wq, &offload_work->work);
+ DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
+}
+
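+/*
+ * Short-pulse (HPD RX) handling: let DC process the IRQ data, then
+ * offload automated-test, MST message-ready and link-loss work to the
+ * per-link workqueue, and re-run detection if the downstream port
+ * status changed on a non-MST connector.
+ */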
+static void handle_hpd_rx_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct dc_link *dc_link = aconnector->dc_link;
+ bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+ bool result = false;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ union hpd_irq_data hpd_irq_data;
+ bool link_loss = false;
+ bool has_left_work = false;
+ int idx = dc_link->link_index;
+ struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
+
+ memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
+
+ if (adev->dm.disable_hpd_irq)
+ return;
+
+ /*
+ * TODO: Temporarily add a mutex to protect the hpd interrupt from
+ * gpio conflicts; once an i2c helper is implemented, this mutex
+ * should be retired.
+ */
+ mutex_lock(&aconnector->hpd_lock);
+
+ result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
+ &link_loss, true, &has_left_work);
+
+ if (!has_left_work)
+ goto out;
+
+ if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ goto out;
+ }
+
+ if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+ if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+ hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ bool skip = false;
+
+ /*
+ * DOWN_REP_MSG_RDY is also handled by polling method
+ * mgr->cbs->poll_hpd_irq()
+ */
+ spin_lock(&offload_wq->offload_lock);
+ skip = offload_wq->is_handling_mst_msg_rdy_event;
+
+ if (!skip)
+ offload_wq->is_handling_mst_msg_rdy_event = true;
+
+ spin_unlock(&offload_wq->offload_lock);
+
+ if (!skip)
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
+ goto out;
+ }
+
+ if (link_loss) {
+ bool skip = false;
+
+ spin_lock(&offload_wq->offload_lock);
+ skip = offload_wq->is_handling_link_loss;
+
+ if (!skip)
+ offload_wq->is_handling_link_loss = true;
+
+ spin_unlock(&offload_wq->offload_lock);
+
+ if (!skip)
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
+ goto out;
+ }
+ }
+
+out:
+ if (result && !is_mst_root_connector) {
+ /* Downstream Port status changed. */
+ if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(dc_link);
+
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ drm_kms_helper_connector_hotplug_event(connector);
+ } else {
+ bool ret = false;
+
+ mutex_lock(&adev->dm.dc_lock);
+ ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
+ mutex_unlock(&adev->dm.dc_lock);
+
+ if (ret) {
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ drm_kms_helper_connector_hotplug_event(connector);
+ }
+ }
+ }
+ if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
+ if (adev->dm.hdcp_workqueue)
+ hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
+ }
+
+ if (dc_link->type != dc_connection_mst_branch)
+ drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
+
+ mutex_unlock(&aconnector->hpd_lock);
+}
+
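+/*
+ * Hook up the HPD and HPD RX interrupt sources of every link to the
+ * handlers above; with DMUB outbox support the HPD notifications are
+ * also routed through dmub_hpd_callback.
+ */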
+static void register_hpd_handlers(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_connector *connector;
+ struct amdgpu_dm_connector *aconnector;
+ const struct dc_link *dc_link;
+ struct dc_interrupt_params int_params = {0};
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true))
+ DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true))
+ DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ }
+
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, head) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ dc_link = aconnector->dc_link;
+
+ if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source = dc_link->irq_source_hpd;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ handle_hpd_irq,
+ (void *) aconnector);
+ }
+
+ if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
+
+ /* Also register for DP short pulse (hpd_rx). */
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source = dc_link->irq_source_hpd_rx;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ handle_hpd_rx_irq,
+ (void *) aconnector);
+ }
+ }
+}
+
+#if defined(CONFIG_DRM_AMD_DC_SI)
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dce60_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+ unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /*
+ * Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling.
+ */
+
+ /* Use VBLANK interrupt */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i + 1, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
+ i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+
+ }
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, client_id,
+ VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+#endif
+
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dce110_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+ unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+
+ if (adev->family >= AMDGPU_FAMILY_AI)
+ client_id = SOC15_IH_CLIENTID_DCE;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /*
+ * Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling.
+ */
+
+ /* Use VBLANK interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use VUPDATE interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
+ if (r) {
+ DRM_ERROR("Failed to add vupdate irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_vupdate_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
+ i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+
+ }
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, client_id,
+ VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ static const unsigned int vrtl_int_srcid[] = {
+ DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
+ };
+#endif
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /*
+ * Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling.
+ */
+
+ /* Use VSTARTUP interrupt */
+ for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
+ i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(
+ adev, &int_params, dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use otg vertical line interrupt */
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
+ vrtl_int_srcid[i], &adev->vline0_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add vline0 irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
+
+ if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
+ DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
+ break;
+ }
+
+ c_irq_params = &adev->dm.vline0_params[int_params.irq_source
+ - DC_IRQ_SOURCE_DC1_VLINE0];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
+ }
+#endif
+
+ /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
+ * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
+ * to trigger at end of each vblank, regardless of state of the lock,
+ * matching DCE behaviour.
+ */
+ for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
+ i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add vupdate irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_vupdate_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
+ i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+
+ }
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
+ &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+
+/* Register Outbox IRQ sources and initialize IRQ callbacks */
+static int register_outbox_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r, i;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
+ &adev->dmub_outbox_irq);
+ if (r) {
+ DRM_ERROR("Failed to add outbox irq id!\n");
+ return r;
+ }
+
+ if (dc->ctx->dmub_srv) {
+ i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.dmub_outbox_params[0];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_dmub_outbox1_low_irq, c_irq_params);
+ }
+
+ return 0;
+}
+
+/*
+ * Acquires the lock for the atomic state object and returns
+ * the new atomic state.
+ *
+ * This should only be called during atomic check.
+ */
+int dm_atomic_get_state(struct drm_atomic_state *state,
+ struct dm_atomic_state **dm_state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_private_state *priv_state;
+
+ if (*dm_state)
+ return 0;
+
+ priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
+ if (IS_ERR(priv_state))
+ return PTR_ERR(priv_state);
+
+ *dm_state = to_dm_atomic_state(priv_state);
+
+ return 0;
+}
+
+static struct dm_atomic_state *
+dm_atomic_get_new_state(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_private_obj *obj;
+ struct drm_private_state *new_obj_state;
+ int i;
+
+ for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
+ if (obj->funcs == dm->atomic_obj.funcs)
+ return to_dm_atomic_state(new_obj_state);
+ }
+
+ return NULL;
+}
+
+static struct drm_private_state *
+dm_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct dm_atomic_state *old_state, *new_state;
+
+ new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
+ if (!new_state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
+
+ old_state = to_dm_atomic_state(obj->state);
+
+ if (old_state && old_state->context)
+ new_state->context = dc_state_create_copy(old_state->context);
+
+ if (!new_state->context) {
+ kfree(new_state);
+ return NULL;
+ }
+
+ return &new_state->base;
+}
+
+static void dm_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+
+ if (dm_state && dm_state->context)
+ dc_state_release(dm_state->context);
+
+ kfree(dm_state);
+}
+
+static struct drm_private_state_funcs dm_atomic_state_funcs = {
+ .atomic_duplicate_state = dm_atomic_duplicate_state,
+ .atomic_destroy_state = dm_atomic_destroy_state,
+};
+
+static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
+{
+ struct dm_atomic_state *state;
+ int r;
+
+ adev->mode_info.mode_config_initialized = true;
+
+ adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
+ adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
+
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
+
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
+ if (adev->asic_type == CHIP_HAWAII)
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ else
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+ /* indicates support for immediate flip */
+ adev_to_drm(adev)->mode_config.async_page_flip = true;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->context = dc_state_create_current_copy(adev->dm.dc);
+ if (!state->context) {
+ kfree(state);
+ return -ENOMEM;
+ }
+
+ drm_atomic_private_obj_init(adev_to_drm(adev),
+ &adev->dm.atomic_obj,
+ &state->base,
+ &dm_atomic_state_funcs);
+
+ r = amdgpu_display_modeset_create_props(adev);
+ if (r) {
+ dc_state_release(state->context);
+ kfree(state);
+ return r;
+ }
+
+#ifdef AMD_PRIVATE_COLOR
+ if (amdgpu_dm_create_color_properties(adev))
+ return -ENOMEM;
+#endif
+
+ r = amdgpu_dm_audio_init(adev);
+ if (r) {
+ dc_state_release(state->context);
+ kfree(state);
+ return r;
+ }
+
+ return 0;
+}
+
+#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
+#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
+#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
+
+static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
+ int bl_idx)
+{
+#if defined(CONFIG_ACPI)
+ struct amdgpu_dm_backlight_caps caps;
+
+ memset(&caps, 0, sizeof(caps));
+
+ if (dm->backlight_caps[bl_idx].caps_valid)
+ return;
+
+ amdgpu_acpi_get_backlight_caps(&caps);
+ if (caps.caps_valid) {
+ dm->backlight_caps[bl_idx].caps_valid = true;
+ if (caps.aux_support)
+ return;
+ dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
+ dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
+ } else {
+ dm->backlight_caps[bl_idx].min_input_signal =
+ AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ dm->backlight_caps[bl_idx].max_input_signal =
+ AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ }
+#else
+ if (dm->backlight_caps[bl_idx].aux_support)
+ return;
+
+ dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+#endif
+}
+
+static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
+ unsigned int *min, unsigned int *max)
+{
+ if (!caps)
+ return 0;
+
+ if (caps->aux_support) {
+ // Firmware limits are in nits, DC API wants millinits.
+ *max = 1000 * caps->aux_max_input_signal;
+ *min = 1000 * caps->aux_min_input_signal;
+ } else {
+ // Firmware limits are 8-bit, PWM control is 16-bit.
+ *max = 0x101 * caps->max_input_signal;
+ *min = 0x101 * caps->min_input_signal;
+ }
+ return 1;
+}
+
+static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
+ uint32_t brightness)
+{
+ unsigned int min, max;
+
+ if (!get_brightness_range(caps, &min, &max))
+ return brightness;
+
+ // Rescale 0..255 to min..max
+ return min + DIV_ROUND_CLOSEST((max - min) * brightness,
+ AMDGPU_MAX_BL_LEVEL);
+}
+
+static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
+ uint32_t brightness)
+{
+ unsigned int min, max;
+
+ if (!get_brightness_range(caps, &min, &max))
+ return brightness;
+
+ if (brightness < min)
+ return 0;
+ // Rescale min..max to 0..255
+ return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
+ max - min);
+}
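+/*
+ * Worked example (illustrative, not part of the original change, and assuming
+ * AMDGPU_MAX_BL_LEVEL is 255): with the default PWM caps min_input_signal = 12
+ * and max_input_signal = 255, the 16-bit range is min = 0x101 * 12 = 3084 and
+ * max = 0x101 * 255 = 65535. A user brightness of 128 then maps to
+ * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34433, and converting
+ * 34433 back yields DIV_ROUND_CLOSEST(255 * 31349, 62451) = 128, so the round
+ * trip is lossless at this precision.
+ */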
+
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+ int bl_idx,
+ u32 user_brightness)
+{
+ struct amdgpu_dm_backlight_caps caps;
+ struct dc_link *link;
+ u32 brightness;
+ bool rc;
+
+ amdgpu_dm_update_backlight_caps(dm, bl_idx);
+ caps = dm->backlight_caps[bl_idx];
+
+ dm->brightness[bl_idx] = user_brightness;
+ /* update scratch register */
+ if (bl_idx == 0)
+ amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
+ brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
+ link = (struct dc_link *)dm->backlight_link[bl_idx];
+
+ /* Change brightness based on AUX property */
+ if (caps.aux_support) {
+ rc = dc_link_set_backlight_level_nits(link, true, brightness,
+ AUX_BL_DEFAULT_TRANSITION_TIME_MS);
+ if (!rc)
+ DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
+ } else {
+ rc = dc_link_set_backlight_level(link, brightness, 0);
+ if (!rc)
+ DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
+ }
+
+ if (rc)
+ dm->actual_brightness[bl_idx] = user_brightness;
+}
+
+static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+{
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+ int i;
+
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (bd == dm->backlight_dev[i])
+ break;
+ }
+ if (i >= AMDGPU_DM_MAX_NUM_EDP)
+ i = 0;
+ amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
+
+ return 0;
+}
+
+static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
+ int bl_idx)
+{
+ int ret;
+ struct amdgpu_dm_backlight_caps caps;
+ struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
+
+ amdgpu_dm_update_backlight_caps(dm, bl_idx);
+ caps = dm->backlight_caps[bl_idx];
+
+ if (caps.aux_support) {
+ u32 avg, peak;
+ bool rc;
+
+ rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
+ if (!rc)
+ return dm->brightness[bl_idx];
+ return convert_brightness_to_user(&caps, avg);
+ }
+
+ ret = dc_link_get_backlight_level(link);
+
+ if (ret == DC_ERROR_UNEXPECTED)
+ return dm->brightness[bl_idx];
+
+ return convert_brightness_to_user(&caps, ret);
+}
+
+static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+{
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+ int i;
+
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (bd == dm->backlight_dev[i])
+ break;
+ }
+ if (i >= AMDGPU_DM_MAX_NUM_EDP)
+ i = 0;
+ return amdgpu_dm_backlight_get_level(dm, i);
+}
+
+static const struct backlight_ops amdgpu_dm_backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = amdgpu_dm_backlight_get_brightness,
+ .update_status = amdgpu_dm_backlight_update_status,
+};
+
+static void
+amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_device *drm = aconnector->base.dev;
+ struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
+ struct backlight_properties props = { 0 };
+ char bl_name[16];
+
+ if (aconnector->bl_idx == -1)
+ return;
+
+ if (!acpi_video_backlight_use_native()) {
+ drm_info(drm, "Skipping amdgpu DM backlight registration\n");
+ /* Try registering an ACPI video backlight device instead. */
+ acpi_video_register_backlight();
+ return;
+ }
+
+ props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+ props.brightness = AMDGPU_MAX_BL_LEVEL;
+ props.type = BACKLIGHT_RAW;
+
+ snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
+ drm->primary->index + aconnector->bl_idx);
+
+ dm->backlight_dev[aconnector->bl_idx] =
+ backlight_device_register(bl_name, aconnector->base.kdev, dm,
+ &amdgpu_dm_backlight_ops, &props);
+
+ if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
+ DRM_ERROR("DM: Backlight registration failed!\n");
+ dm->backlight_dev[aconnector->bl_idx] = NULL;
+ } else
+ DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
+}
+
+static int initialize_plane(struct amdgpu_display_manager *dm,
+ struct amdgpu_mode_info *mode_info, int plane_id,
+ enum drm_plane_type plane_type,
+ const struct dc_plane_cap *plane_cap)
+{
+ struct drm_plane *plane;
+ unsigned long possible_crtcs;
+ int ret = 0;
+
+ plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
+ if (!plane) {
+ DRM_ERROR("KMS: Failed to allocate plane\n");
+ return -ENOMEM;
+ }
+ plane->type = plane_type;
+
+ /*
+ * HACK: IGT tests expect that the primary plane for a CRTC
+ * can only have one possible CRTC. Only expose support for
+	 * any CRTC on planes that are not going to be used as a
+	 * primary plane for a CRTC - like overlay or underlay planes.
+ */
+ possible_crtcs = 1 << plane_id;
+ if (plane_id >= dm->dc->caps.max_streams)
+ possible_crtcs = 0xff;
+
+ ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
+
+ if (ret) {
+ DRM_ERROR("KMS: Failed to initialize plane\n");
+ kfree(plane);
+ return ret;
+ }
+
+ if (mode_info)
+ mode_info->planes[plane_id] = plane;
+
+ return ret;
+}
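+/*
+ * Illustrative note (not part of the original change): with
+ * dc->caps.max_streams = 4, primary plane 2 gets possible_crtcs =
+ * 1 << 2 = 0x4, binding it to CRTC 2 only, while an overlay plane with
+ * plane_id >= 4 gets possible_crtcs = 0xff and may be assigned to any CRTC.
+ */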
+
+
+static void setup_backlight_device(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_link *link = aconnector->dc_link;
+ int bl_idx = dm->num_of_edps;
+
+ if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
+ link->type == dc_connection_none)
+ return;
+
+ if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
+		drm_warn(adev_to_drm(dm->adev), "Too many eDP connections, skipping backlight setup for additional eDPs\n");
+ return;
+ }
+
+ aconnector->bl_idx = bl_idx;
+
+ amdgpu_dm_update_backlight_caps(dm, bl_idx);
+ dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
+ dm->backlight_link[bl_idx] = link;
+ dm->num_of_edps++;
+
+ update_connector_ext_caps(aconnector);
+}
+
+static void amdgpu_set_panel_orientation(struct drm_connector *connector);
+
+/*
+ * In this architecture, the association
+ * connector -> encoder -> crtc
+ * is not really required. The crtc and connector will hold the
+ * display_index as an abstraction to use with the DAL component.
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+{
+ struct amdgpu_display_manager *dm = &adev->dm;
+ s32 i;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct amdgpu_encoder *aencoder = NULL;
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ u32 link_cnt;
+ s32 primary_planes;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ const struct dc_plane_cap *plane;
+ bool psr_feature_enabled = false;
+ bool replay_feature_enabled = false;
+ int max_overlay = dm->dc->caps.max_slave_planes;
+
+ dm->display_indexes_num = dm->dc->caps.max_streams;
+ /* Update the actual used number of crtc */
+ adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+
+ amdgpu_dm_set_irq_funcs(adev);
+
+ link_cnt = dm->dc->caps.max_links;
+ if (amdgpu_dm_mode_config_init(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize mode config\n");
+ return -EINVAL;
+ }
+
+ /* There is one primary plane per CRTC */
+ primary_planes = dm->dc->caps.max_streams;
+ ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
+
+ /*
+ * Initialize primary planes, implicit planes for legacy IOCTLS.
+ * Order is reversed to match iteration order in atomic check.
+ */
+ for (i = (primary_planes - 1); i >= 0; i--) {
+ plane = &dm->dc->caps.planes[i];
+
+ if (initialize_plane(dm, mode_info, i,
+ DRM_PLANE_TYPE_PRIMARY, plane)) {
+ DRM_ERROR("KMS: Failed to initialize primary plane\n");
+ goto fail;
+ }
+ }
+
+ /*
+ * Initialize overlay planes, index starting after primary planes.
+ * These planes have a higher DRM index than the primary planes since
+ * they should be considered as having a higher z-order.
+ * Order is reversed to match iteration order in atomic check.
+ *
+ * Only support DCN for now, and only expose one so we don't encourage
+ * userspace to use up all the pipes.
+ */
+ for (i = 0; i < dm->dc->caps.max_planes; ++i) {
+ struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
+
+ /* Do not create overlay if MPO disabled */
+ if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
+ break;
+
+ if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
+ continue;
+
+ if (!plane->pixel_format_support.argb8888)
+ continue;
+
+ if (max_overlay-- == 0)
+ break;
+
+ if (initialize_plane(dm, NULL, primary_planes + i,
+ DRM_PLANE_TYPE_OVERLAY, plane)) {
+ DRM_ERROR("KMS: Failed to initialize overlay plane\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < dm->dc->caps.max_streams; i++)
+ if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
+ DRM_ERROR("KMS: Failed to initialize crtc\n");
+ goto fail;
+ }
+
+ /* Use Outbox interrupt */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ if (register_outbox_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+ default:
+ DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
+ amdgpu_ip_version(adev, DCE_HWIP, 0));
+ }
+
+ /* Determine whether to enable PSR support by default. */
+ if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ psr_feature_enabled = true;
+ break;
+ default:
+ psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
+ break;
+ }
+ }
+
+ /* Determine whether to enable Replay support by default. */
+ if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ replay_feature_enabled = true;
+ break;
+ default:
+ replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
+ break;
+ }
+ }
+
+ /* loops over all connectors on the board */
+ for (i = 0; i < link_cnt; i++) {
+ struct dc_link *link = NULL;
+
+ if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
+ DRM_ERROR(
+ "KMS: Cannot support more than %d display indexes\n",
+ AMDGPU_DM_MAX_DISPLAY_INDEX);
+ continue;
+ }
+
+ link = dc_get_link_at_index(dm->dc, i);
+
+ if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) {
+ struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
+
+ if (!wbcon) {
+ DRM_ERROR("KMS: Failed to allocate writeback connector\n");
+ continue;
+ }
+
+ if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
+ DRM_ERROR("KMS: Failed to initialize writeback connector\n");
+ kfree(wbcon);
+ continue;
+ }
+
+ link->psr_settings.psr_feature_enabled = false;
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
+ continue;
+ }
+
+ aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
+ if (!aconnector)
+ goto fail;
+
+ aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
+ if (!aencoder)
+ goto fail;
+
+ if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
+ DRM_ERROR("KMS: Failed to initialize encoder\n");
+ goto fail;
+ }
+
+ if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
+ DRM_ERROR("KMS: Failed to initialize connector\n");
+ goto fail;
+ }
+
+ if (dm->hpd_rx_offload_wq)
+ dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
+ aconnector;
+
+ if (!dc_link_detect_connection_type(link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(link);
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ } else {
+ bool ret = false;
+
+ mutex_lock(&dm->dc_lock);
+ ret = dc_link_detect(link, DETECT_REASON_BOOT);
+ mutex_unlock(&dm->dc_lock);
+
+ if (ret) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ setup_backlight_device(dm, aconnector);
+
+ /* Disable PSR if Replay can be enabled */
+ if (replay_feature_enabled)
+ if (amdgpu_dm_set_replay_caps(link, aconnector))
+ psr_feature_enabled = false;
+
+ if (psr_feature_enabled)
+ amdgpu_dm_set_psr_caps(link);
+
+ /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
+ * PSR is also supported.
+ */
+ if (link->psr_settings.psr_feature_enabled)
+ adev_to_drm(adev)->vblank_disable_immediate = false;
+ }
+ }
+ amdgpu_set_panel_orientation(&aconnector->base);
+ }
+
+ /* Software is initialized. Now we can register interrupt handlers. */
+ switch (adev->asic_type) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ if (dce60_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+#endif
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ if (dce110_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+ default:
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ if (dcn10_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+ default:
+ DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
+ amdgpu_ip_version(adev, DCE_HWIP, 0));
+ goto fail;
+ }
+ break;
+ }
+
+ return 0;
+fail:
+ kfree(aencoder);
+ kfree(aconnector);
+
+ return -EINVAL;
+}
+
+static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
+{
+ drm_atomic_private_obj_fini(&dm->atomic_obj);
+}
+
+/******************************************************************************
+ * amdgpu_display_funcs functions
+ *****************************************************************************/
+
+/*
+ * dm_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line buffer allocation.
+ */
+static void dm_bandwidth_update(struct amdgpu_device *adev)
+{
+ /* TODO: implement later */
+}
+
+static const struct amdgpu_display_funcs dm_display_funcs = {
+ .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
+ .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
+ .backlight_set_level = NULL, /* never called for DC */
+ .backlight_get_level = NULL, /* never called for DC */
+ .hpd_sense = NULL,/* called unconditionally */
+ .hpd_set_polarity = NULL, /* called unconditionally */
+ .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
+ .page_flip_get_scanoutpos =
+ dm_crtc_get_scanoutpos,/* called unconditionally */
+ .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
+ .add_connector = NULL, /* VBIOS parsing. DAL does it. */
+};
+
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+
+static ssize_t s3_debug_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret;
+ int s3_state;
+ struct drm_device *drm_dev = dev_get_drvdata(device);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ ret = kstrtoint(buf, 0, &s3_state);
+
+ if (ret == 0) {
+ if (s3_state) {
+ dm_resume(adev);
+ drm_kms_helper_hotplug_event(adev_to_drm(adev));
+ } else
+ dm_suspend(adev);
+ }
+
+ return ret == 0 ? count : 0;
+}
+
+DEVICE_ATTR_WO(s3_debug);
+
+#endif
+
+static int dm_init_microcode(struct amdgpu_device *adev)
+{
+ char *fw_name_dmub;
+ int r;
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 1, 0):
+ fw_name_dmub = FIRMWARE_RENOIR_DMUB;
+ if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
+ fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
+ break;
+ case IP_VERSION(3, 0, 0):
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))
+ fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
+ else
+ fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
+ break;
+ case IP_VERSION(3, 0, 1):
+ fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
+ break;
+ case IP_VERSION(3, 0, 2):
+ fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
+ break;
+ case IP_VERSION(3, 0, 3):
+ fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
+ break;
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
+ break;
+ case IP_VERSION(3, 1, 4):
+ fw_name_dmub = FIRMWARE_DCN_314_DMUB;
+ break;
+ case IP_VERSION(3, 1, 5):
+ fw_name_dmub = FIRMWARE_DCN_315_DMUB;
+ break;
+ case IP_VERSION(3, 1, 6):
+ fw_name_dmub = FIRMWARE_DCN316_DMUB;
+ break;
+ case IP_VERSION(3, 2, 0):
+ fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
+ break;
+ case IP_VERSION(3, 2, 1):
+ fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
+ break;
+ case IP_VERSION(3, 5, 0):
+ fw_name_dmub = FIRMWARE_DCN_35_DMUB;
+ break;
+ case IP_VERSION(3, 5, 1):
+ fw_name_dmub = FIRMWARE_DCN_351_DMUB;
+ break;
+ default:
+ /* ASIC doesn't support DMUB. */
+ return 0;
+ }
+ r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
+ return r;
+}
+
+static int dm_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct atom_context *ctx = mode_info->atom_context;
+ int index = GetIndexIntoMasterTable(DATA, Object_Header);
+ u16 data_offset;
+
+ /* if there is no object header, skip DM */
+ if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ dev_info(adev->dev, "No object header, skipping DM\n");
+ return -ENOENT;
+ }
+
+ switch (adev->asic_type) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_OLAND:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 2;
+ adev->mode_info.num_dig = 2;
+ break;
+#endif
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_KAVERI:
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 7;
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 7;
+ break;
+ case CHIP_CARRIZO:
+ adev->mode_info.num_crtc = 3;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 9;
+ break;
+ case CHIP_STONEY:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 9;
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ adev->mode_info.num_crtc = 5;
+ adev->mode_info.num_hpd = 5;
+ adev->mode_info.num_dig = 5;
+ break;
+ case CHIP_POLARIS10:
+ case CHIP_VEGAM:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ default:
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(3, 0, 0):
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ adev->mode_info.num_crtc = 5;
+ adev->mode_info.num_hpd = 5;
+ adev->mode_info.num_dig = 5;
+ break;
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(3, 0, 3):
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 2;
+ adev->mode_info.num_dig = 2;
+ break;
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 4;
+ adev->mode_info.num_dig = 4;
+ break;
+ default:
+ DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
+ amdgpu_ip_version(adev, DCE_HWIP, 0));
+ return -EINVAL;
+ }
+ break;
+ }
+
+ if (adev->mode_info.funcs == NULL)
+ adev->mode_info.funcs = &dm_display_funcs;
+
+ /*
+ * Note: Do NOT change adev->audio_endpt_rreg and
+ * adev->audio_endpt_wreg because they are initialised in
+ * amdgpu_device_init()
+ */
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+ device_create_file(
+ adev_to_drm(adev)->dev,
+ &dev_attr_s3_debug);
+#endif
+ adev->dc_enabled = true;
+
+ return dm_init_microcode(adev);
+}
+
+static bool modereset_required(struct drm_crtc_state *crtc_state)
+{
+ return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
+}
+
+static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+ .destroy = amdgpu_dm_encoder_destroy,
+};
+
+static int
+fill_plane_color_attributes(const struct drm_plane_state *plane_state,
+ const enum surface_pixel_format format,
+ enum dc_color_space *color_space)
+{
+ bool full_range;
+
+ *color_space = COLOR_SPACE_SRGB;
+
+ /* DRM color properties only affect non-RGB formats. */
+ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return 0;
+
+ full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
+
+ switch (plane_state->color_encoding) {
+ case DRM_COLOR_YCBCR_BT601:
+ if (full_range)
+ *color_space = COLOR_SPACE_YCBCR601;
+ else
+ *color_space = COLOR_SPACE_YCBCR601_LIMITED;
+ break;
+
+ case DRM_COLOR_YCBCR_BT709:
+ if (full_range)
+ *color_space = COLOR_SPACE_YCBCR709;
+ else
+ *color_space = COLOR_SPACE_YCBCR709_LIMITED;
+ break;
+
+ case DRM_COLOR_YCBCR_BT2020:
+ if (full_range)
+ *color_space = COLOR_SPACE_2020_YCBCR;
+ else
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
+ const struct drm_plane_state *plane_state,
+ const u64 tiling_flags,
+ struct dc_plane_info *plane_info,
+ struct dc_plane_address *address,
+ bool tmz_surface,
+ bool force_disable_dcc)
+{
+ const struct drm_framebuffer *fb = plane_state->fb;
+ const struct amdgpu_framebuffer *afb =
+ to_amdgpu_framebuffer(plane_state->fb);
+ int ret;
+
+ memset(plane_info, 0, sizeof(*plane_info));
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ plane_info->format =
+ SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
+ break;
+ case DRM_FORMAT_RGB565:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
+ break;
+ case DRM_FORMAT_NV21:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
+ break;
+ case DRM_FORMAT_NV12:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
+ break;
+ case DRM_FORMAT_P010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
+ break;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
+ break;
+ case DRM_FORMAT_XRGB16161616:
+ case DRM_FORMAT_ARGB16161616:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
+ break;
+ case DRM_FORMAT_XBGR16161616:
+ case DRM_FORMAT_ABGR16161616:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
+ break;
+ default:
+ DRM_ERROR(
+ "Unsupported screen format %p4cc\n",
+ &fb->format->format);
+ return -EINVAL;
+ }
+
+ switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_0:
+ plane_info->rotation = ROTATION_ANGLE_0;
+ break;
+ case DRM_MODE_ROTATE_90:
+ plane_info->rotation = ROTATION_ANGLE_90;
+ break;
+ case DRM_MODE_ROTATE_180:
+ plane_info->rotation = ROTATION_ANGLE_180;
+ break;
+ case DRM_MODE_ROTATE_270:
+ plane_info->rotation = ROTATION_ANGLE_270;
+ break;
+ default:
+ plane_info->rotation = ROTATION_ANGLE_0;
+ break;
+ }
+
+
+ plane_info->visible = true;
+ plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
+
+ plane_info->layer_index = plane_state->normalized_zpos;
+
+ ret = fill_plane_color_attributes(plane_state, plane_info->format,
+ &plane_info->color_space);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format,
+ plane_info->rotation, tiling_flags,
+ &plane_info->tiling_info,
+ &plane_info->plane_size,
+ &plane_info->dcc, address,
+ tmz_surface, force_disable_dcc);
+ if (ret)
+ return ret;
+
+ amdgpu_dm_plane_fill_blending_from_plane_state(
+ plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
+ &plane_info->global_alpha, &plane_info->global_alpha_value);
+
+ return 0;
+}
+
+static int fill_dc_plane_attributes(struct amdgpu_device *adev,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
+ struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
+ struct dc_scaling_info scaling_info;
+ struct dc_plane_info plane_info;
+ int ret;
+ bool force_disable_dcc = false;
+
+ ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
+ if (ret)
+ return ret;
+
+ dc_plane_state->src_rect = scaling_info.src_rect;
+ dc_plane_state->dst_rect = scaling_info.dst_rect;
+ dc_plane_state->clip_rect = scaling_info.clip_rect;
+ dc_plane_state->scaling_quality = scaling_info.scaling_quality;
+
+ force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
+ ret = fill_dc_plane_info_and_addr(adev, plane_state,
+ afb->tiling_flags,
+ &plane_info,
+ &dc_plane_state->address,
+ afb->tmz_surface,
+ force_disable_dcc);
+ if (ret)
+ return ret;
+
+ dc_plane_state->format = plane_info.format;
+ dc_plane_state->color_space = plane_info.color_space;
+ dc_plane_state->plane_size = plane_info.plane_size;
+ dc_plane_state->rotation = plane_info.rotation;
+ dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
+ dc_plane_state->stereo_format = plane_info.stereo_format;
+ dc_plane_state->tiling_info = plane_info.tiling_info;
+ dc_plane_state->visible = plane_info.visible;
+ dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
+ dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
+ dc_plane_state->global_alpha = plane_info.global_alpha;
+ dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
+ dc_plane_state->dcc = plane_info.dcc;
+ dc_plane_state->layer_index = plane_info.layer_index;
+ dc_plane_state->flip_int_enabled = true;
+
+ /*
+ * Always set input transfer function, since plane state is refreshed
+ * every time.
+ */
+ ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state,
+ plane_state,
+ dc_plane_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static inline void fill_dc_dirty_rect(struct drm_plane *plane,
+ struct rect *dirty_rect, int32_t x,
+ s32 y, s32 width, s32 height,
+ int *i, bool ffu)
+{
+ WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
+
+ dirty_rect->x = x;
+ dirty_rect->y = y;
+ dirty_rect->width = width;
+ dirty_rect->height = height;
+
+ if (ffu)
+ drm_dbg(plane->dev,
+ "[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
+ plane->base.id, width, height);
+ else
+ drm_dbg(plane->dev,
+ "[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
+ plane->base.id, x, y, width, height);
+
+ (*i)++;
+}
+
+/**
+ * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
+ *
+ * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
+ * remote fb
+ * @old_plane_state: Old state of @plane
+ * @new_plane_state: New state of @plane
+ * @crtc_state: New state of CRTC connected to the @plane
+ * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
+ * @is_psr_su: Flag indicating whether Panel Self Refresh Selective Update (PSR SU) is enabled.
+ * If PSR SU is enabled and damage clips are available, only the regions of the screen
+ * that have changed will be updated. If PSR SU is not enabled,
+ * or if damage clips are not available, the entire screen will be updated.
+ * @dirty_regions_changed: out parameter, set when the plane's dirty regions
+ *                         have changed since the last flip
+ *
+ * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
+ * (referred to as "damage clips" in DRM nomenclature) that require updating on
+ * the eDP remote buffer. The responsibility of specifying the dirty regions is
+ * amdgpu_dm's.
+ *
+ * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
+ * plane with regions that require flushing to the eDP remote buffer. In
+ * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
+ * implicitly provide damage clips without any client support via the plane
+ * bounds.
+ */
+static void fill_dc_dirty_rects(struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state,
+ struct drm_crtc_state *crtc_state,
+ struct dc_flip_addrs *flip_addrs,
+ bool is_psr_su,
+ bool *dirty_regions_changed)
+{
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
+ struct rect *dirty_rects = flip_addrs->dirty_rects;
+ u32 num_clips;
+ struct drm_mode_rect *clips;
+ bool bb_changed;
+ bool fb_changed;
+ u32 i = 0;
+ *dirty_regions_changed = false;
+
+ /*
+	 * Cursor plane has its own dirty rect update interface. See
+ * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
+ */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ return;
+
+ if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
+ goto ffu;
+
+ num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+ clips = drm_plane_get_damage_clips(new_plane_state);
+
+ if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 &&
+ is_psr_su)))
+ goto ffu;
+
+ if (!dm_crtc_state->mpo_requested) {
+ if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
+ goto ffu;
+
+ for (; flip_addrs->dirty_rect_count < num_clips; clips++)
+ fill_dc_dirty_rect(new_plane_state->plane,
+ &dirty_rects[flip_addrs->dirty_rect_count],
+ clips->x1, clips->y1,
+ clips->x2 - clips->x1, clips->y2 - clips->y1,
+ &flip_addrs->dirty_rect_count,
+ false);
+ return;
+ }
+
+ /*
+ * MPO is requested. Add entire plane bounding box to dirty rects if
+ * flipped to or damaged.
+ *
+ * If plane is moved or resized, also add old bounding box to dirty
+ * rects.
+ */
+ fb_changed = old_plane_state->fb->base.id !=
+ new_plane_state->fb->base.id;
+ bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
+ old_plane_state->crtc_y != new_plane_state->crtc_y ||
+ old_plane_state->crtc_w != new_plane_state->crtc_w ||
+ old_plane_state->crtc_h != new_plane_state->crtc_h);
+
+ drm_dbg(plane->dev,
+ "[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
+ new_plane_state->plane->base.id,
+ bb_changed, fb_changed, num_clips);
+
+ *dirty_regions_changed = bb_changed;
+
+ if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS)
+ goto ffu;
+
+ if (bb_changed) {
+ fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
+ new_plane_state->crtc_x,
+ new_plane_state->crtc_y,
+ new_plane_state->crtc_w,
+ new_plane_state->crtc_h, &i, false);
+
+ /* Add old plane bounding-box if plane is moved or resized */
+ fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
+ old_plane_state->crtc_x,
+ old_plane_state->crtc_y,
+ old_plane_state->crtc_w,
+ old_plane_state->crtc_h, &i, false);
+ }
+
+ if (num_clips) {
+ for (; i < num_clips; clips++)
+ fill_dc_dirty_rect(new_plane_state->plane,
+ &dirty_rects[i], clips->x1,
+ clips->y1, clips->x2 - clips->x1,
+ clips->y2 - clips->y1, &i, false);
+ } else if (fb_changed && !bb_changed) {
+ fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
+ new_plane_state->crtc_x,
+ new_plane_state->crtc_y,
+ new_plane_state->crtc_w,
+ new_plane_state->crtc_h, &i, false);
+ }
+
+ flip_addrs->dirty_rect_count = i;
+ return;
+
+ffu:
+ fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
+ dm_crtc_state->base.mode.crtc_hdisplay,
+ dm_crtc_state->base.mode.crtc_vdisplay,
+ &flip_addrs->dirty_rect_count, true);
+}
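+/*
+ * Illustrative example (not part of the original change): if an MPO plane
+ * moves from (0, 0) to (100, 50) while keeping a 400x300 size, bb_changed is
+ * true and both the new bounding box (100, 50, 400, 300) and the old one
+ * (0, 0, 400, 300) are added, so flip_addrs->dirty_rect_count becomes 2
+ * before any damage clips are appended.
+ */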
+
+static void update_stream_scaling_settings(const struct drm_display_mode *mode,
+ const struct dm_connector_state *dm_state,
+ struct dc_stream_state *stream)
+{
+ enum amdgpu_rmx_type rmx_type;
+
+	struct rect src = { 0 }; /* viewport in composition space */
+ struct rect dst = { 0 }; /* stream addressable area */
+
+ /* no mode. nothing to be done */
+ if (!mode)
+ return;
+
+ /* Full screen scaling by default */
+ src.width = mode->hdisplay;
+ src.height = mode->vdisplay;
+ dst.width = stream->timing.h_addressable;
+ dst.height = stream->timing.v_addressable;
+
+ if (dm_state) {
+ rmx_type = dm_state->scaling;
+ if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
+ if (src.width * dst.height <
+ src.height * dst.width) {
+ /* height needs less upscaling/more downscaling */
+ dst.width = src.width *
+ dst.height / src.height;
+ } else {
+ /* width needs less upscaling/more downscaling */
+ dst.height = src.height *
+ dst.width / src.width;
+ }
+ } else if (rmx_type == RMX_CENTER) {
+ dst = src;
+ }
+
+ dst.x = (stream->timing.h_addressable - dst.width) / 2;
+ dst.y = (stream->timing.v_addressable - dst.height) / 2;
+
+ if (dm_state->underscan_enable) {
+ dst.x += dm_state->underscan_hborder / 2;
+ dst.y += dm_state->underscan_vborder / 2;
+ dst.width -= dm_state->underscan_hborder;
+ dst.height -= dm_state->underscan_vborder;
+ }
+ }
+
+ stream->src = src;
+ stream->dst = dst;
+
+ DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
+ dst.x, dst.y, dst.width, dst.height);
+
+}
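+/*
+ * Worked example (illustrative, not part of the original change): scaling a
+ * 1280x1024 source to a 1920x1080 stream with RMX_ASPECT, we have
+ * 1280 * 1080 = 1382400 < 1024 * 1920 = 1966080, so the height needs less
+ * upscaling and dst.width becomes 1280 * 1080 / 1024 = 1350. Centering then
+ * gives dst.x = (1920 - 1350) / 2 = 285, i.e. a pillarboxed image.
+ */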
+
+static enum dc_color_depth
+convert_color_depth_from_display_info(const struct drm_connector *connector,
+ bool is_y420, int requested_bpc)
+{
+ u8 bpc;
+
+ if (is_y420) {
+ bpc = 8;
+
+ /* Cap display bpc based on HDMI 2.0 HF-VSDB */
+ if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
+ bpc = 16;
+ else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
+ bpc = 12;
+ else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
+ bpc = 10;
+ } else {
+ bpc = (uint8_t)connector->display_info.bpc;
+ /* Assume 8 bpc by default if no bpc is specified. */
+ bpc = bpc ? bpc : 8;
+ }
+
+ if (requested_bpc > 0) {
+ /*
+ * Cap display bpc based on the user requested value.
+ *
+		 * The value for state->max_bpc may not be correctly updated
+ * depending on when the connector gets added to the state
+ * or if this was called outside of atomic check, so it
+ * can't be used directly.
+ */
+ bpc = min_t(u8, bpc, requested_bpc);
+
+ /* Round down to the nearest even number. */
+ bpc = bpc - (bpc & 1);
+ }
+
+ switch (bpc) {
+ case 0:
+ /*
+		 * Temporary workaround: DRM doesn't parse color depth for
+		 * EDID revisions before 1.4.
+ * TODO: Fix edid parsing
+ */
+ return COLOR_DEPTH_888;
+ case 6:
+ return COLOR_DEPTH_666;
+ case 8:
+ return COLOR_DEPTH_888;
+ case 10:
+ return COLOR_DEPTH_101010;
+ case 12:
+ return COLOR_DEPTH_121212;
+ case 14:
+ return COLOR_DEPTH_141414;
+ case 16:
+ return COLOR_DEPTH_161616;
+ default:
+ return COLOR_DEPTH_UNDEFINED;
+ }
+}
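+/*
+ * Worked example (illustrative, not part of the original change): a panel
+ * reporting 12 bpc with a user-requested max of 11 yields
+ * min_t(u8, 12, 11) = 11, which rounds down to the even value 10 and is
+ * returned as COLOR_DEPTH_101010.
+ */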
+
+static enum dc_aspect_ratio
+get_aspect_ratio(const struct drm_display_mode *mode_in)
+{
+ /* 1-1 mapping, since both enums follow the HDMI spec. */
+ return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
+}
+
+static enum dc_color_space
+get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
+ const struct drm_connector_state *connector_state)
+{
+ enum dc_color_space color_space = COLOR_SPACE_SRGB;
+
+ switch (connector_state->colorspace) {
+ case DRM_MODE_COLORIMETRY_BT601_YCC:
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space = COLOR_SPACE_YCBCR601_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR601;
+ break;
+ case DRM_MODE_COLORIMETRY_BT709_YCC:
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space = COLOR_SPACE_YCBCR709_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR709;
+ break;
+ case DRM_MODE_COLORIMETRY_OPRGB:
+ color_space = COLOR_SPACE_ADOBERGB;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_RGB:
+ case DRM_MODE_COLORIMETRY_BT2020_YCC:
+ if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
+ color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
+ else
+ color_space = COLOR_SPACE_2020_YCBCR;
+ break;
+ case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
+ default:
+ if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
+ color_space = COLOR_SPACE_SRGB;
+ /*
+			 * 27030 kHz is the separation point between HDTV and SDTV
+			 * according to the HDMI spec; we use YCbCr709 and YCbCr601
+			 * respectively.
+ */
+ } else if (dc_crtc_timing->pix_clk_100hz > 270300) {
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space =
+ COLOR_SPACE_YCBCR709_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR709;
+ } else {
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space =
+ COLOR_SPACE_YCBCR601_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR601;
+ }
+ break;
+ }
+
+ return color_space;
+}
+
+static enum display_content_type
+get_output_content_type(const struct drm_connector_state *connector_state)
+{
+ switch (connector_state->content_type) {
+ default:
+ case DRM_MODE_CONTENT_TYPE_NO_DATA:
+ return DISPLAY_CONTENT_TYPE_NO_DATA;
+ case DRM_MODE_CONTENT_TYPE_GRAPHICS:
+ return DISPLAY_CONTENT_TYPE_GRAPHICS;
+ case DRM_MODE_CONTENT_TYPE_PHOTO:
+ return DISPLAY_CONTENT_TYPE_PHOTO;
+ case DRM_MODE_CONTENT_TYPE_CINEMA:
+ return DISPLAY_CONTENT_TYPE_CINEMA;
+ case DRM_MODE_CONTENT_TYPE_GAME:
+ return DISPLAY_CONTENT_TYPE_GAME;
+ }
+}
+
+static bool adjust_colour_depth_from_display_info(
+ struct dc_crtc_timing *timing_out,
+ const struct drm_display_info *info)
+{
+ enum dc_color_depth depth = timing_out->display_color_depth;
+ int normalized_clk;
+
+ do {
+ normalized_clk = timing_out->pix_clk_100hz / 10;
+ /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
+ if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ normalized_clk /= 2;
+		/* Adjust the pixel clock per the HDMI spec, based on colour depth */
+ switch (depth) {
+ case COLOR_DEPTH_888:
+ break;
+ case COLOR_DEPTH_101010:
+ normalized_clk = (normalized_clk * 30) / 24;
+ break;
+ case COLOR_DEPTH_121212:
+ normalized_clk = (normalized_clk * 36) / 24;
+ break;
+ case COLOR_DEPTH_161616:
+ normalized_clk = (normalized_clk * 48) / 24;
+ break;
+ default:
+ /* The above depths are the only ones valid for HDMI. */
+ return false;
+ }
+ if (normalized_clk <= info->max_tmds_clock) {
+ timing_out->display_color_depth = depth;
+ return true;
+ }
+ } while (--depth > COLOR_DEPTH_666);
+ return false;
+}
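+/*
+ * Worked example (illustrative, not part of the original change, assuming a
+ * sink advertising a 600000 kHz max TMDS clock): a 594000 kHz (4K@60) timing
+ * at 12 bpc normalizes to 594000 * 36 / 24 = 891000 kHz, which exceeds the
+ * limit, as does 10 bpc (742500 kHz), so the loop settles on 8 bpc at
+ * 594000 kHz.
+ */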
+
+static void fill_stream_properties_from_drm_display_mode(
+ struct dc_stream_state *stream,
+ const struct drm_display_mode *mode_in,
+ const struct drm_connector *connector,
+ const struct drm_connector_state *connector_state,
+ const struct dc_stream_state *old_stream,
+ int requested_bpc)
+{
+ struct dc_crtc_timing *timing_out = &stream->timing;
+ const struct drm_display_info *info = &connector->display_info;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct hdmi_vendor_infoframe hv_frame;
+ struct hdmi_avi_infoframe avi_frame;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ memset(&hv_frame, 0, sizeof(hv_frame));
+ memset(&avi_frame, 0, sizeof(avi_frame));
+
+ timing_out->h_border_left = 0;
+ timing_out->h_border_right = 0;
+ timing_out->v_border_top = 0;
+ timing_out->v_border_bottom = 0;
+ /* TODO: un-hardcode */
+ if (drm_mode_is_420_only(info, mode_in)
+ && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if (drm_mode_is_420_also(info, mode_in)
+ && aconnector
+ && aconnector->force_yuv420_output)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
+ && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
+ else
+ timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
+
+ timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
+ timing_out->display_color_depth = convert_color_depth_from_display_info(
+ connector,
+ (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
+ requested_bpc);
+ timing_out->scan_type = SCANNING_TYPE_NODATA;
+ timing_out->hdmi_vic = 0;
+
+ if (old_stream) {
+ timing_out->vic = old_stream->timing.vic;
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
+ } else {
+ timing_out->vic = drm_match_cea_mode(mode_in);
+ if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
+ if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
+ }
+
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
+ timing_out->vic = avi_frame.video_code;
+ drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
+ timing_out->hdmi_vic = hv_frame.vic;
+ }
+
+ if (aconnector && is_freesync_video_mode(mode_in, aconnector)) {
+ timing_out->h_addressable = mode_in->hdisplay;
+ timing_out->h_total = mode_in->htotal;
+ timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
+ timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
+ timing_out->v_total = mode_in->vtotal;
+ timing_out->v_addressable = mode_in->vdisplay;
+ timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
+ timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
+ timing_out->pix_clk_100hz = mode_in->clock * 10;
+ } else {
+ timing_out->h_addressable = mode_in->crtc_hdisplay;
+ timing_out->h_total = mode_in->crtc_htotal;
+ timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
+ timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
+ timing_out->v_total = mode_in->crtc_vtotal;
+ timing_out->v_addressable = mode_in->crtc_vdisplay;
+ timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
+ timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
+ timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
+ }
+
+ timing_out->aspect_ratio = get_aspect_ratio(mode_in);
+
+ stream->out_transfer_func.type = TF_TYPE_PREDEFINED;
+ stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ if (!adjust_colour_depth_from_display_info(timing_out, info) &&
+ drm_mode_is_420_also(info, mode_in) &&
+ timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ adjust_colour_depth_from_display_info(timing_out, info);
+ }
+ }
+
+ stream->output_color_space = get_output_color_space(timing_out, connector_state);
+ stream->content_type = get_output_content_type(connector_state);
+}
+
+static void fill_audio_info(struct audio_info *audio_info,
+ const struct drm_connector *drm_connector,
+ const struct dc_sink *dc_sink)
+{
+ int i = 0;
+ int cea_revision = 0;
+ const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
+
+ audio_info->manufacture_id = edid_caps->manufacturer_id;
+ audio_info->product_id = edid_caps->product_id;
+
+ cea_revision = drm_connector->display_info.cea_rev;
+
+ strscpy(audio_info->display_name,
+ edid_caps->display_name,
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
+
+ if (cea_revision >= 3) {
+ audio_info->mode_count = edid_caps->audio_mode_count;
+
+ for (i = 0; i < audio_info->mode_count; ++i) {
+ audio_info->modes[i].format_code =
+ (enum audio_format_code)
+ (edid_caps->audio_modes[i].format_code);
+ audio_info->modes[i].channel_count =
+ edid_caps->audio_modes[i].channel_count;
+ audio_info->modes[i].sample_rates.all =
+ edid_caps->audio_modes[i].sample_rate;
+ audio_info->modes[i].sample_size =
+ edid_caps->audio_modes[i].sample_size;
+ }
+ }
+
+ audio_info->flags.all = edid_caps->speaker_flags;
+
+	/* TODO: We only check the progressive mode; check interlaced mode too */
+ if (drm_connector->latency_present[0]) {
+ audio_info->video_latency = drm_connector->video_latency[0];
+ audio_info->audio_latency = drm_connector->audio_latency[0];
+ }
+
+ /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
+
+}
+
+static void
+copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
+ struct drm_display_mode *dst_mode)
+{
+ dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
+ dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
+ dst_mode->crtc_clock = src_mode->crtc_clock;
+ dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
+ dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
+ dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
+ dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
+ dst_mode->crtc_htotal = src_mode->crtc_htotal;
+ dst_mode->crtc_hskew = src_mode->crtc_hskew;
+ dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
+ dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
+ dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
+ dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
+ dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
+}
+
+static void
+decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
+ const struct drm_display_mode *native_mode,
+ bool scale_enabled)
+{
+ if (scale_enabled) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else if (native_mode->clock == drm_mode->clock &&
+ native_mode->htotal == drm_mode->htotal &&
+ native_mode->vtotal == drm_mode->vtotal) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else {
+		/* no scaling and no amdgpu-inserted mode; nothing to patch */
+ }
+}
+
+static struct dc_sink *
+create_fake_sink(struct dc_link *link)
+{
+ struct dc_sink_init_data sink_init_data = { 0 };
+ struct dc_sink *sink = NULL;
+
+ sink_init_data.link = link;
+ sink_init_data.sink_signal = link->connector_signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ DRM_ERROR("Failed to create sink!\n");
+ return NULL;
+ }
+ sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
+
+ return sink;
+}
+
+static void set_multisync_trigger_params(
+ struct dc_stream_state *stream)
+{
+ struct dc_stream_state *master = NULL;
+
+ if (stream->triggered_crtc_reset.enabled) {
+ master = stream->triggered_crtc_reset.event_source;
+ stream->triggered_crtc_reset.event =
+ master->timing.flags.VSYNC_POSITIVE_POLARITY ?
+ CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
+ stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
+ }
+}
+
+static void set_master_stream(struct dc_stream_state *stream_set[],
+ int stream_count)
+{
+ int j, highest_rfr = 0, master_stream = 0;
+
+ for (j = 0; j < stream_count; j++) {
+ if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
+ int refresh_rate = 0;
+
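+ /* pix_clk_100hz is in 100 Hz units; h_total * v_total is pixels per frame */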
+ refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
+ (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
+ if (refresh_rate > highest_rfr) {
+ highest_rfr = refresh_rate;
+ master_stream = j;
+ }
+ }
+ }
+ for (j = 0; j < stream_count; j++) {
+ if (stream_set[j])
+ stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
+ }
+}
+
+static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
+{
+ int i = 0;
+ struct dc_stream_state *stream;
+
+ if (context->stream_count < 2)
+ return;
+ for (i = 0; i < context->stream_count ; i++) {
+ if (!context->streams[i])
+ continue;
+ /*
+ * TODO: add a function to read AMD VSDB bits and set the
+ * crtc_sync_master.multi_sync_enabled flag.
+ * For now it's set to false.
+ */
+ }
+
+ set_master_stream(context->streams, context->stream_count);
+
+ for (i = 0; i < context->stream_count ; i++) {
+ stream = context->streams[i];
+
+ if (!stream)
+ continue;
+
+ set_multisync_trigger_params(stream);
+ }
+}
+
+/**
+ * DOC: FreeSync Video
+ *
+ * When a userspace application wants to play a video, the content follows a
+ * standard format definition that usually specifies the FPS for that format.
+ * The list below illustrates some video formats and their expected FPS:
+ *
+ * - TV/NTSC (23.976 FPS)
+ * - Cinema (24 FPS)
+ * - TV/PAL (25 FPS)
+ * - TV/NTSC (29.97 FPS)
+ * - TV/NTSC (30 FPS)
+ * - Cinema HFR (48 FPS)
+ * - TV/PAL (50 FPS)
+ * - Commonly used (60 FPS)
+ * - Multiples of 24 (48,72,96 FPS)
+ *
+ * The list of standard video formats is not huge and can be added to the
+ * connector modeset list beforehand. With that, userspace can leverage
+ * FreeSync to extend the front porch in order to attain the target refresh
+ * rate. Such a switch happens seamlessly, without screen blanking or
+ * reprogramming of the output in any other way. If userspace requests a
+ * modesetting change compatible with FreeSync modes that only differ in
+ * the refresh rate, DC will skip the full update and avoid any blink
+ * during the transition. For example, a video player can change the
+ * modesetting from 60Hz to 30Hz for playing TV/NTSC content when it goes
+ * full screen, without causing any display blink. The same concept
+ * applies to any other mode setting change.
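+ *
+ * As an illustrative example: a 1920x1080 mode with a 148.5 MHz pixel
+ * clock, htotal 2200 and vtotal 1125 refreshes at
+ * 148500000 / (2200 * 1125) = 60 Hz. Keeping the pixel clock and htotal
+ * fixed, a 48 Hz FreeSync video mode only needs the front porch stretched
+ * so that vtotal becomes 148500000 / (2200 * 48) ~= 1406 lines.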
+ */
+static struct drm_display_mode *
+get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
+ bool use_probed_modes)
+{
+ struct drm_display_mode *m, *m_pref = NULL;
+ u16 current_refresh, highest_refresh;
+ struct list_head *list_head = use_probed_modes ?
+ &aconnector->base.probed_modes :
+ &aconnector->base.modes;
+
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return NULL;
+
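+ /* Return the cached base mode if it was already computed */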
+ if (aconnector->freesync_vid_base.clock != 0)
+ return &aconnector->freesync_vid_base;
+
+ /* Find the preferred mode */
+ list_for_each_entry(m, list_head, head) {
+ if (m->type & DRM_MODE_TYPE_PREFERRED) {
+ m_pref = m;
+ break;
+ }
+ }
+
+ if (!m_pref) {
+ /* Probably an EDID with no preferred mode. Fall back to the first entry */
+ m_pref = list_first_entry_or_null(
+ &aconnector->base.modes, struct drm_display_mode, head);
+ if (!m_pref) {
+ DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
+ return NULL;
+ }
+ }
+
+ highest_refresh = drm_mode_vrefresh(m_pref);
+
+ /*
+ * Find the mode with the highest refresh rate at the same resolution.
+ * For some monitors, the preferred mode is not the one with the
+ * highest supported refresh rate.
+ */
+ list_for_each_entry(m, list_head, head) {
+ current_refresh = drm_mode_vrefresh(m);
+
+ if (m->hdisplay == m_pref->hdisplay &&
+ m->vdisplay == m_pref->vdisplay &&
+ highest_refresh < current_refresh) {
+ highest_refresh = current_refresh;
+ m_pref = m;
+ }
+ }
+
+ drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
+ return m_pref;
+}
+
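+/*
+ * A FreeSync video mode differs from the base (highest-refresh) mode only
+ * in the vertical front porch: same pixel clock and horizontal timing,
+ * with vsync_start/vsync_end shifted by exactly the vtotal difference.
+ */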
+static bool is_freesync_video_mode(const struct drm_display_mode *mode,
+ struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_display_mode *high_mode;
+ int timing_diff;
+
+ high_mode = get_highest_refresh_rate_mode(aconnector, false);
+ if (!high_mode || !mode)
+ return false;
+
+ timing_diff = high_mode->vtotal - mode->vtotal;
+
+ if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
+ high_mode->hdisplay != mode->hdisplay ||
+ high_mode->vdisplay != mode->vdisplay ||
+ high_mode->hsync_start != mode->hsync_start ||
+ high_mode->hsync_end != mode->hsync_end ||
+ high_mode->htotal != mode->htotal ||
+ high_mode->hskew != mode->hskew ||
+ high_mode->vscan != mode->vscan ||
+ high_mode->vsync_start - mode->vsync_start != timing_diff ||
+ high_mode->vsync_end - mode->vsync_end != timing_diff)
+ return false;
+ else
+ return true;
+}
+
+static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
+ struct dc_sink *sink, struct dc_stream_state *stream,
+ struct dsc_dec_dpcd_caps *dsc_caps)
+{
+ stream->timing.flags.DSC = 0;
+ dsc_caps->is_dsc_supported = false;
+
+ if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ sink->sink_signal == SIGNAL_TYPE_EDP)) {
+ if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
+ sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+ dsc_caps);
+ }
+}
+
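+/*
+ * eDP DSC policy: aim for a fixed 8 bpp target, clamped to the panel's
+ * edp_max_bits_per_pixel. If the resulting compressed bandwidth fits the
+ * verified link bandwidth, use that config and pin bits_per_pixel;
+ * otherwise fall back to a config computed against the full link
+ * bandwidth.
+ */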
+static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
+ struct dc_sink *sink, struct dc_stream_state *stream,
+ struct dsc_dec_dpcd_caps *dsc_caps,
+ uint32_t max_dsc_target_bpp_limit_override)
+{
+ const struct dc_link_settings *verified_link_cap = NULL;
+ u32 link_bw_in_kbps;
+ u32 edp_min_bpp_x16, edp_max_bpp_x16;
+ struct dc *dc = sink->ctx->dc;
+ struct dc_dsc_bw_range bw_range = {0};
+ struct dc_dsc_config dsc_cfg = {0};
+ struct dc_dsc_config_options dsc_options = {0};
+
+ dc_dsc_get_default_config_option(dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
+
+ verified_link_cap = dc_link_get_link_cap(stream->link);
+ link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
+ edp_min_bpp_x16 = 8 * 16;
+ edp_max_bpp_x16 = 8 * 16;
+
+ if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
+ edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
+
+ if (edp_max_bpp_x16 < edp_min_bpp_x16)
+ edp_min_bpp_x16 = edp_max_bpp_x16;
+
+ if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
+ dc->debug.dsc_min_slice_height_override,
+ edp_min_bpp_x16, edp_max_bpp_x16,
+ dsc_caps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &bw_range)) {
+
+ if (bw_range.max_kbps < link_bw_in_kbps) {
+ if (dc_dsc_compute_config(dc->res_pool->dscs[0],
+ dsc_caps,
+ &dsc_options,
+ 0,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &dsc_cfg)) {
+ stream->timing.dsc_cfg = dsc_cfg;
+ stream->timing.flags.DSC = 1;
+ stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
+ }
+ return;
+ }
+ }
+
+ if (dc_dsc_compute_config(dc->res_pool->dscs[0],
+ dsc_caps,
+ &dsc_options,
+ link_bw_in_kbps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &dsc_cfg)) {
+ stream->timing.dsc_cfg = dsc_cfg;
+ stream->timing.flags.DSC = 1;
+ }
+}
+
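+/*
+ * SST DSC policy: apply the eDP policy above for eDP sinks; for native DP
+ * sinks compute a DSC config against the link bandwidth, and for DP-HDMI
+ * PCONs only when the timing exceeds the link bandwidth. Debugfs
+ * overrides (force enable, slice counts, bits per pixel) are applied
+ * last.
+ */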
+static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
+ struct dc_sink *sink, struct dc_stream_state *stream,
+ struct dsc_dec_dpcd_caps *dsc_caps)
+{
+ struct drm_connector *drm_connector = &aconnector->base;
+ u32 link_bandwidth_kbps;
+ struct dc *dc = sink->ctx->dc;
+ u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
+ u32 dsc_max_supported_bw_in_kbps;
+ u32 max_dsc_target_bpp_limit_override =
+ drm_connector->display_info.max_dsc_bpp;
+ struct dc_dsc_config_options dsc_options = {0};
+
+ dc_dsc_get_default_config_option(dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
+
+ link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
+ dc_link_get_link_cap(aconnector->dc_link));
+
+ /* Set DSC policy according to dsc_clock_en */
+ dc_dsc_policy_set_enable_dsc_when_not_needed(
+ aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
+
+ if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
+ !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
+ dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
+
+ apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
+
+ } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+ dsc_caps,
+ &dsc_options,
+ link_bandwidth_kbps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &stream->timing.dsc_cfg)) {
+ stream->timing.flags.DSC = 1;
+ DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
+ }
+ } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
+ timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link));
+ max_supported_bw_in_kbps = link_bandwidth_kbps;
+ dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
+
+ if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
+ max_supported_bw_in_kbps > 0 &&
+ dsc_max_supported_bw_in_kbps > 0)
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+ dsc_caps,
+ &dsc_options,
+ dsc_max_supported_bw_in_kbps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &stream->timing.dsc_cfg)) {
+ stream->timing.flags.DSC = 1;
+ DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
+ __func__, drm_connector->name);
+ }
+ }
+ }
+
+ /* Overwrite the stream flag if DSC is enabled through debugfs */
+ if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
+ stream->timing.flags.DSC = 1;
+
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
+ stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
+
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
+ stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
+
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
+ stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
+}
+
+static struct dc_stream_state *
+create_stream_for_sink(struct drm_connector *connector,
+ const struct drm_display_mode *drm_mode,
+ const struct dm_connector_state *dm_state,
+ const struct dc_stream_state *old_stream,
+ int requested_bpc)
+{
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct drm_display_mode *preferred_mode = NULL;
+ const struct drm_connector_state *con_state = &dm_state->base;
+ struct dc_stream_state *stream = NULL;
+ struct drm_display_mode mode;
+ struct drm_display_mode saved_mode;
+ struct drm_display_mode *freesync_mode = NULL;
+ bool native_mode_found = false;
+ bool recalculate_timing = false;
+ bool scale = dm_state->scaling != RMX_OFF;
+ int mode_refresh;
+ int preferred_refresh = 0;
+ enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
+ struct dsc_dec_dpcd_caps dsc_caps;
+
+ struct dc_link *link = NULL;
+ struct dc_sink *sink = NULL;
+
+ drm_mode_init(&mode, drm_mode);
+ memset(&saved_mode, 0, sizeof(saved_mode));
+
+ if (connector == NULL) {
+ DRM_ERROR("connector is NULL!\n");
+ return stream;
+ }
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ link = aconnector->dc_link;
+ } else {
+ struct drm_writeback_connector *wbcon = NULL;
+ struct amdgpu_dm_wb_connector *dm_wbcon = NULL;
+
+ wbcon = drm_connector_to_writeback(connector);
+ dm_wbcon = to_amdgpu_dm_wb_connector(wbcon);
+ link = dm_wbcon->link;
+ }
+
+ if (!aconnector || !aconnector->dc_sink) {
+ sink = create_fake_sink(link);
+ if (!sink)
+ return stream;
+
+ } else {
+ sink = aconnector->dc_sink;
+ dc_sink_retain(sink);
+ }
+
+ stream = dc_create_stream_for_sink(sink);
+
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ goto finish;
+ }
+
+ /* We leave this NULL for writeback connectors */
+ stream->dm_stream_context = aconnector;
+
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE =
+ connector->display_info.hdmi.scdc.scrambling.low_rates;
+
+ list_for_each_entry(preferred_mode, &connector->modes, head) {
+ /* Search for preferred mode */
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
+ native_mode_found = true;
+ break;
+ }
+ }
+ if (!native_mode_found)
+ preferred_mode = list_first_entry_or_null(
+ &connector->modes,
+ struct drm_display_mode,
+ head);
+
+ mode_refresh = drm_mode_vrefresh(&mode);
+
+ if (preferred_mode == NULL) {
+ /*
+ * This may not be an error: the use case is when there are no
+ * usermode calls to reset and set the mode upon hotplug. In that
+ * case, we call set mode ourselves to restore the previous mode,
+ * and the mode list may not be filled in time.
+ */
+ DRM_DEBUG_DRIVER("No preferred mode found\n");
+ } else if (aconnector) {
+ recalculate_timing = amdgpu_freesync_vid_mode &&
+ is_freesync_video_mode(&mode, aconnector);
+ if (recalculate_timing) {
+ freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
+ drm_mode_copy(&saved_mode, &mode);
+ saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio;
+ drm_mode_copy(&mode, freesync_mode);
+ mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio;
+ } else {
+ decide_crtc_timing_for_drm_display_mode(
+ &mode, preferred_mode, scale);
+
+ preferred_refresh = drm_mode_vrefresh(preferred_mode);
+ }
+ }
+
+ if (recalculate_timing)
+ drm_mode_set_crtcinfo(&saved_mode, 0);
+
+ /*
+ * If scaling is enabled and the refresh rate didn't change,
+ * copy the VIC and polarities from the old timings.
+ */
+ if (!scale || mode_refresh != preferred_refresh)
+ fill_stream_properties_from_drm_display_mode(
+ stream, &mode, connector, con_state, NULL,
+ requested_bpc);
+ else
+ fill_stream_properties_from_drm_display_mode(
+ stream, &mode, connector, con_state, old_stream,
+ requested_bpc);
+
+ /* The rest isn't needed for writeback connectors */
+ if (!aconnector)
+ goto finish;
+
+ if (aconnector->timing_changed) {
+ drm_dbg(aconnector->base.dev,
+ "overriding timing for automated test, bpc %d, changing to %d\n",
+ stream->timing.display_color_depth,
+ aconnector->timing_requested->display_color_depth);
+ stream->timing = *aconnector->timing_requested;
+ }
+
+ /* SST DSC determination policy */
+ update_dsc_caps(aconnector, sink, stream, &dsc_caps);
+ if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
+ apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
+
+ update_stream_scaling_settings(&mode, dm_state, stream);
+
+ fill_audio_info(
+ &stream->audio_info,
+ connector,
+ sink);
+
+ update_stream_signal(stream, sink);
+
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
+
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ stream->signal == SIGNAL_TYPE_EDP) {
+ //
+ // Decide whether the stream supports VSC SDP colorimetry
+ // before building the VSC info packet.
+ //
+ stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
+ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
+
+ if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
+ tf = TRANSFER_FUNC_GAMMA_22;
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
+ aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+
+ }
+finish:
+ dc_sink_release(sink);
+
+ return stream;
+}
+
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+ bool connected;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ /*
+ * Notes:
+ * 1. This interface is NOT called in the context of the HPD irq.
+ * 2. This interface *is* called in the context of a user-mode ioctl,
+ * which makes it a bad place for *any* MST-related activity.
+ */
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
+ !aconnector->fake_enable)
+ connected = (aconnector->dc_sink != NULL);
+ else
+ connected = (aconnector->base.force == DRM_FORCE_ON ||
+ aconnector->base.force == DRM_FORCE_ON_DIGITAL);
+
+ update_subconnector_property(aconnector);
+
+ return (connected ? connector_status_connected :
+ connector_status_disconnected);
+}
+
+int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *connector_state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dm_connector_state *dm_old_state =
+ to_dm_connector_state(connector->state);
+ struct dm_connector_state *dm_new_state =
+ to_dm_connector_state(connector_state);
+
+ int ret = -EINVAL;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ enum amdgpu_rmx_type rmx_type;
+
+ switch (val) {
+ case DRM_MODE_SCALE_CENTER:
+ rmx_type = RMX_CENTER;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ rmx_type = RMX_ASPECT;
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ rmx_type = RMX_FULL;
+ break;
+ case DRM_MODE_SCALE_NONE:
+ default:
+ rmx_type = RMX_OFF;
+ break;
+ }
+
+ if (dm_old_state->scaling == rmx_type)
+ return 0;
+
+ dm_new_state->scaling = rmx_type;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_hborder_property) {
+ dm_new_state->underscan_hborder = val;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_vborder_property) {
+ dm_new_state->underscan_vborder = val;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_property) {
+ dm_new_state->underscan_enable = val;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dm_connector_state *dm_state =
+ to_dm_connector_state(state);
+ int ret = -EINVAL;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ switch (dm_state->scaling) {
+ case RMX_CENTER:
+ *val = DRM_MODE_SCALE_CENTER;
+ break;
+ case RMX_ASPECT:
+ *val = DRM_MODE_SCALE_ASPECT;
+ break;
+ case RMX_FULL:
+ *val = DRM_MODE_SCALE_FULLSCREEN;
+ break;
+ case RMX_OFF:
+ default:
+ *val = DRM_MODE_SCALE_NONE;
+ break;
+ }
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_hborder_property) {
+ *val = dm_state->underscan_hborder;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_vborder_property) {
+ *val = dm_state->underscan_vborder;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_property) {
+ *val = dm_state->underscan_enable;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * DOC: panel power savings
+ *
+ * The display manager allows you to set your desired **panel power savings**
+ * level (between 0-4, with 0 representing off), e.g. using the following::
+ *
+ * # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings
+ *
+ * Modifying this value can have implications on color accuracy, so tread
+ * carefully.
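+ *
+ * The current level can be read back the same way, e.g.::
+ *
+ * # cat /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings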
+ */
+
+static ssize_t panel_power_savings_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_connector *connector = dev_get_drvdata(device);
+ struct drm_device *dev = connector->dev;
+ u8 val;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ val = to_dm_connector_state(connector->state)->abm_level ==
+ ABM_LEVEL_IMMEDIATE_DISABLE ? 0 :
+ to_dm_connector_state(connector->state)->abm_level;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t panel_power_savings_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_connector *connector = dev_get_drvdata(device);
+ struct drm_device *dev = connector->dev;
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 0, &val);
+
+ if (ret)
+ return ret;
+
+ if (val < 0 || val > 4)
+ return -EINVAL;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ to_dm_connector_state(connector->state)->abm_level = val ?:
+ ABM_LEVEL_IMMEDIATE_DISABLE;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ drm_kms_helper_hotplug_event(dev);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(panel_power_savings);
+
+static struct attribute *amdgpu_attrs[] = {
+ &dev_attr_panel_power_savings.attr,
+ NULL
+};
+
+static const struct attribute_group amdgpu_group = {
+ .name = "amdgpu",
+ .attrs = amdgpu_attrs
+};
+
+static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+ amdgpu_dm_abm_level < 0)
+ sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group);
+
+ drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
+}
+
+static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+
+ /*
+ * Only call this if mst_mgr was initialized earlier, since that is
+ * not done for all connector types.
+ */
+ if (aconnector->mst_mgr.dev)
+ drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
+
+ if (aconnector->bl_idx != -1) {
+ backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
+ dm->backlight_dev[aconnector->bl_idx] = NULL;
+ }
+
+ if (aconnector->dc_em_sink)
+ dc_sink_release(aconnector->dc_em_sink);
+ aconnector->dc_em_sink = NULL;
+ if (aconnector->dc_sink)
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+
+ drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ if (aconnector->i2c) {
+ i2c_del_adapter(&aconnector->i2c->base);
+ kfree(aconnector->i2c);
+ }
+ kfree(aconnector->dm_dp_aux.aux.name);
+
+ kfree(connector);
+}
+
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
+{
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+
+ kfree(state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (state) {
+ state->scaling = RMX_OFF;
+ state->underscan_enable = false;
+ state->underscan_hborder = 0;
+ state->underscan_vborder = 0;
+ state->base.max_requested_bpc = 8;
+ state->vcpi_slots = 0;
+ state->pbn = 0;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (amdgpu_dm_abm_level <= 0)
+ state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE;
+ else
+ state->abm_level = amdgpu_dm_abm_level;
+ }
+
+ __drm_atomic_helper_connector_reset(connector, &state->base);
+ }
+}
+
+struct drm_connector_state *
+amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
+{
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
+ struct dm_connector_state *new_state =
+ kmemdup(state, sizeof(*state), GFP_KERNEL);
+
+ if (!new_state)
+ return NULL;
+
+ __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
+
+ new_state->freesync_capable = state->freesync_capable;
+ new_state->abm_level = state->abm_level;
+ new_state->scaling = state->scaling;
+ new_state->underscan_enable = state->underscan_enable;
+ new_state->underscan_hborder = state->underscan_hborder;
+ new_state->underscan_vborder = state->underscan_vborder;
+ new_state->vcpi_slots = state->vcpi_slots;
+ new_state->pbn = state->pbn;
+ return &new_state->base;
+}
+
+static int
+amdgpu_dm_connector_late_register(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ int r;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+ amdgpu_dm_abm_level < 0) {
+ r = sysfs_create_group(&connector->kdev->kobj,
+ &amdgpu_group);
+ if (r)
+ return r;
+ }
+
+ amdgpu_dm_register_backlight_device(amdgpu_dm_connector);
+
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
+ r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
+ if (r)
+ return r;
+ }
+
+#if defined(CONFIG_DEBUG_FS)
+ connector_debugfs_init(amdgpu_dm_connector);
+#endif
+
+ return 0;
+}
+
+static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dc_link *dc_link = aconnector->dc_link;
+ struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
+ struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link && dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
+
+ /*
+ * Note: drm_get_edid gets edid in the following order:
+ * 1) override EDID if set via edid_override debugfs,
+ * 2) firmware EDID if set via edid_firmware module parameter
+ * 3) regular DDC read.
+ */
+ edid = drm_get_edid(connector, ddc);
+ if (!edid) {
+ DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ return;
+ }
+
+ aconnector->edid = edid;
+
+ /* Update emulated (virtual) sink's EDID */
+ if (dc_em_sink && dc_link) {
+ memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
+ memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH);
+ dm_helpers_parse_edid_caps(
+ dc_link,
+ &dc_em_sink->dc_edid,
+ &dc_em_sink->edid_caps);
+ }
+}
+
+static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
+ .reset = amdgpu_dm_connector_funcs_reset,
+ .detect = amdgpu_dm_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = amdgpu_dm_connector_destroy,
+ .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
+ .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
+ .late_register = amdgpu_dm_connector_late_register,
+ .early_unregister = amdgpu_dm_connector_unregister,
+ .force = amdgpu_dm_connector_funcs_force
+};
+
+static int get_modes(struct drm_connector *connector)
+{
+ return amdgpu_dm_connector_get_modes(connector);
+}
+
+static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct dc_link *dc_link = aconnector->dc_link;
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_VIRTUAL
+ };
+ struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
+
+ /*
+ * Note: drm_get_edid gets edid in the following order:
+ * 1) override EDID if set via edid_override debugfs,
+ * 2) firmware EDID if set via edid_firmware module parameter
+ * 3) regular DDC read.
+ */
+ edid = drm_get_edid(connector, ddc);
+ if (!edid) {
+ DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ return;
+ }
+
+ if (drm_detect_hdmi_monitor(edid))
+ init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
+
+ aconnector->edid = edid;
+
+ aconnector->dc_em_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ (uint8_t *)edid,
+ (edid->extensions + 1) * EDID_LENGTH,
+ &init_params);
+
+ if (aconnector->base.force == DRM_FORCE_ON) {
+ aconnector->dc_sink = aconnector->dc_link->local_sink ?
+ aconnector->dc_link->local_sink :
+ aconnector->dc_em_sink;
+ dc_sink_retain(aconnector->dc_sink);
+ }
+}
+
+static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_link *link = (struct dc_link *)aconnector->dc_link;
+
+ /*
+ * In case of a headless boot with force-on for a DP managed
+ * connector, these settings have to be != 0 to get an initial
+ * modeset.
+ */
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
+ link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
+ }
+
+ create_eml_sink(aconnector);
+}
+
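+/*
+ * Validate a mode the way a commit would see it: build a throwaway
+ * dc_state with the stream and a single full-screen ARGB8888 plane
+ * attached, then run stream, plane and global validation on it.
+ */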
+static enum dc_status dm_validate_stream_and_context(struct dc *dc,
+ struct dc_stream_state *stream)
+{
+ enum dc_status dc_result = DC_ERROR_UNEXPECTED;
+ struct dc_plane_state *dc_plane_state = NULL;
+ struct dc_state *dc_state = NULL;
+
+ if (!stream)
+ goto cleanup;
+
+ dc_plane_state = dc_create_plane_state(dc);
+ if (!dc_plane_state)
+ goto cleanup;
+
+ dc_state = dc_state_create(dc, NULL);
+ if (!dc_state)
+ goto cleanup;
+
+ /* populate stream to plane */
+ dc_plane_state->src_rect.height = stream->src.height;
+ dc_plane_state->src_rect.width = stream->src.width;
+ dc_plane_state->dst_rect.height = stream->src.height;
+ dc_plane_state->dst_rect.width = stream->src.width;
+ dc_plane_state->clip_rect.height = stream->src.height;
+ dc_plane_state->clip_rect.width = stream->src.width;
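+ /* round the pitch up to a multiple of 256 pixels */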
+ dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
+ dc_plane_state->plane_size.surface_size.height = stream->src.height;
+ dc_plane_state->plane_size.surface_size.width = stream->src.width;
+ dc_plane_state->plane_size.chroma_size.height = stream->src.height;
+ dc_plane_state->plane_size.chroma_size.width = stream->src.width;
+ dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+ dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
+ dc_plane_state->rotation = ROTATION_ANGLE_0;
+ dc_plane_state->is_tiling_rotated = false;
+ dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;
+
+ dc_result = dc_validate_stream(dc, stream);
+ if (dc_result == DC_OK)
+ dc_result = dc_validate_plane(dc, dc_plane_state);
+
+ if (dc_result == DC_OK)
+ dc_result = dc_state_add_stream(dc, dc_state, stream);
+
+ if (dc_result == DC_OK && !dc_state_add_plane(
+ dc,
+ stream,
+ dc_plane_state,
+ dc_state))
+ dc_result = DC_FAIL_ATTACH_SURFACES;
+
+ if (dc_result == DC_OK)
+ dc_result = dc_validate_global_state(dc, dc_state, true);
+
+cleanup:
+ if (dc_state)
+ dc_state_release(dc_state);
+
+ if (dc_plane_state)
+ dc_plane_state_release(dc_plane_state);
+
+ return dc_result;
+}
+
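+/*
+ * Create a stream and validate it against DC, retrying with progressively
+ * lower color depth (down to 6 bpc) on validation failure; as a last
+ * resort, retry once more with YCbCr420 output forced.
+ */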
+struct dc_stream_state *
+create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ const struct drm_display_mode *drm_mode,
+ const struct dm_connector_state *dm_state,
+ const struct dc_stream_state *old_stream)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
+ struct dc_stream_state *stream;
+ const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
+ int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
+ enum dc_status dc_result = DC_OK;
+
+ do {
+ stream = create_stream_for_sink(connector, drm_mode,
+ dm_state, old_stream,
+ requested_bpc);
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ break;
+ }
+
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return stream;
+
+ dc_result = dc_validate_stream(adev->dm.dc, stream);
+ if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
+
+ if (dc_result == DC_OK)
+ dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
+
+ if (dc_result != DC_OK) {
+ DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
+ drm_mode->hdisplay,
+ drm_mode->vdisplay,
+ drm_mode->clock,
+ dc_result,
+ dc_status_to_str(dc_result));
+
+ dc_stream_release(stream);
+ stream = NULL;
+ requested_bpc -= 2; /* lower bpc to retry validation */
+ }
+
+ } while (stream == NULL && requested_bpc >= 6);
+
+ if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
+ DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
+
+ aconnector->force_yuv420_output = true;
+ stream = create_validate_stream_for_sink(aconnector, drm_mode,
+ dm_state, old_stream);
+ aconnector->force_yuv420_output = false;
+ }
+
+ return stream;
+}
+
+enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int result = MODE_ERROR;
+ struct dc_sink *dc_sink;
+ /* TODO: Unhardcode stream count */
+ struct dc_stream_state *stream;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN))
+ return result;
+
+ /*
+ * Only run this the first time mode_valid is called, to initialize
+ * EDID management.
+ */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
+ !aconnector->dc_em_sink)
+ handle_edid_mgmt(aconnector);
+
+ dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
+
+ if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
+ aconnector->base.force != DRM_FORCE_ON) {
+ DRM_ERROR("dc_sink is NULL!\n");
+ goto fail;
+ }
+
+ drm_mode_set_crtcinfo(mode, 0);
+
+ stream = create_validate_stream_for_sink(aconnector, mode,
+ to_dm_connector_state(connector->state),
+ NULL);
+ if (stream) {
+ dc_stream_release(stream);
+ result = MODE_OK;
+ }
+
+fail:
+ /* TODO: error handling */
+ return result;
+}
+
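+/*
+ * Pack the connector's HDR output metadata into a DC info packet: an HDMI
+ * Dynamic Range and Mastering (DRM, type 0x87) infoframe for HDMI sinks,
+ * or the equivalent SDP for DP/eDP sinks.
+ */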
+static int fill_hdr_info_packet(const struct drm_connector_state *state,
+ struct dc_info_packet *out)
+{
+ struct hdmi_drm_infoframe frame;
+ unsigned char buf[30]; /* 26 + 4 */
+ ssize_t len;
+ int ret, i;
+
+ memset(out, 0, sizeof(*out));
+
+ if (!state->hdr_output_metadata)
+ return 0;
+
+ ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
+ if (ret)
+ return ret;
+
+ len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
+ if (len < 0)
+ return (int)len;
+
+ /* Static metadata is a fixed 26 bytes + 4 byte header. */
+ if (len != 30)
+ return -EINVAL;
+
+ /* Prepare the infopacket for DC. */
+ switch (state->connector->connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ out->hb0 = 0x87; /* type */
+ out->hb1 = 0x01; /* version */
+ out->hb2 = 0x1A; /* length */
+ out->sb[0] = buf[3]; /* checksum */
+ i = 1;
+ break;
+
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ out->hb0 = 0x00; /* sdp id, zero */
+ out->hb1 = 0x87; /* type */
+ out->hb2 = 0x1D; /* payload len - 1 */
+ out->hb3 = (0x13 << 2); /* sdp version */
+ out->sb[0] = 0x01; /* version */
+ out->sb[1] = 0x1A; /* length */
+ i = 2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(&out->sb[i], &buf[4], 26);
+ out->valid = true;
+
+ print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
+ sizeof(out->sb), false);
+
+ return 0;
+}
+
+static int
+amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *new_con_state =
+ drm_atomic_get_new_connector_state(state, conn);
+ struct drm_connector_state *old_con_state =
+ drm_atomic_get_old_connector_state(state, conn);
+ struct drm_crtc *crtc = new_con_state->crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
+ int ret;
+
+ trace_amdgpu_dm_connector_atomic_check(new_con_state);
+
+ if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!crtc)
+ return 0;
+
+ if (new_con_state->colorspace != old_con_state->colorspace) {
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ new_crtc_state->mode_changed = true;
+ }
+
+ if (new_con_state->content_type != old_con_state->content_type) {
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ new_crtc_state->mode_changed = true;
+ }
+
+ if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
+ struct dc_info_packet hdr_infopacket;
+
+ ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
+ if (ret)
+ return ret;
+
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ /*
+ * DC considers the stream backends changed if the
+ * static metadata changes. Forcing the modeset also
+ * gives a simple way for userspace to switch from
+ * 8bpc to 10bpc when setting the metadata to enter
+ * or exit HDR.
+ *
+ * Changing the static metadata after it's been
+ * set is permissible, however. So only force a
+ * modeset if we're entering or exiting HDR.
+ */
+ new_crtc_state->mode_changed = new_crtc_state->mode_changed ||
+ !old_con_state->hdr_output_metadata ||
+ !new_con_state->hdr_output_metadata;
+ }
+
+ return 0;
+}
+
+static const struct drm_connector_helper_funcs
+amdgpu_dm_connector_helper_funcs = {
+ /*
+ * When hotplugging a second, bigger display in fbcon mode, the bigger
+ * resolution modes are filtered out by drm_mode_validate_size() and are
+ * missing after the user starts lightdm. So we need to renew the modes
+ * list in the get_modes callback, not just return the modes count.
+ */
+ .get_modes = get_modes,
+ .mode_valid = amdgpu_dm_connector_mode_valid,
+ .atomic_check = amdgpu_dm_connector_atomic_check,
+};
+
+static void dm_encoder_helper_disable(struct drm_encoder *encoder)
+{
+}
+
+int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
+{
+ switch (display_color_depth) {
+ case COLOR_DEPTH_666:
+ return 6;
+ case COLOR_DEPTH_888:
+ return 8;
+ case COLOR_DEPTH_101010:
+ return 10;
+ case COLOR_DEPTH_121212:
+ return 12;
+ case COLOR_DEPTH_141414:
+ return 14;
+ case COLOR_DEPTH_161616:
+ return 16;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_atomic_state *state = crtc_state->state;
+ struct drm_connector *connector = conn_state->connector;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct drm_dp_mst_port *mst_port;
+ struct drm_dp_mst_topology_state *mst_state;
+ enum dc_color_depth color_depth;
+ int clock, bpp = 0;
+ bool is_y420 = false;
+
+ if (!aconnector->mst_output_port)
+ return 0;
+
+ mst_port = aconnector->mst_output_port;
+ mst_mgr = &aconnector->mst_root->mst_mgr;
+
+ if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
+ return 0;
+
+ mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
+
+ if (!state->duplicated) {
+ int max_bpc = conn_state->max_requested_bpc;
+
+ is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
+ aconnector->force_yuv420_output;
+ color_depth = convert_color_depth_from_display_info(connector,
+ is_y420,
+ max_bpc);
+ bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
+ clock = adjusted_mode->clock;
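+ /* drm_dp_calc_pbn_mode() expects bpp in 1/16th-of-a-bit units, hence << 4 */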
+ dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
+ }
+
+ dm_new_connector_state->vcpi_slots =
+ drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
+ dm_new_connector_state->pbn);
+ if (dm_new_connector_state->vcpi_slots < 0) {
+ DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
+ return dm_new_connector_state->vcpi_slots;
+ }
+ return 0;
+}
+
+const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
+ .disable = dm_encoder_helper_disable,
+ .atomic_check = dm_encoder_helper_atomic_check
+};
+
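+/*
+ * For each MST connector in the atomic state, look up the PBN computed by
+ * compute_mst_dsc_configs_for_state() and (re)allocate VCPI time slots,
+ * enabling DSC on the MST port when the stream timing has DSC set.
+ */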
+static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars)
+{
+ struct dc_stream_state *stream = NULL;
+ struct drm_connector *connector;
+ struct drm_connector_state *new_con_state;
+ struct amdgpu_dm_connector *aconnector;
+ struct dm_connector_state *dm_conn_state;
+ int i, j, ret;
+ int vcpi, pbn_div, pbn, slot_num = 0;
+
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!aconnector->mst_output_port)
+ continue;
+
+ if (!new_con_state || !new_con_state->crtc)
+ continue;
+
+ dm_conn_state = to_dm_connector_state(new_con_state);
+
+ for (j = 0; j < dc_state->stream_count; j++) {
+ stream = dc_state->streams[j];
+ if (!stream)
+ continue;
+
+ if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
+ break;
+
+ stream = NULL;
+ }
+
+ if (!stream)
+ continue;
+
+ pbn_div = dm_mst_get_pbn_divider(stream->link);
+ /* pbn is calculated by compute_mst_dsc_configs_for_state() */
+ for (j = 0; j < dc_state->stream_count; j++) {
+ if (vars[j].aconnector == aconnector) {
+ pbn = vars[j].pbn;
+ break;
+ }
+ }
+
+ if (j == dc_state->stream_count)
+ continue;
+
+ slot_num = DIV_ROUND_UP(pbn, pbn_div);
+
+ if (stream->timing.flags.DSC != 1) {
+ dm_conn_state->pbn = pbn;
+ dm_conn_state->vcpi_slots = slot_num;
+
+ ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port,
+ dm_conn_state->pbn, false);
+ if (ret < 0)
+ return ret;
+
+ continue;
+ }
+
+ vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true);
+ if (vcpi < 0)
+ return vcpi;
+
+ dm_conn_state->pbn = pbn;
+ dm_conn_state->vcpi_slots = vcpi;
+ }
+ return 0;
+}
+
+static int to_drm_connector_type(enum signal_type st)
+{
+ switch (st) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return DRM_MODE_CONNECTOR_HDMIA;
+ case SIGNAL_TYPE_EDP:
+ return DRM_MODE_CONNECTOR_eDP;
+ case SIGNAL_TYPE_LVDS:
+ return DRM_MODE_CONNECTOR_LVDS;
+ case SIGNAL_TYPE_RGB:
+ return DRM_MODE_CONNECTOR_VGA;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ return DRM_MODE_CONNECTOR_DisplayPort;
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ return DRM_MODE_CONNECTOR_DVID;
+ case SIGNAL_TYPE_VIRTUAL:
+ return DRM_MODE_CONNECTOR_VIRTUAL;
+
+ default:
+ return DRM_MODE_CONNECTOR_Unknown;
+ }
+}
+
+static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+
+ /* There is only one encoder per connector */
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
+
+ return NULL;
+}
+
+static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+
+ encoder = amdgpu_dm_connector_to_encoder(connector);
+
+ if (encoder == NULL)
+ return;
+
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ amdgpu_encoder->native_mode.clock = 0;
+
+ if (!list_empty(&connector->probed_modes)) {
+ struct drm_display_mode *preferred_mode = NULL;
+
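+ /*
+ * probed_modes was sorted by amdgpu_dm_connector_ddc_get_modes(),
+ * so a preferred mode, if present, is the first entry and only
+ * the head of the list needs checking.
+ */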
+ list_for_each_entry(preferred_mode,
+ &connector->probed_modes,
+ head) {
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
+ amdgpu_encoder->native_mode = *preferred_mode;
+
+ break;
+ }
+
+ }
+}
+
+static struct drm_display_mode *
+amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
+ char *name,
+ int hdisplay, int vdisplay)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+
+ mode = drm_mode_duplicate(dev, native_mode);
+
+ if (mode == NULL)
+ return NULL;
+
+ mode->hdisplay = hdisplay;
+ mode->vdisplay = vdisplay;
+ mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+ strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+
+ return mode;
+}
+
+static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ int i;
+ int n;
+ struct mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
+ int w;
+ int h;
+ } common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
+ };
+
+ n = ARRAY_SIZE(common_modes);
+
+ for (i = 0; i < n; i++) {
+ struct drm_display_mode *curmode = NULL;
+ bool mode_existed = false;
+
+ if (common_modes[i].w > native_mode->hdisplay ||
+ common_modes[i].h > native_mode->vdisplay ||
+ (common_modes[i].w == native_mode->hdisplay &&
+ common_modes[i].h == native_mode->vdisplay))
+ continue;
+
+ list_for_each_entry(curmode, &connector->probed_modes, head) {
+ if (common_modes[i].w == curmode->hdisplay &&
+ common_modes[i].h == curmode->vdisplay) {
+ mode_existed = true;
+ break;
+ }
+ }
+
+ if (mode_existed)
+ continue;
+
+ mode = amdgpu_dm_create_common_mode(encoder,
+ common_modes[i].name, common_modes[i].w,
+ common_modes[i].h);
+ if (!mode)
+ continue;
+
+ drm_mode_probed_add(connector, mode);
+ amdgpu_dm_connector->num_modes++;
+ }
+}
+
+static void amdgpu_set_panel_orientation(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+ const struct drm_display_mode *native_mode;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+ connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+ return;
+
+ mutex_lock(&connector->dev->mode_config.mutex);
+ amdgpu_dm_connector_get_modes(connector);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
+ encoder = amdgpu_dm_connector_to_encoder(connector);
+ if (!encoder)
+ return;
+
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ native_mode = &amdgpu_encoder->native_mode;
+ if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
+ return;
+
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ native_mode->hdisplay,
+ native_mode->vdisplay);
+}
+
+static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ if (edid) {
+ /* empty probed_modes */
+ INIT_LIST_HEAD(&connector->probed_modes);
+ amdgpu_dm_connector->num_modes =
+ drm_add_edid_modes(connector, edid);
+
+ /* Sort the probed modes before calling
+ * amdgpu_dm_get_native_mode(), since an EDID can have
+ * more than one preferred mode. Modes later in the
+ * probed mode list could be of higher, preferred
+ * resolution. For example, 3840x2160 as the base EDID
+ * preferred timing and 4096x2160 as the preferred
+ * resolution in a DID extension block later.
+ */
+ drm_mode_sort(&connector->probed_modes);
+ amdgpu_dm_get_native_mode(connector);
+
+ /* Freesync capabilities are reset by calling
+ * drm_add_edid_modes() and need to be
+ * restored here.
+ */
+ amdgpu_dm_update_freesync_caps(connector, edid);
+ } else {
+ amdgpu_dm_connector->num_modes = 0;
+ }
+}
+
+static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
+ struct drm_display_mode *mode)
+{
+ struct drm_display_mode *m;
+
+ list_for_each_entry(m, &aconnector->base.probed_modes, head) {
+ if (drm_mode_equal(m, mode))
+ return true;
+ }
+
+ return false;
+}
+
+static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
+{
+ const struct drm_display_mode *m;
+ struct drm_display_mode *new_mode;
+ uint i;
+ u32 new_modes_count = 0;
+
+ /* Standard FPS values
+ *
+ * 23.976 - TV/NTSC
+ * 24 - Cinema
+ * 25 - TV/PAL
+ * 29.97 - TV/NTSC
+ * 30 - TV/NTSC
+ * 48 - Cinema HFR
+ * 50 - TV/PAL
+ * 60 - Commonly used
+ * 48,72,96,120 - Multiples of 24
+ */
+ static const u32 common_rates[] = {
+ 23976, 24000, 25000, 29970, 30000,
+ 48000, 50000, 60000, 72000, 96000, 120000
+ };
+
+ /*
+ * Find the mode with the highest refresh rate at the same
+ * resolution as the preferred mode; for some monitors the
+ * preferred mode is not the one with the highest supported
+ * refresh rate.
+ */
+
+ m = get_highest_refresh_rate_mode(aconnector, true);
+ if (!m)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
+ u64 target_vtotal, target_vtotal_diff;
+ u64 num, den;
+
+ if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
+ continue;
+
+ if (common_rates[i] < aconnector->min_vfreq * 1000 ||
+ common_rates[i] > aconnector->max_vfreq * 1000)
+ continue;
+
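+ /*
+ * vtotal = pixel clock / (refresh rate * htotal); m->clock is in
+ * kHz and common_rates[] in mHz, hence the 1000 * 1000 factor.
+ */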
+ num = (unsigned long long)m->clock * 1000 * 1000;
+ den = common_rates[i] * (unsigned long long)m->htotal;
+ target_vtotal = div_u64(num, den);
+ target_vtotal_diff = target_vtotal - m->vtotal;
+
+ /* Check for illegal modes */
+ if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
+ m->vsync_end + target_vtotal_diff < m->vsync_start ||
+ m->vtotal + target_vtotal_diff < m->vsync_end)
+ continue;
+
+ new_mode = drm_mode_duplicate(aconnector->base.dev, m);
+ if (!new_mode)
+ goto out;
+
+ new_mode->vtotal += (u16)target_vtotal_diff;
+ new_mode->vsync_start += (u16)target_vtotal_diff;
+ new_mode->vsync_end += (u16)target_vtotal_diff;
+ new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+ new_mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ if (!is_duplicate_mode(aconnector, new_mode)) {
+ drm_mode_probed_add(&aconnector->base, new_mode);
+ new_modes_count += 1;
+ } else
+ drm_mode_destroy(aconnector->base.dev, new_mode);
+ }
+ out:
+ return new_modes_count;
+}
+
+static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ if (!(amdgpu_freesync_vid_mode && edid))
+ return;
+
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ amdgpu_dm_connector->num_modes +=
+ add_fs_modes(amdgpu_dm_connector);
+}
+
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ struct drm_encoder *encoder;
+ struct edid *edid = amdgpu_dm_connector->edid;
+ struct dc_link_settings *verified_link_cap =
+ &amdgpu_dm_connector->dc_link->verified_link_cap;
+ const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
+
+ encoder = amdgpu_dm_connector_to_encoder(connector);
+
+ if (!drm_edid_is_valid(edid)) {
+ amdgpu_dm_connector->num_modes =
+ drm_add_modes_noedid(connector, 640, 480);
+ if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
+ amdgpu_dm_connector->num_modes +=
+ drm_add_modes_noedid(connector, 1920, 1080);
+ } else {
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+ amdgpu_dm_connector_add_freesync_modes(connector, edid);
+ }
+ amdgpu_dm_fbc_init(connector);
+
+ return amdgpu_dm_connector->num_modes;
+}
+
+static const u32 supported_colorspaces =
+ BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
+ BIT(DRM_MODE_COLORIMETRY_OPRGB) |
+ BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
+ BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
+
+void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ int connector_type,
+ struct dc_link *link,
+ int link_index)
+{
+ struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+
+ /*
+ * Some of the properties below require access to state, like bpc.
+ * Allocate some default initial connector state with our reset helper.
+ */
+ if (aconnector->base.funcs->reset)
+ aconnector->base.funcs->reset(&aconnector->base);
+
+ aconnector->connector_id = link_index;
+ aconnector->bl_idx = -1;
+ aconnector->dc_link = link;
+ aconnector->base.interlace_allowed = false;
+ aconnector->base.doublescan_allowed = false;
+ aconnector->base.stereo_allowed = false;
+ aconnector->base.dpms = DRM_MODE_DPMS_OFF;
+ aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
+ aconnector->audio_inst = -1;
+ aconnector->pack_sdp_v1_3 = false;
+ aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
+ memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
+ mutex_init(&aconnector->hpd_lock);
+ mutex_init(&aconnector->handle_mst_msg_ready);
+
+ /*
+ * Configure HPD hot plug support. The connector->polled default
+ * value of 0 means HPD hot plug is not supported.
+ */
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ link->link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link->link_enc);
+ if (link->link_enc)
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.dp_ycbcr420_supported ? true : false;
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ default:
+ break;
+ }
+
+ drm_object_attach_property(&aconnector->base.base,
+ dm->ddev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
+
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_property,
+ UNDERSCAN_OFF);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_hborder_property,
+ 0);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_vborder_property,
+ 0);
+
+ if (!aconnector->mst_root)
+ drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
+
+ aconnector->base.state->max_bpc = 16;
+ aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
+
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ /* Content Type is currently only implemented for HDMI. */
+ drm_connector_attach_content_type_property(&aconnector->base);
+ }
+
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces))
+ drm_connector_attach_colorspace_property(&aconnector->base);
+ } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) ||
+ connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces))
+ drm_connector_attach_colorspace_property(&aconnector->base);
+ }
+
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_eDP) {
+ drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
+
+ if (!aconnector->mst_root)
+ drm_connector_attach_vrr_capable_property(&aconnector->base);
+
+ if (adev->dm.hdcp_workqueue)
+ drm_connector_attach_content_protection_property(&aconnector->base, true);
+ }
+}
+
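+/*
+ * i2c_algorithm master_xfer hook: translate the i2c_msg array into a DC
+ * i2c_command and submit it over the link's DDC pin. Returns the number
+ * of messages transferred on success, -EIO otherwise.
+ */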
+static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
+ struct ddc_service *ddc_service = i2c->ddc_service;
+ struct i2c_command cmd;
+ int i;
+ int result = -EIO;
+
+ if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
+ return result;
+
+ cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
+
+ if (!cmd.payloads)
+ return result;
+
+ cmd.number_of_payloads = num;
+ cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
+ cmd.speed = 100;
+
+ for (i = 0; i < num; i++) {
+ cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
+ cmd.payloads[i].address = msgs[i].addr;
+ cmd.payloads[i].length = msgs[i].len;
+ cmd.payloads[i].data = msgs[i].buf;
+ }
+
+ if (dc_submit_i2c(
+ ddc_service->ctx->dc,
+ ddc_service->link->link_index,
+ &cmd))
+ result = num;
+
+ kfree(cmd.payloads);
+ return result;
+}
+
+static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
+ .master_xfer = amdgpu_dm_i2c_xfer,
+ .functionality = amdgpu_dm_i2c_func,
+};
+
+static struct amdgpu_i2c_adapter *
+create_i2c(struct ddc_service *ddc_service,
+ int link_index,
+ int *res)
+{
+ struct amdgpu_device *adev = ddc_service->ctx->driver_context;
+ struct amdgpu_i2c_adapter *i2c;
+
+ i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+ i2c->base.owner = THIS_MODULE;
+ i2c->base.dev.parent = &adev->pdev->dev;
+ i2c->base.algo = &amdgpu_dm_i2c_algo;
+ snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
+ i2c_set_adapdata(&i2c->base, i2c);
+ i2c->ddc_service = ddc_service;
+
+ return i2c;
+}
+
+/*
+ * Note: this function assumes that dc_link_detect() was called for the
+ * dc_link which will be represented by this aconnector.
+ */
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ u32 link_index,
+ struct amdgpu_encoder *aencoder)
+{
+ int res = 0;
+ int connector_type;
+ struct dc *dc = dm->dc;
+ struct dc_link *link = dc_get_link_at_index(dc, link_index);
+ struct amdgpu_i2c_adapter *i2c;
+
+ /* Not needed for writeback connector */
+ link->priv = aconnector;
+
+ i2c = create_i2c(link->ddc, link->link_index, &res);
+ if (!i2c) {
+ DRM_ERROR("Failed to create i2c adapter data\n");
+ return -ENOMEM;
+ }
+
+ aconnector->i2c = i2c;
+ res = i2c_add_adapter(&i2c->base);
+
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
+ goto out_free;
+ }
+
+ connector_type = to_drm_connector_type(link->connector_signal);
+
+ res = drm_connector_init_with_ddc(
+ dm->ddev,
+ &aconnector->base,
+ &amdgpu_dm_connector_funcs,
+ connector_type,
+ &i2c->base);
+
+ if (res) {
+ DRM_ERROR("connector_init failed\n");
+ aconnector->connector_id = -1;
+ goto out_free;
+ }
+
+ drm_connector_helper_add(
+ &aconnector->base,
+ &amdgpu_dm_connector_helper_funcs);
+
+ amdgpu_dm_connector_init_helper(
+ dm,
+ aconnector,
+ connector_type,
+ link,
+ link_index);
+
+ drm_connector_attach_encoder(
+ &aconnector->base, &aencoder->base);
+
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
+ || connector_type == DRM_MODE_CONNECTOR_eDP)
+ amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
+
+out_free:
+ if (res) {
+ kfree(i2c);
+ aconnector->i2c = NULL;
+ }
+ return res;
+}
+
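+/*
+ * The possible_crtcs mask for an encoder is simply the lowest num_crtc bits
+ * set, i.e. (1 << num_crtc) - 1, clamped at six CRTCs (0x3f). For example,
+ * a 4-CRTC ASIC yields 0xf, allowing the encoder to be routed to any of
+ * CRTCs 0-3.
+ */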
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
+{
+ switch (adev->mode_info.num_crtc) {
+ case 1:
+ return 0x1;
+ case 2:
+ return 0x3;
+ case 3:
+ return 0x7;
+ case 4:
+ return 0xf;
+ case 5:
+ return 0x1f;
+ case 6:
+ default:
+ return 0x3f;
+ }
+}
+
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ int res = drm_encoder_init(dev,
+ &aencoder->base,
+ &amdgpu_dm_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS,
+ NULL);
+
+ aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+
+ if (!res)
+ aencoder->encoder_id = link_index;
+ else
+ aencoder->encoder_id = -1;
+
+ drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
+
+ return res;
+}
+
+static void manage_dm_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ bool enable)
+{
+ /*
+ * We have no guarantee that the frontend index maps to the same
+ * backend index - some even map to more than one.
+ *
+ * TODO: Use a different interrupt or check DC itself for the mapping.
+ */
+ int irq_type =
+ amdgpu_display_crtc_idx_to_irq_type(
+ adev,
+ acrtc->crtc_id);
+
+ if (enable) {
+ drm_crtc_vblank_on(&acrtc->base);
+ amdgpu_irq_get(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ amdgpu_irq_get(
+ adev,
+ &adev->vline0_irq,
+ irq_type);
+#endif
+ } else {
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ amdgpu_irq_put(
+ adev,
+ &adev->vline0_irq,
+ irq_type);
+#endif
+ amdgpu_irq_put(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+ drm_crtc_vblank_off(&acrtc->base);
+ }
+}
+
+static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc)
+{
+ int irq_type =
+ amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
+
+	/*
+	 * This reads the current state for the IRQ and forcibly reapplies
+	 * the setting to hardware.
+	 */
+ amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
+}
+
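+/*
+ * Returns true when the connector's scaling/underscan configuration changed
+ * in a way that requires the stream's src/dst rectangles to be recomputed:
+ * the scaling mode changed, underscan was toggled while non-zero borders
+ * were in use, or the border sizes themselves changed.
+ */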
+static bool
+is_scaling_state_different(const struct dm_connector_state *dm_state,
+ const struct dm_connector_state *old_dm_state)
+{
+ if (dm_state->scaling != old_dm_state->scaling)
+ return true;
+ if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
+ if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
+ return true;
+ } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
+ if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
+ return true;
+ } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
+ dm_state->underscan_vborder != old_dm_state->underscan_vborder)
+ return true;
+ return false;
+}
+
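+/*
+ * Decide whether the HDCP work queue needs to (re)process this connector.
+ * A rough summary of the transitions handled below (see the body for the
+ * exact rules):
+ *
+ *   content type 0 <-> 1 while != UNDESIRED  -> true (restart with new type)
+ *   ENABLED -> DESIRED (re-enable in flight) -> false, unless the mode
+ *                                               changed
+ *   UNDESIRED -> ENABLED (S3 restore)        -> treated as -> DESIRED
+ *   DESIRED -> DESIRED (stream re-enable,
+ *                       hotplug/dpms)        -> true
+ *   state unchanged otherwise                -> true only if >= DESIRED and
+ *                                               the mode changed
+ *   any -> UNDESIRED                         -> true
+ *   DESIRED -> ENABLED                       -> false
+ */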
+static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
+ struct drm_crtc_state *old_crtc_state,
+ struct drm_connector_state *new_conn_state,
+ struct drm_connector_state *old_conn_state,
+ const struct drm_connector *connector,
+ struct hdcp_workqueue *hdcp_w)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
+
+ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ connector->index, connector->status, connector->dpms);
+ pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ old_conn_state->content_protection, new_conn_state->content_protection);
+
+ if (old_crtc_state)
+ pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ old_crtc_state->enable,
+ old_crtc_state->active,
+ old_crtc_state->mode_changed,
+ old_crtc_state->active_changed,
+ old_crtc_state->connectors_changed);
+
+ if (new_crtc_state)
+ pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* hdcp content type change */
+ if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
+ new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
+ return true;
+ }
+
+	/* CP is being re-enabled, ignore this */
+ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (new_crtc_state && new_crtc_state->mode_changed) {
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
+ return true;
+ }
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
+ return false;
+ }
+
+	/* S3 resume case: the old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
+ *
+ * Handles: UNDESIRED -> ENABLED
+ */
+ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+
+ /* Stream removed and re-enabled
+ *
+ * Can sometimes overlap with the HPD case,
+ * thus set update_hdcp to false to avoid
+ * setting HDCP multiple times.
+ *
+ * Handles: DESIRED -> DESIRED (Special case)
+ */
+ if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
+ new_conn_state->crtc && new_conn_state->crtc->enabled &&
+ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ dm_con_state->update_hdcp = false;
+ pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
+ __func__);
+ return true;
+ }
+
+ /* Hot-plug, headless s3, dpms
+ *
+ * Only start HDCP if the display is connected/enabled.
+ * update_hdcp flag will be set to false until the next
+ * HPD comes in.
+ *
+ * Handles: DESIRED -> DESIRED (Special case)
+ */
+ if (dm_con_state->update_hdcp &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+ connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
+ dm_con_state->update_hdcp = false;
+ pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
+ __func__);
+ return true;
+ }
+
+ if (old_conn_state->content_protection == new_conn_state->content_protection) {
+ if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (new_crtc_state && new_crtc_state->mode_changed) {
+ pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
+ __func__);
+ return true;
+ }
+ pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
+ __func__);
+ return false;
+ }
+
+ pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
+ return false;
+ }
+
+ if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
+ __func__);
+ return true;
+ }
+
+ pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
+ return false;
+}
+
+static void remove_stream(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ struct dc_stream_state *stream)
+{
+ /* this is the update mode case */
+
+ acrtc->otg_inst = -1;
+ acrtc->enabled = false;
+}
+
+static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
+{
+
+ assert_spin_locked(&acrtc->base.dev->event_lock);
+ WARN_ON(acrtc->event);
+
+ acrtc->event = acrtc->base.state->event;
+
+ /* Set the flip status */
+ acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+
+ /* Mark this event as consumed */
+ acrtc->base.state->event = NULL;
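+	/*
+	 * From here on, the pageflip interrupt handler (dm_pflip_high_irq())
+	 * is expected to pick up acrtc->event: it sends the event to
+	 * userspace and resets pflip_status back to AMDGPU_FLIP_NONE once
+	 * the flip completes.
+	 */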
+
+ drm_dbg_state(acrtc->base.dev,
+ "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
+ acrtc->crtc_id);
+}
+
+static void update_freesync_state_on_stream(
+ struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *new_crtc_state,
+ struct dc_stream_state *new_stream,
+ struct dc_plane_state *surface,
+ u32 flip_timestamp_in_us)
+{
+ struct mod_vrr_params vrr_params;
+ struct dc_info_packet vrr_infopacket = {0};
+ struct amdgpu_device *adev = dm->adev;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
+ unsigned long flags;
+ bool pack_sdp_v1_3 = false;
+ struct amdgpu_dm_connector *aconn;
+ enum vrr_packet_type packet_type = PACKET_TYPE_VRR;
+
+ if (!new_stream)
+ return;
+
+ /*
+ * TODO: Determine why min/max totals and vrefresh can be 0 here.
+ * For now it's sufficient to just guard against these conditions.
+ */
+
+ if (!new_stream->timing.h_total || !new_stream->timing.v_total)
+ return;
+
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ vrr_params = acrtc->dm_irq_params.vrr_params;
+
+ if (surface) {
+ mod_freesync_handle_preflip(
+ dm->freesync_module,
+ surface,
+ new_stream,
+ flip_timestamp_in_us,
+ &vrr_params);
+
+ if (adev->family < AMDGPU_FAMILY_AI &&
+ amdgpu_dm_crtc_vrr_active(new_crtc_state)) {
+ mod_freesync_handle_v_update(dm->freesync_module,
+ new_stream, &vrr_params);
+
+ /* Need to call this before the frame ends. */
+ dc_stream_adjust_vmin_vmax(dm->dc,
+ new_crtc_state->stream,
+ &vrr_params.adjust);
+ }
+ }
+
+ aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context;
+
+ if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) {
+ pack_sdp_v1_3 = aconn->pack_sdp_v1_3;
+
+ if (aconn->vsdb_info.amd_vsdb_version == 1)
+ packet_type = PACKET_TYPE_FS_V1;
+ else if (aconn->vsdb_info.amd_vsdb_version == 2)
+ packet_type = PACKET_TYPE_FS_V2;
+ else if (aconn->vsdb_info.amd_vsdb_version == 3)
+ packet_type = PACKET_TYPE_FS_V3;
+
+ mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL,
+ &new_stream->adaptive_sync_infopacket);
+ }
+
+ mod_freesync_build_vrr_infopacket(
+ dm->freesync_module,
+ new_stream,
+ &vrr_params,
+ packet_type,
+ TRANSFER_FUNC_UNKNOWN,
+ &vrr_infopacket,
+ pack_sdp_v1_3);
+
+ new_crtc_state->freesync_vrr_info_changed |=
+ (memcmp(&new_crtc_state->vrr_infopacket,
+ &vrr_infopacket,
+ sizeof(vrr_infopacket)) != 0);
+
+ acrtc->dm_irq_params.vrr_params = vrr_params;
+ new_crtc_state->vrr_infopacket = vrr_infopacket;
+
+ new_stream->vrr_infopacket = vrr_infopacket;
+ new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params);
+
+ if (new_crtc_state->freesync_vrr_info_changed)
+ DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
+ new_crtc_state->base.crtc->base.id,
+ (int)new_crtc_state->base.vrr_enabled,
+ (int)vrr_params.state);
+
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+}
+
+static void update_stream_irq_parameters(
+ struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *new_crtc_state)
+{
+ struct dc_stream_state *new_stream = new_crtc_state->stream;
+ struct mod_vrr_params vrr_params;
+ struct mod_freesync_config config = new_crtc_state->freesync_config;
+ struct amdgpu_device *adev = dm->adev;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
+ unsigned long flags;
+
+ if (!new_stream)
+ return;
+
+ /*
+ * TODO: Determine why min/max totals and vrefresh can be 0 here.
+ * For now it's sufficient to just guard against these conditions.
+ */
+ if (!new_stream->timing.h_total || !new_stream->timing.v_total)
+ return;
+
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ vrr_params = acrtc->dm_irq_params.vrr_params;
+
+ if (new_crtc_state->vrr_supported &&
+ config.min_refresh_in_uhz &&
+ config.max_refresh_in_uhz) {
+ /*
+		 * If a freesync-compatible mode was set, config.state will
+		 * already have been set in atomic check.
+ */
+ if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
+ (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
+ new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
+ vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
+ vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
+ vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
+ vrr_params.state = VRR_STATE_ACTIVE_FIXED;
+ } else {
+ config.state = new_crtc_state->base.vrr_enabled ?
+ VRR_STATE_ACTIVE_VARIABLE :
+ VRR_STATE_INACTIVE;
+ }
+ } else {
+ config.state = VRR_STATE_UNSUPPORTED;
+ }
+
+ mod_freesync_build_vrr_params(dm->freesync_module,
+ new_stream,
+ &config, &vrr_params);
+
+ new_crtc_state->freesync_config = config;
+ /* Copy state for access from DM IRQ handler */
+ acrtc->dm_irq_params.freesync_config = config;
+ acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
+ acrtc->dm_irq_params.vrr_params = vrr_params;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+}
+
+static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
+ struct dm_crtc_state *new_state)
+{
+ bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state);
+ bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state);
+
+ if (!old_vrr_active && new_vrr_active) {
+ /* Transition VRR inactive -> active:
+		 * While VRR is active, we must not disable the vblank irq, as
+		 * re-enabling it after a disable would compute bogus
+		 * vblank/pflip timestamps if the disable happened inside the
+		 * display front porch.
+ *
+ * We also need vupdate irq for the actual core vblank handling
+ * at end of vblank.
+ */
+ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
+ WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
+ DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
+ __func__, new_state->base.crtc->base.id);
+ } else if (old_vrr_active && !new_vrr_active) {
+ /* Transition VRR active -> inactive:
+ * Allow vblank irq disable again for fixed refresh rate.
+ */
+ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
+ drm_crtc_vblank_put(new_state->base.crtc);
+ DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
+ __func__, new_state->base.crtc->base.id);
+ }
+}
+
+static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state;
+ int i;
+
+ /*
+ * TODO: Make this per-stream so we don't issue redundant updates for
+ * commits with multiple streams.
+ */
+ for_each_old_plane_in_state(state, plane, old_plane_state, i)
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state);
+}
+
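+/*
+ * Return the TTM memory type backing the framebuffer (e.g. TTM_PL_VRAM or
+ * TTM_PL_TT), or 0 if the BO currently has no resource. Used below to
+ * refuse immediate flips that would also move the buffer between memory
+ * domains.
+ */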
+static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
+{
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
+
+ return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
+}
+
+static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ struct drm_device *dev,
+ struct amdgpu_display_manager *dm,
+ struct drm_crtc *pcrtc,
+ bool wait_for_vblank)
+{
+ u32 i;
+ u64 timestamp_ns = ktime_get_ns();
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
+ struct drm_crtc_state *new_pcrtc_state =
+ drm_atomic_get_new_crtc_state(state, pcrtc);
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
+ struct dm_crtc_state *dm_old_crtc_state =
+ to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
+ int planes_count = 0, vpos, hpos;
+ unsigned long flags;
+ u32 target_vblank, last_flip_vblank;
+ bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
+ bool cursor_update = false;
+ bool pflip_present = false;
+ bool dirty_rects_changed = false;
+ struct {
+ struct dc_surface_update surface_updates[MAX_SURFACES];
+ struct dc_plane_info plane_infos[MAX_SURFACES];
+ struct dc_scaling_info scaling_infos[MAX_SURFACES];
+ struct dc_flip_addrs flip_addrs[MAX_SURFACES];
+ struct dc_stream_update stream_update;
+ } *bundle;
+
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+
+ if (!bundle) {
+ drm_err(dev, "Failed to allocate update bundle\n");
+ goto cleanup;
+ }
+
+ /*
+ * Disable the cursor first if we're disabling all the planes.
+ * It'll remain on the screen after the planes are re-enabled
+ * if we don't.
+ */
+ if (acrtc_state->active_planes == 0)
+ amdgpu_dm_commit_cursors(state);
+
+ /* update planes when needed */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_framebuffer *fb = new_plane_state->fb;
+ struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
+ bool plane_needs_flip;
+ struct dc_plane_state *dc_plane;
+ struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
+
+ /* Cursor plane is handled after stream updates */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ if ((fb && crtc == pcrtc) ||
+ (old_plane_state->fb && old_plane_state->crtc == pcrtc))
+ cursor_update = true;
+
+ continue;
+ }
+
+ if (!fb || !crtc || pcrtc != crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!new_crtc_state->active)
+ continue;
+
+ dc_plane = dm_new_plane_state->dc_state;
+ if (!dc_plane)
+ continue;
+
+ bundle->surface_updates[planes_count].surface = dc_plane;
+ if (new_pcrtc_state->color_mgmt_changed) {
+ bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction;
+ bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func;
+ bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
+ bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult;
+ bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func;
+ bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func;
+ bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf;
+ }
+
+ amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
+ &bundle->scaling_infos[planes_count]);
+
+ bundle->surface_updates[planes_count].scaling_info =
+ &bundle->scaling_infos[planes_count];
+
+ plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
+
+ pflip_present = pflip_present || plane_needs_flip;
+
+ if (!plane_needs_flip) {
+ planes_count += 1;
+ continue;
+ }
+
+ fill_dc_plane_info_and_addr(
+ dm->adev, new_plane_state,
+ afb->tiling_flags,
+ &bundle->plane_infos[planes_count],
+ &bundle->flip_addrs[planes_count].address,
+ afb->tmz_surface, false);
+
+ drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
+ new_plane_state->plane->index,
+ bundle->plane_infos[planes_count].dcc.enable);
+
+ bundle->surface_updates[planes_count].plane_info =
+ &bundle->plane_infos[planes_count];
+
+ if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
+ acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
+ fill_dc_dirty_rects(plane, old_plane_state,
+ new_plane_state, new_crtc_state,
+ &bundle->flip_addrs[planes_count],
+ acrtc_state->stream->link->psr_settings.psr_version ==
+ DC_PSR_VERSION_SU_1,
+ &dirty_rects_changed);
+
+ /*
+			 * If the dirty regions changed, PSR-SU needs to be
+			 * disabled temporarily and re-enabled once the dirty
+			 * regions are stable, to avoid video glitches.
+			 * PSR-SU will be re-enabled in vblank_control_worker()
+			 * if the user pauses the video while it is disabled.
+ */
+ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+ acrtc_attach->dm_irq_params.allow_psr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
+ dirty_rects_changed) {
+ mutex_lock(&dm->dc_lock);
+ acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
+ timestamp_ns;
+ if (acrtc_state->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream);
+ mutex_unlock(&dm->dc_lock);
+ }
+ }
+
+ /*
+ * Only allow immediate flips for fast updates that don't
+ * change memory domain, FB pitch, DCC state, rotation or
+ * mirroring.
+ *
+ * dm_crtc_helper_atomic_check() only accepts async flips with
+ * fast updates.
+ */
+ if (crtc->state->async_flip &&
+ (acrtc_state->update_type != UPDATE_TYPE_FAST ||
+ get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
+ drm_warn_once(state->dev,
+ "[PLANE:%d:%s] async flip with non-fast update\n",
+ plane->base.id, plane->name);
+
+ bundle->flip_addrs[planes_count].flip_immediate =
+ crtc->state->async_flip &&
+ acrtc_state->update_type == UPDATE_TYPE_FAST &&
+ get_mem_type(old_plane_state->fb) == get_mem_type(fb);
+
+ timestamp_ns = ktime_get_ns();
+ bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
+ bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
+ bundle->surface_updates[planes_count].surface = dc_plane;
+
+ if (!bundle->surface_updates[planes_count].surface) {
+ DRM_ERROR("No surface for CRTC: id=%d\n",
+ acrtc_attach->crtc_id);
+ continue;
+ }
+
+ if (plane == pcrtc->primary)
+ update_freesync_state_on_stream(
+ dm,
+ acrtc_state,
+ acrtc_state->stream,
+ dc_plane,
+ bundle->flip_addrs[planes_count].flip_timestamp_in_us);
+
+ drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
+ __func__,
+ bundle->flip_addrs[planes_count].address.grph.addr.high_part,
+ bundle->flip_addrs[planes_count].address.grph.addr.low_part);
+
+ planes_count += 1;
+
+ }
+
+ if (pflip_present) {
+ if (!vrr_active) {
+ /* Use old throttling in non-vrr fixed refresh rate mode
+ * to keep flip scheduling based on target vblank counts
+ * working in a backwards compatible way, e.g., for
+ * clients using the GLX_OML_sync_control extension or
+ * DRI3/Present extension with defined target_msc.
+ */
+ last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
+ } else {
+ /* For variable refresh rate mode only:
+ * Get vblank of last completed flip to avoid > 1 vrr
+ * flips per video frame by use of throttling, but allow
+ * flip programming anywhere in the possibly large
+ * variable vrr vblank interval for fine-grained flip
+ * timing control and more opportunity to avoid stutter
+ * on late submission of flips.
+ */
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+ last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
+ target_vblank = last_flip_vblank + wait_for_vblank;
+
+ /*
+ * Wait until we're out of the vertical blank period before the one
+ * targeted by the flip
+ */
+ while ((acrtc_attach->enabled &&
+ (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
+ 0, &vpos, &hpos, NULL,
+ NULL, &pcrtc->hwmode)
+ & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+ (int)(target_vblank -
+ amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
+ usleep_range(1000, 1100);
+ }
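+		/*
+		 * Rough example: with wait_for_vblank == 1 and the previous
+		 * flip having completed in vblank N, target_vblank is N + 1;
+		 * the loop above polls in ~1 ms steps until scanout is no
+		 * longer inside a vblank period that precedes the target, so
+		 * the flip programmed below completes in vblank N + 1 at the
+		 * earliest.
+		 */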
+
+		/*
+		 * Prepare the flip event for the pageflip interrupt to handle.
+		 *
+		 * This only works in the case where we've already turned on the
+		 * appropriate hardware blocks (e.g. HUBP), so in the transition
+		 * from 0 -> n planes we have to skip a hardware-generated event
+		 * and rely on sending it from software.
+		 */
+ if (acrtc_attach->base.state->event &&
+ acrtc_state->active_planes > 0) {
+ drm_crtc_vblank_get(pcrtc);
+
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+
+ WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
+ prepare_flip_isr(acrtc_attach);
+
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
+ if (acrtc_state->stream) {
+ if (acrtc_state->freesync_vrr_info_changed)
+ bundle->stream_update.vrr_infopacket =
+ &acrtc_state->stream->vrr_infopacket;
+ }
+ } else if (cursor_update && acrtc_state->active_planes > 0 &&
+ acrtc_attach->base.state->event) {
+ drm_crtc_vblank_get(pcrtc);
+
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+
+ acrtc_attach->event = acrtc_attach->base.state->event;
+ acrtc_attach->base.state->event = NULL;
+
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
+ /* Update the planes if changed or disable if we don't have any. */
+ if ((planes_count || acrtc_state->active_planes == 0) &&
+ acrtc_state->stream) {
+ /*
+ * If PSR or idle optimizations are enabled then flush out
+ * any pending work before hardware programming.
+ */
+ if (dm->vblank_control_workqueue)
+ flush_workqueue(dm->vblank_control_workqueue);
+
+ bundle->stream_update.stream = acrtc_state->stream;
+ if (new_pcrtc_state->mode_changed) {
+ bundle->stream_update.src = acrtc_state->stream->src;
+ bundle->stream_update.dst = acrtc_state->stream->dst;
+ }
+
+ if (new_pcrtc_state->color_mgmt_changed) {
+ /*
+ * TODO: This isn't fully correct since we've actually
+ * already modified the stream in place.
+ */
+ bundle->stream_update.gamut_remap =
+ &acrtc_state->stream->gamut_remap_matrix;
+ bundle->stream_update.output_csc_transform =
+ &acrtc_state->stream->csc_color_matrix;
+ bundle->stream_update.out_transfer_func =
+ &acrtc_state->stream->out_transfer_func;
+ bundle->stream_update.lut3d_func =
+ (struct dc_3dlut *) acrtc_state->stream->lut3d_func;
+ bundle->stream_update.func_shaper =
+ (struct dc_transfer_func *) acrtc_state->stream->func_shaper;
+ }
+
+ acrtc_state->stream->abm_level = acrtc_state->abm_level;
+ if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
+ bundle->stream_update.abm_level = &acrtc_state->abm_level;
+
+ mutex_lock(&dm->dc_lock);
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+ acrtc_state->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream);
+ mutex_unlock(&dm->dc_lock);
+
+ /*
+ * If FreeSync state on the stream has changed then we need to
+ * re-adjust the min/max bounds now that DC doesn't handle this
+ * as part of commit.
+ */
+ if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+ dc_stream_adjust_vmin_vmax(
+ dm->dc, acrtc_state->stream,
+ &acrtc_attach->dm_irq_params.vrr_params.adjust);
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+ mutex_lock(&dm->dc_lock);
+ update_planes_and_stream_adapter(dm->dc,
+ acrtc_state->update_type,
+ planes_count,
+ acrtc_state->stream,
+ &bundle->stream_update,
+ bundle->surface_updates);
+
+		/*
+		 * Enable or disable the interrupts on the backend.
+		 *
+		 * Most pipes are put into power gating when unused.
+		 *
+		 * When a pipe is power gated, its interrupt enablement state
+		 * is lost, so it must be reprogrammed once the pipe is
+		 * ungated again.
+		 *
+		 * So we need to update the IRQ control state in hardware
+		 * whenever the pipe turns on (since it could have been power
+		 * gated before) or off (since some pipes can't be power gated
+		 * on some ASICs).
+		 */
+ if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
+ dm_update_pflip_irq_state(drm_to_adev(dev),
+ acrtc_attach);
+
+ if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+ if (acrtc_state->stream->link->replay_settings.config.replay_supported &&
+ !acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
+ struct amdgpu_dm_connector *aconn =
+ (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+ amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
+ } else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+ !acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+
+ struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
+ acrtc_state->stream->dm_stream_context;
+
+ if (!aconn->disallow_edp_enter_psr)
+ amdgpu_dm_link_setup_psr(acrtc_state->stream);
+ }
+ }
+
+ /* Decrement skip count when PSR is enabled and we're doing fast updates. */
+ if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
+ acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+ struct amdgpu_dm_connector *aconn =
+ (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+
+ if (aconn->psr_skip_count > 0)
+ aconn->psr_skip_count--;
+
+ /* Allow PSR when skip count is 0. */
+ acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
+
+ /*
+ * If sink supports PSR SU, there is no need to rely on
+ * a vblank event disable request to enable PSR. PSR SU
+ * can be enabled immediately once OS demonstrates an
+ * adequate number of fast atomic commits to notify KMD
+ * of update events. See `vblank_control_worker()`.
+ */
+ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+ acrtc_attach->dm_irq_params.allow_psr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
+ !acrtc_state->stream->link->psr_settings.psr_allow_active &&
+ !aconn->disallow_edp_enter_psr &&
+ (timestamp_ns -
+ acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
+ 500000000)
+ amdgpu_dm_psr_enable(acrtc_state->stream);
+ } else {
+ acrtc_attach->dm_irq_params.allow_psr_entry = false;
+ }
+
+ mutex_unlock(&dm->dc_lock);
+ }
+
+ /*
+ * Update cursor state *after* programming all the planes.
+ * This avoids redundant programming in the case where we're going
+ * to be disabling a single plane - those pipes are being disabled.
+ */
+ if (acrtc_state->active_planes)
+ amdgpu_dm_commit_cursors(state);
+
+cleanup:
+ kfree(bundle);
+}
+
+static void amdgpu_dm_commit_audio(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct dm_crtc_state *new_dm_crtc_state;
+ const struct dc_stream_status *status;
+ int i, inst;
+
+ /* Notify device removals. */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ if (old_con_state->crtc != new_con_state->crtc) {
+ /* CRTC changes require notification. */
+ goto notify;
+ }
+
+ if (!new_con_state->crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(
+ state, new_con_state->crtc);
+
+ if (!new_crtc_state)
+ continue;
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+notify:
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ mutex_lock(&adev->dm.audio_lock);
+ inst = aconnector->audio_inst;
+ aconnector->audio_inst = -1;
+ mutex_unlock(&adev->dm.audio_lock);
+
+ amdgpu_dm_audio_eld_notify(adev, inst);
+ }
+
+ /* Notify audio device additions. */
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ if (!new_con_state->crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(
+ state, new_con_state->crtc);
+
+ if (!new_crtc_state)
+ continue;
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (!new_dm_crtc_state->stream)
+ continue;
+
+ status = dc_stream_get_status(new_dm_crtc_state->stream);
+ if (!status)
+ continue;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ mutex_lock(&adev->dm.audio_lock);
+ inst = status->audio_inst;
+ aconnector->audio_inst = inst;
+ mutex_unlock(&adev->dm.audio_lock);
+
+ amdgpu_dm_audio_eld_notify(adev, inst);
+ }
+}
+
+/*
+ * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
+ * @crtc_state: the DRM CRTC state
+ * @stream_state: the DC stream state.
+ *
+ * Copy the mirrored transient state flags from DRM to DC. It is used to bring
+ * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
+ */
+static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
+ struct dc_stream_state *stream_state)
+{
+ stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
+}
+
+static void dm_clear_writeback(struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *crtc_state)
+{
+ dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0);
+}
+
+static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
+ struct dc_state *dc_state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct drm_connector_state *old_con_state;
+ struct drm_connector *connector;
+ bool mode_set_reset_required = false;
+ u32 i;
+ struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count};
+
+ /* Disable writeback */
+ for_each_old_connector_in_state(state, connector, old_con_state, i) {
+ struct dm_connector_state *dm_old_con_state;
+ struct amdgpu_crtc *acrtc;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ old_crtc_state = NULL;
+
+ dm_old_con_state = to_dm_connector_state(old_con_state);
+ if (!dm_old_con_state->base.crtc)
+ continue;
+
+ acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc);
+ if (acrtc)
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+
+ if (!acrtc->wb_enabled)
+ continue;
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ dm_clear_writeback(dm, dm_old_crtc_state);
+ acrtc->wb_enabled = false;
+ }
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ if (old_crtc_state->active &&
+ (!new_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+ manage_dm_interrupts(adev, acrtc, false);
+ dc_stream_release(dm_old_crtc_state->stream);
+ }
+ }
+
+ drm_atomic_helper_calc_timestamping_constants(state);
+
+ /* update changed items */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ drm_dbg_state(state->dev,
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->planes_changed,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* Disable cursor if disabling crtc */
+ if (old_crtc_state->active && !new_crtc_state->active) {
+ struct dc_cursor_position position;
+
+ memset(&position, 0, sizeof(position));
+ mutex_lock(&dm->dc_lock);
+ dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
+ mutex_unlock(&dm->dc_lock);
+ }
+
+ /* Copy all transient state flags into dc state */
+ if (dm_new_crtc_state->stream) {
+ amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
+ dm_new_crtc_state->stream);
+ }
+
+ /* handles headless hotplug case, updating new_state and
+ * aconnector as needed
+ */
+
+ if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
+
+ DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
+
+ if (!dm_new_crtc_state->stream) {
+				/*
+				 * This could happen because of issues with
+				 * userspace notification delivery: userspace
+				 * tries to set a mode on a display that is in
+				 * fact disconnected, so dc_sink is NULL on the
+				 * aconnector. We expect a mode reset to come
+				 * soon.
+				 *
+				 * This can also happen when an unplug occurs
+				 * during the resume sequence.
+				 *
+				 * In that case we want to pretend we still
+				 * have a sink, to keep the pipe running so
+				 * that hw state stays consistent with the sw
+				 * state.
+				 */
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ continue;
+ }
+
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+
+ pm_runtime_get_noresume(dev->dev);
+
+ acrtc->enabled = true;
+ acrtc->hw_mode = new_crtc_state->mode;
+ crtc->hwmode = new_crtc_state->mode;
+ mode_set_reset_required = true;
+ } else if (modereset_required(new_crtc_state)) {
+ DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
+ /* i.e. reset mode */
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+
+ mode_set_reset_required = true;
+ }
+ } /* for_each_crtc_in_state() */
+
+	/* If there was a mode set or reset, disable eDP PSR and Replay */
+ if (mode_set_reset_required) {
+ if (dm->vblank_control_workqueue)
+ flush_workqueue(dm->vblank_control_workqueue);
+
+ amdgpu_dm_replay_disable_all(dm);
+ amdgpu_dm_psr_disable_all(dm);
+ }
+
+ dm_enable_per_frame_crtc_master_sync(dc_state);
+ mutex_lock(&dm->dc_lock);
+ WARN_ON(!dc_commit_streams(dm->dc, &params));
+
+ /* Allow idle optimization when vblank count is 0 for display off */
+ if (dm->active_vblank_irq_count == 0)
+ dc_allow_idle_optimizations(dm->dc, true);
+ mutex_unlock(&dm->dc_lock);
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state->stream != NULL) {
+ const struct dc_stream_status *status =
+ dc_stream_get_status(dm_new_crtc_state->stream);
+
+ if (!status)
+ status = dc_state_get_stream_status(dc_state,
+ dm_new_crtc_state->stream);
+ if (!status)
+ drm_err(dev,
+ "got no status for stream %p on acrtc%p\n",
+ dm_new_crtc_state->stream, acrtc);
+ else
+ acrtc->otg_inst = status->primary_otg_inst;
+ }
+ }
+}
+
+static void dm_set_writeback(struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *crtc_state,
+ struct drm_connector *connector,
+ struct drm_connector_state *new_con_state)
+{
+ struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector);
+ struct amdgpu_device *adev = dm->adev;
+ struct amdgpu_crtc *acrtc;
+ struct dc_writeback_info *wb_info;
+ struct pipe_ctx *pipe = NULL;
+ struct amdgpu_framebuffer *afb;
+ int i = 0;
+
+ wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
+ if (!wb_info) {
+ DRM_ERROR("Failed to allocate wb_info\n");
+ return;
+ }
+
+ acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
+ if (!acrtc) {
+ DRM_ERROR("no amdgpu_crtc found\n");
+ kfree(wb_info);
+ return;
+ }
+
+ afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
+ if (!afb) {
+ DRM_ERROR("No amdgpu_framebuffer found\n");
+ kfree(wb_info);
+ return;
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) {
+ pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i];
+ break;
+ }
+ }
+
+ /* fill in wb_info */
+ wb_info->wb_enabled = true;
+
+ wb_info->dwb_pipe_inst = 0;
+ wb_info->dwb_params.dwbscl_black_color = 0;
+ wb_info->dwb_params.hdr_mult = 0x1F000;
+ wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS;
+ wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13;
+ wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC;
+ wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC;
+
+ /* width & height from crtc */
+ wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay;
+ wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay;
+ wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay;
+ wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay;
+
+ wb_info->dwb_params.cnv_params.crop_en = false;
+ wb_info->dwb_params.stereo_params.stereo_enabled = false;
+
+ wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff; // 10 bits
+ wb_info->dwb_params.cnv_params.out_min_pix_val = 0;
+ wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB;
+ wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS;
+
+ wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444;
+
+ wb_info->dwb_params.capture_rate = dwb_capture_rate_0;
+
+ wb_info->dwb_params.scaler_taps.h_taps = 4;
+ wb_info->dwb_params.scaler_taps.v_taps = 4;
+ wb_info->dwb_params.scaler_taps.h_taps_c = 2;
+ wb_info->dwb_params.scaler_taps.v_taps_c = 2;
+ wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING;
+
+ wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0];
+ wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1];
+
+ for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) {
+ wb_info->mcif_buf_params.luma_address[i] = afb->address;
+ wb_info->mcif_buf_params.chroma_address[i] = 0;
+ }
+
+ wb_info->mcif_buf_params.p_vmid = 1;
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) {
+ wb_info->mcif_warmup_params.start_address.quad_part = afb->address;
+ wb_info->mcif_warmup_params.region_size =
+ wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height;
+ }
+ wb_info->mcif_warmup_params.p_vmid = 1;
+ wb_info->writeback_source_plane = pipe->plane_state;
+
+ dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info);
+
+ acrtc->wb_pending = true;
+ acrtc->wb_conn = wb_conn;
+ drm_writeback_queue_job(wb_conn, new_con_state);
+}
+
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware problem, since
+ * atomic check should have filtered anything non-kosher.
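+ *
+ * Roughly, the order of operations below is: commit the new dc_state
+ * (modesets, stream removal/addition), update HDCP state, apply
+ * connector-only stream updates (scaling/ABM/HDR), re-arm per-CRTC
+ * interrupts and freesync parameters, commit planes per CRTC, enable
+ * writeback jobs, notify audio and restore backlight, then send the
+ * remaining vblank events and signal hw_done.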
+ */
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ struct dc_state *dc_state = NULL;
+ u32 i, j;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int crtc_disable_count = 0;
+
+ trace_amdgpu_dm_atomic_commit_tail_begin(state);
+
+ if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed)
+ dc_allow_idle_optimizations(dm->dc, false);
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
+
+ dm_state = dm_atomic_get_new_state(state);
+ if (dm_state && dm_state->context) {
+ dc_state = dm_state->context;
+ amdgpu_dm_commit_streams(state, dc_state);
+ }
+
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct amdgpu_dm_connector *aconnector;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!adev->dm.hdcp_workqueue)
+ continue;
+
+ pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
+
+ if (!connector)
+ continue;
+
+ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ connector->index, connector->status, connector->dpms);
+ pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ old_con_state->content_protection, new_con_state->content_protection);
+
+ if (aconnector->dc_sink) {
+ if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
+ aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
+ pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+ aconnector->dc_sink->edid_caps.display_name);
+ }
+ }
+
+ new_crtc_state = NULL;
+ old_crtc_state = NULL;
+
+ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
+
+ if (old_crtc_state)
+ pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ old_crtc_state->enable,
+ old_crtc_state->active,
+ old_crtc_state->mode_changed,
+ old_crtc_state->active_changed,
+ old_crtc_state->connectors_changed);
+
+ if (new_crtc_state)
+ pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+ }
+
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!adev->dm.hdcp_workqueue)
+ continue;
+
+ new_crtc_state = NULL;
+ old_crtc_state = NULL;
+
+ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
+ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+ new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ dm_new_con_state->update_hdcp = true;
+ continue;
+ }
+
+ if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
+ old_con_state, connector, adev->dm.hdcp_workqueue)) {
+			/* When a display is unplugged from an MST hub, the
+			 * connector will be destroyed within
+			 * dm_dp_mst_connector_destroy. The connector's hdcp
+			 * properties, like type, undesired, desired and
+			 * enabled, will be lost. So, save the hdcp properties
+			 * into hdcp_work within amdgpu_dm_atomic_commit_tail.
+			 * If the same display is plugged back with the same
+			 * display index, its hdcp properties will be retrieved
+			 * from hdcp_work within dm_dp_mst_get_modes.
+			 */
+
+ bool enable_encryption = false;
+
+ if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ enable_encryption = true;
+
+ if (aconnector->dc_link && aconnector->dc_sink &&
+ aconnector->dc_link->type == dc_connection_mst_branch) {
+ struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+ struct hdcp_workqueue *hdcp_w =
+ &hdcp_work[aconnector->dc_link->link_index];
+
+ hdcp_w->hdcp_content_type[connector->index] =
+ new_con_state->hdcp_content_type;
+ hdcp_w->content_protection[connector->index] =
+ new_con_state->content_protection;
+ }
+
+ if (new_crtc_state && new_crtc_state->mode_changed &&
+ new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ enable_encryption = true;
+
+ DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+
+ hdcp_update_display(
+ adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
+ new_con_state->hdcp_content_type, enable_encryption);
+ }
+ }
+
+ /* Handle connector state changes */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct dc_surface_update *dummy_updates;
+ struct dc_stream_update stream_update;
+ struct dc_info_packet hdr_packet;
+ struct dc_stream_status *status = NULL;
+ bool abm_changed, hdr_changed, scaling_changed;
+
+ memset(&stream_update, 0, sizeof(stream_update));
+
+ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ scaling_changed = is_scaling_state_different(dm_new_con_state,
+ dm_old_con_state);
+
+ abm_changed = dm_new_crtc_state->abm_level !=
+ dm_old_crtc_state->abm_level;
+
+ hdr_changed =
+ !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
+
+ if (!scaling_changed && !abm_changed && !hdr_changed)
+ continue;
+
+ stream_update.stream = dm_new_crtc_state->stream;
+ if (scaling_changed) {
+ update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
+ dm_new_con_state, dm_new_crtc_state->stream);
+
+ stream_update.src = dm_new_crtc_state->stream->src;
+ stream_update.dst = dm_new_crtc_state->stream->dst;
+ }
+
+ if (abm_changed) {
+ dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
+
+ stream_update.abm_level = &dm_new_crtc_state->abm_level;
+ }
+
+ if (hdr_changed) {
+ fill_hdr_info_packet(new_con_state, &hdr_packet);
+ stream_update.hdr_static_metadata = &hdr_packet;
+ }
+
+ status = dc_stream_get_status(dm_new_crtc_state->stream);
+
+ if (WARN_ON(!status))
+ continue;
+
+ WARN_ON(!status->plane_count);
+
+ /*
+ * TODO: DC refuses to perform stream updates without a dc_surface_update.
+ * Here we create an empty update on each plane.
+ * To fix this, DC should permit updating only stream properties.
+ */
+ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
+ if (!dummy_updates) {
+ DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
+ continue;
+ }
+ for (j = 0; j < status->plane_count; j++)
+ dummy_updates[j].surface = status->plane_states[0];
+
+ mutex_lock(&dm->dc_lock);
+ dc_update_planes_and_stream(dm->dc,
+ dummy_updates,
+ status->plane_count,
+ dm_new_crtc_state->stream,
+ &stream_update);
+ mutex_unlock(&dm->dc_lock);
+ kfree(dummy_updates);
+ }
+
+	/*
+	 * Enable interrupts for CRTCs that are newly enabled or went through
+	 * a modeset. This is intentionally deferred until after the front-end
+	 * state has been modified, so the OTG is on and the IRQ handlers don't
+	 * access stale or invalid state.
+	 */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+#ifdef CONFIG_DEBUG_FS
+ enum amdgpu_dm_pipe_crc_source cur_crc_src;
+#endif
+ /* Count number of newly disabled CRTCs for dropping PM refs later. */
+ if (old_crtc_state->active && !new_crtc_state->active)
+ crtc_disable_count++;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ /* For freesync config update on crtc state and params for irq */
+ update_stream_irq_parameters(dm, dm_new_crtc_state);
+
+#ifdef CONFIG_DEBUG_FS
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ cur_crc_src = acrtc->dm_irq_params.crc_src;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+#endif
+
+ if (new_crtc_state->active &&
+ (!old_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+ dc_stream_retain(dm_new_crtc_state->stream);
+ acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
+ manage_dm_interrupts(adev, acrtc, true);
+ }
+ /* Handle vrr on->off / off->on transitions */
+ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
+
+#ifdef CONFIG_DEBUG_FS
+ if (new_crtc_state->active &&
+ (!old_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+			/*
+			 * The frontend may have changed, so reapply the CRC
+			 * capture settings for the stream.
+			 */
+ if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (amdgpu_dm_crc_window_is_activated(crtc)) {
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ acrtc->dm_irq_params.window_param.update_win = true;
+
+					/*
+					 * It takes 2 frames for HW to stably generate CRC when
+					 * resuming from suspend, so we set skip_frame_cnt to 2.
+					 */
+ acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+ }
+#endif
+ if (amdgpu_dm_crtc_configure_crc_source(
+ crtc, dm_new_crtc_state, cur_crc_src))
+ DRM_DEBUG_DRIVER("Failed to configure crc source");
+ }
+ }
+#endif
+ }
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
+ if (new_crtc_state->async_flip)
+ wait_for_vblank = false;
+
+ /* update planes when needed per crtc*/
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state->stream)
+ amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
+ }
+
+ /* Enable writeback */
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ if (!new_con_state->writeback_job)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+
+ if (!new_crtc_state)
+ continue;
+
+ if (acrtc->wb_enabled)
+ continue;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state);
+ acrtc->wb_enabled = true;
+ }
+
+ /* Update audio instances for each connector. */
+ amdgpu_dm_commit_audio(dev, state);
+
+ /* restore the backlight level */
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (dm->backlight_dev[i] &&
+ (dm->actual_brightness[i] != dm->brightness[i]))
+ amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
+ }
+
+ /*
+	 * Send a vblank event for every CRTC event not handled in flip, and
+	 * mark the events consumed for drm_atomic_helper_commit_hw_done().
+ */
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+
+ if (new_crtc_state->event)
+ drm_send_event_locked(dev, &new_crtc_state->event->base);
+
+ new_crtc_state->event = NULL;
+ }
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+
+ /* Signal HW programming completion */
+ drm_atomic_helper_commit_hw_done(state);
+
+ if (wait_for_vblank)
+ drm_atomic_helper_wait_for_flip_done(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ /* Don't free the memory if we are hitting this as part of suspend.
+ * This way we don't free any memory during suspend; see
+ * amdgpu_bo_free_kernel(). The memory will be freed in the first
+ * non-suspend modeset or when the driver is torn down.
+ */
+ if (!adev->in_suspend) {
+ /* return the stolen vga memory back to VRAM */
+ if (!adev->mman.keep_stolen_vga_memory)
+ amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
+ }
+
+ /*
+ * Finally, drop a runtime PM reference for each newly disabled CRTC,
+ * so we can put the GPU into runtime suspend if we're not driving any
+ * displays anymore
+ */
+ for (i = 0; i < crtc_disable_count; i++)
+ pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
+}
+
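+/*
+ * Build a minimal atomic state (connector, CRTC and primary plane) with
+ * mode_changed forced, and commit it so the previous display
+ * configuration is reprogrammed without userspace involvement.
+ */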
+static int dm_force_atomic_commit(struct drm_connector *connector)
+{
+ int ret = 0;
+ struct drm_device *ddev = connector->dev;
+ struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
+ struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ struct drm_plane *plane = disconnected_acrtc->base.primary;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ddev->mode_config.acquire_ctx;
+
+ /* Construct an atomic state to restore previous display settings */
+
+ /*
+ * Attach connectors to drm_atomic_state
+ */
+ conn_state = drm_atomic_get_connector_state(state, connector);
+
+ ret = PTR_ERR_OR_ZERO(conn_state);
+ if (ret)
+ goto out;
+
+ /* Attach crtc to drm_atomic_state */
+ crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
+
+ ret = PTR_ERR_OR_ZERO(crtc_state);
+ if (ret)
+ goto out;
+
+ /* force a restore */
+ crtc_state->mode_changed = true;
+
+ /* Attach plane to drm_atomic_state */
+ plane_state = drm_atomic_get_plane_state(state, plane);
+
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (ret)
+ goto out;
+
+ /* Call commit internally with the state we just constructed */
+ ret = drm_atomic_commit(state);
+
+out:
+ drm_atomic_state_put(state);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
+
+ return ret;
+}
+
+/*
+ * This function handles all cases when set mode does not come upon hotplug.
+ * This includes when a display is unplugged then plugged back into the
+ * same port and when running without usermode desktop manager support.
+ */
+void dm_restore_drm_connector_state(struct drm_device *dev,
+ struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct amdgpu_crtc *disconnected_acrtc;
+ struct dm_crtc_state *acrtc_state;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!aconnector->dc_sink || !connector->state || !connector->encoder)
+ return;
+
+ disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ if (!disconnected_acrtc)
+ return;
+
+ acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+ if (!acrtc_state->stream)
+ return;
+
+ /*
+ * If the previous sink is not released and differs from the current one,
+ * we deduce we are in a state where we cannot rely on a usermode call
+ * to turn on the display, so we do it here.
+ */
+ if (acrtc_state->stream->sink != aconnector->dc_sink)
+ dm_force_atomic_commit(&aconnector->base);
+}
+
+/*
+ * Grabs all modesetting locks to serialize against any blocking commits,
+ * and waits for completion of all non-blocking commits.
+ */
+static int do_acquire_global_lock(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_commit *commit;
+ long ret;
+
+ /*
+ * Adding all modeset locks to acquire_ctx ensures that when the
+ * framework releases it, the extra locks we take here are
+ * released too.
+ */
+ ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit, commit_entry);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->commit_lock);
+
+ if (!commit)
+ continue;
+
+ /*
+ * Make sure all pending HW programming has completed and
+ * all page flips are done.
+ */
+ ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
+
+ if (ret > 0)
+ ret = wait_for_completion_interruptible_timeout(
+ &commit->flip_done, 10*HZ);
+
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+ crtc->base.id, crtc->name);
+
+ drm_crtc_commit_put(commit);
+ }
+
+ return ret < 0 ? ret : 0;
+}
+
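+/*
+ * Derive the mod_freesync_config for a CRTC from the connector caps.
+ * VRR is supported when the connector is freesync capable and the mode's
+ * refresh rate lies within [min_vfreq, max_vfreq]; the resulting state is
+ * fixed, variable or inactive depending on the current freesync state and
+ * whether userspace enabled VRR.
+ */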
+static void get_freesync_config_for_crtc(
+ struct dm_crtc_state *new_crtc_state,
+ struct dm_connector_state *new_con_state)
+{
+ struct mod_freesync_config config = {0};
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_display_mode *mode = &new_crtc_state->base.mode;
+ int vrefresh = drm_mode_vrefresh(mode);
+ bool fs_vid_mode = false;
+
+ if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return;
+
+ aconnector = to_amdgpu_dm_connector(new_con_state->base.connector);
+
+ new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
+ vrefresh >= aconnector->min_vfreq &&
+ vrefresh <= aconnector->max_vfreq;
+
+ if (new_crtc_state->vrr_supported) {
+ new_crtc_state->stream->ignore_msa_timing_param = true;
+ fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
+
+ config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
+ config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
+ config.vsif_supported = true;
+ config.btr = true;
+
+ if (fs_vid_mode) {
+ config.state = VRR_STATE_ACTIVE_FIXED;
+ config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
+ goto out;
+ } else if (new_crtc_state->base.vrr_enabled) {
+ config.state = VRR_STATE_ACTIVE_VARIABLE;
+ } else {
+ config.state = VRR_STATE_INACTIVE;
+ }
+ }
+out:
+ new_crtc_state->freesync_config = config;
+}
+
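+/* Clear VRR support and any stale VRR infopacket on the CRTC state. */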
+static void reset_freesync_config_for_crtc(
+ struct dm_crtc_state *new_crtc_state)
+{
+ new_crtc_state->vrr_supported = false;
+
+ memset(&new_crtc_state->vrr_infopacket, 0,
+ sizeof(new_crtc_state->vrr_infopacket));
+}
+
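+/*
+ * Detect a "front porch only" timing change: all horizontal timings
+ * match, the vertical total and sync positions differ, but the vsync
+ * pulse width is preserved. Freesync video can switch between such
+ * modes without a full modeset.
+ */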
+static bool
+is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state)
+{
+ const struct drm_display_mode *old_mode, *new_mode;
+
+ if (!old_crtc_state || !new_crtc_state)
+ return false;
+
+ old_mode = &old_crtc_state->mode;
+ new_mode = &new_crtc_state->mode;
+
+ if (old_mode->clock == new_mode->clock &&
+ old_mode->hdisplay == new_mode->hdisplay &&
+ old_mode->vdisplay == new_mode->vdisplay &&
+ old_mode->htotal == new_mode->htotal &&
+ old_mode->vtotal != new_mode->vtotal &&
+ old_mode->hsync_start == new_mode->hsync_start &&
+ old_mode->vsync_start != new_mode->vsync_start &&
+ old_mode->hsync_end == new_mode->hsync_end &&
+ old_mode->vsync_end != new_mode->vsync_end &&
+ old_mode->hskew == new_mode->hskew &&
+ old_mode->vscan == new_mode->vscan &&
+ (old_mode->vsync_end - old_mode->vsync_start) ==
+ (new_mode->vsync_end - new_mode->vsync_start))
+ return true;
+
+ return false;
+}
+
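+/*
+ * Pin the freesync state to a fixed refresh rate derived from the mode:
+ * refresh_uhz = pixel_clock_hz * 10^6 / (htotal * vtotal). As a worked
+ * example (illustrative numbers only): 1920x1080@60 with clock = 148500
+ * kHz, htotal = 2200 and vtotal = 1125 gives 60,000,000 uHz, i.e. 60 Hz.
+ */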
+static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
+{
+ u64 num, den, res;
+ struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
+
+ dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
+
+ num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
+ den = (unsigned long long)new_crtc_state->mode.htotal *
+ (unsigned long long)new_crtc_state->mode.vtotal;
+
+ res = div_u64(num, den);
+ dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
+}
+
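+/*
+ * Create or remove the dc_stream_state backing a CRTC for this atomic
+ * state. With enable == false the old stream is removed from the dc
+ * context; with enable == true a validated stream is created from the
+ * connector state and added. Sets *lock_and_validation_needed whenever
+ * a full update (and therefore global validation) is required.
+ */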
+static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state,
+ bool enable,
+ bool *lock_and_validation_needed)
+{
+ struct dm_atomic_state *dm_state = NULL;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct dc_stream_state *new_stream;
+ int ret = 0;
+
+ /*
+ * TODO: Move this code into dm_crtc_atomic_check once we get rid of
+ * dc_validation_set, and update changed items there.
+ */
+ struct amdgpu_crtc *acrtc = NULL;
+ struct drm_connector *connector = NULL;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
+ struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
+
+ new_stream = NULL;
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ acrtc = to_amdgpu_crtc(crtc);
+ connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
+ if (connector)
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ /* TODO This hack should go away */
+ if (connector && enable) {
+ /* Make sure fake sink is created in plug-in scenario */
+ drm_new_conn_state = drm_atomic_get_new_connector_state(state,
+ connector);
+ drm_old_conn_state = drm_atomic_get_old_connector_state(state,
+ connector);
+
+ if (IS_ERR(drm_new_conn_state)) {
+ ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
+ goto fail;
+ }
+
+ dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
+ dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ goto skip_modeset;
+
+ new_stream = create_validate_stream_for_sink(aconnector,
+ &new_crtc_state->mode,
+ dm_new_conn_state,
+ dm_old_crtc_state->stream);
+
+ /*
+ * We can have no stream on ACTION_SET if a display
+ * was disconnected during S3; in this case it is not an
+ * error, the OS will be updated after detection and
+ * will do the right thing on the next atomic commit.
+ */
+
+ if (!new_stream) {
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /*
+ * TODO: Check VSDB bits to decide whether this should
+ * be enabled or not.
+ */
+ new_stream->triggered_crtc_reset.enabled =
+ dm->force_timing_sync;
+
+ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
+
+ ret = fill_hdr_info_packet(drm_new_conn_state,
+ &new_stream->hdr_static_metadata);
+ if (ret)
+ goto fail;
+
+ /*
+ * If we already removed the old stream from the context
+ * (and set the new stream to NULL) then we can't reuse
+ * the old stream even if the stream and scaling are unchanged.
+ * We'll hit the BUG_ON and black screen.
+ *
+ * TODO: Refactor this function to allow this check to work
+ * in all conditions.
+ */
+ if (amdgpu_freesync_vid_mode &&
+ dm_new_crtc_state->stream &&
+ is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
+ goto skip_modeset;
+
+ if (dm_new_crtc_state->stream &&
+ dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
+ new_crtc_state->mode_changed = false;
+ DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
+ new_crtc_state->mode_changed);
+ }
+ }
+
+ /* mode_changed flag may get updated above, need to check again */
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ goto skip_modeset;
+
+ drm_dbg_state(state->dev,
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->planes_changed,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* Remove stream for any changed/disabled CRTC */
+ if (!enable) {
+
+ if (!dm_old_crtc_state->stream)
+ goto skip_modeset;
+
+ /* Unset freesync video if it was active before */
+ if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
+ dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
+ dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
+ }
+
+ /* Now check if we should set freesync video mode */
+ if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
+ dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ is_timing_unchanged_for_freesync(new_crtc_state,
+ old_crtc_state)) {
+ new_crtc_state->mode_changed = false;
+ DRM_DEBUG_DRIVER(
+ "Mode change not required for front porch change, setting mode_changed to %d",
+ new_crtc_state->mode_changed);
+
+ set_freesync_fixed_config(dm_new_crtc_state);
+
+ goto skip_modeset;
+ } else if (amdgpu_freesync_vid_mode && aconnector &&
+ is_freesync_video_mode(&new_crtc_state->mode,
+ aconnector)) {
+ struct drm_display_mode *high_mode;
+
+ high_mode = get_highest_refresh_rate_mode(aconnector, false);
+ if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
+ set_freesync_fixed_config(dm_new_crtc_state);
+ }
+
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
+
+ DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
+ crtc->base.id);
+
+ /* i.e. reset mode */
+ if (dc_state_remove_stream(
+ dm->dc,
+ dm_state->context,
+ dm_old_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dc_stream_release(dm_old_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+
+ reset_freesync_config_for_crtc(dm_new_crtc_state);
+
+ *lock_and_validation_needed = true;
+
+ } else {/* Add stream for any updated/enabled CRTC */
+ /*
+ * Quick fix to prevent a NULL pointer dereference on new_stream when
+ * added MST connectors are not found in the existing crtc_state in
+ * chained mode.
+ * TODO: need to dig out the root cause of this.
+ */
+ if (!connector)
+ goto skip_modeset;
+
+ if (modereset_required(new_crtc_state))
+ goto skip_modeset;
+
+ if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
+ dm_old_crtc_state->stream)) {
+
+ WARN_ON(dm_new_crtc_state->stream);
+
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
+
+ dm_new_crtc_state->stream = new_stream;
+
+ dc_stream_retain(new_stream);
+
+ DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
+ crtc->base.id);
+
+ if (dc_state_add_stream(
+ dm->dc,
+ dm_state->context,
+ dm_new_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ *lock_and_validation_needed = true;
+ }
+ }
+
+skip_modeset:
+ /* Release extra reference */
+ if (new_stream)
+ dc_stream_release(new_stream);
+
+ /*
+ * Below we want to do dc stream updates that do not require a
+ * full modeset.
+ */
+ if (!(enable && connector && new_crtc_state->active))
+ return 0;
+ /*
+ * Given the above conditions, the dc state cannot be NULL because:
+ * 1. We're in the process of enabling CRTCs (the stream has just
+ * been added to the dc context, or is already on it),
+ * 2. The CRTC has a valid connector attached, and
+ * 3. It is currently active and enabled.
+ * => The dc stream state currently exists.
+ */
+ BUG_ON(dm_new_crtc_state->stream == NULL);
+
+ /* Scaling or underscan settings */
+ if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))
+ update_stream_scaling_settings(
+ &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
+
+ /* ABM settings */
+ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
+
+ /*
+ * Color management settings. We also update color properties
+ * when a modeset is needed, to ensure it gets reprogrammed.
+ */
+ if (dm_new_crtc_state->base.color_mgmt_changed ||
+ dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
+ if (ret)
+ goto fail;
+ }
+
+ /* Update Freesync settings. */
+ get_freesync_config_for_crtc(dm_new_crtc_state,
+ dm_new_conn_state);
+
+ return ret;
+
+fail:
+ if (new_stream)
+ dc_stream_release(new_stream);
+ return ret;
+}
+
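+/*
+ * Decide whether a plane update can be applied in place or whether the
+ * plane must be removed from and re-added to the dc context. Any change
+ * that can affect z-order, pipe acquisition or bandwidth (scaling,
+ * rotation, blending, color/transfer functions, pixel format,
+ * tiling/DCC) forces a reset.
+ */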
+static bool should_reset_plane(struct drm_atomic_state *state,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state)
+{
+ struct drm_plane *other;
+ struct drm_plane_state *old_other_state, *new_other_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
+ int i;
+
+ /*
+ * TODO: Remove this hack for all ASICs once it is proven that
+ * fast updates work fine on DCN3.2+.
+ */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) &&
+ state->allow_modeset)
+ return true;
+
+ /* Exit early if we know that we're adding or removing the plane. */
+ if (old_plane_state->crtc != new_plane_state->crtc)
+ return true;
+
+ /* old crtc == new_crtc == NULL, plane not in context. */
+ if (!new_plane_state->crtc)
+ return false;
+
+ new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+
+ if (!new_crtc_state)
+ return true;
+
+ /* CRTC Degamma changes currently require us to recreate planes. */
+ if (new_crtc_state->color_mgmt_changed)
+ return true;
+
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state))
+ return true;
+
+ /*
+ * If there are any new primary or overlay planes being added or
+ * removed then the z-order can potentially change. To ensure
+ * correct z-order and pipe acquisition the current DC architecture
+ * requires us to remove and recreate all existing planes.
+ *
+ * TODO: Come up with a more elegant solution for this.
+ */
+ for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
+ struct amdgpu_framebuffer *old_afb, *new_afb;
+ struct dm_plane_state *dm_new_other_state, *dm_old_other_state;
+
+ dm_new_other_state = to_dm_plane_state(new_other_state);
+ dm_old_other_state = to_dm_plane_state(old_other_state);
+
+ if (other->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (old_other_state->crtc != new_plane_state->crtc &&
+ new_other_state->crtc != new_plane_state->crtc)
+ continue;
+
+ if (old_other_state->crtc != new_other_state->crtc)
+ return true;
+
+ /* Src/dst size and scaling updates. */
+ if (old_other_state->src_w != new_other_state->src_w ||
+ old_other_state->src_h != new_other_state->src_h ||
+ old_other_state->crtc_w != new_other_state->crtc_w ||
+ old_other_state->crtc_h != new_other_state->crtc_h)
+ return true;
+
+ /* Rotation / mirroring updates. */
+ if (old_other_state->rotation != new_other_state->rotation)
+ return true;
+
+ /* Blending updates. */
+ if (old_other_state->pixel_blend_mode !=
+ new_other_state->pixel_blend_mode)
+ return true;
+
+ /* Alpha updates. */
+ if (old_other_state->alpha != new_other_state->alpha)
+ return true;
+
+ /* Colorspace changes. */
+ if (old_other_state->color_range != new_other_state->color_range ||
+ old_other_state->color_encoding != new_other_state->color_encoding)
+ return true;
+
+ /* HDR/Transfer Function changes. */
+ if (dm_old_other_state->degamma_tf != dm_new_other_state->degamma_tf ||
+ dm_old_other_state->degamma_lut != dm_new_other_state->degamma_lut ||
+ dm_old_other_state->hdr_mult != dm_new_other_state->hdr_mult ||
+ dm_old_other_state->ctm != dm_new_other_state->ctm ||
+ dm_old_other_state->shaper_lut != dm_new_other_state->shaper_lut ||
+ dm_old_other_state->shaper_tf != dm_new_other_state->shaper_tf ||
+ dm_old_other_state->lut3d != dm_new_other_state->lut3d ||
+ dm_old_other_state->blend_lut != dm_new_other_state->blend_lut ||
+ dm_old_other_state->blend_tf != dm_new_other_state->blend_tf)
+ return true;
+
+ /* Framebuffer checks fall at the end. */
+ if (!old_other_state->fb || !new_other_state->fb)
+ continue;
+
+ /* Pixel format changes can require bandwidth updates. */
+ if (old_other_state->fb->format != new_other_state->fb->format)
+ return true;
+
+ old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
+ new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
+
+ /* Tiling and DCC changes also require bandwidth updates. */
+ if (old_afb->tiling_flags != new_afb->tiling_flags ||
+ old_afb->base.modifier != new_afb->base.modifier)
+ return true;
+ }
+
+ return false;
+}
+
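+/*
+ * Validate a framebuffer bound to the cursor plane: it must fit within
+ * the hardware cursor limits, be uncropped, use a pitch of 64/128/256
+ * pixels that matches its width, and be linear (no tiling/swizzle).
+ */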
+static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
+ struct drm_plane_state *new_plane_state,
+ struct drm_framebuffer *fb)
+{
+ struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
+ struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
+ unsigned int pitch;
+ bool linear;
+
+ if (fb->width > new_acrtc->max_cursor_width ||
+ fb->height > new_acrtc->max_cursor_height) {
+ DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
+ new_plane_state->fb->width,
+ new_plane_state->fb->height);
+ return -EINVAL;
+ }
+ if (new_plane_state->src_w != fb->width << 16 ||
+ new_plane_state->src_h != fb->height << 16) {
+ DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
+ return -EINVAL;
+ }
+
+ /* Pitch in pixels */
+ pitch = fb->pitches[0] / fb->format->cpp[0];
+
+ if (fb->width != pitch) {
+ DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
+ fb->width, pitch);
+ return -EINVAL;
+ }
+
+ switch (pitch) {
+ case 64:
+ case 128:
+ case 256:
+ /* FB pitch is supported by cursor plane */
+ break;
+ default:
+ DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
+ return -EINVAL;
+ }
+
+ /* Core DRM takes care of checking FB modifiers, so we only need to
+ * check tiling flags when the FB doesn't have a modifier.
+ */
+ if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
+ if (adev->family < AMDGPU_FAMILY_AI) {
+ linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
+ AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
+ AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
+ } else {
+ linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
+ }
+ if (!linear) {
+ DRM_DEBUG_ATOMIC("Cursor FB not linear");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
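+/*
+ * Plane-side counterpart of dm_update_crtc_state(): remove the
+ * dc_plane_state for disabled or reset planes, or create, fill and
+ * attach a new one for enabled planes. Cursor planes are only
+ * validated here and never get a dc plane of their own.
+ */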
+static int dm_update_plane_state(struct dc *dc,
+ struct drm_atomic_state *state,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state,
+ bool enable,
+ bool *lock_and_validation_needed,
+ bool *is_top_most_overlay)
+{
+
+ struct dm_atomic_state *dm_state = NULL;
+ struct drm_crtc *new_plane_crtc, *old_plane_crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
+ struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
+ struct amdgpu_crtc *new_acrtc;
+ bool needs_reset;
+ int ret = 0;
+
+
+ new_plane_crtc = new_plane_state->crtc;
+ old_plane_crtc = old_plane_state->crtc;
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ dm_old_plane_state = to_dm_plane_state(old_plane_state);
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ if (!enable || !new_plane_crtc ||
+ drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
+
+ new_acrtc = to_amdgpu_crtc(new_plane_crtc);
+
+ if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
+ DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
+ return -EINVAL;
+ }
+
+ if (new_plane_state->fb) {
+ ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
+ new_plane_state->fb);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ needs_reset = should_reset_plane(state, plane, old_plane_state,
+ new_plane_state);
+
+ /* Remove any changed/removed planes */
+ if (!enable) {
+ if (!needs_reset)
+ return 0;
+
+ if (!old_plane_crtc)
+ return 0;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(
+ state, old_plane_crtc);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ if (!dm_old_crtc_state->stream)
+ return 0;
+
+ DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, old_plane_crtc->base.id);
+
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ return ret;
+
+ if (!dc_state_remove_plane(
+ dc,
+ dm_old_crtc_state->stream,
+ dm_old_plane_state->dc_state,
+ dm_state->context)) {
+
+ return -EINVAL;
+ }
+
+ if (dm_old_plane_state->dc_state)
+ dc_plane_state_release(dm_old_plane_state->dc_state);
+
+ dm_new_plane_state->dc_state = NULL;
+
+ *lock_and_validation_needed = true;
+
+ } else { /* Add new planes */
+ struct dc_plane_state *dc_new_plane_state;
+
+ if (drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
+
+ if (!new_plane_crtc)
+ return 0;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (!dm_new_crtc_state->stream)
+ return 0;
+
+ if (!needs_reset)
+ return 0;
+
+ ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
+ if (ret)
+ return ret;
+
+ WARN_ON(dm_new_plane_state->dc_state);
+
+ dc_new_plane_state = dc_create_plane_state(dc);
+ if (!dc_new_plane_state)
+ return -ENOMEM;
+
+ /* Block top most plane from being a video plane */
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+ if (amdgpu_dm_plane_is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
+ return -EINVAL;
+
+ *is_top_most_overlay = false;
+ }
+
+ DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, new_plane_crtc->base.id);
+
+ ret = fill_dc_plane_attributes(
+ drm_to_adev(new_plane_crtc->dev),
+ dc_new_plane_state,
+ new_plane_state,
+ new_crtc_state);
+ if (ret) {
+ dc_plane_state_release(dc_new_plane_state);
+ return ret;
+ }
+
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret) {
+ dc_plane_state_release(dc_new_plane_state);
+ return ret;
+ }
+
+ /*
+ * Any atomic check errors that occur after this will
+ * not need a release. The plane state will be attached
+ * to the stream, and therefore part of the atomic
+ * state. It'll be released when the atomic state is
+ * cleaned.
+ */
+ if (!dc_state_add_plane(
+ dc,
+ dm_new_crtc_state->stream,
+ dc_new_plane_state,
+ dm_state->context)) {
+
+ dc_plane_state_release(dc_new_plane_state);
+ return -EINVAL;
+ }
+
+ dm_new_plane_state->dc_state = dc_new_plane_state;
+
+ dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
+
+ /* Tell DC to do a full surface update every time there
+ * is a plane change. Inefficient, but works for now.
+ */
+ dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
+
+ *lock_and_validation_needed = true;
+ }
+
+
+ return ret;
+}
+
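+/* Return the plane's source size in pixels, swapping w/h for 90/270 rotation. */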
+static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
+ int *src_w, int *src_h)
+{
+ switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_90:
+ case DRM_MODE_ROTATE_270:
+ *src_w = plane_state->src_h >> 16;
+ *src_h = plane_state->src_w >> 16;
+ break;
+ case DRM_MODE_ROTATE_0:
+ case DRM_MODE_ROTATE_180:
+ default:
+ *src_w = plane_state->src_w >> 16;
+ *src_h = plane_state->src_h >> 16;
+ break;
+ }
+}
+
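+/*
+ * Compute the plane's scaling factors in units of 1/1000. For example
+ * (illustrative numbers), a 960px-wide source scanned out at 1920px
+ * yields *out_plane_scale_w == 2000, i.e. a 2x upscale.
+ */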
+static void
+dm_get_plane_scale(struct drm_plane_state *plane_state,
+ int *out_plane_scale_w, int *out_plane_scale_h)
+{
+ int plane_src_w, plane_src_h;
+
+ dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
+ *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
+ *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
+}
+
+static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state)
+{
+ struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane_state *new_cursor_state, *new_underlying_state;
+ int i;
+ int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
+ bool any_relevant_change = false;
+
+ /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
+ * cursor per pipe, but it inherits the scaling and positioning from the
+ * underlying pipe. Check that the cursor plane's scaling matches the
+ * underlying planes'.
+ */
+
+ /* If no plane was enabled or changed scaling, no need to check again */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
+
+ if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
+ continue;
+
+ if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
+ any_relevant_change = true;
+ break;
+ }
+
+ if (new_plane_state->fb == old_plane_state->fb &&
+ new_plane_state->crtc_w == old_plane_state->crtc_w &&
+ new_plane_state->crtc_h == old_plane_state->crtc_h)
+ continue;
+
+ dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
+ dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
+
+ if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
+ any_relevant_change = true;
+ break;
+ }
+ }
+
+ if (!any_relevant_change)
+ return 0;
+
+ new_cursor_state = drm_atomic_get_plane_state(state, cursor);
+ if (IS_ERR(new_cursor_state))
+ return PTR_ERR(new_cursor_state);
+
+ if (!new_cursor_state->fb)
+ return 0;
+
+ dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
+
+ /* Need to check all enabled planes, even if this commit doesn't change
+ * their state
+ */
+ i = drm_atomic_add_affected_planes(state, crtc);
+ if (i)
+ return i;
+
+ for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
+ /* Narrow down to non-cursor planes on the same CRTC as the cursor */
+ if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
+ continue;
+
+ /* Ignore disabled planes */
+ if (!new_underlying_state->fb)
+ continue;
+
+ dm_get_plane_scale(new_underlying_state,
+ &underlying_scale_w, &underlying_scale_h);
+
+ if (cursor_scale_w != underlying_scale_w ||
+ cursor_scale_h != underlying_scale_h) {
+ drm_dbg_atomic(crtc->dev,
+ "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
+ cursor->base.id, cursor->name, underlying->base.id, underlying->name);
+ return -EINVAL;
+ }
+
+ /* If this plane covers the whole CRTC, no need to check planes underneath */
+ if (new_underlying_state->crtc_x <= 0 &&
+ new_underlying_state->crtc_y <= 0 &&
+ new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
+ new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
+ break;
+ }
+
+ return 0;
+}
+
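+/*
+ * If the CRTC drives an MST connector, pull every CRTC sharing the same
+ * MST topology into the atomic state, since changing the DSC
+ * configuration of one stream can affect the bandwidth available to
+ * its siblings.
+ */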
+static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state, *old_conn_state;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ int i;
+
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
+ if (!conn_state->crtc)
+ conn_state = old_conn_state;
+
+ if (conn_state->crtc != crtc)
+ continue;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (!aconnector->mst_output_port || !aconnector->mst_root)
+ aconnector = NULL;
+ else
+ break;
+ }
+
+ if (!aconnector)
+ return 0;
+
+ return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
+}
+
+/**
+ * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
+ *
+ * @dev: The DRM device
+ * @state: The atomic state to commit
+ *
+ * Validate that the given atomic state is programmable by DC into hardware.
+ * This involves constructing a &struct dc_state reflecting the new hardware
+ * state we wish to commit, then querying DC to see if it is programmable. It's
+ * important not to modify the existing DC state. Otherwise, atomic_check
+ * may unexpectedly commit hardware changes.
+ *
+ * When validating the DC state, it's important that the right locks are
+ * acquired. For the full-update case, which removes/adds/updates streams
+ * on one CRTC while flipping on another, acquiring the global lock
+ * guarantees that any such full-update commit will wait for completion
+ * of any outstanding flip using DRM's synchronization events.
+ *
+ * Note that DM adds the affected connectors for all CRTCs in state, when that
+ * might not seem necessary. This is because DC stream creation requires the
+ * DC sink, which is tied to the DRM connector state. Cleaning this up should
+ * be possible but non-trivial - a possible TODO item.
+ *
+ * Return: 0 on success, or a negative error code if validation failed.
+ */
+static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dm_atomic_state *dm_state = NULL;
+ struct dc *dc = adev->dm.dc;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ enum dc_status status;
+ int ret, i;
+ bool lock_and_validation_needed = false;
+ bool is_top_most_overlay = true;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_topology_state *mst_state;
+ struct dsc_mst_fairness_vars vars[MAX_PIPES];
+
+ trace_amdgpu_dm_atomic_check_begin(state);
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
+ goto fail;
+ }
+
+ /* Check connector changes */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+
+ /* Skip connectors that are disabled or part of modeset already. */
+ if (!new_con_state->crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
+ if (IS_ERR(new_crtc_state)) {
+ DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
+ ret = PTR_ERR(new_crtc_state);
+ goto fail;
+ }
+
+ if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
+ dm_old_con_state->scaling != dm_new_con_state->scaling)
+ new_crtc_state->connectors_changed = true;
+ }
+
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ ret = add_affected_mst_dsc_crtcs(state, crtc);
+ if (ret) {
+ DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
+ goto fail;
+ }
+ }
+ }
+ }
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+ !new_crtc_state->color_mgmt_changed &&
+ old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
+ dm_old_crtc_state->dsc_force_changed == false)
+ continue;
+
+ ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
+ goto fail;
+ }
+
+ if (!new_crtc_state->enable)
+ continue;
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
+ goto fail;
+ }
+
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
+ goto fail;
+ }
+
+ if (dm_old_crtc_state->dsc_force_changed)
+ new_crtc_state->mode_changed = true;
+ }
+
+ /*
+ * Add all primary and overlay planes on the CRTC to the state
+ * whenever a plane is enabled to maintain correct z-ordering
+ * and to enable fast surface updates.
+ */
+ drm_for_each_crtc(crtc, dev) {
+ bool modified = false;
+
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (new_plane_state->crtc == crtc ||
+ old_plane_state->crtc == crtc) {
+ modified = true;
+ break;
+ }
+ }
+
+ if (!modified)
+ continue;
+
+ drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ new_plane_state =
+ drm_atomic_get_plane_state(state, plane);
+
+ if (IS_ERR(new_plane_state)) {
+ ret = PTR_ERR(new_plane_state);
+ DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
+ goto fail;
+ }
+ }
+ }
+
+ /*
+ * DC consults the zpos (layer_index in DC terminology) to determine the
+ * hw plane on which to enable the hw cursor (see
+ * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
+ * atomic state, so call drm helper to normalize zpos.
+ */
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret) {
+ drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
+ goto fail;
+ }
+
+ /* Remove existing planes if they are modified */
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ if (old_plane_state->fb && new_plane_state->fb &&
+ get_mem_type(old_plane_state->fb) !=
+ get_mem_type(new_plane_state->fb))
+ lock_and_validation_needed = true;
+
+ ret = dm_update_plane_state(dc, state, plane,
+ old_plane_state,
+ new_plane_state,
+ false,
+ &lock_and_validation_needed,
+ &is_top_most_overlay);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
+ goto fail;
+ }
+ }
+
+ /* Disable all crtcs which require disable */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ ret = dm_update_crtc_state(&adev->dm, state, crtc,
+ old_crtc_state,
+ new_crtc_state,
+ false,
+ &lock_and_validation_needed);
+ if (ret) {
+ DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
+ goto fail;
+ }
+ }
+
+ /* Enable all crtcs which require enable */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ ret = dm_update_crtc_state(&adev->dm, state, crtc,
+ old_crtc_state,
+ new_crtc_state,
+ true,
+ &lock_and_validation_needed);
+ if (ret) {
+ DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
+ goto fail;
+ }
+ }
+
+ /* Add new/modified planes */
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ ret = dm_update_plane_state(dc, state, plane,
+ old_plane_state,
+ new_plane_state,
+ true,
+ &lock_and_validation_needed,
+ &is_top_most_overlay);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
+ goto fail;
+ }
+ }
+
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ ret = pre_validate_dsc(state, &dm_state, vars);
+ if (ret != 0)
+ goto fail;
+ }
+
+ /* Run this here since we want to validate the streams we created */
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
+ goto fail;
+ }
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->mpo_requested)
+ DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
+ }
+
+ /* Check cursor planes scaling */
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
+ goto fail;
+ }
+ }
+
+ if (state->legacy_cursor_update) {
+ /*
+ * This is a fast cursor update coming from the plane update
+ * helper, check if it can be done asynchronously for better
+ * performance.
+ */
+ state->async_update =
+ !drm_atomic_helper_async_check(dev, state);
+
+ /*
+ * Skip the remaining global validation if this is an async
+ * update. Cursor updates can be done without affecting
+ * state or bandwidth calcs and this avoids the performance
+ * penalty of locking the private state object and
+ * allocating a new dc_state.
+ */
+ if (state->async_update)
+ return 0;
+ }
+
+ /* Check scaling and underscan changes */
+ /*
+ * TODO: Scaling-change validation was removed due to an inability to
+ * commit a new stream into the context w/o causing a full reset. Need
+ * to decide how to handle this.
+ */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(
+ drm_atomic_get_new_crtc_state(state, &acrtc->base)))
+ continue;
+
+ /* Skip anything that is not a scaling or underscan change */
+ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
+ continue;
+
+ lock_and_validation_needed = true;
+ }
+
+ /* set the slot info for each mst_state based on the link encoding format */
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ u8 link_coding_cap;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ if (connector->index == mst_state->mgr->conn_base_id) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
+ drm_dp_mst_update_slots(mst_state, link_coding_cap);
+
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+ }
+
+ /*
+ * Streams and planes are reset when there are changes that affect
+ * bandwidth. Anything that affects bandwidth needs to go through
+ * DC global validation to ensure that the configuration can be applied
+ * to hardware.
+ *
+ * We currently have to stall out here in atomic_check for outstanding
+ * commits to finish in this case because our IRQ handlers reference
+ * DRM state directly - we can end up disabling interrupts too early
+ * if we don't.
+ *
+ * TODO: Remove this stall and drop DM state private objects.
+ */
+ if (lock_and_validation_needed) {
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
+ goto fail;
+ }
+
+ ret = do_acquire_global_lock(dev, state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
+ goto fail;
+ }
+
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+ if (ret) {
+ DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
+ goto fail;
+ }
+
+ /*
+ * Perform validation of MST topology in the state:
+ * We need to perform MST atomic check before calling
+ * dc_validate_global_state(), or there is a chance
+ * to get stuck in an infinite loop and hang eventually.
+ */
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
+ goto fail;
+ }
+ status = dc_validate_global_state(dc, dm_state->context, true);
+ if (status != DC_OK) {
+ DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
+ dc_status_to_str(status), status);
+ ret = -EINVAL;
+ goto fail;
+ }
+ } else {
+ /*
+ * The commit is a fast update. Fast updates shouldn't change
+ * the DC context, affect global validation, and can have their
+ * commit work done in parallel with other commits not touching
+ * the same resource. If we have a new DC context as part of
+ * the DM atomic state from validation we need to free it and
+ * retain the existing one instead.
+ *
+ * Furthermore, since the DM atomic state only contains the DC
+ * context and can safely be annulled, we can free the state
+ * and clear the associated private object now to free
+ * some memory and avoid a possible use-after-free later.
+ */
+
+ for (i = 0; i < state->num_private_objs; i++) {
+ struct drm_private_obj *obj = state->private_objs[i].ptr;
+
+ if (obj->funcs == adev->dm.atomic_obj.funcs) {
+ int j = state->num_private_objs - 1;
+
+ dm_atomic_destroy_state(obj,
+ state->private_objs[i].state);
+
+ /* If i is not at the end of the array then the
+ * last element needs to be moved to where i was
+ * before the array can safely be truncated.
+ */
+ if (i != j)
+ state->private_objs[i] =
+ state->private_objs[j];
+
+ state->private_objs[j].ptr = NULL;
+ state->private_objs[j].state = NULL;
+ state->private_objs[j].old_state = NULL;
+ state->private_objs[j].new_state = NULL;
+
+ state->num_private_objs = j;
+ break;
+ }
+ }
+ }
+
+ /* Store the overall update type for use later in atomic check. */
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct dm_crtc_state *dm_new_crtc_state =
+ to_dm_crtc_state(new_crtc_state);
+
+ /*
+ * Only allow async flips for fast updates that don't change
+ * the FB pitch, the DCC state, rotation, etc.
+ */
+ if (new_crtc_state->async_flip && lock_and_validation_needed) {
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] async flips are only supported for fast updates\n",
+ crtc->base.id, crtc->name);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dm_new_crtc_state->update_type = lock_and_validation_needed ?
+ UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
+ }
+
+ /* Must be success */
+ WARN_ON(ret);
+
+ trace_amdgpu_dm_atomic_check_finish(state, ret);
+
+ return ret;
+
+fail:
+ if (ret == -EDEADLK)
+ DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
+ else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
+ DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
+ else
+ DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
+
+ trace_amdgpu_dm_atomic_check_finish(state, ret);
+
+ return ret;
+}
+
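+/*
+ * Read DP_DOWN_STREAM_PORT_COUNT and report whether the sink sets
+ * DP_MSA_TIMING_PAR_IGNORED, i.e. whether it can ignore the MSA timing
+ * parameters; callers use this to gate FreeSync over DP/eDP.
+ */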
+static bool is_dp_capable_without_timing_msa(struct dc *dc,
+ struct amdgpu_dm_connector *amdgpu_dm_connector)
+{
+ u8 dpcd_data;
+ bool capable = false;
+
+ if (amdgpu_dm_connector->dc_link &&
+ dm_helpers_dp_read_dpcd(
+ NULL,
+ amdgpu_dm_connector->dc_link,
+ DP_DOWN_STREAM_PORT_COUNT,
+ &dpcd_data,
+ sizeof(dpcd_data))) {
+ capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
+ }
+
+ return capable;
+}
+
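+/*
+ * Hand one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA EDID
+ * extension block to the DMUB firmware for parsing, then decode the
+ * reply: either an ack for the chunk or the AMD VSDB parse result.
+ */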
+static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
+ unsigned int offset,
+ unsigned int total_length,
+ u8 *data,
+ unsigned int length,
+ struct amdgpu_hdmi_vsdb_info *vsdb)
+{
+ bool res;
+ union dmub_rb_cmd cmd;
+ struct dmub_cmd_send_edid_cea *input;
+ struct dmub_cmd_edid_cea_output *output;
+
+ if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
+ return false;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ input = &cmd.edid_cea.data.input;
+
+ cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
+ cmd.edid_cea.header.sub_type = 0;
+ cmd.edid_cea.header.payload_bytes =
+ sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
+ input->offset = offset;
+ input->length = length;
+ input->cea_total_length = total_length;
+ memcpy(input->payload, data, length);
+
+ res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
+ if (!res) {
+ DRM_ERROR("EDID CEA parser failed\n");
+ return false;
+ }
+
+ output = &cmd.edid_cea.data.output;
+
+ if (output->type == DMUB_CMD__EDID_CEA_ACK) {
+ if (!output->ack.success) {
+ DRM_ERROR("EDID CEA ack failed at offset %d\n",
+ output->ack.offset);
+ }
+ } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
+ if (!output->amd_vsdb.vsdb_found)
+ return false;
+
+ vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
+ vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
+ vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
+ vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
+ } else {
+ DRM_WARN("Unknown EDID CEA parser results\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
+ u8 *edid_ext, int len,
+ struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ int i;
+
+ /* send extension block to DMCU for parsing */
+ for (i = 0; i < len; i += 8) {
+ bool res;
+ int offset;
+
+ /* send 8 bytes at a time */
+ if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
+ return false;
+
+ if (i + 8 == len) {
+ /* EDID block send completed; expect a result */
+ int version, min_rate, max_rate;
+
+ res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
+ if (res) {
+ /* amd vsdb found */
+ vsdb_info->freesync_supported = 1;
+ vsdb_info->amd_vsdb_version = version;
+ vsdb_info->min_refresh_rate_hz = min_rate;
+ vsdb_info->max_refresh_rate_hz = max_rate;
+ return true;
+ }
+ /* not amd vsdb */
+ return false;
+ }
+
+ /* check for ack */
+ res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
+ if (!res)
+ return false;
+ }
+
+ return false;
+}
+
+static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
+ u8 *edid_ext, int len,
+ struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ int i;
+
+ /* send extension block to DMUB for parsing */
+ for (i = 0; i < len; i += 8) {
+ /* send 8 bytes at a time */
+ if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
+ return false;
+ }
+
+ return vsdb_info->freesync_supported;
+}
+
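+/*
+ * Parse a CEA extension block for the AMD VSDB, offloading to DMUB when
+ * available and falling back to the DMCU path otherwise.
+ */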
+static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
+ u8 *edid_ext, int len,
+ struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
+ bool ret;
+
+ mutex_lock(&adev->dm.dc_lock);
+ if (adev->dm.dmub_srv)
+ ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
+ else
+ ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
+ mutex_unlock(&adev->dm.dc_lock);
+ return ret;
+}
+
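+/*
+ * Scan the DisplayID extension for a version 3 AMD VSDB and record
+ * whether the panel advertises Replay mode support.
+ */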
+static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
+ struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ u8 *edid_ext = NULL;
+ int i;
+ int j = 0;
+
+ if (edid == NULL || edid->extensions == 0)
+ return -ENODEV;
+
+ /* Find DisplayID extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (void *)(edid + (i + 1));
+ if (edid_ext[0] == DISPLAYID_EXT)
+ break;
+ }
+
+ while (j < EDID_LENGTH) {
+ struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
+ unsigned int ieee_id = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);
+
+ if (ieee_id == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID &&
+ amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) {
+ vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ? true : false;
+ vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3;
+ DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode);
+
+ return true;
+ }
+ j++;
+ }
+
+ return false;
+}
+
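+/*
+ * Locate the CEA extension block in the EDID and scan it for a valid
+ * AMD VSDB. Returns the extension index on success, -ENODEV otherwise.
+ */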
+static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
+ struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ u8 *edid_ext = NULL;
+ int i;
+ bool valid_vsdb_found = false;
+
+ /*----- drm_find_cea_extension() -----*/
+ /* No EDID or EDID extensions */
+ if (edid == NULL || edid->extensions == 0)
+ return -ENODEV;
+
+ /* Find CEA extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
+ if (edid_ext[0] == CEA_EXT)
+ break;
+ }
+
+ if (i == edid->extensions)
+ return -ENODEV;
+
+ /*----- cea_db_offsets() -----*/
+ if (edid_ext[0] != CEA_EXT)
+ return -ENODEV;
+
+ valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
+
+ return valid_vsdb_found ? i : -ENODEV;
+}
+
+/**
+ * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
+ *
+ * @connector: Connector to query.
+ * @edid: EDID from monitor
+ *
+ * Amdgpu supports FreeSync on DP and HDMI displays, and it is required to keep
+ * track of some of the display information in the internal data struct used by
+ * amdgpu_dm. This function checks the connector type to decide how to set the
+ * FreeSync parameters.
+ */
+void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
+ struct edid *edid)
+{
+ int i = 0;
+ struct detailed_timing *timing;
+ struct detailed_non_pixel *data;
+ struct detailed_data_monitor_range *range;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ struct dm_connector_state *dm_con_state = NULL;
+ struct dc_sink *sink;
+
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
+ struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
+ bool freesync_capable = false;
+ enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
+
+ if (!connector->state) {
+ DRM_ERROR("%s - Connector has no state", __func__);
+ goto update;
+ }
+
+ sink = amdgpu_dm_connector->dc_sink ?
+ amdgpu_dm_connector->dc_sink :
+ amdgpu_dm_connector->dc_em_sink;
+
+ if (!edid || !sink) {
+ dm_con_state = to_dm_connector_state(connector->state);
+
+ amdgpu_dm_connector->min_vfreq = 0;
+ amdgpu_dm_connector->max_vfreq = 0;
+ amdgpu_dm_connector->pixel_clock_mhz = 0;
+ connector->display_info.monitor_range.min_vfreq = 0;
+ connector->display_info.monitor_range.max_vfreq = 0;
+ freesync_capable = false;
+
+ goto update;
+ }
+
+ dm_con_state = to_dm_connector_state(connector->state);
+
+ if (!adev->dm.freesync_module)
+ goto update;
+
+ if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ sink->sink_signal == SIGNAL_TYPE_EDP)) {
+ bool edid_check_required = false;
+
+ if (is_dp_capable_without_timing_msa(adev->dm.dc,
+ amdgpu_dm_connector)) {
+ if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
+ freesync_capable = true;
+ amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+ } else {
+ edid_check_required = edid->version > 1 ||
+ (edid->version == 1 &&
+ edid->revision > 1);
+ }
+ }
+
+ if (edid_check_required) {
+ for (i = 0; i < 4; i++) {
+
+ timing = &edid->detailed_timings[i];
+ data = &timing->data.other_data;
+ range = &data->data.range;
+ /*
+ * Check if monitor has continuous frequency mode
+ */
+ if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ continue;
+ /*
+ * Check for the "range limits only" flag. If flags == 1 then
+ * no additional timing information is provided.
+ * Default GTF, GTF secondary curve and CVT are not
+ * supported.
+ */
+ if (range->flags != 1)
+ continue;
+
+ connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
+ connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
+
+ if (edid->revision >= 4) {
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
+ connector->display_info.monitor_range.min_vfreq += 255;
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
+ connector->display_info.monitor_range.max_vfreq += 255;
+ }
+
+ amdgpu_dm_connector->min_vfreq =
+ connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq =
+ connector->display_info.monitor_range.max_vfreq;
+ amdgpu_dm_connector->pixel_clock_mhz =
+ range->pixel_clock_mhz * 10;
+
+ break;
+ }
+
+ if (amdgpu_dm_connector->max_vfreq -
+ amdgpu_dm_connector->min_vfreq > 10) {
+
+ freesync_capable = true;
+ }
+ }
+ parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+
+ if (vsdb_info.replay_mode) {
+ amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode;
+ amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version;
+ amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP;
+ }
+
+ } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+ if (i >= 0 && vsdb_info.freesync_supported) {
+ timing = &edid->detailed_timings[i];
+ data = &timing->data.other_data;
+
+ amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
+ amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ freesync_capable = true;
+
+ connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
+ connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
+ }
+ }
+
+ as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);
+
+ if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
+ i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+ if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
+
+ amdgpu_dm_connector->pack_sdp_v1_3 = true;
+ amdgpu_dm_connector->as_type = as_type;
+ amdgpu_dm_connector->vsdb_info = vsdb_info;
+
+ amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
+ amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ freesync_capable = true;
+
+ connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
+ connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
+ }
+ }
+
+update:
+ if (dm_con_state)
+ dm_con_state->freesync_capable = freesync_capable;
+
+ if (connector->vrr_capable_property)
+ drm_connector_set_vrr_capable_property(connector,
+ freesync_capable);
+}
+
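+/*
+ * Propagate the force_timing_sync setting to every stream in the current
+ * dc state and retrigger CRTC synchronization across displays.
+ */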
+void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = adev->dm.dc;
+ int i;
+
+ mutex_lock(&adev->dm.dc_lock);
+ if (dc->current_state) {
+ for (i = 0; i < dc->current_state->stream_count; ++i)
+ dc->current_state->streams[i]
+ ->triggered_crtc_reset.enabled =
+ adev->dm.force_timing_sync;
+
+ dm_enable_per_frame_crtc_master_sync(dc->current_state);
+ dc_trigger_sync(dc, dc->current_state);
+ }
+ mutex_unlock(&adev->dm.dc_lock);
+}
+
+void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
+ u32 value, const char *func_name)
+{
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ drm_err(adev_to_drm(ctx->driver_context),
+			"invalid register write. address = 0\n");
+ return;
+ }
+#endif
+ cgs_write_register(ctx->cgs_device, address, value);
+ trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
+}
+
+uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
+ const char *func_name)
+{
+ u32 value;
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ drm_err(adev_to_drm(ctx->driver_context),
+ "invalid register read; address = 0\n");
+ return 0;
+ }
+#endif
+
+ if (ctx->dmub_srv &&
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
+ !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
+ ASSERT(false);
+ return 0;
+ }
+
+ value = cgs_read_register(ctx->cgs_device, address);
+
+ trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
+
+ return value;
+}
+
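+/*
+ * Submit an AUX transfer to DMUB for a DPIA link and wait up to 10s for the
+ * reply. Returns the reply length on success or -1 on failure; the detailed
+ * status is reported through operation_result.
+ */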
+int amdgpu_dm_process_dmub_aux_transfer_sync(
+ struct dc_context *ctx,
+ unsigned int link_index,
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct dmub_notification *p_notify = adev->dm.dmub_notify;
+ int ret = -1;
+
+ mutex_lock(&adev->dm.dpia_aux_lock);
+ if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
+ *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
+		DRM_ERROR("wait_for_completion_timeout timed out!");
+ *operation_result = AUX_RET_ERROR_TIMEOUT;
+ goto out;
+ }
+
+ if (p_notify->result != AUX_RET_SUCCESS) {
+ /*
+ * Transient states before tunneling is enabled could
+ * lead to this error. We can ignore this for now.
+ */
+ if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
+ DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
+ payload->address, payload->length,
+ p_notify->result);
+ }
+ *operation_result = AUX_RET_ERROR_INVALID_REPLY;
+ goto out;
+ }
+
+
+ payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
+ if (!payload->write && p_notify->aux_reply.length &&
+ (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
+
+ if (payload->length != p_notify->aux_reply.length) {
+ DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
+ p_notify->aux_reply.length,
+ payload->address, payload->length);
+ *operation_result = AUX_RET_ERROR_INVALID_REPLY;
+ goto out;
+ }
+
+ memcpy(payload->data, p_notify->aux_reply.data,
+ p_notify->aux_reply.length);
+ }
+
+ /* success */
+ ret = p_notify->aux_reply.length;
+ *operation_result = p_notify->result;
+out:
+ reinit_completion(&adev->dm.dmub_aux_transfer_done);
+ mutex_unlock(&adev->dm.dpia_aux_lock);
+ return ret;
+}
+
+int amdgpu_dm_process_dmub_set_config_sync(
+ struct dc_context *ctx,
+ unsigned int link_index,
+ struct set_config_cmd_payload *payload,
+ enum set_config_status *operation_result)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ bool is_cmd_complete;
+ int ret;
+
+ mutex_lock(&adev->dm.dpia_aux_lock);
+ is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
+ link_index, payload, adev->dm.dmub_notify);
+
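+	/* The command may have completed synchronously; if not, wait up to
+	 * 10s for the DMUB notification to arrive.
+	 */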
+ if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
+ ret = 0;
+ *operation_result = adev->dm.dmub_notify->sc_status;
+ } else {
+		DRM_ERROR("wait_for_completion_timeout timed out!");
+ ret = -1;
+ *operation_result = SET_CONFIG_UNKNOWN_ERROR;
+ }
+
+ if (!is_cmd_complete)
+ reinit_completion(&adev->dm.dmub_aux_transfer_done);
+ mutex_unlock(&adev->dm.dpia_aux_lock);
+ return ret;
+}
+
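+/* Thin wrappers forwarding single and batched DMUB commands to the DC DMUB server. */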
+bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+{
+ return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
+}
+
+bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+{
+ return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
+}
diff --git a/rr-cache/d636b431e3d21b51122f1141ec3c0f2be3aba9cf/preimage.1 b/rr-cache/d636b431e3d21b51122f1141ec3c0f2be3aba9cf/preimage.1
new file mode 100644
index 000000000000..f864e6da4215
--- /dev/null
+++ b/rr-cache/d636b431e3d21b51122f1141ec3c0f2be3aba9cf/preimage.1
@@ -0,0 +1,11571 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* The caprices of the preprocessor require that this be declared right here */
+#define CREATE_TRACE_POINTS
+
+#include "dm_services_types.h"
+#include "dc.h"
+#include "link_enc_cfg.h"
+#include "dc/inc/core_types.h"
+#include "dal_asic_id.h"
+#include "dmub/dmub_srv.h"
+#include "dc/inc/hw/dmcu.h"
+#include "dc/inc/hw/abm.h"
+#include "dc/dc_dmub_srv.h"
+#include "dc/dc_edid_parser.h"
+#include "dc/dc_stat.h"
+#include "dc/dc_state.h"
+#include "amdgpu_dm_trace.h"
+#include "dpcd_defs.h"
+#include "link/protocols/link_dpcd.h"
+#include "link_service_types.h"
+#include "link/protocols/link_dp_capability.h"
+#include "link/protocols/link_ddc.h"
+
+#include "vid.h"
+#include "amdgpu.h"
+#include "amdgpu_display.h"
+#include "amdgpu_ucode.h"
+#include "atom.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_plane.h"
+#include "amdgpu_dm_crtc.h"
+#include "amdgpu_dm_hdcp.h"
+#include <drm/display/drm_hdcp_helper.h>
+#include "amdgpu_dm_wb.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_atombios.h"
+
+#include "amd_shared.h"
+#include "amdgpu_dm_irq.h"
+#include "dm_helpers.h"
+#include "amdgpu_dm_mst_types.h"
+#if defined(CONFIG_DEBUG_FS)
+#include "amdgpu_dm_debugfs.h"
+#endif
+#include "amdgpu_dm_psr.h"
+#include "amdgpu_dm_replay.h"
+
+#include "ivsrcid/ivsrcid_vislands30.h"
+
+#include <linux/backlight.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/pm_runtime.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+#include <linux/component.h>
+#include <linux/dmi.h>
+
+#include <drm/display/drm_dp_mst_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_fixed.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_audio_component.h>
+#include <drm/drm_gem_atomic_helper.h>
+
+#include <acpi/video.h>
+
+#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
+
+#include "dcn/dcn_1_0_offset.h"
+#include "dcn/dcn_1_0_sh_mask.h"
+#include "soc15_hw_ip.h"
+#include "soc15_common.h"
+#include "vega10_ip_offset.h"
+
+#include "gc/gc_11_0_0_offset.h"
+#include "gc/gc_11_0_0_sh_mask.h"
+
+#include "modules/inc/mod_freesync.h"
+#include "modules/power/power_helpers.h"
+
+#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
+#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
+#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
+#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
+#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
+#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
+#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
+#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
+#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
+#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
+#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
+
+#define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
+#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
+
+#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
+MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
+
+#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
+MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
+
+#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
+
+#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
+
+/* Number of bytes in PSP header for firmware. */
+#define PSP_HEADER_BYTES 0x100
+
+/* Number of bytes in PSP footer for firmware. */
+#define PSP_FOOTER_BYTES 0x100
+
+/**
+ * DOC: overview
+ *
+ * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
+ * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
+ * requests into DC requests, and DC responses into DRM responses.
+ *
+ * The root control structure is &struct amdgpu_display_manager.
+ */
+
+/* basic init/fini API */
+static int amdgpu_dm_init(struct amdgpu_device *adev);
+static void amdgpu_dm_fini(struct amdgpu_device *adev);
+static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
+
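+/* Map the DC dongle type to the corresponding DRM subconnector property value. */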
+static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
+{
+ switch (link->dpcd_caps.dongle_type) {
+ case DISPLAY_DONGLE_NONE:
+ return DRM_MODE_SUBCONNECTOR_Native;
+ case DISPLAY_DONGLE_DP_VGA_CONVERTER:
+ return DRM_MODE_SUBCONNECTOR_VGA;
+ case DISPLAY_DONGLE_DP_DVI_CONVERTER:
+ case DISPLAY_DONGLE_DP_DVI_DONGLE:
+ return DRM_MODE_SUBCONNECTOR_DVID;
+ case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
+ case DISPLAY_DONGLE_DP_HDMI_DONGLE:
+ return DRM_MODE_SUBCONNECTOR_HDMIA;
+ case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
+ default:
+ return DRM_MODE_SUBCONNECTOR_Unknown;
+ }
+}
+
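+/* Refresh the DP subconnector property; DP connectors only, Unknown when no sink is attached. */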
+static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_link *link = aconnector->dc_link;
+ struct drm_connector *connector = &aconnector->base;
+ enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return;
+
+ if (aconnector->dc_sink)
+ subconnector = get_subconnector_type(link);
+
+ drm_object_property_set_value(&connector->base,
+ connector->dev->mode_config.dp_subconnector_property,
+ subconnector);
+}
+
+/*
+ * initializes drm_device display related structures, based on the information
+ * provided by DAL. The drm structures are: drm_crtc, drm_connector,
+ * drm_encoder, drm_mode_config
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
+/* removes and deallocates the drm structures, created by the above function */
+static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
+
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *amdgpu_dm_connector,
+ u32 link_index,
+ struct amdgpu_encoder *amdgpu_encoder);
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index);
+
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
+
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
+
+static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
+static void handle_hpd_rx_irq(void *param);
+
+static bool
+is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state);
+/*
+ * dm_vblank_get_counter
+ *
+ * @brief
+ * Get counter for number of vertical blanks
+ *
+ * @param
+ * struct amdgpu_device *adev - [in] desired amdgpu device
+ * int disp_idx - [in] which CRTC to get the counter from
+ *
+ * @return
+ * Counter for vertical blanks
+ */
+static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
+{
+ struct amdgpu_crtc *acrtc = NULL;
+
+ if (crtc >= adev->mode_info.num_crtc)
+ return 0;
+
+ acrtc = adev->mode_info.crtcs[crtc];
+
+ if (!acrtc->dm_irq_params.stream) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
+ }
+
+ return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
+}
+
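+/*
+ * Read the current scanout position and vblank window for the given CRTC,
+ * packed into the legacy register format expected by the base driver.
+ */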
+static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
+ u32 *vbl, u32 *position)
+{
+ u32 v_blank_start, v_blank_end, h_position, v_position;
+ struct amdgpu_crtc *acrtc = NULL;
+ struct dc *dc = adev->dm.dc;
+
+ if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
+ return -EINVAL;
+
+ acrtc = adev->mode_info.crtcs[crtc];
+
+ if (!acrtc->dm_irq_params.stream) {
+ DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
+ crtc);
+ return 0;
+ }
+
+ if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
+ dc_allow_idle_optimizations(dc, false);
+
+ /*
+ * TODO rework base driver to use values directly.
+ * for now parse it back into reg-format
+ */
+ dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
+ &v_blank_start,
+ &v_blank_end,
+ &h_position,
+ &v_position);
+
+ *position = v_position | (h_position << 16);
+ *vbl = v_blank_start | (v_blank_end << 16);
+
+ return 0;
+}
+
+static bool dm_is_idle(void *handle)
+{
+ /* XXX todo */
+ return true;
+}
+
+static int dm_wait_for_idle(void *handle)
+{
+ /* XXX todo */
+ return 0;
+}
+
+static bool dm_check_soft_reset(void *handle)
+{
+ return false;
+}
+
+static int dm_soft_reset(void *handle)
+{
+ /* XXX todo */
+ return 0;
+}
+
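+/*
+ * Find the amdgpu_crtc whose OTG instance matches otg_inst. Warns and falls
+ * back to crtc 0 when otg_inst is -1.
+ */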
+static struct amdgpu_crtc *
+get_crtc_by_otg_inst(struct amdgpu_device *adev,
+ int otg_inst)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_crtc *crtc;
+ struct amdgpu_crtc *amdgpu_crtc;
+
+ if (WARN_ON(otg_inst == -1))
+ return adev->mode_info.crtcs[0];
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (amdgpu_crtc->otg_inst == otg_inst)
+ return amdgpu_crtc;
+ }
+
+ return NULL;
+}
+
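+/*
+ * A DC timing adjustment is needed when entering fixed-rate VRR or when the
+ * VRR active state differs between the old and new CRTC state.
+ */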
+static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
+ struct dm_crtc_state *new_state)
+{
+ if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
+ return true;
+ else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
+ return true;
+ else
+ return false;
+}
+
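+/* Reverse the order of the surface-update array in place. */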
+static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
+ int planes_count)
+{
+ int i, j;
+
+ for (i = 0, j = planes_count - 1; i < j; i++, j--)
+ swap(array_of_surface_update[i], array_of_surface_update[j]);
+}
+
+/**
+ * update_planes_and_stream_adapter() - Send planes to be updated in DC
+ *
+ * DC has a generic way to update planes and stream via
+ * dc_update_planes_and_stream function; however, DM might need some
+ * adjustments and preparation before calling it. This function is a wrapper
+ * for the dc_update_planes_and_stream that does any required configuration
+ * before passing control to DC.
+ *
+ * @dc: Display Core control structure
+ * @update_type: specify whether it is FULL/MEDIUM/FAST update
+ * @planes_count: planes count to update
+ * @stream: stream state
+ * @stream_update: stream update
+ * @array_of_surface_update: dc surface update pointer
+ *
+ */
+static inline bool update_planes_and_stream_adapter(struct dc *dc,
+ int update_type,
+ int planes_count,
+ struct dc_stream_state *stream,
+ struct dc_stream_update *stream_update,
+ struct dc_surface_update *array_of_surface_update)
+{
+ reverse_planes_order(array_of_surface_update, planes_count);
+
+ /*
+ * Previous frame finished and HW is ready for optimization.
+ */
+ if (update_type == UPDATE_TYPE_FAST)
+ dc_post_update_surfaces_to_stream(dc);
+
+ return dc_update_planes_and_stream(dc,
+ array_of_surface_update,
+ planes_count,
+ stream,
+ stream_update);
+}
+
+/**
+ * dm_pflip_high_irq() - Handle pageflip interrupt
+ * @interrupt_params: ignored
+ *
+ * Handles the pageflip interrupt by notifying all interested parties
+ * that the pageflip has been completed.
+ */
+static void dm_pflip_high_irq(void *interrupt_params)
+{
+ struct amdgpu_crtc *amdgpu_crtc;
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct drm_device *dev = adev_to_drm(adev);
+ unsigned long flags;
+ struct drm_pending_vblank_event *e;
+ u32 vpos, hpos, v_blank_start, v_blank_end;
+ bool vrr_active;
+
+ amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
+
+ /* IRQ could occur when in initial stage */
+ /* TODO work and BO cleanup */
+ if (amdgpu_crtc == NULL) {
+ drm_dbg_state(dev, "CRTC is null, returning.\n");
+ return;
+ }
+
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+
+ if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
+ drm_dbg_state(dev,
+ "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
+ amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED,
+ amdgpu_crtc->crtc_id, amdgpu_crtc);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+ return;
+ }
+
+ /* page flip completed. */
+ e = amdgpu_crtc->event;
+ amdgpu_crtc->event = NULL;
+
+ WARN_ON(!e);
+
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);
+
+ /* Fixed refresh rate, or VRR scanout position outside front-porch? */
+ if (!vrr_active ||
+ !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
+ &v_blank_end, &hpos, &vpos) ||
+ (vpos < v_blank_start)) {
+ /* Update to correct count and vblank timestamp if racing with
+ * vblank irq. This also updates to the correct vblank timestamp
+ * even in VRR mode, as scanout is past the front-porch atm.
+ */
+ drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
+
+ /* Wake up userspace by sending the pageflip event with proper
+ * count and timestamp of vblank of flip completion.
+ */
+ if (e) {
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
+
+ /* Event sent, so done with vblank for this flip */
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
+ }
+ } else if (e) {
+ /* VRR active and inside front-porch: vblank count and
+ * timestamp for pageflip event will only be up to date after
+ * drm_crtc_handle_vblank() has been executed from late vblank
+ * irq handler after start of back-porch (vline 0). We queue the
+ * pageflip event for send-out by drm_crtc_handle_vblank() with
+ * updated timestamp and count, once it runs after us.
+ *
+ * We need to open-code this instead of using the helper
+ * drm_crtc_arm_vblank_event(), as that helper would
+ * call drm_crtc_accurate_vblank_count(), which we must
+ * not call in VRR mode while we are in front-porch!
+ */
+
+ /* sequence will be replaced by real count during send-out. */
+ e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
+ e->pipe = amdgpu_crtc->crtc_id;
+
+ list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
+ e = NULL;
+ }
+
+	/* Keep track of vblank of this flip for flip throttling. We use the
+	 * cooked hw counter, as that one is incremented at the start of this
+	 * vblank of pageflip completion, so last_flip_vblank is the forbidden
+	 * count for queueing new pageflips if vsync + VRR is enabled.
+	 */
+ amdgpu_crtc->dm_irq_params.last_flip_vblank =
+ amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
+
+ amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+
+ drm_dbg_state(dev,
+ "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
+ amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
+}
+
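+/*
+ * Handle the VUPDATE interrupt: track the measured refresh rate and, in VRR
+ * mode, perform the deferred vblank handling plus BTR processing for
+ * pre-DCE12 ASICs.
+ */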
+static void dm_vupdate_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_crtc *acrtc;
+ struct drm_device *drm_dev;
+ struct drm_vblank_crtc *vblank;
+ ktime_t frame_duration_ns, previous_timestamp;
+ unsigned long flags;
+ int vrr_active;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
+
+ if (acrtc) {
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
+ drm_dev = acrtc->base.dev;
+ vblank = &drm_dev->vblank[acrtc->base.index];
+ previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
+ frame_duration_ns = vblank->time - previous_timestamp;
+
+ if (frame_duration_ns > 0) {
+ trace_amdgpu_refresh_rate_track(acrtc->base.index,
+ frame_duration_ns,
+ ktime_divns(NSEC_PER_SEC, frame_duration_ns));
+ atomic64_set(&irq_params->previous_timestamp, vblank->time);
+ }
+
+ drm_dbg_vbl(drm_dev,
+ "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
+ vrr_active);
+
+ /* Core vblank handling is done here after end of front-porch in
+ * vrr mode, as vblank timestamping will give valid results
+ * while now done after front-porch. This will also deliver
+ * page-flip completion events that have been queued to us
+ * if a pageflip happened inside front-porch.
+ */
+ if (vrr_active) {
+ amdgpu_dm_crtc_handle_vblank(acrtc);
+
+ /* BTR processing for pre-DCE12 ASICs */
+ if (acrtc->dm_irq_params.stream &&
+ adev->family < AMDGPU_FAMILY_AI) {
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ mod_freesync_handle_v_update(
+ adev->dm.freesync_module,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params);
+
+ dc_stream_adjust_vmin_vmax(
+ adev->dm.dc,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+ }
+ }
+ }
+}
+
+/**
+ * dm_crtc_high_irq() - Handles CRTC interrupt
+ * @interrupt_params: used for determining the CRTC instance
+ *
+ * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
+ * event handler.
+ */
+static void dm_crtc_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct drm_writeback_job *job;
+ struct amdgpu_crtc *acrtc;
+ unsigned long flags;
+ int vrr_active;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
+ if (!acrtc)
+ return;
+
+ if (acrtc->wb_pending) {
+ if (acrtc->wb_conn) {
+ spin_lock_irqsave(&acrtc->wb_conn->job_lock, flags);
+ job = list_first_entry_or_null(&acrtc->wb_conn->job_queue,
+ struct drm_writeback_job,
+ list_entry);
+ spin_unlock_irqrestore(&acrtc->wb_conn->job_lock, flags);
+
+ if (job) {
+ unsigned int v_total, refresh_hz;
+ struct dc_stream_state *stream = acrtc->dm_irq_params.stream;
+
+ v_total = stream->adjust.v_total_max ?
+ stream->adjust.v_total_max : stream->timing.v_total;
+ refresh_hz = div_u64((uint64_t) stream->timing.pix_clk_100hz *
+ 100LL, (v_total * stream->timing.h_total));
+ mdelay(1000 / refresh_hz);
+
+ drm_writeback_signal_completion(acrtc->wb_conn, 0);
+ dc_stream_fc_disable_writeback(adev->dm.dc,
+ acrtc->dm_irq_params.stream, 0);
+ }
+ } else
+ DRM_ERROR("%s: no amdgpu_crtc wb_conn\n", __func__);
+ acrtc->wb_pending = false;
+ }
+
+ vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
+
+ drm_dbg_vbl(adev_to_drm(adev),
+ "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
+ vrr_active, acrtc->dm_irq_params.active_planes);
+
+ /**
+ * Core vblank handling at start of front-porch is only possible
+ * in non-vrr mode, as only there vblank timestamping will give
+ * valid results while done in front-porch. Otherwise defer it
+ * to dm_vupdate_high_irq after end of front-porch.
+ */
+ if (!vrr_active)
+ amdgpu_dm_crtc_handle_vblank(acrtc);
+
+ /**
+ * Following stuff must happen at start of vblank, for crc
+ * computation and below-the-range btr support in vrr mode.
+ */
+ amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
+
+ /* BTR updates need to happen before VUPDATE on Vega and above. */
+ if (adev->family < AMDGPU_FAMILY_AI)
+ return;
+
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+
+ if (acrtc->dm_irq_params.stream &&
+ acrtc->dm_irq_params.vrr_params.supported &&
+ acrtc->dm_irq_params.freesync_config.state ==
+ VRR_STATE_ACTIVE_VARIABLE) {
+ mod_freesync_handle_v_update(adev->dm.freesync_module,
+ acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params);
+
+ dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
+ &acrtc->dm_irq_params.vrr_params.adjust);
+ }
+
+ /*
+ * If there aren't any active_planes then DCH HUBP may be clock-gated.
+ * In that case, pageflip completion interrupts won't fire and pageflip
+ * completion events won't get delivered. Prevent this by sending
+ * pending pageflip events from here if a flip is still pending.
+ *
+ * If any planes are enabled, use dm_pflip_high_irq() instead, to
+ * avoid race conditions between flip programming and completion,
+ * which could cause too early flip completion events.
+ */
+ if (adev->family >= AMDGPU_FAMILY_RV &&
+ acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
+ acrtc->dm_irq_params.active_planes == 0) {
+ if (acrtc->event) {
+ drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
+ acrtc->event = NULL;
+ drm_crtc_vblank_put(&acrtc->base);
+ }
+ acrtc->pflip_status = AMDGPU_FLIP_NONE;
+ }
+
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+}
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+/**
+ * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
+ * DCN generation ASICs
+ * @interrupt_params: interrupt parameters
+ *
+ * Used to set crc window/read out crc value at vertical line 0 position
+ */
+static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_crtc *acrtc;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
+
+ if (!acrtc)
+ return;
+
+ amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
+}
+#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
+
+/**
+ * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * Dmub AUX or SET_CONFIG command completion processing callback.
+ * Copies the dmub notification to DM, to be read by the AUX command
+ * issuing thread, and signals the event to wake up that thread.
+ */
+static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
+{
+ if (adev->dm.dmub_notify)
+ memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
+ if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
+ complete(&adev->dm.dmub_aux_transfer_done);
+}
+
+/**
+ * dmub_hpd_callback - DMUB HPD interrupt processing callback.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * Dmub Hpd interrupt processing callback. Gets the display index through
+ * the link index and calls the helper to do the processing.
+ */
+static void dmub_hpd_callback(struct amdgpu_device *adev,
+ struct dmub_notification *notify)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct amdgpu_dm_connector *hpd_aconnector = NULL;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct dc_link *link;
+ u8 link_index = 0;
+ struct drm_device *dev;
+
+ if (adev == NULL)
+ return;
+
+ if (notify == NULL) {
+ DRM_ERROR("DMUB HPD callback notification was NULL");
+ return;
+ }
+
+ if (notify->link_index > adev->dm.dc->link_count) {
+		DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
+ return;
+ }
+
+ link_index = notify->link_index;
+ link = adev->dm.dc->links[link_index];
+ dev = adev->dm.ddev;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (link && aconnector->dc_link == link) {
+ if (notify->type == DMUB_NOTIFICATION_HPD)
+ DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
+ else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+ DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+ else
+ DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
+ notify->type, link_index);
+
+ hpd_aconnector = aconnector;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+
+ if (hpd_aconnector) {
+ if (notify->type == DMUB_NOTIFICATION_HPD)
+ handle_hpd_irq_helper(hpd_aconnector);
+ else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+ handle_hpd_rx_irq(hpd_aconnector);
+ }
+}
+
+/**
+ * register_dmub_notify_callback - Sets callback for DMUB notify
+ * @adev: amdgpu_device pointer
+ * @type: Type of dmub notification
+ * @callback: Dmub interrupt callback function
+ * @dmub_int_thread_offload: offload indicator
+ *
+ * API to register a dmub callback handler for a dmub notification.
+ * Also sets an indicator for whether the callback processing is to be
+ * offloaded to the dmub interrupt handling thread.
+ * Return: true if successfully registered, false on a NULL callback or
+ * invalid notification type
+ */
+static bool register_dmub_notify_callback(struct amdgpu_device *adev,
+ enum dmub_notification_type type,
+ dmub_notify_interrupt_callback_t callback,
+ bool dmub_int_thread_offload)
+{
+ if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
+ adev->dm.dmub_callback[type] = callback;
+ adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
+ } else
+ return false;
+
+ return true;
+}
+
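+/* Worker that runs an offloaded DMUB notification callback outside interrupt context. */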
+static void dm_handle_hpd_work(struct work_struct *work)
+{
+ struct dmub_hpd_work *dmub_hpd_wrk;
+
+ dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
+
+ if (!dmub_hpd_wrk->dmub_notify) {
+ DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
+ return;
+ }
+
+ if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
+ dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
+ dmub_hpd_wrk->dmub_notify);
+ }
+
+ kfree(dmub_hpd_wrk->dmub_notify);
+ kfree(dmub_hpd_wrk);
+
+}
+
+#define DMUB_TRACE_MAX_READ 64
+/**
+ * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
+ * @interrupt_params: used for determining the Outbox instance
+ *
+ * Handles the Outbox interrupt by draining pending DMUB notifications
+ * and trace buffer entries.
+ */
+static void dm_dmub_outbox1_low_irq(void *interrupt_params)
+{
+ struct dmub_notification notify;
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dmcub_trace_buf_entry entry = { 0 };
+ u32 count = 0;
+ struct dmub_hpd_work *dmub_hpd_wrk;
+ struct dc_link *plink = NULL;
+
+ if (dc_enable_dmub_notifications(adev->dm.dc) &&
+ irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
+
+ do {
+ dc_stat_get_dmub_notification(adev->dm.dc, &notify);
+ if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
+ DRM_ERROR("DM: notify type %d invalid!", notify.type);
+ continue;
+ }
+ if (!dm->dmub_callback[notify.type]) {
+ DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
+ continue;
+ }
+ if (dm->dmub_thread_offload[notify.type] == true) {
+ dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
+ if (!dmub_hpd_wrk) {
+ DRM_ERROR("Failed to allocate dmub_hpd_wrk");
+ return;
+ }
+ dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
+ GFP_ATOMIC);
+ if (!dmub_hpd_wrk->dmub_notify) {
+ kfree(dmub_hpd_wrk);
+ DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
+ return;
+ }
+ INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
+ dmub_hpd_wrk->adev = adev;
+ if (notify.type == DMUB_NOTIFICATION_HPD) {
+ plink = adev->dm.dc->links[notify.link_index];
+ if (plink) {
+ plink->hpd_status =
+ notify.hpd_status == DP_HPD_PLUG;
+ }
+ }
+ queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
+ } else {
+ dm->dmub_callback[notify.type](adev, &notify);
+ }
+ } while (notify.pending_notification);
+ }
+
+
+ do {
+ if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
+ trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
+ entry.param0, entry.param1);
+
+ DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+ entry.trace_code, entry.tick_count, entry.param0, entry.param1);
+ } else
+ break;
+
+ count++;
+
+ } while (count <= DMUB_TRACE_MAX_READ);
+
+ if (count > DMUB_TRACE_MAX_READ)
+		DRM_DEBUG_DRIVER("Warning: count > DMUB_TRACE_MAX_READ");
+}
+
+static int dm_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int dm_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+/* Prototypes of private functions */
+static int dm_early_init(void *handle);
+
+/* Allocate memory for FBC compressed data */
+static void amdgpu_dm_fbc_init(struct drm_connector *connector)
+{
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
+ struct dm_compressor_info *compressor = &adev->dm.compressor;
+ struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
+ struct drm_display_mode *mode;
+ unsigned long max_size = 0;
+
+ if (adev->dm.dc->fbc_compressor == NULL)
+ return;
+
+ if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
+ return;
+
+ if (compressor->bo_ptr)
+ return;
+
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ if (max_size < mode->htotal * mode->vtotal)
+ max_size = mode->htotal * mode->vtotal;
+ }
+
+ if (max_size) {
+ int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
+ &compressor->gpu_addr, &compressor->cpu_addr);
+
+ if (r)
+ DRM_ERROR("DM: Failed to initialize FBC\n");
+ else {
+ adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
+ DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
+ }
+
+ }
+
+}
+
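+/* Look up the connector bound to the given audio pin and copy its ELD into buf. */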
+static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
+ int pipe, bool *enabled,
+ unsigned char *buf, int max_bytes)
+{
+ struct drm_device *dev = dev_get_drvdata(kdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct amdgpu_dm_connector *aconnector;
+ int ret = 0;
+
+ *enabled = false;
+
+ mutex_lock(&adev->dm.audio_lock);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->audio_inst != port)
+ continue;
+
+ *enabled = true;
+ ret = drm_eld_size(connector->eld);
+ memcpy(buf, connector->eld, min(max_bytes, ret));
+
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ mutex_unlock(&adev->dm.audio_lock);
+
+ DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
+
+ return ret;
+}
+
+static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
+ .get_eld = amdgpu_dm_audio_component_get_eld,
+};
+
+static int amdgpu_dm_audio_component_bind(struct device *kdev,
+ struct device *hda_kdev, void *data)
+{
+ struct drm_device *dev = dev_get_drvdata(kdev);
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct drm_audio_component *acomp = data;
+
+ acomp->ops = &amdgpu_dm_audio_component_ops;
+ acomp->dev = kdev;
+ adev->dm.audio_component = acomp;
+
+ return 0;
+}
+
+static void amdgpu_dm_audio_component_unbind(struct device *kdev,
+ struct device *hda_kdev, void *data)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev_get_drvdata(kdev));
+ struct drm_audio_component *acomp = data;
+
+ acomp->ops = NULL;
+ acomp->dev = NULL;
+ adev->dm.audio_component = NULL;
+}
+
+static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
+ .bind = amdgpu_dm_audio_component_bind,
+ .unbind = amdgpu_dm_audio_component_unbind,
+};
+
+static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
+{
+ int i, ret;
+
+ if (!amdgpu_audio)
+ return 0;
+
+ adev->mode_info.audio.enabled = true;
+
+ adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
+
+ for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
+ adev->mode_info.audio.pin[i].channels = -1;
+ adev->mode_info.audio.pin[i].rate = -1;
+ adev->mode_info.audio.pin[i].bits_per_sample = -1;
+ adev->mode_info.audio.pin[i].status_bits = 0;
+ adev->mode_info.audio.pin[i].category_code = 0;
+ adev->mode_info.audio.pin[i].connected = false;
+ adev->mode_info.audio.pin[i].id =
+ adev->dm.dc->res_pool->audios[i]->inst;
+ adev->mode_info.audio.pin[i].offset = 0;
+ }
+
+ ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
+ if (ret < 0)
+ return ret;
+
+ adev->dm.audio_registered = true;
+
+ return 0;
+}
+
+static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
+{
+ if (!amdgpu_audio)
+ return;
+
+ if (!adev->mode_info.audio.enabled)
+ return;
+
+ if (adev->dm.audio_registered) {
+ component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
+ adev->dm.audio_registered = false;
+ }
+
+ /* TODO: Disable audio? */
+
+ adev->mode_info.audio.enabled = false;
+}
+
+static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
+{
+ struct drm_audio_component *acomp = adev->dm.audio_component;
+
+ if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
+ DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
+
+ acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ pin, -1);
+ }
+}
+
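+/*
+ * Bring up the DMCUB hardware: copy the firmware and VBIOS into their
+ * framebuffer windows, reset the mailbox/tracebuffer/fw-state regions and
+ * start the DMUB service.
+ */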
+static int dm_dmub_hw_init(struct amdgpu_device *adev)
+{
+ const struct dmcub_firmware_header_v1_0 *hdr;
+ struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+ struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+ const struct firmware *dmub_fw = adev->dm.dmub_fw;
+ struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
+ struct abm *abm = adev->dm.dc->res_pool->abm;
+ struct dc_context *ctx = adev->dm.dc->ctx;
+ struct dmub_srv_hw_params hw_params;
+ enum dmub_status status;
+ const unsigned char *fw_inst_const, *fw_bss_data;
+ u32 i, fw_inst_const_size, fw_bss_data_size;
+ bool has_hw_support;
+
+ if (!dmub_srv)
+ /* DMUB isn't supported on the ASIC. */
+ return 0;
+
+ if (!fb_info) {
+ DRM_ERROR("No framebuffer info for DMUB service.\n");
+ return -EINVAL;
+ }
+
+ if (!dmub_fw) {
+ /* Firmware required for DMUB support. */
+ DRM_ERROR("No firmware provided for DMUB.\n");
+ return -EINVAL;
+ }
+
+ /* initialize register offsets for ASICs with runtime initialization available */
+ if (dmub_srv->hw_funcs.init_reg_offsets)
+ dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx);
+
+ status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
+ return -EINVAL;
+ }
+
+ if (!has_hw_support) {
+ DRM_INFO("DMUB unsupported on ASIC\n");
+ return 0;
+ }
+
+ /* Reset DMCUB if it was previously running - before we overwrite its memory. */
+ status = dmub_srv_hw_reset(dmub_srv);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Error resetting DMUB HW: %d\n", status);
+
+ hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
+
+ fw_inst_const = dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ PSP_HEADER_BYTES;
+
+ fw_bss_data = dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ le32_to_cpu(hdr->inst_const_bytes);
+
+ /* Copy firmware and bios info into FB memory. */
+ fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+ PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+
+ fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+
+	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
+	 * amdgpu_ucode_init_single_fw will load the dmub firmware's
+	 * fw_inst_const part to cw0; otherwise, the firmware backdoor load
+	 * is done here by dm_dmub_hw_init.
+	 */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
+ fw_inst_const_size);
+ }
+
+ if (fw_bss_data_size)
+ memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
+ fw_bss_data, fw_bss_data_size);
+
+ /* Copy firmware bios info into FB memory. */
+ memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
+ adev->bios_size);
+
+ /* Reset regions that need to be reset. */
+ memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
+
+ memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
+
+ memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
+ fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
+
+ /* Initialize hardware. */
+ memset(&hw_params, 0, sizeof(hw_params));
+ hw_params.fb_base = adev->gmc.fb_start;
+ hw_params.fb_offset = adev->vm_manager.vram_base_offset;
+
+ /* backdoor load firmware and trigger dmub running */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ hw_params.load_inst_const = true;
+
+ if (dmcu)
+ hw_params.psp_version = dmcu->psp_version;
+
+ for (i = 0; i < fb_info->num_fb; ++i)
+ hw_params.fb[i] = &fb_info->fb[i];
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ hw_params.dpia_supported = true;
+ hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
+ break;
+ default:
+ break;
+ }
+
+ status = dmub_srv_hw_init(dmub_srv, &hw_params);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error initializing DMUB HW: %d\n", status);
+ return -EINVAL;
+ }
+
+ /* Wait for firmware load to finish. */
+ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+
+ /* Init DMCU and ABM if available. */
+ if (dmcu && abm) {
+ dmcu->funcs->dmcu_init(dmcu);
+ abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
+ }
+
+ if (!adev->dm.dc->ctx->dmub_srv)
+ adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+ if (!adev->dm.dc->ctx->dmub_srv) {
+ DRM_ERROR("Couldn't allocate DC DMUB server!\n");
+ return -ENOMEM;
+ }
+
+ DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
+ adev->dm.dmcub_fw_version);
+
+ return 0;
+}
+
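+/*
+ * On resume, wait for DMUB auto-load if the hardware is already initialized;
+ * otherwise perform the full hardware init.
+ */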
+static void dm_dmub_hw_resume(struct amdgpu_device *adev)
+{
+ struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+ enum dmub_status status;
+ bool init;
+
+ if (!dmub_srv) {
+ /* DMUB isn't supported on the ASIC. */
+ return;
+ }
+
+ status = dmub_srv_is_hw_init(dmub_srv, &init);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("DMUB hardware init check failed: %d\n", status);
+
+ if (status == DMUB_STATUS_OK && init) {
+ /* Wait for firmware load to finish. */
+ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ } else {
+ /* Perform the full hardware initialization. */
+ dm_dmub_hw_init(adev);
+ }
+}
+
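+/*
+ * Fill pa_config with the system aperture, AGP window and GART page-table
+ * addresses derived from the GMC configuration.
+ */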
+static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
+{
+ u64 pt_base;
+ u32 logical_addr_low;
+ u32 logical_addr_high;
+ u32 agp_base, agp_bot, agp_top;
+ PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
+
+ memset(pa_config, 0, sizeof(*pa_config));
+
+ agp_base = 0;
+ agp_bot = adev->gmc.agp_start >> 24;
+ agp_top = adev->gmc.agp_end >> 24;
+
+ /* AGP aperture is disabled */
+ if (agp_bot > agp_top) {
+ logical_addr_low = adev->gmc.fb_start >> 18;
+ if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+ AMD_APU_IS_RENOIR |
+ AMD_APU_IS_GREEN_SARDINE))
+			/*
+			 * Raven2 has a HW issue where it cannot use vram that lies
+			 * beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
+			 * increase the system aperture high address (add 1) to
+			 * avoid the VM fault and hardware hang.
+			 */
+ logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
+ else
+ logical_addr_high = adev->gmc.fb_end >> 18;
+ } else {
+ logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
+ if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
+ AMD_APU_IS_RENOIR |
+ AMD_APU_IS_GREEN_SARDINE))
+			/*
+			 * Raven2 has a HW issue where it cannot use vram that lies
+			 * beyond MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround,
+			 * increase the system aperture high address (add 1) to
+			 * avoid the VM fault and hardware hang.
+			 */
+ logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
+ else
+ logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
+ }
+
+ pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
+
+ page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
+ AMDGPU_GPU_PAGE_SHIFT);
+ page_table_base.high_part = upper_32_bits(pt_base);
+ page_table_base.low_part = lower_32_bits(pt_base);
+
+ pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
+ pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
+
+ pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
+ pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
+ pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
+
+ pa_config->system_aperture.fb_base = adev->gmc.fb_start;
+ pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
+ pa_config->system_aperture.fb_top = adev->gmc.fb_end;
+
+ pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
+ pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
+ pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
+
+ pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
+
+}
+
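+/* Force the connector to the given state and generate a hotplug event for it. */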
+static void force_connector_state(
+ struct amdgpu_dm_connector *aconnector,
+ enum drm_connector_force force_state)
+{
+ struct drm_connector *connector = &aconnector->base;
+
+ mutex_lock(&connector->dev->mode_config.mutex);
+ aconnector->base.force = force_state;
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
+ mutex_lock(&aconnector->hpd_lock);
+ drm_kms_helper_connector_hotplug_event(connector);
+ mutex_unlock(&aconnector->hpd_lock);
+}
+
+static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
+{
+ struct hpd_rx_irq_offload_work *offload_work;
+ struct amdgpu_dm_connector *aconnector;
+ struct dc_link *dc_link;
+ struct amdgpu_device *adev;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ unsigned long flags;
+ union test_response test_response;
+
+ memset(&test_response, 0, sizeof(test_response));
+
+ offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
+ aconnector = offload_work->offload_wq->aconnector;
+
+ if (!aconnector) {
+ DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
+ goto skip;
+ }
+
+ adev = drm_to_adev(aconnector->base.dev);
+ dc_link = aconnector->dc_link;
+
+ mutex_lock(&aconnector->hpd_lock);
+ if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+ mutex_unlock(&aconnector->hpd_lock);
+
+ if (new_connection_type == dc_connection_none)
+ goto skip;
+
+ if (amdgpu_in_reset(adev))
+ goto skip;
+
+ if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+ offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
+ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+ offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
+ spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+ goto skip;
+ }
+
+ mutex_lock(&adev->dm.dc_lock);
+ if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+ dc_link_dp_handle_automated_test(dc_link);
+
+ if (aconnector->timing_changed) {
+ /* force connector disconnect and reconnect */
+ force_connector_state(aconnector, DRM_FORCE_OFF);
+ msleep(100);
+ force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
+ }
+
+ test_response.bits.ACK = 1;
+
+ core_link_write_dpcd(
+ dc_link,
+ DP_TEST_RESPONSE,
+ &test_response.raw,
+ sizeof(test_response));
+ } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+ dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
+ dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+		/* offload_work->data comes from handle_hpd_rx_irq->
+		 * schedule_hpd_rx_offload_work; this is the deferred handler
+		 * for an hpd short pulse. At this point the link status may
+		 * have changed, so read the latest link status from the dpcd
+		 * registers. If the link status is good, skip running link
+		 * training again.
+		 */
+ union hpd_irq_data irq_data;
+
+ memset(&irq_data, 0, sizeof(irq_data));
+
+		/* Before dc_link_dp_handle_link_loss, allow a new link-loss
+		 * handling request to be added to the work queue in case the
+		 * link is lost again at the end of dc_link_dp_handle_link_loss.
+		 */
+ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+ offload_work->offload_wq->is_handling_link_loss = false;
+ spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+
+ if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
+ dc_link_check_link_loss_status(dc_link, &irq_data))
+ dc_link_dp_handle_link_loss(dc_link);
+ }
+ mutex_unlock(&adev->dm.dc_lock);
+
+skip:
+ kfree(offload_work);
+
+}
+
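+/* Allocate one single-threaded HPD RX offload workqueue per link. */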
+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
+{
+ int max_caps = dc->caps.max_links;
+ int i = 0;
+ struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
+
+ hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
+
+ if (!hpd_rx_offload_wq)
+ return NULL;
+
+
+ for (i = 0; i < max_caps; i++) {
+ hpd_rx_offload_wq[i].wq =
+ create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
+
+ if (hpd_rx_offload_wq[i].wq == NULL) {
+			DRM_ERROR("failed to create amdgpu_dm_hpd_rx_offload_wq!");
+ goto out_err;
+ }
+
+ spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
+ }
+
+ return hpd_rx_offload_wq;
+
+out_err:
+ for (i = 0; i < max_caps; i++) {
+ if (hpd_rx_offload_wq[i].wq)
+ destroy_workqueue(hpd_rx_offload_wq[i].wq);
+ }
+ kfree(hpd_rx_offload_wq);
+ return NULL;
+}
+
+struct amdgpu_stutter_quirk {
+ u16 chip_vendor;
+ u16 chip_device;
+ u16 subsys_vendor;
+ u16 subsys_device;
+ u8 revision;
+};
+
+static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
+ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+ { 0, 0, 0, 0, 0 },
+};
+
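+/* Check the PCI IDs against the quirk list of boards where stutter mode must be disabled. */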
+static bool dm_should_disable_stutter(struct pci_dev *pdev)
+{
+ const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
+
+ while (p && p->chip_device != 0) {
+ if (pdev->vendor == p->chip_vendor &&
+ pdev->device == p->chip_device &&
+ pdev->subsystem_vendor == p->subsys_vendor &&
+ pdev->subsystem_device == p->subsys_device &&
+ pdev->revision == p->revision) {
+ return true;
+ }
+ ++p;
+ }
+ return false;
+}
+
+static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
+ },
+ },
+ {}
+ /* TODO: refactor this from a fixed table to a dynamic option */
+};
+
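+/* Apply DMI-based quirks; currently only the AUX HPD disconnect quirk. */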
+static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+{
+ const struct dmi_system_id *dmi_id;
+
+ dm->aux_hpd_discon_quirk = false;
+
+ dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
+ if (dmi_id) {
+ dm->aux_hpd_discon_quirk = true;
+ DRM_INFO("aux_hpd_discon_quirk attached\n");
+ }
+}
+
+static int amdgpu_dm_init(struct amdgpu_device *adev)
+{
+ struct dc_init_data init_data;
+ struct dc_callback_init init_params;
+ int r;
+
+ adev->dm.ddev = adev_to_drm(adev);
+ adev->dm.adev = adev;
+
+ /* Zero all the fields */
+ memset(&init_data, 0, sizeof(init_data));
+ memset(&init_params, 0, sizeof(init_params));
+
+ mutex_init(&adev->dm.dpia_aux_lock);
+ mutex_init(&adev->dm.dc_lock);
+ mutex_init(&adev->dm.audio_lock);
+
+ if (amdgpu_dm_irq_init(adev)) {
+ DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
+ goto error;
+ }
+
+ init_data.asic_id.chip_family = adev->family;
+
+ init_data.asic_id.pci_revision_id = adev->pdev->revision;
+ init_data.asic_id.hw_internal_rev = adev->external_rev_id;
+ init_data.asic_id.chip_id = adev->pdev->device;
+
+ init_data.asic_id.vram_width = adev->gmc.vram_width;
+ /* TODO: initialize init_data.asic_id.vram_type here!!!! */
+ init_data.asic_id.atombios_base_address =
+ adev->mode_info.atom_context->bios;
+
+ init_data.driver = adev;
+
+ adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
+
+ if (!adev->dm.cgs_device) {
+ DRM_ERROR("amdgpu: failed to create cgs device.\n");
+ goto error;
+ }
+
+ init_data.cgs_device = adev->dm.cgs_device;
+
+ init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 1, 0):
+ switch (adev->dm.dmcub_fw_version) {
+ case 0: /* development */
+ case 0x1: /* linux-firmware.git hash 6d9f399 */
+ case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
+ init_data.flags.disable_dmcu = false;
+ break;
+ default:
+ init_data.flags.disable_dmcu = true;
+ }
+ break;
+ case IP_VERSION(2, 0, 3):
+ init_data.flags.disable_dmcu = true;
+ break;
+ default:
+ break;
+ }
+
+	/* APUs support S/G display by default except:
+	 * ASICs before Carrizo,
+	 * RAVEN1 (users reported stability issues)
+	 */
+
+ if (adev->asic_type < CHIP_CARRIZO) {
+ init_data.flags.gpu_vm_support = false;
+ } else if (adev->asic_type == CHIP_RAVEN) {
+ if (adev->apu_flags & AMD_APU_IS_RAVEN)
+ init_data.flags.gpu_vm_support = false;
+ else
+ init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
+ } else {
+ init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
+ }
+
+ adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;
+
+ if (amdgpu_dc_feature_mask & DC_FBC_MASK)
+ init_data.flags.fbc_support = true;
+
+ if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
+ init_data.flags.multi_mon_pp_mclk_switch = true;
+
+ if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
+ init_data.flags.disable_fractional_pwm = true;
+
+ if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
+ init_data.flags.edp_no_power_sequencing = true;
+
+ if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
+ init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
+ if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
+ init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
+
+ init_data.flags.seamless_boot_edp_requested = false;
+
+ if (amdgpu_device_seamless_boot_supported(adev)) {
+ init_data.flags.seamless_boot_edp_requested = true;
+ init_data.flags.allow_seamless_boot_optimization = true;
+ DRM_INFO("Seamless boot condition check passed\n");
+ }
+
+ init_data.flags.enable_mipi_converter_optimization = true;
+
+ init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
+ init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
+ init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
+ init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+ else
+ init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+
+ init_data.flags.disable_ips_in_vpb = 0;
+
+ /* Enable DWB for tested platforms only */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
+ init_data.num_virtual_links = 1;
+
+ INIT_LIST_HEAD(&adev->dm.da_list);
+
+ retrieve_dmi_info(&adev->dm);
+
+ /* Display Core create. */
+ adev->dm.dc = dc_create(&init_data);
+
+ if (adev->dm.dc) {
+ DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
+ dce_version_to_string(adev->dm.dc->ctx->dce_version));
+ } else {
+ DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
+ goto error;
+ }
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
+ adev->dm.dc->debug.force_single_disp_pipe_split = false;
+ adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+ }
+
+ if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
+		adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
+ if (dm_should_disable_stutter(adev->pdev))
+ adev->dm.dc->debug.disable_stutter = true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
+ adev->dm.dc->debug.disable_stutter = true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
+ adev->dm.dc->debug.disable_dsc = true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
+ adev->dm.dc->debug.disable_clock_gate = true;
+
+ if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
+ adev->dm.dc->debug.force_subvp_mclk_switch = true;
+
+ if (amdgpu_dc_debug_mask & DC_ENABLE_DML2)
+ adev->dm.dc->debug.using_dml2 = true;
+
+ adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
+
+ /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
+ adev->dm.dc->debug.ignore_cable_id = true;
+
+ if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
+ DRM_INFO("DP-HDMI FRL PCON supported\n");
+
+ r = dm_dmub_hw_init(adev);
+ if (r) {
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ goto error;
+ }
+
+ dc_hardware_init(adev->dm.dc);
+
+ adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
+ if (!adev->dm.hpd_rx_offload_wq) {
+ DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
+ goto error;
+ }
+
+ if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
+ struct dc_phy_addr_space_config pa_config;
+
+ mmhub_read_system_context(adev, &pa_config);
+
+ // Call the DC init_memory func
+ dc_setup_system_context(adev->dm.dc, &pa_config);
+ }
+
+ adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
+ if (!adev->dm.freesync_module) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize freesync_module.\n");
+ } else
+ DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
+ adev->dm.freesync_module);
+
+ amdgpu_dm_init_color_mod();
+
+ if (adev->dm.dc->caps.max_links > 0) {
+ adev->dm.vblank_control_workqueue =
+ create_singlethread_workqueue("dm_vblank_control_workqueue");
+ if (!adev->dm.vblank_control_workqueue)
+ DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
+ }
+
+ if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
+ adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
+
+ if (!adev->dm.hdcp_workqueue)
+ DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
+ else
+ DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
+
+ dc_init_callbacks(adev->dm.dc, &init_params);
+ }
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ init_completion(&adev->dm.dmub_aux_transfer_done);
+ adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
+ if (!adev->dm.dmub_notify) {
+			DRM_INFO("amdgpu: failed to allocate adev->dm.dmub_notify\n");
+ goto error;
+ }
+
+ adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
+ if (!adev->dm.delayed_hpd_wq) {
+ DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
+ goto error;
+ }
+
+ amdgpu_dm_outbox_init(adev);
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
+ dmub_aux_setconfig_callback, false)) {
+			DRM_ERROR("amdgpu: failed to register dmub aux callback\n");
+ goto error;
+ }
+		/* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
+		 * It is expected that DMUB will resend any pending notifications at this point. Note
+		 * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to
+		 * align with the legacy interface initialization sequence. Connection status will be
+		 * proactively detected once in amdgpu_dm_initialize_drm_device().
+		 */
+ dc_enable_dmub_outbox(adev->dm.dc);
+
+ /* DPIA trace goes to dmesg logs only if outbox is enabled */
+ if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE)
+ dc_dmub_srv_enable_dpia_trace(adev->dm.dc);
+ }
+
+ if (amdgpu_dm_initialize_drm_device(adev)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
+ /* create fake encoders for MST */
+ dm_dp_create_fake_mst_encoders(adev);
+
+ /* TODO: Add_display_info? */
+
+ /* TODO use dynamic cursor width */
+ adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
+ adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
+
+ if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
+ DRM_ERROR(
+ "amdgpu: failed to initialize sw for display support.\n");
+ goto error;
+ }
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
+ if (!adev->dm.secure_display_ctxs)
+ DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+#endif
+
+ DRM_DEBUG_DRIVER("KMS initialized.\n");
+
+ return 0;
+error:
+ amdgpu_dm_fini(adev);
+
+ return -EINVAL;
+}
+
+static int amdgpu_dm_early_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_dm_audio_fini(adev);
+
+ return 0;
+}
+
+static void amdgpu_dm_fini(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (adev->dm.vblank_control_workqueue) {
+ destroy_workqueue(adev->dm.vblank_control_workqueue);
+ adev->dm.vblank_control_workqueue = NULL;
+ }
+
+ amdgpu_dm_destroy_drm_device(&adev->dm);
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (adev->dm.secure_display_ctxs) {
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ if (adev->dm.secure_display_ctxs[i].crtc) {
+ flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
+ flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+ }
+ }
+ kfree(adev->dm.secure_display_ctxs);
+ adev->dm.secure_display_ctxs = NULL;
+ }
+#endif
+ if (adev->dm.hdcp_workqueue) {
+ hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
+ adev->dm.hdcp_workqueue = NULL;
+ }
+
+ if (adev->dm.dc) {
+ dc_deinit_callbacks(adev->dm.dc);
+ dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
+ if (dc_enable_dmub_notifications(adev->dm.dc)) {
+ kfree(adev->dm.dmub_notify);
+ adev->dm.dmub_notify = NULL;
+ destroy_workqueue(adev->dm.delayed_hpd_wq);
+ adev->dm.delayed_hpd_wq = NULL;
+ }
+ }
+
+ if (adev->dm.dmub_bo)
+ amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
+ &adev->dm.dmub_bo_gpu_addr,
+ &adev->dm.dmub_bo_cpu_addr);
+
+ if (adev->dm.hpd_rx_offload_wq && adev->dm.dc) {
+ for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+ if (adev->dm.hpd_rx_offload_wq[i].wq) {
+ destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
+ adev->dm.hpd_rx_offload_wq[i].wq = NULL;
+ }
+ }
+
+ kfree(adev->dm.hpd_rx_offload_wq);
+ adev->dm.hpd_rx_offload_wq = NULL;
+ }
+
+ /* DC Destroy TODO: Replace destroy DAL */
+ if (adev->dm.dc)
+ dc_destroy(&adev->dm.dc);
+ /*
+ * TODO: pageflip, vlank interrupt
+ *
+ * amdgpu_dm_irq_fini(adev);
+ */
+
+ if (adev->dm.cgs_device) {
+ amdgpu_cgs_destroy_device(adev->dm.cgs_device);
+ adev->dm.cgs_device = NULL;
+ }
+ if (adev->dm.freesync_module) {
+ mod_freesync_destroy(adev->dm.freesync_module);
+ adev->dm.freesync_module = NULL;
+ }
+
+ mutex_destroy(&adev->dm.audio_lock);
+ mutex_destroy(&adev->dm.dc_lock);
+ mutex_destroy(&adev->dm.dpia_aux_lock);
+}
+
+static int load_dmcu_fw(struct amdgpu_device *adev)
+{
+ const char *fw_name_dmcu = NULL;
+ int r;
+ const struct dmcu_firmware_header_v1_0 *hdr;
+
+ switch (adev->asic_type) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+#endif
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ return 0;
+ case CHIP_NAVI12:
+ fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
+ break;
+ case CHIP_RAVEN:
+ if (ASICREV_IS_PICASSO(adev->external_rev_id))
+ fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
+ else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
+ fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
+ else
+ return 0;
+ break;
+ default:
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ return 0;
+ default:
+ break;
+ }
+ DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
+ return -EINVAL;
+ }
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
+ return 0;
+ }
+
+ r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
+ if (r == -ENODEV) {
+ /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
+ DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
+ adev->dm.fw_dmcu = NULL;
+ return 0;
+ }
+ if (r) {
+ dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
+ fw_name_dmcu);
+ amdgpu_ucode_release(&adev->dm.fw_dmcu);
+ return r;
+ }
+
+ hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
+
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
+
+ adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
+
+ return 0;
+}
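+
+/*
+ * Size accounting in load_dmcu_fw(), spelled out: the DMCU image splits into
+ * an ERAM region of (ucode_size_bytes - intv_size_bytes) and an interrupt
+ * vector region of intv_size_bytes, and each contribution to fw_size is
+ * rounded up to PAGE_SIZE independently via ALIGN().
+ */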
+
+static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
+{
+ struct amdgpu_device *adev = ctx;
+
+ return dm_read_reg(adev->dm.dc->ctx, address);
+}
+
+static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
+ uint32_t value)
+{
+ struct amdgpu_device *adev = ctx;
+
+ return dm_write_reg(adev->dm.dc->ctx, address, value);
+}
+
+static int dm_dmub_sw_init(struct amdgpu_device *adev)
+{
+ struct dmub_srv_create_params create_params;
+ struct dmub_srv_region_params region_params;
+ struct dmub_srv_region_info region_info;
+ struct dmub_srv_memory_params memory_params;
+ struct dmub_srv_fb_info *fb_info;
+ struct dmub_srv *dmub_srv;
+ const struct dmcub_firmware_header_v1_0 *hdr;
+ enum dmub_asic dmub_asic;
+ enum dmub_status status;
+ static enum dmub_window_memory_type window_memory_type[DMUB_WINDOW_TOTAL] = {
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_0_INST_CONST
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_1_STACK
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_2_BSS_DATA
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_3_VBIOS
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_4_MAILBOX
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE
+ };
+ int r;
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 1, 0):
+ dmub_asic = DMUB_ASIC_DCN21;
+ break;
+ case IP_VERSION(3, 0, 0):
+ dmub_asic = DMUB_ASIC_DCN30;
+ break;
+ case IP_VERSION(3, 0, 1):
+ dmub_asic = DMUB_ASIC_DCN301;
+ break;
+ case IP_VERSION(3, 0, 2):
+ dmub_asic = DMUB_ASIC_DCN302;
+ break;
+ case IP_VERSION(3, 0, 3):
+ dmub_asic = DMUB_ASIC_DCN303;
+ break;
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
+ break;
+ case IP_VERSION(3, 1, 4):
+ dmub_asic = DMUB_ASIC_DCN314;
+ break;
+ case IP_VERSION(3, 1, 5):
+ dmub_asic = DMUB_ASIC_DCN315;
+ break;
+ case IP_VERSION(3, 1, 6):
+ dmub_asic = DMUB_ASIC_DCN316;
+ break;
+ case IP_VERSION(3, 2, 0):
+ dmub_asic = DMUB_ASIC_DCN32;
+ break;
+ case IP_VERSION(3, 2, 1):
+ dmub_asic = DMUB_ASIC_DCN321;
+ break;
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ dmub_asic = DMUB_ASIC_DCN35;
+ break;
+ default:
+ /* ASIC doesn't support DMUB. */
+ return 0;
+ }
+
+ hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
+ adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
+ AMDGPU_UCODE_ID_DMCUB;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
+ adev->dm.dmub_fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
+
+ DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+ adev->dm.dmcub_fw_version);
+ }
+
+ adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
+ dmub_srv = adev->dm.dmub_srv;
+
+ if (!dmub_srv) {
+ DRM_ERROR("Failed to allocate DMUB service!\n");
+ return -ENOMEM;
+ }
+
+ memset(&create_params, 0, sizeof(create_params));
+ create_params.user_ctx = adev;
+ create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
+ create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
+ create_params.asic = dmub_asic;
+
+ /* Create the DMUB service. */
+ status = dmub_srv_create(dmub_srv, &create_params);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error creating DMUB service: %d\n", status);
+ return -EINVAL;
+ }
+
+ /* Calculate the size of all the regions for the DMUB service. */
+ memset(&region_params, 0, sizeof(region_params));
+
+ region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
+ PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
+ region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
+ region_params.vbios_size = adev->bios_size;
+ region_params.fw_bss_data = region_params.bss_data_size ?
+ adev->dm.dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ le32_to_cpu(hdr->inst_const_bytes) : NULL;
+ region_params.fw_inst_const =
+ adev->dm.dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ PSP_HEADER_BYTES;
+ region_params.window_memory_type = window_memory_type;
+
+ status = dmub_srv_calc_region_info(dmub_srv, &region_params,
+ &region_info);
+
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error calculating DMUB region info: %d\n", status);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate a framebuffer based on the total size of all the regions.
+ * TODO: Move this into GART.
+ */
+ r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT,
+ &adev->dm.dmub_bo,
+ &adev->dm.dmub_bo_gpu_addr,
+ &adev->dm.dmub_bo_cpu_addr);
+ if (r)
+ return r;
+
+ /* Rebase the regions on the framebuffer address. */
+ memset(&memory_params, 0, sizeof(memory_params));
+ memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
+ memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
+ memory_params.region_info = &region_info;
+ memory_params.window_memory_type = window_memory_type;
+
+ adev->dm.dmub_fb_info =
+ kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
+ fb_info = adev->dm.dmub_fb_info;
+
+ if (!fb_info) {
+ DRM_ERROR(
+ "Failed to allocate framebuffer info for DMUB service!\n");
+ return -ENOMEM;
+ }
+
+ status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
+ if (status != DMUB_STATUS_OK) {
+ DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
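+
+/*
+ * Design note on the sequence above: DMUB bring-up is four phases -
+ * dmub_srv_create() binds the register accessors, dmub_srv_calc_region_info()
+ * sizes the firmware windows, a single VRAM/GTT buffer is allocated to back
+ * them, and dmub_srv_calc_mem_info() rebases each window onto that buffer.
+ */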
+
+static int dm_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
+ r = dm_dmub_sw_init(adev);
+ if (r)
+ return r;
+
+ return load_dmcu_fw(adev);
+}
+
+static int dm_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ kfree(adev->dm.dmub_fb_info);
+ adev->dm.dmub_fb_info = NULL;
+
+ if (adev->dm.dmub_srv) {
+ dmub_srv_destroy(adev->dm.dmub_srv);
+ kfree(adev->dm.dmub_srv);
+ adev->dm.dmub_srv = NULL;
+ }
+
+ amdgpu_ucode_release(&adev->dm.dmub_fw);
+ amdgpu_ucode_release(&adev->dm.fw_dmcu);
+
+ return 0;
+}
+
+static int detect_mst_link_for_all_connectors(struct drm_device *dev)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ int ret = 0;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type == dc_connection_mst_branch &&
+ aconnector->mst_mgr.aux) {
+ DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+ aconnector,
+ aconnector->base.base.id);
+
+ ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+ if (ret < 0) {
+ DRM_ERROR("DM_MST: Failed to start MST\n");
+ aconnector->dc_link->type =
+ dc_connection_single;
+ ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link);
+ break;
+ }
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+
+ return ret;
+}
+
+static int dm_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ struct dmcu_iram_parameters params;
+ unsigned int linear_lut[16];
+ int i;
+ struct dmcu *dmcu = NULL;
+
+ dmcu = adev->dm.dc->res_pool->dmcu;
+
+ for (i = 0; i < 16; i++)
+ linear_lut[i] = 0xFFFF * i / 15;
+
+ params.set = 0;
+ params.backlight_ramping_override = false;
+ params.backlight_ramping_start = 0xCCCC;
+ params.backlight_ramping_reduction = 0xCCCCCCCC;
+ params.backlight_lut_array_size = 16;
+ params.backlight_lut_array = linear_lut;
+
+	/* Min backlight level after ABM reduction; don't allow below 1%:
+	 * 0xFFFF * 0.01 = 0x28F
+	 */
+ params.min_abm_backlight = 0x28F;
+	/* When ABM is implemented on dmcub (ABM 2.4 and up), the dmcu
+	 * object will be NULL.
+	 */
+ if (dmcu) {
+ if (!dmcu_load_iram(dmcu, params))
+ return -EINVAL;
+ } else if (adev->dm.dc->ctx->dmub_srv) {
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ int edp_num;
+
+ dc_get_edp_links(adev->dm.dc, edp_links, &edp_num);
+ for (i = 0; i < edp_num; i++) {
+ if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
+ return -EINVAL;
+ }
+ }
+
+ return detect_mst_link_for_all_connectors(adev_to_drm(adev));
+}
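+
+/*
+ * Worked numbers for dm_late_init() above: the 16-entry LUT is linear from
+ * linear_lut[0] = 0 to linear_lut[15] = 0xFFFF * 15 / 15 = 0xFFFF, and the
+ * 1% ABM floor is 0xFFFF * 0.01 = 655.35, i.e. 0x28F = 655.
+ */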
+
+static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
+{
+ int ret;
+ u8 guid[16];
+ u64 tmp64;
+
+ mutex_lock(&mgr->lock);
+ if (!mgr->mst_primary)
+ goto out_fail;
+
+ if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
+ drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+
+ ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+
+ /* Some hubs forget their guids after they resume */
+ ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (ret != 16) {
+ drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+
+ if (memchr_inv(guid, 0, 16) == NULL) {
+ tmp64 = get_jiffies_64();
+ memcpy(&guid[0], &tmp64, sizeof(u64));
+ memcpy(&guid[8], &tmp64, sizeof(u64));
+
+ ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
+
+ if (ret != 16) {
+ drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
+ goto out_fail;
+ }
+ }
+
+ memcpy(mgr->mst_primary->guid, guid, 16);
+
+out_fail:
+ mutex_unlock(&mgr->lock);
+}
+
+static void s3_handle_mst(struct drm_device *dev, bool suspend)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct drm_dp_mst_topology_mgr *mgr;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type != dc_connection_mst_branch ||
+ aconnector->mst_root)
+ continue;
+
+ mgr = &aconnector->mst_mgr;
+
+ if (suspend) {
+ drm_dp_mst_topology_mgr_suspend(mgr);
+ } else {
+			/* If extended timeout is supported in hardware, default to the
+			 * LTTPR timeout (3.2ms) first as a W/A for the DP link-layer
+			 * CTS 4.2.1.1 regression introduced by the CTS spec requirement update.
+			 */
+ try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
+ if (!dp_is_lttpr_present(aconnector->dc_link))
+ try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
+
+ /* TODO: move resume_mst_branch_status() into drm mst resume again
+ * once topology probing work is pulled out from mst resume into mst
+ * resume 2nd step. mst resume 2nd step should be called after old
+ * state getting restored (i.e. drm_atomic_helper_resume()).
+ */
+ resume_mst_branch_status(mgr);
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+}
+
+static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
+{
+ int ret = 0;
+
+	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
+	 * on the Windows driver dc implementation.
+	 * For Navi1x, the clock settings of the dcn watermarks are fixed. The settings
+	 * should be passed to smu during boot up and on resume from s3.
+	 * boot up: dc calculates the dcn watermark clock settings within dc_create,
+	 * dcn20_resource_construct
+	 * then calls the pplib functions below to pass the settings to smu:
+	 * smu_set_watermarks_for_clock_ranges
+	 * smu_set_watermarks_table
+	 * navi10_set_watermarks_table
+	 * smu_write_watermarks_table
+	 *
+	 * For Renoir, the clock settings of the dcn watermarks are also fixed values.
+	 * dc has implemented a different flow for the Windows driver:
+	 * dc_hardware_init / dc_set_power_state
+	 * dcn10_init_hw
+	 * notify_wm_ranges
+	 * set_wm_ranges
+	 * -- Linux
+	 * smu_set_watermarks_for_clock_ranges
+	 * renoir_set_watermarks_table
+	 * smu_write_watermarks_table
+	 *
+	 * For Linux,
+	 * dc_hardware_init -> amdgpu_dm_init
+	 * dc_set_power_state --> dm_resume
+	 *
+	 * Therefore, this function applies to Navi10/12/14 (the DCE_HWIP 2.0.x
+	 * cases below) but not to Renoir.
+	 */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 0):
+ break;
+ default:
+ return 0;
+ }
+
+ ret = amdgpu_dpm_write_watermarks_table(adev);
+ if (ret) {
+ DRM_ERROR("Failed to update WMTABLE!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * dm_hw_init() - Initialize DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Initialize the &struct amdgpu_display_manager device. This involves calling
+ * the initializers of each DM component, then populating the struct with them.
+ *
+ * Although the function implies hardware initialization, both hardware and
+ * software are initialized here. Splitting them out to their relevant init
+ * hooks is a future TODO item.
+ *
+ * Some notable things that are initialized here:
+ *
+ * - Display Core, both software and hardware
+ * - DC modules that we need (freesync and color management)
+ * - DRM software states
+ * - Interrupt sources and handlers
+ * - Vblank support
+ * - Debug FS entries, if enabled
+ */
+static int dm_hw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* Create DAL display manager */
+ amdgpu_dm_init(adev);
+ amdgpu_dm_hpd_init(adev);
+
+ return 0;
+}
+
+/**
+ * dm_hw_fini() - Teardown DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Teardown components within &struct amdgpu_display_manager that require
+ * cleanup. This involves cleaning up the DRM device, DC, and any modules that
+ * were loaded. Also flush IRQ workqueues and disable them.
+ */
+static int dm_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ amdgpu_dm_hpd_fini(adev);
+
+ amdgpu_dm_irq_fini(adev);
+ amdgpu_dm_fini(adev);
+ return 0;
+}
+
+static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
+ struct dc_state *state, bool enable)
+{
+ enum dc_irq_source irq_source;
+ struct amdgpu_crtc *acrtc;
+ int rc = -EBUSY;
+ int i = 0;
+
+ for (i = 0; i < state->stream_count; i++) {
+ acrtc = get_crtc_by_otg_inst(
+ adev, state->stream_status[i].primary_otg_inst);
+
+ if (acrtc && state->stream_status[i].plane_count != 0) {
+ irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
+ rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+ if (rc)
+ DRM_WARN("Failed to %s pflip interrupts\n",
+ enable ? "enable" : "disable");
+
+ if (enable) {
+ if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
+ } else
+ rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
+
+ if (rc)
+ DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
+
+ irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
+ /* During gpu-reset we disable and then enable vblank irq, so
+ * don't use amdgpu_irq_get/put() to avoid refcount change.
+ */
+ if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
+ DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
+ }
+ }
+}
+
+static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
+{
+ struct dc_state *context = NULL;
+ enum dc_status res = DC_ERROR_UNEXPECTED;
+ int i;
+ struct dc_stream_state *del_streams[MAX_PIPES];
+ int del_streams_count = 0;
+ struct dc_commit_streams_params params = {};
+
+ memset(del_streams, 0, sizeof(del_streams));
+
+ context = dc_state_create_current_copy(dc);
+ if (context == NULL)
+ goto context_alloc_fail;
+
+ /* First remove from context all streams */
+ for (i = 0; i < context->stream_count; i++) {
+ struct dc_stream_state *stream = context->streams[i];
+
+ del_streams[del_streams_count++] = stream;
+ }
+
+ /* Remove all planes for removed streams and then remove the streams */
+ for (i = 0; i < del_streams_count; i++) {
+ if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+ res = DC_FAIL_DETACH_SURFACES;
+ goto fail;
+ }
+
+ res = dc_state_remove_stream(dc, context, del_streams[i]);
+ if (res != DC_OK)
+ goto fail;
+ }
+
+ params.streams = context->streams;
+ params.stream_count = context->stream_count;
+ res = dc_commit_streams(dc, &params);
+
+fail:
+ dc_state_release(context);
+
+context_alloc_fail:
+ return res;
+}
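+
+/*
+ * Ordering note for the teardown above: planes must be detached from a
+ * stream (dc_state_rem_all_planes_for_stream()) before the stream itself
+ * can be removed (dc_state_remove_stream()); committing the now-empty
+ * stream list then idles the hardware pipes.
+ */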
+
+static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
+{
+ int i;
+
+ if (dm->hpd_rx_offload_wq) {
+ for (i = 0; i < dm->dc->caps.max_links; i++)
+ flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
+ }
+}
+
+static int dm_suspend(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct amdgpu_display_manager *dm = &adev->dm;
+ int ret = 0;
+
+ if (amdgpu_in_reset(adev)) {
+ mutex_lock(&dm->dc_lock);
+
+ dc_allow_idle_optimizations(adev->dm.dc, false);
+
+ dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
+
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
+
+ amdgpu_dm_commit_zero_streams(dm->dc);
+
+ amdgpu_dm_irq_suspend(adev);
+
+ hpd_rx_irq_work_suspend(dm);
+
+ return ret;
+ }
+
+ WARN_ON(adev->dm.cached_state);
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
+ if (IS_ERR(adev->dm.cached_state))
+ return PTR_ERR(adev->dm.cached_state);
+
+ s3_handle_mst(adev_to_drm(adev), true);
+
+ amdgpu_dm_irq_suspend(adev);
+
+ hpd_rx_irq_work_suspend(dm);
+
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
+ dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3);
+
+ return 0;
+}
+
+struct drm_connector *
+amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ u32 i;
+ struct drm_connector_state *new_con_state;
+ struct drm_connector *connector;
+ struct drm_crtc *crtc_from_state;
+
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ crtc_from_state = new_con_state->crtc;
+
+ if (crtc_from_state == crtc)
+ return connector;
+ }
+
+ return NULL;
+}
+
+static void emulated_link_detect(struct dc_link *link)
+{
+ struct dc_sink_init_data sink_init_data = { 0 };
+ struct display_sink_capability sink_caps = { 0 };
+ enum dc_edid_status edid_status;
+ struct dc_context *dc_ctx = link->ctx;
+ struct drm_device *dev = adev_to_drm(dc_ctx->driver_context);
+ struct dc_sink *sink = NULL;
+ struct dc_sink *prev_sink = NULL;
+
+ link->type = dc_connection_none;
+ prev_sink = link->local_sink;
+
+ if (prev_sink)
+ dc_sink_release(prev_sink);
+
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_HDMI_TYPE_A: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_DVI_DUAL_LINK: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+ break;
+ }
+
+ case SIGNAL_TYPE_LVDS: {
+ sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+ sink_caps.signal = SIGNAL_TYPE_LVDS;
+ break;
+ }
+
+ case SIGNAL_TYPE_EDP: {
+ sink_caps.transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_EDP;
+ break;
+ }
+
+ case SIGNAL_TYPE_DISPLAY_PORT: {
+ sink_caps.transaction_type =
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+ sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
+ break;
+ }
+
+ default:
+ drm_err(dev, "Invalid connector type! signal:%d\n",
+ link->connector_signal);
+ return;
+ }
+
+ sink_init_data.link = link;
+ sink_init_data.sink_signal = sink_caps.signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ drm_err(dev, "Failed to create sink!\n");
+ return;
+ }
+
+ /* dc_sink_create returns a new reference */
+ link->local_sink = sink;
+
+ edid_status = dm_helpers_read_local_edid(
+ link->ctx,
+ link,
+ sink);
+
+ if (edid_status != EDID_OK)
+ drm_err(dev, "Failed to read EDID\n");
+}
+
+static void dm_gpureset_commit_state(struct dc_state *dc_state,
+ struct amdgpu_display_manager *dm)
+{
+ struct {
+ struct dc_surface_update surface_updates[MAX_SURFACES];
+ struct dc_plane_info plane_infos[MAX_SURFACES];
+ struct dc_scaling_info scaling_infos[MAX_SURFACES];
+ struct dc_flip_addrs flip_addrs[MAX_SURFACES];
+ struct dc_stream_update stream_update;
+ } *bundle;
+ int k, m;
+
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+
+ if (!bundle) {
+ drm_err(dm->ddev, "Failed to allocate update bundle\n");
+ goto cleanup;
+ }
+
+ for (k = 0; k < dc_state->stream_count; k++) {
+ bundle->stream_update.stream = dc_state->streams[k];
+
+ for (m = 0; m < dc_state->stream_status->plane_count; m++) {
+ bundle->surface_updates[m].surface =
+ dc_state->stream_status->plane_states[m];
+ bundle->surface_updates[m].surface->force_full_update =
+ true;
+ }
+
+ update_planes_and_stream_adapter(dm->dc,
+ UPDATE_TYPE_FULL,
+ dc_state->stream_status->plane_count,
+ dc_state->streams[k],
+ &bundle->stream_update,
+ bundle->surface_updates);
+ }
+
+cleanup:
+ kfree(bundle);
+}
+
+static int dm_resume(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state;
+ struct dm_plane_state *dm_new_plane_state;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ struct dc_state *dc_state;
+ int i, r, j, ret;
+ bool need_hotplug = false;
+ struct dc_commit_streams_params commit_params = {};
+
+ if (dm->dc->caps.ips_support) {
+ dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
+ }
+
+ if (amdgpu_in_reset(adev)) {
+ dc_state = dm->cached_dc_state;
+
+ /*
+ * The dc->current_state is backed up into dm->cached_dc_state
+ * before we commit 0 streams.
+ *
+ * DC will clear link encoder assignments on the real state
+ * but the changes won't propagate over to the copy we made
+ * before the 0 streams commit.
+ *
+ * DC expects that link encoder assignments are *not* valid
+ * when committing a state, so as a workaround we can copy
+ * off of the current state.
+ *
+ * We lose the previous assignments, but we had already
+ * commit 0 streams anyway.
+ */
+ link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
+
+ r = dm_dmub_hw_init(adev);
+ if (r)
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+
+ dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
+ dc_resume(dm->dc);
+
+ amdgpu_dm_irq_resume_early(adev);
+
+ for (i = 0; i < dc_state->stream_count; i++) {
+ dc_state->streams[i]->mode_changed = true;
+ for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
+ dc_state->stream_status[i].plane_states[j]->update_flags.raw
+ = 0xffffffff;
+ }
+ }
+
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ amdgpu_dm_outbox_init(adev);
+ dc_enable_dmub_outbox(adev->dm.dc);
+ }
+
+ commit_params.streams = dc_state->streams;
+ commit_params.stream_count = dc_state->stream_count;
+ WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
+
+ dm_gpureset_commit_state(dm->cached_dc_state, dm);
+
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
+
+ dc_state_release(dm->cached_dc_state);
+ dm->cached_dc_state = NULL;
+
+ amdgpu_dm_irq_resume_late(adev);
+
+ mutex_unlock(&dm->dc_lock);
+
+ return 0;
+ }
+ /* Recreate dc_state - DC invalidates it when setting power state to S3. */
+ dc_state_release(dm_state->context);
+ dm_state->context = dc_state_create(dm->dc, NULL);
+ /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
+
+ /* Before powering on DC we need to re-initialize DMUB. */
+ dm_dmub_hw_resume(adev);
+
+ /* Re-enable outbox interrupts for DPIA. */
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+ amdgpu_dm_outbox_init(adev);
+ dc_enable_dmub_outbox(adev->dm.dc);
+ }
+
+ /* power on hardware */
+ dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0);
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
+ /* program HPD filter */
+ dc_resume(dm->dc);
+
+	/*
+	 * Enable the HPD Rx IRQ early; this must be done before setting the
+	 * mode, as short-pulse interrupts are used for MST.
+	 */
+ amdgpu_dm_irq_resume_early(adev);
+
+	/* On resume we need to rewrite the MSTM control bits to enable MST */
+ s3_handle_mst(ddev, false);
+
+	/* Do detection */
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!aconnector->dc_link)
+ continue;
+
+		/*
+		 * Skip connectors that are end sinks of an already-created MST
+		 * topology; they are handled through their MST root.
+		 */
+ if (aconnector && aconnector->mst_root)
+ continue;
+
+ mutex_lock(&aconnector->hpd_lock);
+ if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(aconnector->dc_link);
+ } else {
+ mutex_lock(&dm->dc_lock);
+ dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ mutex_unlock(&dm->dc_lock);
+ }
+
+ if (aconnector->fake_enable && aconnector->dc_link->local_sink)
+ aconnector->fake_enable = false;
+
+ if (aconnector->dc_sink)
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ mutex_unlock(&aconnector->hpd_lock);
+ }
+ drm_connector_list_iter_end(&iter);
+
+ /* Force mode set in atomic commit */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
+ new_crtc_state->active_changed = true;
+
+ /*
+ * atomic_check is expected to create the dc states. We need to release
+ * them here, since they were duplicated as part of the suspend
+ * procedure.
+ */
+ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->stream) {
+ WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
+ dc_stream_release(dm_new_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+ }
+ }
+
+ for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ if (dm_new_plane_state->dc_state) {
+ WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
+ dc_plane_state_release(dm_new_plane_state->dc_state);
+ dm_new_plane_state->dc_state = NULL;
+ }
+ }
+
+ drm_atomic_helper_resume(ddev, dm->cached_state);
+
+ dm->cached_state = NULL;
+
+	/* Do mst topology probing after resuming cached state */
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (aconnector->dc_link->type != dc_connection_mst_branch ||
+ aconnector->mst_root)
+ continue;
+
+ ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
+
+ if (ret < 0) {
+ dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+ aconnector->dc_link);
+ need_hotplug = true;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+
+ if (need_hotplug)
+ drm_kms_helper_hotplug_event(ddev);
+
+ amdgpu_dm_irq_resume_late(adev);
+
+ amdgpu_dm_smu_write_watermarks_table(adev);
+
+ return 0;
+}
+
+/**
+ * DOC: DM Lifecycle
+ *
+ * DM (and consequently DC) is registered in the amdgpu base driver as an IP
+ * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
+ * the base driver's device list to be initialized and torn down accordingly.
+ *
+ * The functions to do so are provided as hooks in &struct amd_ip_funcs.
+ */
+
+static const struct amd_ip_funcs amdgpu_dm_funcs = {
+ .name = "dm",
+ .early_init = dm_early_init,
+ .late_init = dm_late_init,
+ .sw_init = dm_sw_init,
+ .sw_fini = dm_sw_fini,
+ .early_fini = amdgpu_dm_early_fini,
+ .hw_init = dm_hw_init,
+ .hw_fini = dm_hw_fini,
+ .suspend = dm_suspend,
+ .resume = dm_resume,
+ .is_idle = dm_is_idle,
+ .wait_for_idle = dm_wait_for_idle,
+ .check_soft_reset = dm_check_soft_reset,
+ .soft_reset = dm_soft_reset,
+ .set_clockgating_state = dm_set_clockgating_state,
+ .set_powergating_state = dm_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version dm_ip_block = {
+ .type = AMD_IP_BLOCK_TYPE_DCE,
+ .major = 1,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &amdgpu_dm_funcs,
+};
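+
+/*
+ * Illustrative sketch (the wrapper function below is hypothetical): the base
+ * driver consumes dm_ip_block through the real registration hook
+ * amdgpu_device_ip_block_add(), which appends it to adev->ip_blocks so the
+ * amd_ip_funcs hooks above run in the standard init/fini sequence.
+ */
+#if 0
+static int example_register_dm_ip(struct amdgpu_device *adev)
+{
+	return amdgpu_device_ip_block_add(adev, &dm_ip_block);
+}
+#endif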
+
+/**
+ * DOC: atomic
+ *
+ * *WIP*
+ */
+
+static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
+ .fb_create = amdgpu_display_user_framebuffer_create,
+ .get_format_info = amdgpu_dm_plane_get_format_info,
+ .atomic_check = amdgpu_dm_atomic_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
+ .atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
+ .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
+};
+
+static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
+{
+ struct amdgpu_dm_backlight_caps *caps;
+ struct drm_connector *conn_base;
+ struct amdgpu_device *adev;
+ struct drm_luminance_range_info *luminance_range;
+
+ if (aconnector->bl_idx == -1 ||
+ aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
+ return;
+
+ conn_base = &aconnector->base;
+ adev = drm_to_adev(conn_base->dev);
+
+ caps = &adev->dm.backlight_caps[aconnector->bl_idx];
+ caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
+ caps->aux_support = false;
+
+ if (caps->ext_caps->bits.oled == 1
+ /*
+ * ||
+ * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+ * caps->ext_caps->bits.hdr_aux_backlight_control == 1
+ */)
+ caps->aux_support = true;
+
+ if (amdgpu_backlight == 0)
+ caps->aux_support = false;
+ else if (amdgpu_backlight == 1)
+ caps->aux_support = true;
+
+ luminance_range = &conn_base->display_info.luminance_range;
+
+ if (luminance_range->max_luminance) {
+ caps->aux_min_input_signal = luminance_range->min_luminance;
+ caps->aux_max_input_signal = luminance_range->max_luminance;
+ } else {
+ caps->aux_min_input_signal = 0;
+ caps->aux_max_input_signal = 512;
+ }
+}
+
+void amdgpu_dm_update_connector_after_detect(
+ struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct dc_sink *sink;
+
+ /* MST handled by drm_mst framework */
+	if (aconnector->mst_mgr.mst_state)
+ return;
+
+ sink = aconnector->dc_link->local_sink;
+ if (sink)
+ dc_sink_retain(sink);
+
+	/*
+	 * An EDID-managed connector gets its first update only in the mode_valid
+	 * hook; after that the connector sink is set to either the fake or the
+	 * physical sink, depending on the link status.
+	 * Skip if this was already done during boot.
+	 */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
+ && aconnector->dc_em_sink) {
+
+		/*
+		 * For headless S3 resume, use the emulated sink (dc_em_sink) to
+		 * fake a stream, because on resume connector->sink is set to NULL.
+		 */
+ mutex_lock(&dev->mode_config.mutex);
+
+ if (sink) {
+ if (aconnector->dc_sink) {
+ amdgpu_dm_update_freesync_caps(connector, NULL);
+				/*
+				 * The retain and release below bump the sink's refcount:
+				 * the link no longer points to it after disconnect, so the
+				 * next CRTC-to-connector reshuffle by UMD would otherwise
+				 * trigger an unwanted dc_sink release.
+				 */
+ dc_sink_release(aconnector->dc_sink);
+ }
+ aconnector->dc_sink = sink;
+ dc_sink_retain(aconnector->dc_sink);
+ amdgpu_dm_update_freesync_caps(connector,
+ aconnector->edid);
+ } else {
+ amdgpu_dm_update_freesync_caps(connector, NULL);
+ if (!aconnector->dc_sink) {
+ aconnector->dc_sink = aconnector->dc_em_sink;
+ dc_sink_retain(aconnector->dc_sink);
+ }
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (sink)
+ dc_sink_release(sink);
+ return;
+ }
+
+ /*
+ * TODO: temporary guard to look for proper fix
+ * if this sink is MST sink, we should not do anything
+ */
+ if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ dc_sink_release(sink);
+ return;
+ }
+
+ if (aconnector->dc_sink == sink) {
+		/*
+		 * We got a DP short pulse (Link Loss, DP CTS, etc.).
+		 * Nothing to do.
+		 */
+ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
+ aconnector->connector_id);
+ if (sink)
+ dc_sink_release(sink);
+ return;
+ }
+
+ DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
+ aconnector->connector_id, aconnector->dc_sink, sink);
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ /*
+ * 1. Update status of the drm connector
+ * 2. Send an event and let userspace tell us what to do
+ */
+ if (sink) {
+ /*
+ * TODO: check if we still need the S3 mode update workaround.
+ * If yes, put it here.
+ */
+ if (aconnector->dc_sink) {
+ amdgpu_dm_update_freesync_caps(connector, NULL);
+ dc_sink_release(aconnector->dc_sink);
+ }
+
+ aconnector->dc_sink = sink;
+ dc_sink_retain(aconnector->dc_sink);
+ if (sink->dc_edid.length == 0) {
+ aconnector->edid = NULL;
+ if (aconnector->dc_link->aux_mode) {
+ drm_dp_cec_unset_edid(
+ &aconnector->dm_dp_aux.aux);
+ }
+ } else {
+ aconnector->edid =
+ (struct edid *)sink->dc_edid.raw_edid;
+
+ if (aconnector->dc_link->aux_mode)
+ drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
+ aconnector->edid);
+ }
+
+ if (!aconnector->timing_requested) {
+ aconnector->timing_requested =
+ kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
+ if (!aconnector->timing_requested)
+ drm_err(dev,
+ "failed to create aconnector->requested_timing\n");
+ }
+
+ drm_connector_update_edid_property(connector, aconnector->edid);
+ amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
+ update_connector_ext_caps(aconnector);
+ } else {
+ drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
+ amdgpu_dm_update_freesync_caps(connector, NULL);
+ drm_connector_update_edid_property(connector, NULL);
+ aconnector->num_modes = 0;
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+ aconnector->edid = NULL;
+ kfree(aconnector->timing_requested);
+ aconnector->timing_requested = NULL;
+		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it on hotplug */
+ if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ update_subconnector_property(aconnector);
+
+ if (sink)
+ dc_sink_release(sink);
+}
+
+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
+ bool ret = false;
+
+ if (adev->dm.disable_hpd_irq)
+ return;
+
+	/*
+	 * In case of failure or for MST there is no need to update the connector
+	 * status or notify the OS, since (in the MST case) MST does this in its
+	 * own context.
+	 */
+ mutex_lock(&aconnector->hpd_lock);
+
+ if (adev->dm.hdcp_workqueue) {
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+ dm_con_state->update_hdcp = true;
+ }
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ aconnector->timing_changed = false;
+
+ if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(aconnector->dc_link);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_connector_hotplug_event(connector);
+ } else {
+ mutex_lock(&adev->dm.dc_lock);
+ ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ mutex_unlock(&adev->dm.dc_lock);
+ if (ret) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+ drm_kms_helper_connector_hotplug_event(connector);
+ }
+ }
+ mutex_unlock(&aconnector->hpd_lock);
+}
+
+static void handle_hpd_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+
+ handle_hpd_irq_helper(aconnector);
+}
+
+static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+ union hpd_irq_data hpd_irq_data)
+{
+ struct hpd_rx_irq_offload_work *offload_work =
+ kzalloc(sizeof(*offload_work), GFP_KERNEL);
+
+ if (!offload_work) {
+ DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
+ return;
+ }
+
+ INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
+ offload_work->data = hpd_irq_data;
+ offload_work->offload_wq = offload_wq;
+
+ queue_work(offload_wq->wq, &offload_work->work);
+	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work\n");
+}
+
+static void handle_hpd_rx_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+ struct drm_connector *connector = &aconnector->base;
+ struct drm_device *dev = connector->dev;
+ struct dc_link *dc_link = aconnector->dc_link;
+ bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+ bool result = false;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ union hpd_irq_data hpd_irq_data;
+ bool link_loss = false;
+ bool has_left_work = false;
+ int idx = dc_link->link_index;
+ struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
+
+ memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
+
+ if (adev->dm.disable_hpd_irq)
+ return;
+
+	/*
+	 * TODO: This mutex temporarily protects the HPD interrupt from GPIO
+	 * conflicts; once an i2c helper is implemented, it should be retired.
+	 */
+ mutex_lock(&aconnector->hpd_lock);
+
+ result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
+ &link_loss, true, &has_left_work);
+
+ if (!has_left_work)
+ goto out;
+
+ if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ goto out;
+ }
+
+ if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+ if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+ hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ bool skip = false;
+
+ /*
+ * DOWN_REP_MSG_RDY is also handled by polling method
+ * mgr->cbs->poll_hpd_irq()
+ */
+ spin_lock(&offload_wq->offload_lock);
+ skip = offload_wq->is_handling_mst_msg_rdy_event;
+
+ if (!skip)
+ offload_wq->is_handling_mst_msg_rdy_event = true;
+
+ spin_unlock(&offload_wq->offload_lock);
+
+ if (!skip)
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
+ goto out;
+ }
+
+ if (link_loss) {
+ bool skip = false;
+
+ spin_lock(&offload_wq->offload_lock);
+ skip = offload_wq->is_handling_link_loss;
+
+ if (!skip)
+ offload_wq->is_handling_link_loss = true;
+
+ spin_unlock(&offload_wq->offload_lock);
+
+ if (!skip)
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
+ goto out;
+ }
+ }
+
+out:
+ if (result && !is_mst_root_connector) {
+ /* Downstream Port status changed. */
+ if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(dc_link);
+
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ drm_kms_helper_connector_hotplug_event(connector);
+ } else {
+ bool ret = false;
+
+ mutex_lock(&adev->dm.dc_lock);
+ ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
+ mutex_unlock(&adev->dm.dc_lock);
+
+ if (ret) {
+ if (aconnector->fake_enable)
+ aconnector->fake_enable = false;
+
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
+ drm_modeset_lock_all(dev);
+ dm_restore_drm_connector_state(dev, connector);
+ drm_modeset_unlock_all(dev);
+
+ drm_kms_helper_connector_hotplug_event(connector);
+ }
+ }
+ }
+ if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
+ if (adev->dm.hdcp_workqueue)
+ hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
+ }
+
+ if (dc_link->type != dc_connection_mst_branch)
+ drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
+
+ mutex_unlock(&aconnector->hpd_lock);
+}
+
+static void register_hpd_handlers(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_connector *connector;
+ struct amdgpu_dm_connector *aconnector;
+ const struct dc_link *dc_link;
+ struct dc_interrupt_params int_params = {0};
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true))
+			DRM_ERROR("amdgpu: failed to register dmub hpd callback\n");
+
+		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true))
+			DRM_ERROR("amdgpu: failed to register dmub hpd_irq callback\n");
+ }
+
+ list_for_each_entry(connector,
+ &dev->mode_config.connector_list, head) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ dc_link = aconnector->dc_link;
+
+ if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source = dc_link->irq_source_hpd;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ handle_hpd_irq,
+ (void *) aconnector);
+ }
+
+ if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
+
+ /* Also register for DP short pulse (hpd_rx). */
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source = dc_link->irq_source_hpd_rx;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ handle_hpd_rx_irq,
+ (void *) aconnector);
+ }
+ }
+}
+
+#if defined(CONFIG_DRM_AMD_DC_SI)
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dce60_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+ unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /*
+ * Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling.
+ */
+
+ /* Use VBLANK interrupt */
+ for (i = 0; i < adev->mode_info.num_crtc; i++) {
+ r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i + 1, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
+ i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+
+ }
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, client_id,
+ VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+#endif
+
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dce110_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+ unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
+
+ if (adev->family >= AMDGPU_FAMILY_AI)
+ client_id = SOC15_IH_CLIENTID_DCE;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /*
+ * Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling.
+ */
+
+ /* Use VBLANK interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use VUPDATE interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
+ if (r) {
+ DRM_ERROR("Failed to add vupdate irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_vupdate_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
+ i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+
+ }
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, client_id,
+ VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+
+/* Register IRQ sources and initialize IRQ callbacks */
+static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r;
+ int i;
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ static const unsigned int vrtl_int_srcid[] = {
+ DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
+ DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
+ };
+#endif
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ /*
+ * Actions of amdgpu_irq_add_id():
+ * 1. Register a set() function with base driver.
+ * Base driver will call set() function to enable/disable an
+ * interrupt in DC hardware.
+ * 2. Register amdgpu_dm_irq_handler().
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
+ * coming from DC hardware.
+ * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
+ * for acknowledging and handling.
+ */
+
+ /* Use VSTARTUP interrupt */
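+	/* On DCN, VSTARTUP serves as the vblank interrupt and drives dm_crtc_high_irq. */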
+ for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
+ i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add crtc irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(
+ adev, &int_params, dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use otg vertical line interrupt */
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
+ vrtl_int_srcid[i], &adev->vline0_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add vline0 irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
+
+ if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
+ DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
+ break;
+ }
+
+ c_irq_params = &adev->dm.vline0_params[int_params.irq_source
+ - DC_IRQ_SOURCE_DC1_VLINE0];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
+ }
+#endif
+
+ /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
+ * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
+ * to trigger at end of each vblank, regardless of state of the lock,
+ * matching DCE behaviour.
+ */
+ for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
+ i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add vupdate irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_vupdate_high_irq, c_irq_params);
+ }
+
+ /* Use GRPH_PFLIP interrupt */
+ for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
+ i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
+ if (r) {
+ DRM_ERROR("Failed to add page flip irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_pflip_high_irq, c_irq_params);
+
+ }
+
+ /* HPD */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
+ &adev->hpd_irq);
+ if (r) {
+ DRM_ERROR("Failed to add hpd irq id!\n");
+ return r;
+ }
+
+ register_hpd_handlers(adev);
+
+ return 0;
+}
+/* Register Outbox IRQ sources and initialize IRQ callbacks */
+static int register_outbox_irq_handlers(struct amdgpu_device *adev)
+{
+ struct dc *dc = adev->dm.dc;
+ struct common_irq_params *c_irq_params;
+ struct dc_interrupt_params int_params = {0};
+ int r, i;
+
+ int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+ int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
+ &adev->dmub_outbox_irq);
+ if (r) {
+ DRM_ERROR("Failed to add outbox irq id!\n");
+ return r;
+ }
+
+ if (dc->ctx->dmub_srv) {
+ i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
+ int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.dmub_outbox_params[0];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_dmub_outbox1_low_irq, c_irq_params);
+ }
+
+ return 0;
+}
+
+/*
+ * Acquires the lock for the atomic state object and returns
+ * the new atomic state.
+ *
+ * This should only be called during atomic check.
+ */
+int dm_atomic_get_state(struct drm_atomic_state *state,
+ struct dm_atomic_state **dm_state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_private_state *priv_state;
+
+ if (*dm_state)
+ return 0;
+
+ priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
+ if (IS_ERR(priv_state))
+ return PTR_ERR(priv_state);
+
+ *dm_state = to_dm_atomic_state(priv_state);
+
+ return 0;
+}
+
+static struct dm_atomic_state *
+dm_atomic_get_new_state(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_private_obj *obj;
+ struct drm_private_state *new_obj_state;
+ int i;
+
+ for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
+ if (obj->funcs == dm->atomic_obj.funcs)
+ return to_dm_atomic_state(new_obj_state);
+ }
+
+ return NULL;
+}
+
+static struct drm_private_state *
+dm_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct dm_atomic_state *old_state, *new_state;
+
+ new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
+ if (!new_state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
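+	/* The helper duplicates only the base private state; the DC context must be deep-copied separately below. */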
+
+ old_state = to_dm_atomic_state(obj->state);
+
+ if (old_state && old_state->context)
+ new_state->context = dc_state_create_copy(old_state->context);
+
+ if (!new_state->context) {
+ kfree(new_state);
+ return NULL;
+ }
+
+ return &new_state->base;
+}
+
+static void dm_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
+
+ if (dm_state && dm_state->context)
+ dc_state_release(dm_state->context);
+
+ kfree(dm_state);
+}
+
+static struct drm_private_state_funcs dm_atomic_state_funcs = {
+ .atomic_duplicate_state = dm_atomic_duplicate_state,
+ .atomic_destroy_state = dm_atomic_destroy_state,
+};
+
+static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
+{
+ struct dm_atomic_state *state;
+ int r;
+
+ adev->mode_info.mode_config_initialized = true;
+
+ adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
+ adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
+
+ adev_to_drm(adev)->mode_config.max_width = 16384;
+ adev_to_drm(adev)->mode_config.max_height = 16384;
+
+ adev_to_drm(adev)->mode_config.preferred_depth = 24;
+ if (adev->asic_type == CHIP_HAWAII)
+ /* disable prefer shadow for now due to hibernation issues */
+ adev_to_drm(adev)->mode_config.prefer_shadow = 0;
+ else
+ adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+ /* indicates support for immediate flip */
+ adev_to_drm(adev)->mode_config.async_page_flip = true;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->context = dc_state_create_current_copy(adev->dm.dc);
+ if (!state->context) {
+ kfree(state);
+ return -ENOMEM;
+ }
+
+ drm_atomic_private_obj_init(adev_to_drm(adev),
+ &adev->dm.atomic_obj,
+ &state->base,
+ &dm_atomic_state_funcs);
+
+ r = amdgpu_display_modeset_create_props(adev);
+ if (r) {
+ dc_state_release(state->context);
+ kfree(state);
+ return r;
+ }
+
+#ifdef AMD_PRIVATE_COLOR
+ if (amdgpu_dm_create_color_properties(adev))
+ return -ENOMEM;
+#endif
+
+ r = amdgpu_dm_audio_init(adev);
+ if (r) {
+ dc_state_release(state->context);
+ kfree(state);
+ return r;
+ }
+
+ return 0;
+}
+
+#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
+#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
+#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
+
+static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
+ int bl_idx)
+{
+#if defined(CONFIG_ACPI)
+ struct amdgpu_dm_backlight_caps caps;
+
+ memset(&caps, 0, sizeof(caps));
+
+ if (dm->backlight_caps[bl_idx].caps_valid)
+ return;
+
+ amdgpu_acpi_get_backlight_caps(&caps);
+ if (caps.caps_valid) {
+ dm->backlight_caps[bl_idx].caps_valid = true;
+ if (caps.aux_support)
+ return;
+ dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
+ dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
+ } else {
+ dm->backlight_caps[bl_idx].min_input_signal =
+ AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ dm->backlight_caps[bl_idx].max_input_signal =
+ AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+ }
+#else
+ if (dm->backlight_caps[bl_idx].aux_support)
+ return;
+
+ dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
+ dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
+#endif
+}
+
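+/* Returns 0 if @caps is NULL; otherwise fills *min and *max and returns 1. */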
+static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
+ unsigned int *min, unsigned int *max)
+{
+ if (!caps)
+ return 0;
+
+ if (caps->aux_support) {
+ // Firmware limits are in nits, DC API wants millinits.
+ *max = 1000 * caps->aux_max_input_signal;
+ *min = 1000 * caps->aux_min_input_signal;
+ } else {
+ // Firmware limits are 8-bit, PWM control is 16-bit.
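+		// 0x101 = 257 maps the 8-bit maximum exactly onto the 16-bit maximum (0xFF * 0x101 == 0xFFFF).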
+ *max = 0x101 * caps->max_input_signal;
+ *min = 0x101 * caps->min_input_signal;
+ }
+ return 1;
+}
+
+static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
+ uint32_t brightness)
+{
+ unsigned int min, max;
+
+ if (!get_brightness_range(caps, &min, &max))
+ return brightness;
+
+ // Rescale 0..255 to min..max
+ return min + DIV_ROUND_CLOSEST((max - min) * brightness,
+ AMDGPU_MAX_BL_LEVEL);
+}
+
+static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
+ uint32_t brightness)
+{
+ unsigned int min, max;
+
+ if (!get_brightness_range(caps, &min, &max))
+ return brightness;
+
+ if (brightness < min)
+ return 0;
+ // Rescale min..max to 0..255
+ return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
+ max - min);
+}
+
+static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+ int bl_idx,
+ u32 user_brightness)
+{
+ struct amdgpu_dm_backlight_caps caps;
+ struct dc_link *link;
+ u32 brightness;
+ bool rc;
+
+ amdgpu_dm_update_backlight_caps(dm, bl_idx);
+ caps = dm->backlight_caps[bl_idx];
+
+ dm->brightness[bl_idx] = user_brightness;
+ /* update scratch register */
+ if (bl_idx == 0)
+ amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
+ brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
+ link = (struct dc_link *)dm->backlight_link[bl_idx];
+
+ /* Change brightness based on AUX property */
+ if (caps.aux_support) {
+ rc = dc_link_set_backlight_level_nits(link, true, brightness,
+ AUX_BL_DEFAULT_TRANSITION_TIME_MS);
+ if (!rc)
+ DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
+ } else {
+ rc = dc_link_set_backlight_level(link, brightness, 0);
+ if (!rc)
+ DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
+ }
+
+ if (rc)
+ dm->actual_brightness[bl_idx] = user_brightness;
+}
+
+static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+{
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+ int i;
+
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (bd == dm->backlight_dev[i])
+ break;
+ }
+ if (i >= AMDGPU_DM_MAX_NUM_EDP)
+ i = 0;
+ amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
+
+ return 0;
+}
+
+static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
+ int bl_idx)
+{
+ int ret;
+ struct amdgpu_dm_backlight_caps caps;
+ struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
+
+ amdgpu_dm_update_backlight_caps(dm, bl_idx);
+ caps = dm->backlight_caps[bl_idx];
+
+ if (caps.aux_support) {
+ u32 avg, peak;
+ bool rc;
+
+ rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
+ if (!rc)
+ return dm->brightness[bl_idx];
+ return convert_brightness_to_user(&caps, avg);
+ }
+
+ ret = dc_link_get_backlight_level(link);
+
+ if (ret == DC_ERROR_UNEXPECTED)
+ return dm->brightness[bl_idx];
+
+ return convert_brightness_to_user(&caps, ret);
+}
+
+static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+{
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+ int i;
+
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (bd == dm->backlight_dev[i])
+ break;
+ }
+ if (i >= AMDGPU_DM_MAX_NUM_EDP)
+ i = 0;
+ return amdgpu_dm_backlight_get_level(dm, i);
+}
+
+static const struct backlight_ops amdgpu_dm_backlight_ops = {
+ .options = BL_CORE_SUSPENDRESUME,
+ .get_brightness = amdgpu_dm_backlight_get_brightness,
+ .update_status = amdgpu_dm_backlight_update_status,
+};
+
+static void
+amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_device *drm = aconnector->base.dev;
+ struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
+ struct backlight_properties props = { 0 };
+ char bl_name[16];
+
+ if (aconnector->bl_idx == -1)
+ return;
+
+ if (!acpi_video_backlight_use_native()) {
+ drm_info(drm, "Skipping amdgpu DM backlight registration\n");
+ /* Try registering an ACPI video backlight device instead. */
+ acpi_video_register_backlight();
+ return;
+ }
+
+ props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+ props.brightness = AMDGPU_MAX_BL_LEVEL;
+ props.type = BACKLIGHT_RAW;
+
+ snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
+ drm->primary->index + aconnector->bl_idx);
+
+ dm->backlight_dev[aconnector->bl_idx] =
+ backlight_device_register(bl_name, aconnector->base.kdev, dm,
+ &amdgpu_dm_backlight_ops, &props);
+
+ if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
+ DRM_ERROR("DM: Backlight registration failed!\n");
+ dm->backlight_dev[aconnector->bl_idx] = NULL;
+	} else {
+		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
+	}
+}
+
+static int initialize_plane(struct amdgpu_display_manager *dm,
+ struct amdgpu_mode_info *mode_info, int plane_id,
+ enum drm_plane_type plane_type,
+ const struct dc_plane_cap *plane_cap)
+{
+ struct drm_plane *plane;
+ unsigned long possible_crtcs;
+ int ret = 0;
+
+ plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
+ if (!plane) {
+ DRM_ERROR("KMS: Failed to allocate plane\n");
+ return -ENOMEM;
+ }
+ plane->type = plane_type;
+
+ /*
+	 * HACK: IGT tests expect that a CRTC's primary plane reports only
+	 * that one CRTC as possible. Only expose support for all CRTCs on
+	 * planes that will not be used as a primary plane for a CRTC, i.e.
+	 * overlay or underlay planes.
+ */
+ possible_crtcs = 1 << plane_id;
+ if (plane_id >= dm->dc->caps.max_streams)
+ possible_crtcs = 0xff;
+
+ ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
+
+ if (ret) {
+ DRM_ERROR("KMS: Failed to initialize plane\n");
+ kfree(plane);
+ return ret;
+ }
+
+ if (mode_info)
+ mode_info->planes[plane_id] = plane;
+
+ return ret;
+}
+
+static void setup_backlight_device(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_link *link = aconnector->dc_link;
+ int bl_idx = dm->num_of_edps;
+
+ if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
+ link->type == dc_connection_none)
+ return;
+
+ if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
+		drm_warn(adev_to_drm(dm->adev), "Too many eDP connections, skipping backlight setup for additional eDPs\n");
+ return;
+ }
+
+ aconnector->bl_idx = bl_idx;
+
+ amdgpu_dm_update_backlight_caps(dm, bl_idx);
+ dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
+ dm->backlight_link[bl_idx] = link;
+ dm->num_of_edps++;
+
+ update_connector_ext_caps(aconnector);
+}
+
+static void amdgpu_set_panel_orientation(struct drm_connector *connector);
+
+/*
+ * In this architecture, the association
+ * connector -> encoder -> crtc
+ * is not really required. The crtc and connector will hold the
+ * display_index as an abstraction to use with the DAL component.
+ *
+ * Returns 0 on success.
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+{
+ struct amdgpu_display_manager *dm = &adev->dm;
+ s32 i;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct amdgpu_encoder *aencoder = NULL;
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ u32 link_cnt;
+ s32 primary_planes;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ const struct dc_plane_cap *plane;
+ bool psr_feature_enabled = false;
+ bool replay_feature_enabled = false;
+ int max_overlay = dm->dc->caps.max_slave_planes;
+
+ dm->display_indexes_num = dm->dc->caps.max_streams;
+ /* Update the actual used number of crtc */
+ adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+
+ amdgpu_dm_set_irq_funcs(adev);
+
+ link_cnt = dm->dc->caps.max_links;
+ if (amdgpu_dm_mode_config_init(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize mode config\n");
+ return -EINVAL;
+ }
+
+ /* There is one primary plane per CRTC */
+ primary_planes = dm->dc->caps.max_streams;
+ ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
+
+ /*
+ * Initialize primary planes, implicit planes for legacy IOCTLS.
+ * Order is reversed to match iteration order in atomic check.
+ */
+ for (i = (primary_planes - 1); i >= 0; i--) {
+ plane = &dm->dc->caps.planes[i];
+
+ if (initialize_plane(dm, mode_info, i,
+ DRM_PLANE_TYPE_PRIMARY, plane)) {
+ DRM_ERROR("KMS: Failed to initialize primary plane\n");
+ goto fail;
+ }
+ }
+
+ /*
+ * Initialize overlay planes, index starting after primary planes.
+ * These planes have a higher DRM index than the primary planes since
+ * they should be considered as having a higher z-order.
+ * Order is reversed to match iteration order in atomic check.
+ *
+ * Only support DCN for now, and only expose one so we don't encourage
+ * userspace to use up all the pipes.
+ */
+ for (i = 0; i < dm->dc->caps.max_planes; ++i) {
+ struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
+
+ /* Do not create overlay if MPO disabled */
+ if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
+ break;
+
+ if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
+ continue;
+
+ if (!plane->pixel_format_support.argb8888)
+ continue;
+
+ if (max_overlay-- == 0)
+ break;
+
+ if (initialize_plane(dm, NULL, primary_planes + i,
+ DRM_PLANE_TYPE_OVERLAY, plane)) {
+ DRM_ERROR("KMS: Failed to initialize overlay plane\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < dm->dc->caps.max_streams; i++)
+ if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
+ DRM_ERROR("KMS: Failed to initialize crtc\n");
+ goto fail;
+ }
+
+ /* Use Outbox interrupt */
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ if (register_outbox_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+ default:
+ DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
+ amdgpu_ip_version(adev, DCE_HWIP, 0));
+ }
+
+ /* Determine whether to enable PSR support by default. */
+ if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ psr_feature_enabled = true;
+ break;
+ default:
+ psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
+ break;
+ }
+ }
+
+ /* Determine whether to enable Replay support by default. */
+ if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ replay_feature_enabled = true;
+ break;
+ default:
+ replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
+ break;
+ }
+ }
+
+ /* loops over all connectors on the board */
+ for (i = 0; i < link_cnt; i++) {
+ struct dc_link *link = NULL;
+
+ if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
+ DRM_ERROR(
+ "KMS: Cannot support more than %d display indexes\n",
+ AMDGPU_DM_MAX_DISPLAY_INDEX);
+ continue;
+ }
+
+ link = dc_get_link_at_index(dm->dc, i);
+
+ if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) {
+ struct amdgpu_dm_wb_connector *wbcon = kzalloc(sizeof(*wbcon), GFP_KERNEL);
+
+ if (!wbcon) {
+ DRM_ERROR("KMS: Failed to allocate writeback connector\n");
+ continue;
+ }
+
+ if (amdgpu_dm_wb_connector_init(dm, wbcon, i)) {
+ DRM_ERROR("KMS: Failed to initialize writeback connector\n");
+ kfree(wbcon);
+ continue;
+ }
+
+ link->psr_settings.psr_feature_enabled = false;
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
+ continue;
+ }
+
+ aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
+ if (!aconnector)
+ goto fail;
+
+ aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
+ if (!aencoder)
+ goto fail;
+
+ if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
+ DRM_ERROR("KMS: Failed to initialize encoder\n");
+ goto fail;
+ }
+
+ if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
+ DRM_ERROR("KMS: Failed to initialize connector\n");
+ goto fail;
+ }
+
+ if (dm->hpd_rx_offload_wq)
+ dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
+ aconnector;
+
+ if (!dc_link_detect_connection_type(link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+
+ if (aconnector->base.force && new_connection_type == dc_connection_none) {
+ emulated_link_detect(link);
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ } else {
+ bool ret = false;
+
+ mutex_lock(&dm->dc_lock);
+ ret = dc_link_detect(link, DETECT_REASON_BOOT);
+ mutex_unlock(&dm->dc_lock);
+
+ if (ret) {
+ amdgpu_dm_update_connector_after_detect(aconnector);
+ setup_backlight_device(dm, aconnector);
+
+ /* Disable PSR if Replay can be enabled */
+ if (replay_feature_enabled)
+ if (amdgpu_dm_set_replay_caps(link, aconnector))
+ psr_feature_enabled = false;
+
+ if (psr_feature_enabled)
+ amdgpu_dm_set_psr_caps(link);
+
+ /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
+ * PSR is also supported.
+ */
+ if (link->psr_settings.psr_feature_enabled)
+ adev_to_drm(adev)->vblank_disable_immediate = false;
+ }
+ }
+ amdgpu_set_panel_orientation(&aconnector->base);
+ }
+
+ /* Software is initialized. Now we can register interrupt handlers. */
+ switch (adev->asic_type) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ case CHIP_OLAND:
+ if (dce60_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+#endif
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
+ case CHIP_VEGAM:
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ if (dce110_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+ default:
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ if (dcn10_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+ default:
+			DRM_ERROR("Unsupported DCE IP version: 0x%X\n",
+ amdgpu_ip_version(adev, DCE_HWIP, 0));
+ goto fail;
+ }
+ break;
+ }
+
+ return 0;
+fail:
+ kfree(aencoder);
+ kfree(aconnector);
+
+ return -EINVAL;
+}
+
+static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
+{
+ drm_atomic_private_obj_fini(&dm->atomic_obj);
+}
+
+/******************************************************************************
+ * amdgpu_display_funcs functions
+ *****************************************************************************/
+
+/*
+ * dm_bandwidth_update - program display watermarks
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Calculate and program the display watermarks and line buffer allocation.
+ */
+static void dm_bandwidth_update(struct amdgpu_device *adev)
+{
+ /* TODO: implement later */
+}
+
+static const struct amdgpu_display_funcs dm_display_funcs = {
+ .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
+ .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
+ .backlight_set_level = NULL, /* never called for DC */
+ .backlight_get_level = NULL, /* never called for DC */
+ .hpd_sense = NULL,/* called unconditionally */
+ .hpd_set_polarity = NULL, /* called unconditionally */
+ .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
+ .page_flip_get_scanoutpos =
+ dm_crtc_get_scanoutpos,/* called unconditionally */
+ .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
+ .add_connector = NULL, /* VBIOS parsing. DAL does it. */
+};
+
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+
+static ssize_t s3_debug_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ int ret;
+ int s3_state;
+ struct drm_device *drm_dev = dev_get_drvdata(device);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+ ret = kstrtoint(buf, 0, &s3_state);
+
+ if (ret == 0) {
+ if (s3_state) {
+ dm_resume(adev);
+ drm_kms_helper_hotplug_event(adev_to_drm(adev));
+		} else {
+			dm_suspend(adev);
+		}
+ }
+
+ return ret == 0 ? count : 0;
+}
+
+DEVICE_ATTR_WO(s3_debug);
+
+#endif
+
+static int dm_init_microcode(struct amdgpu_device *adev)
+{
+ char *fw_name_dmub;
+ int r;
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 1, 0):
+ fw_name_dmub = FIRMWARE_RENOIR_DMUB;
+ if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
+ fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
+ break;
+ case IP_VERSION(3, 0, 0):
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))
+ fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
+ else
+ fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
+ break;
+ case IP_VERSION(3, 0, 1):
+ fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
+ break;
+ case IP_VERSION(3, 0, 2):
+ fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
+ break;
+ case IP_VERSION(3, 0, 3):
+ fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
+ break;
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
+ break;
+ case IP_VERSION(3, 1, 4):
+ fw_name_dmub = FIRMWARE_DCN_314_DMUB;
+ break;
+ case IP_VERSION(3, 1, 5):
+ fw_name_dmub = FIRMWARE_DCN_315_DMUB;
+ break;
+ case IP_VERSION(3, 1, 6):
+ fw_name_dmub = FIRMWARE_DCN316_DMUB;
+ break;
+ case IP_VERSION(3, 2, 0):
+ fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
+ break;
+ case IP_VERSION(3, 2, 1):
+ fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
+ break;
+ case IP_VERSION(3, 5, 0):
+ fw_name_dmub = FIRMWARE_DCN_35_DMUB;
+ break;
+ case IP_VERSION(3, 5, 1):
+ fw_name_dmub = FIRMWARE_DCN_351_DMUB;
+ break;
+ default:
+ /* ASIC doesn't support DMUB. */
+ return 0;
+ }
+ r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
+ return r;
+}
+
+static int dm_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ struct atom_context *ctx = mode_info->atom_context;
+ int index = GetIndexIntoMasterTable(DATA, Object_Header);
+ u16 data_offset;
+
+ /* if there is no object header, skip DM */
+ if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
+ dev_info(adev->dev, "No object header, skipping DM\n");
+ return -ENOENT;
+ }
+
+ switch (adev->asic_type) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_VERDE:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_OLAND:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 2;
+ adev->mode_info.num_dig = 2;
+ break;
+#endif
+ case CHIP_BONAIRE:
+ case CHIP_HAWAII:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_KAVERI:
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 7;
+ break;
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_FIJI:
+ case CHIP_TONGA:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 7;
+ break;
+ case CHIP_CARRIZO:
+ adev->mode_info.num_crtc = 3;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 9;
+ break;
+ case CHIP_STONEY:
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 9;
+ break;
+ case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
+ adev->mode_info.num_crtc = 5;
+ adev->mode_info.num_hpd = 5;
+ adev->mode_info.num_dig = 5;
+ break;
+ case CHIP_POLARIS10:
+ case CHIP_VEGAM:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ default:
+
+ switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(3, 0, 0):
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ adev->mode_info.num_crtc = 5;
+ adev->mode_info.num_hpd = 5;
+ adev->mode_info.num_dig = 5;
+ break;
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(3, 0, 3):
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 2;
+ adev->mode_info.num_dig = 2;
+ break;
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(3, 1, 4):
+ case IP_VERSION(3, 1, 5):
+ case IP_VERSION(3, 1, 6):
+ case IP_VERSION(3, 2, 0):
+ case IP_VERSION(3, 2, 1):
+ case IP_VERSION(3, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 4;
+ adev->mode_info.num_dig = 4;
+ break;
+ default:
+			DRM_ERROR("Unsupported DCE IP version: 0x%x\n",
+ amdgpu_ip_version(adev, DCE_HWIP, 0));
+ return -EINVAL;
+ }
+ break;
+ }
+
+ if (adev->mode_info.funcs == NULL)
+ adev->mode_info.funcs = &dm_display_funcs;
+
+ /*
+ * Note: Do NOT change adev->audio_endpt_rreg and
+ * adev->audio_endpt_wreg because they are initialised in
+ * amdgpu_device_init()
+ */
+#if defined(CONFIG_DEBUG_KERNEL_DC)
+ device_create_file(
+ adev_to_drm(adev)->dev,
+ &dev_attr_s3_debug);
+#endif
+ adev->dc_enabled = true;
+
+ return dm_init_microcode(adev);
+}
+
+static bool modereset_required(struct drm_crtc_state *crtc_state)
+{
+ return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
+}
+
+static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+ .destroy = amdgpu_dm_encoder_destroy,
+};
+
+static int
+fill_plane_color_attributes(const struct drm_plane_state *plane_state,
+ const enum surface_pixel_format format,
+ enum dc_color_space *color_space)
+{
+ bool full_range;
+
+ *color_space = COLOR_SPACE_SRGB;
+
+ /* DRM color properties only affect non-RGB formats. */
+ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return 0;
+
+ full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
+
+ switch (plane_state->color_encoding) {
+ case DRM_COLOR_YCBCR_BT601:
+ if (full_range)
+ *color_space = COLOR_SPACE_YCBCR601;
+ else
+ *color_space = COLOR_SPACE_YCBCR601_LIMITED;
+ break;
+
+ case DRM_COLOR_YCBCR_BT709:
+ if (full_range)
+ *color_space = COLOR_SPACE_YCBCR709;
+ else
+ *color_space = COLOR_SPACE_YCBCR709_LIMITED;
+ break;
+
+ case DRM_COLOR_YCBCR_BT2020:
+ if (full_range)
+ *color_space = COLOR_SPACE_2020_YCBCR;
+ else
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
+ const struct drm_plane_state *plane_state,
+ const u64 tiling_flags,
+ struct dc_plane_info *plane_info,
+ struct dc_plane_address *address,
+ bool tmz_surface,
+ bool force_disable_dcc)
+{
+ const struct drm_framebuffer *fb = plane_state->fb;
+ const struct amdgpu_framebuffer *afb =
+ to_amdgpu_framebuffer(plane_state->fb);
+ int ret;
+
+ memset(plane_info, 0, sizeof(*plane_info));
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ plane_info->format =
+ SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
+ break;
+ case DRM_FORMAT_RGB565:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
+ break;
+ case DRM_FORMAT_NV21:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
+ break;
+ case DRM_FORMAT_NV12:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
+ break;
+ case DRM_FORMAT_P010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
+ break;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
+ break;
+ case DRM_FORMAT_XRGB16161616:
+ case DRM_FORMAT_ARGB16161616:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
+ break;
+ case DRM_FORMAT_XBGR16161616:
+ case DRM_FORMAT_ABGR16161616:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
+ break;
+ default:
+ DRM_ERROR(
+ "Unsupported screen format %p4cc\n",
+ &fb->format->format);
+ return -EINVAL;
+ }
+
+ switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_0:
+ plane_info->rotation = ROTATION_ANGLE_0;
+ break;
+ case DRM_MODE_ROTATE_90:
+ plane_info->rotation = ROTATION_ANGLE_90;
+ break;
+ case DRM_MODE_ROTATE_180:
+ plane_info->rotation = ROTATION_ANGLE_180;
+ break;
+ case DRM_MODE_ROTATE_270:
+ plane_info->rotation = ROTATION_ANGLE_270;
+ break;
+ default:
+ plane_info->rotation = ROTATION_ANGLE_0;
+ break;
+ }
+
+ plane_info->visible = true;
+ plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
+
+ plane_info->layer_index = plane_state->normalized_zpos;
+
+ ret = fill_plane_color_attributes(plane_state, plane_info->format,
+ &plane_info->color_space);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format,
+ plane_info->rotation, tiling_flags,
+ &plane_info->tiling_info,
+ &plane_info->plane_size,
+ &plane_info->dcc, address,
+ tmz_surface, force_disable_dcc);
+ if (ret)
+ return ret;
+
+ amdgpu_dm_plane_fill_blending_from_plane_state(
+ plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
+ &plane_info->global_alpha, &plane_info->global_alpha_value);
+
+ return 0;
+}
+
+static int fill_dc_plane_attributes(struct amdgpu_device *adev,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
+ struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
+ struct dc_scaling_info scaling_info;
+ struct dc_plane_info plane_info;
+ int ret;
+ bool force_disable_dcc = false;
+
+ ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
+ if (ret)
+ return ret;
+
+ dc_plane_state->src_rect = scaling_info.src_rect;
+ dc_plane_state->dst_rect = scaling_info.dst_rect;
+ dc_plane_state->clip_rect = scaling_info.clip_rect;
+ dc_plane_state->scaling_quality = scaling_info.scaling_quality;
+
+ force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
+ ret = fill_dc_plane_info_and_addr(adev, plane_state,
+ afb->tiling_flags,
+ &plane_info,
+ &dc_plane_state->address,
+ afb->tmz_surface,
+ force_disable_dcc);
+ if (ret)
+ return ret;
+
+ dc_plane_state->format = plane_info.format;
+ dc_plane_state->color_space = plane_info.color_space;
+ dc_plane_state->plane_size = plane_info.plane_size;
+ dc_plane_state->rotation = plane_info.rotation;
+ dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
+ dc_plane_state->stereo_format = plane_info.stereo_format;
+ dc_plane_state->tiling_info = plane_info.tiling_info;
+ dc_plane_state->visible = plane_info.visible;
+ dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
+ dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
+ dc_plane_state->global_alpha = plane_info.global_alpha;
+ dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
+ dc_plane_state->dcc = plane_info.dcc;
+ dc_plane_state->layer_index = plane_info.layer_index;
+ dc_plane_state->flip_int_enabled = true;
+
+ /*
+ * Always set input transfer function, since plane state is refreshed
+ * every time.
+ */
+ ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state,
+ plane_state,
+ dc_plane_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static inline void fill_dc_dirty_rect(struct drm_plane *plane,
+ struct rect *dirty_rect, int32_t x,
+ s32 y, s32 width, s32 height,
+ int *i, bool ffu)
+{
+ WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
+
+ dirty_rect->x = x;
+ dirty_rect->y = y;
+ dirty_rect->width = width;
+ dirty_rect->height = height;
+
+ if (ffu)
+ drm_dbg(plane->dev,
+ "[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
+ plane->base.id, width, height);
+ else
+ drm_dbg(plane->dev,
+ "[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
+ plane->base.id, x, y, width, height);
+
+ (*i)++;
+}
+
+/**
+ * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
+ *
+ * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
+ * remote fb
+ * @old_plane_state: Old state of @plane
+ * @new_plane_state: New state of @plane
+ * @crtc_state: New state of CRTC connected to the @plane
+ * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
+ * @is_psr_su: Flag indicating whether Panel Self Refresh Selective Update (PSR SU) is enabled.
+ * If PSR SU is enabled and damage clips are available, only the regions of the screen
+ * that have changed will be updated. If PSR SU is not enabled,
+ * or if damage clips are not available, the entire screen will be updated.
+ * @dirty_regions_changed: set to true if the dirty regions have changed
+ *
+ * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
+ * (referred to as "damage clips" in DRM nomenclature) that require updating on
+ * the eDP remote buffer. Specifying the dirty regions is amdgpu_dm's
+ * responsibility.
+ *
+ * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
+ * plane with regions that require flushing to the eDP remote buffer. In
+ * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
+ * implicitly provide damage clips without any client support via the plane
+ * bounds.
+ */
+static void fill_dc_dirty_rects(struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state,
+ struct drm_crtc_state *crtc_state,
+ struct dc_flip_addrs *flip_addrs,
+ bool is_psr_su,
+ bool *dirty_regions_changed)
+{
+ struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
+ struct rect *dirty_rects = flip_addrs->dirty_rects;
+ u32 num_clips;
+ struct drm_mode_rect *clips;
+ bool bb_changed;
+ bool fb_changed;
+ u32 i = 0;
+ *dirty_regions_changed = false;
+
+ /*
+	 * Cursor plane has its own dirty rect update interface. See
+ * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
+ */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ return;
+
+ if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
+ goto ffu;
+
+ num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+ clips = drm_plane_get_damage_clips(new_plane_state);
+
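+	/*
+	 * Ignore the damage clips when they are disabled by the
+	 * amdgpu_damage_clips module parameter (0), or when the parameter
+	 * is negative (auto) and PSR-SU is in use.
+	 */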
+ if (num_clips && (!amdgpu_damage_clips || (amdgpu_damage_clips < 0 &&
+ is_psr_su)))
+ goto ffu;
+
+ if (!dm_crtc_state->mpo_requested) {
+ if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
+ goto ffu;
+
+ for (; flip_addrs->dirty_rect_count < num_clips; clips++)
+ fill_dc_dirty_rect(new_plane_state->plane,
+ &dirty_rects[flip_addrs->dirty_rect_count],
+ clips->x1, clips->y1,
+ clips->x2 - clips->x1, clips->y2 - clips->y1,
+ &flip_addrs->dirty_rect_count,
+ false);
+ return;
+ }
+
+ /*
+ * MPO is requested. Add entire plane bounding box to dirty rects if
+ * flipped to or damaged.
+ *
+ * If plane is moved or resized, also add old bounding box to dirty
+ * rects.
+ */
+ fb_changed = old_plane_state->fb->base.id !=
+ new_plane_state->fb->base.id;
+ bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
+ old_plane_state->crtc_y != new_plane_state->crtc_y ||
+ old_plane_state->crtc_w != new_plane_state->crtc_w ||
+ old_plane_state->crtc_h != new_plane_state->crtc_h);
+
+ drm_dbg(plane->dev,
+ "[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
+ new_plane_state->plane->base.id,
+ bb_changed, fb_changed, num_clips);
+
+ *dirty_regions_changed = bb_changed;
+
+ if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS)
+ goto ffu;
+
+ if (bb_changed) {
+ fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
+ new_plane_state->crtc_x,
+ new_plane_state->crtc_y,
+ new_plane_state->crtc_w,
+ new_plane_state->crtc_h, &i, false);
+
+ /* Add old plane bounding-box if plane is moved or resized */
+ fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
+ old_plane_state->crtc_x,
+ old_plane_state->crtc_y,
+ old_plane_state->crtc_w,
+ old_plane_state->crtc_h, &i, false);
+ }
+
+ if (num_clips) {
+ for (; i < num_clips; clips++)
+ fill_dc_dirty_rect(new_plane_state->plane,
+ &dirty_rects[i], clips->x1,
+ clips->y1, clips->x2 - clips->x1,
+ clips->y2 - clips->y1, &i, false);
+ } else if (fb_changed && !bb_changed) {
+ fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
+ new_plane_state->crtc_x,
+ new_plane_state->crtc_y,
+ new_plane_state->crtc_w,
+ new_plane_state->crtc_h, &i, false);
+ }
+
+ flip_addrs->dirty_rect_count = i;
+ return;
+
+ffu:
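+	/* Full-frame update: mark the entire visible CRTC area dirty. */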
+ fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
+ dm_crtc_state->base.mode.crtc_hdisplay,
+ dm_crtc_state->base.mode.crtc_vdisplay,
+ &flip_addrs->dirty_rect_count, true);
+}
+
+static void update_stream_scaling_settings(const struct drm_display_mode *mode,
+ const struct dm_connector_state *dm_state,
+ struct dc_stream_state *stream)
+{
+ enum amdgpu_rmx_type rmx_type;
+
+	struct rect src = { 0 }; /* viewport in composition space */
+ struct rect dst = { 0 }; /* stream addressable area */
+
+ /* no mode. nothing to be done */
+ if (!mode)
+ return;
+
+ /* Full screen scaling by default */
+ src.width = mode->hdisplay;
+ src.height = mode->vdisplay;
+ dst.width = stream->timing.h_addressable;
+ dst.height = stream->timing.v_addressable;
+
+ if (dm_state) {
+ rmx_type = dm_state->scaling;
+ if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
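+			/* Compare aspect ratios by cross-multiplying to avoid division. */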
+ if (src.width * dst.height <
+ src.height * dst.width) {
+ /* height needs less upscaling/more downscaling */
+ dst.width = src.width *
+ dst.height / src.height;
+ } else {
+ /* width needs less upscaling/more downscaling */
+ dst.height = src.height *
+ dst.width / src.width;
+ }
+ } else if (rmx_type == RMX_CENTER) {
+ dst = src;
+ }
+
+ dst.x = (stream->timing.h_addressable - dst.width) / 2;
+ dst.y = (stream->timing.v_addressable - dst.height) / 2;
+
+ if (dm_state->underscan_enable) {
+ dst.x += dm_state->underscan_hborder / 2;
+ dst.y += dm_state->underscan_vborder / 2;
+ dst.width -= dm_state->underscan_hborder;
+ dst.height -= dm_state->underscan_vborder;
+ }
+ }
+
+ stream->src = src;
+ stream->dst = dst;
+
+ DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
+ dst.x, dst.y, dst.width, dst.height);
+
+}
+
+static enum dc_color_depth
+convert_color_depth_from_display_info(const struct drm_connector *connector,
+ bool is_y420, int requested_bpc)
+{
+ u8 bpc;
+
+ if (is_y420) {
+ bpc = 8;
+
+ /* Cap display bpc based on HDMI 2.0 HF-VSDB */
+ if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
+ bpc = 16;
+ else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
+ bpc = 12;
+ else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
+ bpc = 10;
+ } else {
+ bpc = (uint8_t)connector->display_info.bpc;
+ /* Assume 8 bpc by default if no bpc is specified. */
+ bpc = bpc ? bpc : 8;
+ }
+
+ if (requested_bpc > 0) {
+ /*
+ * Cap display bpc based on the user requested value.
+ *
+		 * The value for state->max_bpc may not be correctly updated
+		 * depending on when the connector gets added to the state,
+ * or if this was called outside of atomic check, so it
+ * can't be used directly.
+ */
+ bpc = min_t(u8, bpc, requested_bpc);
+
+ /* Round down to the nearest even number. */
+ bpc = bpc - (bpc & 1);
+ }
+
+ switch (bpc) {
+ case 0:
+ /*
+		 * Temporary workaround: DRM doesn't parse color depth for
+		 * EDID revisions before 1.4.
+		 * TODO: Fix EDID parsing.
+ */
+ return COLOR_DEPTH_888;
+ case 6:
+ return COLOR_DEPTH_666;
+ case 8:
+ return COLOR_DEPTH_888;
+ case 10:
+ return COLOR_DEPTH_101010;
+ case 12:
+ return COLOR_DEPTH_121212;
+ case 14:
+ return COLOR_DEPTH_141414;
+ case 16:
+ return COLOR_DEPTH_161616;
+ default:
+ return COLOR_DEPTH_UNDEFINED;
+ }
+}
+
+static enum dc_aspect_ratio
+get_aspect_ratio(const struct drm_display_mode *mode_in)
+{
+ /* 1-1 mapping, since both enums follow the HDMI spec. */
+ return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
+}
+
+static enum dc_color_space
+get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
+ const struct drm_connector_state *connector_state)
+{
+ enum dc_color_space color_space = COLOR_SPACE_SRGB;
+
+ switch (connector_state->colorspace) {
+ case DRM_MODE_COLORIMETRY_BT601_YCC:
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space = COLOR_SPACE_YCBCR601_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR601;
+ break;
+ case DRM_MODE_COLORIMETRY_BT709_YCC:
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space = COLOR_SPACE_YCBCR709_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR709;
+ break;
+ case DRM_MODE_COLORIMETRY_OPRGB:
+ color_space = COLOR_SPACE_ADOBERGB;
+ break;
+ case DRM_MODE_COLORIMETRY_BT2020_RGB:
+ case DRM_MODE_COLORIMETRY_BT2020_YCC:
+ if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
+ color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
+ else
+ color_space = COLOR_SPACE_2020_YCBCR;
+ break;
+ case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
+ default:
+ if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
+ color_space = COLOR_SPACE_SRGB;
+ /*
+			 * 27.03 MHz (270300 in 100 Hz units) is the separation
+			 * point between HDTV and SDTV according to the HDMI
+			 * spec; use YCbCr709 above it and YCbCr601 below.
+ */
+ } else if (dc_crtc_timing->pix_clk_100hz > 270300) {
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space =
+ COLOR_SPACE_YCBCR709_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR709;
+ } else {
+ if (dc_crtc_timing->flags.Y_ONLY)
+ color_space =
+ COLOR_SPACE_YCBCR601_LIMITED;
+ else
+ color_space = COLOR_SPACE_YCBCR601;
+ }
+ break;
+ }
+
+ return color_space;
+}
+
+static enum display_content_type
+get_output_content_type(const struct drm_connector_state *connector_state)
+{
+ switch (connector_state->content_type) {
+ default:
+ case DRM_MODE_CONTENT_TYPE_NO_DATA:
+ return DISPLAY_CONTENT_TYPE_NO_DATA;
+ case DRM_MODE_CONTENT_TYPE_GRAPHICS:
+ return DISPLAY_CONTENT_TYPE_GRAPHICS;
+ case DRM_MODE_CONTENT_TYPE_PHOTO:
+ return DISPLAY_CONTENT_TYPE_PHOTO;
+ case DRM_MODE_CONTENT_TYPE_CINEMA:
+ return DISPLAY_CONTENT_TYPE_CINEMA;
+ case DRM_MODE_CONTENT_TYPE_GAME:
+ return DISPLAY_CONTENT_TYPE_GAME;
+ }
+}
+
+static bool adjust_colour_depth_from_display_info(
+ struct dc_crtc_timing *timing_out,
+ const struct drm_display_info *info)
+{
+ enum dc_color_depth depth = timing_out->display_color_depth;
+ int normalized_clk;
+
+ do {
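+		/* pix_clk_100hz is in 100 Hz units; dividing by 10 gives kHz, matching info->max_tmds_clock. */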
+ normalized_clk = timing_out->pix_clk_100hz / 10;
+ /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
+ if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ normalized_clk /= 2;
+ /* Adjusting pix clock following on HDMI spec based on colour depth */
+ switch (depth) {
+ case COLOR_DEPTH_888:
+ break;
+ case COLOR_DEPTH_101010:
+ normalized_clk = (normalized_clk * 30) / 24;
+ break;
+ case COLOR_DEPTH_121212:
+ normalized_clk = (normalized_clk * 36) / 24;
+ break;
+ case COLOR_DEPTH_161616:
+ normalized_clk = (normalized_clk * 48) / 24;
+ break;
+ default:
+ /* The above depths are the only ones valid for HDMI. */
+ return false;
+ }
+ if (normalized_clk <= info->max_tmds_clock) {
+ timing_out->display_color_depth = depth;
+ return true;
+ }
+ } while (--depth > COLOR_DEPTH_666);
+ return false;
+}
+
+static void fill_stream_properties_from_drm_display_mode(
+ struct dc_stream_state *stream,
+ const struct drm_display_mode *mode_in,
+ const struct drm_connector *connector,
+ const struct drm_connector_state *connector_state,
+ const struct dc_stream_state *old_stream,
+ int requested_bpc)
+{
+ struct dc_crtc_timing *timing_out = &stream->timing;
+ const struct drm_display_info *info = &connector->display_info;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct hdmi_vendor_infoframe hv_frame;
+ struct hdmi_avi_infoframe avi_frame;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ memset(&hv_frame, 0, sizeof(hv_frame));
+ memset(&avi_frame, 0, sizeof(avi_frame));
+
+ timing_out->h_border_left = 0;
+ timing_out->h_border_right = 0;
+ timing_out->v_border_top = 0;
+ timing_out->v_border_bottom = 0;
+ /* TODO: un-hardcode */
+ if (drm_mode_is_420_only(info, mode_in)
+ && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if (drm_mode_is_420_also(info, mode_in)
+ && aconnector
+ && aconnector->force_yuv420_output)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
+ && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
+ else
+ timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
+
+ timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
+ timing_out->display_color_depth = convert_color_depth_from_display_info(
+ connector,
+ (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
+ requested_bpc);
+ timing_out->scan_type = SCANNING_TYPE_NODATA;
+ timing_out->hdmi_vic = 0;
+
+ if (old_stream) {
+ timing_out->vic = old_stream->timing.vic;
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
+ } else {
+ timing_out->vic = drm_match_cea_mode(mode_in);
+ if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
+ timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
+ if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
+ timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
+ }
+
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
+ timing_out->vic = avi_frame.video_code;
+ drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
+ timing_out->hdmi_vic = hv_frame.vic;
+ }
+
+ if (aconnector && is_freesync_video_mode(mode_in, aconnector)) {
+ timing_out->h_addressable = mode_in->hdisplay;
+ timing_out->h_total = mode_in->htotal;
+ timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
+ timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
+ timing_out->v_total = mode_in->vtotal;
+ timing_out->v_addressable = mode_in->vdisplay;
+ timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
+ timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
+ timing_out->pix_clk_100hz = mode_in->clock * 10;
+ } else {
+ timing_out->h_addressable = mode_in->crtc_hdisplay;
+ timing_out->h_total = mode_in->crtc_htotal;
+ timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
+ timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
+ timing_out->v_total = mode_in->crtc_vtotal;
+ timing_out->v_addressable = mode_in->crtc_vdisplay;
+ timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
+ timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
+ timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
+ }
+
+ timing_out->aspect_ratio = get_aspect_ratio(mode_in);
+
+ stream->out_transfer_func.type = TF_TYPE_PREDEFINED;
+ stream->out_transfer_func.tf = TRANSFER_FUNCTION_SRGB;
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ if (!adjust_colour_depth_from_display_info(timing_out, info) &&
+ drm_mode_is_420_also(info, mode_in) &&
+ timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ adjust_colour_depth_from_display_info(timing_out, info);
+ }
+ }
+
+ stream->output_color_space = get_output_color_space(timing_out, connector_state);
+ stream->content_type = get_output_content_type(connector_state);
+}
+
+static void fill_audio_info(struct audio_info *audio_info,
+ const struct drm_connector *drm_connector,
+ const struct dc_sink *dc_sink)
+{
+ int i = 0;
+ int cea_revision = 0;
+ const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
+
+ audio_info->manufacture_id = edid_caps->manufacturer_id;
+ audio_info->product_id = edid_caps->product_id;
+
+ cea_revision = drm_connector->display_info.cea_rev;
+
+ strscpy(audio_info->display_name,
+ edid_caps->display_name,
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
+
+ if (cea_revision >= 3) {
+ audio_info->mode_count = edid_caps->audio_mode_count;
+
+ for (i = 0; i < audio_info->mode_count; ++i) {
+ audio_info->modes[i].format_code =
+ (enum audio_format_code)
+ (edid_caps->audio_modes[i].format_code);
+ audio_info->modes[i].channel_count =
+ edid_caps->audio_modes[i].channel_count;
+ audio_info->modes[i].sample_rates.all =
+ edid_caps->audio_modes[i].sample_rate;
+ audio_info->modes[i].sample_size =
+ edid_caps->audio_modes[i].sample_size;
+ }
+ }
+
+ audio_info->flags.all = edid_caps->speaker_flags;
+
+	/* TODO: We only check the progressive mode; check the interlaced mode too */
+ if (drm_connector->latency_present[0]) {
+ audio_info->video_latency = drm_connector->video_latency[0];
+ audio_info->audio_latency = drm_connector->audio_latency[0];
+ }
+
+ /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
+}
+
+static void
+copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
+ struct drm_display_mode *dst_mode)
+{
+ dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
+ dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
+ dst_mode->crtc_clock = src_mode->crtc_clock;
+ dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
+ dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
+ dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
+ dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
+ dst_mode->crtc_htotal = src_mode->crtc_htotal;
+ dst_mode->crtc_hskew = src_mode->crtc_hskew;
+ dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
+ dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
+ dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
+ dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
+ dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
+}
+
+static void
+decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
+ const struct drm_display_mode *native_mode,
+ bool scale_enabled)
+{
+ if (scale_enabled) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else if (native_mode->clock == drm_mode->clock &&
+ native_mode->htotal == drm_mode->htotal &&
+ native_mode->vtotal == drm_mode->vtotal) {
+ copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+ } else {
+		/* no scaling and no amdgpu-inserted mode: nothing to patch */
+ }
+}
+
+static struct dc_sink *
+create_fake_sink(struct dc_link *link)
+{
+ struct dc_sink_init_data sink_init_data = { 0 };
+ struct dc_sink *sink = NULL;
+
+ sink_init_data.link = link;
+ sink_init_data.sink_signal = link->connector_signal;
+
+ sink = dc_sink_create(&sink_init_data);
+ if (!sink) {
+ DRM_ERROR("Failed to create sink!\n");
+ return NULL;
+ }
+ sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
+
+ return sink;
+}
+
+static void set_multisync_trigger_params(
+ struct dc_stream_state *stream)
+{
+ struct dc_stream_state *master = NULL;
+
+ if (stream->triggered_crtc_reset.enabled) {
+ master = stream->triggered_crtc_reset.event_source;
+ stream->triggered_crtc_reset.event =
+ master->timing.flags.VSYNC_POSITIVE_POLARITY ?
+ CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
+ stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
+ }
+}
+
+static void set_master_stream(struct dc_stream_state *stream_set[],
+ int stream_count)
+{
+ int j, highest_rfr = 0, master_stream = 0;
+
+ for (j = 0; j < stream_count; j++) {
+ if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
+ int refresh_rate = 0;
+
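+			/*
+			 * e.g. with illustrative numbers: pix_clk_100hz =
+			 * 1485000 (148.5 MHz) over a 2200x1125 total gives
+			 * (1485000 * 100) / (2200 * 1125) = 60 Hz.
+			 */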
+ refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
+ (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
+ if (refresh_rate > highest_rfr) {
+ highest_rfr = refresh_rate;
+ master_stream = j;
+ }
+ }
+ }
+ for (j = 0; j < stream_count; j++) {
+ if (stream_set[j])
+ stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
+ }
+}
+
+static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
+{
+ int i = 0;
+ struct dc_stream_state *stream;
+
+ if (context->stream_count < 2)
+ return;
+ for (i = 0; i < context->stream_count ; i++) {
+ if (!context->streams[i])
+ continue;
+		/*
+		 * TODO: add a function to read the AMD VSDB bits and set the
+		 * crtc_sync_master.multi_sync_enabled flag. For now it's set
+		 * to false.
+		 */
+ }
+
+ set_master_stream(context->streams, context->stream_count);
+
+ for (i = 0; i < context->stream_count ; i++) {
+ stream = context->streams[i];
+
+ if (!stream)
+ continue;
+
+ set_multisync_trigger_params(stream);
+ }
+}
+
+/**
+ * DOC: FreeSync Video
+ *
+ * When a userspace application wants to play a video, the content follows a
+ * standard format definition that usually specifies the FPS for that format.
+ * The list below illustrates some video formats and their expected FPS:
+ *
+ * - TV/NTSC (23.976 FPS)
+ * - Cinema (24 FPS)
+ * - TV/PAL (25 FPS)
+ * - TV/NTSC (29.97 FPS)
+ * - TV/NTSC (30 FPS)
+ * - Cinema HFR (48 FPS)
+ * - TV/PAL (50 FPS)
+ * - Commonly used (60 FPS)
+ * - Multiples of 24 (48, 72, 96, 120 FPS)
+ *
+ * The list of standard video formats is not huge and can be added to the
+ * connector's mode list beforehand. With that, userspace can leverage
+ * FreeSync to extend the front porch in order to attain the target refresh
+ * rate. Such a switch happens seamlessly, without screen blanking or
+ * reprogramming of the output in any other way. If userspace requests a
+ * modesetting change that is compatible with a FreeSync mode and differs
+ * only in refresh rate, DC will skip the full update and avoid any blink
+ * during the transition. For example, a video player can switch from 60Hz
+ * to 30Hz for playing TV/NTSC content when it goes full screen without
+ * causing any display blink.
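+ *
+ * As an illustrative example (the numbers are assumed for the sake of the
+ * arithmetic, not taken from any particular panel): a 1920x1080 mode with a
+ * 148.5 MHz pixel clock and a 2200x1125 total refreshes at
+ * 148500000 / (2200 * 1125) = 60 Hz. Stretching the front porch so that
+ * vtotal becomes 2250 halves this to 30 Hz, while the pixel clock, and with
+ * it the link configuration, stays untouched.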
+ */
+static struct drm_display_mode *
+get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
+ bool use_probed_modes)
+{
+ struct drm_display_mode *m, *m_pref = NULL;
+ u16 current_refresh, highest_refresh;
+ struct list_head *list_head = use_probed_modes ?
+ &aconnector->base.probed_modes :
+ &aconnector->base.modes;
+
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return NULL;
+
+ if (aconnector->freesync_vid_base.clock != 0)
+ return &aconnector->freesync_vid_base;
+
+ /* Find the preferred mode */
+ list_for_each_entry(m, list_head, head) {
+ if (m->type & DRM_MODE_TYPE_PREFERRED) {
+ m_pref = m;
+ break;
+ }
+ }
+
+ if (!m_pref) {
+		/* Probably an EDID with no preferred mode. Fall back to the first entry. */
+ m_pref = list_first_entry_or_null(
+ &aconnector->base.modes, struct drm_display_mode, head);
+ if (!m_pref) {
+ DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
+ return NULL;
+ }
+ }
+
+ highest_refresh = drm_mode_vrefresh(m_pref);
+
+	/*
+	 * Find the mode with the highest refresh rate at the same resolution.
+	 * For some monitors, the preferred mode is not the one with the
+	 * highest supported refresh rate.
+	 */
+ list_for_each_entry(m, list_head, head) {
+ current_refresh = drm_mode_vrefresh(m);
+
+ if (m->hdisplay == m_pref->hdisplay &&
+ m->vdisplay == m_pref->vdisplay &&
+ highest_refresh < current_refresh) {
+ highest_refresh = current_refresh;
+ m_pref = m;
+ }
+ }
+
+ drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
+ return m_pref;
+}
+
+static bool is_freesync_video_mode(const struct drm_display_mode *mode,
+ struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_display_mode *high_mode;
+ int timing_diff;
+
+ high_mode = get_highest_refresh_rate_mode(aconnector, false);
+ if (!high_mode || !mode)
+ return false;
+
+ timing_diff = high_mode->vtotal - mode->vtotal;
+
+ if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
+ high_mode->hdisplay != mode->hdisplay ||
+ high_mode->vdisplay != mode->vdisplay ||
+ high_mode->hsync_start != mode->hsync_start ||
+ high_mode->hsync_end != mode->hsync_end ||
+ high_mode->htotal != mode->htotal ||
+ high_mode->hskew != mode->hskew ||
+ high_mode->vscan != mode->vscan ||
+ high_mode->vsync_start - mode->vsync_start != timing_diff ||
+ high_mode->vsync_end - mode->vsync_end != timing_diff)
+ return false;
+ else
+ return true;
+}
+
+static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
+ struct dc_sink *sink, struct dc_stream_state *stream,
+ struct dsc_dec_dpcd_caps *dsc_caps)
+{
+ stream->timing.flags.DSC = 0;
+ dsc_caps->is_dsc_supported = false;
+
+ if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ sink->sink_signal == SIGNAL_TYPE_EDP)) {
+ if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
+ sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+ dsc_caps);
+ }
+}
+
+
+static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
+ struct dc_sink *sink, struct dc_stream_state *stream,
+ struct dsc_dec_dpcd_caps *dsc_caps,
+ uint32_t max_dsc_target_bpp_limit_override)
+{
+ const struct dc_link_settings *verified_link_cap = NULL;
+ u32 link_bw_in_kbps;
+ u32 edp_min_bpp_x16, edp_max_bpp_x16;
+ struct dc *dc = sink->ctx->dc;
+ struct dc_dsc_bw_range bw_range = {0};
+ struct dc_dsc_config dsc_cfg = {0};
+ struct dc_dsc_config_options dsc_options = {0};
+
+ dc_dsc_get_default_config_option(dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
+
+ verified_link_cap = dc_link_get_link_cap(stream->link);
+ link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
+ edp_min_bpp_x16 = 8 * 16;
+ edp_max_bpp_x16 = 8 * 16;
+
+ if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
+ edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
+
+ if (edp_max_bpp_x16 < edp_min_bpp_x16)
+ edp_min_bpp_x16 = edp_max_bpp_x16;
+
+ if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
+ dc->debug.dsc_min_slice_height_override,
+ edp_min_bpp_x16, edp_max_bpp_x16,
+ dsc_caps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &bw_range)) {
+
+ if (bw_range.max_kbps < link_bw_in_kbps) {
+ if (dc_dsc_compute_config(dc->res_pool->dscs[0],
+ dsc_caps,
+ &dsc_options,
+ 0,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &dsc_cfg)) {
+ stream->timing.dsc_cfg = dsc_cfg;
+ stream->timing.flags.DSC = 1;
+ stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
+ }
+ return;
+ }
+ }
+
+ if (dc_dsc_compute_config(dc->res_pool->dscs[0],
+ dsc_caps,
+ &dsc_options,
+ link_bw_in_kbps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &dsc_cfg)) {
+ stream->timing.dsc_cfg = dsc_cfg;
+ stream->timing.flags.DSC = 1;
+ }
+}
+
+
+static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
+ struct dc_sink *sink, struct dc_stream_state *stream,
+ struct dsc_dec_dpcd_caps *dsc_caps)
+{
+ struct drm_connector *drm_connector = &aconnector->base;
+ u32 link_bandwidth_kbps;
+ struct dc *dc = sink->ctx->dc;
+ u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
+ u32 dsc_max_supported_bw_in_kbps;
+ u32 max_dsc_target_bpp_limit_override =
+ drm_connector->display_info.max_dsc_bpp;
+ struct dc_dsc_config_options dsc_options = {0};
+
+ dc_dsc_get_default_config_option(dc, &dsc_options);
+ dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
+
+ link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
+ dc_link_get_link_cap(aconnector->dc_link));
+
+ /* Set DSC policy according to dsc_clock_en */
+ dc_dsc_policy_set_enable_dsc_when_not_needed(
+ aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
+
+ if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
+ !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
+ dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
+
+ apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
+
+ } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+ dsc_caps,
+ &dsc_options,
+ link_bandwidth_kbps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &stream->timing.dsc_cfg)) {
+ stream->timing.flags.DSC = 1;
+ DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
+ }
+ } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
+ timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link));
+ max_supported_bw_in_kbps = link_bandwidth_kbps;
+ dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
+
+ if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
+ max_supported_bw_in_kbps > 0 &&
+ dsc_max_supported_bw_in_kbps > 0)
+ if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+ dsc_caps,
+ &dsc_options,
+ dsc_max_supported_bw_in_kbps,
+ &stream->timing,
+ dc_link_get_highest_encoding_format(aconnector->dc_link),
+ &stream->timing.dsc_cfg)) {
+ stream->timing.flags.DSC = 1;
+ DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
+ __func__, drm_connector->name);
+ }
+ }
+ }
+
+ /* Overwrite the stream flag if DSC is enabled through debugfs */
+ if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
+ stream->timing.flags.DSC = 1;
+
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
+ stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
+
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
+ stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
+
+ if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
+ stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
+}
+
+static struct dc_stream_state *
+create_stream_for_sink(struct drm_connector *connector,
+ const struct drm_display_mode *drm_mode,
+ const struct dm_connector_state *dm_state,
+ const struct dc_stream_state *old_stream,
+ int requested_bpc)
+{
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct drm_display_mode *preferred_mode = NULL;
+ const struct drm_connector_state *con_state = &dm_state->base;
+ struct dc_stream_state *stream = NULL;
+ struct drm_display_mode mode;
+ struct drm_display_mode saved_mode;
+ struct drm_display_mode *freesync_mode = NULL;
+ bool native_mode_found = false;
+ bool recalculate_timing = false;
+ bool scale = dm_state->scaling != RMX_OFF;
+ int mode_refresh;
+ int preferred_refresh = 0;
+ enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
+ struct dsc_dec_dpcd_caps dsc_caps;
+
+ struct dc_link *link = NULL;
+ struct dc_sink *sink = NULL;
+
+ drm_mode_init(&mode, drm_mode);
+ memset(&saved_mode, 0, sizeof(saved_mode));
+
+ if (connector == NULL) {
+ DRM_ERROR("connector is NULL!\n");
+ return stream;
+ }
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) {
+		aconnector = to_amdgpu_dm_connector(connector);
+ link = aconnector->dc_link;
+ } else {
+ struct drm_writeback_connector *wbcon = NULL;
+ struct amdgpu_dm_wb_connector *dm_wbcon = NULL;
+
+ wbcon = drm_connector_to_writeback(connector);
+ dm_wbcon = to_amdgpu_dm_wb_connector(wbcon);
+ link = dm_wbcon->link;
+ }
+
+ if (!aconnector || !aconnector->dc_sink) {
+ sink = create_fake_sink(link);
+ if (!sink)
+ return stream;
+
+ } else {
+ sink = aconnector->dc_sink;
+ dc_sink_retain(sink);
+ }
+
+ stream = dc_create_stream_for_sink(sink);
+
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ goto finish;
+ }
+
+ /* We leave this NULL for writeback connectors */
+ stream->dm_stream_context = aconnector;
+
+ stream->timing.flags.LTE_340MCSC_SCRAMBLE =
+ connector->display_info.hdmi.scdc.scrambling.low_rates;
+
+ list_for_each_entry(preferred_mode, &connector->modes, head) {
+ /* Search for preferred mode */
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
+ native_mode_found = true;
+ break;
+ }
+ }
+ if (!native_mode_found)
+ preferred_mode = list_first_entry_or_null(
+ &connector->modes,
+ struct drm_display_mode,
+ head);
+
+ mode_refresh = drm_mode_vrefresh(&mode);
+
+ if (preferred_mode == NULL) {
+		/*
+		 * This may not be an error: the use case is when there are no
+		 * usermode calls to reset and set the mode upon hotplug. In
+		 * that case, we set the mode ourselves to restore the previous
+		 * mode, and the mode list may not be filled in time.
+		 */
+ DRM_DEBUG_DRIVER("No preferred mode found\n");
+ } else if (aconnector) {
+ recalculate_timing = amdgpu_freesync_vid_mode &&
+ is_freesync_video_mode(&mode, aconnector);
+ if (recalculate_timing) {
+ freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
+ drm_mode_copy(&saved_mode, &mode);
+ saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio;
+ drm_mode_copy(&mode, freesync_mode);
+ mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio;
+ } else {
+ decide_crtc_timing_for_drm_display_mode(
+ &mode, preferred_mode, scale);
+
+ preferred_refresh = drm_mode_vrefresh(preferred_mode);
+ }
+ }
+
+ if (recalculate_timing)
+ drm_mode_set_crtcinfo(&saved_mode, 0);
+
+	/*
+	 * If scaling is enabled and the refresh rate didn't change,
+	 * we copy the VIC and polarities of the old timings.
+	 */
+ if (!scale || mode_refresh != preferred_refresh)
+ fill_stream_properties_from_drm_display_mode(
+ stream, &mode, connector, con_state, NULL,
+ requested_bpc);
+ else
+ fill_stream_properties_from_drm_display_mode(
+ stream, &mode, connector, con_state, old_stream,
+ requested_bpc);
+
+ /* The rest isn't needed for writeback connectors */
+ if (!aconnector)
+ goto finish;
+
+ if (aconnector->timing_changed) {
+ drm_dbg(aconnector->base.dev,
+ "overriding timing for automated test, bpc %d, changing to %d\n",
+ stream->timing.display_color_depth,
+ aconnector->timing_requested->display_color_depth);
+ stream->timing = *aconnector->timing_requested;
+ }
+
+ /* SST DSC determination policy */
+ update_dsc_caps(aconnector, sink, stream, &dsc_caps);
+ if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
+ apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
+
+ update_stream_scaling_settings(&mode, dm_state, stream);
+
+ fill_audio_info(
+ &stream->audio_info,
+ connector,
+ sink);
+
+ update_stream_signal(stream, sink);
+
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
+
+	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+	    stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+	    stream->signal == SIGNAL_TYPE_EDP) {
+		//
+		// Decide whether the stream supports VSC SDP colorimetry
+		// before building the VSC info packet.
+		//
+ stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
+ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
+
+ if (stream->out_transfer_func.tf == TRANSFER_FUNCTION_GAMMA22)
+ tf = TRANSFER_FUNC_GAMMA_22;
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
+ aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+
+ }
+finish:
+ dc_sink_release(sink);
+
+ return stream;
+}
+
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+ bool connected;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ /*
+ * Notes:
+ * 1. This interface is NOT called in context of HPD irq.
+	 * 2. This interface *is called* in the context of a user-mode ioctl,
+	 * which makes it a bad place for *any* MST-related activity.
+ */
+
+ if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
+ !aconnector->fake_enable)
+ connected = (aconnector->dc_sink != NULL);
+ else
+ connected = (aconnector->base.force == DRM_FORCE_ON ||
+ aconnector->base.force == DRM_FORCE_ON_DIGITAL);
+
+ update_subconnector_property(aconnector);
+
+ return (connected ? connector_status_connected :
+ connector_status_disconnected);
+}
+
+int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *connector_state,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dm_connector_state *dm_old_state =
+ to_dm_connector_state(connector->state);
+ struct dm_connector_state *dm_new_state =
+ to_dm_connector_state(connector_state);
+
+ int ret = -EINVAL;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ enum amdgpu_rmx_type rmx_type;
+
+ switch (val) {
+ case DRM_MODE_SCALE_CENTER:
+ rmx_type = RMX_CENTER;
+ break;
+ case DRM_MODE_SCALE_ASPECT:
+ rmx_type = RMX_ASPECT;
+ break;
+ case DRM_MODE_SCALE_FULLSCREEN:
+ rmx_type = RMX_FULL;
+ break;
+ case DRM_MODE_SCALE_NONE:
+ default:
+ rmx_type = RMX_OFF;
+ break;
+ }
+
+ if (dm_old_state->scaling == rmx_type)
+ return 0;
+
+ dm_new_state->scaling = rmx_type;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_hborder_property) {
+ dm_new_state->underscan_hborder = val;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_vborder_property) {
+ dm_new_state->underscan_vborder = val;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_property) {
+ dm_new_state->underscan_enable = val;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val)
+{
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dm_connector_state *dm_state =
+ to_dm_connector_state(state);
+ int ret = -EINVAL;
+
+ if (property == dev->mode_config.scaling_mode_property) {
+ switch (dm_state->scaling) {
+ case RMX_CENTER:
+ *val = DRM_MODE_SCALE_CENTER;
+ break;
+ case RMX_ASPECT:
+ *val = DRM_MODE_SCALE_ASPECT;
+ break;
+ case RMX_FULL:
+ *val = DRM_MODE_SCALE_FULLSCREEN;
+ break;
+ case RMX_OFF:
+ default:
+ *val = DRM_MODE_SCALE_NONE;
+ break;
+ }
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_hborder_property) {
+ *val = dm_state->underscan_hborder;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_vborder_property) {
+ *val = dm_state->underscan_vborder;
+ ret = 0;
+ } else if (property == adev->mode_info.underscan_property) {
+ *val = dm_state->underscan_enable;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/**
+ * DOC: panel power savings
+ *
+ * The display manager allows you to set your desired **panel power savings**
+ * level (from 0 to 4, with 0 meaning off), e.g. using the following::
+ *
+ * # echo 3 > /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings
+ *
+ * Modifying this value can have implications on color accuracy, so tread
+ * carefully.
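+ *
+ * Reading the file back returns the level currently in the connector state,
+ * e.g.::
+ *
+ *   # cat /sys/class/drm/card0-eDP-1/amdgpu/panel_power_savings
+ *   3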
+ */
+
+static ssize_t panel_power_savings_show(struct device *device,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_connector *connector = dev_get_drvdata(device);
+ struct drm_device *dev = connector->dev;
+ u8 val;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ val = to_dm_connector_state(connector->state)->abm_level ==
+ ABM_LEVEL_IMMEDIATE_DISABLE ? 0 :
+ to_dm_connector_state(connector->state)->abm_level;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ return sysfs_emit(buf, "%u\n", val);
+}
+
+static ssize_t panel_power_savings_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct drm_connector *connector = dev_get_drvdata(device);
+ struct drm_device *dev = connector->dev;
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 0, &val);
+
+ if (ret)
+ return ret;
+
+ if (val < 0 || val > 4)
+ return -EINVAL;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ to_dm_connector_state(connector->state)->abm_level = val ?:
+ ABM_LEVEL_IMMEDIATE_DISABLE;
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ drm_kms_helper_hotplug_event(dev);
+
+ return count;
+}
+
+static DEVICE_ATTR_RW(panel_power_savings);
+
+static struct attribute *amdgpu_attrs[] = {
+ &dev_attr_panel_power_savings.attr,
+ NULL
+};
+
+static const struct attribute_group amdgpu_group = {
+ .name = "amdgpu",
+ .attrs = amdgpu_attrs
+};
+
+static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+ amdgpu_dm_abm_level < 0)
+ sysfs_remove_group(&connector->kdev->kobj, &amdgpu_group);
+
+ drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
+}
+
+static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+
+ /*
+ * Call only if mst_mgr was initialized before since it's not done
+ * for all connector types.
+ */
+ if (aconnector->mst_mgr.dev)
+ drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
+
+ if (aconnector->bl_idx != -1) {
+ backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
+ dm->backlight_dev[aconnector->bl_idx] = NULL;
+ }
+
+ if (aconnector->dc_em_sink)
+ dc_sink_release(aconnector->dc_em_sink);
+ aconnector->dc_em_sink = NULL;
+ if (aconnector->dc_sink)
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+
+ drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ if (aconnector->i2c) {
+ i2c_del_adapter(&aconnector->i2c->base);
+ kfree(aconnector->i2c);
+ }
+ kfree(aconnector->dm_dp_aux.aux.name);
+
+ kfree(connector);
+}
+
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
+{
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
+ if (connector->state)
+ __drm_atomic_helper_connector_destroy_state(connector->state);
+
+ kfree(state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (state) {
+ state->scaling = RMX_OFF;
+ state->underscan_enable = false;
+ state->underscan_hborder = 0;
+ state->underscan_vborder = 0;
+ state->base.max_requested_bpc = 8;
+ state->vcpi_slots = 0;
+ state->pbn = 0;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (amdgpu_dm_abm_level <= 0)
+ state->abm_level = ABM_LEVEL_IMMEDIATE_DISABLE;
+ else
+ state->abm_level = amdgpu_dm_abm_level;
+ }
+
+ __drm_atomic_helper_connector_reset(connector, &state->base);
+ }
+}
+
+struct drm_connector_state *
+amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
+{
+ struct dm_connector_state *state =
+ to_dm_connector_state(connector->state);
+
+ struct dm_connector_state *new_state =
+ kmemdup(state, sizeof(*state), GFP_KERNEL);
+
+ if (!new_state)
+ return NULL;
+
+ __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
+
+ new_state->freesync_capable = state->freesync_capable;
+ new_state->abm_level = state->abm_level;
+ new_state->scaling = state->scaling;
+ new_state->underscan_enable = state->underscan_enable;
+ new_state->underscan_hborder = state->underscan_hborder;
+ new_state->underscan_vborder = state->underscan_vborder;
+ new_state->vcpi_slots = state->vcpi_slots;
+ new_state->pbn = state->pbn;
+ return &new_state->base;
+}
+
+static int
+amdgpu_dm_connector_late_register(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ int r;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP &&
+ amdgpu_dm_abm_level < 0) {
+ r = sysfs_create_group(&connector->kdev->kobj,
+ &amdgpu_group);
+ if (r)
+ return r;
+ }
+
+ amdgpu_dm_register_backlight_device(amdgpu_dm_connector);
+
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
+ r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
+ if (r)
+ return r;
+ }
+
+#if defined(CONFIG_DEBUG_FS)
+ connector_debugfs_init(amdgpu_dm_connector);
+#endif
+
+ return 0;
+}
+
+static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dc_link *dc_link = aconnector->dc_link;
+ struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
+ struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link && dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
+
+ /*
+ * Note: drm_get_edid gets edid in the following order:
+ * 1) override EDID if set via edid_override debugfs,
+	 * 2) firmware EDID if set via edid_firmware module parameter,
+ * 3) regular DDC read.
+ */
+ edid = drm_get_edid(connector, ddc);
+ if (!edid) {
+ DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ return;
+ }
+
+ aconnector->edid = edid;
+
+ /* Update emulated (virtual) sink's EDID */
+ if (dc_em_sink && dc_link) {
+ memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
+ memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH);
+ dm_helpers_parse_edid_caps(
+ dc_link,
+ &dc_em_sink->dc_edid,
+ &dc_em_sink->edid_caps);
+ }
+}
+
+static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
+ .reset = amdgpu_dm_connector_funcs_reset,
+ .detect = amdgpu_dm_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = amdgpu_dm_connector_destroy,
+ .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
+ .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
+ .late_register = amdgpu_dm_connector_late_register,
+ .early_unregister = amdgpu_dm_connector_unregister,
+ .force = amdgpu_dm_connector_funcs_force
+};
+
+static int get_modes(struct drm_connector *connector)
+{
+ return amdgpu_dm_connector_get_modes(connector);
+}
+
+static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct dc_link *dc_link = aconnector->dc_link;
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_VIRTUAL
+ };
+ struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
+
+ /*
+ * Note: drm_get_edid gets edid in the following order:
+ * 1) override EDID if set via edid_override debugfs,
+	 * 2) firmware EDID if set via edid_firmware module parameter,
+ * 3) regular DDC read.
+ */
+ edid = drm_get_edid(connector, ddc);
+ if (!edid) {
+ DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+ return;
+ }
+
+ if (drm_detect_hdmi_monitor(edid))
+ init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
+
+ aconnector->edid = edid;
+
+ aconnector->dc_em_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ (uint8_t *)edid,
+ (edid->extensions + 1) * EDID_LENGTH,
+ &init_params);
+
+ if (aconnector->base.force == DRM_FORCE_ON) {
+ aconnector->dc_sink = aconnector->dc_link->local_sink ?
+ aconnector->dc_link->local_sink :
+ aconnector->dc_em_sink;
+ dc_sink_retain(aconnector->dc_sink);
+ }
+}
+
+static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_link *link = (struct dc_link *)aconnector->dc_link;
+
+	/*
+	 * In case of a headless boot with force on for a DP-managed
+	 * connector, these settings have to be != 0 to get an initial
+	 * modeset.
+	 */
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
+ link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
+ }
+
+ create_eml_sink(aconnector);
+}
+
+static enum dc_status dm_validate_stream_and_context(struct dc *dc,
+ struct dc_stream_state *stream)
+{
+ enum dc_status dc_result = DC_ERROR_UNEXPECTED;
+ struct dc_plane_state *dc_plane_state = NULL;
+ struct dc_state *dc_state = NULL;
+
+ if (!stream)
+ goto cleanup;
+
+ dc_plane_state = dc_create_plane_state(dc);
+ if (!dc_plane_state)
+ goto cleanup;
+
+ dc_state = dc_state_create(dc, NULL);
+ if (!dc_state)
+ goto cleanup;
+
+ /* populate stream to plane */
+ dc_plane_state->src_rect.height = stream->src.height;
+ dc_plane_state->src_rect.width = stream->src.width;
+ dc_plane_state->dst_rect.height = stream->src.height;
+ dc_plane_state->dst_rect.width = stream->src.width;
+ dc_plane_state->clip_rect.height = stream->src.height;
+ dc_plane_state->clip_rect.width = stream->src.width;
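+	/* The surface pitch is aligned up to a 256-pixel multiple, e.g. 1920 -> 2048. */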
+ dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
+ dc_plane_state->plane_size.surface_size.height = stream->src.height;
+ dc_plane_state->plane_size.surface_size.width = stream->src.width;
+ dc_plane_state->plane_size.chroma_size.height = stream->src.height;
+ dc_plane_state->plane_size.chroma_size.width = stream->src.width;
+ dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+ dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
+ dc_plane_state->rotation = ROTATION_ANGLE_0;
+ dc_plane_state->is_tiling_rotated = false;
+ dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;
+
+ dc_result = dc_validate_stream(dc, stream);
+ if (dc_result == DC_OK)
+ dc_result = dc_validate_plane(dc, dc_plane_state);
+
+ if (dc_result == DC_OK)
+ dc_result = dc_state_add_stream(dc, dc_state, stream);
+
+ if (dc_result == DC_OK && !dc_state_add_plane(
+ dc,
+ stream,
+ dc_plane_state,
+ dc_state))
+ dc_result = DC_FAIL_ATTACH_SURFACES;
+
+ if (dc_result == DC_OK)
+ dc_result = dc_validate_global_state(dc, dc_state, true);
+
+cleanup:
+ if (dc_state)
+ dc_state_release(dc_state);
+
+ if (dc_plane_state)
+ dc_plane_state_release(dc_plane_state);
+
+ return dc_result;
+}
+
+struct dc_stream_state *
+create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ const struct drm_display_mode *drm_mode,
+ const struct dm_connector_state *dm_state,
+ const struct dc_stream_state *old_stream)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
+ struct dc_stream_state *stream;
+ const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
+ int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
+ enum dc_status dc_result = DC_OK;
+
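+	/*
+	 * Try validation at the requested bpc first, then step down by 2
+	 * (e.g. 10 -> 8 -> 6) until DC accepts the stream or bpc drops below
+	 * 6; a last-resort retry forcing YCbCr420 follows the loop.
+	 */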
+ do {
+ stream = create_stream_for_sink(connector, drm_mode,
+ dm_state, old_stream,
+ requested_bpc);
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ break;
+ }
+
+ if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return stream;
+
+ dc_result = dc_validate_stream(adev->dm.dc, stream);
+ if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
+
+ if (dc_result == DC_OK)
+ dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
+
+ if (dc_result != DC_OK) {
+ DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
+ drm_mode->hdisplay,
+ drm_mode->vdisplay,
+ drm_mode->clock,
+ dc_result,
+ dc_status_to_str(dc_result));
+
+ dc_stream_release(stream);
+ stream = NULL;
+ requested_bpc -= 2; /* lower bpc to retry validation */
+ }
+
+ } while (stream == NULL && requested_bpc >= 6);
+
+ if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
+ DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
+
+ aconnector->force_yuv420_output = true;
+ stream = create_validate_stream_for_sink(aconnector, drm_mode,
+ dm_state, old_stream);
+ aconnector->force_yuv420_output = false;
+ }
+
+ return stream;
+}
+
+enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int result = MODE_ERROR;
+ struct dc_sink *dc_sink;
+ /* TODO: Unhardcode stream count */
+ struct dc_stream_state *stream;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN))
+ return result;
+
+	/*
+	 * Only run this the first time mode_valid is called, to initialize
+	 * EDID management.
+	 */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
+ !aconnector->dc_em_sink)
+ handle_edid_mgmt(aconnector);
+
+ dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
+
+ if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
+ aconnector->base.force != DRM_FORCE_ON) {
+ DRM_ERROR("dc_sink is NULL!\n");
+ goto fail;
+ }
+
+ drm_mode_set_crtcinfo(mode, 0);
+
+ stream = create_validate_stream_for_sink(aconnector, mode,
+ to_dm_connector_state(connector->state),
+ NULL);
+ if (stream) {
+ dc_stream_release(stream);
+ result = MODE_OK;
+ }
+
+fail:
+	/* TODO: error handling */
+ return result;
+}
+
+static int fill_hdr_info_packet(const struct drm_connector_state *state,
+ struct dc_info_packet *out)
+{
+ struct hdmi_drm_infoframe frame;
+ unsigned char buf[30]; /* 26 + 4 */
+ ssize_t len;
+ int ret, i;
+
+ memset(out, 0, sizeof(*out));
+
+ if (!state->hdr_output_metadata)
+ return 0;
+
+ ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
+ if (ret)
+ return ret;
+
+ len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
+ if (len < 0)
+ return (int)len;
+
+ /* Static metadata is a fixed 26 bytes + 4 byte header. */
+ if (len != 30)
+ return -EINVAL;
+
+ /* Prepare the infopacket for DC. */
+ switch (state->connector->connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ out->hb0 = 0x87; /* type */
+ out->hb1 = 0x01; /* version */
+ out->hb2 = 0x1A; /* length */
+ out->sb[0] = buf[3]; /* checksum */
+ i = 1;
+ break;
+
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ out->hb0 = 0x00; /* sdp id, zero */
+ out->hb1 = 0x87; /* type */
+ out->hb2 = 0x1D; /* payload len - 1 */
+ out->hb3 = (0x13 << 2); /* sdp version */
+ out->sb[0] = 0x01; /* version */
+ out->sb[1] = 0x1A; /* length */
+ i = 2;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(&out->sb[i], &buf[4], 26);
+ out->valid = true;
+
+ print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
+ sizeof(out->sb), false);
+
+ return 0;
+}
+
+static int
+amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *new_con_state =
+ drm_atomic_get_new_connector_state(state, conn);
+ struct drm_connector_state *old_con_state =
+ drm_atomic_get_old_connector_state(state, conn);
+ struct drm_crtc *crtc = new_con_state->crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
+ int ret;
+
+ trace_amdgpu_dm_connector_atomic_check(new_con_state);
+
+ if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!crtc)
+ return 0;
+
+ if (new_con_state->colorspace != old_con_state->colorspace) {
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ new_crtc_state->mode_changed = true;
+ }
+
+ if (new_con_state->content_type != old_con_state->content_type) {
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ new_crtc_state->mode_changed = true;
+ }
+
+ if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
+ struct dc_info_packet hdr_infopacket;
+
+ ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
+ if (ret)
+ return ret;
+
+ new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(new_crtc_state))
+ return PTR_ERR(new_crtc_state);
+
+ /*
+ * DC considers the stream backends changed if the
+ * static metadata changes. Forcing the modeset also
+ * gives a simple way for userspace to switch from
+ * 8bpc to 10bpc when setting the metadata to enter
+ * or exit HDR.
+ *
+ * Changing the static metadata after it's been
+ * set is permissible, however. So only force a
+ * modeset if we're entering or exiting HDR.
+ */
+ new_crtc_state->mode_changed = new_crtc_state->mode_changed ||
+ !old_con_state->hdr_output_metadata ||
+ !new_con_state->hdr_output_metadata;
+ }
+
+ return 0;
+}
+
+static const struct drm_connector_helper_funcs
+amdgpu_dm_connector_helper_funcs = {
+	/*
+	 * If a second, bigger display is hotplugged in fbcon mode, its higher
+	 * resolution modes will be filtered out by drm_mode_validate_size()
+	 * and be missing once the user starts lightdm. So we need to renew
+	 * the mode list in the get_modes callback, not just return the mode
+	 * count.
+	 */
+ .get_modes = get_modes,
+ .mode_valid = amdgpu_dm_connector_mode_valid,
+ .atomic_check = amdgpu_dm_connector_atomic_check,
+};
+
+static void dm_encoder_helper_disable(struct drm_encoder *encoder)
+{
+
+}
+
+int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
+{
+ switch (display_color_depth) {
+ case COLOR_DEPTH_666:
+ return 6;
+ case COLOR_DEPTH_888:
+ return 8;
+ case COLOR_DEPTH_101010:
+ return 10;
+ case COLOR_DEPTH_121212:
+ return 12;
+ case COLOR_DEPTH_141414:
+ return 14;
+ case COLOR_DEPTH_161616:
+ return 16;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_atomic_state *state = crtc_state->state;
+ struct drm_connector *connector = conn_state->connector;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct drm_dp_mst_topology_mgr *mst_mgr;
+ struct drm_dp_mst_port *mst_port;
+ struct drm_dp_mst_topology_state *mst_state;
+ enum dc_color_depth color_depth;
+ int clock, bpp = 0;
+ bool is_y420 = false;
+
+ if (!aconnector->mst_output_port)
+ return 0;
+
+ mst_port = aconnector->mst_output_port;
+ mst_mgr = &aconnector->mst_root->mst_mgr;
+
+ if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
+ return 0;
+
+ mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
+ if (IS_ERR(mst_state))
+ return PTR_ERR(mst_state);
+
+ mst_state->pbn_div.full = dfixed_const(dm_mst_get_pbn_divider(aconnector->mst_root->dc_link));
+
+ if (!state->duplicated) {
+ int max_bpc = conn_state->max_requested_bpc;
+
+ is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
+ aconnector->force_yuv420_output;
+ color_depth = convert_color_depth_from_display_info(connector,
+ is_y420,
+ max_bpc);
+ bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
+ clock = adjusted_mode->clock;
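+		/*
+		 * Illustrative numbers (assumed): a 1080p60 stream has
+		 * clock = 148500 kHz; at 24 bpp, drm_dp_calc_pbn_mode(148500,
+		 * 24 << 4) works out to roughly 532 PBN.
+		 */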
+ dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
+ }
+
+ dm_new_connector_state->vcpi_slots =
+ drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
+ dm_new_connector_state->pbn);
+ if (dm_new_connector_state->vcpi_slots < 0) {
+ DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
+ return dm_new_connector_state->vcpi_slots;
+ }
+ return 0;
+}
+
+const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
+ .disable = dm_encoder_helper_disable,
+ .atomic_check = dm_encoder_helper_atomic_check
+};
+
+static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
+ struct dc_state *dc_state,
+ struct dsc_mst_fairness_vars *vars)
+{
+ struct dc_stream_state *stream = NULL;
+ struct drm_connector *connector;
+ struct drm_connector_state *new_con_state;
+ struct amdgpu_dm_connector *aconnector;
+ struct dm_connector_state *dm_conn_state;
+ int i, j, ret;
+ int vcpi, pbn_div, pbn, slot_num = 0;
+
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!aconnector->mst_output_port)
+ continue;
+
+ if (!new_con_state || !new_con_state->crtc)
+ continue;
+
+ dm_conn_state = to_dm_connector_state(new_con_state);
+
+ for (j = 0; j < dc_state->stream_count; j++) {
+ stream = dc_state->streams[j];
+ if (!stream)
+ continue;
+
+ if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
+ break;
+
+ stream = NULL;
+ }
+
+ if (!stream)
+ continue;
+
+ pbn_div = dm_mst_get_pbn_divider(stream->link);
+ /* pbn is calculated by compute_mst_dsc_configs_for_state*/
+ for (j = 0; j < dc_state->stream_count; j++) {
+ if (vars[j].aconnector == aconnector) {
+ pbn = vars[j].pbn;
+ break;
+ }
+ }
+
+ if (j == dc_state->stream_count)
+ continue;
+
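+		/*
+		 * Round the payload up to whole time slots: e.g. an assumed
+		 * 532 PBN payload with an assumed 60 PBN-per-slot divider
+		 * rounds up to DIV_ROUND_UP(532, 60) = 9 slots.
+		 */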
+ slot_num = DIV_ROUND_UP(pbn, pbn_div);
+
+ if (stream->timing.flags.DSC != 1) {
+ dm_conn_state->pbn = pbn;
+ dm_conn_state->vcpi_slots = slot_num;
+
+ ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port,
+ dm_conn_state->pbn, false);
+ if (ret < 0)
+ return ret;
+
+ continue;
+ }
+
+ vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true);
+ if (vcpi < 0)
+ return vcpi;
+
+ dm_conn_state->pbn = pbn;
+ dm_conn_state->vcpi_slots = vcpi;
+ }
+ return 0;
+}
+
+static int to_drm_connector_type(enum signal_type st)
+{
+ switch (st) {
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return DRM_MODE_CONNECTOR_HDMIA;
+ case SIGNAL_TYPE_EDP:
+ return DRM_MODE_CONNECTOR_eDP;
+ case SIGNAL_TYPE_LVDS:
+ return DRM_MODE_CONNECTOR_LVDS;
+ case SIGNAL_TYPE_RGB:
+ return DRM_MODE_CONNECTOR_VGA;
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ return DRM_MODE_CONNECTOR_DisplayPort;
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ return DRM_MODE_CONNECTOR_DVID;
+ case SIGNAL_TYPE_VIRTUAL:
+ return DRM_MODE_CONNECTOR_VIRTUAL;
+
+ default:
+ return DRM_MODE_CONNECTOR_Unknown;
+ }
+}
+
+static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+
+ /* There is only one encoder per connector */
+ drm_connector_for_each_possible_encoder(connector, encoder)
+ return encoder;
+
+ return NULL;
+}
+
+static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+
+ encoder = amdgpu_dm_connector_to_encoder(connector);
+
+ if (encoder == NULL)
+ return;
+
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ amdgpu_encoder->native_mode.clock = 0;
+
+ if (!list_empty(&connector->probed_modes)) {
+ struct drm_display_mode *preferred_mode = NULL;
+
+ list_for_each_entry(preferred_mode,
+ &connector->probed_modes,
+ head) {
+ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
+ amdgpu_encoder->native_mode = *preferred_mode;
+
+ break;
+ }
+
+ }
+}
+
+static struct drm_display_mode *
+amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
+ char *name,
+ int hdisplay, int vdisplay)
+{
+ struct drm_device *dev = encoder->dev;
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+
+ mode = drm_mode_duplicate(dev, native_mode);
+
+ if (mode == NULL)
+ return NULL;
+
+ mode->hdisplay = hdisplay;
+ mode->vdisplay = vdisplay;
+ mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+ strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+
+ return mode;
+}
+
+static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
+ struct drm_display_mode *mode = NULL;
+ struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ int i;
+ int n;
+ struct mode_size {
+ char name[DRM_DISPLAY_MODE_LEN];
+ int w;
+ int h;
+ } common_modes[] = {
+ { "640x480", 640, 480},
+ { "800x600", 800, 600},
+ { "1024x768", 1024, 768},
+ { "1280x720", 1280, 720},
+ { "1280x800", 1280, 800},
+ {"1280x1024", 1280, 1024},
+ { "1440x900", 1440, 900},
+ {"1680x1050", 1680, 1050},
+ {"1600x1200", 1600, 1200},
+ {"1920x1080", 1920, 1080},
+ {"1920x1200", 1920, 1200}
+ };
+
+ n = ARRAY_SIZE(common_modes);
+
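+	/*
+	 * Skip common modes that don't fit inside the native mode, as well as
+	 * the native resolution itself, which is already in the probed list.
+	 */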
+ for (i = 0; i < n; i++) {
+ struct drm_display_mode *curmode = NULL;
+ bool mode_existed = false;
+
+ if (common_modes[i].w > native_mode->hdisplay ||
+ common_modes[i].h > native_mode->vdisplay ||
+ (common_modes[i].w == native_mode->hdisplay &&
+ common_modes[i].h == native_mode->vdisplay))
+ continue;
+
+ list_for_each_entry(curmode, &connector->probed_modes, head) {
+ if (common_modes[i].w == curmode->hdisplay &&
+ common_modes[i].h == curmode->vdisplay) {
+ mode_existed = true;
+ break;
+ }
+ }
+
+ if (mode_existed)
+ continue;
+
+ mode = amdgpu_dm_create_common_mode(encoder,
+ common_modes[i].name, common_modes[i].w,
+ common_modes[i].h);
+ if (!mode)
+ continue;
+
+ drm_mode_probed_add(connector, mode);
+ amdgpu_dm_connector->num_modes++;
+ }
+}
+
+static void amdgpu_set_panel_orientation(struct drm_connector *connector)
+{
+ struct drm_encoder *encoder;
+ struct amdgpu_encoder *amdgpu_encoder;
+ const struct drm_display_mode *native_mode;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+ connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+ return;
+
+ mutex_lock(&connector->dev->mode_config.mutex);
+ amdgpu_dm_connector_get_modes(connector);
+ mutex_unlock(&connector->dev->mode_config.mutex);
+
+ encoder = amdgpu_dm_connector_to_encoder(connector);
+ if (!encoder)
+ return;
+
+ amdgpu_encoder = to_amdgpu_encoder(encoder);
+
+ native_mode = &amdgpu_encoder->native_mode;
+ if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
+ return;
+
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
+ native_mode->hdisplay,
+ native_mode->vdisplay);
+}
+
+static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ if (edid) {
+ /* empty probed_modes */
+ INIT_LIST_HEAD(&connector->probed_modes);
+ amdgpu_dm_connector->num_modes =
+ drm_add_edid_modes(connector, edid);
+
+		/* Sort the probed modes before calling
+		 * amdgpu_dm_get_native_mode(), since an EDID can have more
+		 * than one preferred mode. Modes later in the probed list may
+		 * be of a higher, preferred resolution: for example, a
+		 * 3840x2160 preferred timing in the base EDID and a 4096x2160
+		 * preferred resolution in a later DID extension block.
+		 */
+ drm_mode_sort(&connector->probed_modes);
+ amdgpu_dm_get_native_mode(connector);
+
+ /* Freesync capabilities are reset by calling
+ * drm_add_edid_modes() and need to be
+ * restored here.
+ */
+ amdgpu_dm_update_freesync_caps(connector, edid);
+ } else {
+ amdgpu_dm_connector->num_modes = 0;
+ }
+}
+
+static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
+ struct drm_display_mode *mode)
+{
+ struct drm_display_mode *m;
+
+ list_for_each_entry(m, &aconnector->base.probed_modes, head) {
+ if (drm_mode_equal(m, mode))
+ return true;
+ }
+
+ return false;
+}
+
+static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
+{
+ const struct drm_display_mode *m;
+ struct drm_display_mode *new_mode;
+ uint i;
+ u32 new_modes_count = 0;
+
+ /* Standard FPS values
+ *
+ * 23.976 - TV/NTSC
+ * 24 - Cinema
+ * 25 - TV/PAL
+ * 29.97 - TV/NTSC
+ * 30 - TV/NTSC
+ * 48 - Cinema HFR
+ * 50 - TV/PAL
+ * 60 - Commonly used
+ * 48,72,96,120 - Multiples of 24
+ */
+ static const u32 common_rates[] = {
+ 23976, 24000, 25000, 29970, 30000,
+ 48000, 50000, 60000, 72000, 96000, 120000
+ };
+
+	/*
+	 * Find the mode with the highest refresh rate at the same resolution
+	 * as the preferred mode. Some monitors report a preferred mode whose
+	 * refresh rate is lower than the highest they support.
+	 */
+
+ m = get_highest_refresh_rate_mode(aconnector, true);
+ if (!m)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
+ u64 target_vtotal, target_vtotal_diff;
+ u64 num, den;
+
+ if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
+ continue;
+
+ if (common_rates[i] < aconnector->min_vfreq * 1000 ||
+ common_rates[i] > aconnector->max_vfreq * 1000)
+ continue;
+
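+		/*
+		 * Scale vtotal so that clock / (htotal * vtotal) hits the
+		 * target rate. E.g. with illustrative numbers, a 148500 kHz,
+		 * 2200x1125 mode retargeted to 30 Hz:
+		 * 148500 * 1000 * 1000 / (30000 * 2200) = 2250, i.e.
+		 * target_vtotal_diff = 1125.
+		 */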
+ num = (unsigned long long)m->clock * 1000 * 1000;
+ den = common_rates[i] * (unsigned long long)m->htotal;
+ target_vtotal = div_u64(num, den);
+ target_vtotal_diff = target_vtotal - m->vtotal;
+
+ /* Check for illegal modes */
+ if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
+ m->vsync_end + target_vtotal_diff < m->vsync_start ||
+ m->vtotal + target_vtotal_diff < m->vsync_end)
+ continue;
+
+ new_mode = drm_mode_duplicate(aconnector->base.dev, m);
+ if (!new_mode)
+ goto out;
+
+ new_mode->vtotal += (u16)target_vtotal_diff;
+ new_mode->vsync_start += (u16)target_vtotal_diff;
+ new_mode->vsync_end += (u16)target_vtotal_diff;
+ new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
+ new_mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ if (!is_duplicate_mode(aconnector, new_mode)) {
+ drm_mode_probed_add(&aconnector->base, new_mode);
+ new_modes_count += 1;
+ } else
+ drm_mode_destroy(aconnector->base.dev, new_mode);
+ }
+ out:
+ return new_modes_count;
+}
+
+static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
+ struct edid *edid)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+ if (!(amdgpu_freesync_vid_mode && edid))
+ return;
+
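+	/* Only add fixed-rate FreeSync video modes when the monitor's
+	 * VRR range is wider than 10 Hz.
+	 */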
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ amdgpu_dm_connector->num_modes +=
+ add_fs_modes(amdgpu_dm_connector);
+}
+
+static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ struct drm_encoder *encoder;
+ struct edid *edid = amdgpu_dm_connector->edid;
+ struct dc_link_settings *verified_link_cap =
+ &amdgpu_dm_connector->dc_link->verified_link_cap;
+ const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
+
+ encoder = amdgpu_dm_connector_to_encoder(connector);
+
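+	/*
+	 * Without a valid EDID, fall back to a bare 640x480 mode list;
+	 * DP 128b/132b (UHBR) links additionally get 1920x1080 modes.
+	 */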
+ if (!drm_edid_is_valid(edid)) {
+ amdgpu_dm_connector->num_modes =
+ drm_add_modes_noedid(connector, 640, 480);
+ if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
+ amdgpu_dm_connector->num_modes +=
+ drm_add_modes_noedid(connector, 1920, 1080);
+ } else {
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+ amdgpu_dm_connector_add_freesync_modes(connector, edid);
+ }
+ amdgpu_dm_fbc_init(connector);
+
+ return amdgpu_dm_connector->num_modes;
+}
+
+static const u32 supported_colorspaces =
+ BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
+ BIT(DRM_MODE_COLORIMETRY_OPRGB) |
+ BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
+ BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
+
+void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ int connector_type,
+ struct dc_link *link,
+ int link_index)
+{
+ struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+
+ /*
+ * Some of the properties below require access to state, like bpc.
+ * Allocate some default initial connector state with our reset helper.
+ */
+ if (aconnector->base.funcs->reset)
+ aconnector->base.funcs->reset(&aconnector->base);
+
+ aconnector->connector_id = link_index;
+ aconnector->bl_idx = -1;
+ aconnector->dc_link = link;
+ aconnector->base.interlace_allowed = false;
+ aconnector->base.doublescan_allowed = false;
+ aconnector->base.stereo_allowed = false;
+ aconnector->base.dpms = DRM_MODE_DPMS_OFF;
+ aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
+ aconnector->audio_inst = -1;
+ aconnector->pack_sdp_v1_3 = false;
+ aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
+ memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
+ mutex_init(&aconnector->hpd_lock);
+ mutex_init(&aconnector->handle_mst_msg_ready);
+
+	/*
+	 * Configure HPD hot-plug support. connector->polled defaults
+	 * to 0, which means HPD hot plug is not supported.
+	 */
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ link->link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link->link_enc);
+ if (link->link_enc)
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.dp_ycbcr420_supported ? true : false;
+ break;
+ case DRM_MODE_CONNECTOR_DVID:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ break;
+ default:
+ break;
+ }
+
+ drm_object_attach_property(&aconnector->base.base,
+ dm->ddev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_NONE);
+
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_property,
+ UNDERSCAN_OFF);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_hborder_property,
+ 0);
+ drm_object_attach_property(&aconnector->base.base,
+ adev->mode_info.underscan_vborder_property,
+ 0);
+
+ if (!aconnector->mst_root)
+ drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
+
+ aconnector->base.state->max_bpc = 16;
+ aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
+
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ /* Content Type is currently only implemented for HDMI. */
+ drm_connector_attach_content_type_property(&aconnector->base);
+ }
+
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces))
+ drm_connector_attach_colorspace_property(&aconnector->base);
+ } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) ||
+ connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces))
+ drm_connector_attach_colorspace_property(&aconnector->base);
+ }
+
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_eDP) {
+ drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
+
+ if (!aconnector->mst_root)
+ drm_connector_attach_vrr_capable_property(&aconnector->base);
+
+ if (adev->dm.hdcp_workqueue)
+ drm_connector_attach_content_protection_property(&aconnector->base, true);
+ }
+}
+
+static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
+ struct ddc_service *ddc_service = i2c->ddc_service;
+ struct i2c_command cmd;
+ int i;
+ int result = -EIO;
+
+ if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
+ return result;
+
+ cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
+
+ if (!cmd.payloads)
+ return result;
+
+ cmd.number_of_payloads = num;
+ cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
+ cmd.speed = 100;
+
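+	/* Translate each i2c_msg 1:1 into a DC i2c_payload; the
+	 * I2C_M_RD flag distinguishes reads from writes.
+	 */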
+ for (i = 0; i < num; i++) {
+ cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
+ cmd.payloads[i].address = msgs[i].addr;
+ cmd.payloads[i].length = msgs[i].len;
+ cmd.payloads[i].data = msgs[i].buf;
+ }
+
+ if (dc_submit_i2c(
+ ddc_service->ctx->dc,
+ ddc_service->link->link_index,
+ &cmd))
+ result = num;
+
+ kfree(cmd.payloads);
+ return result;
+}
+
+static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
+ .master_xfer = amdgpu_dm_i2c_xfer,
+ .functionality = amdgpu_dm_i2c_func,
+};
+
+static struct amdgpu_i2c_adapter *
+create_i2c(struct ddc_service *ddc_service,
+ int link_index,
+ int *res)
+{
+ struct amdgpu_device *adev = ddc_service->ctx->driver_context;
+ struct amdgpu_i2c_adapter *i2c;
+
+ i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
+ if (!i2c)
+ return NULL;
+ i2c->base.owner = THIS_MODULE;
+ i2c->base.dev.parent = &adev->pdev->dev;
+ i2c->base.algo = &amdgpu_dm_i2c_algo;
+ snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
+ i2c_set_adapdata(&i2c->base, i2c);
+ i2c->ddc_service = ddc_service;
+
+ return i2c;
+}
+
+/*
+ * Note: this function assumes that dc_link_detect() was called for the
+ * dc_link which will be represented by this aconnector.
+ */
+static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ u32 link_index,
+ struct amdgpu_encoder *aencoder)
+{
+ int res = 0;
+ int connector_type;
+ struct dc *dc = dm->dc;
+ struct dc_link *link = dc_get_link_at_index(dc, link_index);
+ struct amdgpu_i2c_adapter *i2c;
+
+ /* Not needed for writeback connector */
+ link->priv = aconnector;
+
+ i2c = create_i2c(link->ddc, link->link_index, &res);
+ if (!i2c) {
+ DRM_ERROR("Failed to create i2c adapter data\n");
+ return -ENOMEM;
+ }
+
+ aconnector->i2c = i2c;
+ res = i2c_add_adapter(&i2c->base);
+
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
+ goto out_free;
+ }
+
+ connector_type = to_drm_connector_type(link->connector_signal);
+
+ res = drm_connector_init_with_ddc(
+ dm->ddev,
+ &aconnector->base,
+ &amdgpu_dm_connector_funcs,
+ connector_type,
+ &i2c->base);
+
+ if (res) {
+ DRM_ERROR("connector_init failed\n");
+ aconnector->connector_id = -1;
+ goto out_free;
+ }
+
+ drm_connector_helper_add(
+ &aconnector->base,
+ &amdgpu_dm_connector_helper_funcs);
+
+ amdgpu_dm_connector_init_helper(
+ dm,
+ aconnector,
+ connector_type,
+ link,
+ link_index);
+
+ drm_connector_attach_encoder(
+ &aconnector->base, &aencoder->base);
+
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
+ || connector_type == DRM_MODE_CONNECTOR_eDP)
+ amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
+
+out_free:
+ if (res) {
+ kfree(i2c);
+ aconnector->i2c = NULL;
+ }
+ return res;
+}
+
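+/*
+ * Returns a possible_crtcs mask with one bit per CRTC, i.e.
+ * (1 << num_crtc) - 1 clamped to at most 6 CRTCs; for example,
+ * num_crtc == 4 yields 0xf (CRTCs 0-3).
+ */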
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
+{
+ switch (adev->mode_info.num_crtc) {
+ case 1:
+ return 0x1;
+ case 2:
+ return 0x3;
+ case 3:
+ return 0x7;
+ case 4:
+ return 0xf;
+ case 5:
+ return 0x1f;
+ case 6:
+ default:
+ return 0x3f;
+ }
+}
+
+static int amdgpu_dm_encoder_init(struct drm_device *dev,
+ struct amdgpu_encoder *aencoder,
+ uint32_t link_index)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ int res = drm_encoder_init(dev,
+ &aencoder->base,
+ &amdgpu_dm_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS,
+ NULL);
+
+ aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+
+ if (!res)
+ aencoder->encoder_id = link_index;
+ else
+ aencoder->encoder_id = -1;
+
+ drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
+
+ return res;
+}
+
+static void manage_dm_interrupts(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ bool enable)
+{
+ /*
+ * We have no guarantee that the frontend index maps to the same
+ * backend index - some even map to more than one.
+ *
+ * TODO: Use a different interrupt or check DC itself for the mapping.
+ */
+ int irq_type =
+ amdgpu_display_crtc_idx_to_irq_type(
+ adev,
+ acrtc->crtc_id);
+
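+	/* The disable path mirrors the enable path in reverse order:
+	 * vblank is turned on first and off last.
+	 */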
+ if (enable) {
+ drm_crtc_vblank_on(&acrtc->base);
+ amdgpu_irq_get(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ amdgpu_irq_get(
+ adev,
+ &adev->vline0_irq,
+ irq_type);
+#endif
+ } else {
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ amdgpu_irq_put(
+ adev,
+ &adev->vline0_irq,
+ irq_type);
+#endif
+ amdgpu_irq_put(
+ adev,
+ &adev->pageflip_irq,
+ irq_type);
+ drm_crtc_vblank_off(&acrtc->base);
+ }
+}
+
+static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc)
+{
+ int irq_type =
+ amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
+
+	/**
+	 * This reads the current IRQ state and forcibly reapplies the
+	 * setting to hardware.
+	 */
+ amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
+}
+
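+/*
+ * Scaling state differs if the scaling mode changed, if underscan was
+ * toggled with both borders non-zero, or if the border sizes themselves
+ * changed.
+ */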
+static bool
+is_scaling_state_different(const struct dm_connector_state *dm_state,
+ const struct dm_connector_state *old_dm_state)
+{
+ if (dm_state->scaling != old_dm_state->scaling)
+ return true;
+ if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
+ if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
+ return true;
+ } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
+ if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
+ return true;
+ } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
+ dm_state->underscan_vborder != old_dm_state->underscan_vborder)
+ return true;
+ return false;
+}
+
+static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
+ struct drm_crtc_state *old_crtc_state,
+ struct drm_connector_state *new_conn_state,
+ struct drm_connector_state *old_conn_state,
+ const struct drm_connector *connector,
+ struct hdcp_workqueue *hdcp_w)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
+
+ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ connector->index, connector->status, connector->dpms);
+ pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ old_conn_state->content_protection, new_conn_state->content_protection);
+
+ if (old_crtc_state)
+ pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ old_crtc_state->enable,
+ old_crtc_state->active,
+ old_crtc_state->mode_changed,
+ old_crtc_state->active_changed,
+ old_crtc_state->connectors_changed);
+
+ if (new_crtc_state)
+ pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* hdcp content type change */
+ if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
+ new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
+ return true;
+ }
+
+	/* CP is being re-enabled, ignore this */
+ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (new_crtc_state && new_crtc_state->mode_changed) {
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
+ return true;
+ }
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
+ return false;
+ }
+
+	/* S3 resume case: the old state will always be 0 (UNDESIRED) and
+	 * the restored state will be ENABLED.
+	 *
+	 * Handles: UNDESIRED -> ENABLED
+	 */
+ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+
+ /* Stream removed and re-enabled
+ *
+ * Can sometimes overlap with the HPD case,
+ * thus set update_hdcp to false to avoid
+ * setting HDCP multiple times.
+ *
+ * Handles: DESIRED -> DESIRED (Special case)
+ */
+ if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
+ new_conn_state->crtc && new_conn_state->crtc->enabled &&
+ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ dm_con_state->update_hdcp = false;
+ pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
+ __func__);
+ return true;
+ }
+
+ /* Hot-plug, headless s3, dpms
+ *
+ * Only start HDCP if the display is connected/enabled.
+ * update_hdcp flag will be set to false until the next
+ * HPD comes in.
+ *
+ * Handles: DESIRED -> DESIRED (Special case)
+ */
+ if (dm_con_state->update_hdcp &&
+ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+ connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
+ dm_con_state->update_hdcp = false;
+ pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
+ __func__);
+ return true;
+ }
+
+ if (old_conn_state->content_protection == new_conn_state->content_protection) {
+ if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ if (new_crtc_state && new_crtc_state->mode_changed) {
+ pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
+ __func__);
+ return true;
+ }
+ pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
+ __func__);
+ return false;
+ }
+
+ pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
+ return false;
+ }
+
+ if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
+ __func__);
+ return true;
+ }
+
+ pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
+ return false;
+}
+
+static void remove_stream(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ struct dc_stream_state *stream)
+{
+ /* this is the update mode case */
+
+ acrtc->otg_inst = -1;
+ acrtc->enabled = false;
+}
+
+static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
+{
+
+ assert_spin_locked(&acrtc->base.dev->event_lock);
+ WARN_ON(acrtc->event);
+
+ acrtc->event = acrtc->base.state->event;
+
+ /* Set the flip status */
+ acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
+
+ /* Mark this event as consumed */
+ acrtc->base.state->event = NULL;
+
+ drm_dbg_state(acrtc->base.dev,
+ "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
+ acrtc->crtc_id);
+}
+
+static void update_freesync_state_on_stream(
+ struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *new_crtc_state,
+ struct dc_stream_state *new_stream,
+ struct dc_plane_state *surface,
+ u32 flip_timestamp_in_us)
+{
+ struct mod_vrr_params vrr_params;
+ struct dc_info_packet vrr_infopacket = {0};
+ struct amdgpu_device *adev = dm->adev;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
+ unsigned long flags;
+ bool pack_sdp_v1_3 = false;
+ struct amdgpu_dm_connector *aconn;
+ enum vrr_packet_type packet_type = PACKET_TYPE_VRR;
+
+ if (!new_stream)
+ return;
+
+ /*
+ * TODO: Determine why min/max totals and vrefresh can be 0 here.
+ * For now it's sufficient to just guard against these conditions.
+ */
+
+ if (!new_stream->timing.h_total || !new_stream->timing.v_total)
+ return;
+
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ vrr_params = acrtc->dm_irq_params.vrr_params;
+
+ if (surface) {
+ mod_freesync_handle_preflip(
+ dm->freesync_module,
+ surface,
+ new_stream,
+ flip_timestamp_in_us,
+ &vrr_params);
+
+ if (adev->family < AMDGPU_FAMILY_AI &&
+ amdgpu_dm_crtc_vrr_active(new_crtc_state)) {
+ mod_freesync_handle_v_update(dm->freesync_module,
+ new_stream, &vrr_params);
+
+ /* Need to call this before the frame ends. */
+ dc_stream_adjust_vmin_vmax(dm->dc,
+ new_crtc_state->stream,
+ &vrr_params.adjust);
+ }
+ }
+
+ aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context;
+
+ if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) {
+ pack_sdp_v1_3 = aconn->pack_sdp_v1_3;
+
+ if (aconn->vsdb_info.amd_vsdb_version == 1)
+ packet_type = PACKET_TYPE_FS_V1;
+ else if (aconn->vsdb_info.amd_vsdb_version == 2)
+ packet_type = PACKET_TYPE_FS_V2;
+ else if (aconn->vsdb_info.amd_vsdb_version == 3)
+ packet_type = PACKET_TYPE_FS_V3;
+
+ mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL,
+ &new_stream->adaptive_sync_infopacket);
+ }
+
+ mod_freesync_build_vrr_infopacket(
+ dm->freesync_module,
+ new_stream,
+ &vrr_params,
+ packet_type,
+ TRANSFER_FUNC_UNKNOWN,
+ &vrr_infopacket,
+ pack_sdp_v1_3);
+
+ new_crtc_state->freesync_vrr_info_changed |=
+ (memcmp(&new_crtc_state->vrr_infopacket,
+ &vrr_infopacket,
+ sizeof(vrr_infopacket)) != 0);
+
+ acrtc->dm_irq_params.vrr_params = vrr_params;
+ new_crtc_state->vrr_infopacket = vrr_infopacket;
+
+ new_stream->vrr_infopacket = vrr_infopacket;
+ new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params);
+
+ if (new_crtc_state->freesync_vrr_info_changed)
+ DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
+ new_crtc_state->base.crtc->base.id,
+ (int)new_crtc_state->base.vrr_enabled,
+ (int)vrr_params.state);
+
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+}
+
+static void update_stream_irq_parameters(
+ struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *new_crtc_state)
+{
+ struct dc_stream_state *new_stream = new_crtc_state->stream;
+ struct mod_vrr_params vrr_params;
+ struct mod_freesync_config config = new_crtc_state->freesync_config;
+ struct amdgpu_device *adev = dm->adev;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
+ unsigned long flags;
+
+ if (!new_stream)
+ return;
+
+ /*
+ * TODO: Determine why min/max totals and vrefresh can be 0 here.
+ * For now it's sufficient to just guard against these conditions.
+ */
+ if (!new_stream->timing.h_total || !new_stream->timing.v_total)
+ return;
+
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ vrr_params = acrtc->dm_irq_params.vrr_params;
+
+ if (new_crtc_state->vrr_supported &&
+ config.min_refresh_in_uhz &&
+ config.max_refresh_in_uhz) {
+		/*
+		 * If a freesync-compatible mode was set, config.state will
+		 * have been set in atomic check.
+		 */
+ if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
+ (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
+ new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
+ vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
+ vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
+ vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
+ vrr_params.state = VRR_STATE_ACTIVE_FIXED;
+ } else {
+ config.state = new_crtc_state->base.vrr_enabled ?
+ VRR_STATE_ACTIVE_VARIABLE :
+ VRR_STATE_INACTIVE;
+ }
+ } else {
+ config.state = VRR_STATE_UNSUPPORTED;
+ }
+
+ mod_freesync_build_vrr_params(dm->freesync_module,
+ new_stream,
+ &config, &vrr_params);
+
+ new_crtc_state->freesync_config = config;
+ /* Copy state for access from DM IRQ handler */
+ acrtc->dm_irq_params.freesync_config = config;
+ acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
+ acrtc->dm_irq_params.vrr_params = vrr_params;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+}
+
+static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
+ struct dm_crtc_state *new_state)
+{
+ bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state);
+ bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state);
+
+ if (!old_vrr_active && new_vrr_active) {
+		/* Transition VRR inactive -> active:
+		 * While VRR is active, we must not disable the vblank irq, as
+		 * a reenable after a disable would compute bogus vblank/pflip
+		 * timestamps if the reenable happened inside the display
+		 * front porch.
+		 *
+		 * We also need the vupdate irq for the actual core vblank
+		 * handling at the end of vblank.
+		 */
+ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
+ WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
+ DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
+ __func__, new_state->base.crtc->base.id);
+ } else if (old_vrr_active && !new_vrr_active) {
+ /* Transition VRR active -> inactive:
+ * Allow vblank irq disable again for fixed refresh rate.
+ */
+ WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
+ drm_crtc_vblank_put(new_state->base.crtc);
+ DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
+ __func__, new_state->base.crtc->base.id);
+ }
+}
+
+static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state;
+ int i;
+
+ /*
+ * TODO: Make this per-stream so we don't issue redundant updates for
+ * commits with multiple streams.
+ */
+ for_each_old_plane_in_state(state, plane, old_plane_state, i)
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state);
+}
+
+static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
+{
+ struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
+
+ return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
+}
+
+static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
+ struct drm_device *dev,
+ struct amdgpu_display_manager *dm,
+ struct drm_crtc *pcrtc,
+ bool wait_for_vblank)
+{
+ u32 i;
+ u64 timestamp_ns = ktime_get_ns();
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
+ struct drm_crtc_state *new_pcrtc_state =
+ drm_atomic_get_new_crtc_state(state, pcrtc);
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
+ struct dm_crtc_state *dm_old_crtc_state =
+ to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
+ int planes_count = 0, vpos, hpos;
+ unsigned long flags;
+ u32 target_vblank, last_flip_vblank;
+ bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
+ bool cursor_update = false;
+ bool pflip_present = false;
+ bool dirty_rects_changed = false;
+ struct {
+ struct dc_surface_update surface_updates[MAX_SURFACES];
+ struct dc_plane_info plane_infos[MAX_SURFACES];
+ struct dc_scaling_info scaling_infos[MAX_SURFACES];
+ struct dc_flip_addrs flip_addrs[MAX_SURFACES];
+ struct dc_stream_update stream_update;
+ } *bundle;
+
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+
+ if (!bundle) {
+ drm_err(dev, "Failed to allocate update bundle\n");
+ goto cleanup;
+ }
+
+ /*
+ * Disable the cursor first if we're disabling all the planes.
+ * It'll remain on the screen after the planes are re-enabled
+ * if we don't.
+ */
+ if (acrtc_state->active_planes == 0)
+ amdgpu_dm_commit_cursors(state);
+
+ /* update planes when needed */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_framebuffer *fb = new_plane_state->fb;
+ struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
+ bool plane_needs_flip;
+ struct dc_plane_state *dc_plane;
+ struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
+
+ /* Cursor plane is handled after stream updates */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ if ((fb && crtc == pcrtc) ||
+ (old_plane_state->fb && old_plane_state->crtc == pcrtc))
+ cursor_update = true;
+
+ continue;
+ }
+
+ if (!fb || !crtc || pcrtc != crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (!new_crtc_state->active)
+ continue;
+
+ dc_plane = dm_new_plane_state->dc_state;
+ if (!dc_plane)
+ continue;
+
+ bundle->surface_updates[planes_count].surface = dc_plane;
+ if (new_pcrtc_state->color_mgmt_changed) {
+ bundle->surface_updates[planes_count].gamma = &dc_plane->gamma_correction;
+ bundle->surface_updates[planes_count].in_transfer_func = &dc_plane->in_transfer_func;
+ bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
+ bundle->surface_updates[planes_count].hdr_mult = dc_plane->hdr_mult;
+ bundle->surface_updates[planes_count].func_shaper = &dc_plane->in_shaper_func;
+ bundle->surface_updates[planes_count].lut3d_func = &dc_plane->lut3d_func;
+ bundle->surface_updates[planes_count].blend_tf = &dc_plane->blend_tf;
+ }
+
+ amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
+ &bundle->scaling_infos[planes_count]);
+
+ bundle->surface_updates[planes_count].scaling_info =
+ &bundle->scaling_infos[planes_count];
+
+ plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
+
+ pflip_present = pflip_present || plane_needs_flip;
+
+ if (!plane_needs_flip) {
+ planes_count += 1;
+ continue;
+ }
+
+ fill_dc_plane_info_and_addr(
+ dm->adev, new_plane_state,
+ afb->tiling_flags,
+ &bundle->plane_infos[planes_count],
+ &bundle->flip_addrs[planes_count].address,
+ afb->tmz_surface, false);
+
+ drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
+ new_plane_state->plane->index,
+ bundle->plane_infos[planes_count].dcc.enable);
+
+ bundle->surface_updates[planes_count].plane_info =
+ &bundle->plane_infos[planes_count];
+
+ if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
+ acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
+ fill_dc_dirty_rects(plane, old_plane_state,
+ new_plane_state, new_crtc_state,
+ &bundle->flip_addrs[planes_count],
+ acrtc_state->stream->link->psr_settings.psr_version ==
+ DC_PSR_VERSION_SU_1,
+ &dirty_rects_changed);
+
+			/*
+			 * If the dirty regions changed, PSR-SU needs to be
+			 * disabled temporarily and re-enabled once the dirty
+			 * regions are stable, to avoid video glitches. PSR-SU
+			 * is re-enabled in vblank_control_worker() if the user
+			 * pauses the video while PSR-SU is disabled.
+			 */
+ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+ acrtc_attach->dm_irq_params.allow_psr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
+ dirty_rects_changed) {
+ mutex_lock(&dm->dc_lock);
+ acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
+ timestamp_ns;
+ if (acrtc_state->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream);
+ mutex_unlock(&dm->dc_lock);
+ }
+ }
+
+ /*
+ * Only allow immediate flips for fast updates that don't
+ * change memory domain, FB pitch, DCC state, rotation or
+ * mirroring.
+ *
+ * dm_crtc_helper_atomic_check() only accepts async flips with
+ * fast updates.
+ */
+ if (crtc->state->async_flip &&
+ (acrtc_state->update_type != UPDATE_TYPE_FAST ||
+ get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
+ drm_warn_once(state->dev,
+ "[PLANE:%d:%s] async flip with non-fast update\n",
+ plane->base.id, plane->name);
+
+ bundle->flip_addrs[planes_count].flip_immediate =
+ crtc->state->async_flip &&
+ acrtc_state->update_type == UPDATE_TYPE_FAST &&
+ get_mem_type(old_plane_state->fb) == get_mem_type(fb);
+
+ timestamp_ns = ktime_get_ns();
+ bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
+ bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
+ bundle->surface_updates[planes_count].surface = dc_plane;
+
+ if (!bundle->surface_updates[planes_count].surface) {
+ DRM_ERROR("No surface for CRTC: id=%d\n",
+ acrtc_attach->crtc_id);
+ continue;
+ }
+
+ if (plane == pcrtc->primary)
+ update_freesync_state_on_stream(
+ dm,
+ acrtc_state,
+ acrtc_state->stream,
+ dc_plane,
+ bundle->flip_addrs[planes_count].flip_timestamp_in_us);
+
+ drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
+ __func__,
+ bundle->flip_addrs[planes_count].address.grph.addr.high_part,
+ bundle->flip_addrs[planes_count].address.grph.addr.low_part);
+
+ planes_count += 1;
+
+ }
+
+ if (pflip_present) {
+ if (!vrr_active) {
+ /* Use old throttling in non-vrr fixed refresh rate mode
+ * to keep flip scheduling based on target vblank counts
+ * working in a backwards compatible way, e.g., for
+ * clients using the GLX_OML_sync_control extension or
+ * DRI3/Present extension with defined target_msc.
+ */
+ last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
+ } else {
+ /* For variable refresh rate mode only:
+ * Get vblank of last completed flip to avoid > 1 vrr
+ * flips per video frame by use of throttling, but allow
+ * flip programming anywhere in the possibly large
+ * variable vrr vblank interval for fine-grained flip
+ * timing control and more opportunity to avoid stutter
+ * on late submission of flips.
+ */
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+ last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
+ target_vblank = last_flip_vblank + wait_for_vblank;
+
+ /*
+ * Wait until we're out of the vertical blank period before the one
+ * targeted by the flip
+ */
+ while ((acrtc_attach->enabled &&
+ (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
+ 0, &vpos, &hpos, NULL,
+ NULL, &pcrtc->hwmode)
+ & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+ (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+ (int)(target_vblank -
+ amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
+ usleep_range(1000, 1100);
+ }
+
+		/**
+		 * Prepare the flip event for the pageflip interrupt to handle.
+		 *
+		 * This only works in the case where we've already turned on
+		 * the appropriate hardware blocks (e.g. HUBP), so in the
+		 * transition case from 0 -> n planes we have to skip the
+		 * hardware-generated event and rely on sending it from
+		 * software.
+		 */
+ if (acrtc_attach->base.state->event &&
+ acrtc_state->active_planes > 0) {
+ drm_crtc_vblank_get(pcrtc);
+
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+
+ WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
+ prepare_flip_isr(acrtc_attach);
+
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
+ if (acrtc_state->stream) {
+ if (acrtc_state->freesync_vrr_info_changed)
+ bundle->stream_update.vrr_infopacket =
+ &acrtc_state->stream->vrr_infopacket;
+ }
+ } else if (cursor_update && acrtc_state->active_planes > 0 &&
+ acrtc_attach->base.state->event) {
+ drm_crtc_vblank_get(pcrtc);
+
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+
+ acrtc_attach->event = acrtc_attach->base.state->event;
+ acrtc_attach->base.state->event = NULL;
+
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+
+ /* Update the planes if changed or disable if we don't have any. */
+ if ((planes_count || acrtc_state->active_planes == 0) &&
+ acrtc_state->stream) {
+ /*
+ * If PSR or idle optimizations are enabled then flush out
+ * any pending work before hardware programming.
+ */
+ if (dm->vblank_control_workqueue)
+ flush_workqueue(dm->vblank_control_workqueue);
+
+ bundle->stream_update.stream = acrtc_state->stream;
+ if (new_pcrtc_state->mode_changed) {
+ bundle->stream_update.src = acrtc_state->stream->src;
+ bundle->stream_update.dst = acrtc_state->stream->dst;
+ }
+
+ if (new_pcrtc_state->color_mgmt_changed) {
+ /*
+ * TODO: This isn't fully correct since we've actually
+ * already modified the stream in place.
+ */
+ bundle->stream_update.gamut_remap =
+ &acrtc_state->stream->gamut_remap_matrix;
+ bundle->stream_update.output_csc_transform =
+ &acrtc_state->stream->csc_color_matrix;
+ bundle->stream_update.out_transfer_func =
+ &acrtc_state->stream->out_transfer_func;
+ bundle->stream_update.lut3d_func =
+ (struct dc_3dlut *) acrtc_state->stream->lut3d_func;
+ bundle->stream_update.func_shaper =
+ (struct dc_transfer_func *) acrtc_state->stream->func_shaper;
+ }
+
+ acrtc_state->stream->abm_level = acrtc_state->abm_level;
+ if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
+ bundle->stream_update.abm_level = &acrtc_state->abm_level;
+
+ mutex_lock(&dm->dc_lock);
+ if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
+ acrtc_state->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream);
+ mutex_unlock(&dm->dc_lock);
+
+ /*
+ * If FreeSync state on the stream has changed then we need to
+ * re-adjust the min/max bounds now that DC doesn't handle this
+ * as part of commit.
+ */
+ if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
+ spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
+ dc_stream_adjust_vmin_vmax(
+ dm->dc, acrtc_state->stream,
+ &acrtc_attach->dm_irq_params.vrr_params.adjust);
+ spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
+ }
+ mutex_lock(&dm->dc_lock);
+ update_planes_and_stream_adapter(dm->dc,
+ acrtc_state->update_type,
+ planes_count,
+ acrtc_state->stream,
+ &bundle->stream_update,
+ bundle->surface_updates);
+
+		/**
+		 * Enable or disable the interrupts on the backend.
+		 *
+		 * Most pipes are put into power gating when unused.
+		 *
+		 * When a pipe is power gated we lose its interrupt
+		 * enablement state, so we need to update the IRQ control
+		 * state in hardware whenever the pipe turns on (since it
+		 * may previously have been power gated) or off (since some
+		 * pipes can't be power gated on some ASICs).
+		 */
+ if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
+ dm_update_pflip_irq_state(drm_to_adev(dev),
+ acrtc_attach);
+
+ if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
+ if (acrtc_state->stream->link->replay_settings.config.replay_supported &&
+ !acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
+ struct amdgpu_dm_connector *aconn =
+ (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+ amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
+ } else if (acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+ !acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+
+ struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
+ acrtc_state->stream->dm_stream_context;
+
+ if (!aconn->disallow_edp_enter_psr)
+ amdgpu_dm_link_setup_psr(acrtc_state->stream);
+ }
+ }
+
+ /* Decrement skip count when PSR is enabled and we're doing fast updates. */
+ if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
+ acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
+ struct amdgpu_dm_connector *aconn =
+ (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
+
+ if (aconn->psr_skip_count > 0)
+ aconn->psr_skip_count--;
+
+ /* Allow PSR when skip count is 0. */
+ acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
+
+			/*
+			 * If the sink supports PSR SU, there is no need to rely
+			 * on a vblank event disable request to enable PSR. PSR
+			 * SU can be enabled immediately once the OS demonstrates
+			 * an adequate number of fast atomic commits to notify
+			 * KMD of update events. See `vblank_control_worker()`.
+			 *
+			 * The 500000000 ns (500 ms) check below ensures the
+			 * dirty regions have been stable for half a second
+			 * before PSR is re-entered.
+			 */
+ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+ acrtc_attach->dm_irq_params.allow_psr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
+ !acrtc_state->stream->link->psr_settings.psr_allow_active &&
+ !aconn->disallow_edp_enter_psr &&
+ (timestamp_ns -
+ acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
+ 500000000)
+ amdgpu_dm_psr_enable(acrtc_state->stream);
+ } else {
+ acrtc_attach->dm_irq_params.allow_psr_entry = false;
+ }
+
+ mutex_unlock(&dm->dc_lock);
+ }
+
+ /*
+ * Update cursor state *after* programming all the planes.
+ * This avoids redundant programming in the case where we're going
+ * to be disabling a single plane - those pipes are being disabled.
+ */
+ if (acrtc_state->active_planes)
+ amdgpu_dm_commit_cursors(state);
+
+cleanup:
+ kfree(bundle);
+}
+
+static void amdgpu_dm_commit_audio(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct dm_crtc_state *new_dm_crtc_state;
+ const struct dc_stream_status *status;
+ int i, inst;
+
+	/* Notify audio device removals. */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ if (old_con_state->crtc != new_con_state->crtc) {
+ /* CRTC changes require notification. */
+ goto notify;
+ }
+
+ if (!new_con_state->crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(
+ state, new_con_state->crtc);
+
+ if (!new_crtc_state)
+ continue;
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+notify:
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ mutex_lock(&adev->dm.audio_lock);
+ inst = aconnector->audio_inst;
+ aconnector->audio_inst = -1;
+ mutex_unlock(&adev->dm.audio_lock);
+
+ amdgpu_dm_audio_eld_notify(adev, inst);
+ }
+
+ /* Notify audio device additions. */
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ if (!new_con_state->crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(
+ state, new_con_state->crtc);
+
+ if (!new_crtc_state)
+ continue;
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (!new_dm_crtc_state->stream)
+ continue;
+
+ status = dc_stream_get_status(new_dm_crtc_state->stream);
+ if (!status)
+ continue;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ mutex_lock(&adev->dm.audio_lock);
+ inst = status->audio_inst;
+ aconnector->audio_inst = inst;
+ mutex_unlock(&adev->dm.audio_lock);
+
+ amdgpu_dm_audio_eld_notify(adev, inst);
+ }
+}
+
+/*
+ * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
+ * @crtc_state: the DRM CRTC state
+ * @stream_state: the DC stream state.
+ *
+ * Copy the mirrored transient state flags from DRM to DC. It is used to bring
+ * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
+ */
+static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
+ struct dc_stream_state *stream_state)
+{
+ stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
+}
+
+static void dm_clear_writeback(struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *crtc_state)
+{
+ dc_stream_remove_writeback(dm->dc, crtc_state->stream, 0);
+}
+
+static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
+ struct dc_state *dc_state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct drm_connector_state *old_con_state;
+ struct drm_connector *connector;
+ bool mode_set_reset_required = false;
+ u32 i;
+ struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count};
+
+ /* Disable writeback */
+ for_each_old_connector_in_state(state, connector, old_con_state, i) {
+ struct dm_connector_state *dm_old_con_state;
+ struct amdgpu_crtc *acrtc;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ old_crtc_state = NULL;
+
+ dm_old_con_state = to_dm_connector_state(old_con_state);
+ if (!dm_old_con_state->base.crtc)
+ continue;
+
+ acrtc = to_amdgpu_crtc(dm_old_con_state->base.crtc);
+ if (acrtc)
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+
+ if (!acrtc->wb_enabled)
+ continue;
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ dm_clear_writeback(dm, dm_old_crtc_state);
+ acrtc->wb_enabled = false;
+ }
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ if (old_crtc_state->active &&
+ (!new_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+ manage_dm_interrupts(adev, acrtc, false);
+ dc_stream_release(dm_old_crtc_state->stream);
+ }
+ }
+
+ drm_atomic_helper_calc_timestamping_constants(state);
+
+ /* update changed items */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ drm_dbg_state(state->dev,
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->planes_changed,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* Disable cursor if disabling crtc */
+ if (old_crtc_state->active && !new_crtc_state->active) {
+ struct dc_cursor_position position;
+
+ memset(&position, 0, sizeof(position));
+ mutex_lock(&dm->dc_lock);
+ dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
+ mutex_unlock(&dm->dc_lock);
+ }
+
+ /* Copy all transient state flags into dc state */
+ if (dm_new_crtc_state->stream) {
+ amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
+ dm_new_crtc_state->stream);
+ }
+
+		/* Handles the headless hotplug case, updating new_state and
+		 * aconnector as needed.
+		 */
+
+ if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
+
+ DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
+
+ if (!dm_new_crtc_state->stream) {
+				/*
+				 * This can happen because of issues with
+				 * userspace notification delivery: userspace
+				 * tries to set a mode on a display that is in
+				 * fact disconnected, so dc_sink on the
+				 * aconnector is NULL. We expect a mode reset
+				 * to come soon.
+				 *
+				 * It can also happen when an unplug occurs
+				 * during the resume sequence.
+				 *
+				 * In either case, we want to pretend we still
+				 * have a sink to keep the pipe running, so
+				 * that hw state stays consistent with the sw
+				 * state.
+				 */
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ continue;
+ }
+
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+
+ pm_runtime_get_noresume(dev->dev);
+
+ acrtc->enabled = true;
+ acrtc->hw_mode = new_crtc_state->mode;
+ crtc->hwmode = new_crtc_state->mode;
+ mode_set_reset_required = true;
+ } else if (modereset_required(new_crtc_state)) {
+ DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
+ /* i.e. reset mode */
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+
+ mode_set_reset_required = true;
+ }
+ } /* for_each_crtc_in_state() */
+
+	/* If there was a mode set or reset, disable eDP PSR and Replay */
+ if (mode_set_reset_required) {
+ if (dm->vblank_control_workqueue)
+ flush_workqueue(dm->vblank_control_workqueue);
+
+ amdgpu_dm_replay_disable_all(dm);
+ amdgpu_dm_psr_disable_all(dm);
+ }
+
+ dm_enable_per_frame_crtc_master_sync(dc_state);
+ mutex_lock(&dm->dc_lock);
+ WARN_ON(!dc_commit_streams(dm->dc, &params));
+
+ /* Allow idle optimization when vblank count is 0 for display off */
+ if (dm->active_vblank_irq_count == 0)
+ dc_allow_idle_optimizations(dm->dc, true);
+ mutex_unlock(&dm->dc_lock);
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state->stream != NULL) {
+ const struct dc_stream_status *status =
+ dc_stream_get_status(dm_new_crtc_state->stream);
+
+ if (!status)
+ status = dc_state_get_stream_status(dc_state,
+ dm_new_crtc_state->stream);
+ if (!status)
+ drm_err(dev,
+ "got no status for stream %p on acrtc%p\n",
+ dm_new_crtc_state->stream, acrtc);
+ else
+ acrtc->otg_inst = status->primary_otg_inst;
+ }
+ }
+}
+
+static void dm_set_writeback(struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *crtc_state,
+ struct drm_connector *connector,
+ struct drm_connector_state *new_con_state)
+{
+ struct drm_writeback_connector *wb_conn = drm_connector_to_writeback(connector);
+ struct amdgpu_device *adev = dm->adev;
+ struct amdgpu_crtc *acrtc;
+ struct dc_writeback_info *wb_info;
+ struct pipe_ctx *pipe = NULL;
+ struct amdgpu_framebuffer *afb;
+ int i = 0;
+
+ wb_info = kzalloc(sizeof(*wb_info), GFP_KERNEL);
+ if (!wb_info) {
+ DRM_ERROR("Failed to allocate wb_info\n");
+ return;
+ }
+
+ acrtc = to_amdgpu_crtc(wb_conn->encoder.crtc);
+ if (!acrtc) {
+ DRM_ERROR("no amdgpu_crtc found\n");
+ kfree(wb_info);
+ return;
+ }
+
+ afb = to_amdgpu_framebuffer(new_con_state->writeback_job->fb);
+ if (!afb) {
+ DRM_ERROR("No amdgpu_framebuffer found\n");
+ kfree(wb_info);
+ return;
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dm->dc->current_state->res_ctx.pipe_ctx[i].stream == crtc_state->stream) {
+ pipe = &dm->dc->current_state->res_ctx.pipe_ctx[i];
+ break;
+ }
+ }
+
+ /* fill in wb_info */
+ wb_info->wb_enabled = true;
+
+ wb_info->dwb_pipe_inst = 0;
+ wb_info->dwb_params.dwbscl_black_color = 0;
+ wb_info->dwb_params.hdr_mult = 0x1F000;
+ wb_info->dwb_params.csc_params.gamut_adjust_type = CM_GAMUT_ADJUST_TYPE_BYPASS;
+ wb_info->dwb_params.csc_params.gamut_coef_format = CM_GAMUT_REMAP_COEF_FORMAT_S2_13;
+ wb_info->dwb_params.output_depth = DWB_OUTPUT_PIXEL_DEPTH_10BPC;
+ wb_info->dwb_params.cnv_params.cnv_out_bpc = DWB_CNV_OUT_BPC_10BPC;
+
+ /* width & height from crtc */
+ wb_info->dwb_params.cnv_params.src_width = acrtc->base.mode.crtc_hdisplay;
+ wb_info->dwb_params.cnv_params.src_height = acrtc->base.mode.crtc_vdisplay;
+ wb_info->dwb_params.dest_width = acrtc->base.mode.crtc_hdisplay;
+ wb_info->dwb_params.dest_height = acrtc->base.mode.crtc_vdisplay;
+
+ wb_info->dwb_params.cnv_params.crop_en = false;
+ wb_info->dwb_params.stereo_params.stereo_enabled = false;
+
+ wb_info->dwb_params.cnv_params.out_max_pix_val = 0x3ff; // 10 bits
+ wb_info->dwb_params.cnv_params.out_min_pix_val = 0;
+ wb_info->dwb_params.cnv_params.fc_out_format = DWB_OUT_FORMAT_32BPP_ARGB;
+ wb_info->dwb_params.cnv_params.out_denorm_mode = DWB_OUT_DENORM_BYPASS;
+
+ wb_info->dwb_params.out_format = dwb_scaler_mode_bypass444;
+
+ wb_info->dwb_params.capture_rate = dwb_capture_rate_0;
+
+ wb_info->dwb_params.scaler_taps.h_taps = 4;
+ wb_info->dwb_params.scaler_taps.v_taps = 4;
+ wb_info->dwb_params.scaler_taps.h_taps_c = 2;
+ wb_info->dwb_params.scaler_taps.v_taps_c = 2;
+ wb_info->dwb_params.subsample_position = DWB_INTERSTITIAL_SUBSAMPLING;
+
+ wb_info->mcif_buf_params.luma_pitch = afb->base.pitches[0];
+ wb_info->mcif_buf_params.chroma_pitch = afb->base.pitches[1];
+
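+	/* The output format programmed above is packed 32bpp ARGB, so
+	 * only the luma addresses are populated; there is no separate
+	 * chroma plane.
+	 */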
+ for (i = 0; i < DWB_MCIF_BUF_COUNT; i++) {
+ wb_info->mcif_buf_params.luma_address[i] = afb->address;
+ wb_info->mcif_buf_params.chroma_address[i] = 0;
+ }
+
+ wb_info->mcif_buf_params.p_vmid = 1;
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0)) {
+ wb_info->mcif_warmup_params.start_address.quad_part = afb->address;
+ wb_info->mcif_warmup_params.region_size =
+ wb_info->mcif_buf_params.luma_pitch * wb_info->dwb_params.dest_height;
+ }
+ wb_info->mcif_warmup_params.p_vmid = 1;
+ wb_info->writeback_source_plane = pipe->plane_state;
+
+ dc_stream_add_writeback(dm->dc, crtc_state->stream, wb_info);
+
+ acrtc->wb_pending = true;
+ acrtc->wb_conn = wb_conn;
+ drm_writeback_queue_job(wb_conn, new_con_state);
+}
+
+/**
+ * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware failure,
+ * since atomic check should have filtered out anything non-kosher.
+ */
+static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+{
+ struct drm_device *dev = state->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct dm_atomic_state *dm_state;
+ struct dc_state *dc_state = NULL;
+ u32 i, j;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ unsigned long flags;
+ bool wait_for_vblank = true;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int crtc_disable_count = 0;
+
+ trace_amdgpu_dm_atomic_commit_tail_begin(state);
+
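+	/* Disallow idle optimizations (IPS) before any hardware programming. */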
+ if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed)
+ dc_allow_idle_optimizations(dm->dc, false);
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, state);
+ drm_dp_mst_atomic_wait_for_dependencies(state);
+
+ dm_state = dm_atomic_get_new_state(state);
+ if (dm_state && dm_state->context) {
+ dc_state = dm_state->context;
+ amdgpu_dm_commit_streams(state, dc_state);
+ }
+
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct amdgpu_dm_connector *aconnector;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!adev->dm.hdcp_workqueue)
+ continue;
+
+ pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
+
+ if (!connector)
+ continue;
+
+ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+ connector->index, connector->status, connector->dpms);
+ pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+ old_con_state->content_protection, new_con_state->content_protection);
+
+ if (aconnector->dc_sink) {
+ if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
+ aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
+ pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+ aconnector->dc_sink->edid_caps.display_name);
+ }
+ }
+
+ new_crtc_state = NULL;
+ old_crtc_state = NULL;
+
+ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
+
+ if (old_crtc_state)
+ pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ old_crtc_state->enable,
+ old_crtc_state->active,
+ old_crtc_state->mode_changed,
+ old_crtc_state->active_changed,
+ old_crtc_state->connectors_changed);
+
+ if (new_crtc_state)
+ pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+ }
+
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!adev->dm.hdcp_workqueue)
+ continue;
+
+ new_crtc_state = NULL;
+ old_crtc_state = NULL;
+
+ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
+ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+ hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+ new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ dm_new_con_state->update_hdcp = true;
+ continue;
+ }
+
+ if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
+ old_con_state, connector, adev->dm.hdcp_workqueue)) {
+			/* When a display is unplugged from an MST hub, the
+			 * connector will be destroyed in
+			 * dm_dp_mst_connector_destroy and its HDCP properties
+			 * (type, undesired, desired, enabled) will be lost. So
+			 * save the HDCP properties into hdcp_work within
+			 * amdgpu_dm_atomic_commit_tail; if the same display is
+			 * plugged back with the same display index, its HDCP
+			 * properties will be retrieved from hdcp_work within
+			 * dm_dp_mst_get_modes.
+			 */
+
+ bool enable_encryption = false;
+
+ if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ enable_encryption = true;
+
+ if (aconnector->dc_link && aconnector->dc_sink &&
+ aconnector->dc_link->type == dc_connection_mst_branch) {
+ struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+ struct hdcp_workqueue *hdcp_w =
+ &hdcp_work[aconnector->dc_link->link_index];
+
+ hdcp_w->hdcp_content_type[connector->index] =
+ new_con_state->hdcp_content_type;
+ hdcp_w->content_protection[connector->index] =
+ new_con_state->content_protection;
+ }
+
+ if (new_crtc_state && new_crtc_state->mode_changed &&
+ new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ enable_encryption = true;
+
+ DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+
+ hdcp_update_display(
+ adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
+ new_con_state->hdcp_content_type, enable_encryption);
+ }
+ }
+
+ /* Handle connector state changes */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct dc_surface_update *dummy_updates;
+ struct dc_stream_update stream_update;
+ struct dc_info_packet hdr_packet;
+ struct dc_stream_status *status = NULL;
+ bool abm_changed, hdr_changed, scaling_changed;
+
+ memset(&stream_update, 0, sizeof(stream_update));
+
+ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ scaling_changed = is_scaling_state_different(dm_new_con_state,
+ dm_old_con_state);
+
+ abm_changed = dm_new_crtc_state->abm_level !=
+ dm_old_crtc_state->abm_level;
+
+ hdr_changed =
+ !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
+
+ if (!scaling_changed && !abm_changed && !hdr_changed)
+ continue;
+
+ stream_update.stream = dm_new_crtc_state->stream;
+ if (scaling_changed) {
+ update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
+ dm_new_con_state, dm_new_crtc_state->stream);
+
+ stream_update.src = dm_new_crtc_state->stream->src;
+ stream_update.dst = dm_new_crtc_state->stream->dst;
+ }
+
+ if (abm_changed) {
+ dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
+
+ stream_update.abm_level = &dm_new_crtc_state->abm_level;
+ }
+
+ if (hdr_changed) {
+ fill_hdr_info_packet(new_con_state, &hdr_packet);
+ stream_update.hdr_static_metadata = &hdr_packet;
+ }
+
+ status = dc_stream_get_status(dm_new_crtc_state->stream);
+
+ if (WARN_ON(!status))
+ continue;
+
+ WARN_ON(!status->plane_count);
+
+ /*
+ * TODO: DC refuses to perform stream updates without a dc_surface_update.
+ * Here we create an empty update on each plane.
+ * To fix this, DC should permit updating only stream properties.
+ */
+ dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
+ if (!dummy_updates) {
+ DRM_ERROR("Failed to allocate memory for dummy_updates.\n");
+ continue;
+ }
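+		/*
+		 * Note: every dummy update points at plane_states[0]; the
+		 * surfaces are placeholders only, and stream_update carries
+		 * the real payload here.
+		 */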
+ for (j = 0; j < status->plane_count; j++)
+ dummy_updates[j].surface = status->plane_states[0];
+
+
+ mutex_lock(&dm->dc_lock);
+ dc_update_planes_and_stream(dm->dc,
+ dummy_updates,
+ status->plane_count,
+ dm_new_crtc_state->stream,
+ &stream_update);
+ mutex_unlock(&dm->dc_lock);
+ kfree(dummy_updates);
+ }
+
+	/*
+	 * Enable interrupts for CRTCs that are newly enabled or went through
+	 * a modeset. This was intentionally deferred until after the front
+	 * end state was modified to wait until the OTG was on, so that the
+	 * IRQ handlers don't access stale or invalid state.
+	 */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+#ifdef CONFIG_DEBUG_FS
+ enum amdgpu_dm_pipe_crc_source cur_crc_src;
+#endif
+ /* Count number of newly disabled CRTCs for dropping PM refs later. */
+ if (old_crtc_state->active && !new_crtc_state->active)
+ crtc_disable_count++;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ /* For freesync config update on crtc state and params for irq */
+ update_stream_irq_parameters(dm, dm_new_crtc_state);
+
+#ifdef CONFIG_DEBUG_FS
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ cur_crc_src = acrtc->dm_irq_params.crc_src;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+#endif
+
+ if (new_crtc_state->active &&
+ (!old_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+ dc_stream_retain(dm_new_crtc_state->stream);
+ acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
+ manage_dm_interrupts(adev, acrtc, true);
+ }
+ /* Handle vrr on->off / off->on transitions */
+ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
+
+#ifdef CONFIG_DEBUG_FS
+ if (new_crtc_state->active &&
+ (!old_crtc_state->active ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
+			/*
+			 * The front end may have changed, so reapply the CRC
+			 * capture settings for the stream.
+			 */
+ if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ if (amdgpu_dm_crc_window_is_activated(crtc)) {
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ acrtc->dm_irq_params.window_param.update_win = true;
+
+					/*
+					 * It takes 2 frames for HW to stably generate CRC when
+					 * resuming from suspend, so we set skip_frame_cnt to 2.
+					 */
+ acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+ }
+#endif
+ if (amdgpu_dm_crtc_configure_crc_source(
+ crtc, dm_new_crtc_state, cur_crc_src))
+				DRM_DEBUG_DRIVER("Failed to configure crc source\n");
+ }
+ }
+#endif
+ }
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
+ if (new_crtc_state->async_flip)
+ wait_for_vblank = false;
+
+ /* update planes when needed per crtc*/
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state->stream)
+ amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
+ }
+
+ /* Enable writeback */
+ for_each_new_connector_in_state(state, connector, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ if (!new_con_state->writeback_job)
+ continue;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+
+ if (!new_crtc_state)
+ continue;
+
+ if (acrtc->wb_enabled)
+ continue;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ dm_set_writeback(dm, dm_new_crtc_state, connector, new_con_state);
+ acrtc->wb_enabled = true;
+ }
+
+ /* Update audio instances for each connector. */
+ amdgpu_dm_commit_audio(dev, state);
+
+ /* restore the backlight level */
+ for (i = 0; i < dm->num_of_edps; i++) {
+ if (dm->backlight_dev[i] &&
+ (dm->actual_brightness[i] != dm->brightness[i]))
+ amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
+ }
+
+	/*
+	 * Send a vblank event for all events not handled in the flip path, and
+	 * mark the event as consumed for drm_atomic_helper_commit_hw_done.
+	 */
+ spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+
+ if (new_crtc_state->event)
+ drm_send_event_locked(dev, &new_crtc_state->event->base);
+
+ new_crtc_state->event = NULL;
+ }
+ spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+
+ /* Signal HW programming completion */
+ drm_atomic_helper_commit_hw_done(state);
+
+ if (wait_for_vblank)
+ drm_atomic_helper_wait_for_flip_done(dev, state);
+
+ drm_atomic_helper_cleanup_planes(dev, state);
+
+ /* Don't free the memory if we are hitting this as part of suspend.
+ * This way we don't free any memory during suspend; see
+ * amdgpu_bo_free_kernel(). The memory will be freed in the first
+ * non-suspend modeset or when the driver is torn down.
+ */
+ if (!adev->in_suspend) {
+ /* return the stolen vga memory back to VRAM */
+ if (!adev->mman.keep_stolen_vga_memory)
+ amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
+ }
+
+	/*
+	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
+	 * so we can put the GPU into runtime suspend if we're not driving any
+	 * displays anymore.
+	 */
+ for (i = 0; i < crtc_disable_count; i++)
+ pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
+}
+
+static int dm_force_atomic_commit(struct drm_connector *connector)
+{
+ int ret = 0;
+ struct drm_device *ddev = connector->dev;
+ struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
+ struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ struct drm_plane *plane = disconnected_acrtc->base.primary;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ddev->mode_config.acquire_ctx;
+
+	/* Construct an atomic state to restore the previous display settings */
+
+ /*
+ * Attach connectors to drm_atomic_state
+ */
+ conn_state = drm_atomic_get_connector_state(state, connector);
+
+ ret = PTR_ERR_OR_ZERO(conn_state);
+ if (ret)
+ goto out;
+
+	/* Attach CRTC to drm_atomic_state */
+ crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
+
+ ret = PTR_ERR_OR_ZERO(crtc_state);
+ if (ret)
+ goto out;
+
+ /* force a restore */
+ crtc_state->mode_changed = true;
+
+ /* Attach plane to drm_atomic_state */
+ plane_state = drm_atomic_get_plane_state(state, plane);
+
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (ret)
+ goto out;
+
+ /* Call commit internally with the state we just constructed */
+ ret = drm_atomic_commit(state);
+
+out:
+ drm_atomic_state_put(state);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
+
+ return ret;
+}
+
+/*
+ * This function handles all cases when a set mode does not come upon hotplug.
+ * This includes when a display is unplugged and then plugged back into the
+ * same port, and when running without usermode desktop manager support.
+ */
+void dm_restore_drm_connector_state(struct drm_device *dev,
+ struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct amdgpu_crtc *disconnected_acrtc;
+ struct dm_crtc_state *acrtc_state;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ if (!aconnector->dc_sink || !connector->state || !connector->encoder)
+ return;
+
+ disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ if (!disconnected_acrtc)
+ return;
+
+ acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+ if (!acrtc_state->stream)
+ return;
+
+	/*
+	 * If the previous sink is not released and different from the current,
+	 * we deduce we are in a state where we cannot rely on a usermode call
+	 * to turn on the display, so we do it here.
+	 */
+ if (acrtc_state->stream->sink != aconnector->dc_sink)
+ dm_force_atomic_commit(&aconnector->base);
+}
+
+/*
+ * Grabs all modesetting locks to serialize against any blocking commits,
+ * and waits for completion of all nonblocking commits.
+ */
+static int do_aquire_global_lock(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_commit *commit;
+ long ret;
+
+	/*
+	 * Adding all modeset locks to acquire_ctx ensures that when the
+	 * framework releases it, the extra locks we are taking here will
+	 * get released too.
+	 */
+ ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
+ if (ret)
+ return ret;
+
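+	/*
+	 * Take a reference on the first pending commit of each CRTC under
+	 * commit_lock so it cannot be freed while we wait on it below.
+	 */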
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit, commit_entry);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->commit_lock);
+
+ if (!commit)
+ continue;
+
+		/*
+		 * Make sure all pending HW programming has completed and all
+		 * page flips are done.
+		 */
+ ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
+
+ if (ret > 0)
+ ret = wait_for_completion_interruptible_timeout(
+ &commit->flip_done, 10*HZ);
+
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+ crtc->base.id, crtc->name);
+
+ drm_crtc_commit_put(commit);
+ }
+
+ return ret < 0 ? ret : 0;
+}
+
+static void get_freesync_config_for_crtc(
+ struct dm_crtc_state *new_crtc_state,
+ struct dm_connector_state *new_con_state)
+{
+ struct mod_freesync_config config = {0};
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_display_mode *mode = &new_crtc_state->base.mode;
+ int vrefresh = drm_mode_vrefresh(mode);
+ bool fs_vid_mode = false;
+
+ if (new_con_state->base.connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ return;
+
+ aconnector = to_amdgpu_dm_connector(new_con_state->base.connector);
+
+ new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
+ vrefresh >= aconnector->min_vfreq &&
+ vrefresh <= aconnector->max_vfreq;
+
+ if (new_crtc_state->vrr_supported) {
+ new_crtc_state->stream->ignore_msa_timing_param = true;
+ fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
+
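+		/* min/max_vfreq are in Hz; mod_freesync expects micro-Hz (uhz) */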
+ config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
+ config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
+ config.vsif_supported = true;
+ config.btr = true;
+
+ if (fs_vid_mode) {
+ config.state = VRR_STATE_ACTIVE_FIXED;
+ config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
+ goto out;
+ } else if (new_crtc_state->base.vrr_enabled) {
+ config.state = VRR_STATE_ACTIVE_VARIABLE;
+ } else {
+ config.state = VRR_STATE_INACTIVE;
+ }
+ }
+out:
+ new_crtc_state->freesync_config = config;
+}
+
+static void reset_freesync_config_for_crtc(
+ struct dm_crtc_state *new_crtc_state)
+{
+ new_crtc_state->vrr_supported = false;
+
+ memset(&new_crtc_state->vrr_infopacket, 0,
+ sizeof(new_crtc_state->vrr_infopacket));
+}
+
+static bool
+is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state)
+{
+ const struct drm_display_mode *old_mode, *new_mode;
+
+ if (!old_crtc_state || !new_crtc_state)
+ return false;
+
+ old_mode = &old_crtc_state->mode;
+ new_mode = &new_crtc_state->mode;
+
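+	/*
+	 * Intentionally mixed ==/!= checks: return true only when the two
+	 * modes are identical except for the vertical front porch (vtotal,
+	 * vsync_start and vsync_end all differ while the vsync pulse width
+	 * stays the same), which is how freesync video modes are derived
+	 * from a common base timing.
+	 */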
+ if (old_mode->clock == new_mode->clock &&
+ old_mode->hdisplay == new_mode->hdisplay &&
+ old_mode->vdisplay == new_mode->vdisplay &&
+ old_mode->htotal == new_mode->htotal &&
+ old_mode->vtotal != new_mode->vtotal &&
+ old_mode->hsync_start == new_mode->hsync_start &&
+ old_mode->vsync_start != new_mode->vsync_start &&
+ old_mode->hsync_end == new_mode->hsync_end &&
+ old_mode->vsync_end != new_mode->vsync_end &&
+ old_mode->hskew == new_mode->hskew &&
+ old_mode->vscan == new_mode->vscan &&
+ (old_mode->vsync_end - old_mode->vsync_start) ==
+ (new_mode->vsync_end - new_mode->vsync_start))
+ return true;
+
+ return false;
+}
+
+static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
+{
+ u64 num, den, res;
+ struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
+
+ dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
+
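+	/*
+	 * Fixed refresh rate in uHz = pixel clock (Hz) * 10^6 / (htotal * vtotal),
+	 * e.g. 148500 kHz with a 2200x1125 total gives 60000000 uHz (60 Hz).
+	 */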
+ num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
+ den = (unsigned long long)new_crtc_state->mode.htotal *
+ (unsigned long long)new_crtc_state->mode.vtotal;
+
+ res = div_u64(num, den);
+ dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
+}
+
+static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state,
+ bool enable,
+ bool *lock_and_validation_needed)
+{
+ struct dm_atomic_state *dm_state = NULL;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct dc_stream_state *new_stream;
+ int ret = 0;
+
+ /*
+ * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
+ * update changed items
+ */
+ struct amdgpu_crtc *acrtc = NULL;
+ struct drm_connector *connector = NULL;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
+ struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
+
+ new_stream = NULL;
+
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ acrtc = to_amdgpu_crtc(crtc);
+ connector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
+ if (connector)
+ aconnector = to_amdgpu_dm_connector(connector);
+
+ /* TODO This hack should go away */
+ if (connector && enable) {
+ /* Make sure fake sink is created in plug-in scenario */
+ drm_new_conn_state = drm_atomic_get_new_connector_state(state,
+ connector);
+ drm_old_conn_state = drm_atomic_get_old_connector_state(state,
+ connector);
+
+ if (IS_ERR(drm_new_conn_state)) {
+ ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
+ goto fail;
+ }
+
+ dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
+ dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ goto skip_modeset;
+
+ new_stream = create_validate_stream_for_sink(aconnector,
+ &new_crtc_state->mode,
+ dm_new_conn_state,
+ dm_old_crtc_state->stream);
+
+		/*
+		 * We can have no stream on ACTION_SET if a display was
+		 * disconnected during S3. In this case it is not an error:
+		 * the OS will be updated after detection and will do the
+		 * right thing on the next atomic commit.
+		 */
+
+ if (!new_stream) {
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /*
+ * TODO: Check VSDB bits to decide whether this should
+ * be enabled or not.
+ */
+ new_stream->triggered_crtc_reset.enabled =
+ dm->force_timing_sync;
+
+ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
+
+ ret = fill_hdr_info_packet(drm_new_conn_state,
+ &new_stream->hdr_static_metadata);
+ if (ret)
+ goto fail;
+
+ /*
+ * If we already removed the old stream from the context
+ * (and set the new stream to NULL) then we can't reuse
+ * the old stream even if the stream and scaling are unchanged.
+ * We'll hit the BUG_ON and black screen.
+ *
+ * TODO: Refactor this function to allow this check to work
+ * in all conditions.
+ */
+ if (amdgpu_freesync_vid_mode &&
+ dm_new_crtc_state->stream &&
+ is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
+ goto skip_modeset;
+
+ if (dm_new_crtc_state->stream &&
+ dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
+ new_crtc_state->mode_changed = false;
+			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d\n",
+					 new_crtc_state->mode_changed);
+ }
+ }
+
+ /* mode_changed flag may get updated above, need to check again */
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ goto skip_modeset;
+
+ drm_dbg_state(state->dev,
+ "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
+ acrtc->crtc_id,
+ new_crtc_state->enable,
+ new_crtc_state->active,
+ new_crtc_state->planes_changed,
+ new_crtc_state->mode_changed,
+ new_crtc_state->active_changed,
+ new_crtc_state->connectors_changed);
+
+ /* Remove stream for any changed/disabled CRTC */
+ if (!enable) {
+
+ if (!dm_old_crtc_state->stream)
+ goto skip_modeset;
+
+ /* Unset freesync video if it was active before */
+ if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
+ dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
+ dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
+ }
+
+ /* Now check if we should set freesync video mode */
+ if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
+ dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ is_timing_unchanged_for_freesync(new_crtc_state,
+ old_crtc_state)) {
+ new_crtc_state->mode_changed = false;
+			DRM_DEBUG_DRIVER(
+				"Mode change not required for front porch change, setting mode_changed to %d\n",
+				new_crtc_state->mode_changed);
+
+ set_freesync_fixed_config(dm_new_crtc_state);
+
+ goto skip_modeset;
+ } else if (amdgpu_freesync_vid_mode && aconnector &&
+ is_freesync_video_mode(&new_crtc_state->mode,
+ aconnector)) {
+ struct drm_display_mode *high_mode;
+
+ high_mode = get_highest_refresh_rate_mode(aconnector, false);
+ if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
+ set_freesync_fixed_config(dm_new_crtc_state);
+ }
+
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
+
+ DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
+ crtc->base.id);
+
+ /* i.e. reset mode */
+ if (dc_state_remove_stream(
+ dm->dc,
+ dm_state->context,
+ dm_old_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dc_stream_release(dm_old_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+
+ reset_freesync_config_for_crtc(dm_new_crtc_state);
+
+ *lock_and_validation_needed = true;
+
+	} else { /* Add stream for any updated/enabled CRTC */
+		/*
+		 * Quick fix to prevent a NULL pointer dereference on new_stream
+		 * when MST connectors are added but not found in the existing
+		 * crtc_state in chained mode.
+		 * TODO: dig out the root cause of this.
+		 */
+ if (!connector)
+ goto skip_modeset;
+
+ if (modereset_required(new_crtc_state))
+ goto skip_modeset;
+
+ if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
+ dm_old_crtc_state->stream)) {
+
+ WARN_ON(dm_new_crtc_state->stream);
+
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
+
+ dm_new_crtc_state->stream = new_stream;
+
+ dc_stream_retain(new_stream);
+
+ DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
+ crtc->base.id);
+
+ if (dc_state_add_stream(
+ dm->dc,
+ dm_state->context,
+ dm_new_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ *lock_and_validation_needed = true;
+ }
+ }
+
+skip_modeset:
+ /* Release extra reference */
+ if (new_stream)
+ dc_stream_release(new_stream);
+
+ /*
+ * We want to do dc stream updates that do not require a
+ * full modeset below.
+ */
+ if (!(enable && connector && new_crtc_state->active))
+ return 0;
+	/*
+	 * Given the above conditions, the dc stream cannot be NULL because:
+	 * 1. we're in the process of enabling CRTCs (the stream has just been
+	 *    added to the dc context, or is already on the context),
+	 * 2. the CRTC has a valid connector attached, and
+	 * 3. the CRTC is currently active and enabled.
+	 * => The dc stream state currently exists.
+	 */
+ BUG_ON(dm_new_crtc_state->stream == NULL);
+
+ /* Scaling or underscan settings */
+ if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))
+ update_stream_scaling_settings(
+ &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
+
+ /* ABM settings */
+ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
+
+ /*
+ * Color management settings. We also update color properties
+ * when a modeset is needed, to ensure it gets reprogrammed.
+ */
+ if (dm_new_crtc_state->base.color_mgmt_changed ||
+ dm_old_crtc_state->regamma_tf != dm_new_crtc_state->regamma_tf ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
+ if (ret)
+ goto fail;
+ }
+
+ /* Update Freesync settings. */
+ get_freesync_config_for_crtc(dm_new_crtc_state,
+ dm_new_conn_state);
+
+ return ret;
+
+fail:
+ if (new_stream)
+ dc_stream_release(new_stream);
+ return ret;
+}
+
+static bool should_reset_plane(struct drm_atomic_state *state,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state)
+{
+ struct drm_plane *other;
+ struct drm_plane_state *old_other_state, *new_other_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
+ int i;
+
+	/*
+	 * TODO: Remove this hack for all ASICs once it is proven that fast
+	 * updates work fine on DCN3.2+.
+	 */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(3, 2, 0) &&
+ state->allow_modeset)
+ return true;
+
+ /* Exit early if we know that we're adding or removing the plane. */
+ if (old_plane_state->crtc != new_plane_state->crtc)
+ return true;
+
+ /* old crtc == new_crtc == NULL, plane not in context. */
+ if (!new_plane_state->crtc)
+ return false;
+
+ new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+
+ if (!new_crtc_state)
+ return true;
+
+ /* CRTC Degamma changes currently require us to recreate planes. */
+ if (new_crtc_state->color_mgmt_changed)
+ return true;
+
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state))
+ return true;
+
+ /*
+ * If there are any new primary or overlay planes being added or
+ * removed then the z-order can potentially change. To ensure
+ * correct z-order and pipe acquisition the current DC architecture
+ * requires us to remove and recreate all existing planes.
+ *
+ * TODO: Come up with a more elegant solution for this.
+ */
+ for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
+ struct amdgpu_framebuffer *old_afb, *new_afb;
+ struct dm_plane_state *dm_new_other_state, *dm_old_other_state;
+
+ dm_new_other_state = to_dm_plane_state(new_other_state);
+ dm_old_other_state = to_dm_plane_state(old_other_state);
+
+ if (other->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (old_other_state->crtc != new_plane_state->crtc &&
+ new_other_state->crtc != new_plane_state->crtc)
+ continue;
+
+ if (old_other_state->crtc != new_other_state->crtc)
+ return true;
+
+ /* Src/dst size and scaling updates. */
+ if (old_other_state->src_w != new_other_state->src_w ||
+ old_other_state->src_h != new_other_state->src_h ||
+ old_other_state->crtc_w != new_other_state->crtc_w ||
+ old_other_state->crtc_h != new_other_state->crtc_h)
+ return true;
+
+ /* Rotation / mirroring updates. */
+ if (old_other_state->rotation != new_other_state->rotation)
+ return true;
+
+ /* Blending updates. */
+ if (old_other_state->pixel_blend_mode !=
+ new_other_state->pixel_blend_mode)
+ return true;
+
+ /* Alpha updates. */
+ if (old_other_state->alpha != new_other_state->alpha)
+ return true;
+
+ /* Colorspace changes. */
+ if (old_other_state->color_range != new_other_state->color_range ||
+ old_other_state->color_encoding != new_other_state->color_encoding)
+ return true;
+
+ /* HDR/Transfer Function changes. */
+ if (dm_old_other_state->degamma_tf != dm_new_other_state->degamma_tf ||
+ dm_old_other_state->degamma_lut != dm_new_other_state->degamma_lut ||
+ dm_old_other_state->hdr_mult != dm_new_other_state->hdr_mult ||
+ dm_old_other_state->ctm != dm_new_other_state->ctm ||
+ dm_old_other_state->shaper_lut != dm_new_other_state->shaper_lut ||
+ dm_old_other_state->shaper_tf != dm_new_other_state->shaper_tf ||
+ dm_old_other_state->lut3d != dm_new_other_state->lut3d ||
+ dm_old_other_state->blend_lut != dm_new_other_state->blend_lut ||
+ dm_old_other_state->blend_tf != dm_new_other_state->blend_tf)
+ return true;
+
+ /* Framebuffer checks fall at the end. */
+ if (!old_other_state->fb || !new_other_state->fb)
+ continue;
+
+ /* Pixel format changes can require bandwidth updates. */
+ if (old_other_state->fb->format != new_other_state->fb->format)
+ return true;
+
+ old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
+ new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
+
+ /* Tiling and DCC changes also require bandwidth updates. */
+ if (old_afb->tiling_flags != new_afb->tiling_flags ||
+ old_afb->base.modifier != new_afb->base.modifier)
+ return true;
+ }
+
+ return false;
+}
+
+static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
+ struct drm_plane_state *new_plane_state,
+ struct drm_framebuffer *fb)
+{
+ struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
+ struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
+ unsigned int pitch;
+ bool linear;
+
+ if (fb->width > new_acrtc->max_cursor_width ||
+ fb->height > new_acrtc->max_cursor_height) {
+ DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
+ new_plane_state->fb->width,
+ new_plane_state->fb->height);
+ return -EINVAL;
+ }
+ if (new_plane_state->src_w != fb->width << 16 ||
+ new_plane_state->src_h != fb->height << 16) {
+ DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
+ return -EINVAL;
+ }
+
+ /* Pitch in pixels */
+ pitch = fb->pitches[0] / fb->format->cpp[0];
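+	/* e.g. a 256-wide ARGB8888 cursor: pitches[0] = 1024, cpp = 4, pitch = 256 */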
+
+ if (fb->width != pitch) {
+		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d\n",
+ fb->width, pitch);
+ return -EINVAL;
+ }
+
+ switch (pitch) {
+ case 64:
+ case 128:
+ case 256:
+ /* FB pitch is supported by cursor plane */
+ break;
+ default:
+ DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
+ return -EINVAL;
+ }
+
+ /* Core DRM takes care of checking FB modifiers, so we only need to
+ * check tiling flags when the FB doesn't have a modifier.
+ */
+ if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
+ if (adev->family < AMDGPU_FAMILY_AI) {
+ linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
+ AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
+ AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
+ } else {
+ linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
+ }
+ if (!linear) {
+			DRM_DEBUG_ATOMIC("Cursor FB not linear\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int dm_update_plane_state(struct dc *dc,
+ struct drm_atomic_state *state,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state,
+ bool enable,
+ bool *lock_and_validation_needed,
+ bool *is_top_most_overlay)
+{
+
+ struct dm_atomic_state *dm_state = NULL;
+ struct drm_crtc *new_plane_crtc, *old_plane_crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
+ struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
+ struct amdgpu_crtc *new_acrtc;
+ bool needs_reset;
+ int ret = 0;
+
+
+ new_plane_crtc = new_plane_state->crtc;
+ old_plane_crtc = old_plane_state->crtc;
+ dm_new_plane_state = to_dm_plane_state(new_plane_state);
+ dm_old_plane_state = to_dm_plane_state(old_plane_state);
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ if (!enable || !new_plane_crtc ||
+ drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
+
+ new_acrtc = to_amdgpu_crtc(new_plane_crtc);
+
+ if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
+ DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
+ return -EINVAL;
+ }
+
+ if (new_plane_state->fb) {
+ ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
+ new_plane_state->fb);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+ }
+
+ needs_reset = should_reset_plane(state, plane, old_plane_state,
+ new_plane_state);
+
+ /* Remove any changed/removed planes */
+ if (!enable) {
+ if (!needs_reset)
+ return 0;
+
+ if (!old_plane_crtc)
+ return 0;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(
+ state, old_plane_crtc);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ if (!dm_old_crtc_state->stream)
+ return 0;
+
+ DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, old_plane_crtc->base.id);
+
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ return ret;
+
+ if (!dc_state_remove_plane(
+ dc,
+ dm_old_crtc_state->stream,
+ dm_old_plane_state->dc_state,
+ dm_state->context)) {
+
+ return -EINVAL;
+ }
+
+ if (dm_old_plane_state->dc_state)
+ dc_plane_state_release(dm_old_plane_state->dc_state);
+
+ dm_new_plane_state->dc_state = NULL;
+
+ *lock_and_validation_needed = true;
+
+ } else { /* Add new planes */
+ struct dc_plane_state *dc_new_plane_state;
+
+ if (drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
+
+ if (!new_plane_crtc)
+ return 0;
+
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (!dm_new_crtc_state->stream)
+ return 0;
+
+ if (!needs_reset)
+ return 0;
+
+ ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
+ if (ret)
+ return ret;
+
+ WARN_ON(dm_new_plane_state->dc_state);
+
+ dc_new_plane_state = dc_create_plane_state(dc);
+ if (!dc_new_plane_state)
+ return -ENOMEM;
+
+		/* Block the top most plane from being a video plane */
+		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+			if (amdgpu_dm_plane_is_video_format(new_plane_state->fb->format->format) &&
+			    *is_top_most_overlay) {
+				/* Release the plane state created above so it isn't leaked */
+				dc_plane_state_release(dc_new_plane_state);
+				return -EINVAL;
+			}
+
+			*is_top_most_overlay = false;
+		}
+
+ DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
+ plane->base.id, new_plane_crtc->base.id);
+
+ ret = fill_dc_plane_attributes(
+ drm_to_adev(new_plane_crtc->dev),
+ dc_new_plane_state,
+ new_plane_state,
+ new_crtc_state);
+ if (ret) {
+ dc_plane_state_release(dc_new_plane_state);
+ return ret;
+ }
+
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret) {
+ dc_plane_state_release(dc_new_plane_state);
+ return ret;
+ }
+
+ /*
+ * Any atomic check errors that occur after this will
+ * not need a release. The plane state will be attached
+ * to the stream, and therefore part of the atomic
+ * state. It'll be released when the atomic state is
+ * cleaned.
+ */
+ if (!dc_state_add_plane(
+ dc,
+ dm_new_crtc_state->stream,
+ dc_new_plane_state,
+ dm_state->context)) {
+
+ dc_plane_state_release(dc_new_plane_state);
+ return -EINVAL;
+ }
+
+ dm_new_plane_state->dc_state = dc_new_plane_state;
+
+ dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
+
+ /* Tell DC to do a full surface update every time there
+ * is a plane change. Inefficient, but works for now.
+ */
+ dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
+
+ *lock_and_validation_needed = true;
+ }
+
+
+ return ret;
+}
+
+static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
+ int *src_w, int *src_h)
+{
+ switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_90:
+ case DRM_MODE_ROTATE_270:
+ *src_w = plane_state->src_h >> 16;
+ *src_h = plane_state->src_w >> 16;
+ break;
+ case DRM_MODE_ROTATE_0:
+ case DRM_MODE_ROTATE_180:
+ default:
+ *src_w = plane_state->src_w >> 16;
+ *src_h = plane_state->src_h >> 16;
+ break;
+ }
+}
+
+static void
+dm_get_plane_scale(struct drm_plane_state *plane_state,
+ int *out_plane_scale_w, int *out_plane_scale_h)
+{
+ int plane_src_w, plane_src_h;
+
+ dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
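+	/* Scale in 1/1000 units: 1000 is 1:1, e.g. 1920 src -> 3840 crtc gives 2000 */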
+ *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
+ *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
+}
+
+static int dm_check_crtc_cursor(struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state)
+{
+ struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane_state *new_cursor_state, *new_underlying_state;
+ int i;
+ int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
+ bool any_relevant_change = false;
+
+	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
+	 * cursor per pipe but it's going to inherit the scaling and
+	 * positioning from the underlying pipe. Check that the cursor plane's
+	 * scaling matches the underlying planes'.
+	 */
+
+ /* If no plane was enabled or changed scaling, no need to check again */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
+
+ if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
+ continue;
+
+ if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
+ any_relevant_change = true;
+ break;
+ }
+
+ if (new_plane_state->fb == old_plane_state->fb &&
+ new_plane_state->crtc_w == old_plane_state->crtc_w &&
+ new_plane_state->crtc_h == old_plane_state->crtc_h)
+ continue;
+
+ dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
+ dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
+
+ if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
+ any_relevant_change = true;
+ break;
+ }
+ }
+
+ if (!any_relevant_change)
+ return 0;
+
+ new_cursor_state = drm_atomic_get_plane_state(state, cursor);
+ if (IS_ERR(new_cursor_state))
+ return PTR_ERR(new_cursor_state);
+
+ if (!new_cursor_state->fb)
+ return 0;
+
+ dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
+
+ /* Need to check all enabled planes, even if this commit doesn't change
+ * their state
+ */
+ i = drm_atomic_add_affected_planes(state, crtc);
+ if (i)
+ return i;
+
+ for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
+ /* Narrow down to non-cursor planes on the same CRTC as the cursor */
+ if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
+ continue;
+
+ /* Ignore disabled planes */
+ if (!new_underlying_state->fb)
+ continue;
+
+ dm_get_plane_scale(new_underlying_state,
+ &underlying_scale_w, &underlying_scale_h);
+
+ if (cursor_scale_w != underlying_scale_w ||
+ cursor_scale_h != underlying_scale_h) {
+ drm_dbg_atomic(crtc->dev,
+ "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
+ cursor->base.id, cursor->name, underlying->base.id, underlying->name);
+ return -EINVAL;
+ }
+
+ /* If this plane covers the whole CRTC, no need to check planes underneath */
+ if (new_underlying_state->crtc_x <= 0 &&
+ new_underlying_state->crtc_y <= 0 &&
+ new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
+ new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
+ break;
+ }
+
+ return 0;
+}
+
+static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state, *old_conn_state;
+ struct amdgpu_dm_connector *aconnector = NULL;
+ int i;
+
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
+ if (!conn_state->crtc)
+ conn_state = old_conn_state;
+
+ if (conn_state->crtc != crtc)
+ continue;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+ continue;
+
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (!aconnector->mst_output_port || !aconnector->mst_root)
+ aconnector = NULL;
+ else
+ break;
+ }
+
+ if (!aconnector)
+ return 0;
+
+ return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
+}
+
+/**
+ * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
+ *
+ * @dev: The DRM device
+ * @state: The atomic state to commit
+ *
+ * Validate that the given atomic state is programmable by DC into hardware.
+ * This involves constructing a &struct dc_state reflecting the new hardware
+ * state we wish to commit, then querying DC to see if it is programmable. It's
+ * important not to modify the existing DC state. Otherwise, atomic_check
+ * may unexpectedly commit hardware changes.
+ *
+ * When validating the DC state, it's important that the right locks are
+ * acquired. For the full-update case, which removes/adds/updates streams on
+ * one CRTC while flipping on another CRTC, acquiring the global lock
+ * guarantees that any such full-update commit will wait for completion of
+ * any outstanding flips using DRM's synchronization events.
+ *
+ * Note that DM adds the affected connectors for all CRTCs in state, even when
+ * that might not seem necessary. This is because DC stream creation requires
+ * the DC sink, which is tied to the DRM connector state. Cleaning this up
+ * should be possible but non-trivial - a possible TODO item.
+ *
+ * Return: 0 on success, or a negative error code if validation failed.
+ */
+static int amdgpu_dm_atomic_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dm_atomic_state *dm_state = NULL;
+ struct dc *dc = adev->dm.dc;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_con_state, *new_con_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ enum dc_status status;
+ int ret, i;
+ bool lock_and_validation_needed = false;
+ bool is_top_most_overlay = true;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_topology_state *mst_state;
+ struct dsc_mst_fairness_vars vars[MAX_PIPES];
+
+ trace_amdgpu_dm_atomic_check_begin(state);
+
+ ret = drm_atomic_helper_check_modeset(dev, state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
+ goto fail;
+ }
+
+ /* Check connector changes */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+
+ /* Skip connectors that are disabled or part of modeset already. */
+ if (!new_con_state->crtc)
+ continue;
+
+ new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
+ if (IS_ERR(new_crtc_state)) {
+ DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
+ ret = PTR_ERR(new_crtc_state);
+ goto fail;
+ }
+
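+		/*
+		 * An ABM or scaling change must be treated like a connector
+		 * change so the CRTC goes through the modeset checks below.
+		 */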
+ if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
+ dm_old_con_state->scaling != dm_new_con_state->scaling)
+ new_crtc_state->connectors_changed = true;
+ }
+
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ ret = add_affected_mst_dsc_crtcs(state, crtc);
+ if (ret) {
+ DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
+ goto fail;
+ }
+ }
+ }
+ }
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+ !new_crtc_state->color_mgmt_changed &&
+ old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
+ dm_old_crtc_state->dsc_force_changed == false)
+ continue;
+
+ ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
+ goto fail;
+ }
+
+ if (!new_crtc_state->enable)
+ continue;
+
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
+ goto fail;
+ }
+
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
+ goto fail;
+ }
+
+ if (dm_old_crtc_state->dsc_force_changed)
+ new_crtc_state->mode_changed = true;
+ }
+
+ /*
+ * Add all primary and overlay planes on the CRTC to the state
+ * whenever a plane is enabled to maintain correct z-ordering
+ * and to enable fast surface updates.
+ */
+ drm_for_each_crtc(crtc, dev) {
+ bool modified = false;
+
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (new_plane_state->crtc == crtc ||
+ old_plane_state->crtc == crtc) {
+ modified = true;
+ break;
+ }
+ }
+
+ if (!modified)
+ continue;
+
+ drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ new_plane_state =
+ drm_atomic_get_plane_state(state, plane);
+
+ if (IS_ERR(new_plane_state)) {
+ ret = PTR_ERR(new_plane_state);
+				DRM_DEBUG_DRIVER("drm_atomic_get_plane_state() failed\n");
+ goto fail;
+ }
+ }
+ }
+
+ /*
+ * DC consults the zpos (layer_index in DC terminology) to determine the
+ * hw plane on which to enable the hw cursor (see
+ * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
+ * atomic state, so call drm helper to normalize zpos.
+ */
+ ret = drm_atomic_normalize_zpos(dev, state);
+ if (ret) {
+ drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
+ goto fail;
+ }
+
+	/* Remove existing planes if they are modified */
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
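+		/*
+		 * A framebuffer that moved between memory domains (e.g.
+		 * VRAM <-> GTT) can change bandwidth requirements, so force
+		 * full DC validation.
+		 */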
+ if (old_plane_state->fb && new_plane_state->fb &&
+ get_mem_type(old_plane_state->fb) !=
+ get_mem_type(new_plane_state->fb))
+ lock_and_validation_needed = true;
+
+ ret = dm_update_plane_state(dc, state, plane,
+ old_plane_state,
+ new_plane_state,
+ false,
+ &lock_and_validation_needed,
+ &is_top_most_overlay);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
+ goto fail;
+ }
+ }
+
+ /* Disable all crtcs which require disable */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ ret = dm_update_crtc_state(&adev->dm, state, crtc,
+ old_crtc_state,
+ new_crtc_state,
+ false,
+ &lock_and_validation_needed);
+ if (ret) {
+ DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
+ goto fail;
+ }
+ }
+
+ /* Enable all crtcs which require enable */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ ret = dm_update_crtc_state(&adev->dm, state, crtc,
+ old_crtc_state,
+ new_crtc_state,
+ true,
+ &lock_and_validation_needed);
+ if (ret) {
+ DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
+ goto fail;
+ }
+ }
+
+ /* Add new/modified planes */
+ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+ ret = dm_update_plane_state(dc, state, plane,
+ old_plane_state,
+ new_plane_state,
+ true,
+ &lock_and_validation_needed,
+ &is_top_most_overlay);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
+ goto fail;
+ }
+ }
+
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ ret = pre_validate_dsc(state, &dm_state, vars);
+ if (ret != 0)
+ goto fail;
+ }
+
+ /* Run this here since we want to validate the streams we created */
+ ret = drm_atomic_helper_check_planes(dev, state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
+ goto fail;
+ }
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->mpo_requested)
+ DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
+ }
+
+ /* Check cursor planes scaling */
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
+ goto fail;
+ }
+ }
+
+ if (state->legacy_cursor_update) {
+ /*
+ * This is a fast cursor update coming from the plane update
+ * helper, check if it can be done asynchronously for better
+ * performance.
+ */
+ state->async_update =
+ !drm_atomic_helper_async_check(dev, state);
+
+ /*
+ * Skip the remaining global validation if this is an async
+ * update. Cursor updates can be done without affecting
+ * state or bandwidth calcs and this avoids the performance
+ * penalty of locking the private state object and
+ * allocating a new dc_state.
+ */
+ if (state->async_update)
+ return 0;
+ }
+
+	/* Check scaling and underscan changes */
+	/*
+	 * TODO: Removed scaling changes validation due to inability to commit
+	 * a new stream into the context without causing a full reset. Need to
+	 * decide how to handle this.
+	 */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(
+ drm_atomic_get_new_crtc_state(state, &acrtc->base)))
+ continue;
+
+		/* Skip anything that is not a scaling or underscan change */
+ if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
+ continue;
+
+ lock_and_validation_needed = true;
+ }
+
+ /* set the slot info for each mst_state based on the link encoding format */
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ u8 link_coding_cap;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ if (connector->index == mst_state->mgr->conn_base_id) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
+ drm_dp_mst_update_slots(mst_state, link_coding_cap);
+
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+ }
+
+	/*
+ * Streams and planes are reset when there are changes that affect
+ * bandwidth. Anything that affects bandwidth needs to go through
+ * DC global validation to ensure that the configuration can be applied
+ * to hardware.
+ *
+ * We have to currently stall out here in atomic_check for outstanding
+ * commits to finish in this case because our IRQ handlers reference
+ * DRM state directly - we can end up disabling interrupts too early
+ * if we don't.
+ *
+ * TODO: Remove this stall and drop DM state private objects.
+ */
+ if (lock_and_validation_needed) {
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
+ goto fail;
+ }
+
+ ret = do_aquire_global_lock(dev, state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
+ goto fail;
+ }
+
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+ if (ret) {
+ DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+
+ ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
+ if (ret) {
+ DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
+ goto fail;
+ }
+
+ /*
+ * Perform validation of MST topology in the state:
+ * We need to perform MST atomic check before calling
+ * dc_validate_global_state(), or there is a chance
+ * to get stuck in an infinite loop and hang eventually.
+ */
+ ret = drm_dp_mst_atomic_check(state);
+ if (ret) {
+ DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
+ goto fail;
+ }
+ status = dc_validate_global_state(dc, dm_state->context, true);
+ if (status != DC_OK) {
+			DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)\n",
+					 dc_status_to_str(status), status);
+ ret = -EINVAL;
+ goto fail;
+ }
+ } else {
+ /*
+ * The commit is a fast update. Fast updates shouldn't change
+ * the DC context, affect global validation, and can have their
+ * commit work done in parallel with other commits not touching
+ * the same resource. If we have a new DC context as part of
+ * the DM atomic state from validation we need to free it and
+ * retain the existing one instead.
+ *
+ * Furthermore, since the DM atomic state only contains the DC
+ * context and can safely be annulled, we can free the state
+ * and clear the associated private object now to free
+ * some memory and avoid a possible use-after-free later.
+ */
+
+ for (i = 0; i < state->num_private_objs; i++) {
+ struct drm_private_obj *obj = state->private_objs[i].ptr;
+
+ if (obj->funcs == adev->dm.atomic_obj.funcs) {
+ int j = state->num_private_objs-1;
+
+ dm_atomic_destroy_state(obj,
+ state->private_objs[i].state);
+
+ /* If i is not at the end of the array then the
+ * last element needs to be moved to where i was
+ * before the array can safely be truncated.
+ */
+ if (i != j)
+ state->private_objs[i] =
+ state->private_objs[j];
+
+ state->private_objs[j].ptr = NULL;
+ state->private_objs[j].state = NULL;
+ state->private_objs[j].old_state = NULL;
+ state->private_objs[j].new_state = NULL;
+
+ state->num_private_objs = j;
+ break;
+ }
+ }
+ }
+
+ /* Store the overall update type for use later in atomic check. */
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct dm_crtc_state *dm_new_crtc_state =
+ to_dm_crtc_state(new_crtc_state);
+
+ /*
+ * Only allow async flips for fast updates that don't change
+ * the FB pitch, the DCC state, rotation, etc.
+ */
+ if (new_crtc_state->async_flip && lock_and_validation_needed) {
+ drm_dbg_atomic(crtc->dev,
+ "[CRTC:%d:%s] async flips are only supported for fast updates\n",
+ crtc->base.id, crtc->name);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dm_new_crtc_state->update_type = lock_and_validation_needed ?
+ UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
+ }
+
+ /* Must be success */
+ WARN_ON(ret);
+
+ trace_amdgpu_dm_atomic_check_finish(state, ret);
+
+ return ret;
+
+fail:
+ if (ret == -EDEADLK)
+ DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
+ else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
+ DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
+ else
+ DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
+
+ trace_amdgpu_dm_atomic_check_finish(state, ret);
+
+ return ret;
+}
+
+static bool is_dp_capable_without_timing_msa(struct dc *dc,
+ struct amdgpu_dm_connector *amdgpu_dm_connector)
+{
+ u8 dpcd_data;
+ bool capable = false;
+
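+	/*
+	 * A sink that sets DP_MSA_TIMING_PAR_IGNORED in its DPCD can ignore
+	 * the MSA timing parameters, which is what lets the source vary the
+	 * vertical blank for adaptive sync.
+	 */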
+ if (amdgpu_dm_connector->dc_link &&
+ dm_helpers_dp_read_dpcd(
+ NULL,
+ amdgpu_dm_connector->dc_link,
+ DP_DOWN_STREAM_PORT_COUNT,
+ &dpcd_data,
+ sizeof(dpcd_data))) {
+		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
+ }
+
+ return capable;
+}
+
+static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
+ unsigned int offset,
+ unsigned int total_length,
+ u8 *data,
+ unsigned int length,
+ struct amdgpu_hdmi_vsdb_info *vsdb)
+{
+ bool res;
+ union dmub_rb_cmd cmd;
+ struct dmub_cmd_send_edid_cea *input;
+ struct dmub_cmd_edid_cea_output *output;
+
+ if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
+ return false;
+
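+	/*
+	 * The extension block is streamed to DMUB in chunks of at most
+	 * DMUB_EDID_CEA_DATA_CHUNK_BYTES; intermediate chunks are answered
+	 * with an ACK and the final chunk with the parsed AMD VSDB result.
+	 */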
+ memset(&cmd, 0, sizeof(cmd));
+
+ input = &cmd.edid_cea.data.input;
+
+ cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
+ cmd.edid_cea.header.sub_type = 0;
+ cmd.edid_cea.header.payload_bytes =
+ sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
+ input->offset = offset;
+ input->length = length;
+ input->cea_total_length = total_length;
+ memcpy(input->payload, data, length);
+
+ res = dc_wake_and_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
+ if (!res) {
+ DRM_ERROR("EDID CEA parser failed\n");
+ return false;
+ }
+
+ output = &cmd.edid_cea.data.output;
+
+ if (output->type == DMUB_CMD__EDID_CEA_ACK) {
+ if (!output->ack.success) {
+ DRM_ERROR("EDID CEA ack failed at offset %d\n",
+ output->ack.offset);
+ }
+ } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
+ if (!output->amd_vsdb.vsdb_found)
+ return false;
+
+ vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
+ vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
+ vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
+ vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
+ } else {
+ DRM_WARN("Unknown EDID CEA parser results\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
+ u8 *edid_ext, int len,
+ struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ int i;
+
+ /* send extension block to DMCU for parsing */
+ for (i = 0; i < len; i += 8) {
+ bool res;
+ int offset;
+
+		/* send 8 bytes at a time */
+ if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
+ return false;
+
+		if (i + 8 == len) {
+			/* EDID block send completed; expect the result */
+ int version, min_rate, max_rate;
+
+ res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
+ if (res) {
+ /* amd vsdb found */
+ vsdb_info->freesync_supported = 1;
+ vsdb_info->amd_vsdb_version = version;
+ vsdb_info->min_refresh_rate_hz = min_rate;
+ vsdb_info->max_refresh_rate_hz = max_rate;
+ return true;
+ }
+ /* not amd vsdb */
+ return false;
+ }
+
+		/* check for ack */
+ res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
+ if (!res)
+ return false;
+ }
+
+ return false;
+}
+
+static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
+ u8 *edid_ext, int len,
+ struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ int i;
+
+	/* send extension block to DMUB for parsing */
+	for (i = 0; i < len; i += 8) {
+		/* send 8 bytes at a time */
+ if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
+ return false;
+ }
+
+ return vsdb_info->freesync_supported;
+}
+
+static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
+ u8 *edid_ext, int len,
+ struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
+ bool ret;
+
+ mutex_lock(&adev->dm.dc_lock);
+ if (adev->dm.dmub_srv)
+ ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
+ else
+ ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
+ mutex_unlock(&adev->dm.dc_lock);
+ return ret;
+}
+
+static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
+ struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ u8 *edid_ext = NULL;
+ int i;
+ int j = 0;
+
+ if (edid == NULL || edid->extensions == 0)
+ return -ENODEV;
+
+ /* Find DisplayID extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (void *)(edid + (i + 1));
+ if (edid_ext[0] == DISPLAYID_EXT)
+ break;
+ }
+
+ while (j < EDID_LENGTH) {
+ struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
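+		/* The IEEE OUI is stored little-endian: byte0 | byte1 << 8 | byte2 << 16 */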
+ unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);
+
+ if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID &&
+ amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) {
+ vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ? true : false;
+ vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3;
+ DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode);
+
+ return true;
+ }
+ j++;
+ }
+
+ return false;
+}
+
+static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
+ struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
+{
+ u8 *edid_ext = NULL;
+ int i;
+ bool valid_vsdb_found = false;
+
+ /*----- drm_find_cea_extension() -----*/
+ /* No EDID or EDID extensions */
+ if (edid == NULL || edid->extensions == 0)
+ return -ENODEV;
+
+ /* Find CEA extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
+ if (edid_ext[0] == CEA_EXT)
+ break;
+ }
+
+ if (i == edid->extensions)
+ return -ENODEV;
+
+ /*----- cea_db_offsets() -----*/
+ if (edid_ext[0] != CEA_EXT)
+ return -ENODEV;
+
+ valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
+
+ return valid_vsdb_found ? i : -ENODEV;
+}
+
+/**
+ * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
+ *
+ * @connector: Connector to query.
+ * @edid: EDID from monitor
+ *
+ * amdgpu supports FreeSync on DP and HDMI displays. The driver needs to keep
+ * track of some of the display information in the internal data struct used
+ * by amdgpu_dm, so this function checks the connector type and sets the
+ * FreeSync parameters accordingly.
+ */
+void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
+ struct edid *edid)
+{
+ int i = 0;
+ struct detailed_timing *timing;
+ struct detailed_non_pixel *data;
+ struct detailed_data_monitor_range *range;
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+ struct dm_connector_state *dm_con_state = NULL;
+ struct dc_sink *sink;
+
+ struct amdgpu_device *adev = drm_to_adev(connector->dev);
+ struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
+ bool freesync_capable = false;
+ enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
+
+ if (!connector->state) {
+		DRM_ERROR("%s - Connector has no state\n", __func__);
+ goto update;
+ }
+
+ sink = amdgpu_dm_connector->dc_sink ?
+ amdgpu_dm_connector->dc_sink :
+ amdgpu_dm_connector->dc_em_sink;
+
+ if (!edid || !sink) {
+ dm_con_state = to_dm_connector_state(connector->state);
+
+ amdgpu_dm_connector->min_vfreq = 0;
+ amdgpu_dm_connector->max_vfreq = 0;
+ amdgpu_dm_connector->pixel_clock_mhz = 0;
+ connector->display_info.monitor_range.min_vfreq = 0;
+ connector->display_info.monitor_range.max_vfreq = 0;
+ freesync_capable = false;
+
+ goto update;
+ }
+
+ dm_con_state = to_dm_connector_state(connector->state);
+
+ if (!adev->dm.freesync_module)
+ goto update;
+
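+	/*
+	 * For DP/eDP sinks that can do variable timings without a timing
+	 * MSA, trust the continuous-frequency EDID feature bit when present
+	 * and otherwise fall back to scanning the detailed timings for a
+	 * monitor range descriptor.
+	 */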
+ if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ sink->sink_signal == SIGNAL_TYPE_EDP)) {
+ bool edid_check_required = false;
+
+ if (is_dp_capable_without_timing_msa(adev->dm.dc,
+ amdgpu_dm_connector)) {
+ if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
+ freesync_capable = true;
+ amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+ } else {
+ edid_check_required = edid->version > 1 ||
+ (edid->version == 1 &&
+ edid->revision > 1);
+ }
+ }
+
+ if (edid_check_required) {
+			for (i = 0; i < 4; i++) {
+ timing = &edid->detailed_timings[i];
+ data = &timing->data.other_data;
+ range = &data->data.range;
+ /*
+ * Check if monitor has continuous frequency mode
+ */
+ if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ continue;
+ /*
+ * Check for flag range limits only. If flag == 1 then
+ * no additional timing information provided.
+ * Default GTF, GTF Secondary curve and CVT are not
+ * supported
+ */
+ if (range->flags != 1)
+ continue;
+
+ connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
+ connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
+
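+				/*
+				 * EDID 1.4 can flag a +255 Hz offset for
+				 * either range limit.
+				 */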
+ if (edid->revision >= 4) {
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MIN_VFREQ)
+ connector->display_info.monitor_range.min_vfreq += 255;
+ if (data->pad2 & DRM_EDID_RANGE_OFFSET_MAX_VFREQ)
+ connector->display_info.monitor_range.max_vfreq += 255;
+ }
+
+ amdgpu_dm_connector->min_vfreq =
+ connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq =
+ connector->display_info.monitor_range.max_vfreq;
+ amdgpu_dm_connector->pixel_clock_mhz =
+ range->pixel_clock_mhz * 10;
+
+ break;
+ }
+
+			if (amdgpu_dm_connector->max_vfreq -
+			    amdgpu_dm_connector->min_vfreq > 10)
+				freesync_capable = true;
+ }
+ parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+
+ if (vsdb_info.replay_mode) {
+ amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode;
+ amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version;
+ amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP;
+ }
+
+ } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+		if (i >= 0 && vsdb_info.freesync_supported) {
+			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
+ amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ freesync_capable = true;
+
+ connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
+ connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
+ }
+ }
+
+ as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);
+
+ if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
+ i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+		if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
+ amdgpu_dm_connector->pack_sdp_v1_3 = true;
+ amdgpu_dm_connector->as_type = as_type;
+ amdgpu_dm_connector->vsdb_info = vsdb_info;
+
+ amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
+ amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
+ if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+ freesync_capable = true;
+
+ connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
+ connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
+ }
+ }
+
+update:
+ if (dm_con_state)
+ dm_con_state->freesync_capable = freesync_capable;
+
+ if (connector->vrr_capable_property)
+ drm_connector_set_vrr_capable_property(connector,
+ freesync_capable);
+}
+
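+/*
+ * Propagate the global force_timing_sync flag to every stream in the current
+ * state and retrigger CRTC synchronization.
+ */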
+void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+ struct dc *dc = adev->dm.dc;
+ int i;
+
+ mutex_lock(&adev->dm.dc_lock);
+ if (dc->current_state) {
+ for (i = 0; i < dc->current_state->stream_count; ++i)
+ dc->current_state->streams[i]
+ ->triggered_crtc_reset.enabled =
+ adev->dm.force_timing_sync;
+
+ dm_enable_per_frame_crtc_master_sync(dc->current_state);
+ dc_trigger_sync(dc, dc->current_state);
+ }
+ mutex_unlock(&adev->dm.dc_lock);
+}
+
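+/* Register write helper routed through CGS; DM_CHECK_ADDR_0 traps address 0. */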
+void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
+ u32 value, const char *func_name)
+{
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ drm_err(adev_to_drm(ctx->driver_context),
+			"invalid register write. address = 0\n");
+ return;
+ }
+#endif
+ cgs_write_register(ctx->cgs_device, address, value);
+ trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
+}
+
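+/*
+ * Register read helper routed through CGS. Reads are rejected (ASSERT) while
+ * a DMUB register-offload gather is in progress, presumably because the
+ * access is being captured for DMUB rather than executed immediately.
+ */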
+uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
+ const char *func_name)
+{
+ u32 value;
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ drm_err(adev_to_drm(ctx->driver_context),
+			"invalid register read. address = 0\n");
+ return 0;
+ }
+#endif
+
+ if (ctx->dmub_srv &&
+ ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
+ !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
+ ASSERT(false);
+ return 0;
+ }
+
+ value = cgs_read_register(ctx->cgs_device, address);
+
+ trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
+
+ return value;
+}
+
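+/*
+ * Kick off a DPIA AUX transfer via DMUB and wait (up to 10 seconds) for the
+ * notification carrying the reply. Returns the reply length on success or
+ * -1 on failure, with the detailed status in *operation_result.
+ */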
+int amdgpu_dm_process_dmub_aux_transfer_sync(
+ struct dc_context *ctx,
+ unsigned int link_index,
+ struct aux_payload *payload,
+ enum aux_return_code_type *operation_result)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ struct dmub_notification *p_notify = adev->dm.dmub_notify;
+ int ret = -1;
+
+ mutex_lock(&adev->dm.dpia_aux_lock);
+ if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
+ *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
+ goto out;
+ }
+
+ if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
+		DRM_ERROR("wait_for_completion_timeout() timed out!\n");
+ *operation_result = AUX_RET_ERROR_TIMEOUT;
+ goto out;
+ }
+
+ if (p_notify->result != AUX_RET_SUCCESS) {
+ /*
+ * Transient states before tunneling is enabled could
+ * lead to this error. We can ignore this for now.
+ */
+ if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
+ DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
+ payload->address, payload->length,
+ p_notify->result);
+ }
+ *operation_result = AUX_RET_ERROR_INVALID_REPLY;
+ goto out;
+ }
+
+	payload->reply[0] = p_notify->aux_reply.command;
+ if (!payload->write && p_notify->aux_reply.length &&
+ (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
+ if (payload->length != p_notify->aux_reply.length) {
+ DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
+ p_notify->aux_reply.length,
+ payload->address, payload->length);
+ *operation_result = AUX_RET_ERROR_INVALID_REPLY;
+ goto out;
+ }
+
+ memcpy(payload->data, p_notify->aux_reply.data,
+ p_notify->aux_reply.length);
+ }
+
+ /* success */
+ ret = p_notify->aux_reply.length;
+ *operation_result = p_notify->result;
+out:
+ reinit_completion(&adev->dm.dmub_aux_transfer_done);
+ mutex_unlock(&adev->dm.dpia_aux_lock);
+ return ret;
+}
+
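+/*
+ * Synchronous SET_CONFIG over DPIA: issue the async DMUB request and, if it
+ * did not complete immediately, wait for the notification before reporting
+ * the status.
+ */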
+int amdgpu_dm_process_dmub_set_config_sync(
+ struct dc_context *ctx,
+ unsigned int link_index,
+ struct set_config_cmd_payload *payload,
+ enum set_config_status *operation_result)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ bool is_cmd_complete;
+ int ret;
+
+ mutex_lock(&adev->dm.dpia_aux_lock);
+ is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
+ link_index, payload, adev->dm.dmub_notify);
+
+ if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
+ ret = 0;
+ *operation_result = adev->dm.dmub_notify->sc_status;
+ } else {
+		DRM_ERROR("wait_for_completion_timeout() timed out!\n");
+ ret = -1;
+ *operation_result = SET_CONFIG_UNKNOWN_ERROR;
+ }
+
+ if (!is_cmd_complete)
+ reinit_completion(&adev->dm.dmub_aux_transfer_done);
+ mutex_unlock(&adev->dm.dpia_aux_lock);
+ return ret;
+}
+
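+/*
+ * Thin wrappers forwarding single DMUB commands and command lists to the
+ * DC DMUB service.
+ */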
+bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+{
+ return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
+}
+
+bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
+{
+ return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
+}