From: Harry Wentland <harry.wentland@amd.com>
To: dri-devel@lists.freedesktop.org
Subject: [PATCH 24/29] drm/amd/dal: Add display core
Date: Thu, 11 Feb 2016 12:20:04 -0500
Message-ID: <1455211209-26733-25-git-send-email-harry.wentland@amd.com>
In-Reply-To: <1455211209-26733-1-git-send-email-harry.wentland@amd.com>

Adds a logical representation of our hardware. It provides the ability to
(see the usage sketch after this list):
- dc_validate_resources - validate a display configuration
- dc_commit_targets - commit a display configuration
- dc_commit_surfaces_to_target - update surfaces
 - dc_link_detect - detect displays on a link
- dc_resume - resume display HW
- dc_interrupt_set/ack - set and ack interrupts
- etc.
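
A minimal, illustrative sketch of the intended calling sequence from a
display manager (DM) follows. It is not part of the patch; error handling
is reduced, and dm_example_bring_up, its init data and the dc_target are
assumed to be provided by the DM through the interfaces added here:

  #include "dc.h"

  static bool dm_example_bring_up(const struct dal_init_data *init,
  	struct dc_target *target)
  {
  	struct dc_validation_set set = { .target = target, .surface_count = 0 };
  	struct dc_caps caps;
  	uint32_t i;
  	struct dc *dc = dc_create(init);

  	if (!dc)
  		return false;

  	dc_get_caps(dc, &caps);

  	/* Detect what is connected on each link. */
  	for (i = 0; i < caps.max_links; i++)
  		dc_link_detect(dc_get_link_at_index(dc, i), false);

  	/* Validate first, then commit the display configuration. */
  	if (!dc_validate_resources(dc, &set, 1) ||
  	    !dc_commit_targets(dc, &target, 1)) {
  		dc_destroy(&dc);
  		return false;
  	}

  	return true;
  }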

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
---
 drivers/gpu/drm/amd/dal/dc/Makefile               |   28 +
 drivers/gpu/drm/amd/dal/dc/core/dc.c              |  932 +++++++++++
 drivers/gpu/drm/amd/dal/dc/core/dc_hw_sequencer.c |   56 +
 drivers/gpu/drm/amd/dal/dc/core/dc_link.c         | 1644 ++++++++++++++++++++
 drivers/gpu/drm/amd/dal/dc/core/dc_link_ddc.c     | 1151 ++++++++++++++
 drivers/gpu/drm/amd/dal/dc/core/dc_link_dp.c      | 1728 +++++++++++++++++++++
 drivers/gpu/drm/amd/dal/dc/core/dc_link_hwss.c    |  201 +++
 drivers/gpu/drm/amd/dal/dc/core/dc_resource.c     | 1243 +++++++++++++++
 drivers/gpu/drm/amd/dal/dc/core/dc_sink.c         |  116 ++
 drivers/gpu/drm/amd/dal/dc/core/dc_stream.c       |  188 +++
 drivers/gpu/drm/amd/dal/dc/core/dc_surface.c      |  123 ++
 drivers/gpu/drm/amd/dal/dc/core/dc_target.c       |  548 +++++++
 12 files changed, 7958 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/dal/dc/Makefile
 create mode 100644 drivers/gpu/drm/amd/dal/dc/core/dc.c
 create mode 100644 drivers/gpu/drm/amd/dal/dc/core/dc_hw_sequencer.c
 create mode 100644 drivers/gpu/drm/amd/dal/dc/core/dc_link.c
 create mode 100644 drivers/gpu/drm/amd/dal/dc/core/dc_link_ddc.c
 create mode 100644 drivers/gpu/drm/amd/dal/dc/core/dc_link_dp.c
 create mode 100644 drivers/gpu/drm/amd/dal/dc/core/dc_link_hwss.c
 create mode 100644 drivers/gpu/drm/amd/dal/dc/core/dc_resource.c
 create mode 100644 drivers/gpu/drm/amd/dal/dc/core/dc_sink.c
 create mode 100644 drivers/gpu/drm/amd/dal/dc/core/dc_stream.c
 create mode 100644 drivers/gpu/drm/amd/dal/dc/core/dc_surface.c
 create mode 100644 drivers/gpu/drm/amd/dal/dc/core/dc_target.c

diff --git a/drivers/gpu/drm/amd/dal/dc/Makefile b/drivers/gpu/drm/amd/dal/dc/Makefile
new file mode 100644
index 000000000000..aed26eec81f9
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/dc/Makefile
@@ -0,0 +1,28 @@
+#
+# Makefile for Display Core (dc) component.
+#
+
+DC_LIBS = adapter asic_capability audio basics bios calcs \
+gpio gpu i2caux irq virtual
+
+ifdef CONFIG_DRM_AMD_DAL_DCE11_0
+DC_LIBS += dce110
+endif
+
+ifdef CONFIG_DRM_AMD_DAL_DCE10_0
+DC_LIBS += dce100
+endif
+
+AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DAL_PATH)/dc/,$(DC_LIBS)))
+
+include $(AMD_DC)
+
+DISPLAY_CORE = dc.o dc_link.o dc_resource.o dc_target.o dc_sink.o dc_stream.o \
+dc_hw_sequencer.o dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o
+
+AMD_DISPLAY_CORE = $(addprefix $(AMDDALPATH)/dc/core/,$(DISPLAY_CORE))
+
+AMD_DAL_FILES += $(AMD_DISPLAY_CORE)
+
+
+
diff --git a/drivers/gpu/drm/amd/dal/dc/core/dc.c b/drivers/gpu/drm/amd/dal/dc/core/dc.c
new file mode 100644
index 000000000000..0b8f158c0ec2
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/dc/core/dc.c
@@ -0,0 +1,932 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ */
+
+#include "dm_services.h"
+
+#include "dc.h"
+
+#include "core_status.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+
+#include "resource.h"
+
+#include "adapter_service_interface.h"
+#include "clock_source.h"
+#include "dc_bios_types.h"
+
+#include "bandwidth_calcs.h"
+#include "include/irq_service_interface.h"
+#include "transform.h"
+#include "timing_generator.h"
+#include "virtual/virtual_link_encoder.h"
+
+#include "link_hwss.h"
+#include "link_encoder.h"
+
+#include "dc_link_ddc.h"
+
+/*******************************************************************************
+ * Private structures
+ ******************************************************************************/
+
+struct dc_target_sync_report {
+	uint32_t h_count;
+	uint32_t v_count;
+};
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+static void destroy_links(struct dc *dc)
+{
+	uint32_t i;
+
+	for (i = 0; i < dc->link_count; i++) {
+		if (NULL != dc->links[i])
+			link_destroy(&dc->links[i]);
+	}
+}
+
+static bool create_links(struct dc *dc, const struct dc_init_data *init_params)
+{
+	int i;
+	int connectors_num;
+	struct dc_bios *dcb;
+
+	dc->link_count = 0;
+
+	dcb = dal_adapter_service_get_bios_parser(init_params->adapter_srv);
+
+	connectors_num = dcb->funcs->get_connectors_number(dcb);
+
+	if (connectors_num > ENUM_ID_COUNT) {
+		dm_error(
+			"DC: Number of connectors %d exceeds maximum of %d!\n",
+			connectors_num,
+			ENUM_ID_COUNT);
+		return false;
+	}
+
+	if (connectors_num == 0 && init_params->num_virtual_links == 0) {
+		dm_error(
+			"DC: Number of connectors and virtual links cannot both be zero!\n");
+		return false;
+	}
+
+	dm_output_to_console(
+		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
+		__func__,
+		connectors_num,
+		init_params->num_virtual_links);
+
+	for (i = 0; i < connectors_num; i++) {
+		struct link_init_data link_init_params = {0};
+		struct core_link *link;
+
+		link_init_params.ctx = init_params->ctx;
+		link_init_params.adapter_srv = init_params->adapter_srv;
+		link_init_params.connector_index = i;
+		link_init_params.link_index = dc->link_count;
+		link_init_params.dc = dc;
+		link = link_create(&link_init_params);
+
+		if (link) {
+			dc->links[dc->link_count] = link;
+			link->dc = dc;
+			++dc->link_count;
+		} else {
+			dm_error("DC: failed to create link!\n");
+		}
+	}
+
+	for (i = 0; i < init_params->num_virtual_links; i++) {
+		struct core_link *link = dm_alloc(
+			dc->ctx,
+			sizeof(*link));
+		struct encoder_init_data enc_init = {0};
+
+		if (link == NULL) {
+			BREAK_TO_DEBUGGER();
+			goto failed_alloc;
+		}
+
+		link->adapter_srv = init_params->adapter_srv;
+		link->ctx = init_params->ctx;
+		link->dc = dc;
+		link->public.connector_signal = SIGNAL_TYPE_VIRTUAL;
+		link->link_id.type = OBJECT_TYPE_CONNECTOR;
+		link->link_id.id = CONNECTOR_ID_VIRTUAL;
+		link->link_id.enum_id = ENUM_ID_1;
+		link->link_enc = dm_alloc(
+			dc->ctx,
+			sizeof(*link->link_enc));
+
+		enc_init.adapter_service = init_params->adapter_srv;
+		enc_init.ctx = init_params->ctx;
+		enc_init.channel = CHANNEL_ID_UNKNOWN;
+		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
+		enc_init.transmitter = TRANSMITTER_UNKNOWN;
+		enc_init.connector = link->link_id;
+		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
+		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
+		enc_init.encoder.enum_id = ENUM_ID_1;
+		virtual_link_encoder_construct(link->link_enc, &enc_init);
+
+		link->public.link_index = dc->link_count;
+		dc->links[dc->link_count] = link;
+		dc->link_count++;
+	}
+
+	return true;
+
+failed_alloc:
+	return false;
+}
+
+
+static void init_hw(struct dc *dc)
+{
+	int i;
+	struct dc_bios *bp;
+	struct transform *xfm;
+
+	bp = dal_adapter_service_get_bios_parser(dc->res_pool.adapter_srv);
+	for (i = 0; i < dc->res_pool.controller_count; i++) {
+		xfm = dc->res_pool.transforms[i];
+
+		dc->hwss.enable_display_power_gating(
+				dc->ctx, i, bp,
+				PIPE_GATING_CONTROL_INIT);
+		dc->hwss.enable_display_power_gating(
+				dc->ctx, i, bp,
+				PIPE_GATING_CONTROL_DISABLE);
+
+		xfm->funcs->transform_power_up(xfm);
+		dc->hwss.enable_display_pipe_clock_gating(
+			dc->ctx,
+			true);
+	}
+
+	dc->hwss.clock_gating_power_up(dc->ctx, false);
+	bp->funcs->power_up(bp);
+	/***************************************/
+
+	for (i = 0; i < dc->link_count; i++) {
+		/****************************************/
+		/* Power up AND update implementation according to the
+		 * required signal (which may be different from the
+		 * default signal on connector). */
+		struct core_link *link = dc->links[i];
+		link->link_enc->funcs->hw_init(link->link_enc);
+	}
+
+	for (i = 0; i < dc->res_pool.controller_count; i++) {
+		struct timing_generator *tg = dc->res_pool.timing_generators[i];
+
+		tg->funcs->disable_vga(tg);
+
+		/* Blank controller using driver code instead of
+		 * command table. */
+		tg->funcs->set_blank(tg, true);
+	}
+
+	for (i = 0; i < dc->res_pool.audio_count; i++) {
+		struct audio *audio = dc->res_pool.audios[i];
+
+		if (dal_audio_power_up(audio) != AUDIO_RESULT_OK)
+			dm_error("Failed audio power up!\n");
+	}
+
+}
+
+static struct adapter_service *create_as(
+		struct dc_init_data *dc_init_data,
+		const struct dal_init_data *init)
+{
+	struct adapter_service *as = NULL;
+	struct as_init_data init_data;
+
+	dm_memset(&init_data, 0, sizeof(init_data));
+
+	init_data.ctx = dc_init_data->ctx;
+
+	/* BIOS parser init data */
+	init_data.bp_init_data.ctx = dc_init_data->ctx;
+	init_data.bp_init_data.bios = init->asic_id.atombios_base_address;
+
+	/* HW init data */
+	init_data.hw_init_data.chip_id = init->asic_id.chip_id;
+	init_data.hw_init_data.chip_family = init->asic_id.chip_family;
+	init_data.hw_init_data.pci_revision_id = init->asic_id.pci_revision_id;
+	init_data.hw_init_data.fake_paths_num = init->asic_id.fake_paths_num;
+	init_data.hw_init_data.feature_flags = init->asic_id.feature_flags;
+	init_data.hw_init_data.hw_internal_rev = init->asic_id.hw_internal_rev;
+	init_data.hw_init_data.runtime_flags = init->asic_id.runtime_flags;
+	init_data.hw_init_data.vram_width = init->asic_id.vram_width;
+	init_data.hw_init_data.vram_type = init->asic_id.vram_type;
+
+	/* bdf is BUS,DEVICE,FUNCTION*/
+	init_data.bdf_info = init->bdf_info;
+
+	init_data.display_param = &init->display_param;
+	init_data.vbios_override = init->vbios_override;
+	init_data.dce_environment = init->dce_environment;
+
+	as = dal_adapter_service_create(&init_data);
+
+	return as;
+}
+
+static void bw_calcs_data_update_from_pplib(struct dc *dc)
+{
+	struct dc_pp_clock_levels clks = {0};
+
+	/*do system clock*/
+	dm_pp_get_clock_levels_by_type(
+			dc->ctx,
+			DC_PP_CLOCK_TYPE_ENGINE_CLK,
+			&clks);
+	/* convert all the clocks from kHz to fixed point MHz */
+	dc->bw_vbios.high_sclk = bw_frc_to_fixed(
+			clks.clocks_in_khz[clks.num_levels-1], 1000);
+	dc->bw_vbios.mid_sclk  = bw_frc_to_fixed(
+			clks.clocks_in_khz[clks.num_levels>>1], 1000);
+	dc->bw_vbios.low_sclk  = bw_frc_to_fixed(
+			clks.clocks_in_khz[0], 1000);
+
+	/*do display clock*/
+	dm_pp_get_clock_levels_by_type(
+			dc->ctx,
+			DC_PP_CLOCK_TYPE_DISPLAY_CLK,
+			&clks);
+
+	dc->bw_vbios.high_voltage_max_dispclk = bw_frc_to_fixed(
+			clks.clocks_in_khz[clks.num_levels-1], 1000);
+	dc->bw_vbios.mid_voltage_max_dispclk  = bw_frc_to_fixed(
+			clks.clocks_in_khz[clks.num_levels>>1], 1000);
+	dc->bw_vbios.low_voltage_max_dispclk  = bw_frc_to_fixed(
+			clks.clocks_in_khz[0], 1000);
+
+	/*do memory clock*/
+	dm_pp_get_clock_levels_by_type(
+			dc->ctx,
+			DC_PP_CLOCK_TYPE_MEMORY_CLK,
+			&clks);
+
+	dc->bw_vbios.low_yclk = bw_frc_to_fixed(
+		clks.clocks_in_khz[0] * MEMORY_TYPE_MULTIPLIER, 1000);
+	dc->bw_vbios.mid_yclk = bw_frc_to_fixed(
+		clks.clocks_in_khz[clks.num_levels>>1] * MEMORY_TYPE_MULTIPLIER,
+		1000);
+	dc->bw_vbios.high_yclk = bw_frc_to_fixed(
+		clks.clocks_in_khz[clks.num_levels-1] * MEMORY_TYPE_MULTIPLIER,
+		1000);
+}
+
+static bool construct(struct dc *dc, const struct dal_init_data *init_params)
+{
+	struct dal_logger *logger;
+	/* Temporary code
+	 * TODO: replace dal_init_data with dc_init_data when dal is removed
+	 */
+	struct dc_init_data dc_init_data = {0};
+
+	/* Create dc context */
+	/* A temp dc context is used only to allocate the memory for actual
+	 * dc context */
+	struct dc_context ctx = {0};
+	ctx.cgs_device = init_params->cgs_device;
+	ctx.dc = dc;
+
+	dc_init_data.ctx = dm_alloc(&ctx, sizeof(*dc_init_data.ctx));
+	if (!dc_init_data.ctx) {
+		dm_error("%s: failed to create ctx\n", __func__);
+		goto ctx_fail;
+	}
+	dc_init_data.ctx->driver_context = init_params->driver;
+	dc_init_data.ctx->cgs_device = init_params->cgs_device;
+	dc_init_data.num_virtual_links = init_params->num_virtual_links;
+	dc_init_data.ctx->dc = dc;
+
+	/* Create logger */
+	logger = dal_logger_create(dc_init_data.ctx);
+
+	if (!logger) {
+		/* can *not* call logger. call base driver 'print error' */
+		dm_error("%s: failed to create Logger!\n", __func__);
+		goto logger_fail;
+	}
+	dc_init_data.ctx->logger = logger;
+
+	/* Create adapter service */
+	dc_init_data.adapter_srv = create_as(&dc_init_data, init_params);
+
+	if (!dc_init_data.adapter_srv) {
+		dm_error("%s: create_as() failed!\n", __func__);
+		goto as_fail;
+	}
+
+	/* Initialize HW controlled by Adapter Service */
+	if (false == dal_adapter_service_initialize_hw_data(
+			dc_init_data.adapter_srv)) {
+		dm_error("%s: dal_adapter_service_initialize_hw_data() failed!\n",
+				__func__);
+		/* Note that AS exists, so we have to destroy it. */
+		goto as_fail;
+	}
+
+	dc->ctx = dc_init_data.ctx;
+
+	dc->ctx->dce_environment = dal_adapter_service_get_dce_environment(
+			dc_init_data.adapter_srv);
+
+	/* Create hardware sequencer */
+	if (!dc_construct_hw_sequencer(dc_init_data.adapter_srv, dc))
+		goto hwss_fail;
+
+	if (!dc_construct_resource_pool(
+		dc_init_data.adapter_srv, dc, dc_init_data.num_virtual_links))
+		goto construct_resource_fail;
+
+	if (!create_links(dc, &dc_init_data))
+		goto create_links_fail;
+
+	bw_calcs_init(&dc->bw_dceip, &dc->bw_vbios);
+
+	bw_calcs_data_update_from_pplib(dc);
+
+	return true;
+
+	/**** error handling here ****/
+construct_resource_fail:
+create_links_fail:
+as_fail:
+	dal_logger_destroy(&dc_init_data.ctx->logger);
+logger_fail:
+hwss_fail:
+	dm_free(&ctx, dc_init_data.ctx);
+ctx_fail:
+	return false;
+}
+
+static void destruct(struct dc *dc)
+{
+	destroy_links(dc);
+	dc->res_pool.funcs->destruct(&dc->res_pool);
+	dal_logger_destroy(&dc->ctx->logger);
+	dm_free(dc->ctx, dc->ctx);
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+
+struct dc *dc_create(const struct dal_init_data *init_params)
+ {
+	struct dc_context ctx = {
+		.driver_context = init_params->driver,
+		.cgs_device = init_params->cgs_device
+	};
+	struct dc *dc = dm_alloc(&ctx, sizeof(*dc));
+
+	if (NULL == dc)
+		goto alloc_fail;
+
+	ctx.dc = dc;
+	if (false == construct(dc, init_params))
+		goto construct_fail;
+
+	/*TODO: separate HW and SW initialization*/
+	init_hw(dc);
+
+	return dc;
+
+construct_fail:
+	dm_free(&ctx, dc);
+
+alloc_fail:
+	return NULL;
+}
+
+void dc_destroy(struct dc **dc)
+{
+	struct dc_context ctx = *(*dc)->ctx;
+	destruct(*dc);
+	dm_free(&ctx, *dc);
+	*dc = NULL;
+}
+
+bool dc_validate_resources(
+		const struct dc *dc,
+		const struct dc_validation_set set[],
+		uint8_t set_count)
+{
+	enum dc_status result = DC_ERROR_UNEXPECTED;
+	struct validate_context *context;
+
+	context = dm_alloc(dc->ctx, sizeof(struct validate_context));
+	if (context == NULL)
+		goto context_alloc_fail;
+
+	result = dc->res_pool.funcs->validate_with_context(
+						dc, set, set_count, context);
+
+	dm_free(dc->ctx, context);
+context_alloc_fail:
+
+	return (result == DC_OK);
+
+}
+
+static void program_timing_sync(
+		struct dc_context *dc_ctx,
+		struct validate_context *ctx)
+{
+	uint8_t i;
+	uint8_t j;
+	uint8_t group_size = 0;
+	uint8_t tg_count = ctx->res_ctx.pool.controller_count;
+	struct timing_generator *tg_set[3];
+
+	for (i = 0; i < tg_count; i++) {
+		if (!ctx->res_ctx.controller_ctx[i].stream)
+			continue;
+
+		tg_set[0] = ctx->res_ctx.pool.timing_generators[i];
+		group_size = 1;
+
+		/* Add tg to the set, search rest of the tg's for ones with
+		 * same timing, add all tgs with same timing to the group
+		 */
+		for (j = i + 1; j < tg_count; j++) {
+			if (!ctx->res_ctx.controller_ctx[j].stream)
+				continue;
+
+			if (is_same_timing(
+				&ctx->res_ctx.controller_ctx[j].stream->public
+								.timing,
+				&ctx->res_ctx.controller_ctx[i].stream->public
+								.timing)) {
+				tg_set[group_size] =
+					ctx->res_ctx.pool.timing_generators[j];
+				group_size++;
+			}
+		}
+
+		/* Right now we limit to one timing sync group so if one is
+		 * found we break. A group has to be more than one tg.*/
+		if (group_size > 1)
+			break;
+	}
+
+	if (group_size > 1) {
+		dc_ctx->dc->hwss.enable_timing_synchronization(
+			dc_ctx, group_size, tg_set);
+	}
+}
+
+static bool targets_changed(
+		struct dc *dc,
+		struct dc_target *targets[],
+		uint8_t target_count)
+{
+	uint8_t i;
+
+	if (target_count != dc->current_context.target_count)
+		return true;
+
+	for (i = 0; i < dc->current_context.target_count; i++) {
+		if (&dc->current_context.targets[i]->public != targets[i])
+			return true;
+	}
+
+	return false;
+}
+
+bool dc_commit_targets(
+	struct dc *dc,
+	struct dc_target *targets[],
+	uint8_t target_count)
+{
+	enum dc_status result = DC_ERROR_UNEXPECTED;
+	struct validate_context *context;
+	struct dc_validation_set set[4];
+	uint8_t i;
+
+	if (false == targets_changed(dc, targets, target_count))
+		return true;
+
+	dal_logger_write(dc->ctx->logger,
+				LOG_MAJOR_INTERFACE_TRACE,
+				LOG_MINOR_COMPONENT_DC,
+				"%s: %d targets\n",
+				__func__,
+				target_count);
+
+	for (i = 0; i < target_count; i++) {
+		struct dc_target *target = targets[i];
+
+		dc_target_log(target,
+				dc->ctx->logger,
+				LOG_MAJOR_INTERFACE_TRACE,
+				LOG_MINOR_COMPONENT_DC);
+
+		set[i].target = targets[i];
+		set[i].surface_count = 0;
+
+	}
+
+	context = dm_alloc(dc->ctx, sizeof(struct validate_context));
+	if (context == NULL)
+		goto context_alloc_fail;
+
+	result = dc->res_pool.funcs->validate_with_context(dc, set, target_count, context);
+	if (result != DC_OK) {
+		BREAK_TO_DEBUGGER();
+		goto fail;
+	}
+
+	pplib_apply_safe_state(dc);
+
+	if (!dal_adapter_service_is_in_accelerated_mode(
+						dc->res_pool.adapter_srv)) {
+		dc->hwss.enable_accelerated_mode(dc);
+	}
+
+	for (i = 0; i < dc->current_context.target_count; i++) {
+		/*TODO: optimize this to happen only when necessary*/
+		dc_target_disable_memory_requests(
+				&dc->current_context.targets[i]->public);
+	}
+
+	if (result == DC_OK) {
+		dc->hwss.reset_hw_ctx(dc, context, target_count);
+
+		if (context->target_count > 0)
+			result = dc->hwss.apply_ctx_to_hw(dc, context);
+	}
+
+	for (i = 0; i < context->target_count; i++) {
+		struct dc_target *dc_target = &context->targets[i]->public;
+		if (context->targets[i]->status.surface_count > 0)
+			dc_target_enable_memory_requests(dc_target);
+	}
+
+	/* Release old targets */
+	for (i = 0; i < dc->current_context.target_count; i++) {
+		dc_target_release(
+				&dc->current_context.targets[i]->public);
+		dc->current_context.targets[i] = NULL;
+	}
+	/* Retain new targets*/
+	for (i = 0; i < context->target_count; i++) {
+		dc_target_retain(&context->targets[i]->public);
+	}
+
+	dc->current_context = *context;
+
+	program_timing_sync(dc->ctx, context);
+
+	pplib_apply_display_requirements(dc, context);
+
+	/* TODO: disable unused plls*/
+fail:
+	dm_free(dc->ctx, context);
+
+context_alloc_fail:
+	return (result == DC_OK);
+}
+
+uint8_t dc_get_current_target_count(const struct dc *dc)
+{
+	return dc->current_context.target_count;
+}
+
+struct dc_target *dc_get_target_at_index(const struct dc *dc, uint8_t i)
+{
+	if (i < dc->current_context.target_count)
+		return &dc->current_context.targets[i]->public;
+	return NULL;
+}
+
+const struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_index)
+{
+	return &dc->links[link_index]->public;
+}
+
+const struct graphics_object_id dc_get_link_id_at_index(
+	struct dc *dc, uint32_t link_index)
+{
+	return dc->links[link_index]->link_id;
+}
+
+const struct ddc_service *dc_get_ddc_at_index(
+	struct dc *dc, uint32_t link_index)
+{
+	return dc->links[link_index]->ddc;
+}
+
+const enum dc_irq_source dc_get_hpd_irq_source_at_index(
+	struct dc *dc, uint32_t link_index)
+{
+	return dc->links[link_index]->public.irq_source_hpd;
+}
+
+const struct audio **dc_get_audios(struct dc *dc)
+{
+	return (const struct audio **)dc->res_pool.audios;
+}
+
+void dc_get_caps(const struct dc *dc, struct dc_caps *caps)
+{
+	caps->max_targets = dc->res_pool.controller_count;
+	caps->max_links = dc->link_count;
+	caps->max_audios = dc->res_pool.audio_count;
+}
+
+void dc_flip_surface_addrs(struct dc *dc,
+		const struct dc_surface *const surfaces[],
+		struct dc_flip_addrs flip_addrs[],
+		uint32_t count)
+{
+	uint8_t i;
+	for (i = 0; i < count; i++) {
+		struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]);
+		/*
+		 * TODO figure out a good way to keep track of address. Until
+		 * then we'll have to awkwardly bypass the "const" surface.
+		 */
+		surface->public.address = flip_addrs[i].address;
+		surface->public.flip_immediate = flip_addrs[i].flip_immediate;
+
+		dc->hwss.update_plane_address(
+			dc,
+			surface,
+			DC_TARGET_TO_CORE(surface->status.dc_target));
+	}
+}
+
+enum dc_irq_source dc_interrupt_to_irq_source(
+		struct dc *dc,
+		uint32_t src_id,
+		uint32_t ext_id)
+{
+	return dal_irq_service_to_irq_source(dc->res_pool.irqs, src_id, ext_id);
+}
+
+
+void dc_interrupt_set(const struct dc *dc, enum dc_irq_source src, bool enable)
+{
+	dal_irq_service_set(dc->res_pool.irqs, src, enable);
+}
+
+void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
+{
+	dal_irq_service_ack(dc->res_pool.irqs, src);
+}
+
+const struct dc_target *dc_get_target_on_irq_source(
+		const struct dc *dc,
+		enum dc_irq_source src)
+{
+	uint8_t i, j;
+	uint8_t crtc_idx;
+
+	switch (src) {
+	case DC_IRQ_SOURCE_VUPDATE1:
+	case DC_IRQ_SOURCE_VUPDATE2:
+	case DC_IRQ_SOURCE_VUPDATE3:
+	case DC_IRQ_SOURCE_VUPDATE4:
+	case DC_IRQ_SOURCE_VUPDATE5:
+	case DC_IRQ_SOURCE_VUPDATE6:
+		crtc_idx = src - DC_IRQ_SOURCE_VUPDATE1;
+		break;
+	case DC_IRQ_SOURCE_PFLIP1:
+	case DC_IRQ_SOURCE_PFLIP2:
+	case DC_IRQ_SOURCE_PFLIP3:
+	case DC_IRQ_SOURCE_PFLIP4:
+	case DC_IRQ_SOURCE_PFLIP5:
+	case DC_IRQ_SOURCE_PFLIP6:
+	case DC_IRQ_SOURCE_PFLIP_UNDERLAY0:
+		crtc_idx = src - DC_IRQ_SOURCE_PFLIP1;
+		break;
+	default:
+		dm_error("%s: invalid irq source: %d!\n", __func__, src);
+		return NULL;
+	}
+
+	for (i = 0; i < dc->current_context.target_count; i++) {
+		struct core_target *target = dc->current_context.targets[i];
+
+		struct dc_target *dc_target;
+
+		if (NULL == target) {
+			dm_error("%s: 'dc_target' is NULL for irq source: %d!\n",
+					__func__, src);
+			continue;
+		}
+
+		dc_target = &target->public;
+
+		for (j = 0; j < target->public.stream_count; j++) {
+			const struct core_stream *stream =
+				DC_STREAM_TO_CORE(dc_target->streams[j]);
+			const uint8_t controller_idx = stream->controller_idx;
+
+			if (controller_idx == crtc_idx)
+				return dc_target;
+		}
+	}
+
+	return NULL;
+}
+
+void dc_set_power_state(
+	struct dc *dc,
+	enum dc_acpi_cm_power_state power_state,
+	enum dc_video_power_state video_power_state)
+{
+	dc->previous_power_state = dc->current_power_state;
+	dc->current_power_state = video_power_state;
+
+	switch (power_state) {
+	case DC_ACPI_CM_POWER_STATE_D0:
+		init_hw(dc);
+		break;
+	default:
+		/* NULL means "reset/release all DC targets" */
+		dc_commit_targets(dc, NULL, 0);
+
+		dc->hwss.power_down(dc);
+		break;
+	}
+
+}
+
+void dc_resume(const struct dc *dc)
+{
+	uint32_t i;
+
+	for (i = 0; i < dc->link_count; i++)
+		core_link_resume(dc->links[i]);
+}
+
+bool dc_read_dpcd(
+		struct dc *dc,
+		uint32_t link_index,
+		uint32_t address,
+		uint8_t *data,
+		uint32_t size)
+{
+	struct core_link *link =
+			DC_LINK_TO_LINK(dc_get_link_at_index(dc, link_index));
+
+	enum ddc_result r = dal_ddc_service_read_dpcd_data(
+			link->ddc,
+			address,
+			data,
+			size);
+	return r == DDC_RESULT_SUCESSFULL;
+}
+
+bool dc_write_dpcd(
+		struct dc *dc,
+		uint32_t link_index,
+		uint32_t address,
+		const uint8_t *data,
+		uint32_t size)
+{
+	struct core_link *link =
+			DC_LINK_TO_LINK(dc_get_link_at_index(dc, link_index));
+
+	enum ddc_result r = dal_ddc_service_write_dpcd_data(
+			link->ddc,
+			address,
+			data,
+			size);
+	return r == DDC_RESULT_SUCESSFULL;
+}
+
+bool dc_link_add_remote_sink(const struct dc_link *link, struct dc_sink *sink)
+{
+	struct core_link *core_link = DC_LINK_TO_LINK(link);
+	struct dc_link *dc_link = &core_link->public;
+
+	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	dc_link->remote_sinks[link->sink_count] = sink;
+	dc_link->sink_count++;
+
+	return true;
+}
+
+void dc_link_set_sink(const struct dc_link *link, struct dc_sink *sink)
+{
+	struct core_link *core_link = DC_LINK_TO_LINK(link);
+	struct dc_link *dc_link = &core_link->public;
+
+	dc_link->local_sink = sink;
+
+	if (sink == NULL) {
+		dc_link->sink_count = 0;
+		dc_link->type = dc_connection_none;
+	} else {
+		dc_link->sink_count = 1;
+		dc_link->type = dc_connection_single;
+	}
+}
+
+void dc_link_remove_remote_sink(const struct dc_link *link, const struct dc_sink *sink)
+{
+	int i;
+	struct core_link *core_link = DC_LINK_TO_LINK(link);
+	struct dc_link *dc_link = &core_link->public;
+
+	if (!link->sink_count) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	for (i = 0; i < dc_link->sink_count; i++) {
+		if (dc_link->remote_sinks[i] == sink) {
+			dc_sink_release(sink);
+			dc_link->remote_sinks[i] = NULL;
+
+			/* shrink array to remove empty place */
+			while (i < dc_link->sink_count - 1) {
+				dc_link->remote_sinks[i] = dc_link->remote_sinks[i+1];
+				i++;
+			}
+
+			dc_link->sink_count--;
+			return;
+		}
+	}
+}
+
+uint8_t dc_get_dig_index(const struct dc_stream *stream)
+{
+
+	struct core_stream *core_stream = DC_STREAM_TO_CORE(stream);
+
+	switch (core_stream->stream_enc->id) {
+	case ENGINE_ID_DIGA:
+		return 0;
+	case ENGINE_ID_DIGB:
+		return 1;
+	case ENGINE_ID_DIGC:
+		return 2;
+	case ENGINE_ID_DIGD:
+		return 3;
+	case ENGINE_ID_DIGE:
+		return 4;
+	case ENGINE_ID_DIGF:
+		return 5;
+	case ENGINE_ID_DIGG:
+		return 6;
+	default:
+		return -1;
+	}
+
+	return 0;
+}
+
+enum gpio_ddc_line dc_get_ddc_line(
+		const struct dc_stream *stream)
+{
+
+	struct core_sink *core_sink = DC_SINK_TO_CORE(stream->sink);
+	struct ddc *ddc_line = dal_ddc_service_get_ddc_pin(
+			core_sink->link->ddc);
+
+	return dal_ddc_get_line(ddc_line);
+}
+
+enum signal_type dc_get_display_signal(
+		const struct dc_stream *stream)
+{
+	return stream->sink->sink_signal;
+}
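
For reference, the flip interface above is intended to be driven from the DM
roughly as follows (illustrative only, not part of this patch; the surface
pointer and the new addresses come from the DM's framebuffer handling):

  static void dm_example_flip(struct dc *dc, const struct dc_surface *surface,
  	struct dc_flip_addrs new_addrs)
  {
  	const struct dc_surface *surfaces[1] = { surface };
  	struct dc_flip_addrs flips[1];

  	flips[0] = new_addrs;
  	/* false: flip at the next vsync rather than immediately */
  	flips[0].flip_immediate = false;

  	dc_flip_surface_addrs(dc, surfaces, flips, 1);
  }
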
diff --git a/drivers/gpu/drm/amd/dal/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/dal/dc/core/dc_hw_sequencer.c
new file mode 100644
index 000000000000..db4f1313e056
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/dc/core/dc_hw_sequencer.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "core_types.h"
+
+#if defined(CONFIG_DRM_AMD_DAL_DCE10_0)
+#include "dce100/dce100_hw_sequencer.h"
+#endif
+#if defined(CONFIG_DRM_AMD_DAL_DCE11_0)
+#include "dce110/dce110_hw_sequencer.h"
+#endif
+
+bool dc_construct_hw_sequencer(
+				struct adapter_service *adapter_serv,
+				struct dc *dc)
+{
+	enum dce_version dce_ver = dal_adapter_service_get_dce_version(adapter_serv);
+
+	switch (dce_ver) {
+#if defined(CONFIG_DRM_AMD_DAL_DCE10_0)
+	case DCE_VERSION_10_0:
+		return dce100_hw_sequencer_construct(dc);
+#endif
+#if defined(CONFIG_DRM_AMD_DAL_DCE11_0)
+	case DCE_VERSION_11_0:
+		return dce110_hw_sequencer_construct(dc);
+#endif
+	default:
+		break;
+	}
+
+	return false;
+}
diff --git a/drivers/gpu/drm/amd/dal/dc/core/dc_link.c b/drivers/gpu/drm/amd/dal/dc/core/dc_link.c
new file mode 100644
index 000000000000..b0ef028ad0fc
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/dc/core/dc_link.c
@@ -0,0 +1,1644 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "dc.h"
+#include "core_dc.h"
+#include "adapter_service_interface.h"
+#include "grph_object_id.h"
+#include "gpio_service_interface.h"
+#include "core_status.h"
+#include "dc_link_dp.h"
+#include "dc_link_ddc.h"
+#include "link_hwss.h"
+#include "stream_encoder.h"
+#include "link_encoder.h"
+#include "hw_sequencer.h"
+#include "fixed31_32.h"
+
+
+#define LINK_INFO(...) \
+	dal_logger_write(dc_ctx->logger, \
+		LOG_MAJOR_HW_TRACE, LOG_MINOR_HW_TRACE_HOTPLUG, \
+		__VA_ARGS__)
+
+
+/*******************************************************************************
+ * Private structures
+ ******************************************************************************/
+
+enum {
+	LINK_RATE_REF_FREQ_IN_MHZ = 27,
+	PEAK_FACTOR_X1000 = 1006
+};
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+static void destruct(struct core_link *link)
+{
+	if (link->ddc)
+		dal_ddc_service_destroy(&link->ddc);
+
+	if (link->link_enc)
+		link->ctx->dc->res_pool.funcs->link_enc_destroy(&link->link_enc);
+}
+
+/*
+ *  Function: program_hpd_filter
+ *
+ *  @brief
+ *     Programs HPD filter on associated HPD line
+ *
+ *  @param [in] delay_on_connect_in_ms: Connect filter timeout
+ *  @param [in] delay_on_disconnect_in_ms: Disconnect filter timeout
+ *
+ *  @return
+ *     true on success, false otherwise
+ */
+static bool program_hpd_filter(
+	const struct core_link *link)
+{
+	bool result = false;
+
+	struct irq *hpd;
+
+	int delay_on_connect_in_ms = 0;
+	int delay_on_disconnect_in_ms = 0;
+
+	/* Verify feature is supported */
+	switch (link->public.connector_signal) {
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		/* Program hpd filter */
+		delay_on_connect_in_ms = 500;
+		delay_on_disconnect_in_ms = 100;
+		break;
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		/* Program hpd filter to allow DP signal to settle */
+		delay_on_connect_in_ms = 20;
+		delay_on_disconnect_in_ms = 0;
+		break;
+	case SIGNAL_TYPE_LVDS:
+	case SIGNAL_TYPE_EDP:
+	default:
+		/* Don't program hpd filter */
+		return false;
+	}
+
+	/* Obtain HPD handle */
+	hpd = dal_adapter_service_obtain_hpd_irq(
+		link->adapter_srv, link->link_id);
+
+	if (!hpd)
+		return result;
+
+	/* Setup HPD filtering */
+	if (dal_irq_open(hpd) == GPIO_RESULT_OK) {
+		struct gpio_hpd_config config;
+
+		config.delay_on_connect = delay_on_connect_in_ms;
+		config.delay_on_disconnect = delay_on_disconnect_in_ms;
+
+		dal_irq_setup_hpd_filter(hpd, &config);
+
+		dal_irq_close(hpd);
+
+		result = true;
+	} else {
+		ASSERT_CRITICAL(false);
+	}
+
+	/* Release HPD handle */
+	dal_adapter_service_release_irq(link->adapter_srv, hpd);
+
+	return result;
+}
+
+static bool detect_sink(struct core_link *link, enum dc_connection_type *type)
+{
+	uint32_t is_hpd_high = 0;
+	struct irq *hpd_pin;
+
+	/* todo: may need to lock gpio access */
+	hpd_pin = dal_adapter_service_obtain_hpd_irq(
+			link->adapter_srv,
+			link->link_id);
+	if (hpd_pin == NULL)
+		goto hpd_gpio_failure;
+
+	dal_irq_open(hpd_pin);
+	dal_irq_get_value(hpd_pin, &is_hpd_high);
+	dal_irq_close(hpd_pin);
+	dal_adapter_service_release_irq(
+		link->adapter_srv,
+		hpd_pin);
+
+	if (is_hpd_high) {
+		*type = dc_connection_single;
+		/* TODO: need to do the actual detection */
+	} else {
+		*type = dc_connection_none;
+	}
+
+	return true;
+
+hpd_gpio_failure:
+	return false;
+}
+
+
+enum ddc_transaction_type get_ddc_transaction_type(
+		enum signal_type sink_signal)
+{
+	enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE;
+
+
+	switch (sink_signal) {
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+	case SIGNAL_TYPE_LVDS:
+	case SIGNAL_TYPE_RGB:
+		transaction_type = DDC_TRANSACTION_TYPE_I2C;
+		break;
+
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_EDP:
+		transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+		break;
+
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		/* MST does not use I2COverAux, but there is the
+		 * SPECIAL use case for "immediate dwnstrm device
+		 * access" (EPR#370830). */
+		transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+		break;
+
+	default:
+		break;
+	}
+
+
+	return transaction_type;
+}
+
+static enum signal_type get_basic_signal_type(
+	struct graphics_object_id encoder,
+	struct graphics_object_id downstream)
+{
+	if (downstream.type == OBJECT_TYPE_CONNECTOR) {
+		switch (downstream.id) {
+		case CONNECTOR_ID_SINGLE_LINK_DVII:
+			switch (encoder.id) {
+			case ENCODER_ID_INTERNAL_DAC1:
+			case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+			case ENCODER_ID_INTERNAL_DAC2:
+			case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+				return SIGNAL_TYPE_RGB;
+			default:
+				return SIGNAL_TYPE_DVI_SINGLE_LINK;
+			}
+		break;
+		case CONNECTOR_ID_DUAL_LINK_DVII:
+		{
+			switch (encoder.id) {
+			case ENCODER_ID_INTERNAL_DAC1:
+			case ENCODER_ID_INTERNAL_KLDSCP_DAC1:
+			case ENCODER_ID_INTERNAL_DAC2:
+			case ENCODER_ID_INTERNAL_KLDSCP_DAC2:
+				return SIGNAL_TYPE_RGB;
+			default:
+				return SIGNAL_TYPE_DVI_DUAL_LINK;
+			}
+		}
+		break;
+		case CONNECTOR_ID_SINGLE_LINK_DVID:
+			return SIGNAL_TYPE_DVI_SINGLE_LINK;
+		case CONNECTOR_ID_DUAL_LINK_DVID:
+			return SIGNAL_TYPE_DVI_DUAL_LINK;
+		case CONNECTOR_ID_VGA:
+			return SIGNAL_TYPE_RGB;
+		case CONNECTOR_ID_HDMI_TYPE_A:
+			return SIGNAL_TYPE_HDMI_TYPE_A;
+		case CONNECTOR_ID_LVDS:
+			return SIGNAL_TYPE_LVDS;
+		case CONNECTOR_ID_DISPLAY_PORT:
+			return SIGNAL_TYPE_DISPLAY_PORT;
+		case CONNECTOR_ID_EDP:
+			return SIGNAL_TYPE_EDP;
+		default:
+			return SIGNAL_TYPE_NONE;
+		}
+	} else if (downstream.type == OBJECT_TYPE_ENCODER) {
+		switch (downstream.id) {
+		case ENCODER_ID_EXTERNAL_NUTMEG:
+		case ENCODER_ID_EXTERNAL_TRAVIS:
+			return SIGNAL_TYPE_DISPLAY_PORT;
+		default:
+			return SIGNAL_TYPE_NONE;
+		}
+	}
+
+	return SIGNAL_TYPE_NONE;
+}
+
+/*
+ * @brief
+ * Check whether there is a dongle on DP connector
+ */
+static bool is_dp_sink_present(struct core_link *link)
+{
+	enum gpio_result gpio_result;
+	uint32_t clock_pin = 0;
+	uint32_t data_pin = 0;
+
+	struct ddc *ddc;
+
+	enum connector_id connector_id =
+		dal_graphics_object_id_get_connector_id(link->link_id);
+
+	bool present =
+		((connector_id == CONNECTOR_ID_DISPLAY_PORT) ||
+		(connector_id == CONNECTOR_ID_EDP));
+
+	ddc = dal_adapter_service_obtain_ddc(link->adapter_srv, link->link_id);
+
+	if (!ddc)
+		return present;
+
+	/* Open GPIO and set it to I2C mode */
+	/* Note: this GpioMode_Input will be converted
+	 * to GpioConfigType_I2cAuxDualMode in GPIO component,
+	 * which indicates we need additional delay */
+
+	if (GPIO_RESULT_OK != dal_ddc_open(
+		ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
+		dal_adapter_service_release_ddc(link->adapter_srv, ddc);
+
+		return present;
+	}
+
+	/* Read GPIO: DP sink is present if both clock and data pins are zero */
+	/* [anaumov] in DAL2, there was no check for GPIO failure */
+
+	gpio_result = dal_ddc_get_clock(ddc, &clock_pin);
+	ASSERT(gpio_result == GPIO_RESULT_OK);
+
+	if (gpio_result == GPIO_RESULT_OK)
+		if (link->link_enc->features.flags.bits.
+						DP_SINK_DETECT_POLL_DATA_PIN)
+			gpio_result = dal_ddc_get_data(ddc, &data_pin);
+
+	present = (gpio_result == GPIO_RESULT_OK) && !(clock_pin || data_pin);
+
+	dal_ddc_close(ddc);
+
+	dal_adapter_service_release_ddc(link->adapter_srv, ddc);
+
+	return present;
+}
+
+/*
+ * @brief
+ * Detect output sink type
+ */
+static enum signal_type link_detect_sink(struct core_link *link)
+{
+	enum signal_type result = get_basic_signal_type(
+		link->link_enc->id, link->link_id);
+
+	/* Internal digital encoder will detect only dongles
+	 * that require digital signal */
+
+	/* Detection mechanism is different
+	 * for different native connectors.
+	 * LVDS connector supports only LVDS signal;
+	 * PCIE is a bus slot, the actual connector needs to be detected first;
+	 * eDP connector supports only eDP signal;
+	 * HDMI should check straps for audio */
+
+	/* PCIE detects the actual connector on add-on board */
+
+	if (link->link_id.id == CONNECTOR_ID_PCIE) {
+		/* ZAZTODO implement PCIE add-on card detection */
+	}
+
+	switch (link->link_id.id) {
+	case CONNECTOR_ID_HDMI_TYPE_A: {
+		/* check audio support:
+		 * if native HDMI is not supported, switch to DVI */
+		union audio_support audio_support =
+			dal_adapter_service_get_audio_support(
+				link->adapter_srv);
+
+		if (!audio_support.bits.HDMI_AUDIO_NATIVE)
+			if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A)
+				result = SIGNAL_TYPE_DVI_SINGLE_LINK;
+	}
+	break;
+	case CONNECTOR_ID_DISPLAY_PORT: {
+
+		/* Check whether DP signal detected: if not -
+		 * we assume signal is DVI; it could be corrected
+		 * to HDMI after dongle detection */
+		if (!is_dp_sink_present(link))
+			result = SIGNAL_TYPE_DVI_SINGLE_LINK;
+	}
+	break;
+	default:
+	break;
+	}
+
+	return result;
+}
+
+static enum signal_type decide_signal_from_strap_and_dongle_type(
+		enum display_dongle_type dongle_type,
+		union audio_support *audio_support)
+{
+	enum signal_type signal = SIGNAL_TYPE_NONE;
+
+	switch (dongle_type) {
+	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
+		if (audio_support->bits.HDMI_AUDIO_ON_DONGLE)
+			signal =  SIGNAL_TYPE_HDMI_TYPE_A;
+		else
+			signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+		break;
+	case DISPLAY_DONGLE_DP_DVI_DONGLE:
+		signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+		break;
+	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
+		if (audio_support->bits.HDMI_AUDIO_NATIVE)
+			signal =  SIGNAL_TYPE_HDMI_TYPE_A;
+		else
+			signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+		break;
+	default:
+		signal = SIGNAL_TYPE_NONE;
+		break;
+	}
+
+	return signal;
+}
+
+static enum signal_type dp_passive_dongle_detection(
+		struct ddc_service *ddc,
+		struct display_sink_capability *sink_cap,
+		union audio_support *audio_support)
+{
+	/* TODO:These 2 functions should be protected for upstreaming purposes
+	 * in case hackers want to save 10 cents hdmi license fee
+	 */
+	dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
+						ddc, sink_cap);
+	return decide_signal_from_strap_and_dongle_type(
+			sink_cap->dongle_type,
+			audio_support);
+}
+
+static void link_disconnect_sink(struct core_link *link)
+{
+	if (link->public.local_sink) {
+		dc_sink_release(link->public.local_sink);
+		link->public.local_sink = NULL;
+	}
+
+	link->dpcd_sink_count = 0;
+}
+
+static enum dc_edid_status read_edid(
+	struct core_link *link,
+	struct core_sink *sink)
+{
+	uint32_t edid_retry = 3;
+	enum dc_edid_status edid_status;
+
+	/* Some dongles read the EDID incorrectly the first time;
+	 * verify the checksum and retry to make sure we read a correct EDID.
+	 */
+	do {
+		sink->public.dc_edid.length =
+				dal_ddc_service_edid_query(link->ddc);
+
+		if (0 == sink->public.dc_edid.length)
+			return EDID_NO_RESPONSE;
+
+		dal_ddc_service_get_edid_buf(link->ddc,
+				sink->public.dc_edid.raw_edid);
+		edid_status = dm_helpers_parse_edid_caps(
+				sink->ctx,
+				&sink->public.dc_edid,
+				&sink->public.edid_caps);
+		--edid_retry;
+		if (edid_status == EDID_BAD_CHECKSUM)
+			dal_logger_write(link->ctx->logger,
+					LOG_MAJOR_WARNING,
+					LOG_MINOR_DETECTION_EDID_PARSER,
+					"Bad EDID checksum, retry remain: %d\n",
+					edid_retry);
+	} while (edid_status == EDID_BAD_CHECKSUM && edid_retry > 0);
+
+	return edid_status;
+}
+
+static void detect_dp(
+	struct core_link *link,
+	struct display_sink_capability *sink_caps,
+	bool *converter_disable_audio,
+	union audio_support *audio_support,
+	bool boot)
+{
+	sink_caps->signal = link_detect_sink(link);
+	sink_caps->transaction_type =
+		get_ddc_transaction_type(sink_caps->signal);
+
+	if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+		sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+		detect_dp_sink_caps(link);
+
+		/* DP active dongles */
+		if (is_dp_active_dongle(link)) {
+			if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
+				link->public.type = dc_connection_none;
+				/*
+				 * active dongle unplug processing for short irq
+				 */
+				link_disconnect_sink(link);
+				return;
+			}
+
+			if (link->dpcd_caps.dongle_type !=
+			DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
+				*converter_disable_audio = true;
+			}
+		}
+		if (is_mst_supported(link)) {
+			sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+
+			/*
+			 * This call initiates MST topology discovery, which
+			 * detects MST ports and adds new DRM connectors to
+			 * the DRM framework, then reads the EDID via remote
+			 * i2c-over-aux. Finally, the DRM framework is
+			 * notified of the detect result and the EDID is
+			 * saved into it.
+			 *
+			 * .detect is called by .fill_modes.
+			 * .fill_modes is called by the user mode ioctl
+			 * DRM_IOCTL_MODE_GETCONNECTOR.
+			 *
+			 * .get_modes is called by .fill_modes.
+			 *
+			 * When .get_modes is called, the AMDGPU DM
+			 * implementation creates a new dc_sink and adds it
+			 * to the dc_link. For long HPD plug in/out, MST has
+			 * its own handling.
+			 *
+			 * Therefore, just after dc_create, link->sink is not
+			 * created for MST until the user mode app calls
+			 * DRM_IOCTL_MODE_GETCONNECTOR.
+			 *
+			 * Need to check ->sink usages in case ->sink == NULL
+			 * TODO: s3 resume check
+			 */
+
+			if (dm_helpers_dp_mst_start_top_mgr(
+				link->ctx,
+				&link->public, boot)) {
+				link->public.type = dc_connection_mst_branch;
+			} else {
+				/* MST not supported */
+				sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+			}
+		}
+	} else {
+		/* DP passive dongles */
+		sink_caps->signal = dp_passive_dongle_detection(link->ddc,
+				sink_caps,
+				audio_support);
+	}
+}
+
+bool dc_link_detect(const struct dc_link *dc_link, bool boot)
+{
+	struct core_link *link = DC_LINK_TO_LINK(dc_link);
+	struct dc_sink_init_data sink_init_data = { 0 };
+	struct display_sink_capability sink_caps = { 0 };
+	uint8_t i;
+	bool converter_disable_audio = false;
+	union audio_support audio_support =
+		dal_adapter_service_get_audio_support(
+			link->adapter_srv);
+	enum dc_edid_status edid_status;
+	struct dc_context *dc_ctx = link->ctx;
+	struct dc_sink *dc_sink;
+	struct core_sink *sink = NULL;
+	enum dc_connection_type new_connection_type = dc_connection_none;
+
+	if (link->public.connector_signal == SIGNAL_TYPE_VIRTUAL)
+		return false;
+
+	if (false == detect_sink(link, &new_connection_type)) {
+		BREAK_TO_DEBUGGER();
+		return false;
+	}
+
+	link_disconnect_sink(link);
+
+	if (new_connection_type != dc_connection_none) {
+		link->public.type = new_connection_type;
+
+		/* From Disconnected-to-Connected. */
+		switch (link->public.connector_signal) {
+		case SIGNAL_TYPE_HDMI_TYPE_A: {
+			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+			if (audio_support.bits.HDMI_AUDIO_NATIVE)
+				sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+			else
+				sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+			break;
+		}
+
+		case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+			sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+			break;
+		}
+
+		case SIGNAL_TYPE_DVI_DUAL_LINK: {
+			sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+			sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+			break;
+		}
+
+		case SIGNAL_TYPE_EDP: {
+			detect_dp_sink_caps(link);
+			sink_caps.transaction_type =
+				DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+			sink_caps.signal = SIGNAL_TYPE_EDP;
+			break;
+		}
+
+		case SIGNAL_TYPE_DISPLAY_PORT: {
+			detect_dp(
+				link,
+				&sink_caps,
+				&converter_disable_audio,
+				&audio_support, boot);
+
+			/* Active dongle downstream unplug */
+			if (link->public.type == dc_connection_none)
+				return true;
+
+			if (link->public.type == dc_connection_mst_branch) {
+				LINK_INFO("link=%d, mst branch is now Connected\n",
+					link->public.link_index);
+				return false;
+			}
+
+			break;
+		}
+
+		default:
+			DC_ERROR("Invalid connector type! signal:%d\n",
+				link->public.connector_signal);
+			return false;
+		} /* switch() */
+
+		if (link->dpcd_caps.sink_count.bits.SINK_COUNT)
+			link->dpcd_sink_count =
+				link->dpcd_caps.sink_count.bits.SINK_COUNT;
+		else
+			link->dpcd_sink_count = 1;
+
+
+		dal_ddc_service_set_transaction_type(
+						link->ddc,
+						sink_caps.transaction_type);
+
+		sink_init_data.link = &link->public;
+		sink_init_data.sink_signal = sink_caps.signal;
+		sink_init_data.dongle_max_pix_clk =
+			sink_caps.max_hdmi_pixel_clock;
+		sink_init_data.converter_disable_audio =
+			converter_disable_audio;
+
+		dc_sink = dc_sink_create(&sink_init_data);
+		if (!dc_sink) {
+			DC_ERROR("Failed to create sink!\n");
+			return false;
+		}
+
+		sink = DC_SINK_TO_CORE(dc_sink);
+		link->public.local_sink = &sink->public;
+
+		edid_status = read_edid(link, sink);
+
+		switch (edid_status) {
+		case EDID_BAD_CHECKSUM:
+			dal_logger_write(link->ctx->logger,
+				LOG_MAJOR_ERROR,
+				LOG_MINOR_DETECTION_EDID_PARSER,
+				"EDID checksum invalid.\n");
+			break;
+		case EDID_NO_RESPONSE:
+			dal_logger_write(link->ctx->logger,
+				LOG_MAJOR_ERROR,
+				LOG_MINOR_DETECTION_EDID_PARSER,
+				"No EDID read.\n");
+			return false;
+
+		default:
+			break;
+		}
+
+		dal_logger_write(link->ctx->logger,
+			LOG_MAJOR_DETECTION,
+			LOG_MINOR_DETECTION_EDID_PARSER,
+			"%s: "
+			"manufacturer_id = %X, "
+			"product_id = %X, "
+			"serial_number = %X, "
+			"manufacture_week = %d, "
+			"manufacture_year = %d, "
+			"display_name = %s, "
+			"speaker_flag = %d, "
+			"audio_mode_count = %d\n",
+			__func__,
+			sink->public.edid_caps.manufacturer_id,
+			sink->public.edid_caps.product_id,
+			sink->public.edid_caps.serial_number,
+			sink->public.edid_caps.manufacture_week,
+			sink->public.edid_caps.manufacture_year,
+			sink->public.edid_caps.display_name,
+			sink->public.edid_caps.speaker_flags,
+			sink->public.edid_caps.audio_mode_count);
+
+		for (i = 0; i < sink->public.edid_caps.audio_mode_count; i++) {
+			dal_logger_write(link->ctx->logger,
+				LOG_MAJOR_DETECTION,
+				LOG_MINOR_DETECTION_EDID_PARSER,
+				"%s: mode number = %d, "
+				"format_code = %d, "
+				"channel_count = %d, "
+				"sample_rate = %d, "
+				"sample_size = %d\n",
+				__func__,
+				i,
+				sink->public.edid_caps.audio_modes[i].format_code,
+				sink->public.edid_caps.audio_modes[i].channel_count,
+				sink->public.edid_caps.audio_modes[i].sample_rate,
+				sink->public.edid_caps.audio_modes[i].sample_size);
+		}
+
+	} else {
+		/* From Connected-to-Disconnected. */
+		if (link->public.type == dc_connection_mst_branch) {
+			LINK_INFO("link=%d, mst branch is now Disconnected\n",
+				link->public.link_index);
+			dm_helpers_dp_mst_stop_top_mgr(link->ctx, &link->public);
+		}
+
+		link->public.type = dc_connection_none;
+		sink_caps.signal = SIGNAL_TYPE_NONE;
+	}
+
+	LINK_INFO("link=%d, dc_sink_in=%p is now %s\n",
+		link->public.link_index, &sink->public,
+		(sink_caps.signal == SIGNAL_TYPE_NONE ?
+			"Disconnected":"Connected"));
+
+	return true;
+}
+
+static enum hpd_source_id get_hpd_line(
+		struct core_link *link,
+		struct adapter_service *as)
+{
+	struct irq *hpd;
+	enum hpd_source_id hpd_id = HPD_SOURCEID_UNKNOWN;
+
+	hpd = dal_adapter_service_obtain_hpd_irq(as, link->link_id);
+
+	if (hpd) {
+		switch (dal_irq_get_source(hpd)) {
+		case DC_IRQ_SOURCE_HPD1:
+			hpd_id = HPD_SOURCEID1;
+		break;
+		case DC_IRQ_SOURCE_HPD2:
+			hpd_id = HPD_SOURCEID2;
+		break;
+		case DC_IRQ_SOURCE_HPD3:
+			hpd_id = HPD_SOURCEID3;
+		break;
+		case DC_IRQ_SOURCE_HPD4:
+			hpd_id = HPD_SOURCEID4;
+		break;
+		case DC_IRQ_SOURCE_HPD5:
+			hpd_id = HPD_SOURCEID5;
+		break;
+		case DC_IRQ_SOURCE_HPD6:
+			hpd_id = HPD_SOURCEID6;
+		break;
+		default:
+			BREAK_TO_DEBUGGER();
+		break;
+		}
+
+		dal_adapter_service_release_irq(as, hpd);
+	}
+
+	return hpd_id;
+}
+
+static enum channel_id get_ddc_line(struct core_link *link, struct adapter_service *as)
+{
+	struct ddc *ddc;
+	enum channel_id channel = CHANNEL_ID_UNKNOWN;
+
+	ddc = dal_adapter_service_obtain_ddc(as, link->link_id);
+
+	if (ddc) {
+		switch (dal_ddc_get_line(ddc)) {
+		case GPIO_DDC_LINE_DDC1:
+			channel = CHANNEL_ID_DDC1;
+			break;
+		case GPIO_DDC_LINE_DDC2:
+			channel = CHANNEL_ID_DDC2;
+			break;
+		case GPIO_DDC_LINE_DDC3:
+			channel = CHANNEL_ID_DDC3;
+			break;
+		case GPIO_DDC_LINE_DDC4:
+			channel = CHANNEL_ID_DDC4;
+			break;
+		case GPIO_DDC_LINE_DDC5:
+			channel = CHANNEL_ID_DDC5;
+			break;
+		case GPIO_DDC_LINE_DDC6:
+			channel = CHANNEL_ID_DDC6;
+			break;
+		case GPIO_DDC_LINE_DDC_VGA:
+			channel = CHANNEL_ID_DDC_VGA;
+			break;
+		case GPIO_DDC_LINE_I2C_PAD:
+			channel = CHANNEL_ID_I2C_PAD;
+			break;
+		default:
+			BREAK_TO_DEBUGGER();
+			break;
+		}
+
+		dal_adapter_service_release_ddc(as, ddc);
+	}
+
+	return channel;
+}
+
+static enum transmitter translate_encoder_to_transmitter(
+	struct graphics_object_id encoder)
+{
+	switch (encoder.id) {
+	case ENCODER_ID_INTERNAL_UNIPHY:
+		switch (encoder.enum_id) {
+		case ENUM_ID_1:
+			return TRANSMITTER_UNIPHY_A;
+		case ENUM_ID_2:
+			return TRANSMITTER_UNIPHY_B;
+		default:
+			return TRANSMITTER_UNKNOWN;
+		}
+	break;
+	case ENCODER_ID_INTERNAL_UNIPHY1:
+		switch (encoder.enum_id) {
+		case ENUM_ID_1:
+			return TRANSMITTER_UNIPHY_C;
+		case ENUM_ID_2:
+			return TRANSMITTER_UNIPHY_D;
+		default:
+			return TRANSMITTER_UNKNOWN;
+		}
+	break;
+	case ENCODER_ID_INTERNAL_UNIPHY2:
+		switch (encoder.enum_id) {
+		case ENUM_ID_1:
+			return TRANSMITTER_UNIPHY_E;
+		case ENUM_ID_2:
+			return TRANSMITTER_UNIPHY_F;
+		default:
+			return TRANSMITTER_UNKNOWN;
+		}
+	break;
+	case ENCODER_ID_INTERNAL_UNIPHY3:
+		switch (encoder.enum_id) {
+		case ENUM_ID_1:
+			return TRANSMITTER_UNIPHY_G;
+		default:
+			return TRANSMITTER_UNKNOWN;
+		}
+	break;
+	case ENCODER_ID_EXTERNAL_NUTMEG:
+		switch (encoder.enum_id) {
+		case ENUM_ID_1:
+			return TRANSMITTER_NUTMEG_CRT;
+		default:
+			return TRANSMITTER_UNKNOWN;
+		}
+	break;
+	case ENCODER_ID_EXTERNAL_TRAVIS:
+		switch (encoder.enum_id) {
+		case ENUM_ID_1:
+			return TRANSMITTER_TRAVIS_CRT;
+		case ENUM_ID_2:
+			return TRANSMITTER_TRAVIS_LCD;
+		default:
+			return TRANSMITTER_UNKNOWN;
+		}
+	break;
+	default:
+		return TRANSMITTER_UNKNOWN;
+	}
+}
+
+
+static bool construct(
+	struct core_link *link,
+	const struct link_init_data *init_params)
+{
+	uint8_t i;
+	struct adapter_service *as = init_params->adapter_srv;
+	struct irq *hpd_gpio = NULL;
+	struct ddc_service_init_data ddc_service_init_data = { 0 };
+	struct dc_context *dc_ctx = init_params->ctx;
+	struct encoder_init_data enc_init_data = { 0 };
+	struct integrated_info info = {{{ 0 }}};
+
+	link->dc = init_params->dc;
+	link->adapter_srv = as;
+	link->ctx = dc_ctx;
+	link->public.link_index = init_params->link_index;
+
+	link->link_id = dal_adapter_service_get_connector_obj_id(
+			as,
+			init_params->connector_index);
+
+	if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
+		dm_error("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d!\n",
+				__func__, init_params->connector_index);
+		goto create_fail;
+	}
+
+	switch (link->link_id.id) {
+	case CONNECTOR_ID_HDMI_TYPE_A:
+		link->public.connector_signal = SIGNAL_TYPE_HDMI_TYPE_A;
+		break;
+	case CONNECTOR_ID_SINGLE_LINK_DVID:
+	case CONNECTOR_ID_SINGLE_LINK_DVII:
+		link->public.connector_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+		break;
+	case CONNECTOR_ID_DUAL_LINK_DVID:
+	case CONNECTOR_ID_DUAL_LINK_DVII:
+		link->public.connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+		break;
+	case CONNECTOR_ID_DISPLAY_PORT:
+		link->public.connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
+		hpd_gpio = dal_adapter_service_obtain_hpd_irq(
+					as,
+					link->link_id);
+
+		if (hpd_gpio != NULL) {
+			link->public.irq_source_hpd_rx =
+					dal_irq_get_rx_source(hpd_gpio);
+			dal_adapter_service_release_irq(
+					as, hpd_gpio);
+		}
+
+		break;
+	case CONNECTOR_ID_EDP:
+		link->public.connector_signal = SIGNAL_TYPE_EDP;
+		hpd_gpio = dal_adapter_service_obtain_hpd_irq(
+					as,
+					link->link_id);
+
+		if (hpd_gpio != NULL) {
+			link->public.irq_source_hpd_rx =
+					dal_irq_get_rx_source(hpd_gpio);
+			dal_adapter_service_release_irq(
+					as, hpd_gpio);
+		}
+		break;
+	default:
+		dal_logger_write(dc_ctx->logger,
+			LOG_MAJOR_WARNING, LOG_MINOR_TM_LINK_SRV,
+			"Unsupported Connector type:%d!\n", link->link_id.id);
+		goto create_fail;
+	}
+
+	/* TODO: #DAL3 Implement id to str function. */
+	LINK_INFO("Connector[%d] description: "
+			"signal %d\n",
+			init_params->connector_index,
+			link->public.connector_signal);
+
+	hpd_gpio = dal_adapter_service_obtain_hpd_irq(as, link->link_id);
+
+	if (hpd_gpio != NULL) {
+		link->public.irq_source_hpd = dal_irq_get_source(hpd_gpio);
+		dal_adapter_service_release_irq(as, hpd_gpio);
+	}
+
+	ddc_service_init_data.as = as;
+	ddc_service_init_data.ctx = link->ctx;
+	ddc_service_init_data.id = link->link_id;
+	link->ddc = dal_ddc_service_create(&ddc_service_init_data);
+
+	if (NULL == link->ddc) {
+		DC_ERROR("Failed to create ddc_service!\n");
+		goto create_fail;
+	}
+
+	enc_init_data.adapter_service = as;
+	enc_init_data.ctx = dc_ctx;
+	enc_init_data.encoder = dal_adapter_service_get_src_obj(
+							as, link->link_id, 0);
+	enc_init_data.connector = link->link_id;
+	enc_init_data.channel = get_ddc_line(link, as);
+	enc_init_data.hpd_source = get_hpd_line(link, as);
+	enc_init_data.transmitter =
+			translate_encoder_to_transmitter(enc_init_data.encoder);
+	link->link_enc = dc_ctx->dc->res_pool.funcs->link_enc_create(
+								&enc_init_data);
+
+	if (link->link_enc == NULL) {
+		DC_ERROR("Failed to create link encoder!\n");
+		goto create_fail;
+	}
+
+	dal_adapter_service_get_integrated_info(as, &info);
+
+	for (i = 0; ; i++) {
+		if (!dal_adapter_service_get_device_tag(
+				as, link->link_id, i, &link->device_tag)) {
+			DC_ERROR("Failed to find device tag!\n");
+			goto create_fail;
+		}
+
+		/* Look for a device tag that matches the connector signal:
+		 * CRT for RGB, LCD for other supported signal types
+		 */
+		if (!dal_adapter_service_is_device_id_supported(
+						as, link->device_tag.dev_id))
+			continue;
+		if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT
+			&& link->public.connector_signal != SIGNAL_TYPE_RGB)
+			continue;
+		if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD
+			&& link->public.connector_signal == SIGNAL_TYPE_RGB)
+			continue;
+		if (link->device_tag.dev_id.device_type == DEVICE_TYPE_WIRELESS
+			&& link->public.connector_signal != SIGNAL_TYPE_WIRELESS)
+			continue;
+		break;
+	}
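+
+	/*
+	 * Example of the filtering above: for an eDP connector
+	 * (SIGNAL_TYPE_EDP) a DEVICE_TYPE_CRT tag is skipped, since CRT only
+	 * pairs with SIGNAL_TYPE_RGB, and the loop stops at the first
+	 * DEVICE_TYPE_LCD tag the adapter service reports as supported.
+	 */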
+
+	/* Look for channel mapping corresponding to connector and device tag */
+	for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
+		struct external_display_path *path =
+			&info.ext_disp_conn_info.path[i];
+		if (path->device_connector_id.enum_id == link->link_id.enum_id
+			&& path->device_connector_id.id == link->link_id.id
+			&& path->device_connector_id.type == link->link_id.type
+			&& path->device_acpi_enum
+					== link->device_tag.acpi_device) {
+			link->ddi_channel_mapping = path->channel_mapping;
+			break;
+		}
+	}
+
+	/*
+	 * TODO check if GPIO programmed correctly
+	 *
+	 * If GPIO isn't programmed correctly HPD might not rise or drain
+	 * fast enough, leading to bounces.
+	 */
+	program_hpd_filter(link);
+
+	return true;
+
+create_fail:
+	return false;
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+struct core_link *link_create(const struct link_init_data *init_params)
+{
+	struct core_link *link =
+			dm_alloc(init_params->ctx, sizeof(*link));
+
+	if (NULL == link)
+		goto alloc_fail;
+
+	if (false == construct(link, init_params))
+		goto construct_fail;
+
+	return link;
+
+construct_fail:
+	dm_free(init_params->ctx, link);
+
+alloc_fail:
+	return NULL;
+}
+
+void link_destroy(struct core_link **link)
+{
+	destruct(*link);
+	dm_free((*link)->ctx, *link);
+	*link = NULL;
+}
+
+static void dpcd_configure_panel_mode(
+	struct core_link *link,
+	enum dp_panel_mode panel_mode)
+{
+	union dpcd_edp_config edp_config_set;
+	bool panel_mode_edp = false;
+
+	dm_memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config));
+
+	if (DP_PANEL_MODE_DEFAULT != panel_mode) {
+
+		switch (panel_mode) {
+		case DP_PANEL_MODE_EDP:
+		case DP_PANEL_MODE_SPECIAL:
+			panel_mode_edp = true;
+			break;
+
+		default:
+			break;
+		}
+
+		/*set edp panel mode in receiver*/
+		core_link_read_dpcd(
+			link,
+			DPCD_ADDRESS_EDP_CONFIG_SET,
+			&edp_config_set.raw,
+			sizeof(edp_config_set.raw));
+
+		if (edp_config_set.bits.PANEL_MODE_EDP
+			!= panel_mode_edp) {
+			enum ddc_result result = DDC_RESULT_UNKNOWN;
+
+			edp_config_set.bits.PANEL_MODE_EDP =
+			panel_mode_edp;
+			result = core_link_write_dpcd(
+				link,
+				DPCD_ADDRESS_EDP_CONFIG_SET,
+				&edp_config_set.raw,
+				sizeof(edp_config_set.raw));
+
+			ASSERT(result == DDC_RESULT_SUCESSFULL);
+		}
+	}
+	dal_logger_write(link->ctx->logger, LOG_MAJOR_DETECTION,
+			LOG_MINOR_DETECTION_DP_CAPS,
+			"Link: %d eDP panel mode supported: %d "
+			"eDP panel mode enabled: %d \n",
+			link->public.link_index,
+			link->dpcd_caps.panel_mode_edp,
+			panel_mode_edp);
+}
+
+static enum dc_status enable_link_dp(struct core_stream *stream)
+{
+	enum dc_status status;
+	bool skip_video_pattern;
+	struct core_link *link = stream->sink->link;
+	struct link_settings link_settings = {0};
+	enum dp_panel_mode panel_mode;
+
+	/* get link settings for video mode timing */
+	decide_link_settings(stream, &link_settings);
+	dp_enable_link_phy(
+		stream->sink->link,
+		stream->signal,
+		&link_settings);
+
+	panel_mode = dp_get_panel_mode(link);
+	dpcd_configure_panel_mode(link, panel_mode);
+
+	skip_video_pattern = true;
+
+	if (link_settings.link_rate == LINK_RATE_LOW)
+		skip_video_pattern = false;
+
+	if (perform_link_training(link, &link_settings, skip_video_pattern)) {
+		link->cur_link_settings = link_settings;
+		status = DC_OK;
+	} else {
+		status = DC_ERROR_UNEXPECTED;
+	}
+
+	return status;
+}
+
+static enum dc_status enable_link_dp_mst(struct core_stream *stream)
+{
+	struct core_link *link = stream->sink->link;
+
+	/* The sink signal type after an MST branch is MST. Multiple MST sinks
+	 * share one link, so the link DP PHY is enabled and trained only once.
+	 */
+	if (link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN)
+		return DC_OK;
+
+	return enable_link_dp(stream);
+}
+
+static void enable_link_hdmi(struct core_stream *stream)
+{
+	struct core_link *link = stream->sink->link;
+
+	/* enable video output */
+	/* Encoder output settings need to be calculated as for the set mode,
+	 * since that leads to querying dynamic link capabilities, which
+	 * should be done before enabling the output. */
+	uint32_t normalized_pix_clk = stream->public.timing.pix_clk_khz;
+	switch (stream->public.timing.display_color_depth) {
+	case COLOR_DEPTH_888:
+		break;
+	case COLOR_DEPTH_101010:
+		normalized_pix_clk = (normalized_pix_clk * 30) / 24;
+		break;
+	case COLOR_DEPTH_121212:
+		normalized_pix_clk = (normalized_pix_clk * 36) / 24;
+		break;
+	case COLOR_DEPTH_161616:
+		normalized_pix_clk = (normalized_pix_clk * 48) / 24;
+		break;
+	default:
+		break;
+	}
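+
+	/*
+	 * Worked example of the normalization above (input value
+	 * hypothetical): a 148500 kHz pixel clock at COLOR_DEPTH_101010
+	 * becomes 148500 * 30 / 24 = 185625 kHz, approximating the TMDS
+	 * clock the sink will see and deciding below whether the > 340 MHz
+	 * SCDC scrambling path is needed.
+	 */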
+
+	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+		dal_ddc_service_write_scdc_data(
+			stream->sink->link->ddc,
+			normalized_pix_clk,
+			stream->public.timing.flags.LTE_340MCSC_SCRAMBLE);
+
+	dm_memset(&stream->sink->link->cur_link_settings, 0,
+			sizeof(struct link_settings));
+
+	link->link_enc->funcs->enable_tmds_output(
+			link->link_enc,
+			stream->clock_source->id,
+			stream->public.timing.display_color_depth,
+			stream->signal == SIGNAL_TYPE_HDMI_TYPE_A,
+			stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK,
+			stream->public.timing.pix_clk_khz);
+
+	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+		dal_ddc_service_read_scdc_data(link->ddc);
+}
+
+/****************************enable_link***********************************/
+static enum dc_status enable_link(struct core_stream *stream)
+{
+	enum dc_status status = DC_ERROR_UNEXPECTED;
+	switch (stream->signal) {
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_EDP:
+		status = enable_link_dp(stream);
+		break;
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+		status = enable_link_dp_mst(stream);
+		dm_sleep_in_milliseconds(stream->ctx, 200);
+		break;
+	case SIGNAL_TYPE_DVI_SINGLE_LINK:
+	case SIGNAL_TYPE_DVI_DUAL_LINK:
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+		enable_link_hdmi(stream);
+		status = DC_OK;
+		break;
+	case SIGNAL_TYPE_VIRTUAL:
+		status = DC_OK;
+		break;
+	default:
+		break;
+	}
+
+	if (stream->audio && status == DC_OK) {
+		/* notify audio driver for audio modes of monitor */
+		dal_audio_enable_azalia_audio_jack_presence(stream->audio,
+				stream->stream_enc->id);
+
+		/* un-mute audio */
+		dal_audio_unmute(stream->audio, stream->stream_enc->id,
+				stream->signal);
+	}
+
+	return status;
+}
+
+static void disable_link(struct core_stream *stream)
+{
+	/* TODO  dp_set_hw_test_pattern */
+
+	/* Encoder output settings need to be calculated as for the set mode,
+	 * since that leads to querying dynamic link capabilities, which
+	 * should be done before enabling the output. */
+
+	if (dc_is_dp_signal(stream->signal)) {
+		/* SST DP, eDP */
+		if (dc_is_dp_sst_signal(stream->signal))
+			dp_disable_link_phy(
+					stream->sink->link, stream->signal);
+		else
+			dp_disable_link_phy_mst(
+					stream->sink->link, stream);
+	} else {
+		struct link_encoder *encoder =
+				stream->sink->link->link_enc;
+
+		encoder->funcs->disable_output(encoder, stream->signal);
+	}
+}
+
+enum dc_status dc_link_validate_mode_timing(
+		const struct core_sink *sink,
+		struct core_link *link,
+		const struct dc_crtc_timing *timing)
+{
+	uint32_t max_pix_clk = sink->dongle_max_pix_clk;
+
+	if (0 != max_pix_clk && timing->pix_clk_khz > max_pix_clk)
+		return DC_EXCEED_DONGLE_MAX_CLK;
+
+	switch (sink->public.sink_signal) {
+	case SIGNAL_TYPE_DISPLAY_PORT:
+		if (!dp_validate_mode_timing(
+				link,
+				timing))
+			return DC_NO_DP_LINK_BANDWIDTH;
+		break;
+
+	default:
+		break;
+	}
+
+	return DC_OK;
+}
+
+bool dc_link_set_backlight_level(const struct dc_link *public, uint32_t level)
+{
+	struct core_link *link = DC_LINK_TO_CORE(public);
+	struct dc_context *ctx = link->ctx;
+
+	dal_logger_write(ctx->logger, LOG_MAJOR_BACKLIGHT,
+			LOG_MINOR_BACKLIGHT_INTERFACE,
+			"New Backlight level: %d (0x%X)\n", level, level);
+
+	link->link_enc->funcs->set_lcd_backlight_level(link->link_enc, level);
+
+	return true;
+}
+
+void core_link_resume(struct core_link *link)
+{
+	if (link->public.connector_signal != SIGNAL_TYPE_VIRTUAL)
+		program_hpd_filter(link);
+}
+
+static struct fixed31_32 get_pbn_per_slot(struct core_stream *stream)
+{
+	struct link_settings *link_settings =
+			&stream->sink->link->cur_link_settings;
+	uint32_t link_rate_in_mbps =
+			link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ;
+	struct fixed31_32 mbps = dal_fixed31_32_from_int(
+			link_rate_in_mbps * link_settings->lane_count);
+
+	return dal_fixed31_32_div_int(mbps, 54);
+}
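+
+/*
+ * Worked example for get_pbn_per_slot(), assuming the product
+ * link_rate * LINK_RATE_REF_FREQ_IN_MHZ works out to 540 for HBR2
+ * (540 MBytes/sec of payload per lane after 8b/10b): with 4 lanes,
+ * mbps = 540 * 4 = 2160, and 2160 / 54 = 40 PBN per time slot,
+ * consistent with 1 PBN = 54/64 MBytes/sec spread over 64 slots.
+ */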
+
+static int get_color_depth(struct core_stream *stream)
+{
+	switch (stream->pix_clk_params.color_depth) {
+	case COLOR_DEPTH_666: return 6;
+	case COLOR_DEPTH_888: return 8;
+	case COLOR_DEPTH_101010: return 10;
+	case COLOR_DEPTH_121212: return 12;
+	case COLOR_DEPTH_141414: return 14;
+	case COLOR_DEPTH_161616: return 16;
+	default: return 0;
+	}
+}
+
+static struct fixed31_32 get_pbn_from_timing(struct core_stream *stream)
+{
+	uint32_t bpc;
+	uint64_t kbps;
+	struct fixed31_32 peak_kbps;
+	uint32_t numerator;
+	uint32_t denominator;
+
+	bpc = get_color_depth(stream);
+	kbps = stream->pix_clk_params.requested_pix_clk * bpc * 3;
+
+	/*
+	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
+	 * common multiplier to render an integer PBN for all link rate/lane
+	 * counts combinations
+	 * calculate
+	 * peak_kbps *= (1006/1000)
+	 * peak_kbps *= (64/54)
+	 * peak_kbps *= 8    convert to bytes
+	 */
+
+	numerator = 64 * PEAK_FACTOR_X1000;
+	denominator = 54 * 8 * 1000 * 1000;
+	kbps *= numerator;
+	peak_kbps = dal_fixed31_32_from_fraction(kbps, denominator);
+
+	return peak_kbps;
+}
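+
+/*
+ * Worked example for get_pbn_from_timing(), assuming PEAK_FACTOR_X1000
+ * is 1006 (the 1.006 margin described above) and an 8 bpc stream with
+ * requested_pix_clk = 297000 kHz:
+ *   kbps = 297000 * 8 * 3 = 7128000
+ *   7128000 * (64 * 1006) = 458929152000
+ *   458929152000 / (54 * 8 * 1000 * 1000) ~= 1062 PBN
+ */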
+
+static void update_mst_stream_alloc_table(
+	struct core_link *link,
+	struct core_stream *stream,
+	const struct dp_mst_stream_allocation_table *proposed_table)
+{
+	struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = {
+			{ 0 } };
+	struct link_mst_stream_allocation *dc_alloc;
+
+	int i;
+	int j;
+
+	/* ASSERT if the DRM proposed_table adds more than one new payload */
+	ASSERT(proposed_table->stream_count -
+			link->mst_stream_alloc_table.stream_count < 2);
+
+	/* copy proposed_table to core_link, add stream encoder */
+	for (i = 0; i < proposed_table->stream_count; i++) {
+
+		for (j = 0; j < link->mst_stream_alloc_table.stream_count; j++) {
+			dc_alloc =
+			&link->mst_stream_alloc_table.stream_allocations[j];
+
+			if (dc_alloc->vcp_id ==
+				proposed_table->stream_allocations[i].vcp_id) {
+
+				work_table[i] = *dc_alloc;
+				break; /* exit j loop */
+			}
+		}
+
+		/* new vcp_id */
+		if (j == link->mst_stream_alloc_table.stream_count) {
+			work_table[i].vcp_id =
+				proposed_table->stream_allocations[i].vcp_id;
+			work_table[i].slot_count =
+				proposed_table->stream_allocations[i].slot_count;
+			work_table[i].stream_enc = stream->stream_enc;
+		}
+	}
+
+	/* update link->mst_stream_alloc_table with work_table */
+	link->mst_stream_alloc_table.stream_count =
+			proposed_table->stream_count;
+	for (i = 0; i < MAX_CONTROLLER_NUM; i++)
+		link->mst_stream_alloc_table.stream_allocations[i] =
+				work_table[i];
+}
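+
+/*
+ * Example of the merge above (vcp_ids hypothetical): if the current table
+ * holds { vcp_id 1 } and DRM proposes { vcp_id 1, vcp_id 2 }, entry 1 is
+ * copied as-is, keeping the stream encoder assigned when it was first
+ * allocated, while entry 2 is created with this stream's stream_enc; the
+ * table then reports stream_count = 2.
+ */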
+
+/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
+ * because stream_encoder is not exposed to dm
+ */
+static enum dc_status allocate_mst_payload(struct core_stream *stream)
+{
+	struct core_link *link = stream->sink->link;
+	struct link_encoder *link_encoder = link->link_enc;
+	struct stream_encoder *stream_encoder = stream->stream_enc;
+	struct dp_mst_stream_allocation_table proposed_table = {0};
+	struct fixed31_32 avg_time_slots_per_mtp;
+	struct fixed31_32 pbn;
+	struct fixed31_32 pbn_per_slot;
+	uint8_t i;
+
+	/* enable_link_dp_mst already checks link->enabled_stream_count
+	 * and that the stream is in link->stream[]. This is called during
+	 * set mode, so stream_enc is available.
+	 */
+
+	/* calculate the VC payload for the stream: stream_alloc */
+	dm_helpers_dp_mst_write_payload_allocation_table(
+		stream->ctx,
+		&stream->public,
+		&proposed_table,
+		true);
+
+	update_mst_stream_alloc_table(link, stream, &proposed_table);
+
+	dal_logger_write(link->ctx->logger,
+			LOG_MAJOR_MST,
+			LOG_MINOR_MST_PROGRAMMING,
+			"%s  "
+			"stream_count: %d: \n ",
+			__func__,
+			link->mst_stream_alloc_table.stream_count);
+
+	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+		dal_logger_write(link->ctx->logger,
+		LOG_MAJOR_MST,
+		LOG_MINOR_MST_PROGRAMMING,
+		"stream_enc[%d]: 0x%x      "
+		"stream[%d].vcp_id: %d      "
+		"stream[%d].slot_count: %d\n",
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+	}
+
+	ASSERT(proposed_table.stream_count > 0);
+
+	/*
+	 * Temporary fix: when an MST chain of two displays is unplugged,
+	 * the table is empty on the first reset mode, which causes a
+	 * divide-by-zero in the avg_time_slots_per_mtp calculation.
+	 */
+
+	/* to be removed or debugged */
+	if (proposed_table.stream_count == 0)
+		return DC_OK;
+
+	/* program DP source TX for payload */
+	link_encoder->funcs->update_mst_stream_allocation_table(
+		link_encoder,
+		&link->mst_stream_alloc_table);
+
+	/* send down message */
+	dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+			stream->ctx,
+			&stream->public);
+
+	dm_helpers_dp_mst_send_payload_allocation(
+			stream->ctx,
+			&stream->public,
+			true);
+
+	/* slot X.Y for only current stream */
+	pbn_per_slot = get_pbn_per_slot(stream);
+	pbn = get_pbn_from_timing(stream);
+	avg_time_slots_per_mtp = dal_fixed31_32_div(pbn, pbn_per_slot);
+
+	stream_encoder->funcs->set_mst_bandwidth(
+		stream_encoder,
+		avg_time_slots_per_mtp);
+
+	return DC_OK;
+
+}
+
+static enum dc_status deallocate_mst_payload(struct core_stream *stream)
+{
+	struct core_link *link = stream->sink->link;
+	struct link_encoder *link_encoder = link->link_enc;
+	struct stream_encoder *stream_encoder = stream->stream_enc;
+	struct dp_mst_stream_allocation_table proposed_table = {0};
+	struct fixed31_32 avg_time_slots_per_mtp = dal_fixed31_32_from_int(0);
+	uint8_t i;
+	bool mst_mode = (link->public.type == dc_connection_mst_branch);
+
+	/* deallocate_mst_payload is called before disabling the link. On a
+	 * mode change or monitor disable/enable, a new stream is created that
+	 * is not in link->stream[] yet; its payload has not been allocated,
+	 * so it must not be de-allocated here. For a new mode set,
+	 * map_resources will get an engine for the new stream, so
+	 * stream_enc->id should still be valid at this point.
+	 */
+
+	/* slot X.Y */
+	stream_encoder->funcs->set_mst_bandwidth(
+		stream_encoder,
+		avg_time_slots_per_mtp);
+
+	/* TODO: which component is responsible for remove payload table? */
+	if (mst_mode)
+		dm_helpers_dp_mst_write_payload_allocation_table(
+				stream->ctx,
+				&stream->public,
+				&proposed_table,
+				false);
+
+	update_mst_stream_alloc_table(link, stream, &proposed_table);
+
+	dal_logger_write(link->ctx->logger,
+			LOG_MAJOR_MST,
+			LOG_MINOR_MST_PROGRAMMING,
+			"%s"
+			"stream_count: %d: ",
+			__func__,
+			link->mst_stream_alloc_table.stream_count);
+
+	for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+		dal_logger_write(link->ctx->logger,
+		LOG_MAJOR_MST,
+		LOG_MINOR_MST_PROGRAMMING,
+		"stream_enc[%d]: 0x%x      "
+		"stream[%d].vcp_id: %d      "
+		"stream[%d].slot_count: %d\n",
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+		i,
+		link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+	}
+
+	link_encoder->funcs->update_mst_stream_allocation_table(
+		link_encoder,
+		&link->mst_stream_alloc_table);
+
+	if (mst_mode) {
+		dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+			stream->ctx,
+			&stream->public);
+
+		dm_helpers_dp_mst_send_payload_allocation(
+			stream->ctx,
+			&stream->public,
+			false);
+	}
+
+	return DC_OK;
+}
+
+void core_link_enable_stream(
+		struct core_link *link,
+		struct core_stream *stream)
+{
+	struct dc *dc = stream->ctx->dc;
+
+	if (DC_OK != enable_link(stream)) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+
+	dc->hwss.enable_stream(stream);
+
+	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+		allocate_mst_payload(stream);
+}
+
+void core_link_disable_stream(
+		struct core_link *link,
+		struct core_stream *stream)
+{
+	struct dc *dc = stream->ctx->dc;
+
+	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+		deallocate_mst_payload(stream);
+
+	dc->hwss.disable_stream(stream);
+
+	disable_link(stream);
+
+}
+
+
diff --git a/drivers/gpu/drm/amd/dal/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/dal/dc/core/dc_link_ddc.c
new file mode 100644
index 000000000000..62b8c264a593
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/dc/core/dc_link_ddc.c
@@ -0,0 +1,1151 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/adapter_service_interface.h"
+#include "include/ddc_service_types.h"
+#include "include/grph_object_id.h"
+#include "include/dpcd_defs.h"
+#include "include/logger_interface.h"
+#include "include/vector.h"
+
+#include "dc_link_ddc.h"
+
+#define AUX_POWER_UP_WA_DELAY 500
+#define I2C_OVER_AUX_DEFER_WA_DELAY 70
+
+/* CV smart dongle slave address for retrieving supported HDTV modes*/
+#define CV_SMART_DONGLE_ADDRESS 0x20
+/* DVI-HDMI dongle slave address for retrieving dongle signature*/
+#define DVI_HDMI_DONGLE_ADDRESS 0x68
+static const int8_t dvi_hdmi_dongle_signature_str[] = "6140063500G";
+struct dvi_hdmi_dongle_signature_data {
+	int8_t vendor[3];/* "AMD" */
+	uint8_t version[2];
+	uint8_t size;
+	int8_t id[11];/* "6140063500G"*/
+};
+/* DP-HDMI dongle slave address for retrieving dongle signature*/
+#define DP_HDMI_DONGLE_ADDRESS 0x40
+static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR";
+#define DP_HDMI_DONGLE_SIGNATURE_EOT 0x04
+
+struct dp_hdmi_dongle_signature_data {
+	int8_t id[15];/* "DP-HDMI ADAPTOR"*/
+	uint8_t eot;/* end of transmission '\x4' */
+};
+
+/* Address range from 0x00 to 0x1F.*/
+#define DP_ADAPTOR_TYPE2_SIZE 0x20
+#define DP_ADAPTOR_TYPE2_REG_ID 0x10
+#define DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK 0x1D
+/* Identifies adaptor as Dual-mode adaptor */
+#define DP_ADAPTOR_TYPE2_ID 0xA0
+/* MHz*/
+#define DP_ADAPTOR_TYPE2_MAX_TMDS_CLK 600
+/* MHz*/
+#define DP_ADAPTOR_TYPE2_MIN_TMDS_CLK 25
+/* kHZ*/
+#define DP_ADAPTOR_DVI_MAX_TMDS_CLK 165000
+/* kHZ*/
+#define DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK 165000
+
+#define DDC_I2C_COMMAND_ENGINE I2C_COMMAND_ENGINE_SW
+
+enum edid_read_result {
+	EDID_READ_RESULT_EDID_MATCH = 0,
+	EDID_READ_RESULT_EDID_MISMATCH,
+	EDID_READ_RESULT_CHECKSUM_READ_ERR,
+	EDID_READ_RESULT_VENDOR_READ_ERR
+};
+
+/* SCDC Address defines (HDMI 2.0)*/
+#define HDMI_SCDC_WRITE_UPDATE_0_ARRAY 3
+#define HDMI_SCDC_ADDRESS  0x54
+#define HDMI_SCDC_SINK_VERSION 0x01
+#define HDMI_SCDC_SOURCE_VERSION 0x02
+#define HDMI_SCDC_UPDATE_0 0x10
+#define HDMI_SCDC_TMDS_CONFIG 0x20
+#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
+#define HDMI_SCDC_CONFIG_0 0x30
+#define HDMI_SCDC_STATUS_FLAGS 0x40
+#define HDMI_SCDC_ERR_DETECT 0x50
+#define HDMI_SCDC_TEST_CONFIG 0xC0
+
+
+union hdmi_scdc_update_read_data {
+	uint8_t byte[2];
+	struct {
+		uint8_t STATUS_UPDATE:1;
+		uint8_t CED_UPDATE:1;
+		uint8_t RR_TEST:1;
+		uint8_t RESERVED:5;
+		uint8_t RESERVED2:8;
+	} fields;
+};
+
+union hdmi_scdc_status_flags_data {
+	uint8_t byte[2];
+	struct {
+		uint8_t CLOCK_DETECTED:1;
+		uint8_t CH0_LOCKED:1;
+		uint8_t CH1_LOCKED:1;
+		uint8_t CH2_LOCKED:1;
+		uint8_t RESERVED:4;
+		uint8_t RESERVED2:8;
+	} fields;
+};
+
+union hdmi_scdc_ced_data {
+	uint8_t byte[7];
+	struct {
+		uint8_t CH0_8LOW:8;
+		uint8_t CH0_7HIGH:7;
+		uint8_t CH0_VALID:1;
+		uint8_t CH1_8LOW:8;
+		uint8_t CH1_7HIGH:7;
+		uint8_t CH1_VALID:1;
+		uint8_t CH2_8LOW:8;
+		uint8_t CH2_7HIGH:7;
+		uint8_t CH2_VALID:1;
+		uint8_t CHECKSUM:8;
+	} fields;
+};
+
+union hdmi_scdc_test_config_Data {
+	uint8_t byte;
+	struct {
+		uint8_t TEST_READ_REQUEST_DELAY:7;
+		uint8_t TEST_READ_REQUEST: 1;
+	} fields;
+};
+
+
+
+union ddc_wa {
+	struct {
+		uint32_t DP_SKIP_POWER_OFF:1;
+		uint32_t DP_AUX_POWER_UP_WA_DELAY:1;
+	} bits;
+	uint32_t raw;
+};
+
+struct ddc_flags {
+	uint8_t EDID_QUERY_DONE_ONCE:1;
+	uint8_t IS_INTERNAL_DISPLAY:1;
+	uint8_t FORCE_READ_REPEATED_START:1;
+	uint8_t EDID_STRESS_READ:1;
+
+};
+
+struct ddc_service {
+	struct ddc *ddc_pin;
+	struct ddc_flags flags;
+	union ddc_wa wa;
+	enum ddc_transaction_type transaction_type;
+	enum display_dongle_type dongle_type;
+	struct dp_receiver_id_info dp_receiver_id_info;
+	struct adapter_service *as;
+	struct dc_context *ctx;
+
+	uint32_t address;
+	uint32_t edid_buf_len;
+	uint8_t edid_buf[MAX_EDID_BUFFER_SIZE];
+};
+
+struct i2c_payloads {
+	struct vector payloads;
+};
+
+struct aux_payloads {
+	struct vector payloads;
+};
+
+struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count)
+{
+	struct i2c_payloads *payloads;
+
+	payloads = dm_alloc(ctx, sizeof(struct i2c_payloads));
+
+	if (!payloads)
+		return NULL;
+
+	if (dal_vector_construct(
+		&payloads->payloads, ctx, count, sizeof(struct i2c_payload)))
+		return payloads;
+
+	dm_free(ctx, payloads);
+	return NULL;
+
+}
+
+struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
+{
+	return (struct i2c_payload *)p->payloads.container;
+}
+
+uint32_t  dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
+{
+	return p->payloads.count;
+}
+
+void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
+{
+	if (!p || !*p)
+		return;
+	dal_vector_destruct(&(*p)->payloads);
+	dm_free((*p)->payloads.ctx, *p);
+	*p = NULL;
+
+}
+
+struct aux_payloads *dal_ddc_aux_payloads_create(struct dc_context *ctx, uint32_t count)
+{
+	struct aux_payloads *payloads;
+
+	payloads = dm_alloc(ctx, sizeof(struct aux_payloads));
+
+	if (!payloads)
+		return NULL;
+
+	if (dal_vector_construct(
+		&payloads->payloads, ctx, count, sizeof(struct aux_payload)))
+		return payloads;
+
+	dm_free(ctx, payloads);
+	return NULL;
+}
+
+struct aux_payload *dal_ddc_aux_payloads_get(struct aux_payloads *p)
+{
+	return (struct aux_payload *)p->payloads.container;
+}
+
+uint32_t  dal_ddc_aux_payloads_get_count(struct aux_payloads *p)
+{
+	return p->payloads.count;
+}
+
+
+void dal_ddc_aux_payloads_destroy(struct aux_payloads **p)
+{
+	if (!p || !*p)
+		return;
+
+	dal_vector_destruct(&(*p)->payloads);
+	dm_free((*p)->payloads.ctx, *p);
+	*p = NULL;
+}
+
+#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
+
+void dal_ddc_i2c_payloads_add(
+	struct i2c_payloads *payloads,
+	uint32_t address,
+	uint32_t len,
+	uint8_t *data,
+	bool write)
+{
+	uint32_t payload_size = EDID_SEGMENT_SIZE;
+	uint32_t pos;
+
+	for (pos = 0; pos < len; pos += payload_size) {
+		struct i2c_payload payload = {
+			.write = write,
+			.address = address,
+			.length = DDC_MIN(payload_size, len - pos),
+			.data = data + pos };
+		dal_vector_append(&payloads->payloads, &payload);
+	}
+
+}
+
+void dal_ddc_aux_payloads_add(
+	struct aux_payloads *payloads,
+	uint32_t address,
+	uint32_t len,
+	uint8_t *data,
+	bool write)
+{
+	uint32_t payload_size = DEFAULT_AUX_MAX_DATA_SIZE;
+	uint32_t pos;
+
+	for (pos = 0; pos < len; pos += payload_size) {
+		struct aux_payload payload = {
+			.i2c_over_aux = true,
+			.write = write,
+			.address = address,
+			.length = DDC_MIN(payload_size, len - pos),
+			.data = data + pos };
+		dal_vector_append(&payloads->payloads, &payload);
+	}
+}
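+
+/*
+ * Example of the chunking above, assuming DEFAULT_AUX_MAX_DATA_SIZE is 16:
+ * a 40-byte read is queued as three aux payloads of 16, 16 and 8 bytes to
+ * the same slave address; the i2c variant chunks by EDID_SEGMENT_SIZE
+ * instead.
+ */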
+
+
+static bool construct(
+	struct ddc_service *ddc_service,
+	struct ddc_service_init_data *init_data)
+{
+	enum connector_id connector_id =
+		dal_graphics_object_id_get_connector_id(init_data->id);
+
+	ddc_service->ctx = init_data->ctx;
+	ddc_service->as = init_data->as;
+	ddc_service->ddc_pin = dal_adapter_service_obtain_ddc(
+			init_data->as, init_data->id);
+
+	ddc_service->flags.EDID_QUERY_DONE_ONCE = false;
+
+	ddc_service->flags.FORCE_READ_REPEATED_START =
+		dal_adapter_service_is_feature_supported(
+			FEATURE_DDC_READ_FORCE_REPEATED_START);
+
+	ddc_service->flags.EDID_STRESS_READ =
+			dal_adapter_service_is_feature_supported(
+				FEATURE_EDID_STRESS_READ);
+
+
+	ddc_service->flags.IS_INTERNAL_DISPLAY =
+		connector_id == CONNECTOR_ID_EDP ||
+		connector_id == CONNECTOR_ID_LVDS;
+
+	ddc_service->wa.raw = 0;
+	return true;
+}
+
+struct ddc_service *dal_ddc_service_create(
+	struct ddc_service_init_data *init_data)
+{
+	struct ddc_service *ddc_service;
+
+	ddc_service = dm_alloc(init_data->ctx, sizeof(struct ddc_service));
+
+	if (!ddc_service)
+		return NULL;
+
+	if (construct(ddc_service, init_data))
+		return ddc_service;
+
+	dm_free(init_data->ctx, ddc_service);
+	return NULL;
+}
+
+static void destruct(struct ddc_service *ddc)
+{
+	if (ddc->ddc_pin)
+		dal_adapter_service_release_ddc(ddc->as, ddc->ddc_pin);
+}
+
+void dal_ddc_service_destroy(struct ddc_service **ddc)
+{
+	if (!ddc || !*ddc) {
+		BREAK_TO_DEBUGGER();
+		return;
+	}
+	destruct(*ddc);
+	dm_free((*ddc)->ctx, *ddc);
+	*ddc = NULL;
+}
+
+enum ddc_service_type dal_ddc_service_get_type(struct ddc_service *ddc)
+{
+	return DDC_SERVICE_TYPE_CONNECTOR;
+}
+
+void dal_ddc_service_set_transaction_type(
+	struct ddc_service *ddc,
+	enum ddc_transaction_type type)
+{
+	ddc->transaction_type = type;
+}
+
+bool dal_ddc_service_is_in_aux_transaction_mode(struct ddc_service *ddc)
+{
+	switch (ddc->transaction_type) {
+	case DDC_TRANSACTION_TYPE_I2C_OVER_AUX:
+	case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER:
+	case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_RETRY_DEFER:
+		return true;
+	default:
+		break;
+	}
+	return false;
+}
+
+void ddc_service_set_dongle_type(struct ddc_service *ddc,
+		enum display_dongle_type dongle_type)
+{
+	ddc->dongle_type = dongle_type;
+}
+
+static uint32_t defer_delay_converter_wa(
+	struct ddc_service *ddc,
+	uint32_t defer_delay)
+{
+	struct dp_receiver_id_info dp_rec_info = {0};
+
+	if (dal_ddc_service_get_dp_receiver_id_info(ddc, &dp_rec_info) &&
+		(dp_rec_info.branch_id == DP_BRANCH_DEVICE_ID_4) &&
+		!dm_strncmp(dp_rec_info.branch_name,
+			DP_DVI_CONVERTER_ID_4,
+			sizeof(dp_rec_info.branch_name)))
+		return defer_delay > I2C_OVER_AUX_DEFER_WA_DELAY ?
+			defer_delay : I2C_OVER_AUX_DEFER_WA_DELAY;
+
+	return defer_delay;
+
+}
+
+#define DP_TRANSLATOR_DELAY 5
+
+static uint32_t get_defer_delay(struct ddc_service *ddc)
+{
+	uint32_t defer_delay = 0;
+
+	switch (ddc->transaction_type) {
+	case DDC_TRANSACTION_TYPE_I2C_OVER_AUX:
+		if ((DISPLAY_DONGLE_DP_VGA_CONVERTER == ddc->dongle_type) ||
+			(DISPLAY_DONGLE_DP_DVI_CONVERTER == ddc->dongle_type) ||
+			(DISPLAY_DONGLE_DP_HDMI_CONVERTER ==
+				ddc->dongle_type)) {
+
+			defer_delay = DP_TRANSLATOR_DELAY;
+
+			defer_delay =
+				defer_delay_converter_wa(ddc, defer_delay);
+
+		} else /*sink has a delay different from an Active Converter*/
+			defer_delay = 0;
+		break;
+	case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER:
+		defer_delay = DP_TRANSLATOR_DELAY;
+		break;
+	default:
+		break;
+	}
+	return defer_delay;
+}
+
+static bool i2c_read(
+	struct ddc_service *ddc,
+	uint32_t address,
+	uint8_t *buffer,
+	uint32_t len)
+{
+	uint8_t offs_data = 0;
+	struct i2c_payload payloads[2] = {
+		{
+		.write = true,
+		.address = address,
+		.length = 1,
+		.data = &offs_data },
+		{
+		.write = false,
+		.address = address,
+		.length = len,
+		.data = buffer } };
+
+	struct i2c_command command = {
+		.payloads = payloads,
+		.number_of_payloads = 2,
+		.engine = DDC_I2C_COMMAND_ENGINE,
+		.speed = dal_adapter_service_get_sw_i2c_speed(ddc->as) };
+
+	return dal_i2caux_submit_i2c_command(
+		dal_adapter_service_get_i2caux(ddc->as),
+		ddc->ddc_pin,
+		&command);
+}
+
+static uint8_t aux_read_edid_block(
+	struct ddc_service *ddc,
+	uint8_t address,
+	uint8_t index,
+	uint8_t *buf)
+{
+	struct aux_command cmd = {
+		.payloads = NULL,
+		.number_of_payloads = 0,
+		.defer_delay = get_defer_delay(ddc),
+		.max_defer_write_retry = 0 };
+
+	uint8_t retrieved = 0;
+	uint8_t base_offset =
+		(index % DDC_EDID_BLOCKS_PER_SEGMENT) * DDC_EDID_BLOCK_SIZE;
+	uint8_t segment = index / DDC_EDID_BLOCKS_PER_SEGMENT;
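+
+	/*
+	 * Example, assuming DDC_EDID_BLOCKS_PER_SEGMENT is 2 and
+	 * DDC_EDID_BLOCK_SIZE is 128: block index 2 maps to segment 1 with
+	 * base_offset 0, while block index 3 maps to segment 1 with
+	 * base_offset 128.
+	 */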
+
+	for (retrieved = 0; retrieved < DDC_EDID_BLOCK_SIZE;
+		retrieved += DEFAULT_AUX_MAX_DATA_SIZE) {
+
+		uint8_t offset = base_offset + retrieved;
+
+		struct aux_payload payloads[3] = {
+			{
+			.i2c_over_aux = true,
+			.write = true,
+			.address = DDC_EDID_SEGMENT_ADDRESS,
+			.length = 1,
+			.data = &segment },
+			{
+			.i2c_over_aux = true,
+			.write = true,
+			.address = address,
+			.length = 1,
+			.data = &offset },
+			{
+			.i2c_over_aux = true,
+			.write = false,
+			.address = address,
+			.length = DEFAULT_AUX_MAX_DATA_SIZE,
+			.data = &buf[retrieved] } };
+
+		if (segment == 0) {
+			cmd.payloads = &payloads[1];
+			cmd.number_of_payloads = 2;
+		} else {
+			cmd.payloads = payloads;
+			cmd.number_of_payloads = 3;
+		}
+
+		if (!dal_i2caux_submit_aux_command(
+			dal_adapter_service_get_i2caux(ddc->as),
+			ddc->ddc_pin,
+			&cmd))
+			/* cannot read, break*/
+			break;
+	}
+
+	/* Reset segment to 0. Needed by some panels */
+	if (0 != segment) {
+		struct aux_payload payloads[1] = { {
+			.i2c_over_aux = true,
+			.write = true,
+			.address = DDC_EDID_SEGMENT_ADDRESS,
+			.length = 1,
+			.data = &segment } };
+		bool result = false;
+
+		segment = 0;
+
+		cmd.number_of_payloads = ARRAY_SIZE(payloads);
+		cmd.payloads = payloads;
+
+		result = dal_i2caux_submit_aux_command(
+			dal_adapter_service_get_i2caux(ddc->as),
+			ddc->ddc_pin,
+			&cmd);
+
+		if (false == result)
+			dal_logger_write(
+				ddc->ctx->logger,
+				LOG_MAJOR_ERROR,
+				LOG_MINOR_COMPONENT_DISPLAY_CAPABILITY_SERVICE,
+				"%s: Writing of EDID Segment (0x30) failed!\n",
+				__func__);
+	}
+
+	return retrieved;
+}
+
+static uint8_t i2c_read_edid_block(
+	struct ddc_service *ddc,
+	uint8_t address,
+	uint8_t index,
+	uint8_t *buf)
+{
+	bool ret = false;
+	uint8_t offset = (index % DDC_EDID_BLOCKS_PER_SEGMENT) *
+		DDC_EDID_BLOCK_SIZE;
+	uint8_t segment = index / DDC_EDID_BLOCKS_PER_SEGMENT;
+
+	struct i2c_command cmd = {
+		.payloads = NULL,
+		.number_of_payloads = 0,
+		.engine = DDC_I2C_COMMAND_ENGINE,
+		.speed = dal_adapter_service_get_sw_i2c_speed(ddc->as) };
+
+	struct i2c_payload payloads[3] = {
+		{
+		.write = true,
+		.address = DDC_EDID_SEGMENT_ADDRESS,
+		.length = 1,
+		.data = &segment },
+		{
+		.write = true,
+		.address = address,
+		.length = 1,
+		.data = &offset },
+		{
+		.write = false,
+		.address = address,
+		.length = DDC_EDID_BLOCK_SIZE,
+		.data = buf } };
+	/*
+	 * Some I2C engines don't handle stop/start between write-offset and
+	 * read-data commands properly. For those displays, we have to force
+	 * the newer E-DDC behavior of repeated-start, which can be enabled by
+	 * a runtime parameter.
+	 * Originally implemented for OnLive using an NXP receiver chip.
+	 */
+
+	if (index == 0 && !ddc->flags.FORCE_READ_REPEATED_START) {
+		/* base block, use DDC2B, submit as 2 commands */
+		cmd.payloads = &payloads[1];
+		cmd.number_of_payloads = 1;
+
+		if (dal_i2caux_submit_i2c_command(
+			dal_adapter_service_get_i2caux(ddc->as),
+			ddc->ddc_pin,
+			&cmd)) {
+
+			cmd.payloads = &payloads[2];
+			cmd.number_of_payloads = 1;
+
+			ret = dal_i2caux_submit_i2c_command(
+				dal_adapter_service_get_i2caux(ddc->as),
+				ddc->ddc_pin,
+				&cmd);
+		}
+
+	} else {
+		/*
+		 * extension block use E-DDC, submit as 1 command
+		 * or if repeated-start is forced by runtime parameter
+		 */
+		if (segment != 0) {
+			/* include segment offset in command*/
+			cmd.payloads = payloads;
+			cmd.number_of_payloads = 3;
+		} else {
+			/* we are reading first segment,
+			 * segment offset is not required */
+			cmd.payloads = &payloads[1];
+			cmd.number_of_payloads = 2;
+		}
+
+		ret = dal_i2caux_submit_i2c_command(
+			dal_adapter_service_get_i2caux(ddc->as),
+			ddc->ddc_pin,
+			&cmd);
+	}
+
+	return ret ? DDC_EDID_BLOCK_SIZE : 0;
+}
+
+static uint32_t query_edid_block(
+	struct ddc_service *ddc,
+	uint8_t address,
+	uint8_t index,
+	uint8_t *buf,
+	uint32_t size)
+{
+	uint32_t size_retrieved = 0;
+
+	if (size < DDC_EDID_BLOCK_SIZE)
+		return 0;
+
+	if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
+
+		ASSERT(index < 2);
+		size_retrieved =
+			aux_read_edid_block(ddc, address, index, buf);
+	} else {
+		size_retrieved =
+			i2c_read_edid_block(ddc, address, index, buf);
+	}
+
+	return size_retrieved;
+}
+
+#define DDC_DPCD_EDID_CHECKSUM_WRITE_ADDRESS 0x261
+#define DDC_TEST_ACK_ADDRESS 0x260
+#define DDC_DPCD_EDID_TEST_ACK 0x04
+#define DDC_DPCD_EDID_TEST_MASK 0x04
+#define DDC_DPCD_TEST_REQUEST_ADDRESS 0x218
+
+/* AG TODO: go through the DM callback here, like for DPCD */
+
+static void write_dp_edid_checksum(
+	struct ddc_service *ddc,
+	uint8_t checksum)
+{
+	uint8_t dpcd_data;
+
+	dal_ddc_service_read_dpcd_data(
+		ddc,
+		DDC_DPCD_TEST_REQUEST_ADDRESS,
+		&dpcd_data,
+		1);
+
+	if (dpcd_data & DDC_DPCD_EDID_TEST_MASK) {
+
+		dal_ddc_service_write_dpcd_data(
+			ddc,
+			DDC_DPCD_EDID_CHECKSUM_WRITE_ADDRESS,
+			&checksum,
+			1);
+
+		dpcd_data = DDC_DPCD_EDID_TEST_ACK;
+
+		dal_ddc_service_write_dpcd_data(
+			ddc,
+			DDC_TEST_ACK_ADDRESS,
+			&dpcd_data,
+			1);
+	}
+}
+
+uint32_t dal_ddc_service_edid_query(struct ddc_service *ddc)
+{
+	uint32_t bytes_read = 0;
+	uint32_t ext_cnt = 0;
+
+	uint8_t address;
+	uint32_t i;
+
+	for (address = DDC_EDID_ADDRESS_START;
+		address <= DDC_EDID_ADDRESS_END; ++address) {
+
+		bytes_read = query_edid_block(
+			ddc,
+			address,
+			0,
+			ddc->edid_buf,
+			sizeof(ddc->edid_buf) - bytes_read);
+
+		if (bytes_read != DDC_EDID_BLOCK_SIZE)
+			continue;
+
+		/* get the number of ext blocks*/
+		ext_cnt = ddc->edid_buf[DDC_EDID_EXT_COUNT_OFFSET];
+
+		/* EDID 2.0: need to read 1 more block because an EDID 2.0
+		 * base structure is 256 bytes in size */
+		if (ddc->edid_buf[DDC_EDID_20_SIGNATURE_OFFSET] ==
+			DDC_EDID_20_SIGNATURE)
+				ext_cnt = 1;
+
+		for (i = 0; i < ext_cnt; i++) {
+			/* read additional ext blocks accordingly */
+			bytes_read += query_edid_block(
+					ddc,
+					address,
+					i+1,
+					&ddc->edid_buf[bytes_read],
+					sizeof(ddc->edid_buf) - bytes_read);
+		}
+
+		/* this is a special code path for DP compliance */
+		if (DDC_TRANSACTION_TYPE_I2C_OVER_AUX == ddc->transaction_type)
+			write_dp_edid_checksum(
+				ddc,
+				ddc->edid_buf[(ext_cnt * DDC_EDID_BLOCK_SIZE) +
+				DDC_EDID1X_CHECKSUM_OFFSET]);
+
+		/* remember the address we fetched the EDID from,
+		 * for later signature checks */
+		ddc->address = address;
+
+		break;/* already read edid, done*/
+	}
+
+	ddc->edid_buf_len = bytes_read;
+	return bytes_read;
+}
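+
+/*
+ * Example, assuming DDC_EDID_BLOCK_SIZE is 128: a sink whose base block
+ * reports one extension (ext_cnt = 1) ends with edid_buf_len = 256, i.e.
+ * the 128-byte base block plus one more 128-byte read for the extension.
+ */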
+
+uint32_t dal_ddc_service_get_edid_buf_len(struct ddc_service *ddc)
+{
+	return ddc->edid_buf_len;
+}
+
+void dal_ddc_service_get_edid_buf(struct ddc_service *ddc, uint8_t *edid_buf)
+{
+	dm_memmove(edid_buf,
+			ddc->edid_buf, ddc->edid_buf_len);
+}
+
+void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
+	struct ddc_service *ddc,
+	struct display_sink_capability *sink_cap)
+{
+	uint8_t i;
+	bool is_valid_hdmi_signature;
+	enum display_dongle_type *dongle = &sink_cap->dongle_type;
+	uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
+	bool is_type2_dongle = false;
+	struct dp_hdmi_dongle_signature_data *dongle_signature;
+
+	/* Assume we have no valid DP passive dongle connected */
+	*dongle = DISPLAY_DONGLE_NONE;
+	sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK;
+
+	/* Read DP-HDMI dongle I2C (no response is interpreted as a DP-DVI dongle) */
+	if (!i2c_read(
+		ddc,
+		DP_HDMI_DONGLE_ADDRESS,
+		type2_dongle_buf,
+		sizeof(type2_dongle_buf))) {
+		dal_logger_write(ddc->ctx->logger,
+			LOG_MAJOR_DCS,
+			LOG_MINOR_DCS_DONGLE_DETECTION,
+			"Detected DP-DVI dongle.\n");
+		*dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+		sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
+		return;
+	}
+
+	/* Check if Type 2 dongle.*/
+	if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID)
+		is_type2_dongle = true;
+
+	dongle_signature =
+		(struct dp_hdmi_dongle_signature_data *)type2_dongle_buf;
+
+	is_valid_hdmi_signature = true;
+
+	/* Check EOT */
+	if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) {
+		is_valid_hdmi_signature = false;
+	}
+
+	/* Check signature */
+	for (i = 0; i < sizeof(dongle_signature->id); ++i) {
+		/* If it's not the right signature,
+		 * skip a mismatch in the subversion byte. */
+		if (dongle_signature->id[i] !=
+			dp_hdmi_dongle_signature_str[i] && i != 3) {
+
+			if (is_type2_dongle) {
+				is_valid_hdmi_signature = false;
+				break;
+			}
+
+		}
+	}
+
+	if (is_type2_dongle) {
+		uint32_t max_tmds_clk =
+			type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK];
+
+		max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2;
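+
+		/*
+		 * This scales the raw register value by 2.5 to get MHz; e.g.
+		 * a (hypothetical) register value of 240 yields
+		 * 240 * 2 + 240 / 2 = 600 MHz, the
+		 * DP_ADAPTOR_TYPE2_MAX_TMDS_CLK limit checked below.
+		 */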
+
+		if (0 == max_tmds_clk ||
+				max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK ||
+				max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) {
+			dal_logger_write(ddc->ctx->logger,
+				LOG_MAJOR_DCS,
+				LOG_MINOR_DCS_DONGLE_DETECTION,
+				"Invalid Maximum TMDS clock");
+			*dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+		} else {
+			if (is_valid_hdmi_signature == true) {
+				*dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
+				dal_logger_write(ddc->ctx->logger,
+					LOG_MAJOR_DCS,
+					LOG_MINOR_DCS_DONGLE_DETECTION,
+					"Detected Type 2 DP-HDMI Maximum TMDS "
+					"clock, max TMDS clock: %d MHz",
+					max_tmds_clk);
+			} else {
+				*dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
+				dal_logger_write(ddc->ctx->logger,
+					LOG_MAJOR_DCS,
+					LOG_MINOR_DCS_DONGLE_DETECTION,
+					"Detected Type 2 DP-HDMI (no valid HDMI"
+					" signature) Maximum TMDS clock, max "
+					"TMDS clock: %d MHz",
+					max_tmds_clk);
+			}
+
+			/* Multiply by 1000 to convert to kHz. */
+			sink_cap->max_hdmi_pixel_clock =
+				max_tmds_clk * 1000;
+		}
+
+	} else {
+		if (is_valid_hdmi_signature == true) {
+			dal_logger_write(ddc->ctx->logger,
+				LOG_MAJOR_DCS,
+				LOG_MINOR_DCS_DONGLE_DETECTION,
+				"Detected Type 1 DP-HDMI dongle.\n");
+			*dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE;
+		} else {
+			dal_logger_write(ddc->ctx->logger,
+				LOG_MAJOR_DCS,
+				LOG_MINOR_DCS_DONGLE_DETECTION,
+				"Detected Type 1 DP-HDMI dongle (no valid HDMI "
+				"signature).\n");
+
+			*dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE;
+		}
+	}
+
+	return;
+}
+
+enum {
+	DP_SINK_CAP_SIZE =
+		DPCD_ADDRESS_EDP_CONFIG_CAP - DPCD_ADDRESS_DPCD_REV + 1
+};
+
+bool dal_ddc_service_query_ddc_data(
+	struct ddc_service *ddc,
+	uint32_t address,
+	uint8_t *write_buf,
+	uint32_t write_size,
+	uint8_t *read_buf,
+	uint32_t read_size)
+{
+	bool ret;
+	uint32_t payload_size =
+		dal_ddc_service_is_in_aux_transaction_mode(ddc) ?
+			DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE;
+
+	uint32_t write_payloads =
+		(write_size + payload_size - 1) / payload_size;
+
+	uint32_t read_payloads =
+		(read_size + payload_size - 1) / payload_size;
+
+	uint32_t payloads_num = write_payloads + read_payloads;
+
+	if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
+		return false;
+
+	/*TODO: len of payload data for i2c and aux is uint8!!!!,
+	 *  but we want to read 256 over i2c!!!!*/
+	if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
+
+		struct aux_payloads *payloads =
+			dal_ddc_aux_payloads_create(ddc->ctx, payloads_num);
+
+		struct aux_command command = {
+			.payloads = dal_ddc_aux_payloads_get(payloads),
+			.number_of_payloads = 0,
+			.defer_delay = get_defer_delay(ddc),
+			.max_defer_write_retry = 0 };
+
+		dal_ddc_aux_payloads_add(
+			payloads, address, write_size, write_buf, true);
+
+		dal_ddc_aux_payloads_add(
+			payloads, address, read_size, read_buf, false);
+
+		command.number_of_payloads =
+			dal_ddc_aux_payloads_get_count(payloads);
+
+		ret = dal_i2caux_submit_aux_command(
+				dal_adapter_service_get_i2caux(ddc->as),
+				ddc->ddc_pin,
+				&command);
+
+		dal_ddc_aux_payloads_destroy(&payloads);
+
+	} else {
+		struct i2c_payloads *payloads =
+			dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
+
+		struct i2c_command command = {
+			.payloads = dal_ddc_i2c_payloads_get(payloads),
+			.number_of_payloads = 0,
+			.engine = DDC_I2C_COMMAND_ENGINE,
+			.speed =
+				dal_adapter_service_get_sw_i2c_speed(ddc->as) };
+
+		dal_ddc_i2c_payloads_add(
+			payloads, address, write_size, write_buf, true);
+
+		dal_ddc_i2c_payloads_add(
+			payloads, address, read_size, read_buf, false);
+
+		command.number_of_payloads =
+			dal_ddc_i2c_payloads_get_count(payloads);
+
+		ret = dal_i2caux_submit_i2c_command(
+				dal_adapter_service_get_i2caux(ddc->as),
+				ddc->ddc_pin,
+				&command);
+
+		dal_ddc_i2c_payloads_destroy(&payloads);
+	}
+
+	return ret;
+}
+
+bool dal_ddc_service_get_dp_receiver_id_info(
+	struct ddc_service *ddc,
+	struct dp_receiver_id_info *info)
+{
+	if (!info)
+		return false;
+
+	*info = ddc->dp_receiver_id_info;
+	return true;
+}
+
+enum ddc_result dal_ddc_service_read_dpcd_data(
+	struct ddc_service *ddc,
+	uint32_t address,
+	uint8_t *data,
+	uint32_t len)
+{
+	struct aux_payload read_payload = {
+		.i2c_over_aux = false,
+		.write = false,
+		.address = address,
+		.length = len,
+		.data = data,
+	};
+	struct aux_command command = {
+		.payloads = &read_payload,
+		.number_of_payloads = 1,
+		.defer_delay = 0,
+		.max_defer_write_retry = 0,
+	};
+
+	if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
+		BREAK_TO_DEBUGGER();
+		return DDC_RESULT_FAILED_INVALID_OPERATION;
+	}
+
+	if (dal_i2caux_submit_aux_command(
+		dal_adapter_service_get_i2caux(ddc->as),
+		ddc->ddc_pin,
+		&command))
+		return DDC_RESULT_SUCESSFULL;
+
+	return DDC_RESULT_FAILED_OPERATION;
+}
+
+enum ddc_result dal_ddc_service_write_dpcd_data(
+	struct ddc_service *ddc,
+	uint32_t address,
+	const uint8_t *data,
+	uint32_t len)
+{
+	struct aux_payload write_payload = {
+		.i2c_over_aux = false,
+		.write = true,
+		.address = address,
+		.length = len,
+		.data = (uint8_t *)data,
+	};
+	struct aux_command command = {
+		.payloads = &write_payload,
+		.number_of_payloads = 1,
+		.defer_delay = 0,
+		.max_defer_write_retry = 0,
+	};
+
+	if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
+		BREAK_TO_DEBUGGER();
+		return DDC_RESULT_FAILED_INVALID_OPERATION;
+	}
+
+	if (dal_i2caux_submit_aux_command(
+		dal_adapter_service_get_i2caux(ddc->as),
+		ddc->ddc_pin,
+		&command))
+		return DDC_RESULT_SUCESSFULL;
+
+	return DDC_RESULT_FAILED_OPERATION;
+}
+
+/*test only function*/
+void dal_ddc_service_set_ddc_pin(
+	struct ddc_service *ddc_service,
+	struct ddc *ddc)
+{
+	ddc_service->ddc_pin = ddc;
+}
+
+struct ddc *dal_ddc_service_get_ddc_pin(struct ddc_service *ddc_service)
+{
+	return ddc_service->ddc_pin;
+}
+
+
+void dal_ddc_service_reset_dp_receiver_id_info(struct ddc_service *ddc_service)
+{
+	dm_memset(&ddc_service->dp_receiver_id_info,
+		0, sizeof(struct dp_receiver_id_info));
+}
+
+void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
+		uint32_t pix_clk,
+		bool lte_340_scramble)
+{
+	bool over_340_mhz = pix_clk > 340000 ? 1 : 0;
+	uint8_t slave_address = HDMI_SCDC_ADDRESS;
+	uint8_t offset = HDMI_SCDC_SINK_VERSION;
+	uint8_t sink_version = 0;
+	uint8_t write_buffer[2] = {0};
+	/*Lower than 340 Scramble bit from SCDC caps*/
+
+	dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
+			sizeof(offset), &sink_version, sizeof(sink_version));
+	if (sink_version == 1) {
+		/*Source Version = 1*/
+		write_buffer[0] = HDMI_SCDC_SOURCE_VERSION;
+		write_buffer[1] = 1;
+		dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+				write_buffer, sizeof(write_buffer), NULL, 0);
+		/*Read Request from SCDC caps*/
+	}
+	write_buffer[0] = HDMI_SCDC_TMDS_CONFIG;
+
+	if (over_340_mhz) {
+		write_buffer[1] = 3;
+	} else if (lte_340_scramble) {
+		write_buffer[1] = 1;
+	} else {
+		write_buffer[1] = 0;
+	}
+	dal_ddc_service_query_ddc_data(ddc_service, slave_address, write_buffer,
+			sizeof(write_buffer), NULL, 0);
+}
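+
+/*
+ * Example, per the HDMI 2.0 SCDC TMDS_Config definition: a 594000 kHz
+ * 4k60 stream takes the over_340_mhz path, so TMDS_CONFIG is written
+ * with 3 (scrambling enabled, TMDS bit clock ratio 1/40); a 297000 kHz
+ * stream with LTE_340MCSC_SCRAMBLE set gets 1 (scrambling only),
+ * otherwise 0.
+ */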
+
+void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
+{
+	uint8_t slave_address = HDMI_SCDC_ADDRESS;
+	uint8_t offset = HDMI_SCDC_TMDS_CONFIG;
+	uint8_t tmds_config = 0;
+
+	dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
+			sizeof(offset), &tmds_config, sizeof(tmds_config));
+	if (tmds_config & 0x1) {
+		union hdmi_scdc_status_flags_data status_data = { {0} };
+		uint8_t scramble_status = 0;
+
+		offset = HDMI_SCDC_SCRAMBLER_STATUS;
+		dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+				&offset, sizeof(offset), &scramble_status,
+				sizeof(scramble_status));
+		offset = HDMI_SCDC_STATUS_FLAGS;
+		dal_ddc_service_query_ddc_data(ddc_service, slave_address,
+				&offset, sizeof(offset), status_data.byte,
+				sizeof(status_data.byte));
+	}
+}
+
diff --git a/drivers/gpu/drm/amd/dal/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/dal/dc/core/dc_link_dp.c
new file mode 100644
index 000000000000..742ab756cb48
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/dc/core/dc_link_dp.c
@@ -0,0 +1,1728 @@
+/* Copyright 2015 Advanced Micro Devices, Inc. */
+#include "dm_services.h"
+#include "dc.h"
+#include "dc_link_dp.h"
+#include "dm_helpers.h"
+
+#include "inc/core_types.h"
+#include "link_hwss.h"
+#include "dc_link_ddc.h"
+#include "core_status.h"
+#include "dpcd_defs.h"
+
+/* maximum pre emphasis level allowed for each voltage swing level*/
+static const enum pre_emphasis voltage_swing_to_pre_emphasis[] = {
+		PRE_EMPHASIS_LEVEL3,
+		PRE_EMPHASIS_LEVEL2,
+		PRE_EMPHASIS_LEVEL1,
+		PRE_EMPHASIS_DISABLED };
+
+enum {
+	POST_LT_ADJ_REQ_LIMIT = 6,
+	POST_LT_ADJ_REQ_TIMEOUT = 200
+};
+
+enum {
+	LINK_TRAINING_MAX_RETRY_COUNT = 5,
+	/* to avoid an infinite loop wherein the receiver
+	 * keeps switching between different voltage swing levels
+	 */
+	LINK_TRAINING_MAX_CR_RETRY = 100
+};
+
+static const struct link_settings link_training_fallback_table[] = {
+/* 2160 Mbytes/sec*/
+{ LANE_COUNT_FOUR, LINK_RATE_HIGH2, LINK_SPREAD_DISABLED },
+/* 1080 Mbytes/sec*/
+{ LANE_COUNT_FOUR, LINK_RATE_HIGH, LINK_SPREAD_DISABLED },
+/* 648 Mbytes/sec*/
+{ LANE_COUNT_FOUR, LINK_RATE_LOW, LINK_SPREAD_DISABLED },
+/* 1080 Mbytes/sec*/
+{ LANE_COUNT_TWO, LINK_RATE_HIGH2, LINK_SPREAD_DISABLED },
+/* 540 Mbytes/sec*/
+{ LANE_COUNT_TWO, LINK_RATE_HIGH, LINK_SPREAD_DISABLED },
+/* 324 Mbytes/sec*/
+{ LANE_COUNT_TWO, LINK_RATE_LOW, LINK_SPREAD_DISABLED },
+/* 540 Mbytes/sec*/
+{ LANE_COUNT_ONE, LINK_RATE_HIGH2, LINK_SPREAD_DISABLED },
+/* 270 Mbytes/sec*/
+{ LANE_COUNT_ONE, LINK_RATE_HIGH, LINK_SPREAD_DISABLED },
+/* 162 Mbytes/sec*/
+{ LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED } };
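+
+/*
+ * The Mbytes/sec figures above are 8b/10b payload rates: e.g. four lanes
+ * at HBR2 carry 4 * 5.4 Gbps = 21.6 Gbps on the wire, and
+ * 21.6 Gbps * 8/10 / 8 bits per byte = 2160 Mbytes/sec; one lane at RBR
+ * gives 1.62 Gbps * 8/10 / 8 = 162 Mbytes/sec.
+ */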
+
+static void wait_for_training_aux_rd_interval(
+	struct core_link* link,
+	uint32_t default_wait_in_micro_secs)
+{
+	uint8_t training_rd_interval;
+
+	/* overwrite the delay if rev > 1.1*/
+	if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+		/* DP 1.2 or later - retrieve delay through
+		 * "DPCD_ADDR_TRAINING_AUX_RD_INTERVAL" register */
+		core_link_read_dpcd(
+			link,
+			DPCD_ADDRESS_TRAINING_AUX_RD_INTERVAL,
+			&training_rd_interval,
+			sizeof(training_rd_interval));
+		default_wait_in_micro_secs = training_rd_interval ?
+			(training_rd_interval * 4000) :
+			default_wait_in_micro_secs;
+	}
+
+	dm_delay_in_microseconds(link->ctx, default_wait_in_micro_secs);
+
+	dal_logger_write(link->ctx->logger,
+		LOG_MAJOR_HW_TRACE,
+		LOG_MINOR_HW_TRACE_LINK_TRAINING,
+		"%s:\n wait = %d\n",
+		__func__,
+		default_wait_in_micro_secs);
+}
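+
+/*
+ * Example of the override above: a DPCD 1.2+ sink reporting
+ * TRAINING_AUX_RD_INTERVAL = 0x02 makes the wait 2 * 4000 = 8000 us
+ * instead of the caller's default, while a value of 0 keeps the default.
+ */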
+
+static void dpcd_set_training_pattern(
+	struct core_link* link,
+	union dpcd_training_pattern dpcd_pattern)
+{
+	core_link_write_dpcd(
+		link,
+		DPCD_ADDRESS_TRAINING_PATTERN_SET,
+		&dpcd_pattern.raw,
+		1);
+
+	dal_logger_write(link->ctx->logger,
+		LOG_MAJOR_HW_TRACE,
+		LOG_MINOR_HW_TRACE_LINK_TRAINING,
+		"%s\n %x pattern = %x\n",
+		__func__,
+		DPCD_ADDRESS_TRAINING_PATTERN_SET,
+		dpcd_pattern.bits.TRAINING_PATTERN_SET);
+}
+
+static void dpcd_set_link_settings(
+	struct core_link* link,
+	const struct link_training_settings *lt_settings)
+{
+	uint8_t rate = (uint8_t)(lt_settings->link_settings.link_rate);
+
+	union down_spread_ctrl downspread = {{0}};
+	union lane_count_set lane_count_set = {{0}};
+	uint8_t link_set_buffer[2];
+
+	downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread);
+
+	lane_count_set.bits.LANE_COUNT_SET =
+		lt_settings->link_settings.lane_count;
+
+	lane_count_set.bits.ENHANCED_FRAMING = 1;
+
+	lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
+		link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
+
+	link_set_buffer[0] = rate;
+	link_set_buffer[1] = lane_count_set.raw;
+
+	core_link_write_dpcd(link, DPCD_ADDRESS_LINK_BW_SET,
+		link_set_buffer, 2);
+	core_link_write_dpcd(link, DPCD_ADDRESS_DOWNSPREAD_CNTL,
+		&downspread.raw, sizeof(downspread));
+
+	dal_logger_write(link->ctx->logger,
+		LOG_MAJOR_HW_TRACE,
+		LOG_MINOR_HW_TRACE_LINK_TRAINING,
+		"%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
+		__func__,
+		DPCD_ADDRESS_LINK_BW_SET,
+		lt_settings->link_settings.link_rate,
+		DPCD_ADDRESS_LANE_COUNT_SET,
+		lt_settings->link_settings.lane_count,
+		DPCD_ADDRESS_DOWNSPREAD_CNTL,
+		lt_settings->link_settings.link_spread);
+
+}
+
+static enum dpcd_training_patterns
+	hw_training_pattern_to_dpcd_training_pattern(
+	struct core_link* link,
+	enum hw_dp_training_pattern pattern)
+{
+	enum dpcd_training_patterns dpcd_tr_pattern =
+	DPCD_TRAINING_PATTERN_VIDEOIDLE;
+
+	switch (pattern) {
+	case HW_DP_TRAINING_PATTERN_1:
+		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1;
+		break;
+	case HW_DP_TRAINING_PATTERN_2:
+		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2;
+		break;
+	case HW_DP_TRAINING_PATTERN_3:
+		dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3;
+		break;
+	default:
+		ASSERT(0);
+		dal_logger_write(link->ctx->logger,
+			LOG_MAJOR_HW_TRACE,
+			LOG_MINOR_HW_TRACE_LINK_TRAINING,
+			"%s: Invalid HW Training pattern: %d\n",
+			__func__, pattern);
+		break;
+	}
+
+	return dpcd_tr_pattern;
+
+}
+
+static void dpcd_set_lt_pattern_and_lane_settings(
+	struct core_link* link,
+	const struct link_training_settings *lt_settings,
+	enum hw_dp_training_pattern pattern)
+{
+	union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
+	const uint32_t dpcd_base_lt_offset =
+	DPCD_ADDRESS_TRAINING_PATTERN_SET;
+	uint8_t dpcd_lt_buffer[5] = {0};
+	union dpcd_training_pattern dpcd_pattern = {{0}};
+	uint32_t lane;
+	uint32_t size_in_bytes;
+	bool edp_workaround = false; /* TODO link_prop.INTERNAL */
+
+	/*****************************************************************
+	* DpcdAddress_TrainingPatternSet
+	*****************************************************************/
+	dpcd_pattern.bits.TRAINING_PATTERN_SET =
+		hw_training_pattern_to_dpcd_training_pattern(link, pattern);
+
+	dpcd_lt_buffer[DPCD_ADDRESS_TRAINING_PATTERN_SET - dpcd_base_lt_offset]
+		= dpcd_pattern.raw;
+
+	dal_logger_write(link->ctx->logger,
+		LOG_MAJOR_HW_TRACE,
+		LOG_MINOR_HW_TRACE_LINK_TRAINING,
+		"%s\n %x pattern = %x\n",
+		__func__,
+		DPCD_ADDRESS_TRAINING_PATTERN_SET,
+		dpcd_pattern.bits.TRAINING_PATTERN_SET);
+
+
+	/*****************************************************************
+	* DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set
+	*****************************************************************/
+	for (lane = 0; lane <
+		(uint32_t)(lt_settings->link_settings.lane_count); lane++) {
+
+		dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
+		(uint8_t)(lt_settings->lane_settings[lane].VOLTAGE_SWING);
+		dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
+		(uint8_t)(lt_settings->lane_settings[lane].PRE_EMPHASIS);
+
+		dpcd_lane[lane].bits.MAX_SWING_REACHED =
+		(lt_settings->lane_settings[lane].VOLTAGE_SWING ==
+		VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
+		dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
+		(lt_settings->lane_settings[lane].PRE_EMPHASIS ==
+		PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
+	}
+
+	/* concatenate everything into one buffer */
+
+	size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]);
+
+	/* 0x00103 - 0x00102 */
+	dm_memmove(
+		&dpcd_lt_buffer[DPCD_ADDRESS_LANE0_SET - dpcd_base_lt_offset],
+		dpcd_lane,
+		size_in_bytes);
+
+	dal_logger_write(link->ctx->logger,
+		LOG_MAJOR_HW_TRACE,
+		LOG_MINOR_HW_TRACE_LINK_TRAINING,
+		"%s:\n %x VS set = %x  PE set = %x \
+		max VS Reached = %x  max PE Reached = %x\n",
+		__func__,
+		DPCD_ADDRESS_LANE0_SET,
+		dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+		dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+		dpcd_lane[0].bits.MAX_SWING_REACHED,
+		dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+
+
+	if (edp_workaround) {
+		/* for eDP write in 2 parts because the 5-byte burst is
+		* causing issues on some eDP panels (EPR#366724)
+		*/
+		core_link_write_dpcd(
+			link,
+			DPCD_ADDRESS_TRAINING_PATTERN_SET,
+			&dpcd_pattern.raw,
+			sizeof(dpcd_pattern.raw));
+
+		core_link_write_dpcd(
+			link,
+			DPCD_ADDRESS_LANE0_SET,
+			(uint8_t *)(dpcd_lane),
+			size_in_bytes);
+
+	} else {
+		/* write it all in a (1 + number-of-lanes)-byte burst */
+		core_link_write_dpcd(
+			link,
+			dpcd_base_lt_offset,
+			dpcd_lt_buffer,
+			size_in_bytes + sizeof(dpcd_pattern.raw));
+	}
+
+	link->ln_setting = lt_settings->lane_settings[0];
+}
+
+static bool is_cr_done(enum lane_count ln_count,
+	union lane_status *dpcd_lane_status)
+{
+	bool done = true;
+	uint32_t lane;
+	/*LANEx_CR_DONE bits All 1's?*/
+	for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
+		if (!dpcd_lane_status[lane].bits.CR_DONE_0)
+			done = false;
+	}
+	return done;
+
+}
+
+static bool is_ch_eq_done(enum lane_count ln_count,
+	union lane_status *dpcd_lane_status,
+	union lane_align_status_updated *lane_status_updated)
+{
+	bool done = true;
+	uint32_t lane;
+	if (!lane_status_updated->bits.INTERLANE_ALIGN_DONE)
+		done = false;
+	else {
+		for (lane = 0; lane < (uint32_t)(ln_count); lane++) {
+			if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0 ||
+				!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0)
+				done = false;
+		}
+	}
+	return done;
+
+}
+
+static void update_drive_settings(
+		struct link_training_settings *dest,
+		struct link_training_settings src)
+{
+	uint32_t lane;
+	for (lane = 0; lane < src.link_settings.lane_count; lane++) {
+		dest->lane_settings[lane].VOLTAGE_SWING =
+			src.lane_settings[lane].VOLTAGE_SWING;
+		dest->lane_settings[lane].PRE_EMPHASIS =
+			src.lane_settings[lane].PRE_EMPHASIS;
+		dest->lane_settings[lane].POST_CURSOR2 =
+			src.lane_settings[lane].POST_CURSOR2;
+	}
+}
+
+static uint8_t get_nibble_at_index(const uint8_t *buf,
+	uint32_t index)
+{
+	uint8_t nibble;
+	nibble = buf[index / 2];
+
+	if (index % 2)
+		nibble >>= 4;
+	else
+		nibble &= 0x0F;
+
+	return nibble;
+}
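+
+/*
+ * Example: with buf[0] = 0xA3, get_nibble_at_index(buf, 0) returns the low
+ * nibble 0x3 and get_nibble_at_index(buf, 1) returns the high nibble 0xA,
+ * matching how two per-lane 4-bit fields are packed into one DPCD byte.
+ */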
+
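+/* The highest pre-emphasis level allowed for a given voltage swing comes
+ * from the voltage_swing_to_pre_emphasis[] table consulted below.
+ */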
+static enum pre_emphasis get_max_pre_emphasis_for_voltage_swing(
+	enum voltage_swing voltage)
+{
+	enum pre_emphasis pre_emphasis;
+	pre_emphasis = PRE_EMPHASIS_MAX_LEVEL;
+
+	if (voltage <= VOLTAGE_SWING_MAX_LEVEL)
+		pre_emphasis = voltage_swing_to_pre_emphasis[voltage];
+
+	return pre_emphasis;
+
+}
+
+static void find_max_drive_settings(
+	const struct link_training_settings *link_training_setting,
+	struct link_training_settings *max_lt_setting)
+{
+	uint32_t lane;
+	struct lane_settings max_requested;
+
+	max_requested.VOLTAGE_SWING =
+		link_training_setting->
+		lane_settings[0].VOLTAGE_SWING;
+	max_requested.PRE_EMPHASIS =
+		link_training_setting->
+		lane_settings[0].PRE_EMPHASIS;
+	/*max_requested.postCursor2 =
+	 * link_training_setting->laneSettings[0].postCursor2;*/
+
+	/* Determine what the maximum of the requested settings are*/
+	for (lane = 1; lane < link_training_setting->link_settings.lane_count;
+			lane++) {
+		if (link_training_setting->lane_settings[lane].VOLTAGE_SWING >
+			max_requested.VOLTAGE_SWING)
+
+			max_requested.VOLTAGE_SWING =
+			link_training_setting->
+			lane_settings[lane].VOLTAGE_SWING;
+
+
+		if (link_training_setting->lane_settings[lane].PRE_EMPHASIS >
+				max_requested.PRE_EMPHASIS)
+			max_requested.PRE_EMPHASIS =
+			link_training_setting->
+			lane_settings[lane].PRE_EMPHASIS;
+
+		/*
+		if (link_training_setting->laneSettings[lane].postCursor2 >
+		 max_requested.postCursor2)
+		{
+		max_requested.postCursor2 =
+		link_training_setting->laneSettings[lane].postCursor2;
+		}
+		*/
+	}
+
+	/* make sure the requested settings are
+	 * not higher than maximum settings*/
+	if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL)
+		max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL;
+
+	if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL)
+		max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL;
+	/*
+	if (max_requested.postCursor2 > PostCursor2_MaxLevel)
+	max_requested.postCursor2 = PostCursor2_MaxLevel;
+	*/
+
+	/* make sure the pre-emphasis matches the voltage swing*/
+	if (max_requested.PRE_EMPHASIS >
+		get_max_pre_emphasis_for_voltage_swing(
+			max_requested.VOLTAGE_SWING))
+		max_requested.PRE_EMPHASIS =
+		get_max_pre_emphasis_for_voltage_swing(
+			max_requested.VOLTAGE_SWING);
+
+	/*
+	 * Post Cursor2 levels are completely independent from
+	 * pre-emphasis (Post Cursor1) levels. But Post Cursor2 levels
+	 * can only be applied to each allowable combination of voltage
+	 * swing and pre-emphasis levels */
+	 /* if ( max_requested.postCursor2 >
+	  *  getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing))
+	  *  max_requested.postCursor2 =
+	  *  getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing);
+	  */
+
+	max_lt_setting->link_settings.link_rate =
+		link_training_setting->link_settings.link_rate;
+	max_lt_setting->link_settings.lane_count =
+	link_training_setting->link_settings.lane_count;
+	max_lt_setting->link_settings.link_spread =
+		link_training_setting->link_settings.link_spread;
+
+	for (lane = 0; lane <
+		link_training_setting->link_settings.lane_count;
+		lane++) {
+		max_lt_setting->lane_settings[lane].VOLTAGE_SWING =
+			max_requested.VOLTAGE_SWING;
+		max_lt_setting->lane_settings[lane].PRE_EMPHASIS =
+			max_requested.PRE_EMPHASIS;
+		/*max_lt_setting->laneSettings[lane].postCursor2 =
+		 * max_requested.postCursor2;
+		 */
+	}
+
+}
+
+static void get_lane_status_and_drive_settings(
+	struct core_link *link,
+	const struct link_training_settings *link_training_setting,
+	union lane_status *ln_status,
+	union lane_align_status_updated *ln_status_updated,
+	struct link_training_settings *req_settings)
+{
+	uint8_t dpcd_buf[6] = {0};
+	union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {{{0}}};
+	struct link_training_settings request_settings = {{0}};
+	uint32_t lane;
+
+	dm_memset(req_settings, '\0', sizeof(struct link_training_settings));
+
+	core_link_read_dpcd(
+		link,
+		DPCD_ADDRESS_LANE_01_STATUS,
+		(uint8_t *)(dpcd_buf),
+		sizeof(dpcd_buf));
+
+
+	for (lane = 0; lane <
+		(uint32_t)(link_training_setting->link_settings.lane_count);
+		lane++) {
+
+		ln_status[lane].raw =
+			get_nibble_at_index(&dpcd_buf[0], lane);
+		dpcd_lane_adjust[lane].raw =
+			get_nibble_at_index(&dpcd_buf[4], lane);
+	}
+
+	ln_status_updated->raw = dpcd_buf[2];
+
+	dal_logger_write(link->ctx->logger,
+		LOG_MAJOR_HW_TRACE,
+		LOG_MINOR_HW_TRACE_LINK_TRAINING,
+		"%s:\n%x Lane01Status = %x\n %x Lane23Status = %x\n ",
+		__func__,
+		DPCD_ADDRESS_LANE_01_STATUS, dpcd_buf[0],
+		DPCD_ADDRESS_LANE_23_STATUS, dpcd_buf[1]);
+
+	dal_logger_write(link->ctx->logger,
+		LOG_MAJOR_HW_TRACE,
+		LOG_MINOR_HW_TRACE_LINK_TRAINING,
+		"%s:\n %x Lane01AdjustRequest = %x\n %x Lane23AdjustRequest = %x\n",
+		__func__,
+		DPCD_ADDRESS_ADJUST_REQUEST_LANE0_1,
+		dpcd_buf[4],
+		DPCD_ADDRESS_ADJUST_REQUEST_LANE2_3,
+		dpcd_buf[5]);
+
+	/*copy to req_settings*/
+	request_settings.link_settings.lane_count =
+		link_training_setting->link_settings.lane_count;
+	request_settings.link_settings.link_rate =
+		link_training_setting->link_settings.link_rate;
+	request_settings.link_settings.link_spread =
+		link_training_setting->link_settings.link_spread;
+
+	for (lane = 0; lane <
+		(uint32_t)(link_training_setting->link_settings.lane_count);
+		lane++) {
+
+		request_settings.lane_settings[lane].VOLTAGE_SWING =
+			(enum voltage_swing)(dpcd_lane_adjust[lane].bits.
+				VOLTAGE_SWING_LANE);
+		request_settings.lane_settings[lane].PRE_EMPHASIS =
+			(enum pre_emphasis)(dpcd_lane_adjust[lane].bits.
+				PRE_EMPHASIS_LANE);
+	}
+
+	/*Note: for postcursor2, read adjusted
+	 * postcursor2 settings from*/
+	/*DpcdAddress_AdjustRequestPostCursor2 =
+	 *0x020C (not implemented yet)*/
+
+	/* we find the maximum of the requested settings across all lanes*/
+	/* and set this maximum for all lanes*/
+	find_max_drive_settings(&request_settings, req_settings);
+
+	/* if post cursor 2 is needed in the future,
+	 * read DpcdAddress_AdjustRequestPostCursor2 = 0x020C
+	 */
+
+}
+
+static void dpcd_set_lane_settings(
+	struct core_link *link,
+	const struct link_training_settings *link_training_setting)
+{
+	union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
+	uint32_t lane;
+
+	for (lane = 0; lane <
+		(uint32_t)(link_training_setting->
+		link_settings.lane_count);
+		lane++) {
+		dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
+			(uint8_t)(link_training_setting->
+			lane_settings[lane].VOLTAGE_SWING);
+		dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
+			(uint8_t)(link_training_setting->
+			lane_settings[lane].PRE_EMPHASIS);
+		dpcd_lane[lane].bits.MAX_SWING_REACHED =
+			(link_training_setting->
+			lane_settings[lane].VOLTAGE_SWING ==
+			VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
+		dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
+			(link_training_setting->
+			lane_settings[lane].PRE_EMPHASIS ==
+			PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
+	}
+
+	core_link_write_dpcd(link,
+		DPCD_ADDRESS_LANE0_SET,
+		(uint8_t *)(dpcd_lane),
+		link_training_setting->link_settings.lane_count);
+
+	/*
+	if (LTSettings.link.rate == LinkRate_High2)
+	{
+		DpcdTrainingLaneSet2 dpcd_lane2[lane_count_DPMax] = {0};
+		for ( uint32_t lane = 0;
+		lane < lane_count_DPMax; lane++)
+		{
+			dpcd_lane2[lane].bits.post_cursor2_set =
+			static_cast<unsigned char>(
+			LTSettings.laneSettings[lane].postCursor2);
+			dpcd_lane2[lane].bits.max_post_cursor2_reached = 0;
+		}
+		m_pDpcdAccessSrv->WriteDpcdData(
+		DpcdAddress_Lane0Set2,
+		reinterpret_cast<unsigned char*>(dpcd_lane2),
+		LTSettings.link.lanes);
+	}
+	*/
+
+	dal_logger_write(link->ctx->logger,
+		LOG_MAJOR_HW_TRACE,
+		LOG_MINOR_HW_TRACE_LINK_TRAINING,
+		"%s\n %x VS set = %x  PE set = %x \
+		max VS Reached = %x  max PE Reached = %x\n",
+		__func__,
+		DPCD_ADDRESS_LANE0_SET,
+		dpcd_lane[0].bits.VOLTAGE_SWING_SET,
+		dpcd_lane[0].bits.PRE_EMPHASIS_SET,
+		dpcd_lane[0].bits.MAX_SWING_REACHED,
+		dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+
+	link->ln_setting = link_training_setting->lane_settings[0];
+
+}
+
+static bool is_max_vs_reached(
+	const struct link_training_settings *lt_settings)
+{
+	uint32_t lane;
+	for (lane = 0; lane <
+		(uint32_t)(lt_settings->link_settings.lane_count);
+		lane++) {
+		if (lt_settings->lane_settings[lane].VOLTAGE_SWING
+			== VOLTAGE_SWING_MAX_LEVEL)
+			return true;
+	}
+	return false;
+
+}
+
+void set_drive_settings(
+	struct core_link *link,
+	struct link_training_settings *lt_settings)
+{
+	/* program ASIC PHY settings*/
+	dp_set_hw_lane_settings(link, lt_settings);
+
+	/* Notify DP sink the PHY settings from source */
+	dpcd_set_lane_settings(link, lt_settings);
+}
+
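+/* Post-link-training adjustment: while the sink keeps
+ * POST_LT_ADJ_REQ_IN_PROGRESS set, poll lane status and re-apply the
+ * requested drive settings; fail if CR or channel EQ is lost, and give up
+ * once the request or timeout limits are reached.
+ */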
+static bool perform_post_lt_adj_req_sequence(
+	struct core_link *link,
+	struct link_training_settings *lt_settings)
+{
+	enum lane_count lane_count =
+	lt_settings->link_settings.lane_count;
+
+	uint32_t adj_req_count;
+	uint32_t adj_req_timer;
+	bool req_drv_setting_changed;
+	uint32_t lane;
+
+	req_drv_setting_changed = false;
+	for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT;
+	adj_req_count++) {
+
+		req_drv_setting_changed = false;
+
+		for (adj_req_timer = 0;
+			adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT;
+			adj_req_timer++) {
+
+			struct link_training_settings req_settings;
+			union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+			union lane_align_status_updated
+				dpcd_lane_status_updated;
+
+			get_lane_status_and_drive_settings(
+			link,
+			lt_settings,
+			dpcd_lane_status,
+			&dpcd_lane_status_updated,
+			&req_settings);
+
+			if (dpcd_lane_status_updated.bits.
+					POST_LT_ADJ_REQ_IN_PROGRESS == 0)
+				return true;
+
+			if (!is_cr_done(lane_count, dpcd_lane_status))
+				return false;
+
+			if (!is_ch_eq_done(
+				lane_count,
+				dpcd_lane_status,
+				&dpcd_lane_status_updated))
+				return false;
+
+			for (lane = 0; lane < (uint32_t)(lane_count); lane++) {
+
+				if (lt_settings->
+				lane_settings[lane].VOLTAGE_SWING !=
+				req_settings.lane_settings[lane].
+				VOLTAGE_SWING ||
+				lt_settings->lane_settings[lane].PRE_EMPHASIS !=
+				req_settings.lane_settings[lane].PRE_EMPHASIS) {
+
+					req_drv_setting_changed = true;
+					break;
+				}
+			}
+
+			if (req_drv_setting_changed) {
+				update_drive_settings(
+					lt_settings, req_settings);
+
+				set_drive_settings(link, lt_settings);
+				break;
+			}
+
+			dm_sleep_in_milliseconds(link->ctx, 1);
+		}
+
+		if (!req_drv_setting_changed) {
+			dal_logger_write(link->ctx->logger,
+				LOG_MAJOR_WARNING,
+				LOG_MINOR_COMPONENT_LINK_SERVICE,
+				"%s: Post Link Training Adjust Request Timed out\n",
+				__func__);
+
+			ASSERT(0);
+			return true;
+		}
+	}
+	dal_logger_write(link->ctx->logger,
+		LOG_MAJOR_WARNING,
+		LOG_MINOR_COMPONENT_LINK_SERVICE,
+		"%s: Post Link Training Adjust Request limit reached\n",
+		__func__);
+
+	ASSERT(0);
+	return true;
+
+}
+
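+/* Channel equalization: program training pattern 2, then repeatedly write
+ * the current drive settings to the sink and read back lane status until
+ * channel EQ is done (success), CR is lost (failure), or the retry limit
+ * is exhausted.
+ */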
+static bool perform_channel_equalization_sequence(
+	struct core_link *link,
+	struct link_training_settings *lt_settings)
+{
+	struct link_training_settings req_settings;
+	enum hw_dp_training_pattern hw_tr_pattern;
+	uint32_t retries_ch_eq;
+	enum lane_count lane_count = lt_settings->link_settings.lane_count;
+	union lane_align_status_updated dpcd_lane_status_updated = {{0}};
+	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
+
+	/*TODO hw_tr_pattern = HW_DP_TRAINING_PATTERN_3;*/
+	hw_tr_pattern = HW_DP_TRAINING_PATTERN_2;
+
+	dp_set_hw_training_pattern(link, hw_tr_pattern);
+
+	for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT;
+		retries_ch_eq++) {
+
+		dp_set_hw_lane_settings(link, lt_settings);
+
+		/* 2. update DPCD*/
+		if (!retries_ch_eq)
+			/* EPR #361076 - write as a 5-byte burst,
+			 * but only for the 1-st iteration*/
+			dpcd_set_lt_pattern_and_lane_settings(
+				link,
+				lt_settings,
+				hw_tr_pattern);
+		else
+			dpcd_set_lane_settings(link, lt_settings);
+
+		/* 3. wait for receiver to lock-on*/
+		wait_for_training_aux_rd_interval(link, 400);
+
+		/* 4. Read lane status and requested
+		 * drive settings as set by the sink*/
+
+		get_lane_status_and_drive_settings(
+			link,
+			lt_settings,
+			dpcd_lane_status,
+			&dpcd_lane_status_updated,
+			&req_settings);
+
+		/* 5. check CR done*/
+		if (!is_cr_done(lane_count, dpcd_lane_status))
+			return false;
+
+		/* 6. check CHEQ done*/
+		if (is_ch_eq_done(lane_count,
+			dpcd_lane_status,
+			&dpcd_lane_status_updated))
+			return true;
+
+		/* 7. update VS/PE/PC2 in lt_settings*/
+		update_drive_settings(lt_settings, req_settings);
+	}
+
+	return false;
+
+}
+
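+/* Clock recovery: start from minimum VS/PE/PC2, program training pattern 1
+ * and keep applying the sink's requested drive settings until every lane
+ * reports CR_DONE, the maximum voltage swing is reached, or the retry
+ * limits expire.
+ */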
+static bool perform_clock_recovery_sequence(
+	struct core_link *link,
+	struct link_training_settings *lt_settings)
+{
+	uint32_t retries_cr;
+	uint32_t retry_count;
+	uint32_t lane;
+	struct link_training_settings req_settings;
+	enum lane_count lane_count =
+	lt_settings->link_settings.lane_count;
+	enum hw_dp_training_pattern hw_tr_pattern = HW_DP_TRAINING_PATTERN_1;
+	union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+	union lane_align_status_updated dpcd_lane_status_updated;
+
+	retries_cr = 0;
+	retry_count = 0;
+	/* initial drive setting (VS/PE/PC2)*/
+	for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+		lt_settings->lane_settings[lane].VOLTAGE_SWING =
+		VOLTAGE_SWING_LEVEL0;
+		lt_settings->lane_settings[lane].PRE_EMPHASIS =
+		PRE_EMPHASIS_DISABLED;
+		lt_settings->lane_settings[lane].POST_CURSOR2 =
+		POST_CURSOR2_DISABLED;
+	}
+
+	dp_set_hw_training_pattern(link, hw_tr_pattern);
+
+	/* The Synaptics MST hub can put link training into an infinite
+	 * loop by switching the VS between level 0 and level 1
+	 * continuously; limit the CR lock attempts to
+	 * LINK_TRAINING_MAX_CR_RETRY.
+	 */
+	while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
+	(retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+
+		dm_memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status));
+		dm_memset(&dpcd_lane_status_updated, '\0',
+		sizeof(dpcd_lane_status_updated));
+
+		/* 1. call HWSS to set lane settings*/
+		dp_set_hw_lane_settings(
+				link,
+				lt_settings);
+
+		/* 2. update DPCD of the receiver*/
+		if (!retries_cr)
+			/* EPR #361076 - write as a 5-byte burst,
+			 * but only for the 1-st iteration.*/
+			dpcd_set_lt_pattern_and_lane_settings(
+					link,
+					lt_settings,
+					hw_tr_pattern);
+		else
+			dpcd_set_lane_settings(
+					link,
+					lt_settings);
+
+
+		/* 3. wait receiver to lock-on*/
+		wait_for_training_aux_rd_interval(
+				link,
+				100);
+
+		/* 4. Read lane status and requested drive
+		* settings as set by the sink
+		*/
+		get_lane_status_and_drive_settings(
+				link,
+				lt_settings,
+				dpcd_lane_status,
+				&dpcd_lane_status_updated,
+				&req_settings);
+
+
+		/* 5. check CR done*/
+		if (is_cr_done(lane_count, dpcd_lane_status))
+			return true;
+
+		/* 6. max VS reached*/
+		if (is_max_vs_reached(lt_settings))
+			return false;
+
+		/* 7. same voltage */
+		/* Note: VS is the same for all lanes,
+		 * so comparing the first lane is sufficient
+		 */
+		if (lt_settings->lane_settings[0].VOLTAGE_SWING ==
+			req_settings.lane_settings[0].VOLTAGE_SWING)
+			retries_cr++;
+		else
+			retries_cr = 0;
+
+		/* 8. update VS/PE/PC2 in lt_settings */
+		update_drive_settings(lt_settings, req_settings);
+
+		retry_count++;
+	}
+
+	if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) {
+		ASSERT(0);
+		dal_logger_write(link->ctx->logger,
+			LOG_MAJOR_ERROR,
+			LOG_MINOR_COMPONENT_LINK_SERVICE,
+			"%s: Link Training Error, could not \
+			 get CR after %d tries. \
+			Possibly voltage swing issue", __func__,
+			LINK_TRAINING_MAX_CR_RETRY);
+
+	}
+
+	return false;
+}
+
+bool perform_link_training(
+	struct core_link *link,
+	const struct link_settings *link_setting,
+	bool skip_video_pattern)
+{
+	bool status;
+	union dpcd_training_pattern dpcd_pattern = {{0}};
+	union lane_count_set lane_count_set = {{0}};
+	const char *link_rate = "Unknown";
+	struct link_training_settings lt_settings;
+
+	status = false;
+	dm_memset(&lt_settings, '\0', sizeof(lt_settings));
+
+	lt_settings.link_settings.link_rate = link_setting->link_rate;
+	lt_settings.link_settings.lane_count = link_setting->lane_count;
+
+	/*@todo[vdevulap] move SS to LS, should not be handled by displaypath*/
+
+	/* TODO hard coded to SS for now
+	 * lt_settings.link_settings.link_spread =
+	 * dal_display_path_is_ss_supported(
+	 * path_mode->display_path) ?
+	 * LINK_SPREAD_05_DOWNSPREAD_30KHZ :
+	 * LINK_SPREAD_DISABLED;
+	 */
+	lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+
+	/* 1. set link rate, lane count and spread*/
+	dpcd_set_link_settings(link, &lt_settings);
+
+	/* 2. perform link training (set link training done
+	 *  to false is done as well)*/
+	if (perform_clock_recovery_sequence(link, &lt_settings)) {
+
+		if (perform_channel_equalization_sequence(link, &lt_settings))
+			status = true;
+	}
+
+	if (status || !skip_video_pattern) {
+
+		/* 3. set training not in progress*/
+		dpcd_pattern.bits.TRAINING_PATTERN_SET =
+			DPCD_TRAINING_PATTERN_VIDEOIDLE;
+		dpcd_set_training_pattern(link, dpcd_pattern);
+
+		/* 4. mainlink output idle pattern*/
+		dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE);
+
+		/* 5. post training adjust if required*/
+		if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED
+			== 1) {
+			if (status &&
+				!perform_post_lt_adj_req_sequence(
+					link, &lt_settings))
+				status = false;
+
+			lane_count_set.bits.LANE_COUNT_SET =
+				lt_settings.link_settings.lane_count;
+			lane_count_set.bits.ENHANCED_FRAMING = 1;
+			lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
+
+			core_link_write_dpcd(
+				link,
+				DPCD_ADDRESS_LANE_COUNT_SET,
+				&lane_count_set.raw,
+				sizeof(lane_count_set));
+		}
+	}
+
+	/* 6. print status message*/
+	switch (lt_settings.link_settings.link_rate) {
+
+	case LINK_RATE_LOW:
+		link_rate = "Low";
+		break;
+	case LINK_RATE_HIGH:
+		link_rate = "High";
+		break;
+	case LINK_RATE_HIGH2:
+		link_rate = "High2";
+		break;
+	case LINK_RATE_RBR2:
+		link_rate = "RBR2";
+		break;
+	default:
+		break;
+	}
+
+	dal_logger_write(link->ctx->logger,
+		LOG_MAJOR_MST,
+		LOG_MINOR_MST_PROGRAMMING,
+		"Link training for %d lanes at %s rate %s with PE %d, VS %d\n",
+		lt_settings.link_settings.lane_count,
+		link_rate,
+		status ? "succeeded" : "failed",
+		lt_settings.lane_settings[0].PRE_EMPHASIS,
+		lt_settings.lane_settings[0].VOLTAGE_SWING);
+
+	return status;
+}
+
+/* TODO: add more checks to see if the link supports the requested link configuration */
+static bool is_link_setting_supported(
+	const struct link_settings *link_setting,
+	const struct link_settings *max_link_setting)
+{
+	if (link_setting->lane_count > max_link_setting->lane_count ||
+		link_setting->link_rate > max_link_setting->link_rate)
+		return false;
+	return true;
+}
+
+static uint32_t get_link_training_fallback_table_len(
+	struct core_link *link)
+{
+	return ARRAY_SIZE(link_training_fallback_table);
+}
+
+static const struct link_settings *get_link_training_fallback_table(
+	struct core_link *link, uint32_t i)
+{
+	return &link_training_fallback_table[i];
+}
+
+static bool exceeded_limit_link_setting(const struct link_settings *link_setting,
+			const struct link_settings *limit_link_setting)
+{
+	return link_setting->lane_count * link_setting->link_rate >
+		limit_link_setting->lane_count * limit_link_setting->link_rate;
+}
+
+
+bool dp_hbr_verify_link_cap(
+	struct core_link *link,
+	struct link_settings *known_limit_link_setting)
+{
+	struct link_settings max_link_cap = {0};
+	bool success;
+	bool skip_link_training;
+	const struct link_settings *cur;
+	bool skip_video_pattern;
+	uint32_t i;
+
+	success = false;
+	skip_link_training = false;
+
+	/* TODO confirm this is correct for cz */
+	max_link_cap.lane_count = LANE_COUNT_FOUR;
+	max_link_cap.link_rate = LINK_RATE_HIGH2;
+	max_link_cap.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+
+	/* TODO implement override and monitor patch later */
+
+	/* try to train the link from high to low to
+	 * find the physical link capability
+	 */
+	/* disable PHY done possible by BIOS, will be done by driver itself */
+	dp_disable_link_phy(link, link->public.connector_signal);
+
+	for (i = 0; i < get_link_training_fallback_table_len(link) &&
+		!success; i++) {
+		cur = get_link_training_fallback_table(link, i);
+
+		if (known_limit_link_setting->lane_count != LANE_COUNT_UNKNOWN &&
+			exceeded_limit_link_setting(cur,
+					known_limit_link_setting))
+			continue;
+
+		if (!is_link_setting_supported(cur, &max_link_cap))
+			continue;
+
+		skip_video_pattern = true;
+		if (cur->link_rate == LINK_RATE_LOW)
+			skip_video_pattern = false;
+
+		dp_enable_link_phy(
+				link,
+				link->public.connector_signal,
+				cur);
+
+		if (skip_link_training)
+			success = true;
+		else {
+			uint8_t num_retries = 3;
+			uint8_t j;
+			uint8_t delay_between_retries = 10;
+
+			for (j = 0; j < num_retries; ++j) {
+				success = perform_link_training(
+					link,
+					cur,
+					skip_video_pattern);
+
+				if (success)
+					break;
+
+				dm_sleep_in_milliseconds(
+					link->ctx,
+					delay_between_retries);
+
+				delay_between_retries += 10;
+			}
+		}
+
+		if (success)
+			link->verified_link_cap = *cur;
+
+		/* always disable the link before trying another
+		 * setting or before returning; we'll enable it later
+		 * based on the actual mode we're driving
+		 */
+		dp_disable_link_phy(link, link->public.connector_signal);
+	}
+
+	/* Link Training failed for all Link Settings
+	 *  (Lane Count is still unknown)
+	 */
+	if (!success) {
+		/* If all LT fails for all settings,
+		 * set verified = failed safe (1 lane low)
+		 */
+		link->verified_link_cap.lane_count = LANE_COUNT_ONE;
+		link->verified_link_cap.link_rate = LINK_RATE_LOW;
+
+		link->verified_link_cap.link_spread =
+		LINK_SPREAD_DISABLED;
+	}
+
+	link->max_link_setting = link->verified_link_cap;
+
+	return success;
+}
+
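+/* Required stream bandwidth: pixel clock (kHz) times bits per color
+ * channel, times three channels unless the stream is Y-only.
+ */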
+static uint32_t bandwidth_in_kbps_from_timing(
+	const struct dc_crtc_timing *timing)
+{
+	uint32_t bits_per_channel = 0;
+	uint32_t kbps;
+	switch (timing->display_color_depth) {
+
+	case COLOR_DEPTH_666:
+		bits_per_channel = 6;
+		break;
+	case COLOR_DEPTH_888:
+		bits_per_channel = 8;
+		break;
+	case COLOR_DEPTH_101010:
+		bits_per_channel = 10;
+		break;
+	case COLOR_DEPTH_121212:
+		bits_per_channel = 12;
+		break;
+	case COLOR_DEPTH_141414:
+		bits_per_channel = 14;
+		break;
+	case COLOR_DEPTH_161616:
+		bits_per_channel = 16;
+		break;
+	default:
+		break;
+	}
+	ASSERT(bits_per_channel != 0);
+
+	kbps = timing->pix_clk_khz;
+	kbps *= bits_per_channel;
+
+	if (timing->flags.Y_ONLY != 1)
+		/* Only Y-only carries a single channel; every other format
+		 * carries three channels, tripling the bandwidth.
+		 */
+		kbps *= 3;
+
+	return kbps;
+
+}
+
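+/* Available link bandwidth: link_rate is an encoded multiplier of
+ * LINK_RATE_REF_FREQ_IN_KHZ, which gives the per-lane symbol clock in kHz;
+ * each 8b/10b symbol carries 8 bits of payload, hence the factor of 8.
+ */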
+static uint32_t bandwidth_in_kbps_from_link_settings(
+	const struct link_settings *link_setting)
+{
+	uint32_t link_rate_in_kbps = link_setting->link_rate *
+		LINK_RATE_REF_FREQ_IN_KHZ;
+
+	uint32_t lane_count  = link_setting->lane_count;
+	uint32_t kbps = link_rate_in_kbps;
+	kbps *= lane_count;
+	kbps *= 8;   /* 8 bits per byte*/
+
+	return kbps;
+
+}
+
+bool dp_validate_mode_timing(
+	struct core_link *link,
+	const struct dc_crtc_timing *timing)
+{
+	uint32_t req_bw;
+	uint32_t max_bw;
+
+	const struct link_settings *link_setting;
+
+	/*always DP fail safe mode*/
+	if (timing->pix_clk_khz == (uint32_t)25175 &&
+		timing->h_addressable == (uint32_t)640 &&
+		timing->v_addressable == (uint32_t)480)
+		return true;
+
+	/* For static validation we always use the reported link settings;
+	 * in other cases, when the mode list has not changed, the verified
+	 * link settings can be used instead.
+	 */
+	link_setting = &link->reported_link_cap;
+
+	/* TODO: DYNAMIC_VALIDATION needs to be implemented */
+	/*if (flags.DYNAMIC_VALIDATION == 1 &&
+		link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN)
+		link_setting = &link->verified_link_cap;
+	*/
+
+	req_bw = bandwidth_in_kbps_from_timing(timing);
+	max_bw = bandwidth_in_kbps_from_link_settings(link_setting);
+
+	if (req_bw < max_bw) {
+		/* Remember the biggest mode here. During initial link
+		 * training (to get verified_link_cap), LS sends an event to
+		 * the upper layer when it cannot train at the reported cap,
+		 * and the upper layer then re-enumerates modes. This is not
+		 * necessary if the lower verified_link_cap is enough to
+		 * drive all the modes.
+		 */
+
+		/* TODO: DYNAMIC_VALIDATION needs to be implemented */
+		/* if (flags.DYNAMIC_VALIDATION == 1)
+			dpsst->max_req_bw_for_verified_linkcap = dal_max(
+				dpsst->max_req_bw_for_verified_linkcap, req_bw); */
+		return true;
+	}
+
+	return false;
+}
+
+void decide_link_settings(struct core_stream *stream,
+	struct link_settings *link_setting)
+{
+
+	const struct link_settings *cur_ls;
+	struct core_link *link;
+	uint32_t req_bw;
+	uint32_t link_bw;
+	uint32_t i;
+
+	req_bw = bandwidth_in_kbps_from_timing(
+			&stream->public.timing);
+
+	/* if a preferred setting is specified through AMDDP, use it when it
+	 * is enough to drive the mode
+	 */
+	link = stream->sink->link;
+
+	if ((link->reported_link_cap.lane_count != LANE_COUNT_UNKNOWN) &&
+		(link->reported_link_cap.link_rate <=
+				link->verified_link_cap.link_rate)) {
+
+		link_bw = bandwidth_in_kbps_from_link_settings(
+				&link->reported_link_cap);
+
+		if (req_bw < link_bw) {
+			*link_setting = link->reported_link_cap;
+			return;
+		}
+	}
+
+	/* search for first suitable setting for the requested
+	 * bandwidth
+	 */
+	for (i = 0; i < get_link_training_fallback_table_len(link); i++) {
+
+		cur_ls = get_link_training_fallback_table(link, i);
+
+		link_bw =
+				bandwidth_in_kbps_from_link_settings(
+				cur_ls);
+
+		if (req_bw < link_bw) {
+			if (is_link_setting_supported(
+				cur_ls,
+				&link->max_link_setting)) {
+				*link_setting = *cur_ls;
+				return;
+			}
+		}
+	}
+
+	BREAK_TO_DEBUGGER();
+	ASSERT(link->verified_link_cap.lane_count !=
+		LANE_COUNT_UNKNOWN);
+
+	*link_setting = link->verified_link_cap;
+}
+
+/*************************Short Pulse IRQ***************************/
+
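+/* Returns true when the short-pulse DPCD data shows that CR, symbol lock,
+ * channel EQ or interlane alignment was lost while the receiver is in D0,
+ * i.e. when the link needs to be re-trained.
+ */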
+static bool hpd_rx_irq_check_link_loss_status(
+	struct core_link *link,
+	union hpd_irq_data *hpd_irq_dpcd_data)
+{
+	uint8_t irq_reg_rx_power_state;
+	enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
+	union lane_status lane_status;
+	uint32_t lane;
+	bool sink_status_changed;
+	bool return_code;
+
+	sink_status_changed = false;
+	return_code = false;
+
+	if (link->cur_link_settings.lane_count == 0)
+		return return_code;
+	/*1. Check that we can handle interrupt: Not in FS DOS,
+	 *  Not in "Display Timeout" state, Link is trained.
+	 */
+
+	dpcd_result = core_link_read_dpcd(link,
+		DPCD_ADDRESS_POWER_STATE,
+		&irq_reg_rx_power_state,
+		sizeof(irq_reg_rx_power_state));
+
+	if (dpcd_result != DC_OK) {
+		irq_reg_rx_power_state = DP_PWR_STATE_D0;
+		dal_logger_write(link->ctx->logger,
+			LOG_MAJOR_HW_TRACE,
+			LOG_MINOR_HW_TRACE_HPD_IRQ,
+			"%s: DPCD read failed to obtain power state.\n",
+			__func__);
+	}
+
+	if (irq_reg_rx_power_state == DP_PWR_STATE_D0) {
+
+		/*2. Check that Link Status changed, before re-training.*/
+
+		/*parse lane status*/
+		for (lane = 0;
+			lane < link->cur_link_settings.lane_count;
+			lane++) {
+
+			/* check status of lanes 0,1
+			 * changed DpcdAddress_Lane01Status (0x202)*/
+			lane_status.raw = get_nibble_at_index(
+				&hpd_irq_dpcd_data->bytes.lane01_status.raw,
+				lane);
+
+			if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+				!lane_status.bits.CR_DONE_0 ||
+				!lane_status.bits.SYMBOL_LOCKED_0) {
+				/* if one of the channel equalization, clock
+				 * recovery or symbol lock is dropped
+				 * consider it as (link has been
+				 * dropped) dp sink status has changed*/
+				sink_status_changed = true;
+				break;
+			}
+
+		}
+
+		/* Check interlane align.*/
+		if (sink_status_changed ||
+			!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.
+			INTERLANE_ALIGN_DONE) {
+
+			dal_logger_write(link->ctx->logger,
+				LOG_MAJOR_HW_TRACE,
+				LOG_MINOR_HW_TRACE_HPD_IRQ,
+				"%s: Link Status changed.\n",
+				__func__);
+
+			return_code = true;
+		}
+	}
+
+	return return_code;
+}
+
+static enum dc_status read_hpd_rx_irq_data(
+	struct core_link *link,
+	union hpd_irq_data *irq_data)
+{
+	/* The HW reads 16 bytes from 200h on HPD,
+	 * but if we get an AUX_DEFER, the HW cannot retry
+	 * and this causes the CTS tests 4.3.2.1 - 3.2.4 to
+	 * fail, so we now explicitly read 6 bytes which is
+	 * the req from the above mentioned test cases.
+	 */
+	return core_link_read_dpcd(
+	link,
+	DPCD_ADDRESS_SINK_COUNT,
+	irq_data->raw,
+	sizeof(union hpd_irq_data));
+}
+
+static bool allow_hpd_rx_irq(const struct core_link *link)
+{
+	/*
+	 * Don't handle RX IRQ unless one of following is met:
+	 * 1) The link is established (cur_link_settings != unknown)
+	 * 2) We kicked off MST detection
+	 * 3) We know we're dealing with an active dongle
+	 */
+
+	if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
+		(link->public.type == dc_connection_mst_branch) ||
+		is_dp_active_dongle(link))
+		return true;
+
+	return false;
+}
+
+bool dc_link_handle_hpd_rx_irq(const struct dc_link *dc_link)
+{
+	struct core_link *link = DC_LINK_TO_LINK(dc_link);
+	union hpd_irq_data hpd_irq_dpcd_data = {{{{0}}}};
+	enum dc_status result = DC_ERROR_UNEXPECTED;
+	bool status = false;
+	/* For use cases related to down stream connection status change,
+	 * PSR and device auto test, refer to function handle_sst_hpd_irq
+	 * in DAL2.1*/
+
+	dal_logger_write(link->ctx->logger,
+		LOG_MAJOR_HW_TRACE,
+		LOG_MINOR_HW_TRACE_HPD_IRQ,
+		"%s: Got short pulse HPD on link %d\n",
+		__func__, link->public.link_index);
+
+	if (!allow_hpd_rx_irq(link)) {
+		dal_logger_write(link->ctx->logger,
+			LOG_MAJOR_HW_TRACE,
+			LOG_MINOR_HW_TRACE_HPD_IRQ,
+			"%s: skipping HPD handling on %d\n",
+			__func__, link->public.link_index);
+		return false;
+	}
+
+	/* All the "handle_hpd_irq_xxx()" methods
+	 * should be called only after
+	 * dal_dpsst_ls_read_hpd_irq_data.
+	 * The order of calls is important too.
+	 */
+	result = read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data);
+
+	if (result != DC_OK) {
+		dal_logger_write(link->ctx->logger,
+			LOG_MAJOR_HW_TRACE,
+			LOG_MINOR_HW_TRACE_HPD_IRQ,
+			"%s: DPCD read failed to obtain irq data\n",
+			__func__);
+		return false;
+	}
+
+	/* check if we have MST msg and return since we poll for it */
+	if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY ||
+		hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY)
+		return false;
+
+
+	/* For now we only handle the 'Downstream port status' case.
+	 * If the sink count changed, the downstream port status changed,
+	 * so DM should call DC to do the detection.
+	 */
+	if (hpd_rx_irq_check_link_loss_status(
+		link,
+		&hpd_irq_dpcd_data)) {
+		perform_link_training(link, &link->cur_link_settings, true);
+		status = false;
+	}
+
+	if (hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT
+				!= link->dpcd_sink_count)
+		status = true;
+
+	/* reasons for HPD RX:
+	 * 1. Link Loss - ie Re-train the Link
+	 * 2. MST sideband message
+	 * 3. Automated Test - ie. Internal Commit
+	 * 4. CP (copy protection) - (not interesting for DM???)
+	 * 5. DRR
+	 * 6. Downstream Port status changed -ie. Detect - this the only one
+	 * which is interesting for DM because it must call dc_link_detect.
+	 */
+	return status;
+}
+
+/* query DPCD for the DPCD revision and MST capability */
+bool is_mst_supported(struct core_link *link)
+{
+	bool mst          = false;
+	enum dc_status st = DC_OK;
+	union dpcd_rev rev;
+	union mstm_cap cap;
+
+	rev.raw  = 0;
+	cap.raw  = 0;
+
+	st = core_link_read_dpcd(link, DPCD_ADDRESS_DPCD_REV, &rev.raw,
+			sizeof(rev));
+
+	if (st == DC_OK && rev.raw >= DPCD_REV_12) {
+
+		st = core_link_read_dpcd(link, DPCD_ADDRESS_MSTM_CAP,
+				&cap.raw, sizeof(cap));
+		if (st == DC_OK && cap.bits.MST_CAP == 1)
+			mst = true;
+	}
+	return mst;
+
+}
+
+bool is_dp_active_dongle(const struct core_link *link)
+{
+	enum display_dongle_type dongle_type = link->dpcd_caps.dongle_type;
+
+	return (dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) ||
+			(dongle_type == DISPLAY_DONGLE_DP_DVI_CONVERTER) ||
+			(dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER);
+}
+
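+/* Decode the DOWN_STREAM_PORT_PRESENT byte (and, for DPCD 1.1+, the
+ * detailed downstream port capabilities) into a dongle type and report it
+ * to the DDC service.
+ */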
+static void get_active_converter_info(
+	uint8_t data, struct core_link *link)
+{
+	union dp_downstream_port_present ds_port = { .byte = data };
+
+	/* decode converter info*/
+	if (!ds_port.fields.PORT_PRESENT) {
+		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+		ddc_service_set_dongle_type(link->ddc,
+				link->dpcd_caps.dongle_type);
+		return;
+	}
+
+	switch (ds_port.fields.PORT_TYPE) {
+	case DOWNSTREAM_VGA:
+		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
+		break;
+	case DOWNSTREAM_DVI_HDMI:
+		/* At this point we don't know whether it is DVI or HDMI;
+		 * assume DVI. */
+		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER;
+		break;
+	default:
+		link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+		break;
+	}
+
+	if (link->dpcd_caps.dpcd_rev.raw >= DCS_DPCD_REV_11) {
+		uint8_t det_caps[4];
+		union dwnstream_port_caps_byte0 *port_caps =
+			(union dwnstream_port_caps_byte0 *)det_caps;
+		core_link_read_dpcd(link, DPCD_ADDRESS_DWN_STRM_PORT0_CAPS,
+				det_caps, sizeof(det_caps));
+
+		switch (port_caps->bits.DWN_STRM_PORTX_TYPE) {
+		case DOWN_STREAM_DETAILED_VGA:
+			link->dpcd_caps.dongle_type =
+				DISPLAY_DONGLE_DP_VGA_CONVERTER;
+			break;
+		case DOWN_STREAM_DETAILED_DVI:
+			link->dpcd_caps.dongle_type =
+				DISPLAY_DONGLE_DP_DVI_CONVERTER;
+			break;
+		case DOWN_STREAM_DETAILED_HDMI:
+			link->dpcd_caps.dongle_type =
+				DISPLAY_DONGLE_DP_HDMI_CONVERTER;
+
+			if (ds_port.fields.DETAILED_CAPS) {
+
+				union dwnstream_port_caps_byte3_hdmi
+					hdmi_caps = {.raw = det_caps[3] };
+
+				link->dpcd_caps.is_dp_hdmi_s3d_converter =
+					hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
+			}
+			break;
+		}
+	}
+	ddc_service_set_dongle_type(link->ddc,
+			link->dpcd_caps.dongle_type);
+}
+
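+/* Power-up workaround: some branch devices report DPCD_REV as zero right
+ * after power-up, so pulse the receiver power state and retry the read a
+ * few times; specific DP-VGA converters (see the device ID cases below)
+ * additionally need the receiver kept powered so AUX and EDID stay
+ * accessible.
+ */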
+static void dp_wa_power_up_0010FA(struct core_link *link, uint8_t *dpcd_data,
+		int length)
+{
+	int retry = 0;
+	struct dp_device_vendor_id dp_id;
+	union dp_downstream_port_present ds_port = { 0 };
+
+	if (!link->dpcd_caps.dpcd_rev.raw) {
+		do {
+			dp_receiver_power_ctrl(link, true);
+			core_link_read_dpcd(link, DPCD_ADDRESS_DPCD_REV,
+							dpcd_data, length);
+			link->dpcd_caps.dpcd_rev.raw = dpcd_data[
+				DPCD_ADDRESS_DPCD_REV -
+				DPCD_ADDRESS_DPCD_REV];
+		} while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw);
+	}
+
+	ds_port.byte = dpcd_data[DPCD_ADDRESS_DOWNSTREAM_PORT_PRESENT -
+				 DPCD_ADDRESS_DPCD_REV];
+
+	get_active_converter_info(ds_port.byte, link);
+
+	/* read IEEE branch device id */
+	core_link_read_dpcd(link, DPCD_ADDRESS_BRANCH_DEVICE_ID_START,
+			(uint8_t *)&dp_id, sizeof(dp_id));
+	link->dpcd_caps.branch_dev_id =
+			(dp_id.ieee_oui[0] << 16) +
+			(dp_id.ieee_oui[1] << 8) +
+			dp_id.ieee_oui[2];
+
+	if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) {
+		switch (link->dpcd_caps.branch_dev_id) {
+		/* Some active dongles (DP-VGA, DP-DLDVI converters) power down
+		 * all internal circuits including AUX communication preventing
+		 * reading DPCD table and EDID (spec violation).
+		 * Encoder will skip DP RX power down on disable_output to
+		 * keep receiver powered all the time.*/
+		case DP_BRANCH_DEVICE_ID_1:
+		case DP_BRANCH_DEVICE_ID_4:
+			link->wa_flags.dp_keep_receiver_powered = true;
+			break;
+
+		/* TODO: May need work around for other dongles. */
+		default:
+			link->wa_flags.dp_keep_receiver_powered = false;
+			break;
+		}
+	} else {
+		link->wa_flags.dp_keep_receiver_powered = false;
+	}
+}
+
+static void retrieve_link_cap(struct core_link *link)
+{
+	uint8_t dpcd_data[
+			DPCD_ADDRESS_EDP_CONFIG_CAP -
+			DPCD_ADDRESS_DPCD_REV + 1];
+
+	union down_stream_port_count down_strm_port_count;
+	union edp_configuration_cap edp_config_cap;
+	union max_down_spread max_down_spread;
+	union dp_downstream_port_present ds_port = { 0 };
+
+	dm_memset(dpcd_data, '\0', sizeof(dpcd_data));
+	dm_memset(&down_strm_port_count,
+		'\0', sizeof(union down_stream_port_count));
+	dm_memset(&edp_config_cap, '\0',
+		sizeof(union edp_configuration_cap));
+	dm_memset(&max_down_spread, '\0',
+		sizeof(union max_down_spread));
+
+	core_link_read_dpcd(link, DPCD_ADDRESS_DPCD_REV,
+			dpcd_data, sizeof(dpcd_data));
+	link->dpcd_caps.dpcd_rev.raw = dpcd_data[
+		DPCD_ADDRESS_DPCD_REV -
+		DPCD_ADDRESS_DPCD_REV];
+
+	ds_port.byte = dpcd_data[DPCD_ADDRESS_DOWNSTREAM_PORT_PRESENT -
+				 DPCD_ADDRESS_DPCD_REV];
+
+	get_active_converter_info(ds_port.byte, link);
+
+	dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
+
+	link->dpcd_caps.allow_invalid_MSA_timing_param =
+		down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM;
+
+	link->dpcd_caps.max_ln_count.raw = dpcd_data[
+		DPCD_ADDRESS_MAX_LANE_COUNT - DPCD_ADDRESS_DPCD_REV];
+
+	max_down_spread.raw = dpcd_data[
+		DPCD_ADDRESS_MAX_DOWNSPREAD - DPCD_ADDRESS_DPCD_REV];
+
+	link->reported_link_cap.lane_count =
+		link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT;
+	link->reported_link_cap.link_rate = dpcd_data[
+		DPCD_ADDRESS_MAX_LINK_RATE - DPCD_ADDRESS_DPCD_REV];
+	link->reported_link_cap.link_spread =
+		max_down_spread.bits.MAX_DOWN_SPREAD ?
+		LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
+
+	edp_config_cap.raw = dpcd_data[
+		DPCD_ADDRESS_EDP_CONFIG_CAP - DPCD_ADDRESS_DPCD_REV];
+	link->dpcd_caps.panel_mode_edp =
+		edp_config_cap.bits.ALT_SCRAMBLER_RESET;
+
+	link->edp_revision = DPCD_EDP_REVISION_EDP_UNKNOWN;
+
+	/* read sink count */
+	core_link_read_dpcd(link,
+			DPCD_ADDRESS_SINK_COUNT,
+			&link->dpcd_caps.sink_count.raw,
+			sizeof(link->dpcd_caps.sink_count.raw));
+
+	/* Display control registers starting at DPCD 700h are only valid and
+	 * enabled if this eDP config cap bit is set. */
+	if (edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE) {
+		/* Read the Panel's eDP revision at DPCD 700h. */
+		core_link_read_dpcd(link,
+			DPCD_ADDRESS_EDP_REV,
+			(uint8_t *)(&link->edp_revision),
+			sizeof(link->edp_revision));
+	}
+	/* TODO: Confirm if need retrieve_psr_link_cap */
+}
+
+void detect_dp_sink_caps(struct core_link *link)
+{
+	retrieve_link_cap(link);
+
+	/* dc init_hw has powered up the encoder using the default
+	 * signal for the connector. For native DP there is no
+	 * need to power up the encoder again. If not native
+	 * DP, hw_init may need to check the signal or power up
+	 * the encoder here.
+	 */
+
+	if (is_mst_supported(link)) {
+		link->verified_link_cap = link->reported_link_cap;
+	} else {
+		dp_hbr_verify_link_cap(link,
+			&link->reported_link_cap);
+	}
+	/* TODO save sink caps in link->sink */
+}
diff --git a/drivers/gpu/drm/amd/dal/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/dal/dc/core/dc_link_hwss.c
new file mode 100644
index 000000000000..39aa734680c6
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/dc/core/dc_link_hwss.c
@@ -0,0 +1,201 @@
+/* Copyright 2015 Advanced Micro Devices, Inc. */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "inc/core_dc.h"
+#include "include/ddc_service_types.h"
+#include "include/i2caux_interface.h"
+#include "link_hwss.h"
+#include "hw_sequencer.h"
+#include "dc_link_ddc.h"
+#include "dm_helpers.h"
+#include "dce110/dce110_link_encoder.h"
+#include "dce110/dce110_stream_encoder.h"
+
+
+enum dc_status core_link_read_dpcd(
+	struct core_link *link,
+	uint32_t address,
+	uint8_t *data,
+	uint32_t size)
+{
+	if (!dm_helper_dp_read_dpcd(link->ctx,
+			&link->public,
+			address, data, size))
+			return DC_ERROR_UNEXPECTED;
+
+	return DC_OK;
+}
+
+enum dc_status core_link_write_dpcd(
+	struct core_link *link,
+	uint32_t address,
+	const uint8_t *data,
+	uint32_t size)
+{
+	if (!dm_helper_dp_write_dpcd(link->ctx,
+			&link->public,
+			address, data, size))
+				return DC_ERROR_UNEXPECTED;
+
+	return DC_OK;
+}
+
+void dp_receiver_power_ctrl(struct core_link *link, bool on)
+{
+	uint8_t state;
+
+	state = on ? DP_POWER_STATE_D0 : DP_POWER_STATE_D3;
+
+	core_link_write_dpcd(link, DPCD_ADDRESS_POWER_STATE, &state,
+			sizeof(state));
+}
+
+void dp_enable_link_phy(
+	struct core_link *link,
+	enum signal_type signal,
+	const struct link_settings *link_settings)
+{
+	struct link_encoder *link_enc = link->link_enc;
+
+	if (dc_is_dp_sst_signal(signal)) {
+		if (signal == SIGNAL_TYPE_EDP) {
+			link_enc->funcs->power_control(link_enc, true);
+			link_enc->funcs->backlight_control(link_enc, true);
+		}
+
+		link_enc->funcs->enable_dp_output(
+						link_enc,
+						link_settings,
+						CLOCK_SOURCE_ID_EXTERNAL);
+	} else {
+		link_enc->funcs->enable_dp_mst_output(
+						link_enc,
+						link_settings,
+						CLOCK_SOURCE_ID_EXTERNAL);
+	}
+
+	dp_receiver_power_ctrl(link, true);
+}
+
+void dp_disable_link_phy(struct core_link *link, enum signal_type signal)
+{
+	if (!link->wa_flags.dp_keep_receiver_powered)
+		dp_receiver_power_ctrl(link, false);
+
+	if (signal == SIGNAL_TYPE_EDP)
+		link->link_enc->funcs->backlight_control(link->link_enc, false);
+
+	link->link_enc->funcs->disable_output(link->link_enc, signal);
+
+	/* Clear current link setting.*/
+	dm_memset(&link->cur_link_settings, 0,
+			sizeof(link->cur_link_settings));
+}
+
+void dp_disable_link_phy_mst(struct core_link *link, struct core_stream *stream)
+{
+	/* For MST, disable the link only when no stream still uses it */
+	if (link->mst_stream_alloc_table.stream_count > 0)
+		return;
+
+	dp_disable_link_phy(link, stream->signal);
+}
+
+bool dp_set_hw_training_pattern(
+	struct core_link *link,
+	enum hw_dp_training_pattern pattern)
+{
+	enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+	struct encoder_set_dp_phy_pattern_param pattern_param = {0};
+	struct link_encoder *encoder = link->link_enc;
+
+	switch (pattern) {
+	case HW_DP_TRAINING_PATTERN_1:
+		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1;
+		break;
+	case HW_DP_TRAINING_PATTERN_2:
+		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2;
+		break;
+	case HW_DP_TRAINING_PATTERN_3:
+		test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3;
+		break;
+	default:
+		break;
+	}
+
+	pattern_param.dp_phy_pattern = test_pattern;
+	pattern_param.custom_pattern = NULL;
+	pattern_param.custom_pattern_size = 0;
+	pattern_param.dp_panel_mode = dp_get_panel_mode(link);
+
+	encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param);
+
+	return true;
+}
+
+
+void dp_set_hw_lane_settings(
+	struct core_link *link,
+	const struct link_training_settings *link_settings)
+{
+	struct link_encoder *encoder = link->link_enc;
+
+	/* call Encoder to set lane settings */
+	encoder->funcs->dp_set_lane_settings(encoder, link_settings);
+}
+
+enum dp_panel_mode dp_get_panel_mode(struct core_link *link)
+{
+	/* We need to explicitly check that connector
+	 * is not DP. Some Travis_VGA converters get reported
+	 * by the video BIOS as DP.
+	 */
+	if (link->public.connector_signal != SIGNAL_TYPE_DISPLAY_PORT) {
+
+		switch (link->dpcd_caps.branch_dev_id) {
+		case DP_BRANCH_DEVICE_ID_2:
+			if (strncmp(
+				link->dpcd_caps.branch_dev_name,
+				DP_VGA_LVDS_CONVERTER_ID_2,
+				sizeof(
+				link->dpcd_caps.
+				branch_dev_name)) == 0) {
+				return DP_PANEL_MODE_SPECIAL;
+			}
+			break;
+		case DP_BRANCH_DEVICE_ID_3:
+			if (strncmp(link->dpcd_caps.branch_dev_name,
+				DP_VGA_LVDS_CONVERTER_ID_3,
+				sizeof(
+				link->dpcd_caps.
+				branch_dev_name)) == 0) {
+				return DP_PANEL_MODE_SPECIAL;
+			}
+			break;
+		default:
+			break;
+		}
+
+		if (link->dpcd_caps.panel_mode_edp) {
+			return DP_PANEL_MODE_EDP;
+		}
+	}
+
+	return DP_PANEL_MODE_DEFAULT;
+}
+
+void dp_set_hw_test_pattern(
+	struct core_link *link,
+	enum dp_test_pattern test_pattern)
+{
+	struct encoder_set_dp_phy_pattern_param pattern_param = {0};
+	struct link_encoder *encoder = link->link_enc;
+
+	pattern_param.dp_phy_pattern = test_pattern;
+	pattern_param.custom_pattern = NULL;
+	pattern_param.custom_pattern_size = 0;
+	pattern_param.dp_panel_mode = dp_get_panel_mode(link);
+
+	encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param);
+}
diff --git a/drivers/gpu/drm/amd/dal/dc/core/dc_resource.c b/drivers/gpu/drm/amd/dal/dc/core/dc_resource.c
new file mode 100644
index 000000000000..8cb756e99bfd
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/dc/core/dc_resource.c
@@ -0,0 +1,1243 @@
+/*
+* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "link_encoder.h"
+#include "stream_encoder.h"
+#include "opp.h"
+#include "transform.h"
+
+#if defined(CONFIG_DRM_AMD_DAL_DCE10_0)
+#include "dce100/dce100_resource.h"
+#endif
+#if defined(CONFIG_DRM_AMD_DAL_DCE11_0)
+#include "dce110/dce110_resource.h"
+#endif
+
+bool dc_construct_resource_pool(struct adapter_service *adapter_serv,
+				struct dc *dc,
+				uint8_t num_virtual_links)
+{
+	enum dce_version dce_ver = dal_adapter_service_get_dce_version(adapter_serv);
+
+	switch (dce_ver) {
+#if defined(CONFIG_DRM_AMD_DAL_DCE10_0)
+	case DCE_VERSION_10_0:
+		return dce100_construct_resource_pool(
+			adapter_serv, num_virtual_links, dc, &dc->res_pool);
+#endif
+#if defined(CONFIG_DRM_AMD_DAL_DCE11_0)
+	case DCE_VERSION_11_0:
+		return dce110_construct_resource_pool(
+			adapter_serv, num_virtual_links, dc, &dc->res_pool);
+#endif
+	default:
+		break;
+	}
+
+	return false;
+}
+
+void unreference_clock_source(
+		struct resource_context *res_ctx,
+		struct clock_source *clock_source)
+{
+	int i;
+	for (i = 0; i < res_ctx->pool.clk_src_count; i++) {
+		if (res_ctx->pool.clock_sources[i] == clock_source) {
+			res_ctx->clock_source_ref_count[i]--;
+
+			if (res_ctx->clock_source_ref_count[i] == 0)
+				clock_source->funcs->cs_power_down(clock_source);
+		}
+	}
+}
+
+void reference_clock_source(
+		struct resource_context *res_ctx,
+		struct clock_source *clock_source)
+{
+	int i;
+	for (i = 0; i < res_ctx->pool.clk_src_count; i++) {
+		if (res_ctx->pool.clock_sources[i] == clock_source) {
+			res_ctx->clock_source_ref_count[i]++;
+		}
+	}
+}
+
+bool is_same_timing(
+	const struct dc_crtc_timing *timing1,
+	const struct dc_crtc_timing *timing2)
+{
+	return dm_memcmp(timing1, timing2, sizeof(struct dc_crtc_timing)) == 0;
+}
+
+static bool is_sharable_clk_src(
+	const struct core_stream *stream_with_clk_src,
+	const struct core_stream *stream)
+{
+	if (stream_with_clk_src->clock_source == NULL)
+		return false;
+
+	if (stream_with_clk_src->clock_source->id == CLOCK_SOURCE_ID_EXTERNAL)
+		return false;
+
+	/* Sharing dual link is not working */
+	if (stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK ||
+			stream_with_clk_src->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+		return false;
+
+	if (!is_same_timing(
+		&stream_with_clk_src->public.timing, &stream->public.timing))
+		return false;
+
+	return true;
+}
+
+struct clock_source *find_used_clk_src_for_sharing(
+					struct validate_context *context,
+					struct core_stream *stream)
+{
+	uint8_t i, j;
+	for (i = 0; i < context->target_count; i++) {
+		struct core_target *target = context->targets[i];
+		for (j = 0; j < target->public.stream_count; j++) {
+			struct core_stream *clock_source_stream =
+				DC_STREAM_TO_CORE(target->public.streams[j]);
+
+			if (clock_source_stream->clock_source == NULL)
+				continue;
+
+			if (is_sharable_clk_src(clock_source_stream, stream))
+				return clock_source_stream->clock_source;
+		}
+	}
+
+	return NULL;
+}
+
+static enum pixel_format convert_pixel_format_to_dalsurface(
+		enum surface_pixel_format surface_pixel_format)
+{
+	enum pixel_format dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
+
+	switch (surface_pixel_format) {
+	case SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS:
+		dal_pixel_format = PIXEL_FORMAT_INDEX8;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+		dal_pixel_format = PIXEL_FORMAT_RGB565;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+		dal_pixel_format = PIXEL_FORMAT_RGB565;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+		dal_pixel_format = PIXEL_FORMAT_ARGB8888;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_BGRA8888:
+		dal_pixel_format = PIXEL_FORMAT_ARGB8888;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+		dal_pixel_format = PIXEL_FORMAT_ARGB2101010;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+		dal_pixel_format = PIXEL_FORMAT_ARGB2101010;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+		dal_pixel_format = PIXEL_FORMAT_ARGB2101010_XRBIAS;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+		dal_pixel_format = PIXEL_FORMAT_FP16;
+		break;
+	case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+		dal_pixel_format = PIXEL_FORMAT_FP16;
+		break;
+
+
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+		dal_pixel_format = PIXEL_FORMAT_420BPP12;
+		break;
+	case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+		dal_pixel_format = PIXEL_FORMAT_420BPP12;
+		break;
+	case SURFACE_PIXEL_FORMAT_VIDEO_422_YCb:
+		dal_pixel_format = PIXEL_FORMAT_422BPP16;
+		break;
+	case SURFACE_PIXEL_FORMAT_VIDEO_422_YCr:
+		dal_pixel_format = PIXEL_FORMAT_422BPP16;
+		break;
+	case SURFACE_PIXEL_FORMAT_VIDEO_422_CbY:
+		dal_pixel_format = PIXEL_FORMAT_422BPP16;
+		break;
+	case SURFACE_PIXEL_FORMAT_VIDEO_422_CrY:
+		dal_pixel_format = PIXEL_FORMAT_422BPP16;
+		break;
+	case SURFACE_PIXEL_FORMAT_VIDEO_444_ACrYCb1555:
+		dal_pixel_format = PIXEL_FORMAT_444BPP16;
+		break;
+	case SURFACE_PIXEL_FORMAT_VIDEO_444_CrYCb565:
+		dal_pixel_format = PIXEL_FORMAT_444BPP16;
+		break;
+	case SURFACE_PIXEL_FORMAT_VIDEO_444_ACrYCb4444:
+		dal_pixel_format = PIXEL_FORMAT_444BPP16;
+		break;
+	case SURFACE_PIXEL_FORMAT_VIDEO_444_CbYCrA5551:
+		dal_pixel_format = PIXEL_FORMAT_444BPP16;
+		break;
+	case SURFACE_PIXEL_FORMAT_VIDEO_444_ACrYCb8888:
+		dal_pixel_format = PIXEL_FORMAT_444BPP32;
+		break;
+	case SURFACE_PIXEL_FORMAT_VIDEO_444_ACrYCb2101010:
+		dal_pixel_format = PIXEL_FORMAT_444BPP32;
+		break;
+	case SURFACE_PIXEL_FORMAT_VIDEO_444_CbYCrA1010102:
+		dal_pixel_format = PIXEL_FORMAT_444BPP32;
+		break;
+	default:
+		dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
+		break;
+	}
+	return dal_pixel_format;
+}
+
+static void calculate_viewport(
+		const struct dc_surface *surface,
+		struct core_stream *stream)
+{
+	const struct rect src = surface->src_rect;
+	const struct rect clip = surface->clip_rect;
+	const struct rect dst = surface->dst_rect;
+
+	/* offset = src.ofs + (clip.ofs - dst.ofs) * scl_ratio
+	 * num_pixels = clip.num_pix * scl_ratio
+	 */
+	stream->viewport.x = src.x + (clip.x - dst.x) * src.width / dst.width;
+	stream->viewport.width = clip.width * src.width / dst.width;
+
+	stream->viewport.y = src.y + (clip.y - dst.y) * src.height / dst.height;
+	stream->viewport.height = clip.height * src.height / dst.height;
+
+	/* Minimum viewport such that 420/422 chroma vp is non-zero */
+	if (stream->viewport.width < 2)
+		stream->viewport.width = 2;
+
+	if (stream->viewport.height < 2)
+		stream->viewport.height = 2;
+}
+
+static void calculate_overscan(
+		const struct dc_surface *surface,
+		struct core_stream *stream)
+{
+	stream->overscan.left = stream->public.dst.x;
+	if (stream->public.src.x < surface->clip_rect.x)
+		stream->overscan.left += (surface->clip_rect.x
+			- stream->public.src.x) * stream->public.dst.width
+			/ stream->public.src.width;
+
+	stream->overscan.right = stream->public.timing.h_addressable
+		- stream->public.dst.x - stream->public.dst.width;
+	if (stream->public.src.x + stream->public.src.width
+		> surface->clip_rect.x + surface->clip_rect.width)
+		stream->overscan.right = stream->public.timing.h_addressable -
+			dal_fixed31_32_floor(dal_fixed31_32_div(
+				dal_fixed31_32_from_int(
+						stream->viewport.width),
+						stream->ratios.horz)) -
+						stream->overscan.left;
+
+
+	stream->overscan.top = stream->public.dst.y;
+	if (stream->public.src.y < surface->clip_rect.y)
+		stream->overscan.top += (surface->clip_rect.y
+			- stream->public.src.y) * stream->public.dst.height
+			/ stream->public.src.height;
+
+	stream->overscan.bottom = stream->public.timing.v_addressable
+		- stream->public.dst.y - stream->public.dst.height;
+	if (stream->public.src.y + stream->public.src.height
+		> surface->clip_rect.y + surface->clip_rect.height)
+		stream->overscan.bottom = stream->public.timing.v_addressable -
+			dal_fixed31_32_floor(dal_fixed31_32_div(
+				dal_fixed31_32_from_int(
+						stream->viewport.height),
+						stream->ratios.vert)) -
+						stream->overscan.top;
+
+
+	/* TODO: Add timing overscan to finalize overscan calculation*/
+}
+
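+/* Scaling ratios: the surface src/dst ratio scaled by the stream src/dst
+ * ratio, doubled in one direction for side-by-side or top-and-bottom
+ * stereo, with the chroma ratios halved for 4:2:0 (and horizontally for
+ * 4:2:2) formats.
+ */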
+static void calculate_scaling_ratios(
+		const struct dc_surface *surface,
+		struct core_stream *stream)
+{
+	const uint32_t in_w = stream->public.src.width;
+	const uint32_t in_h = stream->public.src.height;
+	const uint32_t out_w = stream->public.dst.width;
+	const uint32_t out_h = stream->public.dst.height;
+
+	stream->ratios.horz = dal_fixed31_32_from_fraction(
+					surface->src_rect.width,
+					surface->dst_rect.width);
+	stream->ratios.vert = dal_fixed31_32_from_fraction(
+					surface->src_rect.height,
+					surface->dst_rect.height);
+
+	if (surface->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE)
+		stream->ratios.horz.value *= 2;
+	else if (surface->stereo_format
+					== PLANE_STEREO_FORMAT_TOP_AND_BOTTOM)
+		stream->ratios.vert.value *= 2;
+
+	stream->ratios.vert.value = div64_s64(stream->ratios.vert.value * in_h,
+			out_h);
+	stream->ratios.horz.value = div64_s64(stream->ratios.horz.value * in_w,
+			out_w);
+
+	stream->ratios.horz_c = stream->ratios.horz;
+	stream->ratios.vert_c = stream->ratios.vert;
+
+	if (stream->format == PIXEL_FORMAT_420BPP12) {
+		stream->ratios.horz_c.value /= 2;
+		stream->ratios.vert_c.value /= 2;
+	} else if (stream->format == PIXEL_FORMAT_422BPP16) {
+		stream->ratios.horz_c.value /= 2;
+	}
+}
+
+/*TODO: per pipe not per stream*/
+void build_scaling_params(
+	const struct dc_surface *surface,
+	struct core_stream *stream)
+{
+	/* Important: scaling ratio calculation requires pixel format,
+	 * overscan calculation requires scaling ratios and viewport
+	 * and lb depth/taps calculation requires overscan. Call sequence
+	 * is therefore important */
+	stream->format = convert_pixel_format_to_dalsurface(surface->format);
+
+	calculate_viewport(surface, stream);
+
+	calculate_scaling_ratios(surface, stream);
+
+	calculate_overscan(surface, stream);
+
+	/* Check per direction whether scaling is required; if not, use a
+	 * single tap, otherwise the requested tap count */
+	if (dal_fixed31_32_u2d19(stream->ratios.horz) == 1 << 19)
+		stream->taps.h_taps = 1;
+	else
+		stream->taps.h_taps = surface->scaling_quality.h_taps;
+
+	if (dal_fixed31_32_u2d19(stream->ratios.horz_c) == 1 << 19)
+		stream->taps.h_taps_c = 1;
+	else
+		stream->taps.h_taps_c = surface->scaling_quality.h_taps_c;
+
+	if (dal_fixed31_32_u2d19(stream->ratios.vert) == 1 << 19)
+		stream->taps.v_taps = 1;
+	else
+		stream->taps.v_taps = surface->scaling_quality.v_taps;
+
+	if (dal_fixed31_32_u2d19(stream->ratios.vert_c) == 1 << 19)
+		stream->taps.v_taps_c = 1;
+	else
+		stream->taps.v_taps_c = surface->scaling_quality.v_taps_c;
+
+	dal_logger_write(stream->ctx->logger,
+				LOG_MAJOR_DCP,
+				LOG_MINOR_DCP_SCALER,
+				"%s: Overscan:\n bot:%d left:%d right:%d "
+				"top:%d\nViewport:\nheight:%d width:%d x:%d "
+				"y:%d\n dst_rect:\nheight:%d width:%d x:%d "
+				"y:%d\n",
+				__func__,
+				stream->overscan.bottom,
+				stream->overscan.left,
+				stream->overscan.right,
+				stream->overscan.top,
+				stream->viewport.height,
+				stream->viewport.width,
+				stream->viewport.x,
+				stream->viewport.y,
+				surface->dst_rect.height,
+				surface->dst_rect.width,
+				surface->dst_rect.x,
+				surface->dst_rect.y);
+}
+
+void build_scaling_params_for_context(
+	const struct dc *dc,
+	struct validate_context *context)
+{
+	uint8_t i, j, k;
+	for (i = 0; i < context->target_count; i++) {
+		struct core_target *target = context->targets[i];
+		if (context->target_flags[i].unchanged)
+			continue;
+		for (j = 0; j < target->status.surface_count; j++) {
+			const struct dc_surface *surface =
+				target->status.surfaces[j];
+			for (k = 0; k < target->public.stream_count; k++) {
+				struct core_stream *stream =
+					DC_STREAM_TO_CORE(
+						target->public.streams[k]);
+
+				build_scaling_params(surface, stream);
+			}
+		}
+	}
+}
+
+bool logical_attach_surfaces_to_target(
+		struct dc_surface *surfaces[],
+		uint8_t surface_count,
+		struct dc_target *dc_target)
+{
+	uint8_t i;
+	struct core_target *target = DC_TARGET_TO_CORE(dc_target);
+
+	if (surface_count > MAX_SURFACE_NUM) {
+		dm_error("Surface: can not attach %d surfaces! Maximum is: %d\n",
+			surface_count, MAX_SURFACE_NUM);
+		return false;
+	}
+
+	for (i = 0; i < target->status.surface_count; i++)
+		dc_surface_release(target->status.surfaces[i]);
+
+	for (i = 0; i < surface_count; i++) {
+		struct core_surface *surface = DC_SURFACE_TO_CORE(surfaces[i]);
+		surface->status.dc_target = &target->public;
+		target->status.surfaces[i] = surfaces[i];
+		dc_surface_retain(target->status.surfaces[i]);
+	}
+	target->status.surface_count = surface_count;
+
+	return true;
+}
+
+static uint32_t get_min_vblank_time_us(const struct validate_context *context)
+{
+	uint8_t i, j;
+	uint32_t min_vertical_blank_time = -1;
+
+	for (i = 0; i < context->target_count; i++) {
+		const struct core_target *target = context->targets[i];
+
+		for (j = 0; j < target->public.stream_count; j++) {
+			const struct dc_stream *stream =
+						target->public.streams[j];
+			uint32_t vertical_blank_in_pixels = 0;
+			uint32_t vertical_blank_time = 0;
+
+			vertical_blank_in_pixels = stream->timing.h_total *
+				(stream->timing.v_total
+					- stream->timing.v_addressable);
+			vertical_blank_time = vertical_blank_in_pixels
+				* 1000 / stream->timing.pix_clk_khz;
+			if (min_vertical_blank_time > vertical_blank_time)
+				min_vertical_blank_time = vertical_blank_time;
+		}
+	}
+	return min_vertical_blank_time;
+}
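+
+/*
+ * Example with assumed CEA 1080p60 numbers (h_total 2200, v_total 1125,
+ * v_addressable 1080, pix_clk_khz 148500):
+ * vertical_blank_in_pixels = 2200 * (1125 - 1080) = 99000
+ * vertical_blank_time = 99000 * 1000 / 148500 = 666 us (integer division)
+ */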
+
+static void fill_display_configs(
+	const struct validate_context *context,
+	struct dc_pp_display_configuration *pp_display_cfg)
+{
+	uint8_t i, j;
+	uint8_t num_cfgs = 0;
+
+	for (i = 0; i < context->target_count; i++) {
+		const struct core_target *target = context->targets[i];
+
+		for (j = 0; j < target->public.stream_count; j++) {
+			const struct core_stream *stream =
+			DC_STREAM_TO_CORE(target->public.streams[j]);
+			struct dc_pp_single_disp_config *cfg =
+					&pp_display_cfg->disp_configs[num_cfgs];
+
+			num_cfgs++;
+			cfg->signal = stream->signal;
+			cfg->pipe_idx = stream->opp->inst;
+			cfg->src_height = stream->public.src.height;
+			cfg->src_width = stream->public.src.width;
+			cfg->ddi_channel_mapping =
+				stream->sink->link->ddi_channel_mapping.raw;
+			cfg->transmitter =
+				stream->sink->link->link_enc->transmitter;
+			cfg->link_settings =
+					stream->sink->link->cur_link_settings;
+			cfg->sym_clock = stream->public.timing.pix_clk_khz;
+			switch (stream->public.timing.display_color_depth) {
+			case COLOR_DEPTH_101010:
+				cfg->sym_clock = (cfg->sym_clock * 30) / 24;
+				break;
+			case COLOR_DEPTH_121212:
+				cfg->sym_clock = (cfg->sym_clock * 36) / 24;
+				break;
+			case COLOR_DEPTH_161616:
+				cfg->sym_clock = (cfg->sym_clock * 48) / 24;
+				break;
+			default:
+				break;
+			}
+			/* TODO: unhardcode*/
+			cfg->v_refresh = 60;
+		}
+	}
+	pp_display_cfg->display_count = num_cfgs;
+}
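+
+/*
+ * The sym_clock above is the pixel clock scaled by the deep color ratio.
+ * Example (assumed 148500 kHz pixel clock): 10 bpc gives
+ * 148500 * 30 / 24 = 185625 kHz, 12 bpc gives 148500 * 36 / 24 = 222750 kHz.
+ */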
+
+void pplib_apply_safe_state(
+	const struct dc *dc)
+{
+	dm_pp_apply_safe_state(dc->ctx);
+}
+
+void pplib_apply_display_requirements(
+	const struct dc *dc,
+	const struct validate_context *context)
+{
+	struct dc_pp_display_configuration pp_display_cfg = { 0 };
+
+	pp_display_cfg.all_displays_in_sync =
+		context->bw_results.all_displays_in_sync;
+	pp_display_cfg.nb_pstate_switch_disable =
+			context->bw_results.nbp_state_change_enable == false;
+	pp_display_cfg.cpu_cc6_disable =
+			context->bw_results.cpuc_state_change_enable == false;
+	pp_display_cfg.cpu_pstate_disable =
+			context->bw_results.cpup_state_change_enable == false;
+	pp_display_cfg.cpu_pstate_separation_time =
+			context->bw_results.required_blackout_duration_us;
+
+	pp_display_cfg.min_memory_clock_khz = context->bw_results.required_yclk
+		/ MEMORY_TYPE_MULTIPLIER;
+	pp_display_cfg.min_engine_clock_khz = context->bw_results.required_sclk;
+	pp_display_cfg.min_engine_clock_deep_sleep_khz
+			= context->bw_results.required_sclk_deep_sleep;
+
+	pp_display_cfg.avail_mclk_switch_time_us =
+						get_min_vblank_time_us(context);
+	pp_display_cfg.avail_mclk_switch_time_in_disp_active_us = 0;
+
+	pp_display_cfg.disp_clk_khz = context->bw_results.dispclk_khz;
+
+	fill_display_configs(context, &pp_display_cfg);
+
+	/* TODO: is this still applicable?*/
+	if (pp_display_cfg.display_count == 1) {
+		const struct dc_crtc_timing *timing =
+			&context->targets[0]->public.streams[0]->timing;
+
+		pp_display_cfg.crtc_index =
+			pp_display_cfg.disp_configs[0].pipe_idx;
+		pp_display_cfg.line_time_in_us = timing->h_total * 1000
+							/ timing->pix_clk_khz;
+	}
+
+	dm_pp_apply_display_requirements(dc->ctx, &pp_display_cfg);
+}
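+
+/*
+ * Example for the single-display line time above, with an assumed CEA
+ * 1080p60 timing (h_total 2200, pix_clk_khz 148500):
+ * line_time_in_us = 2200 * 1000 / 148500 = 14 us (integer division)
+ */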
+
+/* Maximum TMDS single link pixel clock 165MHz */
+#define TMDS_MAX_PIXEL_CLOCK_IN_KHZ 165000
+
+static void attach_stream_to_controller(
+		struct resource_context *res_ctx,
+		struct core_stream *stream)
+{
+	res_ctx->controller_ctx[stream->controller_idx].stream = stream;
+}
+
+static void set_stream_engine_in_use(
+		struct resource_context *res_ctx,
+		struct stream_encoder *stream_enc)
+{
+	int i;
+
+	for (i = 0; i < res_ctx->pool.stream_enc_count; i++) {
+		if (res_ctx->pool.stream_enc[i] == stream_enc)
+			res_ctx->is_stream_enc_acquired[i] = true;
+	}
+}
+
+/* TODO: release audio object */
+static void set_audio_in_use(
+		struct resource_context *res_ctx,
+		struct audio *audio)
+{
+	int i;
+	for (i = 0; i < res_ctx->pool.audio_count; i++) {
+		if (res_ctx->pool.audios[i] == audio) {
+			res_ctx->is_audio_acquired[i] = true;
+		}
+	}
+}
+
+static bool assign_first_free_controller(
+		struct resource_context *res_ctx,
+		struct core_stream *stream)
+{
+	uint8_t i;
+	for (i = 0; i < res_ctx->pool.controller_count; i++) {
+		if (!res_ctx->controller_ctx[i].stream) {
+			stream->tg = res_ctx->pool.timing_generators[i];
+			stream->mi = res_ctx->pool.mis[i];
+			stream->ipp = res_ctx->pool.ipps[i];
+			stream->xfm = res_ctx->pool.transforms[i];
+			stream->opp = res_ctx->pool.opps[i];
+			stream->controller_idx = i;
+			stream->dis_clk = res_ctx->pool.display_clock;
+			return true;
+		}
+	}
+	return false;
+}
+
+static struct stream_encoder *find_first_free_match_stream_enc_for_link(
+		struct resource_context *res_ctx,
+		struct core_link *link)
+{
+	uint8_t i;
+	int8_t j = -1;
+	const struct dc_sink *sink = NULL;
+
+	for (i = 0; i < res_ctx->pool.stream_enc_count; i++) {
+		if (!res_ctx->is_stream_enc_acquired[i] &&
+					res_ctx->pool.stream_enc[i]) {
+			/* Store first available for MST second display
+			 * in daisy chain use case */
+			j = i;
+			if (res_ctx->pool.stream_enc[i]->id ==
+					link->link_enc->preferred_engine)
+				return res_ctx->pool.stream_enc[i];
+		}
+	}
+
+	/*
+	 * The cases below can happen when the preferred stream encoder is
+	 * already acquired:
+	 * 1) for the second MST display in a daisy chain, the preferred
+	 * engine was already taken by the first display;
+	 * 2) for another link whose preferred engine was already acquired
+	 * by some MST configuration.
+	 *
+	 * If the signal is DP and the preferred engine was not found,
+	 * return the last available encoder.
+	 *
+	 * TODO - This is just a patch-up; a generic solution is required
+	 * for non-DP connectors.
+	 */
+
+	sink = link->public.local_sink ?
+		link->public.local_sink : link->public.remote_sinks[0];
+
+	if (sink && j >= 0 && dc_is_dp_signal(sink->sink_signal))
+		return res_ctx->pool.stream_enc[j];
+
+	return NULL;
+}
+
+static struct audio *find_first_free_audio(struct resource_context *res_ctx)
+{
+	int i;
+	for (i = 0; i < res_ctx->pool.audio_count; i++) {
+		if (res_ctx->is_audio_acquired[i] == false) {
+			return res_ctx->pool.audios[i];
+		}
+	}
+
+	return NULL;
+}
+
+static bool check_timing_change(struct core_stream *cur_stream,
+		struct core_stream *new_stream)
+{
+	if (cur_stream == NULL)
+		return true;
+
+	/* If the sink pointer changed, this is a hotplug and a full HW
+	 * setup should be done.
+	 */
+	if (cur_stream->sink != new_stream->sink)
+		return true;
+
+	return !is_same_timing(
+					&cur_stream->public.timing,
+					&new_stream->public.timing);
+}
+
+static void set_stream_signal(struct core_stream *stream)
+{
+	struct dc_sink *dc_sink = (struct dc_sink *)stream->public.sink;
+
+	/* For ASICs that support dual link DVI, adjust the signal type based
+	 * on the timing pixel clock: above 165 MHz the signal is dual link,
+	 * otherwise single link.
+	 */
+	if (dc_sink->sink_signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
+			dc_sink->sink_signal == SIGNAL_TYPE_DVI_DUAL_LINK) {
+		if (stream->public.timing.pix_clk_khz >
+			TMDS_MAX_PIXEL_CLOCK_IN_KHZ)
+			dc_sink->sink_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+		else
+			dc_sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+	}
+
+	stream->signal = dc_sink->sink_signal;
+}
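+
+/*
+ * Example for set_stream_signal(): a 1080p60 mode at 148500 kHz stays
+ * SIGNAL_TYPE_DVI_SINGLE_LINK, while a mode above 165000 kHz (for instance
+ * 2560x1600@60, roughly 268500 kHz with reduced blanking) is promoted to
+ * SIGNAL_TYPE_DVI_DUAL_LINK.
+ */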
+
+enum dc_status map_resources(
+		const struct dc *dc,
+		struct validate_context *context)
+{
+	uint8_t i, j;
+
+	/* mark resources used for targets that are already active */
+	for (i = 0; i < context->target_count; i++) {
+		struct core_target *target = context->targets[i];
+
+		if (!context->target_flags[i].unchanged)
+			continue;
+
+		for (j = 0; j < target->public.stream_count; j++) {
+			struct core_stream *stream =
+				DC_STREAM_TO_CORE(target->public.streams[j]);
+
+			attach_stream_to_controller(
+				&context->res_ctx,
+				stream);
+
+			set_stream_engine_in_use(
+				&context->res_ctx,
+				stream->stream_enc);
+
+			reference_clock_source(
+				&context->res_ctx,
+				stream->clock_source);
+
+			if (stream->audio) {
+				set_audio_in_use(&context->res_ctx,
+					stream->audio);
+			}
+		}
+	}
+
+	/* acquire new resources */
+	for (i = 0; i < context->target_count; i++) {
+		struct core_target *target = context->targets[i];
+
+		if (context->target_flags[i].unchanged)
+			continue;
+
+		for (j = 0; j < target->public.stream_count; j++) {
+			struct core_stream *stream =
+				DC_STREAM_TO_CORE(target->public.streams[j]);
+			struct core_stream *curr_stream;
+
+			if (!assign_first_free_controller(
+					&context->res_ctx, stream))
+				return DC_NO_CONTROLLER_RESOURCE;
+
+			attach_stream_to_controller(&context->res_ctx, stream);
+
+			set_stream_signal(stream);
+
+			curr_stream =
+				dc->current_context.res_ctx.controller_ctx
+				[stream->controller_idx].stream;
+			context->res_ctx.controller_ctx[stream->controller_idx]
+			.flags.timing_changed =
+				check_timing_change(curr_stream, stream);
+
+			stream->stream_enc =
+				find_first_free_match_stream_enc_for_link(
+					&context->res_ctx,
+					stream->sink->link);
+
+			if (!stream->stream_enc)
+				return DC_NO_STREAM_ENG_RESOURCE;
+
+			set_stream_engine_in_use(
+					&context->res_ctx,
+					stream->stream_enc);
+
+			/* TODO: Add check if ASIC support and EDID audio */
+			if (!stream->sink->converter_disable_audio &&
+						dc_is_audio_capable_signal(
+						stream->signal)) {
+				stream->audio = find_first_free_audio(
+						&context->res_ctx);
+
+				if (!stream->audio)
+					return DC_NO_STREAM_AUDIO_RESOURCE;
+
+				set_audio_in_use(&context->res_ctx,
+						stream->audio);
+			}
+		}
+	}
+
+	return DC_OK;
+}
+
+static enum ds_color_space build_default_color_space(
+		struct core_stream *stream)
+{
+	enum ds_color_space color_space =
+			DS_COLOR_SPACE_SRGB_FULLRANGE;
+	struct dc_crtc_timing *timing = &stream->public.timing;
+
+	switch (stream->signal) {
+	/* TODO: implement other signal color space setting */
+	case SIGNAL_TYPE_DISPLAY_PORT:
+	case SIGNAL_TYPE_DISPLAY_PORT_MST:
+	case SIGNAL_TYPE_EDP:
+		break;
+	case SIGNAL_TYPE_HDMI_TYPE_A:
+	{
+		uint32_t pix_clk_khz;
+
+		if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 &&
+			timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) {
+			if (timing->timing_standard ==
+					TIMING_STANDARD_CEA770 &&
+				timing->timing_standard ==
+						TIMING_STANDARD_CEA861)
+				color_space = DS_COLOR_SPACE_SRGB_FULLRANGE;
+
+			pix_clk_khz = timing->pix_clk_khz / 10;
+			if (timing->h_addressable == 640 &&
+				timing->v_addressable == 480 &&
+				(pix_clk_khz == 2520 || pix_clk_khz == 2517))
+				color_space = DS_COLOR_SPACE_SRGB_FULLRANGE;
+		} else {
+			if (timing->timing_standard ==
+					TIMING_STANDARD_CEA770 ||
+					timing->timing_standard ==
+					TIMING_STANDARD_CEA861) {
+
+				color_space =
+					(timing->pix_clk_khz > PIXEL_CLOCK) ?
+						DS_COLOR_SPACE_YCBCR709 :
+						DS_COLOR_SPACE_YCBCR601;
+			}
+		}
+		break;
+	}
+	default:
+		switch (timing->pixel_encoding) {
+		case PIXEL_ENCODING_YCBCR422:
+		case PIXEL_ENCODING_YCBCR444:
+			if (timing->pix_clk_khz > PIXEL_CLOCK)
+				color_space = DS_COLOR_SPACE_YCBCR709;
+			else
+				color_space = DS_COLOR_SPACE_YCBCR601;
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+	return color_space;
+}
+
+static void translate_info_frame(const struct hw_info_frame *hw_info_frame,
+	struct encoder_info_frame *encoder_info_frame)
+{
+	dm_memset(
+		encoder_info_frame, 0, sizeof(struct encoder_info_frame));
+
+	/* For gamut we recalc checksum */
+	if (hw_info_frame->gamut_packet.valid) {
+		uint8_t chk_sum = 0;
+		uint8_t *ptr;
+		uint8_t i;
+
+		dm_memmove(
+						&encoder_info_frame->gamut,
+						&hw_info_frame->gamut_packet,
+						sizeof(struct hw_info_packet));
+
+		/*start of the Gamut data. */
+		ptr = &encoder_info_frame->gamut.sb[3];
+
+		for (i = 0; i <= encoder_info_frame->gamut.sb[1]; i++)
+			chk_sum += ptr[i];
+
+		encoder_info_frame->gamut.sb[2] = (uint8_t) (0x100 - chk_sum);
+	}
+
+	if (hw_info_frame->avi_info_packet.valid) {
+		dm_memmove(
+						&encoder_info_frame->avi,
+						&hw_info_frame->avi_info_packet,
+						sizeof(struct hw_info_packet));
+	}
+
+	if (hw_info_frame->vendor_info_packet.valid) {
+		dm_memmove(
+						&encoder_info_frame->vendor,
+						&hw_info_frame->vendor_info_packet,
+						sizeof(struct hw_info_packet));
+	}
+
+	if (hw_info_frame->spd_packet.valid) {
+		dm_memmove(
+						&encoder_info_frame->spd,
+						&hw_info_frame->spd_packet,
+						sizeof(struct hw_info_packet));
+	}
+
+	if (hw_info_frame->vsc_packet.valid) {
+		dm_memmove(
+						&encoder_info_frame->vsc,
+						&hw_info_frame->vsc_packet,
+						sizeof(struct hw_info_packet));
+	}
+}
+
+static void set_avi_info_frame(struct hw_info_packet *info_packet,
+		struct core_stream *stream)
+{
+	enum ds_color_space color_space = DS_COLOR_SPACE_UNKNOWN;
+	struct info_frame info_frame = { {0} };
+	uint32_t pixel_encoding = 0;
+	enum scanning_type scan_type = SCANNING_TYPE_NODATA;
+	enum dc_aspect_ratio aspect = ASPECT_RATIO_NO_DATA;
+	bool itc = false;
+	uint8_t cn0_cn1 = 0;
+	uint8_t *check_sum = NULL;
+	uint8_t byte_index = 0;
+
+	if (info_packet == NULL)
+		return;
+
+	color_space = build_default_color_space(stream);
+
+	/* Initialize header */
+	info_frame.avi_info_packet.info_packet_hdmi.bits.header.
+			info_frame_type = INFO_FRAME_AVI;
+	/* InfoFrameVersion_3 is defined by CEA861F (Section 6.4), but shall
+	* not be used in HDMI 2.0 (Section 10.1) */
+	info_frame.avi_info_packet.info_packet_hdmi.bits.header.version =
+			INFO_FRAME_VERSION_2;
+	info_frame.avi_info_packet.info_packet_hdmi.bits.header.length =
+			INFO_FRAME_SIZE_AVI;
+
+	/* IDO-defined (Y2,Y1,Y0 = 1,1,1) shall not be used by devices built
+	* according to HDMI 2.0 spec (Section 10.1)
+	* Add "case PixelEncoding_YCbCr420:    pixelEncoding = 3; break;"
+	* when YCbCr 4:2:0 is supported by DAL hardware. */
+
+	switch (stream->public.timing.pixel_encoding) {
+	case PIXEL_ENCODING_YCBCR422:
+		pixel_encoding = 1;
+		break;
+
+	case PIXEL_ENCODING_YCBCR444:
+		pixel_encoding = 2;
+		break;
+
+	case PIXEL_ENCODING_RGB:
+	default:
+		pixel_encoding = 0;
+	}
+
+	/* Y0_Y1_Y2 : The pixel encoding */
+	/* H14b AVI InfoFrame has extension on Y-field from 2 bits to 3 bits */
+	info_frame.avi_info_packet.info_packet_hdmi.bits.Y0_Y1_Y2 =
+		pixel_encoding;
+
+
+	/* A0 = 1 Active Format Information valid */
+	info_frame.avi_info_packet.info_packet_hdmi.bits.A0 =
+		ACTIVE_FORMAT_VALID;
+
+	/* B0, B1 = 3; Bar info data is valid */
+	info_frame.avi_info_packet.info_packet_hdmi.bits.B0_B1 =
+		BAR_INFO_BOTH_VALID;
+
+	info_frame.avi_info_packet.info_packet_hdmi.bits.SC0_SC1 =
+			PICTURE_SCALING_UNIFORM;
+
+	/* S0, S1 : Underscan / Overscan */
+	/* TODO: un-hardcode scan type */
+	scan_type = SCANNING_TYPE_UNDERSCAN;
+	info_frame.avi_info_packet.info_packet_hdmi.bits.S0_S1 = scan_type;
+
+	/* C0, C1 : Colorimetry */
+	if (color_space == DS_COLOR_SPACE_YCBCR709)
+		info_frame.avi_info_packet.info_packet_hdmi.bits.C0_C1 =
+				COLORIMETRY_ITU709;
+	else if (color_space == DS_COLOR_SPACE_YCBCR601)
+		info_frame.avi_info_packet.info_packet_hdmi.bits.C0_C1 =
+				COLORIMETRY_ITU601;
+	else
+		info_frame.avi_info_packet.info_packet_hdmi.bits.C0_C1 =
+				COLORIMETRY_NO_DATA;
+
+
+	/* TODO: un-hardcode aspect ratio */
+	aspect = stream->public.timing.aspect_ratio;
+
+	switch (aspect) {
+	case ASPECT_RATIO_4_3:
+	case ASPECT_RATIO_16_9:
+		info_frame.avi_info_packet.info_packet_hdmi.bits.M0_M1 = aspect;
+		break;
+
+	case ASPECT_RATIO_NO_DATA:
+	case ASPECT_RATIO_64_27:
+	case ASPECT_RATIO_256_135:
+	default:
+		info_frame.avi_info_packet.info_packet_hdmi.bits.M0_M1 = 0;
+	}
+
+	/* Active Format Aspect ratio - same as Picture Aspect Ratio. */
+	info_frame.avi_info_packet.info_packet_hdmi.bits.R0_R3 =
+			ACTIVE_FORMAT_ASPECT_RATIO_SAME_AS_PICTURE;
+
+	/* TODO: un-hardcode cn0_cn1 and itc */
+	cn0_cn1 = 0;
+	itc = false;
+
+	if (itc) {
+		info_frame.avi_info_packet.info_packet_hdmi.bits.ITC = 1;
+		info_frame.avi_info_packet.info_packet_hdmi.bits.CN0_CN1 =
+			cn0_cn1;
+	}
+
+	/* TODO: un-hardcode q0_q1 */
+	if (color_space == DS_COLOR_SPACE_SRGB_FULLRANGE)
+		info_frame.avi_info_packet.info_packet_hdmi.bits.Q0_Q1 =
+						RGB_QUANTIZATION_FULL_RANGE;
+	else if (color_space == DS_COLOR_SPACE_SRGB_LIMITEDRANGE)
+		info_frame.avi_info_packet.info_packet_hdmi.bits.Q0_Q1 =
+						RGB_QUANTIZATION_LIMITED_RANGE;
+	else
+		info_frame.avi_info_packet.info_packet_hdmi.bits.Q0_Q1 =
+						RGB_QUANTIZATION_DEFAULT_RANGE;
+
+	/* TODO : We should handle YCC quantization,
+	 * but we do not have matrix calculation */
+	info_frame.avi_info_packet.info_packet_hdmi.bits.YQ0_YQ1 =
+					YYC_QUANTIZATION_LIMITED_RANGE;
+
+	info_frame.avi_info_packet.info_packet_hdmi.bits.VIC0_VIC7 =
+					stream->public.timing.vic;
+
+	/* pixel repetition
+	 * PR0 - PR3 start from 0 whereas pHwPathMode->mode.timing.flags.pixel
+	 * repetition start from 1 */
+	info_frame.avi_info_packet.info_packet_hdmi.bits.PR0_PR3 = 0;
+
+	/* Bar Info
+	 * barTop:    Line Number of End of Top Bar.
+	 * barBottom: Line Number of Start of Bottom Bar.
+	 * barLeft:   Pixel Number of End of Left Bar.
+	 * barRight:  Pixel Number of Start of Right Bar. */
+	info_frame.avi_info_packet.info_packet_hdmi.bits.bar_top =
+			stream->public.timing.v_border_top;
+	info_frame.avi_info_packet.info_packet_hdmi.bits.bar_bottom =
+		(stream->public.timing.v_border_top
+			- stream->public.timing.v_border_bottom + 1);
+	info_frame.avi_info_packet.info_packet_hdmi.bits.bar_left =
+			stream->public.timing.h_border_left;
+	info_frame.avi_info_packet.info_packet_hdmi.bits.bar_right =
+		(stream->public.timing.h_total
+			- stream->public.timing.h_border_right + 1);
+
+	/* check_sum - Calculate AFMT_AVI_INFO0 ~ AFMT_AVI_INFO3 */
+	check_sum =
+		&info_frame.
+		avi_info_packet.info_packet_hdmi.packet_raw_data.sb[0];
+	*check_sum = INFO_FRAME_AVI + INFO_FRAME_SIZE_AVI
+			+ INFO_FRAME_VERSION_2;
+
+	for (byte_index = 1; byte_index <= INFO_FRAME_SIZE_AVI; byte_index++)
+		*check_sum += info_frame.avi_info_packet.info_packet_hdmi.
+				packet_raw_data.sb[byte_index];
+
+	/* one byte complement */
+	*check_sum = (uint8_t) (0x100 - *check_sum);
+
+	/* Store in hw_path_mode */
+	info_packet->hb0 =
+		info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.hb0;
+	info_packet->hb1 =
+		info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.hb1;
+	info_packet->hb2 =
+		info_frame.avi_info_packet.info_packet_hdmi.packet_raw_data.hb2;
+
+	for (byte_index = 0; byte_index < sizeof(info_packet->sb); byte_index++)
+		info_packet->sb[byte_index] = info_frame.avi_info_packet.
+		info_packet_hdmi.packet_raw_data.sb[byte_index];
+
+	info_packet->valid = true;
+}
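+
+/*
+ * Checksum example for the AVI packet above, assuming the CEA-861 values
+ * INFO_FRAME_AVI = 0x82, INFO_FRAME_VERSION_2 = 0x02 and
+ * INFO_FRAME_SIZE_AVI = 0x0D: if the payload bytes sum to 0x35, then
+ * sb[0] = 0x100 - (0x82 + 0x02 + 0x0D + 0x35) = 0x3A, so header, payload
+ * and checksum together sum to zero modulo 256.
+ */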
+
+static void set_vendor_info_packet(struct core_stream *stream,
+		struct hw_info_packet *info_packet)
+{
+	uint32_t length = 0;
+	bool hdmi_vic_mode = false;
+	uint8_t checksum = 0;
+	uint32_t i = 0;
+	enum dc_timing_3d_format format;
+
+	ASSERT_CRITICAL(stream != NULL);
+	ASSERT_CRITICAL(info_packet != NULL);
+
+	format = stream->public.timing.timing_3d_format;
+
+	/* Can be different depending on packet content */
+	length = 5;
+
+	if (stream->public.timing.hdmi_vic != 0
+			&& stream->public.timing.h_total >= 3840
+			&& stream->public.timing.v_total >= 2160)
+		hdmi_vic_mode = true;
+
+	/* According to HDMI 1.4a CTS, VSIF should be sent
+	 * for both 3D stereo and HDMI VIC modes.
+	 * For all other modes, there is no VSIF sent.  */
+
+	if (format == TIMING_3D_FORMAT_NONE && !hdmi_vic_mode)
+		return;
+
+	/* 24bit IEEE Registration identifier (0x000c03). LSB first. */
+	info_packet->sb[1] = 0x03;
+	info_packet->sb[2] = 0x0C;
+	info_packet->sb[3] = 0x00;
+
+	/*PB4: 5 lower bytes = 0 (reserved). 3 higher bits = HDMI_Video_Format.
+	 * The value for HDMI_Video_Format are:
+	 * 0x0 (0b000) - No additional HDMI video format is presented in this
+	 * packet
+	 * 0x1 (0b001) - Extended resolution format present. 1 byte of HDMI_VIC
+	 * parameter follows
+	 * 0x2 (0b010) - 3D format indication present. 3D_Structure and
+	 * potentially 3D_Ext_Data follows
+	 * 0x3..0x7 (0b011..0b111) - reserved for future use */
+	if (format != TIMING_3D_FORMAT_NONE)
+		info_packet->sb[4] = (2 << 5);
+	else if (hdmi_vic_mode)
+		info_packet->sb[4] = (1 << 5);
+
+	/* PB5: If PB4 claims 3D timing (HDMI_Video_Format = 0x2):
+	 * 4 lower bits = 0 (reserved). 4 higher bits = 3D_Structure.
+	 * The value for 3D_Structure are:
+	 * 0x0 - Frame Packing
+	 * 0x1 - Field Alternative
+	 * 0x2 - Line Alternative
+	 * 0x3 - Side-by-Side (full)
+	 * 0x4 - L + depth
+	 * 0x5 - L + depth + graphics + graphics-depth
+	 * 0x6 - Top-and-Bottom
+	 * 0x7 - Reserved for future use
+	 * 0x8 - Side-by-Side (Half)
+	 * 0x9..0xE - Reserved for future use
+	 * 0xF - Not used */
+	switch (format) {
+	case TIMING_3D_FORMAT_HW_FRAME_PACKING:
+	case TIMING_3D_FORMAT_SW_FRAME_PACKING:
+		info_packet->sb[5] = (0x0 << 4);
+		break;
+
+	case TIMING_3D_FORMAT_SIDE_BY_SIDE:
+	case TIMING_3D_FORMAT_SBS_SW_PACKED:
+		info_packet->sb[5] = (0x8 << 4);
+		length = 6;
+		break;
+
+	case TIMING_3D_FORMAT_TOP_AND_BOTTOM:
+	case TIMING_3D_FORMAT_TB_SW_PACKED:
+		info_packet->sb[5] = (0x6 << 4);
+		break;
+
+	default:
+		break;
+	}
+
+	/*PB5: If PB4 is set to 0x1 (extended resolution format)
+	 * fill PB5 with the correct HDMI VIC code */
+	if (hdmi_vic_mode)
+		info_packet->sb[5] = stream->public.timing.hdmi_vic;
+
+	/* Header */
+	info_packet->hb0 = 0x81; /* VSIF packet type. */
+	info_packet->hb1 = 0x01; /* Version */
+
+	/* 4 lower bits = Length, 4 higher bits = 0 (reserved) */
+	info_packet->hb2 = (uint8_t) (length);
+
+	/* Calculate checksum */
+	checksum = 0;
+	checksum += info_packet->hb0;
+	checksum += info_packet->hb1;
+	checksum += info_packet->hb2;
+
+	for (i = 1; i <= length; i++)
+		checksum += info_packet->sb[i];
+
+	info_packet->sb[0] = (uint8_t) (0x100 - checksum);
+
+	info_packet->valid = true;
+}
+
+void build_info_frame(struct core_stream *stream)
+{
+	enum signal_type signal = SIGNAL_TYPE_NONE;
+	struct hw_info_frame info_frame = { { 0 } };
+
+	/* default all packets to invalid */
+	info_frame.avi_info_packet.valid = false;
+	info_frame.gamut_packet.valid = false;
+	info_frame.vendor_info_packet.valid = false;
+	info_frame.spd_packet.valid = false;
+	info_frame.vsc_packet.valid = false;
+
+	signal = stream->sink->public.sink_signal;
+
+	/* HDMI and DP have different info packets */
+	if (signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+		set_avi_info_frame(&info_frame.avi_info_packet,
+				stream);
+		set_vendor_info_packet(stream, &info_frame.vendor_info_packet);
+	}
+
+	translate_info_frame(&info_frame,
+			&stream->encoder_info_frame);
+}
diff --git a/drivers/gpu/drm/amd/dal/dc/core/dc_sink.c b/drivers/gpu/drm/amd/dal/dc/core/dc_sink.c
new file mode 100644
index 000000000000..c5a770e61812
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/dc/core/dc_sink.c
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dm_helpers.h"
+#include "core_types.h"
+
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+
+struct sink {
+	struct core_sink protected;
+	int ref_count;
+};
+
+#define DC_SINK_TO_SINK(dc_sink) \
+			container_of(dc_sink, struct sink, protected.public)
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+
+static void destruct(struct sink *sink)
+{
+
+}
+
+static bool construct(struct sink *sink, const struct dc_sink_init_data *init_params)
+{
+
+	struct core_link *core_link = DC_LINK_TO_LINK(init_params->link);
+
+	sink->protected.public.sink_signal = init_params->sink_signal;
+	sink->protected.link = core_link;
+	sink->protected.ctx = core_link->ctx;
+	sink->protected.dongle_max_pix_clk = init_params->dongle_max_pix_clk;
+	sink->protected.converter_disable_audio =
+			init_params->converter_disable_audio;
+
+	return true;
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+
+void dc_sink_retain(const struct dc_sink *dc_sink)
+{
+	struct sink *sink = DC_SINK_TO_SINK(dc_sink);
+
+	++sink->ref_count;
+}
+
+void dc_sink_release(const struct dc_sink *dc_sink)
+{
+	struct core_sink *core_sink = DC_SINK_TO_CORE(dc_sink);
+	struct sink *sink = DC_SINK_TO_SINK(dc_sink);
+
+	--sink->ref_count;
+
+	if (sink->ref_count == 0) {
+		destruct(sink);
+		dm_free(core_sink->ctx, sink);
+	}
+}
+
+struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params)
+{
+	struct core_link *core_link = DC_LINK_TO_LINK(init_params->link);
+
+	struct sink *sink = dm_alloc(core_link->ctx, sizeof(*sink));
+
+	if (NULL == sink)
+		goto alloc_fail;
+
+	if (false == construct(sink, init_params))
+		goto construct_fail;
+
+	/* TODO should we move this outside to where the assignment actually happens? */
+	dc_sink_retain(&sink->protected.public);
+
+	return &sink->protected.public;
+
+construct_fail:
+	dm_free(core_link->ctx, sink);
+
+alloc_fail:
+	return NULL;
+}
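+
+/*
+ * Usage sketch (hypothetical caller; the signal type is chosen purely for
+ * illustration):
+ *
+ *	struct dc_sink_init_data init_data = {
+ *		.link = link,
+ *		.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A,
+ *	};
+ *	struct dc_sink *sink = dc_sink_create(&init_data);
+ *
+ *	if (sink) {
+ *		// use sink, then drop the reference taken by create
+ *		dc_sink_release(sink);
+ *	}
+ */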
+
+/*******************************************************************************
+ * Protected functions - visible only inside of DC (not visible in DM)
+ ******************************************************************************/
diff --git a/drivers/gpu/drm/amd/dal/dc/core/dc_stream.c b/drivers/gpu/drm/amd/dal/dc/core/dc_stream.c
new file mode 100644
index 000000000000..d7012bcda10a
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/dc/core/dc_stream.c
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+#include "core_types.h"
+#include "resource.h"
+
+/*******************************************************************************
+ * Private definitions
+ ******************************************************************************/
+
+struct stream {
+	struct core_stream protected;
+	int ref_count;
+};
+
+#define DC_STREAM_TO_STREAM(dc_stream) container_of(dc_stream, struct stream, protected.public)
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+static void build_bit_depth_reduction_params(
+		const struct core_stream *stream,
+		struct bit_depth_reduction_params *fmt_bit_depth)
+{
+	dm_memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth));
+
+	/*TODO: Need to un-hardcode, refer to function with same name
+	 * in dal2 hw_sequencer*/
+
+	fmt_bit_depth->flags.TRUNCATE_ENABLED = 0;
+	fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 0;
+	fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 0;
+
+	/* Diagnostics need a consistent CRC of the image, which means
+	 * dithering should not be enabled for diagnostics. */
+	if (IS_DIAG_DC(stream->ctx->dce_environment) == false) {
+
+		fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1;
+		fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+
+		/* frame random is on by default */
+		fmt_bit_depth->flags.FRAME_RANDOM = 1;
+		/* apply RGB dithering */
+		fmt_bit_depth->flags.RGB_RANDOM = true;
+	}
+
+	return;
+}
+
+static void setup_pixel_encoding(
+	struct clamping_and_pixel_encoding_params *clamping)
+{
+	/*TODO: Need to un-hardcode, refer to function with same name
+		 * in dal2 hw_sequencer*/
+
+	clamping->pixel_encoding = PIXEL_ENCODING_RGB;
+
+	return;
+}
+
+static bool construct(struct core_stream *stream,
+	const struct dc_sink *dc_sink_data)
+{
+	uint32_t i = 0;
+
+	stream->sink = DC_SINK_TO_CORE(dc_sink_data);
+	stream->ctx = stream->sink->ctx;
+	stream->public.sink = dc_sink_data;
+
+	dc_sink_retain(dc_sink_data);
+
+	build_bit_depth_reduction_params(stream, &stream->bit_depth_params);
+	setup_pixel_encoding(&stream->clamping);
+
+	/* Copy audio modes */
+	/* TODO - Remove this translation */
+	for (i = 0; i < dc_sink_data->edid_caps.audio_mode_count; i++) {
+		stream->public.audio_info.modes[i].channel_count =
+			dc_sink_data->edid_caps.audio_modes[i].channel_count;
+		stream->public.audio_info.modes[i].format_code =
+			dc_sink_data->edid_caps.audio_modes[i].format_code;
+		stream->public.audio_info.modes[i].sample_rates.all =
+			dc_sink_data->edid_caps.audio_modes[i].sample_rate;
+		stream->public.audio_info.modes[i].sample_size =
+			dc_sink_data->edid_caps.audio_modes[i].sample_size;
+	}
+	stream->public.audio_info.mode_count = dc_sink_data->edid_caps.audio_mode_count;
+	stream->public.audio_info.audio_latency = dc_sink_data->edid_caps.audio_latency;
+	stream->public.audio_info.video_latency = dc_sink_data->edid_caps.video_latency;
+	dm_memmove(
+		stream->public.audio_info.display_name,
+		dc_sink_data->edid_caps.display_name,
+		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
+	stream->public.audio_info.manufacture_id = dc_sink_data->edid_caps.manufacturer_id;
+	stream->public.audio_info.product_id = dc_sink_data->edid_caps.product_id;
+	stream->public.audio_info.flags.all = dc_sink_data->edid_caps.speaker_flags;
+
+	/* TODO - Unhardcode port_id */
+	stream->public.audio_info.port_id[0] = 0x5558859e;
+	stream->public.audio_info.port_id[1] = 0xd989449;
+
+	/* EDID CAP translation for HDMI 2.0 */
+	stream->public.timing.flags.LTE_340MCSC_SCRAMBLE = dc_sink_data->edid_caps.lte_340mcsc_scramble;
+	return true;
+}
+
+static void destruct(struct core_stream *stream)
+{
+	dc_sink_release(&stream->sink->public);
+}
+
+void dc_stream_retain(struct dc_stream *dc_stream)
+{
+	struct stream *stream = DC_STREAM_TO_STREAM(dc_stream);
+	stream->ref_count++;
+}
+
+void dc_stream_release(struct dc_stream *public)
+{
+	struct stream *stream = DC_STREAM_TO_STREAM(public);
+	struct core_stream *protected = DC_STREAM_TO_CORE(public);
+	struct dc_context *ctx = protected->ctx;
+	stream->ref_count--;
+
+	if (stream->ref_count == 0) {
+		destruct(protected);
+		dm_free(ctx, stream);
+	}
+}
+
+struct dc_stream *dc_create_stream_for_sink(const struct dc_sink *dc_sink)
+{
+	struct core_sink *sink = DC_SINK_TO_CORE(dc_sink);
+	struct stream *stream;
+
+	if (sink == NULL)
+		goto alloc_fail;
+
+	stream = dm_alloc(sink->ctx, sizeof(struct stream));
+
+	if (NULL == stream)
+		goto alloc_fail;
+
+	if (false == construct(&stream->protected, dc_sink))
+		goto construct_fail;
+
+	dc_stream_retain(&stream->protected.public);
+
+	return &stream->protected.public;
+
+construct_fail:
+	dm_free(sink->ctx, stream);
+
+alloc_fail:
+	return NULL;
+}
+
+void dc_update_stream(const struct dc_stream *dc_stream,
+		struct rect *src,
+		struct rect *dst)
+{
+	struct core_stream *stream = DC_STREAM_TO_CORE(dc_stream);
+
+	stream->public.src = *src;
+	stream->public.dst = *dst;
+}
+
diff --git a/drivers/gpu/drm/amd/dal/dc/core/dc_surface.c b/drivers/gpu/drm/amd/dal/dc/core/dc_surface.c
new file mode 100644
index 000000000000..1a9ee8f97757
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/dc/core/dc_surface.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/* DC interface (public) */
+#include "dm_services.h"
+#include "dc.h"
+
+/* DC core (private) */
+#include "core_dc.h"
+#include "inc/transform.h"
+
+/*******************************************************************************
+ * Private structures
+ ******************************************************************************/
+struct surface {
+	struct core_surface protected;
+	enum dc_irq_source irq_source;
+	int ref_count;
+};
+
+#define DC_SURFACE_TO_SURFACE(dc_surface) container_of(dc_surface, struct surface, protected.public)
+#define CORE_SURFACE_TO_SURFACE(core_surface) container_of(core_surface, struct surface, protected)
+
+/*******************************************************************************
+ * Private functions
+ ******************************************************************************/
+static bool construct(struct dc_context *ctx, struct surface *surface)
+{
+	uint32_t i;
+	struct gamma_ramp *gamma =
+			&surface->protected.public.gamma_correction;
+
+	/* construct gamma default value. */
+	for (i = 0; i < NUM_OF_RAW_GAMMA_RAMP_RGB_256; i++) {
+		gamma->gamma_ramp_rgb256x3x16.red[i] =
+				(unsigned short) (i << 8);
+		gamma->gamma_ramp_rgb256x3x16.green[i] =
+				(unsigned short) (i << 8);
+		gamma->gamma_ramp_rgb256x3x16.blue[i] =
+				(unsigned short) (i << 8);
+	}
+	gamma->type = GAMMA_RAMP_TYPE_RGB256;
+	gamma->size = sizeof(gamma->gamma_ramp_rgb256x3x16);
+
+	surface->protected.ctx = ctx;
+	return true;
+}
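+
+/*
+ * The default gamma built above is a linear identity ramp: entry i maps to
+ * i << 8, e.g. entry 128 becomes 0x8000 (32768) on the 16-bit scale.
+ */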
+
+static void destruct(struct surface *surface)
+{
+}
+
+/*******************************************************************************
+ * Public functions
+ ******************************************************************************/
+void enable_surface_flip_reporting(struct dc_surface *dc_surface,
+		uint32_t controller_id)
+{
+	struct surface *surface = DC_SURFACE_TO_SURFACE(dc_surface);
+	surface->irq_source = controller_id + DC_IRQ_SOURCE_PFLIP1 - 1;
+	/*register_flip_interrupt(surface);*/
+}
+
+struct dc_surface *dc_create_surface(const struct dc *dc)
+{
+	struct surface *surface = dm_alloc(dc->ctx, sizeof(*surface));
+
+	if (NULL == surface)
+		goto alloc_fail;
+
+	if (false == construct(dc->ctx, surface))
+		goto construct_fail;
+
+	dc_surface_retain(&surface->protected.public);
+
+	return &surface->protected.public;
+
+construct_fail:
+	dm_free(dc->ctx, surface);
+
+alloc_fail:
+	return NULL;
+}
+
+void dc_surface_retain(const struct dc_surface *dc_surface)
+{
+	struct surface *surface = DC_SURFACE_TO_SURFACE(dc_surface);
+
+	++surface->ref_count;
+}
+
+void dc_surface_release(const struct dc_surface *dc_surface)
+{
+	struct surface *surface = DC_SURFACE_TO_SURFACE(dc_surface);
+	--surface->ref_count;
+
+	if (surface->ref_count == 0) {
+		destruct(surface);
+		dm_free(surface->protected.ctx, surface);
+	}
+}
diff --git a/drivers/gpu/drm/amd/dal/dc/core/dc_target.c b/drivers/gpu/drm/amd/dal/dc/core/dc_target.c
new file mode 100644
index 000000000000..e93e73d13448
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/dc/core/dc_target.c
@@ -0,0 +1,548 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "core_types.h"
+#include "hw_sequencer.h"
+#include "resource.h"
+#include "ipp.h"
+#include "timing_generator.h"
+
+#define COEFF_RANGE	3
+#define REGAMMA_COEFF_A0	31308
+#define REGAMMA_COEFF_A1	12920
+#define REGAMMA_COEFF_A2	55
+#define REGAMMA_COEFF_A3	55
+#define REGAMMA_COEFF_GAMMA	2400
+
+struct target {
+	struct core_target protected;
+	int ref_count;
+};
+
+#define DC_TARGET_TO_TARGET(dc_target) \
+	container_of(dc_target, struct target, protected.public)
+#define CORE_TARGET_TO_TARGET(core_target) \
+	container_of(core_target, struct target, protected)
+
+static void construct(
+	struct core_target *target,
+	struct dc_context *ctx,
+	struct dc_stream *dc_streams[],
+	uint8_t stream_count)
+{
+	uint8_t i;
+	for (i = 0; i < stream_count; i++) {
+		target->public.streams[i] = dc_streams[i];
+		dc_stream_retain(dc_streams[i]);
+	}
+
+	target->ctx = ctx;
+	target->public.stream_count = stream_count;
+}
+
+static void destruct(struct core_target *core_target)
+{
+	int i;
+
+	for (i = 0; i < core_target->status.surface_count; i++) {
+		dc_surface_release(core_target->status.surfaces[i]);
+		core_target->status.surfaces[i] = NULL;
+	}
+	for (i = 0; i < core_target->public.stream_count; i++) {
+		dc_stream_release(
+			(struct dc_stream *)core_target->public.streams[i]);
+		core_target->public.streams[i] = NULL;
+	}
+}
+
+void dc_target_retain(struct dc_target *dc_target)
+{
+	struct target *target = DC_TARGET_TO_TARGET(dc_target);
+
+	target->ref_count++;
+}
+
+void dc_target_release(struct dc_target *dc_target)
+{
+	struct target *target = DC_TARGET_TO_TARGET(dc_target);
+	struct core_target *protected = DC_TARGET_TO_CORE(dc_target);
+
+	ASSERT(target->ref_count > 0);
+	target->ref_count--;
+	if (target->ref_count == 0) {
+		destruct(protected);
+		dm_free(protected->ctx, target);
+	}
+}
+
+const struct dc_target_status *dc_target_get_status(
+					const struct dc_target* dc_target)
+{
+	struct core_target* target = DC_TARGET_TO_CORE(dc_target);
+	return &target->status;
+}
+
+struct dc_target *dc_create_target_for_streams(
+		struct dc_stream *dc_streams[],
+		uint8_t stream_count)
+{
+	struct core_stream *stream;
+	struct target *target;
+
+	if (0 == stream_count)
+		goto target_alloc_fail;
+
+	stream = DC_STREAM_TO_CORE(dc_streams[0]);
+
+	target = dm_alloc(stream->ctx, sizeof(struct target));
+
+	if (NULL == target)
+		goto target_alloc_fail;
+
+	construct(&target->protected, stream->ctx, dc_streams, stream_count);
+
+	dc_target_retain(&target->protected.public);
+
+	return &target->protected.public;
+
+
+target_alloc_fail:
+	return NULL;
+}
+
+static void build_gamma_params(
+		enum pixel_format pixel_format,
+		struct gamma_parameters *gamma_param)
+{
+	uint32_t i;
+
+	/* translate parameters */
+	gamma_param->surface_pixel_format = pixel_format;
+
+	gamma_param->regamma_adjust_type = GRAPHICS_REGAMMA_ADJUST_SW;
+	gamma_param->degamma_adjust_type = GRAPHICS_REGAMMA_ADJUST_SW;
+
+	gamma_param->selected_gamma_lut = GRAPHICS_GAMMA_LUT_REGAMMA;
+
+	/* TODO support non-legacy gamma */
+	gamma_param->disable_adjustments = false;
+	gamma_param->flag.bits.config_is_changed = 0;
+	gamma_param->flag.bits.regamma_update = 1;
+	gamma_param->flag.bits.gamma_update = 1;
+
+	/* Set regamma */
+	gamma_param->regamma.features.bits.GRAPHICS_DEGAMMA_SRGB = 1;
+	gamma_param->regamma.features.bits.OVERLAY_DEGAMMA_SRGB = 1;
+	gamma_param->regamma.features.bits.GAMMA_RAMP_ARRAY = 0;
+	gamma_param->regamma.features.bits.APPLY_DEGAMMA = 0;
+
+	for (i = 0; i < COEFF_RANGE; i++) {
+		gamma_param->regamma.gamma_coeff.a0[i] = REGAMMA_COEFF_A0;
+		gamma_param->regamma.gamma_coeff.a1[i] = REGAMMA_COEFF_A1;
+		gamma_param->regamma.gamma_coeff.a2[i] = REGAMMA_COEFF_A2;
+		gamma_param->regamma.gamma_coeff.a3[i] = REGAMMA_COEFF_A3;
+		gamma_param->regamma.gamma_coeff.gamma[i] = REGAMMA_COEFF_GAMMA;
+	}
+}
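+
+/*
+ * The REGAMMA_COEFF_* values used above appear to be fixed-point encodings
+ * of the sRGB transfer function constants (threshold 0.0031308, linear
+ * slope 12.92, offset 0.055, exponent 2.4), i.e. roughly:
+ *
+ *	out = 12.92 * in                      for in <= 0.0031308
+ *	out = 1.055 * in^(1/2.4) - 0.055      otherwise
+ *
+ * The exact scale factors are defined by the regamma hardware programming
+ * and are not spelled out in this file.
+ */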
+
+
+static bool program_gamma(
+		struct dc_context *ctx,
+		struct dc_surface *surface,
+		struct input_pixel_processor *ipp,
+		struct output_pixel_processor *opp)
+{
+	struct gamma_parameters *gamma_param;
+	bool result = false;
+
+	gamma_param = dm_alloc(ctx, sizeof(struct gamma_parameters));
+
+	if (!gamma_param)
+		goto gamma_param_fail;
+
+	build_gamma_params(surface->format, gamma_param);
+
+	result = ctx->dc->hwss.set_gamma_ramp(ipp, opp,
+			&surface->gamma_correction,
+			gamma_param);
+
+	dm_free(ctx, gamma_param);
+
+gamma_param_fail:
+	return result;
+}
+
+static bool validate_surface_address(
+		struct dc_plane_address address)
+{
+	bool is_valid_address = false;
+
+	switch (address.type) {
+	case PLN_ADDR_TYPE_GRAPHICS:
+		if (address.grph.addr.quad_part != 0)
+			is_valid_address = true;
+		break;
+	case PLN_ADDR_TYPE_GRPH_STEREO:
+		if ((address.grph_stereo.left_addr.quad_part != 0) &&
+			(address.grph_stereo.right_addr.quad_part != 0)) {
+			is_valid_address = true;
+		}
+		break;
+	case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
+	default:
+		/* not supported */
+		BREAK_TO_DEBUGGER();
+		break;
+	}
+
+	return is_valid_address;
+}
+
+bool dc_commit_surfaces_to_target(
+		struct dc *dc,
+		struct dc_surface *new_surfaces[],
+		uint8_t new_surface_count,
+		struct dc_target *dc_target)
+
+{
+	int i, j;
+	uint32_t prev_disp_clk = dc->current_context.bw_results.dispclk_khz;
+	struct core_target *target = DC_TARGET_TO_CORE(dc_target);
+
+	int current_enabled_surface_count = 0;
+	int new_enabled_surface_count = 0;
+
+	if (!dal_adapter_service_is_in_accelerated_mode(
+						dc->res_pool.adapter_srv) ||
+			dc->current_context.target_count == 0) {
+		return false;
+	}
+
+	for (i = 0; i < dc->current_context.target_count; i++)
+		if (target == dc->current_context.targets[i])
+			break;
+
+	/* Cannot commit surface to a target that is not committed */
+	if (i == dc->current_context.target_count)
+		return false;
+
+	for (i = 0; i < target->status.surface_count; i++)
+		if (target->status.surfaces[i]->visible)
+			current_enabled_surface_count++;
+
+	for (i = 0; i < new_surface_count; i++)
+		if (new_surfaces[i]->visible)
+			new_enabled_surface_count++;
+
+	dal_logger_write(dc->ctx->logger,
+				LOG_MAJOR_INTERFACE_TRACE,
+				LOG_MINOR_COMPONENT_DC,
+				"%s: commit %d surfaces to target 0x%x\n",
+				__func__,
+				new_surface_count,
+				dc_target);
+
+
+	if (!logical_attach_surfaces_to_target(
+						new_surfaces,
+						new_surface_count,
+						dc_target)) {
+		BREAK_TO_DEBUGGER();
+		goto unexpected_fail;
+	}
+
+	for (i = 0; i < new_surface_count; i++)
+		for (j = 0; j < target->public.stream_count; j++)
+			build_scaling_params(
+				new_surfaces[i],
+				DC_STREAM_TO_CORE(target->public.streams[j]));
+
+	if (dc->res_pool.funcs->validate_bandwidth(dc, &dc->current_context)
+								!= DC_OK) {
+		BREAK_TO_DEBUGGER();
+		goto unexpected_fail;
+	}
+
+	if (prev_disp_clk < dc->current_context.bw_results.dispclk_khz) {
+		dc->hwss.program_bw(dc, &dc->current_context);
+		pplib_apply_display_requirements(dc, &dc->current_context);
+	}
+
+	if (current_enabled_surface_count > 0 && new_enabled_surface_count == 0)
+		dc_target_disable_memory_requests(dc_target);
+
+	for (i = 0; i < new_surface_count; i++) {
+		struct dc_surface *surface = new_surfaces[i];
+		struct core_surface *core_surface = DC_SURFACE_TO_CORE(surface);
+		bool is_valid_address =
+				validate_surface_address(surface->address);
+
+		dal_logger_write(dc->ctx->logger,
+					LOG_MAJOR_INTERFACE_TRACE,
+					LOG_MINOR_COMPONENT_DC,
+					"0x%x:",
+					surface);
+
+		program_gamma(dc->ctx, surface,
+			DC_STREAM_TO_CORE(target->public.streams[0])->ipp,
+			DC_STREAM_TO_CORE(target->public.streams[0])->opp);
+
+		dc->hwss.set_plane_config(dc, core_surface, target);
+
+		if (is_valid_address)
+			dc->hwss.update_plane_address(dc, core_surface, target);
+	}
+
+	if (current_enabled_surface_count == 0 && new_enabled_surface_count > 0)
+		dc_target_enable_memory_requests(dc_target);
+
+	/* Lower display clock if necessary */
+	if (prev_disp_clk > dc->current_context.bw_results.dispclk_khz) {
+		dc->hwss.program_bw(dc, &dc->current_context);
+		pplib_apply_display_requirements(dc, &dc->current_context);
+	}
+
+	return true;
+
+unexpected_fail:
+	for (i = 0; i < new_surface_count; i++) {
+		target->status.surfaces[i] = NULL;
+	}
+	target->status.surface_count = 0;
+
+	return false;
+}
+
+bool dc_target_is_connected_to_sink(
+		const struct dc_target * dc_target,
+		const struct dc_sink *dc_sink)
+{
+	struct core_target *target = DC_TARGET_TO_CORE(dc_target);
+	uint8_t i;
+	for (i = 0; i < target->public.stream_count; i++) {
+		if (target->public.streams[i]->sink == dc_sink)
+			return true;
+	}
+	return false;
+}
+
+void dc_target_enable_memory_requests(struct dc_target *target)
+{
+	uint8_t i;
+	struct core_target *core_target = DC_TARGET_TO_CORE(target);
+	for (i = 0; i < core_target->public.stream_count; i++) {
+		struct timing_generator *tg =
+			DC_STREAM_TO_CORE(core_target->public.streams[i])->tg;
+
+		if (!tg->funcs->set_blank(tg, false)) {
+			dm_error("DC: failed to unblank crtc!\n");
+			BREAK_TO_DEBUGGER();
+		}
+	}
+}
+
+void dc_target_disable_memory_requests(struct dc_target *target)
+{
+	uint8_t i;
+	struct core_target *core_target = DC_TARGET_TO_CORE(target);
+	for (i = 0; i < core_target->public.stream_count; i++) {
+		struct timing_generator *tg =
+			DC_STREAM_TO_CORE(core_target->public.streams[i])->tg;
+
+		if (NULL == tg) {
+			dm_error("DC: timing generator is NULL!\n");
+			BREAK_TO_DEBUGGER();
+			continue;
+		}
+
+		if (false == tg->funcs->set_blank(tg, true)) {
+			dm_error("DC: failed to blank crtc!\n");
+			BREAK_TO_DEBUGGER();
+		}
+	}
+}
+
+/**
+ * Update the cursor attributes and set cursor surface address
+ */
+bool dc_target_set_cursor_attributes(
+	struct dc_target *dc_target,
+	const struct dc_cursor_attributes *attributes)
+{
+	struct core_target *core_target;
+	struct input_pixel_processor *ipp;
+
+	if (NULL == dc_target) {
+		dm_error("DC: dc_target is NULL!\n");
+		return false;
+	}
+
+	core_target = DC_TARGET_TO_CORE(dc_target);
+	ipp = DC_STREAM_TO_CORE(core_target->public.streams[0])->ipp;
+
+	if (NULL == ipp) {
+		dm_error("DC: input pixel processor is NULL!\n");
+		return false;
+	}
+
+	if (true == ipp->funcs->ipp_cursor_set_attributes(ipp, attributes))
+		return true;
+
+	return false;
+}
+
+bool dc_target_set_cursor_position(
+	struct dc_target *dc_target,
+	const struct dc_cursor_position *position)
+{
+	struct core_target *core_target;
+	struct input_pixel_processor *ipp;
+
+	if (NULL == dc_target) {
+		dm_error("DC: dc_target is NULL!\n");
+		return false;
+	}
+
+	if (NULL == position) {
+		dm_error("DC: cursor position is NULL!\n");
+		return false;
+	}
+
+	core_target = DC_TARGET_TO_CORE(dc_target);
+	ipp = DC_STREAM_TO_CORE(core_target->public.streams[0])->ipp;
+
+	if (NULL == ipp) {
+		dm_error("DC: input pixel processor is NULL!\n");
+		return false;
+	}
+
+
+	if (true == ipp->funcs->ipp_cursor_set_position(ipp, position))
+		return true;
+
+	return false;
+}
+
+/* TODO: #flip temporary to make flip work */
+uint8_t dc_target_get_link_index(const struct dc_target *dc_target)
+{
+	const struct core_target *target = CONST_DC_TARGET_TO_CORE(dc_target);
+	const struct core_sink *sink =
+		DC_SINK_TO_CORE(target->public.streams[0]->sink);
+
+	return sink->link->public.link_index;
+}
+
+uint32_t dc_target_get_vblank_counter(const struct dc_target *dc_target)
+{
+	struct core_target *core_target = DC_TARGET_TO_CORE(dc_target);
+	struct timing_generator *tg =
+		DC_STREAM_TO_CORE(core_target->public.streams[0])->tg;
+
+	return tg->funcs->get_frame_count(tg);
+}
+
+enum dc_irq_source dc_target_get_irq_src(
+	const struct dc_target *dc_target, const enum irq_type irq_type)
+{
+	struct core_target *core_target = DC_TARGET_TO_CORE(dc_target);
+
+	/* #TODO - Remove the assumption that the controller is always in the
+	 * first stream of a core target */
+	struct core_stream *stream =
+		DC_STREAM_TO_CORE(core_target->public.streams[0]);
+	uint8_t controller_idx = stream->controller_idx;
+
+	/* Get controller id */
+	enum controller_id crtc_id = controller_idx + 1;
+
+	/* Calculate controller offset */
+	unsigned int offset = crtc_id - CONTROLLER_ID_D0;
+	unsigned int base = irq_type;
+
+	/* Calculate irq source */
+	enum dc_irq_source src = base + offset;
+
+	return src;
+}
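+
+/*
+ * Example for dc_target_get_irq_src(), assuming CONTROLLER_ID_D0 is the
+ * first controller id and dc_irq_source values are contiguous per irq type:
+ * controller_idx 0 gives offset 0, so src == irq_type; controller_idx 2
+ * gives src = irq_type + 2.
+ */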
+
+void dc_target_log(
+	const struct dc_target *dc_target,
+	struct dal_logger *dal_logger,
+	enum log_major log_major,
+	enum log_minor log_minor)
+{
+	int i;
+
+	const struct core_target *core_target =
+			CONST_DC_TARGET_TO_CORE(dc_target);
+
+	dal_logger_write(dal_logger,
+			log_major,
+			log_minor,
+			"core_target 0x%x: surface_count=%d, stream_count=%d\n",
+			core_target,
+			core_target->status.surface_count,
+			core_target->public.stream_count);
+
+	for (i = 0; i < core_target->public.stream_count; i++) {
+		const struct core_stream *core_stream =
+			DC_STREAM_TO_CORE(core_target->public.streams[i]);
+
+		dal_logger_write(dal_logger,
+			log_major,
+			log_minor,
+			"core_stream 0x%x: src: %d, %d, %d, %d; dst: %d, %d, %d, %d;\n",
+			core_stream,
+			core_stream->public.src.x,
+			core_stream->public.src.y,
+			core_stream->public.src.width,
+			core_stream->public.src.height,
+			core_stream->public.dst.x,
+			core_stream->public.dst.y,
+			core_stream->public.dst.width,
+			core_stream->public.dst.height);
+		dal_logger_write(dal_logger,
+			log_major,
+			log_minor,
+			"\tpix_clk_khz: %d, h_total: %d, v_total: %d\n",
+			core_stream->public.timing.pix_clk_khz,
+			core_stream->public.timing.h_total,
+			core_stream->public.timing.v_total);
+		dal_logger_write(dal_logger,
+			log_major,
+			log_minor,
+			"\tsink name: %s, serial: %d\n",
+			core_stream->sink->public.edid_caps.display_name,
+			core_stream->sink->public.edid_caps.serial_number);
+		dal_logger_write(dal_logger,
+			log_major,
+			log_minor,
+			"\tlink: %d\n",
+			core_stream->sink->link->public.link_index);
+	}
+}
-- 
2.1.4

_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel

2016-02-16 22:28   ` [PATCH v2 24/26] drm/amd/dal: Add display core Harry Wentland
2016-02-16 22:28   ` [PATCH v2 25/26] drm/amd/dal: Adding amdgpu_dm for dal Harry Wentland
2016-02-16 22:28   ` [PATCH v2 26/26] drm/amdgpu: Use dal driver for Carrizo, Tonga, and Fiji Harry Wentland
2016-02-29 21:56 ` [PATCH v3 00/26] Enabling new DAL display driver for amdgpu on Carrizo and Tonga Harry Wentland
2016-02-29 21:56   ` [PATCH v3 01/26] drm/amd/dal: Add dal headers Harry Wentland
2016-02-29 21:56   ` [PATCH v3 05/26] drm/amd/dal: GPIO (General Purpose IO) Harry Wentland
2016-02-29 21:56   ` [PATCH v3 07/26] drm/amd/dal: BIOS Parser Harry Wentland
2016-02-29 21:56   ` [PATCH v3 24/26] drm/amd/dal: Add display core Harry Wentland
2016-02-29 21:56   ` [PATCH v3 25/26] drm/amd/dal: Adding amdgpu_dm for dal Harry Wentland
2016-02-29 21:56   ` [PATCH v3 26/26] drm/amdgpu: Use dal driver for Carrizo, Tonga, and Fiji Harry Wentland
