From: Ville Syrjala <ville.syrjala@linux.intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: [Intel-gfx] [PATCH v3 02/18] drm/i915/bios: Generate LFP data table pointers if the VBT lacks them
Date: Tue, 26 Apr 2022 22:32:06 +0300
Message-ID: <20220426193222.3422-3-ville.syrjala@linux.intel.com>
In-Reply-To: <20220426193222.3422-1-ville.syrjala@linux.intel.com>

From: Ville Syrjälä <ville.syrjala@linux.intel.com>

Modern VBTs no longer contain the LFP data table pointers
block (41). We expect to have one in order to parse the LFP
data block (42), so let's make one up.

Since the fp_timing table has a variable size we must somehow
determine how big it is. Rather than just hardcode the size, we
look for the terminator bytes (0xffff) to figure out where each
table entry starts. dvo_timing, panel_pnp_id, and panel_name are
expected to have a fixed size.
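
As a rough standalone illustration of the idea (plain userspace C,
with hypothetical find_terminator()/lfp_entry_sizes() helpers rather
than the actual functions added by the diff below), the sizes fall
out of the terminator scan like this:

#include <stddef.h>
#include <stdint.h>

static const uint8_t *find_terminator(const uint8_t *data, size_t size)
{
	size_t i;

	for (i = 0; i + 1 < size; i++) {
		if (data[i] == 0xff && data[i + 1] == 0xff)
			return &data[i];
	}

	return NULL;
}

/* Returns 0 on success, -1 if the two terminators can't be found. */
static int lfp_entry_sizes(const uint8_t *block, size_t block_size,
			   size_t *fp_timing_size, size_t *entry_stride)
{
	const uint8_t *t0, *t1;

	t0 = find_terminator(block, block_size);
	if (!t0)
		return -1;

	t1 = find_terminator(t0 + 2, block_size - (t0 - block) - 2);
	if (!t1)
		return -1;

	/* panel 0 fp_timing: everything up to and including the terminator */
	*fp_timing_size = (t0 - block) + 2;
	/* distance between terminators = fp_timing + dvo_timing + pnp_id */
	*entry_stride = t1 - t0;

	return 0;
}

The generated pointer table then just replicates these per-panel
offsets for all 16 entries.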

This has been observed on various machines, e.g. TGL with BDB
version 240, CML with BDB version 231, etc. The most recent
VBT I've observed that still had block 41 had BDB version
228. So presumably the cutoff (if an exact cutoff even exists)
is somewhere around BDB version 229-231.

v2: kfree the thing we allocated, not the thing+3 bytes
v3: Do the debugprint only if we find the LFP data block
v4: Fix t0 null check (Jani)

Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
---
 drivers/gpu/drm/i915/display/intel_bios.c | 134 +++++++++++++++++++++-
 1 file changed, 133 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 8a1086721525..7bc3d55b6bb0 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -311,16 +311,144 @@ static bool fixup_lfp_data_ptrs(const void *bdb, void *ptrs_block)
 	return validate_lfp_data_ptrs(bdb, ptrs);
 }
 
+static const void *find_fp_timing_terminator(const u8 *data, int size)
+{
+	int i;
+
+	for (i = 0; i < size - 1; i++) {
+		if (data[i] == 0xff && data[i+1] == 0xff)
+			return &data[i];
+	}
+
+	return NULL;
+}
+
+static int make_lfp_data_ptr(struct lvds_lfp_data_ptr_table *table,
+			     int table_size, int total_size)
+{
+	if (total_size < table_size)
+		return total_size;
+
+	table->table_size = table_size;
+	table->offset = total_size - table_size;
+
+	return total_size - table_size;
+}
+
+static void next_lfp_data_ptr(struct lvds_lfp_data_ptr_table *next,
+			      const struct lvds_lfp_data_ptr_table *prev,
+			      int size)
+{
+	next->table_size = prev->table_size;
+	next->offset = prev->offset + size;
+}
+
+static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
+				    const void *bdb)
+{
+	int i, size, table_size, block_size, offset;
+	const void *t0, *t1, *block;
+	struct bdb_lvds_lfp_data_ptrs *ptrs;
+	void *ptrs_block;
+
+	block = find_raw_section(bdb, BDB_LVDS_LFP_DATA);
+	if (!block)
+		return NULL;
+
+	drm_dbg_kms(&i915->drm, "Generating LFP data table pointers\n");
+
+	block_size = get_blocksize(block);
+
+	size = block_size;
+	t0 = find_fp_timing_terminator(block, size);
+	if (!t0)
+		return NULL;
+
+	size -= t0 - block - 2;
+	t1 = find_fp_timing_terminator(t0 + 2, size);
+	if (!t1)
+		return NULL;
+
+	size = t1 - t0;
+	if (size * 16 > block_size)
+		return NULL;
+
+	ptrs_block = kzalloc(sizeof(*ptrs) + 3, GFP_KERNEL);
+	if (!ptrs_block)
+		return NULL;
+
+	*(u8 *)(ptrs_block + 0) = BDB_LVDS_LFP_DATA_PTRS;
+	*(u16 *)(ptrs_block + 1) = sizeof(*ptrs);
+	ptrs = ptrs_block + 3;
+
+	table_size = sizeof(struct lvds_pnp_id);
+	size = make_lfp_data_ptr(&ptrs->ptr[0].panel_pnp_id, table_size, size);
+
+	table_size = sizeof(struct lvds_dvo_timing);
+	size = make_lfp_data_ptr(&ptrs->ptr[0].dvo_timing, table_size, size);
+
+	table_size = t0 - block + 2;
+	size = make_lfp_data_ptr(&ptrs->ptr[0].fp_timing, table_size, size);
+
+	if (ptrs->ptr[0].fp_timing.table_size)
+		ptrs->lvds_entries++;
+	if (ptrs->ptr[0].dvo_timing.table_size)
+		ptrs->lvds_entries++;
+	if (ptrs->ptr[0].panel_pnp_id.table_size)
+		ptrs->lvds_entries++;
+
+	if (size != 0 || ptrs->lvds_entries != 3) {
+		kfree(ptrs);
+		return NULL;
+	}
+
+	size = t1 - t0;
+	for (i = 1; i < 16; i++) {
+		next_lfp_data_ptr(&ptrs->ptr[i].fp_timing, &ptrs->ptr[i-1].fp_timing, size);
+		next_lfp_data_ptr(&ptrs->ptr[i].dvo_timing, &ptrs->ptr[i-1].dvo_timing, size);
+		next_lfp_data_ptr(&ptrs->ptr[i].panel_pnp_id, &ptrs->ptr[i-1].panel_pnp_id, size);
+	}
+
+	size = t1 - t0;
+	table_size = sizeof(struct lvds_lfp_panel_name);
+
+	if (16 * (size + table_size) <= block_size) {
+		ptrs->panel_name.table_size = table_size;
+		ptrs->panel_name.offset = size * 16;
+	}
+
+	offset = block - bdb;
+
+	for (i = 0; i < 16; i++) {
+		ptrs->ptr[i].fp_timing.offset += offset;
+		ptrs->ptr[i].dvo_timing.offset += offset;
+		ptrs->ptr[i].panel_pnp_id.offset += offset;
+	}
+
+	if (ptrs->panel_name.table_size)
+		ptrs->panel_name.offset += offset;
+
+	return ptrs_block;
+}
+
 static void
 init_bdb_block(struct drm_i915_private *i915,
 	       const void *bdb, enum bdb_block_id section_id,
 	       size_t min_size)
 {
 	struct bdb_block_entry *entry;
+	void *temp_block = NULL;
 	const void *block;
 	size_t block_size;
 
 	block = find_raw_section(bdb, section_id);
+
+	/* Modern VBTs lack the LFP data table pointers block, make one up */
+	if (!block && section_id == BDB_LVDS_LFP_DATA_PTRS) {
+		temp_block = generate_lfp_data_ptrs(i915, bdb);
+		if (temp_block)
+			block = temp_block + 3;
+	}
 	if (!block)
 		return;
 
@@ -331,12 +459,16 @@ init_bdb_block(struct drm_i915_private *i915,
 
 	entry = kzalloc(struct_size(entry, data, max(min_size, block_size) + 3),
 			GFP_KERNEL);
-	if (!entry)
+	if (!entry) {
+		kfree(temp_block);
 		return;
+	}
 
 	entry->section_id = section_id;
 	memcpy(entry->data, block - 3, block_size + 3);
 
+	kfree(temp_block);
+
 	drm_dbg_kms(&i915->drm, "Found BDB block %d (size %zu, min size %zu)\n",
 		    section_id, block_size, min_size);
 
-- 
2.35.1

