From mboxrd@z Thu Jan 1 00:00:00 1970 From: Ville =?iso-8859-1?Q?Syrj=E4l=E4?= Subject: Re: [PATCH 64/71] drm/i915/chv: Don't use PCS group access reads Date: Wed, 9 Apr 2014 19:56:50 +0300 Message-ID: <20140409165649.GT18465@intel.com> References: <1397039349-10639-1-git-send-email-ville.syrjala@linux.intel.com> <1397039349-10639-65-git-send-email-ville.syrjala@linux.intel.com> <20140409161838.GR9262@phenom.ffwll.local> Mime-Version: 1.0 Content-Type: text/plain; charset="iso-8859-1" Content-Transfer-Encoding: quoted-printable Return-path: Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by gabe.freedesktop.org (Postfix) with ESMTP id 536266E018 for ; Wed, 9 Apr 2014 09:59:26 -0700 (PDT) Content-Disposition: inline In-Reply-To: <20140409161838.GR9262@phenom.ffwll.local> List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: intel-gfx-bounces@lists.freedesktop.org Sender: "Intel-gfx" To: Daniel Vetter Cc: intel-gfx@lists.freedesktop.org List-Id: intel-gfx@lists.freedesktop.org On Wed, Apr 09, 2014 at 06:18:38PM +0200, Daniel Vetter wrote: > On Wed, Apr 09, 2014 at 01:29:02PM +0300, ville.syrjala@linux.intel.com w= rote: > > From: Ville Syrj=E4l=E4 > > = > > All PCS groups access reads return 0xffffffff, so we can't use group > > access for RMW cycles. Instead target each spline separately. > = > I have no idea what PCS means here and spline ... Can you please expand > for those who haven't yet lost their souls in chv docs? Just so we have a > commonly-understood jargon for talking about this stuff. I guess we should have that somewhere as a comment. The same terminology applies to VLV as well. 
> = > Thanks, Daniel > = > > = > > Signed-off-by: Ville Syrj=E4l=E4 > > --- > > drivers/gpu/drm/i915/i915_reg.h | 14 ++++++++++++++ > > drivers/gpu/drm/i915/intel_dp.c | 32 ++++++++++++++++++++++++-------- > > drivers/gpu/drm/i915/intel_hdmi.c | 34 +++++++++++++++++++++++++------= --- > > 3 files changed, 63 insertions(+), 17 deletions(-) > > = > > diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i91= 5_reg.h > > index 4617fb3..ffed03e 100644 > > --- a/drivers/gpu/drm/i915/i915_reg.h > > +++ b/drivers/gpu/drm/i915/i915_reg.h > > @@ -654,6 +654,13 @@ enum punit_power_well { > > #define DPIO_PCS_TX_LANE1_RESET (1<<7) > > #define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1) > > = > > +#define _VLV_PCS01_DW0_CH0 0x200 > > +#define _VLV_PCS23_DW0_CH0 0x400 > > +#define _VLV_PCS01_DW0_CH1 0x2600 > > +#define _VLV_PCS23_DW0_CH1 0x2800 > > +#define VLV_PCS01_DW0(ch) _PORT(ch, _VLV_PCS01_DW0_CH0, _VLV_PCS01_DW0= _CH1) > > +#define VLV_PCS23_DW0(ch) _PORT(ch, _VLV_PCS23_DW0_CH0, _VLV_PCS23_DW0= _CH1) > > + > > #define _VLV_PCS_DW1_CH0 0x8204 > > #define _VLV_PCS_DW1_CH1 0x8404 > > #define CHV_PCS_REQ_SOFTRESET_EN (1<<23) > > @@ -663,6 +670,13 @@ enum punit_power_well { > > #define DPIO_PCS_CLK_SOFT_RESET (1<<5) > > #define VLV_PCS_DW1(ch) _PORT(ch, _VLV_PCS_DW1_CH0, _VLV_PCS_DW1_CH1) > > = > > +#define _VLV_PCS01_DW1_CH0 0x204 > > +#define _VLV_PCS23_DW1_CH0 0x404 > > +#define _VLV_PCS01_DW1_CH1 0x2604 > > +#define _VLV_PCS23_DW1_CH1 0x2804 > > +#define VLV_PCS01_DW1(ch) _PORT(ch, _VLV_PCS01_DW1_CH0, _VLV_PCS01_DW1= _CH1) > > +#define VLV_PCS23_DW1(ch) _PORT(ch, _VLV_PCS23_DW1_CH0, _VLV_PCS23_DW1= _CH1) > > + > > #define _VLV_PCS_DW8_CH0 0x8220 > > #define _VLV_PCS_DW8_CH1 0x8420 > > #define VLV_PCS_DW8(ch) _PORT(ch, _VLV_PCS_DW8_CH0, _VLV_PCS_DW8_CH1) > > diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/int= el_dp.c > > index 079e0e3..cc7bccd3 100644 > > --- a/drivers/gpu/drm/i915/intel_dp.c > > +++ 
b/drivers/gpu/drm/i915/intel_dp.c > > @@ -1845,13 +1845,21 @@ static void chv_post_disable_dp(struct intel_en= coder *encoder) > > mutex_lock(&dev_priv->dpio_lock); > > = > > /* Propagate soft reset to data lane reset */ > > - val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW1(ch)); > > + val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); > > val |=3D CHV_PCS_REQ_SOFTRESET_EN; > > - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(ch), val); > > + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); > > = > > - val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW0(ch)); > > + val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); > > + val |=3D CHV_PCS_REQ_SOFTRESET_EN; > > + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); > > + > > + val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); > > + val &=3D ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); > > + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); > > + > > + val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); > > val &=3D ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); > > - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(ch), val); > > + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); > > = > > mutex_unlock(&dev_priv->dpio_lock); > > } > > @@ -1983,13 +1991,21 @@ static void chv_pre_enable_dp(struct intel_enco= der *encoder) > > mutex_lock(&dev_priv->dpio_lock); > > = > > /* Deassert soft data lane reset*/ > > - val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW1(ch)); > > + val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); > > val |=3D CHV_PCS_REQ_SOFTRESET_EN; > > - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(ch), val); > > + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); > > + > > + val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); > > + val |=3D CHV_PCS_REQ_SOFTRESET_EN; > > + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); > > + > > + val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); > > + val |=3D (DPIO_PCS_TX_LANE2_RESET 
| DPIO_PCS_TX_LANE1_RESET); > > + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); > > = > > - val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW0(ch)); > > + val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); > > val |=3D (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); > > - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(ch), val); > > + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); > > = > > /* Program Tx lane latency optimal setting*/ > > for (i =3D 0; i < 4; i++) { > > diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/i= ntel_hdmi.c > > index 6a2152b..c3896b0 100644 > > --- a/drivers/gpu/drm/i915/intel_hdmi.c > > +++ b/drivers/gpu/drm/i915/intel_hdmi.c > > @@ -1216,13 +1216,21 @@ static void chv_hdmi_post_disable(struct intel_= encoder *encoder) > > mutex_lock(&dev_priv->dpio_lock); > > = > > /* Propagate soft reset to data lane reset */ > > - val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW1(ch)); > > + val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); > > val |=3D CHV_PCS_REQ_SOFTRESET_EN; > > - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(ch), val); > > - > > - val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW0(ch)); > > + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); > > + > > + val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); > > + val |=3D CHV_PCS_REQ_SOFTRESET_EN; > > + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); > > + > > + val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); > > + val &=3D ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); > > + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); > > + > > + val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); > > val &=3D ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); > > - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(ch), val); > > + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); > > = > > mutex_unlock(&dev_priv->dpio_lock); > > } > > @@ -1242,13 +1250,21 @@ static void 
chv_hdmi_pre_enable(struct intel_en= coder *encoder) > > mutex_lock(&dev_priv->dpio_lock); > > = > > /* Deassert soft data lane reset*/ > > - val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW1(ch)); > > + val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); > > val |=3D CHV_PCS_REQ_SOFTRESET_EN; > > - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(ch), val); > > + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val); > > + > > + val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch)); > > + val |=3D CHV_PCS_REQ_SOFTRESET_EN; > > + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val); > > + > > + val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch)); > > + val |=3D (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); > > + vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val); > > = > > - val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS_DW0(ch)); > > + val =3D vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch)); > > val |=3D (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET); > > - vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(ch), val); > > + vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val); > > = > > /* Program Tx latency optimal setting */ > > for (i =3D 0; i < 4; i++) { > > -- = > > 1.8.3.2 > > = > > _______________________________________________ > > Intel-gfx mailing list > > Intel-gfx@lists.freedesktop.org > > http://lists.freedesktop.org/mailman/listinfo/intel-gfx > = > -- = > Daniel Vetter > Software Engineer, Intel Corporation > +41 (0) 79 365 57 48 - http://blog.ffwll.ch -- = Ville Syrj=E4l=E4 Intel OTC