From: Benjamin Gaignard <benjamin.gaignard@linaro.org>
To: Benjamin Gaignard <benjamin.gaignard@st.com>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>,
	Maxime Ripard <mripard@kernel.org>, Sean Paul <sean@poorly.run>,
	David Airlie <airlied@linux.ie>, Daniel Vetter <daniel@ffwll.ch>,
	Linux Kernel Mailing List <linux-kernel@vger.kernel.org>,
	ML dri-devel <dri-devel@lists.freedesktop.org>
Subject: Re: [PATCH v3] drm/dp_mst: Fix W=1 warnings
Date: Tue, 3 Dec 2019 17:58:45 +0100	[thread overview]
Message-ID: <CA+M3ks7fOpftvvo_bu=xFVW-e-v-8_Y5v8Y_f_oA-SspUSzqEA@mail.gmail.com> (raw)
In-Reply-To: <20191128135057.20020-1-benjamin.gaignard@st.com>

On Sat, Nov 30, 2019 at 14:24, Benjamin Gaignard
<benjamin.gaignard@st.com> wrote:
>
> Fix the warnings that show up with W=1.
> They are all about unused-but-set variables.
> Where a function's return value is never used, change the function to
> return void.
>
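
As context for anyone hitting these for the first time: the warnings in
question are -Wunused-but-set-variable (which the kernel only enables at
W=1) plus a few helpers whose constant return value no caller ever
checks. Below is a minimal, self-contained sketch of the fix pattern;
the struct and function names are made up for illustration, not taken
from the driver:

#include <stdio.h>
#include <string.h>

struct msg {
	char buf[32];
	int len;
};

/*
 * Before: W=1 flags 'crc' as set but never used, and the constant
 * return value is never checked by any caller:
 *
 *	static int build_msg(struct msg *m, const char *payload)
 *	{
 *		int crc;
 *
 *		crc = compute_crc(payload);	// set but never used
 *		strncpy(m->buf, payload, sizeof(m->buf) - 1);
 *		m->len = strlen(m->buf);
 *		return 0;			// never consumed
 *	}
 *
 * After: drop the dead variable and make the function void.
 */
static void build_msg(struct msg *m, const char *payload)
{
	strncpy(m->buf, payload, sizeof(m->buf) - 1);
	m->buf[sizeof(m->buf) - 1] = '\0';
	m->len = strlen(m->buf);
}

int main(void)
{
	struct msg m;

	build_msg(&m, "hello");
	printf("%d: %s\n", m.len, m.buf);
	return 0;
}

The "after" form should build cleanly with gcc -Wall, which is roughly
the class of warning that W=1 re-enables here.
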
> Signed-off-by: Benjamin Gaignard <benjamin.gaignard@st.com>
> ---
> CC: Jani Nikula <jani.nikula@linux.intel.com>
>
> changes in version 3:
> - remove the hunk that may conflict with c485e2c97dae
>   ("drm/dp_mst: Refactor pdt setup/teardown, add more locking")

I tested this patch today on drm-misc-next, where the other patch is
present; it applies cleanly and I no longer see any warnings.
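
For anyone wanting to reproduce the check, a single-object W=1 build is
enough (this assumes an already-configured tree; the object path is just
the file touched by this patch):

  make W=1 drivers/gpu/drm/drm_dp_mst_topology.o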

Benjamin
>
> changes in version 2:
> - fix indentations
> - where possible, change function prototypes to void
>
>  drivers/gpu/drm/drm_dp_mst_topology.c | 83 +++++++++++++----------------------
>  1 file changed, 31 insertions(+), 52 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
> index 1437bc46368b..d5cb5688b5dd 100644
> --- a/drivers/gpu/drm/drm_dp_mst_topology.c
> +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
> @@ -674,7 +674,6 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
>                                       u8 *replybuf, u8 replybuflen, bool hdr)
>  {
>         int ret;
> -       u8 crc4;
>
>         if (hdr) {
>                 u8 hdrlen;
> @@ -716,8 +715,6 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
>         }
>
>         if (msg->curchunk_idx >= msg->curchunk_len) {
> -               /* do CRC */
> -               crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
>                 /* copy chunk into bigger msg */
>                 memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
>                 msg->curlen += msg->curchunk_len - 1;
> @@ -1014,7 +1011,7 @@ static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
>         }
>  }
>
> -static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
> +static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
>  {
>         struct drm_dp_sideband_msg_req_body req;
>
> @@ -1024,17 +1021,14 @@ static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32
>         req.u.dpcd_write.num_bytes = num_bytes;
>         req.u.dpcd_write.bytes = bytes;
>         drm_dp_encode_sideband_req(&req, msg);
> -
> -       return 0;
>  }
>
> -static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
> +static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
>  {
>         struct drm_dp_sideband_msg_req_body req;
>
>         req.req_type = DP_LINK_ADDRESS;
>         drm_dp_encode_sideband_req(&req, msg);
> -       return 0;
>  }
>
>  static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
> @@ -1048,7 +1042,7 @@ static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int por
>         return 0;
>  }
>
> -static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
> +static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
>                                   u8 vcpi, uint16_t pbn,
>                                   u8 number_sdp_streams,
>                                   u8 *sdp_stream_sink)
> @@ -1064,10 +1058,9 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n
>                    number_sdp_streams);
>         drm_dp_encode_sideband_req(&req, msg);
>         msg->path_msg = true;
> -       return 0;
>  }
>
> -static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
> +static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
>                                   int port_num, bool power_up)
>  {
>         struct drm_dp_sideband_msg_req_body req;
> @@ -1080,7 +1073,6 @@ static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
>         req.u.port_num.port_number = port_num;
>         drm_dp_encode_sideband_req(&req, msg);
>         msg->path_msg = true;
> -       return 0;
>  }
>
>  static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
> @@ -1746,14 +1738,13 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
>   */
>  static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
>  {
> -       int ret;
>         u8 rad[6], lct;
>         bool send_link = false;
>         switch (port->pdt) {
>         case DP_PEER_DEVICE_DP_LEGACY_CONV:
>         case DP_PEER_DEVICE_SST_SINK:
>                 /* add i2c over sideband */
> -               ret = drm_dp_mst_register_i2c_bus(&port->aux);
> +               drm_dp_mst_register_i2c_bus(&port->aux);
>                 break;
>         case DP_PEER_DEVICE_MST_BRANCHING:
>                 lct = drm_dp_calculate_rad(port, rad);
> @@ -1823,25 +1814,20 @@ ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
>
>  static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
>  {
> -       int ret;
> -
>         memcpy(mstb->guid, guid, 16);
>
>         if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
>                 if (mstb->port_parent) {
> -                       ret = drm_dp_send_dpcd_write(
> -                                       mstb->mgr,
> -                                       mstb->port_parent,
> -                                       DP_GUID,
> -                                       16,
> -                                       mstb->guid);
> +                       drm_dp_send_dpcd_write(mstb->mgr,
> +                                              mstb->port_parent,
> +                                              DP_GUID,
> +                                              16,
> +                                              mstb->guid);
>                 } else {
> -
> -                       ret = drm_dp_dpcd_write(
> -                                       mstb->mgr->aux,
> -                                       DP_GUID,
> -                                       mstb->guid,
> -                                       16);
> +                       drm_dp_dpcd_write(mstb->mgr->aux,
> +                                         DP_GUID,
> +                                         mstb->guid,
> +                                         16);
>                 }
>         }
>  }
> @@ -2197,7 +2183,7 @@ static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
>         return false;
>  }
>
> -static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
> +static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
>  {
>         struct drm_dp_sideband_msg_req_body req;
>
> @@ -2206,8 +2192,6 @@ static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32
>         req.u.dpcd_read.dpcd_address = offset;
>         req.u.dpcd_read.num_bytes = num_bytes;
>         drm_dp_encode_sideband_req(&req, msg);
> -
> -       return 0;
>  }
>
>  static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
> @@ -2429,14 +2413,14 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
>  {
>         struct drm_dp_sideband_msg_tx *txmsg;
>         struct drm_dp_link_address_ack_reply *reply;
> -       int i, len, ret;
> +       int i, ret;
>
>         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
>         if (!txmsg)
>                 return;
>
>         txmsg->dst = mstb;
> -       len = build_link_address(txmsg);
> +       build_link_address(txmsg);
>
>         mstb->link_address_sent = true;
>         drm_dp_queue_down_tx(mgr, txmsg);
> @@ -2478,7 +2462,6 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
>  {
>         struct drm_dp_enum_path_resources_ack_reply *path_res;
>         struct drm_dp_sideband_msg_tx *txmsg;
> -       int len;
>         int ret;
>
>         txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
> @@ -2486,7 +2469,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
>                 return -ENOMEM;
>
>         txmsg->dst = mstb;
> -       len = build_enum_path_resources(txmsg, port->port_num);
> +       build_enum_path_resources(txmsg, port->port_num);
>
>         drm_dp_queue_down_tx(mgr, txmsg);
>
> @@ -2569,7 +2552,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
>  {
>         struct drm_dp_sideband_msg_tx *txmsg;
>         struct drm_dp_mst_branch *mstb;
> -       int len, ret, port_num;
> +       int ret, port_num;
>         u8 sinks[DRM_DP_MAX_SDP_STREAMS];
>         int i;
>
> @@ -2594,9 +2577,9 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
>                 sinks[i] = i;
>
>         txmsg->dst = mstb;
> -       len = build_allocate_payload(txmsg, port_num,
> -                                    id,
> -                                    pbn, port->num_sdp_streams, sinks);
> +       build_allocate_payload(txmsg, port_num,
> +                              id,
> +                              pbn, port->num_sdp_streams, sinks);
>
>         drm_dp_queue_down_tx(mgr, txmsg);
>
> @@ -2625,7 +2608,7 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
>                                  struct drm_dp_mst_port *port, bool power_up)
>  {
>         struct drm_dp_sideband_msg_tx *txmsg;
> -       int len, ret;
> +       int ret;
>
>         port = drm_dp_mst_topology_get_port_validated(mgr, port);
>         if (!port)
> @@ -2638,7 +2621,7 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
>         }
>
>         txmsg->dst = port->parent;
> -       len = build_power_updown_phy(txmsg, port->port_num, power_up);
> +       build_power_updown_phy(txmsg, port->port_num, power_up);
>         drm_dp_queue_down_tx(mgr, txmsg);
>
>         ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
> @@ -2858,7 +2841,6 @@ static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
>                                  struct drm_dp_mst_port *port,
>                                  int offset, int size, u8 *bytes)
>  {
> -       int len;
>         int ret = 0;
>         struct drm_dp_sideband_msg_tx *txmsg;
>         struct drm_dp_mst_branch *mstb;
> @@ -2873,7 +2855,7 @@ static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
>                 goto fail_put;
>         }
>
> -       len = build_dpcd_read(txmsg, port->port_num, offset, size);
> +       build_dpcd_read(txmsg, port->port_num, offset, size);
>         txmsg->dst = port->parent;
>
>         drm_dp_queue_down_tx(mgr, txmsg);
> @@ -2911,7 +2893,6 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
>                                   struct drm_dp_mst_port *port,
>                                   int offset, int size, u8 *bytes)
>  {
> -       int len;
>         int ret;
>         struct drm_dp_sideband_msg_tx *txmsg;
>         struct drm_dp_mst_branch *mstb;
> @@ -2926,7 +2907,7 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
>                 goto fail_put;
>         }
>
> -       len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
> +       build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
>         txmsg->dst = mstb;
>
>         drm_dp_queue_down_tx(mgr, txmsg);
> @@ -3149,7 +3130,7 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
>  {
>         int len;
>         u8 replyblock[32];
> -       int replylen, origlen, curreply;
> +       int replylen, curreply;
>         int ret;
>         struct drm_dp_sideband_msg_rx *msg;
>         int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
> @@ -3169,7 +3150,6 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
>         }
>         replylen = msg->curchunk_len + msg->curchunk_hdrlen;
>
> -       origlen = replylen;
>         replylen -= len;
>         curreply = len;
>         while (replylen > 0) {
> @@ -3961,17 +3941,16 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
>         mutex_lock(&mgr->lock);
>         if (mgr->mst_primary) {
>                 u8 buf[DP_PAYLOAD_TABLE_SIZE];
> -               int ret;
>
> -               ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
> +               drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
>                 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
> -               ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
> +               drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
>                 seq_printf(m, "faux/mst: %*ph\n", 2, buf);
> -               ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
> +               drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
>                 seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
>
>                 /* dump the standard OUI branch header */
> -               ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
> +               drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
>                 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
>                 for (i = 0x3; i < 0x8 && buf[i]; i++)
>                         seq_printf(m, "%c", buf[i]);
> --
> 2.15.0
>
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel


Thread overview: 14+ messages
2019-11-28 13:50 [PATCH v3] drm/dp_mst: Fix W=1 warnings Benjamin Gaignard
2019-12-03 16:58 ` Benjamin Gaignard [this message]
2019-12-04 16:47 ` Jani Nikula
2019-12-16  8:28   ` Benjamin Gaignard
2019-12-20 14:03     ` Benjamin Gaignard
2020-01-07 13:11       ` Benjamin Gaignard
2020-01-18  0:55         ` Lyude Paul
2020-01-18  8:31           ` Benjamin Gaignard
2020-01-24 22:08         ` Lyude Paul
2020-01-27 13:08           ` Benjamin Gaignard
2020-01-30 22:22             ` Lyude Paul
2020-01-30 23:22               ` Dave Airlie
2020-01-31  8:08                 ` Benjamin GAIGNARD
2020-02-03  9:40                   ` Daniel Vetter
