tree:   https://github.com/dsahern/linux nvme-tcp-offload
head:   b9ae3391dc24a401030b31ad5363e0471ae386e0
commit: cd0edfa1a5a01a42b609ffcf907dd3d52cba8045 [12/15] net/mlx5e: NVMEoTCP DDP offload control path
config: s390-allyesconfig (attached as .config)
compiler: s390-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/dsahern/linux/commit/cd0edfa1a5a01a42b609ffcf907dd3d52cba8045
        git remote add dsahern-linux https://github.com/dsahern/linux
        git fetch --no-tags dsahern-linux nvme-tcp-offload
        git checkout cd0edfa1a5a01a42b609ffcf907dd3d52cba8045
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=s390

If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot

All warnings (new ones prefixed by >>):

   drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c: In function 'mlx5e_napi_poll':
>> drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c:163:2: warning: ISO C90 forbids mixed declarations and code [-Wdeclaration-after-statement]
     163 |  struct list_head *cur;
         |  ^~~~~~

vim +163 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c

   116	
   117	int mlx5e_napi_poll(struct napi_struct *napi, int budget)
   118	{
   119		struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
   120						       napi);
   121		struct mlx5e_ch_stats *ch_stats = c->stats;
   122		struct mlx5e_xdpsq *xsksq = &c->xsksq;
   123		struct mlx5e_rq *xskrq = &c->xskrq;
   124		struct mlx5e_rq *rq = &c->rq;
   125		bool aff_change = false;
   126		bool busy_xsk = false;
   127		bool busy = false;
   128		int work_done = 0;
   129		bool xsk_open;
   130		int i;
   131	
   132		rcu_read_lock();
   133	
   134		xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state);
   135	
   136		ch_stats->poll++;
   137	
   138		for (i = 0; i < c->num_tc; i++)
   139			busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
   140	
   141		busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq);
   142	
   143		if (c->xdp)
   144			busy |= mlx5e_poll_xdpsq_cq(&c->rq_xdpsq.cq);
   145	
   146		if (likely(budget)) { /* budget=0 means: don't poll rx rings */
   147			if (xsk_open)
   148				work_done = mlx5e_poll_rx_cq(&xskrq->cq, budget);
   149	
   150			if (likely(budget - work_done))
   151				work_done += mlx5e_poll_rx_cq(&rq->cq, budget - work_done);
   152	
   153			busy |= work_done == budget;
   154		}
   155	
   156		mlx5e_poll_ico_cq(&c->icosq.cq);
   157		if (mlx5e_poll_ico_cq(&c->async_icosq.cq))
   158			/* Don't clear the flag if nothing was polled to prevent
   159			 * queueing more WQEs and overflowing the async ICOSQ.
   160			 */
   161			clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->async_icosq.state);
   162	#ifdef CONFIG_MLX5_EN_NVMEOTCP
 > 163		struct list_head *cur;
   164		struct mlx5e_nvmeotcp_sq *nvmeotcp_sq;
   165	
   166		list_for_each(cur, &c->list_nvmeotcpsq) {
   167			nvmeotcp_sq = list_entry(cur, struct mlx5e_nvmeotcp_sq, list);
   168			mlx5e_poll_ico_cq(&nvmeotcp_sq->icosq.cq);
   169		}
   170	#endif
   171	
   172		busy |= INDIRECT_CALL_2(rq->post_wqes,
   173					mlx5e_post_rx_mpwqes,
   174					mlx5e_post_rx_wqes,
   175					rq);
   176		if (xsk_open) {
   177			busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
   178			busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq);
   179		}
   180	
   181		busy |= busy_xsk;
   182	
   183		if (busy) {
   184			if (likely(mlx5e_channel_no_affinity_change(c))) {
   185				work_done = budget;
   186				goto out;
   187			}
   188			ch_stats->aff_change++;
   189			aff_change = true;
   190			if (budget && work_done == budget)
   191				work_done--;
   192		}
   193	
   194		if (unlikely(!napi_complete_done(napi, work_done)))
   195			goto out;
   196	
   197		ch_stats->arm++;
   198	
   199		for (i = 0; i < c->num_tc; i++) {
   200			mlx5e_handle_tx_dim(&c->sq[i]);
   201			mlx5e_cq_arm(&c->sq[i].cq);
   202		}
   203	
   204		mlx5e_handle_rx_dim(rq);
   205	
   206		mlx5e_cq_arm(&rq->cq);
   207		mlx5e_cq_arm(&c->icosq.cq);
   208		mlx5e_cq_arm(&c->async_icosq.cq);
   209	#ifdef CONFIG_MLX5_EN_NVMEOTCP
   210		list_for_each(cur, &c->list_nvmeotcpsq) {
   211			nvmeotcp_sq = list_entry(cur, struct mlx5e_nvmeotcp_sq, list);
   212			mlx5e_cq_arm(&nvmeotcp_sq->icosq.cq);
   213		}
   214	#endif
   215		mlx5e_cq_arm(&c->xdpsq.cq);
   216	
   217		if (xsk_open) {
   218			mlx5e_handle_rx_dim(xskrq);
   219			mlx5e_cq_arm(&xsksq->cq);
   220			mlx5e_cq_arm(&xskrq->cq);
   221		}
   222	
   223		if (unlikely(aff_change && busy_xsk)) {
   224			mlx5e_trigger_irq(&c->icosq);
   225			ch_stats->force_irq++;
   226		}
   227	
   228	out:
   229		rcu_read_unlock();
   230	
   231		return work_done;
   232	}
   233	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
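
One possible way to address the warning (a sketch only, based on the listing above; the patch author may prefer a different fix): move the NVMEoTCP declaration up with the other locals at the top of mlx5e_napi_poll() and iterate with list_for_each_entry(), which drops the separate 'cur' cursor that triggers -Wdeclaration-after-statement:

#ifdef CONFIG_MLX5_EN_NVMEOTCP
	struct mlx5e_nvmeotcp_sq *nvmeotcp_sq;	/* declared with the other locals, before any statement */
#endif
	...
#ifdef CONFIG_MLX5_EN_NVMEOTCP
	/* list_for_each_entry() walks the typed entries directly, so no list_head cursor is needed */
	list_for_each_entry(nvmeotcp_sq, &c->list_nvmeotcpsq, list)
		mlx5e_poll_ico_cq(&nvmeotcp_sq->icosq.cq);
#endif

The same list_for_each_entry() form would apply to the arming loop around line 210, so 'cur' could be removed entirely.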