From 2636311e5e2894bd7c7800939a3b9b68e7a93bcc Mon Sep 17 00:00:00 2001
From: Gioh Kim
Date: Tue, 13 Apr 2021 14:00:27 +0200
Subject: [PATCH] swap likely and unlikely

Define MYLIKELY() as unlikely() and MYUNLIKELY() as likely(), then
convert every likely()/unlikely() annotation in rtrs-clt.c to the new
macros, so that each branch-prediction hint now predicts the opposite
of what the original annotation claimed.

---
 rtrs/rtrs-clt.c | 134 +++++++++++++++++++++++++----------------------
 1 file changed, 70 insertions(+), 64 deletions(-)

diff --git a/rtrs/rtrs-clt.c b/rtrs/rtrs-clt.c
index 1b4b3e6..6235827 100644
--- a/rtrs/rtrs-clt.c
+++ b/rtrs/rtrs-clt.c
@@ -17,6 +17,12 @@
 #include "rtrs-clt.h"
 #include "rtrs-log.h"
 
+
+
+#define MYLIKELY(x) unlikely(x)
+#define MYUNLIKELY(x) likely(x)
+
+
 #define RTRS_CONNECT_TIMEOUT_MS 30000
 /*
  * Wait a bit before trying to reconnect after a failure
@@ -80,9 +86,9 @@ __rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type)
 	 */
 	do {
 		bit = find_first_zero_bit(clt->permits_map, max_depth);
-		if (unlikely(bit >= max_depth))
+		if (MYUNLIKELY(bit >= max_depth))
 			return NULL;
-	} while (unlikely(test_and_set_bit_lock(bit, clt->permits_map)));
+	} while (MYUNLIKELY(test_and_set_bit_lock(bit, clt->permits_map)));
 
 	permit = get_permit(clt, bit);
 	WARN_ON(permit->mem_id != bit);
@@ -120,14 +126,14 @@ struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt,
 	DEFINE_WAIT(wait);
 
 	permit = __rtrs_get_permit(clt, con_type);
-	if (likely(permit) || !can_wait)
+	if (MYLIKELY(permit) || !can_wait)
 		return permit;
 
 	do {
 		prepare_to_wait(&clt->permits_wait, &wait,
 				TASK_UNINTERRUPTIBLE);
 		permit = __rtrs_get_permit(clt, con_type);
-		if (likely(permit))
+		if (MYLIKELY(permit))
 			break;
 
 		io_schedule();
@@ -180,7 +186,7 @@ struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess,
 {
 	int id = 0;
 
-	if (likely(permit->con_type == RTRS_IO_CON))
+	if (MYLIKELY(permit->con_type == RTRS_IO_CON))
 		id = (permit->cpu_id % (sess->s.irq_con_num - 1)) + 1;
 
 	return to_clt_con(sess->s.con[id]);
@@ -299,7 +305,7 @@ static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
 {
 	struct rtrs_clt_con *con = cq->cq_context;
 
-	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+	if (MYUNLIKELY(wc->status != IB_WC_SUCCESS)) {
 		rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n",
 			 ib_wc_status_msg(wc->status));
 		rtrs_rdma_error_recovery(con);
@@ -319,13 +325,13 @@ static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
 		container_of(wc->wr_cqe, typeof(*req), inv_cqe);
 	struct rtrs_clt_con *con = cq->cq_context;
 
-	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+	if (MYUNLIKELY(wc->status != IB_WC_SUCCESS)) {
 		rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n",
 			 ib_wc_status_msg(wc->status));
 		rtrs_rdma_error_recovery(con);
 	}
 	req->need_inv = false;
-	if (likely(req->need_inv_comp))
+	if (MYLIKELY(req->need_inv_comp))
 		complete(&req->inv_comp);
 	else
 		/* Complete request from INV callback */
@@ -360,7 +366,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
 	sess = to_clt_sess(con->c.sess);
 
 	if (req->sg_cnt) {
-		if (unlikely(req->dir == DMA_FROM_DEVICE && req->need_inv)) {
+		if (MYUNLIKELY(req->dir == DMA_FROM_DEVICE && req->need_inv)) {
 			/*
 			 * We are here to invalidate read requests
 			 * ourselves. In normal scenario server should
@@ -375,7 +381,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
 			 * should do that ourselves.
 			 */
-			if (likely(can_wait)) {
+			if (MYLIKELY(can_wait)) {
 				req->need_inv_comp = true;
 			} else {
 				/* This should be IO path, so always notify */
@@ -386,10 +392,10 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
 
 		refcount_inc(&req->ref);
 		err = rtrs_inv_rkey(req);
-		if (unlikely(err)) {
+		if (MYUNLIKELY(err)) {
 			rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n",
 				 req->mr->rkey, err);
-		} else if (likely(can_wait)) {
+		} else if (MYLIKELY(can_wait)) {
 			wait_for_completion(&req->inv_comp);
 		} else {
 			/*
@@ -414,7 +420,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
 	req->in_use = false;
 	req->con = NULL;
 
-	if (unlikely(errno)) {
+	if (MYUNLIKELY(errno)) {
 		rtrs_err_rl(con->c.sess, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
 			    errno, kobject_name(&sess->kobj), sess->hca_name, sess->hca_port, notify);
 	}
@@ -432,7 +438,7 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
 	enum ib_send_flags flags;
 	struct ib_sge sge;
 
-	if (unlikely(!req->sg_size)) {
+	if (MYUNLIKELY(!req->sg_size)) {
 		rtrs_wrn(con->c.sess,
 			 "Doing RDMA Write failed, no data supplied\n");
 		return -EINVAL;
@@ -482,7 +488,7 @@ static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
 
 	iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
 	err = rtrs_iu_post_recv(&con->c, iu);
-	if (unlikely(err)) {
+	if (MYUNLIKELY(err)) {
 		rtrs_err(con->c.sess, "post iu failed %d\n", err);
 		rtrs_rdma_error_recovery(con);
 	}
@@ -502,7 +508,7 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
 
 	iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
 
-	if (unlikely(wc->byte_len < sizeof(*msg))) {
+	if (MYUNLIKELY(wc->byte_len < sizeof(*msg))) {
 		rtrs_err(con->c.sess, "rkey response is malformed: size %d\n",
 			 wc->byte_len);
 		goto out;
@@ -510,7 +516,7 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
 	ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
 				   iu->size, DMA_FROM_DEVICE);
 	msg = iu->buf;
-	if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP)) {
+	if (MYUNLIKELY(le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP)) {
 		rtrs_err(sess->clt, "rkey response is malformed: type %d\n",
 			 le16_to_cpu(msg->type));
 		goto out;
@@ -520,7 +526,7 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
 		goto out;
 
 	rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
-	if (likely(imm_type == RTRS_IO_RSP_IMM ||
+	if (MYLIKELY(imm_type == RTRS_IO_RSP_IMM ||
 		   imm_type == RTRS_IO_RSP_W_INV_IMM)) {
 		u32 msg_id;
 
@@ -574,7 +580,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
 	bool w_inval = false;
 	int err;
 
-	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+	if (MYUNLIKELY(wc->status != IB_WC_SUCCESS)) {
 		if (wc->status != IB_WC_WR_FLUSH_ERR) {
 			rtrs_err(sess->clt, "RDMA failed: %s\n",
 				 ib_wc_status_msg(wc->status));
@@ -594,7 +600,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
 		return;
 
 	rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
-	if (likely(imm_type == RTRS_IO_RSP_IMM ||
+	if (MYLIKELY(imm_type == RTRS_IO_RSP_IMM ||
 		   imm_type == RTRS_IO_RSP_W_INV_IMM)) {
 		u32 msg_id;
 
@@ -626,7 +632,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
 			err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
 		else
 			err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
-		if (unlikely(err)) {
+		if (MYUNLIKELY(err)) {
 			rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n",
 				 err);
 			rtrs_rdma_error_recovery(con);
@@ -671,7 +677,7 @@ static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
 		} else {
 			err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
 		}
-		if (unlikely(err))
+		if (MYUNLIKELY(err))
 			return err;
 	}
 
@@ -696,7 +702,7 @@ static int post_recv_sess(struct rtrs_clt_sess *sess)
 			q_size *= 2;
 
 		err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size);
-		if (unlikely(err)) {
+		if (MYUNLIKELY(err)) {
 			rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err);
 			return err;
 		}
@@ -757,7 +763,7 @@ static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it)
 	ppcpu_path = this_cpu_ptr(clt->pcpu_path);
 	path = rcu_dereference(*ppcpu_path);
-	if (unlikely(!path))
+	if (MYUNLIKELY(!path))
 		path = list_first_or_null_rcu(&clt->paths_list,
 					      typeof(*path), s.entry);
 	else
@@ -788,10 +794,10 @@ static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
 	int inflight;
 
 	list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
-		if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
+		if (MYUNLIKELY(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
 			continue;
 
-		if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
+		if (MYUNLIKELY(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
 			continue;
 
 		inflight = atomic_read(&sess->stats->inflight);
@@ -839,10 +845,10 @@ static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it)
 	ktime_t latency;
 
 	list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
-		if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
+		if (MYUNLIKELY(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
 			continue;
 
-		if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
+		if (MYUNLIKELY(!list_empty(raw_cpu_ptr(sess->mp_skip_entry))))
 			continue;
 
 		latency = sess->s.hb_cur_latency;
@@ -1033,7 +1039,7 @@ static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
 	nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
 	if (nr < 0)
 		return nr;
-	if (unlikely(nr < req->sg_cnt))
+	if (MYUNLIKELY(nr < req->sg_cnt))
 		return -EINVAL;
 	ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
 
@@ -1057,7 +1063,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
 	const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
 
-	if (unlikely(tsize > sess->chunk_size)) {
+	if (MYUNLIKELY(tsize > sess->chunk_size)) {
 		rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
 			 tsize, sess->chunk_size);
 		return -EMSGSIZE;
@@ -1065,7 +1071,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
 	if (req->sg_cnt) {
 		count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist,
 				      req->sg_cnt, req->dir);
-		if (unlikely(!count)) {
+		if (MYUNLIKELY(!count)) {
 			rtrs_wrn(s, "Write request failed, map failed\n");
 			return -EINVAL;
 		}
@@ -1120,7 +1126,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
 				      req->usr_len + sizeof(*msg) +
 				      sizeof(struct rtrs_sg_desc),
 				      imm, wr, &inv_wr);
-	if (unlikely(ret)) {
+	if (MYUNLIKELY(ret)) {
 		rtrs_err_rl(s, "Write request failed: error=%d path=%s [%s:%u]\n",
 			    ret, kobject_name(&sess->kobj), sess->hca_name, sess->hca_port);
 		if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
@@ -1150,7 +1156,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
 	const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
 
-	if (unlikely(tsize > sess->chunk_size)) {
+	if (MYUNLIKELY(tsize > sess->chunk_size)) {
 		rtrs_wrn(s,
 			 "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
 			 tsize, sess->chunk_size);
@@ -1160,7 +1166,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
 	if (req->sg_cnt) {
 		count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt,
 				      req->dir);
-		if (unlikely(!count)) {
+		if (MYUNLIKELY(!count)) {
 			rtrs_wrn(s,
 				 "Read request failed, dma map failed\n");
 			return -EINVAL;
@@ -1234,7 +1240,7 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
 	ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id],
 				  req->data_len, imm, wr);
-	if (unlikely(ret)) {
+	if (MYUNLIKELY(ret)) {
 		rtrs_err_rl(s, "Read request failed: error=%d path=%s [%s:%u]\n",
 			    ret, kobject_name(&sess->kobj), sess->hca_name, sess->hca_port);
 		if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
@@ -1265,7 +1271,7 @@ static int rtrs_clt_failover_req(struct rtrs_clt *clt,
 	for (path_it_init(&it, clt);
 	     (alive_sess = it.next_path(&it)) && it.i < it.clt->paths_num;
 	     it.i++) {
-		if (unlikely(READ_ONCE(alive_sess->state) !=
+		if (MYUNLIKELY(READ_ONCE(alive_sess->state) !=
 			     RTRS_CLT_CONNECTED))
 			continue;
 		req = rtrs_clt_get_copy_req(alive_sess, fail_req);
@@ -1273,7 +1279,7 @@ static int rtrs_clt_failover_req(struct rtrs_clt *clt,
 			err = rtrs_clt_write_req(req);
 		else
 			err = rtrs_clt_read_req(req);
-		if (unlikely(err)) {
+		if (MYUNLIKELY(err)) {
 			req->in_use = false;
 			continue;
 		}
@@ -1308,7 +1314,7 @@ static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess)
 		complete_rdma_req(req, -ECONNABORTED, false, true);
 
 		err = rtrs_clt_failover_req(clt, req);
-		if (unlikely(err))
+		if (MYUNLIKELY(err))
 			/* Failover failed, notify anyway */
 			req->conf(req->priv, err);
 	}
@@ -1352,7 +1358,7 @@ static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
 			goto out;
 		sg_cnt = NOREG_CNT + 1;
 		req->sge = kcalloc(sg_cnt, sizeof(*req->sge), GFP_KERNEL);
-		if (unlikely(!req->sge))
+		if (MYUNLIKELY(!req->sge))
 			goto out;
 
 		req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
@@ -1946,7 +1952,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
 		break;
 	case RDMA_CM_EVENT_ESTABLISHED:
 		cm_err = rtrs_rdma_conn_established(con, ev);
-		if (likely(!cm_err)) {
+		if (MYLIKELY(!cm_err)) {
 			/*
 			 * Report success and wake up. Here we abuse state_wq,
 			 * i.e. wake up without state change, but we set cm_err.
@@ -2365,7 +2371,7 @@ static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
 	iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
 	rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
 
-	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+	if (MYUNLIKELY(wc->status != IB_WC_SUCCESS)) {
 		rtrs_err(sess->clt, "Sess info request send failed: %s\n",
 			 ib_wc_status_msg(wc->status));
 		rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
@@ -2382,7 +2388,7 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
 	int i, sgi;
 
 	sg_cnt = le16_to_cpu(msg->sg_cnt);
-	if (unlikely(!sg_cnt || (sess->queue_depth % sg_cnt))) {
+	if (MYUNLIKELY(!sg_cnt || (sess->queue_depth % sg_cnt))) {
 		rtrs_err(sess->clt, "Incorrect sg_cnt %d, is not multiple\n",
 			 sg_cnt);
 		return -EINVAL;
@@ -2392,7 +2398,7 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
 	 * Check if IB immediate data size is enough to hold the mem_id and
 	 * the offset inside the memory chunk.
 	 */
-	if (unlikely((ilog2(sg_cnt - 1) + 1) +
+	if (MYUNLIKELY((ilog2(sg_cnt - 1) + 1) +
 		     (ilog2(sess->chunk_size - 1) + 1) >
 		     MAX_IMM_PAYL_BITS)) {
 		rtrs_err(sess->clt,
@@ -2412,7 +2418,7 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
 
 		total_len += len;
 
-		if (unlikely(!len || (len % sess->chunk_size))) {
+		if (MYUNLIKELY(!len || (len % sess->chunk_size))) {
 			rtrs_err(sess->clt, "Incorrect [%d].len %d\n",
 				 sgi, len);
 			return -EINVAL;
@@ -2426,11 +2432,11 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
 		}
 	}
 	/* Sanity check */
-	if (unlikely(sgi != sg_cnt || i != sess->queue_depth)) {
+	if (MYUNLIKELY(sgi != sg_cnt || i != sess->queue_depth)) {
 		rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n");
 		return -EINVAL;
 	}
-	if (unlikely(total_len != sess->chunk_size * sess->queue_depth)) {
+	if (MYUNLIKELY(total_len != sess->chunk_size * sess->queue_depth)) {
 		rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len);
 		return -EINVAL;
 	}
@@ -2452,14 +2458,14 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
 
 	WARN_ON(con->c.cid);
 	iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
-	if (unlikely(wc->status != IB_WC_SUCCESS)) {
+	if (MYUNLIKELY(wc->status != IB_WC_SUCCESS)) {
 		rtrs_err(sess->clt, "Sess info response recv failed: %s\n",
 			 ib_wc_status_msg(wc->status));
 		goto out;
 	}
 	WARN_ON(wc->opcode != IB_WC_RECV);
-	if (unlikely(wc->byte_len < sizeof(*msg))) {
+	if (MYUNLIKELY(wc->byte_len < sizeof(*msg))) {
 		rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
 			 wc->byte_len);
 		goto out;
@@ -2467,24 +2473,24 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
 	ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
 				   iu->size, DMA_FROM_DEVICE);
 	msg = iu->buf;
-	if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP)) {
+	if (MYUNLIKELY(le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP)) {
 		rtrs_err(sess->clt, "Sess info response is malformed: type %d\n",
 			 le16_to_cpu(msg->type));
 		goto out;
 	}
 	rx_sz = sizeof(*msg);
 	rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
-	if (unlikely(wc->byte_len < rx_sz)) {
+	if (MYUNLIKELY(wc->byte_len < rx_sz)) {
 		rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
 			 wc->byte_len);
 		goto out;
 	}
 	err = process_info_rsp(sess, msg);
-	if (unlikely(err))
+	if (MYUNLIKELY(err))
 		goto out;
 
 	err = post_recv_sess(sess);
-	if (unlikely(err))
+	if (MYUNLIKELY(err))
 		goto out;
 
 	state = RTRS_CLT_CONNECTED;
@@ -2511,13 +2517,13 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
 			       rtrs_clt_info_req_done);
 	rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
 			      DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
-	if (unlikely(!tx_iu || !rx_iu)) {
+	if (MYUNLIKELY(!tx_iu || !rx_iu)) {
 		err = -ENOMEM;
 		goto out;
 	}
 	/* Prepare for getting info response */
 	err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
-	if (unlikely(err)) {
+	if (MYUNLIKELY(err)) {
 		rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err);
 		goto out;
 	}
@@ -2532,7 +2538,7 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
 
 	/* Send info request */
 	err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
-	if (unlikely(err)) {
+	if (MYUNLIKELY(err)) {
 		rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err);
 		goto out;
 	}
@@ -2543,7 +2549,7 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
 				 sess->state != RTRS_CLT_CONNECTING,
 				 msecs_to_jiffies(
 					 RTRS_CONNECT_TIMEOUT_MS));
-	if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) {
+	if (MYUNLIKELY(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) {
 		if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR)
 			err = -ECONNRESET;
 		else
@@ -2555,7 +2561,7 @@ out:
 		rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
 	if (rx_iu)
 		rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
-	if (unlikely(err))
+	if (MYUNLIKELY(err))
 		/* If we've never taken async path because of malloc problems */
 		rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
@@ -2899,7 +2905,7 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
 						 &old_state);
 	} while (!changed && old_state != RTRS_CLT_DEAD);
 
-	if (likely(changed)) {
+	if (MYLIKELY(changed)) {
 		rtrs_clt_remove_path_from_arr(sess);
 		rtrs_clt_destroy_sess_files(sess, sysfs_self);
 		kobject_put(&sess->kobj);
@@ -2971,14 +2977,14 @@ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
 	rcu_read_lock();
 	for (path_it_init(&it, clt);
 	     (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
-		if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
+		if (MYUNLIKELY(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
			continue;
 
 		err = rtrs_clt_should_fail_request(&sess->fault_inject);
-		if (unlikely(err))
+		if (MYUNLIKELY(err))
 			continue;
 
-		if (unlikely(usr_len + hdr_len > sess->max_hdr_size)) {
+		if (MYUNLIKELY(usr_len + hdr_len > sess->max_hdr_size)) {
 			rtrs_wrn_rl(sess->clt,
 				    "%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
 				    dir == READ ? "Read" : "Write",
@@ -2993,7 +2999,7 @@ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
 			err = rtrs_clt_read_req(req);
 		else
 			err = rtrs_clt_write_req(req);
-		if (unlikely(err)) {
+		if (MYUNLIKELY(err)) {
 			req->in_use = false;
 			continue;
 		}
@@ -3017,12 +3023,12 @@ int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index)
 	rcu_read_lock();
 	for (path_it_init(&it, clt);
 	     (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
-		if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
+		if (MYUNLIKELY(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED))
 			continue;
 
 		con = sess->s.con[index + 1];
 		cnt = ib_process_cq_direct(con->cq, -1);
-		if (likely(cnt))
+		if (MYLIKELY(cnt))
 			break;
 	}
 	path_it_deinit(&it);
-- 
2.25.1
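
Note for readers unfamiliar with the annotations: likely()/unlikely()
in the kernel reduce to GCC's __builtin_expect() (see
include/linux/compiler.h), so the MY* wrappers above simply hand the
compiler the opposite expectation on every annotated branch. Below is a
minimal, self-contained userspace sketch of the same swap; the
process() function and its values are illustrative only, not taken from
the patch:

  #include <stdio.h>

  /* Conventional definitions, as in include/linux/compiler.h */
  #define likely(x)     __builtin_expect(!!(x), 1)
  #define unlikely(x)   __builtin_expect(!!(x), 0)

  /* The patch's wrappers: each hint now predicts the opposite outcome */
  #define MYLIKELY(x)   unlikely(x)
  #define MYUNLIKELY(x) likely(x)

  static int process(int err)
  {
          if (MYUNLIKELY(err)) {  /* error path is now laid out as hot */
                  fprintf(stderr, "error: %d\n", err);
                  return err;
          }
          return 0;               /* success path is now laid out as cold */
  }

  int main(void)
  {
          return process(0);
  }

Comparing the assembly of a build using the MY* wrappers against one
using plain likely()/unlikely() (e.g. gcc -O2 -S) shows how the hint
steers basic-block placement between the hot and cold paths.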