All of lore.kernel.org
 help / color / mirror / Atom feed
From: Gal Pressman <galpress@amazon.com>
To: Shiraz Saleem <shiraz.saleem@intel.com>,
	dledford@redhat.com, jgg@ziepe.ca, davem@davemloft.net
Cc: linux-rdma@vger.kernel.org, netdev@vger.kernel.org,
	mustafa.ismail@intel.com, jeffrey.t.kirsher@intel.com
Subject: Re: [RFC v1 10/19] RDMA/irdma: Add connection manager
Date: Sun, 24 Feb 2019 13:21:16 +0200	[thread overview]
Message-ID: <0eafe40b-4c54-dc12-6a85-3a821d99d2cd@amazon.com> (raw)
In-Reply-To: <20190215171107.6464-11-shiraz.saleem@intel.com>

On 15-Feb-19 19:10, Shiraz Saleem wrote:
> +/**
> + * irdma_cm_teardown_connections - teardown QPs
> + * @iwdev: device pointer
> + * @ipaddr: Pointer to IPv4 or IPv6 address
> + * @ipv4: flag indicating IPv4 when true

There is no ipv4 parameter.

> + * @disconnect_all: flag indicating disconnect all QPs
> + * teardown QPs where source or destination addr matches ip addr
> + */
> +void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
> +				   struct irdma_cm_info *nfo,
> +				   bool disconnect_all)
> +{
> +	struct irdma_cm_core *cm_core = &iwdev->cm_core;
> +	struct list_head *list_core_temp;
> +	struct list_head *list_node;
> +	struct irdma_cm_node *cm_node;
> +	struct list_head teardown_list;
> +	struct ib_qp_attr attr;
> +	struct irdma_sc_vsi *vsi = &iwdev->vsi;
> +	struct irdma_sc_qp *sc_qp;
> +	struct irdma_qp *qp;
> +	int i;
> +	unsigned long flags;
> +
> +	INIT_LIST_HEAD(&teardown_list);
> +
> +	spin_lock_irqsave(&cm_core->ht_lock, flags);
> +	list_for_each_safe(list_node, list_core_temp,
> +			   &cm_core->accelerated_list) {
> +		cm_node = container_of(list_node, struct irdma_cm_node, list);
> +		if (disconnect_all ||
> +		    (nfo->vlan_id == cm_node->vlan_id &&
> +		    !memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16))) {
> +			atomic_inc(&cm_node->ref_count);
> +			list_add(&cm_node->teardown_entry, &teardown_list);
> +		}
> +	}
> +	list_for_each_safe(list_node, list_core_temp,
> +			   &cm_core->non_accelerated_list) {
> +		cm_node = container_of(list_node, struct irdma_cm_node, list);
> +		if (disconnect_all ||
> +		    (nfo->vlan_id == cm_node->vlan_id &&
> +		    !memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16))) {
> +			atomic_inc(&cm_node->ref_count);
> +			list_add(&cm_node->teardown_entry, &teardown_list);
> +		}
> +	}
> +	spin_unlock_irqrestore(&cm_core->ht_lock, flags);
> +
> +	list_for_each_safe(list_node, list_core_temp, &teardown_list) {
> +		cm_node = container_of(list_node, struct irdma_cm_node,
> +				       teardown_entry);
> +		attr.qp_state = IB_QPS_ERR;
> +		irdma_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
> +		if (iwdev->reset)
> +			irdma_cm_disconn(cm_node->iwqp);
> +		irdma_rem_ref_cm_node(cm_node);
> +	}
> +	if (!iwdev->roce_mode)
> +		return;
> +
> +	INIT_LIST_HEAD(&teardown_list);
> +	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
> +		spin_lock_irqsave(&vsi->qos[i].lock, flags);
> +		list_for_each_safe(list_node, list_core_temp, &vsi->qos[i].qplist) {
> +			u32 qp_ip[4];
> +
> +			sc_qp = container_of(list_node, struct irdma_sc_qp, list);
> +			if (sc_qp->qp_type != IRDMA_QP_TYPE_ROCE_RC)
> +				continue;
> +
> +			qp = sc_qp->back_qp;
> +			if (!disconnect_all) {
> +				if (nfo->ipv4)
> +					qp_ip[0] = qp->udp_info.local_ipaddr3;
> +				else
> +					memcpy(qp_ip,
> +					       &qp->udp_info.local_ipaddr0,
> +					       sizeof(qp_ip));
> +			}
> +
> +			if (disconnect_all ||
> +			    (nfo->vlan_id == qp->udp_info.vlan_tag &&
> +			    !memcmp(qp_ip, ipaddr, nfo->ipv4 ? 4 : 16))) {
> +				spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);

You should use a different 'flags' variable here (the outer spin_lock_irqsave already uses it).

> +				if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) {
> +					irdma_add_ref(&qp->ibqp);
> +					list_add(&qp->teardown_entry, &teardown_list);
> +				}
> +				spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
> +			}
> +		}
> +		spin_unlock_irqrestore(&vsi->qos[i].lock, flags);
> +	}
> +
> +	list_for_each_safe(list_node, list_core_temp, &teardown_list) {
> +		qp = container_of(list_node, struct irdma_qp, teardown_entry);
> +		attr.qp_state = IB_QPS_ERR;
> +		irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
> +		irdma_rem_ref(&qp->ibqp);
> +	}
> +}

WARNING: multiple messages have this Message-ID (diff)
From: Gal Pressman <galpress@amazon.com>
To: Shiraz Saleem <shiraz.saleem@intel.com>, <dledford@redhat.com>,
	<jgg@ziepe.ca>, <davem@davemloft.net>
Cc: <linux-rdma@vger.kernel.org>, <netdev@vger.kernel.org>,
	<mustafa.ismail@intel.com>, <jeffrey.t.kirsher@intel.com>
Subject: Re: [RFC v1 10/19] RDMA/irdma: Add connection manager
Date: Sun, 24 Feb 2019 13:21:16 +0200	[thread overview]
Message-ID: <0eafe40b-4c54-dc12-6a85-3a821d99d2cd@amazon.com> (raw)
In-Reply-To: <20190215171107.6464-11-shiraz.saleem@intel.com>

On 15-Feb-19 19:10, Shiraz Saleem wrote:
> +/**
> + * irdma_cm_teardown_connections - teardown QPs
> + * @iwdev: device pointer
> + * @ipaddr: Pointer to IPv4 or IPv6 address
> + * @ipv4: flag indicating IPv4 when true

There is no ipv4 parameter.

> + * @disconnect_all: flag indicating disconnect all QPs
> + * teardown QPs where source or destination addr matches ip addr
> + */
> +void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
> +				   struct irdma_cm_info *nfo,
> +				   bool disconnect_all)
> +{
> +	struct irdma_cm_core *cm_core = &iwdev->cm_core;
> +	struct list_head *list_core_temp;
> +	struct list_head *list_node;
> +	struct irdma_cm_node *cm_node;
> +	struct list_head teardown_list;
> +	struct ib_qp_attr attr;
> +	struct irdma_sc_vsi *vsi = &iwdev->vsi;
> +	struct irdma_sc_qp *sc_qp;
> +	struct irdma_qp *qp;
> +	int i;
> +	unsigned long flags;
> +
> +	INIT_LIST_HEAD(&teardown_list);
> +
> +	spin_lock_irqsave(&cm_core->ht_lock, flags);
> +	list_for_each_safe(list_node, list_core_temp,
> +			   &cm_core->accelerated_list) {
> +		cm_node = container_of(list_node, struct irdma_cm_node, list);
> +		if (disconnect_all ||
> +		    (nfo->vlan_id == cm_node->vlan_id &&
> +		    !memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16))) {
> +			atomic_inc(&cm_node->ref_count);
> +			list_add(&cm_node->teardown_entry, &teardown_list);
> +		}
> +	}
> +	list_for_each_safe(list_node, list_core_temp,
> +			   &cm_core->non_accelerated_list) {
> +		cm_node = container_of(list_node, struct irdma_cm_node, list);
> +		if (disconnect_all ||
> +		    (nfo->vlan_id == cm_node->vlan_id &&
> +		    !memcmp(cm_node->loc_addr, ipaddr, nfo->ipv4 ? 4 : 16))) {
> +			atomic_inc(&cm_node->ref_count);
> +			list_add(&cm_node->teardown_entry, &teardown_list);
> +		}
> +	}
> +	spin_unlock_irqrestore(&cm_core->ht_lock, flags);
> +
> +	list_for_each_safe(list_node, list_core_temp, &teardown_list) {
> +		cm_node = container_of(list_node, struct irdma_cm_node,
> +				       teardown_entry);
> +		attr.qp_state = IB_QPS_ERR;
> +		irdma_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL);
> +		if (iwdev->reset)
> +			irdma_cm_disconn(cm_node->iwqp);
> +		irdma_rem_ref_cm_node(cm_node);
> +	}
> +	if (!iwdev->roce_mode)
> +		return;
> +
> +	INIT_LIST_HEAD(&teardown_list);
> +	for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) {
> +		spin_lock_irqsave(&vsi->qos[i].lock, flags);
> +		list_for_each_safe(list_node, list_core_temp, &vsi->qos[i].qplist) {
> +			u32 qp_ip[4];
> +
> +			sc_qp = container_of(list_node, struct irdma_sc_qp, list);
> +			if (sc_qp->qp_type != IRDMA_QP_TYPE_ROCE_RC)
> +				continue;
> +
> +			qp = sc_qp->back_qp;
> +			if (!disconnect_all) {
> +				if (nfo->ipv4)
> +					qp_ip[0] = qp->udp_info.local_ipaddr3;
> +				else
> +					memcpy(qp_ip,
> +					       &qp->udp_info.local_ipaddr0,
> +					       sizeof(qp_ip));
> +			}
> +
> +			if (disconnect_all ||
> +			    (nfo->vlan_id == qp->udp_info.vlan_tag &&
> +			    !memcmp(qp_ip, ipaddr, nfo->ipv4 ? 4 : 16))) {
> +				spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);

You should use a different 'flags' variable here (the outer spin_lock_irqsave already uses it).

> +				if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) {
> +					irdma_add_ref(&qp->ibqp);
> +					list_add(&qp->teardown_entry, &teardown_list);
> +				}
> +				spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
> +			}
> +		}
> +		spin_unlock_irqrestore(&vsi->qos[i].lock, flags);
> +	}
> +
> +	list_for_each_safe(list_node, list_core_temp, &teardown_list) {
> +		qp = container_of(list_node, struct irdma_qp, teardown_entry);
> +		attr.qp_state = IB_QPS_ERR;
> +		irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL);
> +		irdma_rem_ref(&qp->ibqp);
> +	}
> +}

  reply	other threads:[~2019-02-24 11:21 UTC|newest]

Thread overview: 59+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-02-15 17:10 [RFC v1 00/19] Add unified Intel Ethernet RDMA driver (irdma) Shiraz Saleem
2019-02-15 17:10 ` [RFC v1 01/19] net/i40e: Add peer register/unregister to struct i40e_netdev_priv Shiraz Saleem
2019-02-15 17:22   ` Jason Gunthorpe
2019-02-21  2:19     ` Saleem, Shiraz
2019-02-21 19:35       ` Jason Gunthorpe
2019-02-22 20:13         ` Ertman, David M
2019-02-22 20:23           ` Jason Gunthorpe
2019-03-13  2:11             ` Jeff Kirsher
2019-03-13 13:28               ` Jason Gunthorpe
2019-05-10 13:31                 ` Shiraz Saleem
2019-05-10 18:17                   ` Jason Gunthorpe
2019-02-15 17:10 ` [RFC v1 02/19] net/ice: Create framework for VSI queue context Shiraz Saleem
2019-02-15 17:10 ` [RFC v1 03/19] net/ice: Add support for ice peer devices and drivers Shiraz Saleem
2019-02-15 17:10 ` [RFC v1 04/19] RDMA/irdma: Add driver framework definitions Shiraz Saleem
2019-02-24 15:02   ` Gal Pressman
2019-02-24 15:02     ` Gal Pressman
2019-02-26 21:08     ` Saleem, Shiraz
2019-02-15 17:10 ` [RFC v1 05/19] RDMA/irdma: Implement device initialization definitions Shiraz Saleem
2019-02-15 17:10 ` [RFC v1 06/19] RDMA/irdma: Implement HW Admin Queue OPs Shiraz Saleem
2019-02-15 17:10 ` [RFC v1 07/19] RDMA/irdma: Add HMC backing store setup functions Shiraz Saleem
2019-02-15 17:10 ` [RFC v1 08/19] RDMA/irdma: Add privileged UDA queue implementation Shiraz Saleem
2019-02-24 11:42   ` Gal Pressman
2019-02-24 11:42     ` Gal Pressman
2019-02-15 17:10 ` [RFC v1 09/19] RDMA/irdma: Add QoS definitions Shiraz Saleem
2019-02-15 17:10 ` [RFC v1 10/19] RDMA/irdma: Add connection manager Shiraz Saleem
2019-02-24 11:21   ` Gal Pressman [this message]
2019-02-24 11:21     ` Gal Pressman
2019-02-25 18:46     ` Jason Gunthorpe
2019-02-26 21:07       ` Saleem, Shiraz
2019-02-15 17:10 ` [RFC v1 11/19] RDMA/irdma: Add PBLE resource manager Shiraz Saleem
2019-02-27  6:58   ` Leon Romanovsky
2019-02-15 17:10 ` [RFC v1 12/19] RDMA/irdma: Implement device supported verb APIs Shiraz Saleem
2019-02-15 17:35   ` Jason Gunthorpe
2019-02-15 22:19     ` Shiraz Saleem
2019-02-15 22:32       ` Jason Gunthorpe
2019-02-20 14:52     ` Saleem, Shiraz
2019-02-20 16:51       ` Jason Gunthorpe
2019-02-24 14:35   ` Gal Pressman
2019-02-24 14:35     ` Gal Pressman
2019-02-25 18:50     ` Jason Gunthorpe
2019-02-26 21:09       ` Saleem, Shiraz
2019-02-26 21:09     ` Saleem, Shiraz
2019-02-27  7:31       ` Gal Pressman
2019-02-15 17:11 ` [RFC v1 13/19] RDMA/irdma: Add RoCEv2 UD OP support Shiraz Saleem
2019-02-27  6:50   ` Leon Romanovsky
2019-02-15 17:11 ` [RFC v1 14/19] RDMA/irdma: Add user/kernel shared libraries Shiraz Saleem
2019-02-15 17:11 ` [RFC v1 15/19] RDMA/irdma: Add miscellaneous utility definitions Shiraz Saleem
2019-02-15 17:47   ` Jason Gunthorpe
2019-02-20  7:51     ` Leon Romanovsky
2019-02-20 14:53     ` Saleem, Shiraz
2019-02-20 16:53       ` Jason Gunthorpe
2019-02-15 17:11 ` [RFC v1 16/19] RDMA/irdma: Add dynamic tracing for CM Shiraz Saleem
2019-02-15 17:11 ` [RFC v1 17/19] RDMA/irdma: Add ABI definitions Shiraz Saleem
2019-02-15 17:16   ` Jason Gunthorpe
2019-02-20 14:52     ` Saleem, Shiraz
2019-02-20 16:50       ` Jason Gunthorpe
2019-02-15 17:11 ` [RFC v1 18/19] RDMA/irdma: Add Kconfig and Makefile Shiraz Saleem
2019-02-15 17:11 ` [RFC v1 19/19] RDMA/irdma: Update MAINTAINERS file Shiraz Saleem
2019-02-15 17:20 ` [RFC v1 00/19] Add unified Intel Ethernet RDMA driver (irdma) Jason Gunthorpe

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=0eafe40b-4c54-dc12-6a85-3a821d99d2cd@amazon.com \
    --to=galpress@amazon.com \
    --cc=davem@davemloft.net \
    --cc=dledford@redhat.com \
    --cc=jeffrey.t.kirsher@intel.com \
    --cc=jgg@ziepe.ca \
    --cc=linux-rdma@vger.kernel.org \
    --cc=mustafa.ismail@intel.com \
    --cc=netdev@vger.kernel.org \
    --cc=shiraz.saleem@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes;
see the mirroring instructions for how to clone and mirror
all data and code used by this external index.