linux-scsi.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Hannes Reinecke <hare@suse.de>
To: James Smart <jsmart2021@gmail.com>, linux-scsi@vger.kernel.org
Cc: maier@linux.ibm.com, dwagner@suse.de, bvanassche@acm.org,
	Ram Vegesna <ram.vegesna@broadcom.com>
Subject: Re: [PATCH v2 11/32] elx: libefc: SLI and FC PORT state machine interfaces
Date: Thu, 9 Jan 2020 08:34:43 +0100	[thread overview]
Message-ID: <aee9880a-81d4-7e31-bb9d-d33e7ca375d8@suse.de> (raw)
In-Reply-To: <20191220223723.26563-12-jsmart2021@gmail.com>

On 12/20/19 11:37 PM, James Smart wrote:
> This patch continues the libefc library population.
> 
> This patch adds library interface definitions for:
> - SLI and FC port (aka n_port_id) registration, allocation and
>   deallocation.
> 
> Signed-off-by: Ram Vegesna <ram.vegesna@broadcom.com>
> Signed-off-by: James Smart <jsmart2021@gmail.com>
> ---
>  drivers/scsi/elx/libefc/efc_sport.c | 843 ++++++++++++++++++++++++++++++++++++
>  drivers/scsi/elx/libefc/efc_sport.h |  52 +++
>  2 files changed, 895 insertions(+)
>  create mode 100644 drivers/scsi/elx/libefc/efc_sport.c
>  create mode 100644 drivers/scsi/elx/libefc/efc_sport.h
> 
> diff --git a/drivers/scsi/elx/libefc/efc_sport.c b/drivers/scsi/elx/libefc/efc_sport.c
> new file mode 100644
> index 000000000000..11f3ba73ec6e
> --- /dev/null
> +++ b/drivers/scsi/elx/libefc/efc_sport.c
> @@ -0,0 +1,843 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Copyright (C) 2019 Broadcom. All Rights Reserved. The term
> + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
> + */
> +
> +/*
> + * Details SLI port (sport) functions.
> + */
> +
> +#include "efc.h"
> +
> +/* HW sport callback events from the user driver */
> +int
> +efc_lport_cb(void *arg, int event, void *data)
> +{
> +	struct efc *efc = arg;
> +	struct efc_sli_port *sport = data;
> +
> +	switch (event) {
> +	case EFC_HW_PORT_ALLOC_OK:
> +		efc_log_debug(efc, "EFC_HW_PORT_ALLOC_OK\n");
> +		efc_sm_post_event(&sport->sm, EFC_EVT_SPORT_ALLOC_OK, NULL);
> +		break;
> +	case EFC_HW_PORT_ALLOC_FAIL:
> +		efc_log_debug(efc, "EFC_HW_PORT_ALLOC_FAIL\n");
> +		efc_sm_post_event(&sport->sm, EFC_EVT_SPORT_ALLOC_FAIL, NULL);
> +		break;
> +	case EFC_HW_PORT_ATTACH_OK:
> +		efc_log_debug(efc, "EFC_HW_PORT_ATTACH_OK\n");
> +		efc_sm_post_event(&sport->sm, EFC_EVT_SPORT_ATTACH_OK, NULL);
> +		break;
> +	case EFC_HW_PORT_ATTACH_FAIL:
> +		efc_log_debug(efc, "EFC_HW_PORT_ATTACH_FAIL\n");
> +		efc_sm_post_event(&sport->sm,
> +				  EFC_EVT_SPORT_ATTACH_FAIL, NULL);
> +		break;
> +	case EFC_HW_PORT_FREE_OK:
> +		efc_log_debug(efc, "EFC_HW_PORT_FREE_OK\n");
> +		efc_sm_post_event(&sport->sm, EFC_EVT_SPORT_FREE_OK, NULL);
> +		break;
> +	case EFC_HW_PORT_FREE_FAIL:
> +		efc_log_debug(efc, "EFC_HW_PORT_FREE_FAIL\n");
> +		efc_sm_post_event(&sport->sm, EFC_EVT_SPORT_FREE_FAIL, NULL);
> +		break;
> +	default:
> +		efc_log_test(efc, "unknown event %#x\n", event);
> +	}
> +
> +	return 0;
> +}
> +

Same here; please use the name mapping function and collapse the cases.

> +struct efc_sli_port *
> +efc_sport_alloc(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
> +		u32 fc_id, bool enable_ini, bool enable_tgt)
> +{
> +	struct efc_sli_port *sport;
> +
> +	if (domain->efc->enable_ini)
> +		enable_ini = 0;
> +
> +	/* Return a failure if this sport has already been allocated */
> +	if (wwpn != 0) {
> +		sport = efc_sport_find_wwn(domain, wwnn, wwpn);
> +		if (sport) {
> +			efc_log_err(domain->efc,
> +				    "Failed: SPORT %016llX %016llX already allocated\n",
> +				    wwnn, wwpn);
> +			return NULL;
> +		}
> +	}
> +
> +	sport = kzalloc(sizeof(*sport), GFP_ATOMIC);
> +	if (!sport)
> +		return sport;
> +
> +	sport->efc = domain->efc;
> +	snprintf(sport->display_name, sizeof(sport->display_name), "------");
> +	sport->domain = domain;
> +	sport->lookup = efc_spv_new(domain->efc);
> +	sport->instance_index = domain->sport_instance_count++;
> +	INIT_LIST_HEAD(&sport->node_list);
> +	sport->sm.app = sport;
> +	sport->enable_ini = enable_ini;
> +	sport->enable_tgt = enable_tgt;
> +	sport->enable_rscn = (sport->enable_ini ||
> +			(sport->enable_tgt && enable_target_rscn(sport->efc)));
> +
> +	/* Copy service parameters from domain */
> +	memcpy(sport->service_params, domain->service_params,
> +		sizeof(struct fc_els_flogi));
> +
> +	/* Update requested fc_id */
> +	sport->fc_id = fc_id;
> +
> +	/* Update the sport's service parameters for the new wwn's */
> +	sport->wwpn = wwpn;
> +	sport->wwnn = wwnn;
> +	snprintf(sport->wwnn_str, sizeof(sport->wwnn_str), "%016llX", wwnn);
> +
> +	/*
> +	 * if this is the "first" sport of the domain,
> +	 * then make it the "phys" sport
> +	 */
> +	if (list_empty(&domain->sport_list))
> +		domain->sport = sport;
> +
> +	INIT_LIST_HEAD(&sport->list_entry);
> +	list_add_tail(&sport->list_entry, &domain->sport_list);
> +
> +	efc_log_debug(domain->efc, "[%s] allocate sport\n",
> +		      sport->display_name);
> +
> +	return sport;
> +}

And this is what I meant with missing locking; if this function is
called concurrently with the same wwnn you might end up with two
identical entries in the sport list.
At the very least explain why this is safe; but still I would prefer
locking here.

> +
> +void
> +efc_sport_free(struct efc_sli_port *sport)
> +{
> +	struct efc_domain *domain;
> +
> +	if (!sport)
> +		return;
> +
> +	domain = sport->domain;
> +	efc_log_debug(domain->efc, "[%s] free sport\n", sport->display_name);
> +	list_del(&sport->list_entry);
> +	/*
> +	 * if this is the physical sport,
> +	 * then clear it out of the domain
> +	 */
> +	if (sport == domain->sport)
> +		domain->sport = NULL;
> +
> +	efc_spv_del(sport->lookup);
> +	sport->lookup = NULL;
> +
> +	efc_spv_set(domain->lookup, sport->fc_id, NULL);
> +
> +	if (list_empty(&domain->sport_list))
> +		efc_domain_post_event(domain, EFC_EVT_ALL_CHILD_NODES_FREE,
> +				      NULL);
> +
> +	kfree(sport);
> +}
> +
> +void
> +efc_sport_force_free(struct efc_sli_port *sport)
> +{
> +	struct efc_node *node;
> +	struct efc_node *next;
> +
> +	/* shutdown sm processing */
> +	efc_sm_disable(&sport->sm);
> +
> +	list_for_each_entry_safe(node, next, &sport->node_list, list_entry) {
> +		efc_node_force_free(node);
> +	}
> +
> +	efc_sport_free(sport);
> +}
> +
> +/* Find a SLI port object, given an FC_ID */
> +struct efc_sli_port *
> +efc_sport_find(struct efc_domain *domain, u32 d_id)
> +{
> +	struct efc_sli_port *sport;
> +
> +	if (!domain->lookup) {
> +		efc_log_test(domain->efc,
> +			     "assertion failed: domain->lookup is not valid\n");
> +		return NULL;
> +	}
> +
> +	sport = efc_spv_get(domain->lookup, d_id);
> +	return sport;
> +}
> +
> +/* Find a SLI port, given the WWNN and WWPN */
> +struct efc_sli_port *
> +efc_sport_find_wwn(struct efc_domain *domain, uint64_t wwnn, uint64_t wwpn)
> +{
> +	struct efc_sli_port *sport = NULL;
> +
> +	list_for_each_entry(sport, &domain->sport_list, list_entry) {
> +		if (sport->wwnn == wwnn && sport->wwpn == wwpn)
> +			return sport;
> +	}
> +	return NULL;
> +}
> +
> +/* External call to request an attach for a sport, given an FC_ID */
> +int
> +efc_sport_attach(struct efc_sli_port *sport, u32 fc_id)
> +{
> +	int rc;
> +	struct efc_node *node;
> +	struct efc *efc = sport->efc;
> +
> +	/* Set our lookup */
> +	efc_spv_set(sport->domain->lookup, fc_id, sport);
> +
> +	/* Update our display_name */
> +	efc_node_fcid_display(fc_id, sport->display_name,
> +			      sizeof(sport->display_name));
> +
> +	list_for_each_entry(node, &sport->node_list, list_entry) {
> +		efc_node_update_display_name(node);
> +	}
> +
> +	efc_log_debug(sport->efc, "[%s] attach sport: fc_id x%06x\n",
> +		      sport->display_name, fc_id);
> +
> +	rc = efc->tt.hw_port_attach(efc, sport, fc_id);
> +	if (rc != EFC_HW_RTN_SUCCESS) {
> +		efc_log_err(sport->efc,
> +			    "efc_hw_port_attach failed: %d\n", rc);
> +		return -1;
> +	}
> +	return 0;
> +}
> +
> +static void
> +efc_sport_shutdown(struct efc_sli_port *sport)
> +{
> +	struct efc *efc = sport->efc;
> +	struct efc_node *node;
> +	struct efc_node *node_next;
> +
> +	list_for_each_entry_safe(node, node_next,
> +				 &sport->node_list, list_entry) {
> +		if (node->rnode.fc_id != FC_FID_FLOGI ||
> +		    !sport->is_vport) {
> +			efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
> +			continue;
> +		}
> +
> +		/*
> +		 * If this is a vport, logout of the fabric
> +		 * controller so that it deletes the vport
> +		 * on the switch.
> +		 */
> +		/* if link is down, don't send logo */
> +		if (efc->link_status == EFC_LINK_STATUS_DOWN) {
> +			efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
> +		} else {
> +			efc_log_debug(efc,
> +				      "[%s] sport shutdown vport, sending logo to node\n",
> +				      node->display_name);
> +
> +			if (efc->tt.els_send(efc, node, ELS_LOGO,
> +					     EFC_FC_FLOGI_TIMEOUT_SEC,
> +					EFC_FC_ELS_DEFAULT_RETRIES)) {
> +				/* sent LOGO, wait for response */
> +				efc_node_transition(node,
> +						    __efc_d_wait_logo_rsp,
> +						     NULL);
> +				continue;
> +			}
> +
> +			/*
> +			 * failed to send LOGO,
> +			 * go ahead and cleanup node anyways
> +			 */
> +			node_printf(node, "Failed to send LOGO\n");
> +			efc_node_post_event(node,
> +					    EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,
> +					    NULL);
> +		}
> +	}
> +}
> +
> +/* Clear the sport reference in the vport specification */
> +static void
> +efc_vport_link_down(struct efc_sli_port *sport)
> +{
> +	struct efc *efc = sport->efc;
> +	struct efc_vport_spec *vport;
> +
> +	list_for_each_entry(vport, &efc->vport_list, list_entry) {
> +		if (vport->sport == sport) {
> +			vport->sport = NULL;
> +			break;
> +		}
> +	}
> +}
> +

Similar here: Why is there no locking?
Or RCU lists?

> +static void *
> +__efc_sport_common(const char *funcname, struct efc_sm_ctx *ctx,
> +		   enum efc_sm_event evt, void *arg)
> +{
> +	struct efc_sli_port *sport = ctx->app;
> +	struct efc_domain *domain = sport->domain;
> +	struct efc *efc = sport->efc;
> +
> +	switch (evt) {
> +	case EFC_EVT_ENTER:
> +	case EFC_EVT_REENTER:
> +	case EFC_EVT_EXIT:
> +	case EFC_EVT_ALL_CHILD_NODES_FREE:
> +		break;
> +	case EFC_EVT_SPORT_ATTACH_OK:
> +			efc_sm_transition(ctx, __efc_sport_attached, NULL);
> +		break;
> +	case EFC_EVT_SHUTDOWN: {
> +		int node_list_empty;
> +
> +		/* Flag this sport as shutting down */
> +		sport->shutting_down = true;
> +
> +		if (sport->is_vport)
> +			efc_vport_link_down(sport);
> +
> +		node_list_empty = list_empty(&sport->node_list);
> +
> +		if (node_list_empty) {
> +			/* sm: node list is empty / efc_hw_port_free */
> +			/*
> +			 * Remove the sport from the domain's
> +			 * sparse vector lookup table
> +			 */
> +			efc_spv_set(domain->lookup, sport->fc_id, NULL);
> +			efc_sm_transition(ctx, __efc_sport_wait_port_free,
> +					  NULL);
> +			if (efc->tt.hw_port_free(efc, sport)) {
> +				efc_log_test(sport->efc,
> +					     "efc_hw_port_free failed\n");
> +				/* Not much we can do, free the sport anyways */
> +				efc_sport_free(sport);
> +			}
> +		} else {
> +			/* sm: node list is not empty / shutdown nodes */
> +			efc_sm_transition(ctx,
> +					  __efc_sport_wait_shutdown, NULL);
> +			efc_sport_shutdown(sport);
> +		}
> +		break;
> +	}
> +	default:
> +		efc_log_test(sport->efc, "[%s] %-20s %-20s not handled\n",
> +			     sport->display_name, funcname,
> +			     efc_sm_event_name(evt));
> +		break;
> +	}
> +
> +	return NULL;
> +}
> +
> +/* SLI port state machine: Physical sport allocated */
> +void *
> +__efc_sport_allocated(struct efc_sm_ctx *ctx,
> +		      enum efc_sm_event evt, void *arg)
> +{
> +	struct efc_sli_port *sport = ctx->app;
> +	struct efc_domain *domain = sport->domain;
> +
> +	sport_sm_trace(sport);
> +
> +	switch (evt) {
> +	/* the physical sport is attached */
> +	case EFC_EVT_SPORT_ATTACH_OK:
> +		efc_assert(sport == domain->sport, NULL);
> +		efc_sm_transition(ctx, __efc_sport_attached, NULL);
> +		break;
> +
> +	case EFC_EVT_SPORT_ALLOC_OK:
> +		/* ignore */
> +		break;
> +	default:
> +		__efc_sport_common(__func__, ctx, evt, arg);
> +		return NULL;
> +	}
> +	return NULL;
> +}
> +
> +/* SLI port state machine: Handle initial virtual port events */
> +void *
> +__efc_sport_vport_init(struct efc_sm_ctx *ctx,
> +		       enum efc_sm_event evt, void *arg)
> +{
> +	struct efc_sli_port *sport = ctx->app;
> +	struct efc *efc = sport->efc;
> +
> +	sport_sm_trace(sport);
> +
> +	switch (evt) {
> +	case EFC_EVT_ENTER: {
> +		__be64 be_wwpn = cpu_to_be64(sport->wwpn);
> +
> +		if (sport->wwpn == 0)
> +			efc_log_debug(efc, "vport: letting f/w select WWN\n");
> +
> +		if (sport->fc_id != U32_MAX) {
> +			efc_log_debug(efc, "vport: hard coding port id: %x\n",
> +				      sport->fc_id);
> +		}
> +
> +		efc_sm_transition(ctx, __efc_sport_vport_wait_alloc, NULL);
> +		/* If wwpn is zero, then we'll let the f/w */
> +		if (efc->tt.hw_port_alloc(efc, sport, sport->domain,
> +					  sport->wwpn == 0 ? NULL :
> +					  (uint8_t *)&be_wwpn)) {
> +			efc_log_err(efc, "Can't allocate port\n");
> +			break;
> +		}
> +
> +		break;
> +	}
> +	default:
> +		__efc_sport_common(__func__, ctx, evt, arg);
> +		return NULL;
> +	}
> +	return NULL;
> +}
> +
> +/**
> + * SLI port state machine:
> + * Wait for the HW SLI port allocation to complete
> + */
> +void *
> +__efc_sport_vport_wait_alloc(struct efc_sm_ctx *ctx,
> +			     enum efc_sm_event evt, void *arg)
> +{
> +	struct efc_sli_port *sport = ctx->app;
> +	struct efc *efc = sport->efc;
> +
> +	sport_sm_trace(sport);
> +
> +	switch (evt) {
> +	case EFC_EVT_SPORT_ALLOC_OK: {
> +		struct fc_els_flogi *sp;
> +		struct efc_node *fabric;
> +
> +		sp = (struct fc_els_flogi *)sport->service_params;
> +		/*
> +		 * If we let f/w assign wwn's,
> +		 * then sport wwn's with those returned by hw
> +		 */
> +		if (sport->wwnn == 0) {
> +			sport->wwnn = be64_to_cpu(sport->sli_wwnn);
> +			sport->wwpn = be64_to_cpu(sport->sli_wwpn);
> +			snprintf(sport->wwnn_str, sizeof(sport->wwnn_str),
> +				 "%016llX", sport->wwpn);
> +		}
> +
> +		/* Update the sport's service parameters */
> +		sp->fl_wwpn = cpu_to_be64(sport->wwpn);
> +		sp->fl_wwnn = cpu_to_be64(sport->wwnn);
> +
> +		/*
> +		 * if sport->fc_id is uninitialized,
> +		 * then request that the fabric node use FDISC
> +		 * to find an fc_id.
> +		 * Otherwise we're restoring vports, or we're in
> +		 * fabric emulation mode, so attach the fc_id
> +		 */
> +		if (sport->fc_id == U32_MAX) {
> +			fabric = efc_node_alloc(sport, FC_FID_FLOGI, false,
> +						false);
> +			if (!fabric) {
> +				efc_log_err(efc, "efc_node_alloc() failed\n");
> +				return NULL;
> +			}
> +			efc_node_transition(fabric, __efc_vport_fabric_init,
> +					    NULL);
> +		} else {
> +			snprintf(sport->wwnn_str, sizeof(sport->wwnn_str),
> +				 "%016llX", sport->wwpn);
> +			efc_sport_attach(sport, sport->fc_id);
> +		}
> +		efc_sm_transition(ctx, __efc_sport_vport_allocated, NULL);
> +		break;
> +	}
> +	default:
> +		__efc_sport_common(__func__, ctx, evt, arg);
> +		return NULL;
> +	}
> +	return NULL;
> +}
> +
> +/**
> + * SLI port state machine: virtual sport allocated.
> + *
> + * This state is entered after the sport is allocated;
> + * it then waits for a fabric node
> + * FDISC to complete, which requests a sport attach.
> + * The sport attach complete is handled in this state.
> + */
> +
> +void *
> +__efc_sport_vport_allocated(struct efc_sm_ctx *ctx,
> +			    enum efc_sm_event evt, void *arg)
> +{
> +	struct efc_sli_port *sport = ctx->app;
> +	struct efc *efc = sport->efc;
> +
> +	sport_sm_trace(sport);
> +
> +	switch (evt) {
> +	case EFC_EVT_SPORT_ATTACH_OK: {
> +		struct efc_node *node;
> +
> +		/* Find our fabric node, and forward this event */
> +		node = efc_node_find(sport, FC_FID_FLOGI);
> +		if (!node) {
> +			efc_log_test(efc, "can't find node %06x\n",
> +				     FC_FID_FLOGI);
> +			break;
> +		}
> +		/* sm: / forward sport attach to fabric node */
> +		efc_node_post_event(node, evt, NULL);
> +		efc_sm_transition(ctx, __efc_sport_attached, NULL);
> +		break;
> +	}
> +	default:
> +		__efc_sport_common(__func__, ctx, evt, arg);
> +		return NULL;
> +	}
> +	return NULL;
> +}
> +
> +static void
> +efc_vport_update_spec(struct efc_sli_port *sport)
> +{
> +	struct efc *efc = sport->efc;
> +	struct efc_vport_spec *vport;
> +
> +	list_for_each_entry(vport, &efc->vport_list, list_entry) {
> +		if (vport->sport == sport) {
> +			vport->wwnn = sport->wwnn;
> +			vport->wwpn = sport->wwpn;
> +			vport->tgt_data = sport->tgt_data;
> +			vport->ini_data = sport->ini_data;
> +			break;
> +		}
> +	}
> +}
> +
> +/* State entered after the sport attach has completed */
> +void *
> +__efc_sport_attached(struct efc_sm_ctx *ctx,
> +		     enum efc_sm_event evt, void *arg)
> +{
> +	struct efc_sli_port *sport = ctx->app;
> +	struct efc *efc = sport->efc;
> +
> +	sport_sm_trace(sport);
> +
> +	switch (evt) {
> +	case EFC_EVT_ENTER: {
> +		struct efc_node *node;
> +
> +		efc_log_debug(efc,
> +			      "[%s] SPORT attached WWPN %016llX WWNN %016llX\n",
> +			      sport->display_name,
> +			      sport->wwpn, sport->wwnn);
> +
> +		list_for_each_entry(node, &sport->node_list, list_entry) {
> +			efc_node_update_display_name(node);
> +		}
> +
> +		sport->tgt_id = sport->fc_id;
> +
> +		efc->tt.new_sport(efc, sport);
> +
> +		/*
> +		 * Update the vport (if its not the physical sport)
> +		 * parameters
> +		 */
> +		if (sport->is_vport)
> +			efc_vport_update_spec(sport);
> +		break;
> +	}
> +
> +	case EFC_EVT_EXIT:
> +		efc_log_debug(efc,
> +			      "[%s] SPORT deattached WWPN %016llX WWNN %016llX\n",
> +			      sport->display_name,
> +			      sport->wwpn, sport->wwnn);
> +
> +		efc->tt.del_sport(efc, sport);
> +		break;
> +	default:
> +		__efc_sport_common(__func__, ctx, evt, arg);
> +		return NULL;
> +	}
> +	return NULL;
> +}
> +
> +
> +/* SLI port state machine: Wait for the node shutdowns to complete */
> +void *
> +__efc_sport_wait_shutdown(struct efc_sm_ctx *ctx,
> +			  enum efc_sm_event evt, void *arg)
> +{
> +	struct efc_sli_port *sport = ctx->app;
> +	struct efc_domain *domain = sport->domain;
> +	struct efc *efc = sport->efc;
> +
> +	sport_sm_trace(sport);
> +
> +	switch (evt) {
> +	case EFC_EVT_SPORT_ALLOC_OK:
> +	case EFC_EVT_SPORT_ALLOC_FAIL:
> +	case EFC_EVT_SPORT_ATTACH_OK:
> +	case EFC_EVT_SPORT_ATTACH_FAIL:
> +		/* ignore these events - just wait for the all free event */
> +		break;
> +
> +	case EFC_EVT_ALL_CHILD_NODES_FREE: {
> +		/*
> +		 * Remove the sport from the domain's
> +		 * sparse vector lookup table
> +		 */
> +		efc_spv_set(domain->lookup, sport->fc_id, NULL);
> +		efc_sm_transition(ctx, __efc_sport_wait_port_free, NULL);
> +		if (efc->tt.hw_port_free(efc, sport)) {
> +			efc_log_err(sport->efc, "efc_hw_port_free failed\n");
> +			/* Not much we can do, free the sport anyways */
> +			efc_sport_free(sport);
> +		}
> +		break;
> +	}
> +	default:
> +		__efc_sport_common(__func__, ctx, evt, arg);
> +		return NULL;
> +	}
> +	return NULL;
> +}
> +
> +/* SLI port state machine: Wait for the HW's port free to complete */
> +void *
> +__efc_sport_wait_port_free(struct efc_sm_ctx *ctx,
> +			   enum efc_sm_event evt, void *arg)
> +{
> +	struct efc_sli_port *sport = ctx->app;
> +
> +	sport_sm_trace(sport);
> +
> +	switch (evt) {
> +	case EFC_EVT_SPORT_ATTACH_OK:
> +		/* Ignore as we are waiting for the free CB */
> +		break;
> +	case EFC_EVT_SPORT_FREE_OK: {
> +		/* All done, free myself */
> +		/* sm: / efc_sport_free */
> +		efc_sport_free(sport);
> +		break;
> +	}
> +	default:
> +		__efc_sport_common(__func__, ctx, evt, arg);
> +		return NULL;
> +	}
> +	return NULL;
> +}
> +
> +/* Use the vport specification to find the associated vports and start them */
> +int
> +efc_vport_start(struct efc_domain *domain)
> +{
> +	struct efc *efc = domain->efc;
> +	struct efc_vport_spec *vport;
> +	struct efc_vport_spec *next;
> +	struct efc_sli_port *sport;
> +	int rc = 0;
> +	u8 found = false;
> +
> +	list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
> +		if (!vport->sport) {
> +			found = true;
> +			break;
> +		}
> +	}
> +
> +	if (found && vport) {
> +		sport = efc_sport_alloc(domain, vport->wwpn,
> +					vport->wwnn, vport->fc_id,
> +					vport->enable_ini,
> +					vport->enable_tgt);
> +		vport->sport = sport;
> +		if (!sport) {
> +			rc = -1;
> +		} else {
> +			sport->is_vport = true;
> +			sport->tgt_data = vport->tgt_data;
> +			sport->ini_data = vport->ini_data;
> +
> +			efc_sm_transition(&sport->sm, __efc_sport_vport_init,
> +					  NULL);
> +		}
> +	}
> +
> +	return rc;
> +}
> +
> +/* Allocate a new virtual SLI port */
> +int
> +efc_sport_vport_new(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
> +		    u32 fc_id, bool ini, bool tgt, void *tgt_data,
> +		    void *ini_data, bool restore_vport)
> +{
> +	struct efc_sli_port *sport;
> +
> +	if (ini && domain->efc->enable_ini == 0) {
> +		efc_log_test(domain->efc,
> +			     "driver initiator functionality not enabled\n");
> +		return -1;
> +	}
> +
> +	if (tgt && domain->efc->enable_tgt == 0) {
> +		efc_log_test(domain->efc,
> +			     "driver target functionality not enabled\n");
> +		return -1;
> +	}
> +
> +	/*
> +	 * Create a vport spec if we need to recreate
> +	 * this vport after a link up event
> +	 */
> +	if (restore_vport) {
> +		if (efc_vport_create_spec(domain->efc, wwnn, wwpn, fc_id,
> +					  ini, tgt, tgt_data, ini_data)) {
> +			efc_log_test(domain->efc,
> +				     "failed to create vport object entry\n");
> +			return -1;
> +		}
> +		return efc_vport_start(domain);
> +	}
> +
> +	/* Allocate a sport */
> +	sport = efc_sport_alloc(domain, wwpn, wwnn, fc_id, ini, tgt);
> +
> +	if (!sport)
> +		return -1;
> +
> +	sport->is_vport = true;
> +	sport->tgt_data = tgt_data;
> +	sport->ini_data = ini_data;
> +

Isn't there a race condition?
The port is already allocated, but not fully populated.
Can someone access the sport before these three lines are executed?

> +	/* Transition to vport_init */
> +	efc_sm_transition(&sport->sm, __efc_sport_vport_init, NULL);
> +
> +	return 0;
> +}
> +
> +/* Remove a previously-allocated virtual port */
> +int
> +efc_sport_vport_del(struct efc *efc, struct efc_domain *domain,
> +		    u64 wwpn, uint64_t wwnn)
> +{
> +	struct efc_sli_port *sport;
> +	int found = 0;
> +	struct efc_vport_spec *vport;
> +	struct efc_vport_spec *next;
> +
> +	/* walk the efc_vport_list and remove from there */
> +	list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
> +		if (vport->wwpn == wwpn && vport->wwnn == wwnn) {
> +			list_del(&vport->list_entry);
> +			kfree(vport);
> +			break;
> +		}
> +	}
> +

Locking?


Cheers,

Hannes
-- 
Dr. Hannes Reinecke		      Teamlead Storage & Networking
hare@suse.de			                  +49 911 74053 688
SUSE Software Solutions Germany GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), GF: Felix Imendörffer

  reply	other threads:[~2020-01-09  7:34 UTC|newest]

Thread overview: 77+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-12-20 22:36 [PATCH v2 00/32] [NEW] efct: Broadcom (Emulex) FC Target driver James Smart
2019-12-20 22:36 ` [PATCH v2 01/32] elx: libefc_sli: SLI-4 register offsets and field definitions James Smart
2020-01-08  7:11   ` Hannes Reinecke
2020-01-09  0:59     ` James Smart
2019-12-20 22:36 ` [PATCH v2 02/32] elx: libefc_sli: SLI Descriptors and Queue entries James Smart
2020-01-08  7:24   ` Hannes Reinecke
2020-01-09  1:00     ` James Smart
2019-12-20 22:36 ` [PATCH v2 03/32] elx: libefc_sli: Data structures and defines for mbox commands James Smart
2020-01-08  7:32   ` Hannes Reinecke
2020-01-09  1:03     ` James Smart
2019-12-20 22:36 ` [PATCH v2 04/32] elx: libefc_sli: queue create/destroy/parse routines James Smart
2020-01-08  7:45   ` Hannes Reinecke
2020-01-09  1:04     ` James Smart
2019-12-20 22:36 ` [PATCH v2 05/32] elx: libefc_sli: Populate and post different WQEs James Smart
2020-01-08  7:54   ` Hannes Reinecke
2020-01-09  1:04     ` James Smart
2019-12-20 22:36 ` [PATCH v2 06/32] elx: libefc_sli: bmbx routines and SLI config commands James Smart
2020-01-08  8:05   ` Hannes Reinecke
2019-12-20 22:36 ` [PATCH v2 07/32] elx: libefc_sli: APIs to setup SLI library James Smart
2020-01-08  8:22   ` Hannes Reinecke
2020-01-09  1:29     ` James Smart
2019-12-20 22:36 ` [PATCH v2 08/32] elx: libefc: Generic state machine framework James Smart
2020-01-09  7:05   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 09/32] elx: libefc: Emulex FC discovery library APIs and definitions James Smart
2020-01-09  7:16   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 10/32] elx: libefc: FC Domain state machine interfaces James Smart
2020-01-09  7:27   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 11/32] elx: libefc: SLI and FC PORT " James Smart
2020-01-09  7:34   ` Hannes Reinecke [this message]
2019-12-20 22:37 ` [PATCH v2 12/32] elx: libefc: Remote node " James Smart
2020-01-09  8:31   ` Hannes Reinecke
2020-01-09  9:57   ` Daniel Wagner
2019-12-20 22:37 ` [PATCH v2 13/32] elx: libefc: Fabric " James Smart
2020-01-09  8:34   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 14/32] elx: libefc: FC node ELS and state handling James Smart
2020-01-09  8:39   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 15/32] elx: efct: Data structures and defines for hw operations James Smart
2020-01-09  8:41   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 16/32] elx: efct: Driver initialization routines James Smart
2020-01-09  9:01   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 17/32] elx: efct: Hardware queues creation and deletion James Smart
2020-01-09  9:10   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 18/32] elx: efct: RQ buffer, memory pool allocation and deallocation APIs James Smart
2020-01-09  9:13   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 19/32] elx: efct: Hardware IO and SGL initialization James Smart
2020-01-09  9:22   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 20/32] elx: efct: Hardware queues processing James Smart
2020-01-09  9:24   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 21/32] elx: efct: Unsolicited FC frame processing routines James Smart
2020-01-09  9:26   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 22/32] elx: efct: Extended link Service IO handling James Smart
2020-01-09  9:38   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 23/32] elx: efct: SCSI IO handling routines James Smart
2020-01-09  9:41   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 24/32] elx: efct: LIO backend interface routines James Smart
2020-01-09  3:56   ` Bart Van Assche
2019-12-20 22:37 ` [PATCH v2 25/32] elx: efct: Hardware IO submission routines James Smart
2020-01-09  9:52   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 26/32] elx: efct: link statistics and SFP data James Smart
2020-01-09 10:12   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 27/32] elx: efct: xport and hardware teardown routines James Smart
2020-01-09 10:14   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 28/32] elx: efct: IO timeout handling routines James Smart
2020-01-09 11:27   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 29/32] elx: efct: Firmware update, async link processing James Smart
2020-01-09 11:45   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 30/32] elx: efct: scsi_transport_fc host interface support James Smart
2020-01-09 11:46   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 31/32] elx: efct: Add Makefile and Kconfig for efct driver James Smart
2019-12-20 23:17   ` Randy Dunlap
2020-01-09 11:47   ` Hannes Reinecke
2019-12-20 22:37 ` [PATCH v2 32/32] elx: efct: Tie into kernel Kconfig and build process James Smart
2019-12-24  7:45   ` kbuild test robot
2019-12-24 21:01   ` Nathan Chancellor
2019-12-25 16:09     ` James Smart
2020-01-09 11:47   ` Hannes Reinecke
2019-12-29 18:27 ` [PATCH v2 00/32] [NEW] efct: Broadcom (Emulex) FC Target driver Sebastian Herbszt

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=aee9880a-81d4-7e31-bb9d-d33e7ca375d8@suse.de \
    --to=hare@suse.de \
    --cc=bvanassche@acm.org \
    --cc=dwagner@suse.de \
    --cc=jsmart2021@gmail.com \
    --cc=linux-scsi@vger.kernel.org \
    --cc=maier@linux.ibm.com \
    --cc=ram.vegesna@broadcom.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).