From mboxrd@z Thu Jan 1 00:00:00 1970
From: emilne@redhat.com (Ewan D. Milne)
Date: Fri, 10 Aug 2018 13:53:58 -0400
Subject: [PATCH v2] nvmet_fc: support target port removal with nvmet layer
In-Reply-To: <20180809234814.14680-1-jsmart2021@gmail.com>
References: <20180809234814.14680-1-jsmart2021@gmail.com>
Message-ID: <1533923638.7802.128.camel@localhost.localdomain>

You need to add function declarations for nvmet_fc_portentry_unbind_tgt()
and nvmet_fc_portentry_rebind_tgt(). [-Werror=implicit-function-declaration]

-Ewan

On Thu, 2018-08-09 at 16:48 -0700, James Smart wrote:
> Currently, if a targetport has been connected to via the nvmet
> config (in other words, the add_port() transport routine called, and
> the nvmet port pointer stored for using in upcalls on new io), and
> if the targetport is then removed (say the lldd driver decides to
> unload or fully reset its hardware) and then re-added (the lldd
> driver reloads or reinits its hardware), the port pointer has been
> lost so there's no way to continue to post commands up to nvmet
> via the transport port.
>
> Correct by allocating a small "port context" structure that will
> be linked to by the targetport. The context will save the targetport
> WWN's and the nvmet port pointer to use for it. Initial allocation
> will occur when the targetport is bound to via add_port. The context
> will be deallocated when remove_port() is called. If a targetport
> is removed while nvmet has the active port context, the targetport
> will be unlinked from the port context before removal. If a new
> targetport is registered, the port contexts without a binding are
> looked through and if the WWN's match (so it's the same as nvmet's
> port context) the port context is linked to the new target port.
> Thus new io can be received on the new targetport and operation
> resumes with nvmet.
>
> Additionally, this also resolves nvmet configuration changing out
> from underneath of the nvme-fc target port (for example: a
> nvmetcli clear).
>
> Signed-off-by: James Smart
>
> ---
> v2:
>   v1 could upcall with nvmet port pointer null (if command received
>   after the port was removed from nvmet). Added check at top of
>   async cmd rcv for null nvmet port and abort io on link if true.
> ---
> drivers/nvme/target/fc.c | 128 ++++++++++++++++++++++++++++++++++++++++++++---
> 1 file changed, 120 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
> index 5215dc48a7b7..fbbaf68ae54f 100644
> --- a/drivers/nvme/target/fc.c
> +++ b/drivers/nvme/target/fc.c
> @@ -110,11 +110,19 @@ struct nvmet_fc_tgtport {
> struct list_head ls_busylist;
> struct list_head assoc_list;
> struct ida assoc_cnt;
> - struct nvmet_port *port;
> + struct nvmet_fc_port_entry *pe;
> struct kref ref;
> u32 max_sg_cnt;
> };
>
> +struct nvmet_fc_port_entry {
> + struct nvmet_fc_tgtport *tgtport;
> + struct nvmet_port *port;
> + u64 node_name;
> + u64 port_name;
> + struct list_head pe_list;
> +};
> +
> struct nvmet_fc_defer_fcp_req {
> struct list_head req_list;
> struct nvmefc_tgt_fcp_req *fcp_req;
> @@ -132,7 +140,6 @@ struct nvmet_fc_tgt_queue {
> atomic_t zrspcnt;
> atomic_t rsn;
> spinlock_t qlock;
> - struct nvmet_port *port;
> struct nvmet_cq nvme_cq;
> struct nvmet_sq nvme_sq;
> struct nvmet_fc_tgt_assoc *assoc;
> @@ -221,6 +228,7 @@ static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
>
> static LIST_HEAD(nvmet_fc_target_list);
> static DEFINE_IDA(nvmet_fc_tgtport_cnt);
> +static LIST_HEAD(nvmet_fc_portentry_list);
>
>
> static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
> @@ -645,7 +653,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
> queue->qid = qid;
> queue->sqsize = sqsize;
> queue->assoc = assoc;
> - queue->port = assoc->tgtport->port;
> queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
> INIT_LIST_HEAD(&queue->fod_list);
> INIT_LIST_HEAD(&queue->avail_defer_list);
> @@ -957,6 +964,83 @@ nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
> return ret;
> }
>
> +static void
> +nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
> + struct nvmet_fc_port_entry *pe,
> + struct nvmet_port *port)
> +{
> + lockdep_assert_held(&nvmet_fc_tgtlock);
> +
> + pe->tgtport = tgtport;
> + tgtport->pe = pe;
> +
> + pe->port = port;
> + port->priv = pe;
> +
> + pe->node_name = tgtport->fc_target_port.node_name;
> + pe->port_name = tgtport->fc_target_port.port_name;
> + INIT_LIST_HEAD(&pe->pe_list);
> +
> + list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
> +}
> +
> +static void
> +nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
> +{
> + unsigned long flags;
> +
> + spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
> + if (pe->tgtport)
> + pe->tgtport->pe = NULL;
> + list_del(&pe->pe_list);
> + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
> +}
> +
> +/*
> + * called when a targetport deregisters. Breaks the relationship
> + * with the nvmet port, but leaves the port_entry in place so that
> + * re-registration can resume operation.
> + */
> +static void
> +nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
> +{
> + struct nvmet_fc_port_entry *pe;
> + unsigned long flags;
> +
> + spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
> + pe = tgtport->pe;
> + if (pe)
> + pe->tgtport = NULL;
> + tgtport->pe = NULL;
> + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
> +}
> +
> +/*
> + * called when a new targetport is registered. Looks in the
> + * existing nvmet port_entries to see if the nvmet layer is
> + * configured for the targetport's wwn's. (the targetport existed,
> + * nvmet configured, the lldd unregistered the tgtport, and is now
> + * reregistering the same targetport). If so, set the nvmet port
> + * port entry on the targetport.
> + */
> +static void
> +nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
> +{
> + struct nvmet_fc_port_entry *pe;
> + unsigned long flags;
> +
> + spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
> + list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
> + if ((tgtport->fc_target_port.node_name == pe->node_name) &&
> + (tgtport->fc_target_port.port_name == pe->port_name)) {
> + WARN_ON(pe->tgtport);
> + tgtport->pe = pe;
> + pe->tgtport = tgtport;
> + break;
> + }
> + }
> + spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
> +}
>
> /**
> * nvme_fc_register_targetport - transport entry point called by an
> @@ -1034,6 +1118,8 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
> goto out_free_newrec;
> }
>
> + nvmet_fc_portentry_rebind_tgt(newrec);
> +
> spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
> list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
> spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
> @@ -1171,6 +1257,8 @@ nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
> {
> struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
>
> + nvmet_fc_portentry_unbind_tgt(tgtport);
> +
> /* terminate any outstanding associations */
> __nvmet_fc_free_assocs(tgtport);
>
> @@ -2147,7 +2235,7 @@ nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
>
>
> /*
> - * Actual processing routine for received FC-NVME LS Requests from the LLD
> + * Actual processing routine for received FC-NVME I/O Requests from the LLD
> */
> static void
> nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
> @@ -2158,6 +2246,13 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
> int ret;
>
> /*
> + * if there is no nvmet mapping to the targetport there
> + * shouldn't be requests. just terminate them.
> + */
> + if (!tgtport->pe)
> + goto transport_error;
> +
> + /*
> * Fused commands are currently not supported in the linux
> * implementation.
> *
> @@ -2184,7 +2279,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
>
> fod->req.cmd = &fod->cmdiubuf.sqe;
> fod->req.rsp = &fod->rspiubuf.cqe;
> - fod->req.port = fod->queue->port;
> + fod->req.port = tgtport->pe->port;
>
> /* clear any response payload */
> memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
> @@ -2508,6 +2603,7 @@ static int
> nvmet_fc_add_port(struct nvmet_port *port)
> {
> struct nvmet_fc_tgtport *tgtport;
> + struct nvmet_fc_port_entry *pe;
> struct nvmet_fc_traddr traddr = { 0L, 0L };
> unsigned long flags;
> int ret;
> @@ -2524,24 +2620,40 @@ nvmet_fc_add_port(struct nvmet_port *port)
> if (ret)
> return ret;
>
> + pe = kzalloc(sizeof(*pe), GFP_KERNEL);
> + if (!pe)
> + return -ENOMEM;
> +
> ret = -ENXIO;
> spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
> list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
> if ((tgtport->fc_target_port.node_name == traddr.nn) &&
> (tgtport->fc_target_port.port_name == traddr.pn)) {
> - tgtport->port = port;
> - ret = 0;
> + /* a FC port can only be 1 nvmet port id */
> + if (!tgtport->pe) {
> + nvmet_fc_portentry_bind(tgtport, pe, port);
> + ret = 0;
> + } else
> + ret = -EALREADY;
> break;
> }
> }
> spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
> +
> + if (ret)
> + kfree(pe);
> +
> return ret;
> }
>
> static void
> nvmet_fc_remove_port(struct nvmet_port *port)
> {
> - /* nothing to do */
> + struct nvmet_fc_port_entry *pe = port->priv;
> +
> + nvmet_fc_portentry_unbind(pe);
> +
> + kfree(pe);
> }
>
> static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
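
For reference, the declarations I mean would look roughly like the following,
presumably grouped with the other static prototypes near the top of fc.c;
the placement is only a suggestion, the prototypes just mirror the definitions
in the patch:

    /* forward declarations for the new port_entry helpers; placement is assumed */
    static void nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport);
    static void nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport);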