Diffstat (limited to 'drivers/scsi/libfc')
-rw-r--r--   drivers/scsi/libfc/Makefile     |   15
-rw-r--r--   drivers/scsi/libfc/fc_disc.c    |  749
-rw-r--r--   drivers/scsi/libfc/fc_elsct.c   |  140
-rw-r--r--   drivers/scsi/libfc/fc_encode.h  |  951
-rw-r--r--   drivers/scsi/libfc/fc_exch.c    | 2712
-rw-r--r--   drivers/scsi/libfc/fc_fcp.c     | 2313
-rw-r--r--   drivers/scsi/libfc/fc_frame.c   |   79
-rw-r--r--   drivers/scsi/libfc/fc_libfc.c   |  319
-rw-r--r--   drivers/scsi/libfc/fc_libfc.h   |  127
-rw-r--r--   drivers/scsi/libfc/fc_lport.c   | 2200
-rw-r--r--   drivers/scsi/libfc/fc_npiv.c    |  147
-rw-r--r--   drivers/scsi/libfc/fc_rport.c   | 2292
12 files changed, 12044 insertions(+), 0 deletions(-)
diff --git a/drivers/scsi/libfc/Makefile b/drivers/scsi/libfc/Makefile
new file mode 100644
index 000000000..65396f86c
--- /dev/null
+++ b/drivers/scsi/libfc/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+# $Id: Makefile
+
+obj-$(CONFIG_LIBFC) += libfc.o
+
+libfc-objs := \
+ fc_libfc.o \
+ fc_disc.o \
+ fc_exch.o \
+ fc_elsct.o \
+ fc_frame.o \
+ fc_lport.o \
+ fc_rport.o \
+ fc_fcp.o \
+ fc_npiv.o
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
new file mode 100644
index 000000000..942fc60f7
--- /dev/null
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -0,0 +1,749 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Target Discovery
+ *
+ * This block discovers all FC-4 remote ports, including FCP initiators. It
+ * also handles RSCN events and re-discovery if necessary.
+ */
+
+/*
+ * DISC LOCKING
+ *
+ * The disc mutex can be locked when acquiring rport locks, but may not
+ * be held when acquiring the lport lock. Refer to fc_lport.c for more
+ * details.
+ */
+
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/rculist.h>
+
+#include <asm/unaligned.h>
+
+#include <scsi/fc/fc_gs.h>
+
+#include <scsi/libfc.h>
+
+#include "fc_libfc.h"
+
+#define FC_DISC_RETRY_LIMIT 3 /* max retries */
+#define FC_DISC_RETRY_DELAY 500UL /* (msecs) delay */
+
+static void fc_disc_gpn_ft_req(struct fc_disc *);
+static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_disc_done(struct fc_disc *, enum fc_disc_event);
+static void fc_disc_timeout(struct work_struct *);
+static int fc_disc_single(struct fc_lport *, struct fc_disc_port *);
+static void fc_disc_restart(struct fc_disc *);
+
+/**
+ * fc_disc_stop_rports() - Delete all the remote ports associated with the lport
+ * @disc: The discovery job to stop remote ports on
+ */
+static void fc_disc_stop_rports(struct fc_disc *disc)
+{
+ struct fc_rport_priv *rdata;
+
+ lockdep_assert_held(&disc->disc_mutex);
+
+ list_for_each_entry(rdata, &disc->rports, peers) {
+ if (kref_get_unless_zero(&rdata->kref)) {
+ fc_rport_logoff(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+ }
+}
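
The loop above is the standard pattern for walking a kref-counted list: kref_get_unless_zero() skips entries whose refcount has already dropped to zero (they are mid-teardown), and the temporary reference keeps the entry valid across the logoff call. A minimal stand-alone sketch of the same pattern; the item/visit_all names are illustrative, not libfc APIs:

    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
            struct kref kref;
            struct list_head peers;
    };

    static void item_release(struct kref *kref)
    {
            kfree(container_of(kref, struct item, kref));
    }

    static void visit_all(struct list_head *head)
    {
            struct item *it;

            list_for_each_entry(it, head, peers) {
                    if (!kref_get_unless_zero(&it->kref))
                            continue;       /* entry already being torn down */
                    /* ... act on 'it' while holding a reference ... */
                    kref_put(&it->kref, item_release);
            }
    }
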
+
+/**
+ * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
+ * @disc: The discovery object to which the RSCN applies
+ * @fp: The RSCN frame
+ */
+static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
+{
+ struct fc_lport *lport;
+ struct fc_els_rscn *rp;
+ struct fc_els_rscn_page *pp;
+ struct fc_seq_els_data rjt_data;
+ unsigned int len;
+ int redisc = 0;
+ enum fc_els_rscn_ev_qual ev_qual;
+ enum fc_els_rscn_addr_fmt fmt;
+ LIST_HEAD(disc_ports);
+ struct fc_disc_port *dp, *next;
+
+ lockdep_assert_held(&disc->disc_mutex);
+
+ lport = fc_disc_lport(disc);
+
+ FC_DISC_DBG(disc, "Received an RSCN event\n");
+
+ /* make sure the frame contains an RSCN message */
+ rp = fc_frame_payload_get(fp, sizeof(*rp));
+ if (!rp)
+ goto reject;
+ /* make sure the page length is as expected (4 bytes) */
+ if (rp->rscn_page_len != sizeof(*pp))
+ goto reject;
+ /* get the RSCN payload length */
+ len = ntohs(rp->rscn_plen);
+ if (len < sizeof(*rp))
+ goto reject;
+ /* make sure the frame contains the expected payload */
+ rp = fc_frame_payload_get(fp, len);
+ if (!rp)
+ goto reject;
+ /* payload must be a multiple of the RSCN page size */
+ len -= sizeof(*rp);
+ if (len % sizeof(*pp))
+ goto reject;
+
+ for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
+ ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
+ ev_qual &= ELS_RSCN_EV_QUAL_MASK;
+ fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
+ fmt &= ELS_RSCN_ADDR_FMT_MASK;
+ /*
+ * if we get an address format other than port
+ * (area, domain, fabric), then do a full discovery
+ */
+ switch (fmt) {
+ case ELS_ADDR_FMT_PORT:
+ FC_DISC_DBG(disc, "Port address format for port "
+ "(%6.6x)\n", ntoh24(pp->rscn_fid));
+ dp = kzalloc(sizeof(*dp), GFP_KERNEL);
+ if (!dp) {
+ redisc = 1;
+ break;
+ }
+ dp->lp = lport;
+ dp->port_id = ntoh24(pp->rscn_fid);
+ list_add_tail(&dp->peers, &disc_ports);
+ break;
+ case ELS_ADDR_FMT_AREA:
+ case ELS_ADDR_FMT_DOM:
+ case ELS_ADDR_FMT_FAB:
+ default:
+ FC_DISC_DBG(disc, "Address format is (%d)\n", fmt);
+ redisc = 1;
+ break;
+ }
+ }
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+
+ /*
+ * If not doing a complete rediscovery, do GPN_ID on
+ * the individual ports mentioned in the list.
+ * If any of these get an error, do a full rediscovery.
+ * In any case, go through the list and free the entries.
+ */
+ list_for_each_entry_safe(dp, next, &disc_ports, peers) {
+ list_del(&dp->peers);
+ if (!redisc)
+ redisc = fc_disc_single(lport, dp);
+ kfree(dp);
+ }
+ if (redisc) {
+ FC_DISC_DBG(disc, "RSCN received: rediscovering\n");
+ fc_disc_restart(disc);
+ } else {
+ FC_DISC_DBG(disc, "RSCN received: not rediscovering. "
+ "redisc %d state %d in_prog %d\n",
+ redisc, lport->state, disc->pending);
+ }
+ fc_frame_free(fp);
+ return;
+reject:
+ FC_DISC_DBG(disc, "Received a bad RSCN frame\n");
+ rjt_data.reason = ELS_RJT_LOGIC;
+ rjt_data.explan = ELS_EXPL_NONE;
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+ fc_frame_free(fp);
+}
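
The reject checks above pin down the RSCN wire format: a 4-byte fc_els_rscn header whose rscn_plen must cover the header plus a whole number of 4-byte fc_els_rscn_page entries. A hedged restatement of that arithmetic as a stand-alone validator, assuming the struct definitions from scsi/fc/fc_els.h (rscn_page_count is an illustrative name):

    #include <scsi/fc/fc_els.h>

    /* Return the number of RSCN pages, or -1 if the lengths are inconsistent. */
    static int rscn_page_count(const struct fc_els_rscn *rp, unsigned int plen)
    {
            if (rp->rscn_page_len != sizeof(struct fc_els_rscn_page))
                    return -1;      /* page size must be 4 bytes */
            if (plen < sizeof(*rp))
                    return -1;      /* shorter than the header itself */
            if ((plen - sizeof(*rp)) % sizeof(struct fc_els_rscn_page))
                    return -1;      /* payload is not whole pages */
            return (plen - sizeof(*rp)) / sizeof(struct fc_els_rscn_page);
    }
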
+
+/**
+ * fc_disc_recv_req() - Handle incoming requests
+ * @lport: The local port receiving the request
+ * @fp: The request frame
+ *
+ * Locking Note: This function is called from the EM and will lock
+ * the disc_mutex before calling the handler for the
+ * request.
+ */
+static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+{
+ u8 op;
+ struct fc_disc *disc = &lport->disc;
+
+ op = fc_frame_payload_op(fp);
+ switch (op) {
+ case ELS_RSCN:
+ mutex_lock(&disc->disc_mutex);
+ fc_disc_recv_rscn_req(disc, fp);
+ mutex_unlock(&disc->disc_mutex);
+ break;
+ default:
+ FC_DISC_DBG(disc, "Received an unsupported request, "
+ "the opcode is (%x)\n", op);
+ fc_frame_free(fp);
+ break;
+ }
+}
+
+/**
+ * fc_disc_restart() - Restart discovery
+ * @disc: The discovery object to be restarted
+ */
+static void fc_disc_restart(struct fc_disc *disc)
+{
+ lockdep_assert_held(&disc->disc_mutex);
+
+ if (!disc->disc_callback)
+ return;
+
+ FC_DISC_DBG(disc, "Restarting discovery\n");
+
+ disc->requested = 1;
+ if (disc->pending)
+ return;
+
+ /*
+ * Advance disc_id. This is an arbitrary non-zero number that will
+ * match the value in the fc_rport_priv after discovery for all
+ * freshly-discovered remote ports. Avoid wrapping to zero.
+ */
+ disc->disc_id = (disc->disc_id + 2) | 1;
+ disc->retry_count = 0;
+ fc_disc_gpn_ft_req(disc);
+}
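
The (disc_id + 2) | 1 step deserves a second look: OR-ing with 1 keeps disc_id odd, and an odd 16-bit value can never be zero, so the "never discovered" sentinel value 0 is never reused even after wraparound. A stand-alone user-space check of the invariant:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t id = 0;
            unsigned int i;

            /* Walk past the full 16-bit range: always odd, never zero. */
            for (i = 0; i < 70000; i++) {
                    id = (id + 2) | 1;
                    assert(id != 0 && (id & 1));
            }
            return 0;
    }
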
+
+/**
+ * fc_disc_start() - Start discovery on a local port
+ * @lport: The local port to have discovery started on
+ * @disc_callback: Callback function to be called when discovery is complete
+ */
+static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
+ enum fc_disc_event),
+ struct fc_lport *lport)
+{
+ struct fc_disc *disc = &lport->disc;
+
+ /*
+ * At this point we may have a new disc job or an existing
+ * one. Either way, let's lock when we make changes to it
+ * and send the GPN_FT request.
+ */
+ mutex_lock(&disc->disc_mutex);
+ disc->disc_callback = disc_callback;
+ fc_disc_restart(disc);
+ mutex_unlock(&disc->disc_mutex);
+}
+
+/**
+ * fc_disc_done() - Discovery has been completed
+ * @disc: The discovery context
+ * @event: The discovery completion status
+ */
+static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
+{
+ struct fc_lport *lport = fc_disc_lport(disc);
+ struct fc_rport_priv *rdata;
+
+ lockdep_assert_held(&disc->disc_mutex);
+ FC_DISC_DBG(disc, "Discovery complete\n");
+
+ disc->pending = 0;
+ if (disc->requested) {
+ fc_disc_restart(disc);
+ return;
+ }
+
+ /*
+ * Go through all remote ports. If they were found in the latest
+ * discovery, reverify or log them in. Otherwise, log them out.
+ * Skip ports which were never discovered. These are the dNS port
+ * and ports which were created by PLOGI.
+ *
+ * We don't need to use the _rcu variant here as the rport list
+ * is protected by the disc mutex which is already held on entry.
+ */
+ list_for_each_entry(rdata, &disc->rports, peers) {
+ if (!kref_get_unless_zero(&rdata->kref))
+ continue;
+ if (rdata->disc_id) {
+ if (rdata->disc_id == disc->disc_id)
+ fc_rport_login(rdata);
+ else
+ fc_rport_logoff(rdata);
+ }
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+ mutex_unlock(&disc->disc_mutex);
+ disc->disc_callback(lport, event);
+ mutex_lock(&disc->disc_mutex);
+}
+
+/**
+ * fc_disc_error() - Handle error on dNS request
+ * @disc: The discovery context
+ * @fp: The error code encoded as a frame pointer
+ */
+static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
+{
+ struct fc_lport *lport = fc_disc_lport(disc);
+ unsigned long delay = 0;
+
+ FC_DISC_DBG(disc, "Error %d, retries %d/%d\n",
+ PTR_ERR_OR_ZERO(fp), disc->retry_count,
+ FC_DISC_RETRY_LIMIT);
+
+ if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
+ /*
+ * Memory allocation failure, or the exchange timed out,
+ * retry after delay.
+ */
+ if (disc->retry_count < FC_DISC_RETRY_LIMIT) {
+ /* go ahead and retry */
+ if (!fp)
+ delay = msecs_to_jiffies(FC_DISC_RETRY_DELAY);
+ else {
+ delay = msecs_to_jiffies(lport->e_d_tov);
+
+ /* timeout faster first time */
+ if (!disc->retry_count)
+ delay /= 4;
+ }
+ disc->retry_count++;
+ schedule_delayed_work(&disc->disc_work, delay);
+ } else
+ fc_disc_done(disc, DISC_EV_FAILED);
+ } else if (PTR_ERR(fp) == -FC_EX_CLOSED) {
+ /*
+ * if discovery fails due to lport reset, clear
+ * pending flag so that subsequent discovery can
+ * continue
+ */
+ disc->pending = 0;
+ }
+}
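
To summarize the retry policy: up to FC_DISC_RETRY_LIMIT attempts, with a fixed 500 ms delay after an allocation failure and an e_d_tov delay after an exchange timeout, quartered on the first retry so a single transient timeout recovers quickly. A hedged restatement of just the delay selection (disc_retry_delay is an illustrative name, not a libfc function):

    #include <linux/jiffies.h>
    #include <linux/types.h>

    #define FC_DISC_RETRY_DELAY     500UL   /* ms, for allocation failures */

    /* Delay before retry 'retry_count' (0-based); err_alloc means fp was NULL. */
    static unsigned long disc_retry_delay(bool err_alloc,
                                          unsigned int e_d_tov_ms,
                                          unsigned int retry_count)
    {
            unsigned long delay;

            if (err_alloc)
                    return msecs_to_jiffies(FC_DISC_RETRY_DELAY);

            delay = msecs_to_jiffies(e_d_tov_ms);
            if (!retry_count)
                    delay /= 4;     /* time out faster the first time */
            return delay;
    }
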
+
+/**
+ * fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request
+ * @disc: The discovery context
+ */
+static void fc_disc_gpn_ft_req(struct fc_disc *disc)
+{
+ struct fc_frame *fp;
+ struct fc_lport *lport = fc_disc_lport(disc);
+
+ lockdep_assert_held(&disc->disc_mutex);
+
+ WARN_ON(!fc_lport_test_ready(lport));
+
+ disc->pending = 1;
+ disc->requested = 0;
+
+ disc->buf_len = 0;
+ disc->seq_count = 0;
+ fp = fc_frame_alloc(lport,
+ sizeof(struct fc_ct_hdr) +
+ sizeof(struct fc_ns_gid_ft));
+ if (!fp)
+ goto err;
+
+ if (lport->tt.elsct_send(lport, 0, fp,
+ FC_NS_GPN_FT,
+ fc_disc_gpn_ft_resp,
+ disc, 3 * lport->r_a_tov))
+ return;
+err:
+ fc_disc_error(disc, NULL);
+}
+
+/**
+ * fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response.
+ * @disc: The discovery context
+ * @buf: The GPN_FT response buffer
+ * @len: The size of response buffer
+ *
+ * Goes through the list of IDs and names resulting from a request.
+ */
+static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
+{
+ struct fc_lport *lport;
+ struct fc_gpn_ft_resp *np;
+ char *bp;
+ size_t plen;
+ size_t tlen;
+ int error = 0;
+ struct fc_rport_identifiers ids;
+ struct fc_rport_priv *rdata;
+
+ lport = fc_disc_lport(disc);
+ disc->seq_count++;
+
+ /*
+ * Handle partial name record left over from previous call.
+ */
+ bp = buf;
+ plen = len;
+ np = (struct fc_gpn_ft_resp *)bp;
+ tlen = disc->buf_len;
+ disc->buf_len = 0;
+ if (tlen) {
+ WARN_ON(tlen >= sizeof(*np));
+ plen = sizeof(*np) - tlen;
+ WARN_ON(plen <= 0);
+ WARN_ON(plen >= sizeof(*np));
+ if (plen > len)
+ plen = len;
+ np = &disc->partial_buf;
+ memcpy((char *)np + tlen, bp, plen);
+
+ /*
+ * Set bp so that the loop below will advance it to the
+ * first valid full name element.
+ */
+ bp -= tlen;
+ len += tlen;
+ plen += tlen;
+ disc->buf_len = (unsigned char) plen;
+ if (plen == sizeof(*np))
+ disc->buf_len = 0;
+ }
+
+ /*
+ * Handle full name records, including the one filled from above.
+ * Normally, np == bp and plen == len, but from the partial case above,
+ * bp, len describe the overall buffer, and np, plen describe the
+ * partial buffer, which would usually be full now.
+ * After the first time through the loop, things return to "normal".
+ */
+ while (plen >= sizeof(*np)) {
+ ids.port_id = ntoh24(np->fp_fid);
+ ids.port_name = ntohll(np->fp_wwpn);
+
+ if (ids.port_id != lport->port_id &&
+ ids.port_name != lport->wwpn) {
+ rdata = fc_rport_create(lport, ids.port_id);
+ if (rdata) {
+ rdata->ids.port_name = ids.port_name;
+ rdata->disc_id = disc->disc_id;
+ } else {
+ printk(KERN_WARNING "libfc: Failed to allocate "
+ "memory for the newly discovered port "
+ "(%6.6x)\n", ids.port_id);
+ error = -ENOMEM;
+ }
+ }
+
+ if (np->fp_flags & FC_NS_FID_LAST) {
+ fc_disc_done(disc, DISC_EV_SUCCESS);
+ len = 0;
+ break;
+ }
+ len -= sizeof(*np);
+ bp += sizeof(*np);
+ np = (struct fc_gpn_ft_resp *)bp;
+ plen = len;
+ }
+
+ /*
+ * Save any partial record at the end of the buffer for next time.
+ */
+ if (error == 0 && len > 0 && len < sizeof(*np)) {
+ if (np != &disc->partial_buf) {
+ FC_DISC_DBG(disc, "Partial buffer remains "
+ "for discovery\n");
+ memcpy(&disc->partial_buf, np, len);
+ }
+ disc->buf_len = (unsigned char) len;
+ }
+ return error;
+}
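
The subtle part of this parser is that GPN_FT records are fixed-size but frame boundaries are arbitrary, so a record can straddle two frames; disc->partial_buf and disc->buf_len carry the fragment into the next call. The same carry logic, reduced to a generic sketch (feed_records is illustrative, not a libfc API):

    #include <linux/minmax.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /*
     * Reassemble fixed-size records from a stream delivered in arbitrary
     * chunks. 'part' holds a partial record between calls, '*part_len' how
     * much of it is filled. handle() is invoked once per complete record.
     */
    static void feed_records(u8 *part, size_t rec_size, size_t *part_len,
                             const u8 *buf, size_t len,
                             void (*handle)(const u8 *rec))
    {
            if (*part_len) {        /* finish the record left over */
                    size_t need = min(rec_size - *part_len, len);

                    memcpy(part + *part_len, buf, need);
                    *part_len += need;
                    buf += need;
                    len -= need;
                    if (*part_len < rec_size)
                            return; /* still incomplete, wait for more */
                    handle(part);
                    *part_len = 0;
            }
            for (; len >= rec_size; buf += rec_size, len -= rec_size)
                    handle(buf);
            memcpy(part, buf, len); /* save any trailing fragment */
            *part_len = len;
    }
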
+
+/**
+ * fc_disc_timeout() - Handler for discovery timeouts
+ * @work: Structure holding discovery context that needs to retry discovery
+ */
+static void fc_disc_timeout(struct work_struct *work)
+{
+ struct fc_disc *disc = container_of(work,
+ struct fc_disc,
+ disc_work.work);
+ mutex_lock(&disc->disc_mutex);
+ fc_disc_gpn_ft_req(disc);
+ mutex_unlock(&disc->disc_mutex);
+}
+
+/**
+ * fc_disc_gpn_ft_resp() - Handle a response frame from Get Port Names (GPN_FT)
+ * @sp: The sequence that the GPN_FT response was received on
+ * @fp: The GPN_FT response frame
+ * @disc_arg: The discovery context
+ *
+ * Locking Note: This function is called without disc mutex held, and
+ * should do all its processing with the mutex held
+ */
+static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *disc_arg)
+{
+ struct fc_disc *disc = disc_arg;
+ struct fc_ct_hdr *cp;
+ struct fc_frame_header *fh;
+ enum fc_disc_event event = DISC_EV_NONE;
+ unsigned int seq_cnt;
+ unsigned int len;
+ int error = 0;
+
+ mutex_lock(&disc->disc_mutex);
+ FC_DISC_DBG(disc, "Received a GPN_FT response\n");
+
+ if (IS_ERR(fp)) {
+ fc_disc_error(disc, fp);
+ mutex_unlock(&disc->disc_mutex);
+ return;
+ }
+
+ WARN_ON(!fc_frame_is_linear(fp)); /* buffer must be contiguous */
+ fh = fc_frame_header_get(fp);
+ len = fr_len(fp) - sizeof(*fh);
+ seq_cnt = ntohs(fh->fh_seq_cnt);
+ if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 && disc->seq_count == 0) {
+ cp = fc_frame_payload_get(fp, sizeof(*cp));
+ if (!cp) {
+ FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n",
+ fr_len(fp));
+ event = DISC_EV_FAILED;
+ } else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
+
+ /* Accepted, parse the response. */
+ len -= sizeof(*cp);
+ error = fc_disc_gpn_ft_parse(disc, cp + 1, len);
+ } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
+ FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x "
+ "(check zoning)\n", cp->ct_reason,
+ cp->ct_explan);
+ event = DISC_EV_FAILED;
+ if (cp->ct_reason == FC_FS_RJT_UNABL &&
+ cp->ct_explan == FC_FS_EXP_FTNR)
+ event = DISC_EV_SUCCESS;
+ } else {
+ FC_DISC_DBG(disc, "GPN_FT unexpected response code "
+ "%x\n", ntohs(cp->ct_cmd));
+ event = DISC_EV_FAILED;
+ }
+ } else if (fr_sof(fp) == FC_SOF_N3 && seq_cnt == disc->seq_count) {
+ error = fc_disc_gpn_ft_parse(disc, fh + 1, len);
+ } else {
+ FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? "
+ "seq_cnt %x expected %x sof %x eof %x\n",
+ seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
+ event = DISC_EV_FAILED;
+ }
+ if (error)
+ fc_disc_error(disc, ERR_PTR(error));
+ else if (event != DISC_EV_NONE)
+ fc_disc_done(disc, event);
+ fc_frame_free(fp);
+ mutex_unlock(&disc->disc_mutex);
+}
+
+/**
+ * fc_disc_gpn_id_resp() - Handle a response frame from Get Port Names (GPN_ID)
+ * @sp: The sequence the GPN_ID is on
+ * @fp: The response frame
+ * @rdata_arg: The remote port that sent the GPN_ID response
+ *
+ * Locking Note: This function is called without disc mutex held.
+ */
+static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rdata_arg)
+{
+ struct fc_rport_priv *rdata = rdata_arg;
+ struct fc_rport_priv *new_rdata;
+ struct fc_lport *lport;
+ struct fc_disc *disc;
+ struct fc_ct_hdr *cp;
+ struct fc_ns_gid_pn *pn;
+ u64 port_name;
+
+ lport = rdata->local_port;
+ disc = &lport->disc;
+
+ if (PTR_ERR(fp) == -FC_EX_CLOSED)
+ goto out;
+ if (IS_ERR(fp)) {
+ mutex_lock(&disc->disc_mutex);
+ fc_disc_restart(disc);
+ mutex_unlock(&disc->disc_mutex);
+ goto out;
+ }
+
+ cp = fc_frame_payload_get(fp, sizeof(*cp));
+ if (!cp)
+ goto redisc;
+ if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
+ if (fr_len(fp) < sizeof(struct fc_frame_header) +
+ sizeof(*cp) + sizeof(*pn))
+ goto redisc;
+ pn = (struct fc_ns_gid_pn *)(cp + 1);
+ port_name = get_unaligned_be64(&pn->fn_wwpn);
+ mutex_lock(&rdata->rp_mutex);
+ if (rdata->ids.port_name == -1)
+ rdata->ids.port_name = port_name;
+ else if (rdata->ids.port_name != port_name) {
+ FC_DISC_DBG(disc, "GPN_ID accepted. WWPN changed. "
+ "Port-id %6.6x wwpn %16.16llx\n",
+ rdata->ids.port_id, port_name);
+ mutex_unlock(&rdata->rp_mutex);
+ fc_rport_logoff(rdata);
+ mutex_lock(&lport->disc.disc_mutex);
+ new_rdata = fc_rport_create(lport, rdata->ids.port_id);
+ mutex_unlock(&lport->disc.disc_mutex);
+ if (new_rdata) {
+ new_rdata->disc_id = disc->disc_id;
+ fc_rport_login(new_rdata);
+ }
+ goto free_fp;
+ }
+ rdata->disc_id = disc->disc_id;
+ mutex_unlock(&rdata->rp_mutex);
+ fc_rport_login(rdata);
+ } else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
+ FC_DISC_DBG(disc, "GPN_ID rejected reason %x exp %x\n",
+ cp->ct_reason, cp->ct_explan);
+ fc_rport_logoff(rdata);
+ } else {
+ FC_DISC_DBG(disc, "GPN_ID unexpected response code %x\n",
+ ntohs(cp->ct_cmd));
+redisc:
+ mutex_lock(&disc->disc_mutex);
+ fc_disc_restart(disc);
+ mutex_unlock(&disc->disc_mutex);
+ }
+free_fp:
+ fc_frame_free(fp);
+out:
+ kref_put(&rdata->kref, fc_rport_destroy);
+}
+
+/**
+ * fc_disc_gpn_id_req() - Send Get Port Names by ID (GPN_ID) request
+ * @lport: The local port to initiate discovery on
+ * @rdata: remote port private data
+ *
+ * On failure, an error code is returned.
+ */
+static int fc_disc_gpn_id_req(struct fc_lport *lport,
+ struct fc_rport_priv *rdata)
+{
+ struct fc_frame *fp;
+
+ lockdep_assert_held(&lport->disc.disc_mutex);
+ fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
+ sizeof(struct fc_ns_fid));
+ if (!fp)
+ return -ENOMEM;
+ if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, FC_NS_GPN_ID,
+ fc_disc_gpn_id_resp, rdata,
+ 3 * lport->r_a_tov))
+ return -ENOMEM;
+ kref_get(&rdata->kref);
+ return 0;
+}
+
+/**
+ * fc_disc_single() - Discover the directory information for a single target
+ * @lport: The local port the remote port is associated with
+ * @dp: The port to rediscover
+ */
+static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
+{
+ struct fc_rport_priv *rdata;
+
+ lockdep_assert_held(&lport->disc.disc_mutex);
+
+ rdata = fc_rport_create(lport, dp->port_id);
+ if (!rdata)
+ return -ENOMEM;
+ rdata->disc_id = 0;
+ return fc_disc_gpn_id_req(lport, rdata);
+}
+
+/**
+ * fc_disc_stop() - Stop discovery for a given lport
+ * @lport: The local port that discovery should stop on
+ */
+static void fc_disc_stop(struct fc_lport *lport)
+{
+ struct fc_disc *disc = &lport->disc;
+
+ if (disc->pending)
+ cancel_delayed_work_sync(&disc->disc_work);
+ mutex_lock(&disc->disc_mutex);
+ fc_disc_stop_rports(disc);
+ mutex_unlock(&disc->disc_mutex);
+}
+
+/**
+ * fc_disc_stop_final() - Stop discovery for a given lport
+ * @lport: The lport that discovery should stop on
+ *
+ * This function will block until discovery has been
+ * completely stopped and all rports have been deleted.
+ */
+static void fc_disc_stop_final(struct fc_lport *lport)
+{
+ fc_disc_stop(lport);
+ fc_rport_flush_queue();
+}
+
+/**
+ * fc_disc_config() - Configure the discovery layer for a local port
+ * @lport: The local port that needs the discovery layer to be configured
+ * @priv: Private data structure for users of the discovery layer
+ */
+void fc_disc_config(struct fc_lport *lport, void *priv)
+{
+ struct fc_disc *disc;
+
+ if (!lport->tt.disc_start)
+ lport->tt.disc_start = fc_disc_start;
+
+ if (!lport->tt.disc_stop)
+ lport->tt.disc_stop = fc_disc_stop;
+
+ if (!lport->tt.disc_stop_final)
+ lport->tt.disc_stop_final = fc_disc_stop_final;
+
+ if (!lport->tt.disc_recv_req)
+ lport->tt.disc_recv_req = fc_disc_recv_req;
+
+ disc = &lport->disc;
+
+ disc->priv = priv;
+}
+EXPORT_SYMBOL(fc_disc_config);
+
+/**
+ * fc_disc_init() - Initialize the discovery layer for a local port
+ * @lport: The local port that needs the discovery layer to be initialized
+ */
+void fc_disc_init(struct fc_lport *lport)
+{
+ struct fc_disc *disc = &lport->disc;
+
+ INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
+ mutex_init(&disc->disc_mutex);
+ INIT_LIST_HEAD(&disc->rports);
+}
+EXPORT_SYMBOL(fc_disc_init);
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
new file mode 100644
index 000000000..8d3006edb
--- /dev/null
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2008 Intel Corporation. All rights reserved.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Provide interface to send ELS/CT FC frames
+ */
+
+#include <linux/export.h>
+#include <asm/unaligned.h>
+#include <scsi/fc/fc_gs.h>
+#include <scsi/fc/fc_ns.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/libfc.h>
+#include "fc_encode.h"
+#include "fc_libfc.h"
+
+/**
+ * fc_elsct_send() - Send an ELS or CT frame
+ * @lport: The local port to send the frame on
+ * @did: The destination ID for the frame
+ * @fp: The frame to be sent
+ * @op: The operational code
+ * @resp: The callback routine when the response is received
+ * @arg: The argument to pass to the response callback routine
+ * @timer_msec: The timeout period for the frame (in msecs)
+ */
+struct fc_seq *fc_elsct_send(struct fc_lport *lport, u32 did,
+ struct fc_frame *fp, unsigned int op,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *,
+ void *),
+ void *arg, u32 timer_msec)
+{
+ enum fc_rctl r_ctl;
+ enum fc_fh_type fh_type;
+ int rc;
+
+ /* ELS requests */
+ if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS))
+ rc = fc_els_fill(lport, did, fp, op, &r_ctl, &fh_type);
+ else {
+ /* CT requests */
+ rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type, &did);
+ }
+
+ if (rc) {
+ fc_frame_free(fp);
+ return NULL;
+ }
+
+ fc_fill_fc_hdr(fp, r_ctl, did, lport->port_id, fh_type,
+ FC_FCTL_REQ, 0);
+
+ return fc_exch_seq_send(lport, fp, resp, NULL, arg, timer_msec);
+}
+EXPORT_SYMBOL(fc_elsct_send);
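
fc_elsct_send() dispatches on the opcode range: values between ELS_LS_RJT and ELS_AUTH_ELS are filled as ELS requests, anything else as CT, in which case fc_ct_fill() may also rewrite the destination ID (directory vs. management server). A hedged usage sketch for an ELS request; my_send_rtv/my_rtv_resp and the 2 * r_a_tov timeout are illustrative choices, not libfc conventions:

    #include <scsi/libfc.h>

    /* Response handler: fp is an ERR_PTR on timeout or closed exchange. */
    static void my_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
    {
            if (IS_ERR(fp))
                    return;
            /* ... parse the RTV accept payload here ... */
            fc_frame_free(fp);
    }

    static int my_send_rtv(struct fc_lport *lport, u32 did)
    {
            struct fc_frame *fp;

            fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
            if (!fp)
                    return -ENOMEM;
            if (!lport->tt.elsct_send(lport, did, fp, ELS_RTV,
                                      my_rtv_resp, NULL, 2 * lport->r_a_tov))
                    return -ENOMEM; /* frame was freed on the failure path */
            return 0;
    }
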
+
+/**
+ * fc_elsct_init() - Initialize the ELS/CT layer
+ * @lport: The local port to initialize the ELS/CT layer for
+ */
+int fc_elsct_init(struct fc_lport *lport)
+{
+ if (!lport->tt.elsct_send)
+ lport->tt.elsct_send = fc_elsct_send;
+
+ return 0;
+}
+EXPORT_SYMBOL(fc_elsct_init);
+
+/**
+ * fc_els_resp_type() - Return a string describing the ELS response
+ * @fp: The frame pointer or possible error code
+ */
+const char *fc_els_resp_type(struct fc_frame *fp)
+{
+ const char *msg;
+ struct fc_frame_header *fh;
+ struct fc_ct_hdr *ct;
+
+ if (IS_ERR(fp)) {
+ switch (-PTR_ERR(fp)) {
+ case FC_NO_ERR:
+ msg = "response no error";
+ break;
+ case FC_EX_TIMEOUT:
+ msg = "response timeout";
+ break;
+ case FC_EX_CLOSED:
+ msg = "response closed";
+ break;
+ default:
+ msg = "response unknown error";
+ break;
+ }
+ } else {
+ fh = fc_frame_header_get(fp);
+ switch (fh->fh_type) {
+ case FC_TYPE_ELS:
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_LS_ACC:
+ msg = "accept";
+ break;
+ case ELS_LS_RJT:
+ msg = "reject";
+ break;
+ default:
+ msg = "response unknown ELS";
+ break;
+ }
+ break;
+ case FC_TYPE_CT:
+ ct = fc_frame_payload_get(fp, sizeof(*ct));
+ if (ct) {
+ switch (ntohs(ct->ct_cmd)) {
+ case FC_FS_ACC:
+ msg = "CT accept";
+ break;
+ case FC_FS_RJT:
+ msg = "CT reject";
+ break;
+ default:
+ msg = "response unknown CT";
+ break;
+ }
+ } else {
+ msg = "short CT response";
+ }
+ break;
+ default:
+ msg = "response not ELS or CT";
+ break;
+ }
+ }
+ return msg;
+}
diff --git a/drivers/scsi/libfc/fc_encode.h b/drivers/scsi/libfc/fc_encode.h
new file mode 100644
index 000000000..7dcac3b6b
--- /dev/null
+++ b/drivers/scsi/libfc/fc_encode.h
@@ -0,0 +1,951 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2008 Intel Corporation. All rights reserved.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_ENCODE_H_
+#define _FC_ENCODE_H_
+#include <asm/unaligned.h>
+#include <linux/utsname.h>
+#include <scsi/fc/fc_ms.h>
+
+/*
+ * F_CTL values for simple requests and responses.
+ */
+#define FC_FCTL_REQ (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)
+#define FC_FCTL_RESP (FC_FC_EX_CTX | FC_FC_LAST_SEQ | \
+ FC_FC_END_SEQ | FC_FC_SEQ_INIT)
+
+struct fc_ns_rft {
+ struct fc_ns_fid fid; /* port ID object */
+ struct fc_ns_fts fts; /* FC4-types object */
+};
+
+struct fc_ct_req {
+ struct fc_ct_hdr hdr;
+ union {
+ struct fc_ns_gid_ft gid;
+ struct fc_ns_rn_id rn;
+ struct fc_ns_rft rft;
+ struct fc_ns_rff_id rff;
+ struct fc_ns_fid fid;
+ struct fc_ns_rsnn snn;
+ struct fc_ns_rspn spn;
+ struct fc_fdmi_rhba rhba;
+ struct fc_fdmi_rpa rpa;
+ struct fc_fdmi_dprt dprt;
+ struct fc_fdmi_dhba dhba;
+ } payload;
+};
+
+/**
+ * fc_adisc_fill() - Fill in adisc request frame
+ * @lport: local port.
+ * @fp: fc frame where payload will be placed.
+ */
+static inline void fc_adisc_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_adisc *adisc;
+
+ adisc = fc_frame_payload_get(fp, sizeof(*adisc));
+ memset(adisc, 0, sizeof(*adisc));
+ adisc->adisc_cmd = ELS_ADISC;
+ put_unaligned_be64(lport->wwpn, &adisc->adisc_wwpn);
+ put_unaligned_be64(lport->wwnn, &adisc->adisc_wwnn);
+ hton24(adisc->adisc_port_id, lport->port_id);
+}
+
+/**
+ * fc_ct_hdr_fill() - Fill in a CT header and reset the CT payload
+ * Returns a pointer to the CT request structure within the frame.
+ */
+static inline struct fc_ct_req *fc_ct_hdr_fill(const struct fc_frame *fp,
+ unsigned int op, size_t req_size,
+ enum fc_ct_fs_type fs_type,
+ u8 subtype)
+{
+ struct fc_ct_req *ct;
+ size_t ct_plen;
+
+ ct_plen = sizeof(struct fc_ct_hdr) + req_size;
+ ct = fc_frame_payload_get(fp, ct_plen);
+ memset(ct, 0, ct_plen);
+ ct->hdr.ct_rev = FC_CT_REV;
+ ct->hdr.ct_fs_type = fs_type;
+ ct->hdr.ct_fs_subtype = subtype;
+ ct->hdr.ct_cmd = htons((u16) op);
+ return ct;
+}
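
fc_ct_hdr_fill() zeroes the entire CT request and fills only the common header; each caller then populates the opcode-specific member of the payload union. As a concrete restatement, the GPN_ID case handled in fc_ct_ns_fill() below reduces to this sketch (fill_gpn_id is an illustrative name):

    /* Sketch: build the CT portion of a GPN_ID request in frame 'fp'. */
    static void fill_gpn_id(struct fc_frame *fp, u32 fc_id)
    {
            struct fc_ct_req *ct;

            /* CT header + 4-byte port-ID object, directory server subtype */
            ct = fc_ct_hdr_fill(fp, FC_NS_GPN_ID, sizeof(struct fc_ns_fid),
                                FC_FST_DIR, FC_NS_SUBTYPE);
            hton24(ct->payload.fid.fp_fid, fc_id);  /* port being queried */
    }
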
+
+/**
+ * fc_ct_ns_fill() - Fill in a name service request frame
+ * @lport: local port.
+ * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries.
+ * @fp: frame to contain payload.
+ * @op: CT opcode.
+ * @r_ctl: pointer to FC header R_CTL.
+ * @fh_type: pointer to FC-4 type.
+ */
+static inline int fc_ct_ns_fill(struct fc_lport *lport,
+ u32 fc_id, struct fc_frame *fp,
+ unsigned int op, enum fc_rctl *r_ctl,
+ enum fc_fh_type *fh_type)
+{
+ struct fc_ct_req *ct;
+ size_t len;
+
+ switch (op) {
+ case FC_NS_GPN_FT:
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_gid_ft),
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ ct->payload.gid.fn_fc4_type = FC_TYPE_FCP;
+ break;
+
+ case FC_NS_GPN_ID:
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_fid),
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ ct->payload.gid.fn_fc4_type = FC_TYPE_FCP;
+ hton24(ct->payload.fid.fp_fid, fc_id);
+ break;
+
+ case FC_NS_RFT_ID:
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rft),
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ hton24(ct->payload.rft.fid.fp_fid, lport->port_id);
+ ct->payload.rft.fts = lport->fcts;
+ break;
+
+ case FC_NS_RFF_ID:
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rff_id),
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ hton24(ct->payload.rff.fr_fid.fp_fid, lport->port_id);
+ ct->payload.rff.fr_type = FC_TYPE_FCP;
+ if (lport->service_params & FCP_SPPF_INIT_FCN)
+ ct->payload.rff.fr_feat = FCP_FEAT_INIT;
+ if (lport->service_params & FCP_SPPF_TARG_FCN)
+ ct->payload.rff.fr_feat |= FCP_FEAT_TARG;
+ break;
+
+ case FC_NS_RNN_ID:
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rn_id),
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ hton24(ct->payload.rn.fr_fid.fp_fid, lport->port_id);
+ put_unaligned_be64(lport->wwnn, &ct->payload.rn.fr_wwn);
+ break;
+
+ case FC_NS_RSPN_ID:
+ len = strnlen(fc_host_symbolic_name(lport->host), 255);
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rspn) + len,
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ hton24(ct->payload.spn.fr_fid.fp_fid, lport->port_id);
+ strncpy(ct->payload.spn.fr_name,
+ fc_host_symbolic_name(lport->host), len);
+ ct->payload.spn.fr_name_len = len;
+ break;
+
+ case FC_NS_RSNN_NN:
+ len = strnlen(fc_host_symbolic_name(lport->host), 255);
+ ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rsnn) + len,
+ FC_FST_DIR, FC_NS_SUBTYPE);
+ put_unaligned_be64(lport->wwnn, &ct->payload.snn.fr_wwn);
+ strncpy(ct->payload.snn.fr_name,
+ fc_host_symbolic_name(lport->host), len);
+ ct->payload.snn.fr_name_len = len;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ *r_ctl = FC_RCTL_DD_UNSOL_CTL;
+ *fh_type = FC_TYPE_CT;
+ return 0;
+}
+
+static inline void fc_ct_ms_fill_attr(struct fc_fdmi_attr_entry *entry,
+ const char *in, size_t len)
+{
+ int copied;
+
+ copied = strscpy(entry->value, in, len);
+ if (copied > 0 && copied + 1 < len)
+ memset(entry->value + copied + 1, 0, len - copied - 1);
+}
+
+/**
+ * fc_ct_ms_fill() - Fill in a mgmt service request frame
+ * @lport: local port.
+ * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries.
+ * @fp: frame to contain payload.
+ * @op: CT opcode.
+ * @r_ctl: pointer to FC header R_CTL.
+ * @fh_type: pointer to FC-4 type.
+ */
+static inline int fc_ct_ms_fill(struct fc_lport *lport,
+ u32 fc_id, struct fc_frame *fp,
+ unsigned int op, enum fc_rctl *r_ctl,
+ enum fc_fh_type *fh_type)
+{
+ struct fc_ct_req *ct;
+ size_t len;
+ struct fc_fdmi_attr_entry *entry;
+ struct fs_fdmi_attrs *hba_attrs;
+ int numattrs = 0;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
+
+ switch (op) {
+ case FC_FDMI_RHBA:
+ numattrs = 11;
+ len = sizeof(struct fc_fdmi_rhba);
+ len -= sizeof(struct fc_fdmi_attr_entry);
+ len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+ len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
+ len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
+ len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
+ len += FC_FDMI_HBA_ATTR_MODEL_LEN;
+ len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
+ len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN;
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+ numattrs += 7;
+ len += FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN;
+ len += FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN;
+ len += FC_FDMI_HBA_ATTR_FABRICNAME_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSSTATE_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN;
+ }
+
+ ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
+ FC_FDMI_SUBTYPE);
+
+ /* HBA Identifier */
+ put_unaligned_be64(lport->wwpn, &ct->payload.rhba.hbaid.id);
+ /* Number of Ports - always 1 */
+ put_unaligned_be32(1, &ct->payload.rhba.port.numport);
+ /* Port Name */
+ put_unaligned_be64(lport->wwpn,
+ &ct->payload.rhba.port.port[0].portname);
+
+ /* HBA Attributes */
+ put_unaligned_be32(numattrs,
+ &ct->payload.rhba.hba_attrs.numattrs);
+ hba_attrs = &ct->payload.rhba.hba_attrs;
+ entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr;
+ /* NodeName*/
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_NODENAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(lport->wwnn,
+ (__be64 *)&entry->value);
+
+ /* Manufacturer */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_NODENAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_MANUFACTURER,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_manufacturer(lport->host),
+ FC_FDMI_HBA_ATTR_MANUFACTURER_LEN);
+
+ /* SerialNumber */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_MANUFACTURER_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_SERIALNUMBER,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_serial_number(lport->host),
+ FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN);
+
+ /* Model */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_MODEL_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_MODEL,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_model(lport->host),
+ FC_FDMI_HBA_ATTR_MODEL_LEN);
+
+ /* Model Description */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_MODEL_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_model_description(lport->host),
+ FC_FDMI_HBA_ATTR_MODELDESCR_LEN);
+
+ /* Hardware Version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_MODELDESCR_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_HARDWAREVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_hardware_version(lport->host),
+ FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN);
+
+ /* Driver Version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_DRIVERVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_driver_version(lport->host),
+ FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN);
+
+ /* OptionROM Version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_OPTIONROMVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ "unknown",
+ FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN);
+
+ /* Firmware Version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_firmware_version(lport->host),
+ FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN);
+
+ /* OS Name and Version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_OSNAMEVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ snprintf((char *)&entry->value,
+ FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN,
+ "%s v%s",
+ init_utsname()->sysname,
+ init_utsname()->release);
+
+ /* Max CT payload */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_max_ct_payload(lport->host),
+ &entry->value);
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+ /* Node symbolic name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_NODESYMBLNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_symbolic_name(lport->host),
+ FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN);
+
+ /* Vendor specific info */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(0,
+ &entry->value);
+
+ /* Number of ports */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_NUMBEROFPORTS,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_num_ports(lport->host),
+ &entry->value);
+
+ /* Fabric name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_FABRICNAME_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_FABRICNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(fc_host_fabric_name(lport->host),
+ &entry->value);
+
+ /* BIOS version */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_FABRICNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSVERSION_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_BIOSVERSION,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_bootbios_version(lport->host),
+ FC_FDMI_HBA_ATTR_BIOSVERSION_LEN);
+
+ /* BIOS state */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_BIOSVERSION_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSSTATE_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_BIOSSTATE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_bootbios_state(lport->host),
+ &entry->value);
+
+ /* Vendor identifier */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_HBA_ATTR_BIOSSTATE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN;
+ put_unaligned_be16(FC_FDMI_HBA_ATTR_VENDORIDENTIFIER,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_vendor_identifier(lport->host),
+ FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN);
+ }
+
+ break;
+ case FC_FDMI_RPA:
+ numattrs = 6;
+ len = sizeof(struct fc_fdmi_rpa);
+ len -= sizeof(struct fc_fdmi_attr_entry);
+ len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+ len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
+ len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
+ len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
+ len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
+
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+ numattrs += 10;
+
+ len += FC_FDMI_PORT_ATTR_NODENAME_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTTYPE_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN;
+ len += FC_FDMI_PORT_ATTR_FABRICNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTSTATE_LEN;
+ len += FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTID_LEN;
+
+ }
+
+ ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
+ FC_FDMI_SUBTYPE);
+
+ /* Port Name */
+ put_unaligned_be64(lport->wwpn,
+ &ct->payload.rpa.port.portname);
+
+ /* Port Attributes */
+ put_unaligned_be32(numattrs,
+ &ct->payload.rpa.hba_attrs.numattrs);
+
+ hba_attrs = &ct->payload.rpa.hba_attrs;
+ entry = (struct fc_fdmi_attr_entry *)hba_attrs->attr;
+
+ /* FC4 types */
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_FC4TYPES,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ memcpy(&entry->value, fc_host_supported_fc4s(lport->host),
+ FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
+
+ /* Supported Speed */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+
+ put_unaligned_be32(fc_host_supported_speeds(lport->host),
+ &entry->value);
+
+ /* Current Port Speed */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(lport->link_speed,
+ &entry->value);
+
+ /* Max Frame Size */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_maxframe_size(lport->host),
+ &entry->value);
+
+ /* OS Device Name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_OSDEVICENAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ /* Use the sysfs device name */
+ fc_ct_ms_fill_attr(entry,
+ dev_name(&lport->host->shost_gendev),
+ strnlen(dev_name(&lport->host->shost_gendev),
+ FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN));
+
+ /* Host Name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_HOSTNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ if (strlen(fc_host_system_hostname(lport->host)))
+ fc_ct_ms_fill_attr(entry,
+ fc_host_system_hostname(lport->host),
+ strnlen(fc_host_system_hostname(lport->host),
+ FC_FDMI_PORT_ATTR_HOSTNAME_LEN));
+ else
+ fc_ct_ms_fill_attr(entry,
+ init_utsname()->nodename,
+ FC_FDMI_PORT_ATTR_HOSTNAME_LEN);
+
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+
+ /* Node name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_HOSTNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_NODENAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_NODENAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(fc_host_node_name(lport->host),
+ &entry->value);
+
+ /* Port name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_NODENAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTNAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(lport->wwpn,
+ &entry->value);
+
+ /* Port symbolic name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_PORTNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_SYMBOLICNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ fc_ct_ms_fill_attr(entry,
+ fc_host_symbolic_name(lport->host),
+ FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN);
+
+ /* Port type */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTTYPE_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTTYPE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_port_type(lport->host),
+ &entry->value);
+
+ /* Supported class of service */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_PORTTYPE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_supported_classes(lport->host),
+ &entry->value);
+
+ /* Port Fabric name */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_FABRICNAME_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_FABRICNAME,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be64(fc_host_fabric_name(lport->host),
+ &entry->value);
+
+ /* Port active FC-4 */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_FABRICNAME_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_CURRENTFC4TYPE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ memcpy(&entry->value, fc_host_active_fc4s(lport->host),
+ FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN);
+
+ /* Port state */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTSTATE_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTSTATE,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_port_state(lport->host),
+ &entry->value);
+
+ /* Discovered ports */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_PORTSTATE_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_DISCOVEREDPORTS,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_num_discovered_ports(lport->host),
+ &entry->value);
+
+ /* Port ID */
+ entry = (struct fc_fdmi_attr_entry *)((char *)entry->value +
+ FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN);
+ len = FC_FDMI_ATTR_ENTRY_HEADER_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTID_LEN;
+ put_unaligned_be16(FC_FDMI_PORT_ATTR_PORTID,
+ &entry->type);
+ put_unaligned_be16(len, &entry->len);
+ put_unaligned_be32(fc_host_port_id(lport->host),
+ &entry->value);
+ }
+
+ break;
+ case FC_FDMI_DPRT:
+ len = sizeof(struct fc_fdmi_dprt);
+ ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
+ FC_FDMI_SUBTYPE);
+ /* Port Name */
+ put_unaligned_be64(lport->wwpn,
+ &ct->payload.dprt.port.portname);
+ break;
+ case FC_FDMI_DHBA:
+ len = sizeof(struct fc_fdmi_dhba);
+ ct = fc_ct_hdr_fill(fp, op, len, FC_FST_MGMT,
+ FC_FDMI_SUBTYPE);
+ /* HBA Identifier */
+ put_unaligned_be64(lport->wwpn, &ct->payload.dhba.hbaid.id);
+ break;
+ default:
+ return -EINVAL;
+ }
+ *r_ctl = FC_RCTL_DD_UNSOL_CTL;
+ *fh_type = FC_TYPE_CT;
+ return 0;
+}
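
Each attribute above repeats the same four-step stride: advance past the previous value, compute len = header + value length, store type and len big-endian, then store the value. Two small helpers could capture the stride; these are an illustrative refactoring sketch, not part of this file:

    /* Advance to the next attribute slot, given the previous value's length. */
    static inline struct fc_fdmi_attr_entry *
    fdmi_next_attr(struct fc_fdmi_attr_entry *entry, size_t prev_value_len)
    {
            return (struct fc_fdmi_attr_entry *)
                    ((char *)entry->value + prev_value_len);
    }

    /* Fill the fixed header of one attribute; returns its total length. */
    static inline u16 fdmi_attr_hdr(struct fc_fdmi_attr_entry *entry,
                                    u16 type, size_t value_len)
    {
            u16 len = FC_FDMI_ATTR_ENTRY_HEADER_LEN + value_len;

            put_unaligned_be16(type, &entry->type);
            put_unaligned_be16(len, &entry->len);
            return len;
    }
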
+
+/**
+ * fc_ct_fill() - Fill in a common transport service request frame
+ * @lport: local port.
+ * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries.
+ * @fp: frame to contain payload.
+ * @op: CT opcode.
+ * @r_ctl: pointer to FC header R_CTL.
+ * @fh_type: pointer to FC-4 type.
+ */
+static inline int fc_ct_fill(struct fc_lport *lport,
+ u32 fc_id, struct fc_frame *fp,
+ unsigned int op, enum fc_rctl *r_ctl,
+ enum fc_fh_type *fh_type, u32 *did)
+{
+ int rc = -EINVAL;
+
+ switch (fc_id) {
+ case FC_FID_MGMT_SERV:
+ rc = fc_ct_ms_fill(lport, fc_id, fp, op, r_ctl, fh_type);
+ *did = FC_FID_MGMT_SERV;
+ break;
+ case FC_FID_DIR_SERV:
+ default:
+ rc = fc_ct_ns_fill(lport, fc_id, fp, op, r_ctl, fh_type);
+ *did = FC_FID_DIR_SERV;
+ break;
+ }
+
+ return rc;
+}
+/**
+ * fc_plogi_fill - Fill in plogi request frame
+ */
+static inline void fc_plogi_fill(struct fc_lport *lport, struct fc_frame *fp,
+ unsigned int op)
+{
+ struct fc_els_flogi *plogi;
+ struct fc_els_csp *csp;
+ struct fc_els_cssp *cp;
+
+ plogi = fc_frame_payload_get(fp, sizeof(*plogi));
+ memset(plogi, 0, sizeof(*plogi));
+ plogi->fl_cmd = (u8) op;
+ put_unaligned_be64(lport->wwpn, &plogi->fl_wwpn);
+ put_unaligned_be64(lport->wwnn, &plogi->fl_wwnn);
+
+ csp = &plogi->fl_csp;
+ csp->sp_hi_ver = 0x20;
+ csp->sp_lo_ver = 0x20;
+ csp->sp_bb_cred = htons(10); /* this gets set by gateway */
+ csp->sp_bb_data = htons((u16) lport->mfs);
+ cp = &plogi->fl_cssp[3 - 1]; /* class 3 parameters */
+ cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+ csp->sp_features = htons(FC_SP_FT_CIRO);
+ csp->sp_tot_seq = htons(255); /* seq. we accept */
+ csp->sp_rel_off = htons(0x1f);
+ csp->sp_e_d_tov = htonl(lport->e_d_tov);
+
+ cp->cp_rdfs = htons((u16) lport->mfs);
+ cp->cp_con_seq = htons(255);
+ cp->cp_open_seq = 1;
+}
+
+/**
+ * fc_flogi_fill - Fill in a flogi request frame.
+ */
+static inline void fc_flogi_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_csp *sp;
+ struct fc_els_cssp *cp;
+ struct fc_els_flogi *flogi;
+
+ flogi = fc_frame_payload_get(fp, sizeof(*flogi));
+ memset(flogi, 0, sizeof(*flogi));
+ flogi->fl_cmd = (u8) ELS_FLOGI;
+ put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
+ put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
+ sp = &flogi->fl_csp;
+ sp->sp_hi_ver = 0x20;
+ sp->sp_lo_ver = 0x20;
+ sp->sp_bb_cred = htons(10); /* this gets set by gateway */
+ sp->sp_bb_data = htons((u16) lport->mfs);
+ cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
+ cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+ if (lport->does_npiv)
+ sp->sp_features = htons(FC_SP_FT_NPIV);
+}
+
+/**
+ * fc_fdisc_fill - Fill in a fdisc request frame.
+ */
+static inline void fc_fdisc_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_csp *sp;
+ struct fc_els_cssp *cp;
+ struct fc_els_flogi *fdisc;
+
+ fdisc = fc_frame_payload_get(fp, sizeof(*fdisc));
+ memset(fdisc, 0, sizeof(*fdisc));
+ fdisc->fl_cmd = (u8) ELS_FDISC;
+ put_unaligned_be64(lport->wwpn, &fdisc->fl_wwpn);
+ put_unaligned_be64(lport->wwnn, &fdisc->fl_wwnn);
+ sp = &fdisc->fl_csp;
+ sp->sp_hi_ver = 0x20;
+ sp->sp_lo_ver = 0x20;
+ sp->sp_bb_cred = htons(10); /* this gets set by gateway */
+ sp->sp_bb_data = htons((u16) lport->mfs);
+ cp = &fdisc->fl_cssp[3 - 1]; /* class 3 parameters */
+ cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+}
+
+/**
+ * fc_logo_fill - Fill in a logo request frame.
+ */
+static inline void fc_logo_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_logo *logo;
+
+ logo = fc_frame_payload_get(fp, sizeof(*logo));
+ memset(logo, 0, sizeof(*logo));
+ logo->fl_cmd = ELS_LOGO;
+ hton24(logo->fl_n_port_id, lport->port_id);
+ logo->fl_n_port_wwn = htonll(lport->wwpn);
+}
+
+/**
+ * fc_rtv_fill - Fill in RTV (read timeout value) request frame.
+ */
+static inline void fc_rtv_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_rtv *rtv;
+
+ rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+ memset(rtv, 0, sizeof(*rtv));
+ rtv->rtv_cmd = ELS_RTV;
+}
+
+/**
+ * fc_rec_fill - Fill in rec request frame
+ */
+static inline void fc_rec_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_rec *rec;
+ struct fc_exch *ep = fc_seq_exch(fr_seq(fp));
+
+ rec = fc_frame_payload_get(fp, sizeof(*rec));
+ memset(rec, 0, sizeof(*rec));
+ rec->rec_cmd = ELS_REC;
+ hton24(rec->rec_s_id, lport->port_id);
+ rec->rec_ox_id = htons(ep->oxid);
+ rec->rec_rx_id = htons(ep->rxid);
+}
+
+/**
+ * fc_prli_fill - Fill in prli request frame
+ */
+static inline void fc_prli_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+
+ pp = fc_frame_payload_get(fp, sizeof(*pp));
+ memset(pp, 0, sizeof(*pp));
+ pp->prli.prli_cmd = ELS_PRLI;
+ pp->prli.prli_spp_len = sizeof(struct fc_els_spp);
+ pp->prli.prli_len = htons(sizeof(*pp));
+ pp->spp.spp_type = FC_TYPE_FCP;
+ pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
+ pp->spp.spp_params = htonl(lport->service_params);
+}
+
+/**
+ * fc_scr_fill - Fill in a scr request frame.
+ */
+static inline void fc_scr_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_els_scr *scr;
+
+ scr = fc_frame_payload_get(fp, sizeof(*scr));
+ memset(scr, 0, sizeof(*scr));
+ scr->scr_cmd = ELS_SCR;
+ scr->scr_reg_func = ELS_SCRF_FULL;
+}
+
+/**
+ * fc_els_fill - Fill in an ELS request frame
+ */
+static inline int fc_els_fill(struct fc_lport *lport,
+ u32 did,
+ struct fc_frame *fp, unsigned int op,
+ enum fc_rctl *r_ctl, enum fc_fh_type *fh_type)
+{
+ switch (op) {
+ case ELS_ADISC:
+ fc_adisc_fill(lport, fp);
+ break;
+
+ case ELS_PLOGI:
+ fc_plogi_fill(lport, fp, ELS_PLOGI);
+ break;
+
+ case ELS_FLOGI:
+ fc_flogi_fill(lport, fp);
+ break;
+
+ case ELS_FDISC:
+ fc_fdisc_fill(lport, fp);
+ break;
+
+ case ELS_LOGO:
+ fc_logo_fill(lport, fp);
+ break;
+
+ case ELS_RTV:
+ fc_rtv_fill(lport, fp);
+ break;
+
+ case ELS_REC:
+ fc_rec_fill(lport, fp);
+ break;
+
+ case ELS_PRLI:
+ fc_prli_fill(lport, fp);
+ break;
+
+ case ELS_SCR:
+ fc_scr_fill(lport, fp);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ *r_ctl = FC_RCTL_ELS_REQ;
+ *fh_type = FC_TYPE_ELS;
+ return 0;
+}
+#endif /* _FC_ENCODE_H_ */
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
new file mode 100644
index 000000000..1d91c4575
--- /dev/null
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -0,0 +1,2712 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
+ * Copyright(c) 2008 Mike Christie
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Fibre Channel exchange and sequence handling.
+ */
+
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/log2.h>
+
+#include <scsi/fc/fc_fc2.h>
+
+#include <scsi/libfc.h>
+
+#include "fc_libfc.h"
+
+u16 fc_cpu_mask; /* cpu mask for possible cpus */
+EXPORT_SYMBOL(fc_cpu_mask);
+static u16 fc_cpu_order; /* log2 of the rounded-up count of possible cpus */
+static struct kmem_cache *fc_em_cachep; /* cache for exchanges */
+static struct workqueue_struct *fc_exch_workqueue;
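
fc_cpu_mask and fc_cpu_order together split a 16-bit exchange ID into a per-CPU pool selector (the low bits) and a slot index (the high bits). Presumably they are initialized once at module setup along these lines, consistent with how they are used later in this file:

    static void setup_xid_split(void)   /* illustrative sketch */
    {
            /* Round the possible-CPU count up to a power of two; the low
             * fc_cpu_order bits of an XID then select one per-CPU pool. */
            fc_cpu_order = ilog2(roundup_pow_of_two(num_possible_cpus()));
            fc_cpu_mask = (1 << fc_cpu_order) - 1;
    }
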
+
+/*
+ * Structure and function definitions for managing Fibre Channel Exchanges
+ * and Sequences.
+ *
+ * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
+ *
+ * fc_exch_mgr holds the exchange state for an N port
+ *
+ * fc_exch holds state for one exchange and links to its active sequence.
+ *
+ * fc_seq holds the state for an individual sequence.
+ */
+
+/**
+ * struct fc_exch_pool - Per cpu exchange pool
+ * @next_index: Next possible free exchange index
+ * @total_exches: Total allocated exchanges
+ * @lock: Exch pool lock
+ * @ex_list: List of exchanges
+ * @left: Cache of free slot in exch array
+ * @right: Cache of free slot in exch array
+ *
+ * This structure manages per-CPU exchanges in an array of exchange
+ * pointers. The pointer array immediately follows the struct fc_exch_pool
+ * memory and covers the range of exchange IDs assigned to this pool.
+ */
+struct fc_exch_pool {
+ spinlock_t lock;
+ struct list_head ex_list;
+ u16 next_index;
+ u16 total_exches;
+
+ u16 left;
+ u16 right;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct fc_exch_mgr - The Exchange Manager (EM).
+ * @class: Default class for new sequences
+ * @kref: Reference counter
+ * @min_xid: Minimum exchange ID
+ * @max_xid: Maximum exchange ID
+ * @ep_pool: Reserved exchange pointers
+ * @pool_max_index: Max exch array index in exch pool
+ * @pool: Per cpu exch pool
+ * @lport: Local exchange port
+ * @stats: Statistics structure
+ *
+ * This structure is the center for creating exchanges and sequences.
+ * It manages the allocation of exchange IDs.
+ */
+struct fc_exch_mgr {
+ struct fc_exch_pool __percpu *pool;
+ mempool_t *ep_pool;
+ struct fc_lport *lport;
+ enum fc_class class;
+ struct kref kref;
+ u16 min_xid;
+ u16 max_xid;
+ u16 pool_max_index;
+
+ struct {
+ atomic_t no_free_exch;
+ atomic_t no_free_exch_xid;
+ atomic_t xid_not_found;
+ atomic_t xid_busy;
+ atomic_t seq_not_found;
+ atomic_t non_bls_resp;
+ } stats;
+};
+
+/**
+ * struct fc_exch_mgr_anchor - primary structure for list of EMs
+ * @ema_list: Exchange Manager Anchor list
+ * @mp: Exchange Manager associated with this anchor
+ * @match: Routine to determine if this anchor's EM should be used
+ *
+ * When walking the list of anchors, the match routine is called for
+ * each anchor to determine whether that EM should be used. The last
+ * anchor in the list always matches, to handle any exchanges not
+ * handled by the other EMs. Non-default EMs are typically added to the
+ * anchor list by hardware that provides offloads.
+ */
+struct fc_exch_mgr_anchor {
+ struct list_head ema_list;
+ struct fc_exch_mgr *mp;
+ bool (*match)(struct fc_frame *);
+};
+
+static void fc_exch_rrq(struct fc_exch *);
+static void fc_seq_ls_acc(struct fc_frame *);
+static void fc_seq_ls_rjt(struct fc_frame *, enum fc_els_rjt_reason,
+ enum fc_els_rjt_explan);
+static void fc_exch_els_rec(struct fc_frame *);
+static void fc_exch_els_rrq(struct fc_frame *);
+
+/*
+ * Internal implementation notes.
+ *
+ * libfc has one exchange manager by default, but an LLD may choose
+ * to have one per CPU. There is one sequence manager per exchange
+ * manager, and the two are currently never separated.
+ *
+ * Section 9.8 in FC-FS-2 specifies: "The SEQ_ID is a one-byte field
+ * assigned by the Sequence Initiator that shall be unique for a specific
+ * D_ID and S_ID pair while the Sequence is open." Note that it isn't
+ * qualified by exchange ID, which one might think it would be.
+ * In practice this limits the number of open sequences and exchanges to 256
+ * per session. For most targets we could treat this limit as per exchange.
+ *
+ * The exchange and its sequence are freed when the last sequence is received.
+ * It's possible for the remote port to leave an exchange open without
+ * sending any sequences.
+ *
+ * Notes on reference counts:
+ *
+ * Exchanges are reference counted and an exchange gets freed when its
+ * reference count becomes zero.
+ *
+ * Timeouts:
+ * Sequences are timed out for E_D_TOV and R_A_TOV.
+ *
+ * Sequence event handling:
+ *
+ * The following events may occur on initiator sequences:
+ *
+ * Send.
+ * For now, the whole thing is sent.
+ * Receive ACK
+ * This applies only to class F.
+ * The sequence is marked complete.
+ * ULP completion.
+ * The upper layer calls fc_exch_done() when done
+ * with exchange and sequence tuple.
+ * RX-inferred completion.
+ * When we receive the next sequence on the same exchange, we can
+ * retire the previous sequence ID. (XXX not implemented).
+ * Timeout.
+ * R_A_TOV frees the sequence ID. If we're waiting for ACK,
+ * E_D_TOV causes abort and calls upper layer response handler
+ * with FC_EX_TIMEOUT error.
+ * Receive RJT
+ * XXX defer.
+ * Send ABTS
+ * On timeout.
+ *
+ * The following events may occur on recipient sequences:
+ *
+ * Receive
+ * Allocate sequence for first frame received.
+ * Hold during receive handler.
+ * Release when final frame received.
+ * Keep status of last N of these for the ELS RES command. XXX TBD.
+ * Receive ABTS
+ * Deallocate sequence
+ * Send RJT
+ * Deallocate
+ *
+ * For now, we neglect conditions where only part of a sequence was
+ * received or transmitted, or where out-of-order receipt is detected.
+ */
+
+/*
+ * Locking notes:
+ *
+ * The EM code runs in a per-CPU worker thread.
+ *
+ * To protect against concurrency between worker thread code and timers,
+ * sequence allocation and deallocation must be locked.
+ * - exchange refcnt can be done atomically without locks.
+ * - sequence allocation must be locked by exch lock.
+ * - If the EM pool lock and ex_lock must be taken at the same time, then the
+ * EM pool lock must be taken before the ex_lock.
+ */
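+
+/*
+ * A minimal sketch of the lock-ordering rule above (illustrative only,
+ * not a routine in this file): when both locks are needed, the pool
+ * lock is taken before the exchange lock, as fc_exch_em_alloc() does.
+ *
+ *	spin_lock_bh(&pool->lock);
+ *	spin_lock_bh(&ep->ex_lock);
+ *	... update the pool slot and exchange state ...
+ *	spin_unlock_bh(&ep->ex_lock);
+ *	spin_unlock_bh(&pool->lock);
+ */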
+
+/*
+ * opcode names for debugging.
+ */
+static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
+
+/**
+ * fc_exch_name_lookup() - Lookup name by opcode
+ * @op: Opcode to be looked up
+ * @table: Opcode/name table
+ * @max_index: Index not to be exceeded
+ *
+ * This routine is used to determine a human-readable string identifying
+ * an R_CTL opcode.
+ */
+static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
+ unsigned int max_index)
+{
+ const char *name = NULL;
+
+ if (op < max_index)
+ name = table[op];
+ if (!name)
+ name = "unknown";
+ return name;
+}
+
+/**
+ * fc_exch_rctl_name() - Wrapper routine for fc_exch_name_lookup()
+ * @op: The opcode to be looked up
+ */
+static const char *fc_exch_rctl_name(unsigned int op)
+{
+ return fc_exch_name_lookup(op, fc_exch_rctl_names,
+ ARRAY_SIZE(fc_exch_rctl_names));
+}
+
+/**
+ * fc_exch_hold() - Increment an exchange's reference count
+ * @ep: Exchange to be held
+ */
+static inline void fc_exch_hold(struct fc_exch *ep)
+{
+ atomic_inc(&ep->ex_refcnt);
+}
+
+/**
+ * fc_exch_setup_hdr() - Initialize an FC header by setting some fields
+ * and determining SOF and EOF
+ * @ep: The exchange that will use the header
+ * @fp: The frame whose header is to be modified
+ * @f_ctl: F_CTL bits that will be used for the frame header
+ *
+ * The fields initialized by this routine are: fh_ox_id, fh_rx_id,
+ * fh_seq_id, fh_seq_cnt and the SOF and EOF.
+ */
+static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
+ u32 f_ctl)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ u16 fill;
+
+ fr_sof(fp) = ep->class;
+ if (ep->seq.cnt)
+ fr_sof(fp) = fc_sof_normal(ep->class);
+
+ if (f_ctl & FC_FC_END_SEQ) {
+ fr_eof(fp) = FC_EOF_T;
+ if (fc_sof_needs_ack((enum fc_sof)ep->class))
+ fr_eof(fp) = FC_EOF_N;
+ /*
+ * From F_CTL.
+ * The number of fill bytes to make the length a 4-byte
+ * multiple is stored in the low-order 2 bits of the f_ctl.
+ * The fill itself will have been cleared by the frame
+ * allocation.
+ * After this, the length will be even, as expected by
+ * the transport.
+ */
+ fill = fr_len(fp) & 3;
+ if (fill) {
+ fill = 4 - fill;
+ /* TODO, this may be a problem with fragmented skb */
+ skb_put(fp_skb(fp), fill);
+ hton24(fh->fh_f_ctl, f_ctl | fill);
+ }
+ } else {
+ WARN_ON(fr_len(fp) % 4 != 0); /* no pad on a non-last frame */
+ fr_eof(fp) = FC_EOF_N;
+ }
+
+ /* Initialize remaining fh fields from fc_fill_fc_hdr */
+ fh->fh_ox_id = htons(ep->oxid);
+ fh->fh_rx_id = htons(ep->rxid);
+ fh->fh_seq_id = ep->seq.id;
+ fh->fh_seq_cnt = htons(ep->seq.cnt);
+}
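+
+/*
+ * Worked example of the fill computation above (illustrative numbers):
+ * a frame with fr_len(fp) == 13 has 13 & 3 == 1, so fill = 4 - 1 = 3
+ * pad bytes are appended and the low-order 2 bits of F_CTL are set to
+ * 3 so the receiver can strip the padding again.
+ */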
+
+/**
+ * fc_exch_release() - Decrement an exchange's reference count
+ * @ep: Exchange to be released
+ *
+ * If the reference count reaches zero and the exchange is complete,
+ * it is freed.
+ */
+static void fc_exch_release(struct fc_exch *ep)
+{
+ struct fc_exch_mgr *mp;
+
+ if (atomic_dec_and_test(&ep->ex_refcnt)) {
+ mp = ep->em;
+ if (ep->destructor)
+ ep->destructor(&ep->seq, ep->arg);
+ WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
+ mempool_free(ep, mp->ep_pool);
+ }
+}
+
+/**
+ * fc_exch_timer_cancel() - Cancel an exchange's timer
+ * @ep: The exchange whose timer is to be canceled
+ */
+static inline void fc_exch_timer_cancel(struct fc_exch *ep)
+{
+ if (cancel_delayed_work(&ep->timeout_work)) {
+ FC_EXCH_DBG(ep, "Exchange timer canceled\n");
+ atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
+ }
+}
+
+/**
+ * fc_exch_timer_set_locked() - Start a timer for an exchange with the
+ * exchange lock held
+ * @ep: The exchange whose timer will start
+ * @timer_msec: The timeout period
+ *
+ * Used for upper level protocols to time out the exchange.
+ * The timer is cancelled when it fires or when the exchange completes.
+ */
+static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
+ unsigned int timer_msec)
+{
+ if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
+ return;
+
+ FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);
+
+ fc_exch_hold(ep); /* hold for timer */
+ if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
+ msecs_to_jiffies(timer_msec))) {
+ FC_EXCH_DBG(ep, "Exchange already queued\n");
+ fc_exch_release(ep);
+ }
+}
+
+/**
+ * fc_exch_timer_set() - Lock the exchange and set the timer
+ * @ep: The exchange whose timer will start
+ * @timer_msec: The timeout period
+ */
+static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
+{
+ spin_lock_bh(&ep->ex_lock);
+ fc_exch_timer_set_locked(ep, timer_msec);
+ spin_unlock_bh(&ep->ex_lock);
+}
+
+/**
+ * fc_exch_done_locked() - Complete an exchange with the exchange lock held
+ * @ep: The exchange that is complete
+ *
+ * Note: May sleep if invoked from outside a response handler.
+ */
+static int fc_exch_done_locked(struct fc_exch *ep)
+{
+ int rc = 1;
+
+ /*
+ * We must check for completion in case there are two threads
+ * trying to complete this. But the RRQ code will reuse the
+ * ep, and in that case we only clear the resp and set it as
+ * complete, so it can be reused by the timer to send the rrq.
+ */
+ if (ep->state & FC_EX_DONE)
+ return rc;
+ ep->esb_stat |= ESB_ST_COMPLETE;
+
+ if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
+ ep->state |= FC_EX_DONE;
+ fc_exch_timer_cancel(ep);
+ rc = 0;
+ }
+ return rc;
+}
+
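+/*
+ * Sentinel exchange: fc_exch_delete() parks this in the pool slot of a
+ * quarantined XID so the slot is neither treated as free nor reused,
+ * and fc_exch_find() refuses to return it.
+ */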
+static struct fc_exch fc_quarantine_exch;
+
+/**
+ * fc_exch_ptr_get() - Return an exchange from an exchange pool
+ * @pool: Exchange Pool to get an exchange from
+ * @index: Index of the exchange within the pool
+ *
+ * Use the index to get an exchange from within an exchange pool. The
+ * local exches pointer references the array of exchange pointers; the
+ * index selects the exchange within that array.
+ */
+static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
+ u16 index)
+{
+ struct fc_exch **exches = (struct fc_exch **)(pool + 1);
+ return exches[index];
+}
+
+/**
+ * fc_exch_ptr_set() - Assign an exchange to a slot in an exchange pool
+ * @pool: The pool to assign the exchange to
+ * @index: The index in the pool where the exchange will be assigned
+ * @ep: The exchange to assign to the pool
+ */
+static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
+ struct fc_exch *ep)
+{
+ ((struct fc_exch **)(pool + 1))[index] = ep;
+}
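+
+/*
+ * Layout sketch for the two helpers above (illustrative): each per-CPU
+ * pool is allocated as the struct immediately followed by its exchange
+ * pointer array, which is why (pool + 1) addresses slot 0.
+ *
+ *	+---------------------+---------+---------+-----+-----------+
+ *	| struct fc_exch_pool | exch[0] | exch[1] | ... | exch[N-1] |
+ *	+---------------------+---------+---------+-----+-----------+
+ */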
+
+/**
+ * fc_exch_delete() - Delete an exchange
+ * @ep: The exchange to be deleted
+ */
+static void fc_exch_delete(struct fc_exch *ep)
+{
+ struct fc_exch_pool *pool;
+ u16 index;
+
+ pool = ep->pool;
+ spin_lock_bh(&pool->lock);
+ WARN_ON(pool->total_exches <= 0);
+ pool->total_exches--;
+
+ /* update cache of free slot */
+ index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
+ if (!(ep->state & FC_EX_QUARANTINE)) {
+ if (pool->left == FC_XID_UNKNOWN)
+ pool->left = index;
+ else if (pool->right == FC_XID_UNKNOWN)
+ pool->right = index;
+ else
+ pool->next_index = index;
+ fc_exch_ptr_set(pool, index, NULL);
+ } else {
+ fc_exch_ptr_set(pool, index, &fc_quarantine_exch);
+ }
+ list_del(&ep->ex_list);
+ spin_unlock_bh(&pool->lock);
+ fc_exch_release(ep); /* drop hold for exch in mp */
+}
+
+static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
+ struct fc_frame *fp)
+{
+ struct fc_exch *ep;
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ int error = -ENXIO;
+ u32 f_ctl;
+ u8 fh_type = fh->fh_type;
+
+ ep = fc_seq_exch(sp);
+
+ if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) {
+ fc_frame_free(fp);
+ goto out;
+ }
+
+ WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));
+
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ fc_exch_setup_hdr(ep, fp, f_ctl);
+ fr_encaps(fp) = ep->encaps;
+
+ /*
+ * update sequence count if this frame is carrying
+ * multiple FC frames when sequence offload is enabled
+ * by LLD.
+ */
+ if (fr_max_payload(fp))
+ sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
+ fr_max_payload(fp));
+ else
+ sp->cnt++;
+
+ /*
+ * Send the frame.
+ */
+ error = lport->tt.frame_send(lport, fp);
+
+ if (fh_type == FC_TYPE_BLS)
+ goto out;
+
+ /*
+ * Update the exchange and sequence flags,
+ * assuming all frames for the sequence have been sent.
+ * We can only be called to send once for each sequence.
+ */
+ ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ; /* not first seq */
+ if (f_ctl & FC_FC_SEQ_INIT)
+ ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+out:
+ return error;
+}
+
+/**
+ * fc_seq_send() - Send a frame using existing sequence/exchange pair
+ * @lport: The local port that the exchange will be sent on
+ * @sp: The sequence to be sent
+ * @fp: The frame to be sent on the exchange
+ *
+ * Note: The frame will be freed either by a direct call to fc_frame_free(fp)
+ * or indirectly by calling libfc_function_template.frame_send().
+ */
+int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp)
+{
+ struct fc_exch *ep;
+ int error;
+ ep = fc_seq_exch(sp);
+ spin_lock_bh(&ep->ex_lock);
+ error = fc_seq_send_locked(lport, sp, fp);
+ spin_unlock_bh(&ep->ex_lock);
+ return error;
+}
+EXPORT_SYMBOL(fc_seq_send);
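+
+/*
+ * Minimal usage sketch (illustrative; the variables are hypothetical):
+ * the caller fills the FC header first, then hands the frame to the
+ * sequence. The frame is consumed even on error.
+ *
+ *	fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
+ *		       FC_TYPE_FCP, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+ *	if (fc_seq_send(lport, sp, fp))
+ *		... frame already freed, handle the error ...
+ */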
+
+/**
+ * fc_seq_alloc() - Allocate a sequence for a given exchange
+ * @ep: The exchange to allocate a new sequence for
+ * @seq_id: The sequence ID to be used
+ *
+ * We don't support multiple originated sequences on the same exchange.
+ * By implication, any previously originated sequence on this exchange
+ * is complete, and we reallocate the same sequence.
+ */
+static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
+{
+ struct fc_seq *sp;
+
+ sp = &ep->seq;
+ sp->ssb_stat = 0;
+ sp->cnt = 0;
+ sp->id = seq_id;
+ return sp;
+}
+
+/**
+ * fc_seq_start_next_locked() - Allocate a new sequence on the same
+ * exchange as the supplied sequence
+ * @sp: The sequence/exchange to get a new sequence for
+ */
+static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
+{
+ struct fc_exch *ep = fc_seq_exch(sp);
+
+ sp = fc_seq_alloc(ep, ep->seq_id++);
+ FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
+ ep->f_ctl, sp->id);
+ return sp;
+}
+
+/**
+ * fc_seq_start_next() - Lock the exchange and get a new sequence
+ * for a given sequence/exchange pair
+ * @sp: The sequence/exchange to get a new sequence for
+ */
+struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
+{
+ struct fc_exch *ep = fc_seq_exch(sp);
+
+ spin_lock_bh(&ep->ex_lock);
+ sp = fc_seq_start_next_locked(sp);
+ spin_unlock_bh(&ep->ex_lock);
+
+ return sp;
+}
+EXPORT_SYMBOL(fc_seq_start_next);
+
+/**
+ * fc_seq_set_resp() - Set the response handler for the exchange
+ * associated with a sequence
+ * @sp: The sequence whose exchange's response handler is to be set
+ * @resp: The response handler for the exchange
+ * @arg: The argument to be passed to the response handler
+ *
+ * Note: May sleep if invoked from outside a response handler.
+ */
+void fc_seq_set_resp(struct fc_seq *sp,
+ void (*resp)(struct fc_seq *, struct fc_frame *, void *),
+ void *arg)
+{
+ struct fc_exch *ep = fc_seq_exch(sp);
+ DEFINE_WAIT(wait);
+
+ spin_lock_bh(&ep->ex_lock);
+ while (ep->resp_active && ep->resp_task != current) {
+ prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_bh(&ep->ex_lock);
+
+ schedule();
+
+ spin_lock_bh(&ep->ex_lock);
+ }
+ finish_wait(&ep->resp_wq, &wait);
+ ep->resp = resp;
+ ep->arg = arg;
+ spin_unlock_bh(&ep->ex_lock);
+}
+EXPORT_SYMBOL(fc_seq_set_resp);
+
+/**
+ * fc_exch_abort_locked() - Abort an exchange
+ * @ep: The exchange to be aborted
+ * @timer_msec: The period of time to wait before aborting
+ *
+ * Abort an exchange and sequence. Generally called because of an
+ * exchange timeout or an abort from the upper layer.
+ *
+ * An abort timeout can be specified via timer_msec: if a non-zero
+ * value is given, the exchange's response handler is called with a
+ * timeout error if no response to the abort arrives in time.
+ *
+ * Locking notes: Called with exch lock held
+ *
+ * Return value: 0 on success else error code
+ */
+static int fc_exch_abort_locked(struct fc_exch *ep,
+ unsigned int timer_msec)
+{
+ struct fc_seq *sp;
+ struct fc_frame *fp;
+ int error;
+
+ FC_EXCH_DBG(ep, "exch: abort, time %d msecs\n", timer_msec);
+ if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
+ ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
+ FC_EXCH_DBG(ep, "exch: already completed esb %x state %x\n",
+ ep->esb_stat, ep->state);
+ return -ENXIO;
+ }
+
+ /*
+ * Send the abort on a new sequence if possible.
+ */
+ sp = fc_seq_start_next_locked(&ep->seq);
+ if (!sp)
+ return -ENOMEM;
+
+ if (timer_msec)
+ fc_exch_timer_set_locked(ep, timer_msec);
+
+ if (ep->sid) {
+ /*
+ * Send an abort for the sequence that timed out.
+ */
+ fp = fc_frame_alloc(ep->lp, 0);
+ if (fp) {
+ ep->esb_stat |= ESB_ST_SEQ_INIT;
+ fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
+ FC_TYPE_BLS, FC_FC_END_SEQ |
+ FC_FC_SEQ_INIT, 0);
+ error = fc_seq_send_locked(ep->lp, sp, fp);
+ } else {
+ error = -ENOBUFS;
+ }
+ } else {
+ /*
+ * If not logged into the fabric, don't send ABTS but leave
+ * sequence active until next timeout.
+ */
+ error = 0;
+ }
+ ep->esb_stat |= ESB_ST_ABNORMAL;
+ return error;
+}
+
+/**
+ * fc_seq_exch_abort() - Abort an exchange and sequence
+ * @req_sp: The sequence to be aborted
+ * @timer_msec: The period of time to wait before aborting
+ *
+ * Generally called because of a timeout or an abort from the upper layer.
+ *
+ * Return value: 0 on success else error code
+ */
+int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
+{
+ struct fc_exch *ep;
+ int error;
+
+ ep = fc_seq_exch(req_sp);
+ spin_lock_bh(&ep->ex_lock);
+ error = fc_exch_abort_locked(ep, timer_msec);
+ spin_unlock_bh(&ep->ex_lock);
+ return error;
+}
+
+/**
+ * fc_invoke_resp() - invoke ep->resp()
+ * @ep: The exchange to be operated on
+ * @fp: The frame pointer to pass through to ->resp()
+ * @sp: The sequence pointer to pass through to ->resp()
+ *
+ * Notes:
+ * It is assumed that after initialization has finished (meaning the
+ * first unlock of ex_lock after fc_exch_alloc()) ep->resp and ep->arg are
+ * modified only via fc_seq_set_resp(). This guarantees that neither of
+ * these two variables changes while ep->resp_active > 0.
+ *
+ * If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when
+ * this function is invoked, the first spin_lock_bh() call in this function
+ * will wait until fc_seq_set_resp() has finished modifying these variables.
+ *
+ * Since fc_exch_done() invokes fc_seq_set_resp(), it is guaranteed that
+ * ep->resp() won't be invoked after fc_exch_done() has returned.
+ *
+ * The response handler itself may invoke fc_exch_done(), which will clear the
+ * ep->resp pointer.
+ *
+ * Return value:
+ * Returns true if and only if ep->resp has been invoked.
+ */
+static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
+ struct fc_frame *fp)
+{
+ void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
+ void *arg;
+ bool res = false;
+
+ spin_lock_bh(&ep->ex_lock);
+ ep->resp_active++;
+ if (ep->resp_task != current)
+ ep->resp_task = !ep->resp_task ? current : NULL;
+ resp = ep->resp;
+ arg = ep->arg;
+ spin_unlock_bh(&ep->ex_lock);
+
+ if (resp) {
+ resp(sp, fp, arg);
+ res = true;
+ }
+
+ spin_lock_bh(&ep->ex_lock);
+ if (--ep->resp_active == 0)
+ ep->resp_task = NULL;
+ spin_unlock_bh(&ep->ex_lock);
+
+ if (ep->resp_active == 0)
+ wake_up(&ep->resp_wq);
+
+ return res;
+}
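+
+/*
+ * Timeline sketch of the handshake above (illustrative): while a
+ * handler runs with resp_active > 0, a concurrent fc_seq_set_resp()
+ * from another task sleeps on resp_wq and only updates ep->resp and
+ * ep->arg once resp_active has dropped back to zero.
+ *
+ *	task A: fc_invoke_resp()	task B: fc_seq_set_resp()
+ *	  resp_active++			  sees resp_active > 0, sleeps
+ *	  resp(sp, fp, arg)		  ...
+ *	  --resp_active == 0		  woken via resp_wq
+ *	  wake_up(&ep->resp_wq)		  updates ep->resp and ep->arg
+ */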
+
+/**
+ * fc_exch_timeout() - Handle exchange timer expiration
+ * @work: The work_struct identifying the exchange that timed out
+ */
+static void fc_exch_timeout(struct work_struct *work)
+{
+ struct fc_exch *ep = container_of(work, struct fc_exch,
+ timeout_work.work);
+ struct fc_seq *sp = &ep->seq;
+ u32 e_stat;
+ int rc = 1;
+
+ FC_EXCH_DBG(ep, "Exchange timed out state %x\n", ep->state);
+
+ spin_lock_bh(&ep->ex_lock);
+ if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
+ goto unlock;
+
+ e_stat = ep->esb_stat;
+ if (e_stat & ESB_ST_COMPLETE) {
+ ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
+ spin_unlock_bh(&ep->ex_lock);
+ if (e_stat & ESB_ST_REC_QUAL)
+ fc_exch_rrq(ep);
+ goto done;
+ } else {
+ if (e_stat & ESB_ST_ABNORMAL)
+ rc = fc_exch_done_locked(ep);
+ spin_unlock_bh(&ep->ex_lock);
+ if (!rc)
+ fc_exch_delete(ep);
+ fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT));
+ fc_seq_set_resp(sp, NULL, ep->arg);
+ fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
+ goto done;
+ }
+unlock:
+ spin_unlock_bh(&ep->ex_lock);
+done:
+ /*
+ * This release matches the hold taken when the timer was set.
+ */
+ fc_exch_release(ep);
+}
+
+/**
+ * fc_exch_em_alloc() - Allocate an exchange from a specified EM.
+ * @lport: The local port that the exchange is for
+ * @mp: The exchange manager that will allocate the exchange
+ *
+ * Returns pointer to allocated fc_exch with exch lock held.
+ */
+static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
+ struct fc_exch_mgr *mp)
+{
+ struct fc_exch *ep;
+ unsigned int cpu;
+ u16 index;
+ struct fc_exch_pool *pool;
+
+ /* allocate memory for exchange */
+ ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
+ if (!ep) {
+ atomic_inc(&mp->stats.no_free_exch);
+ goto out;
+ }
+ memset(ep, 0, sizeof(*ep));
+
+ cpu = raw_smp_processor_id();
+ pool = per_cpu_ptr(mp->pool, cpu);
+ spin_lock_bh(&pool->lock);
+
+ /* peek cache of free slot */
+ if (pool->left != FC_XID_UNKNOWN) {
+ if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) {
+ index = pool->left;
+ pool->left = FC_XID_UNKNOWN;
+ goto hit;
+ }
+ }
+ if (pool->right != FC_XID_UNKNOWN) {
+ if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) {
+ index = pool->right;
+ pool->right = FC_XID_UNKNOWN;
+ goto hit;
+ }
+ }
+
+ index = pool->next_index;
+ /* allocate new exch from pool */
+ while (fc_exch_ptr_get(pool, index)) {
+ index = index == mp->pool_max_index ? 0 : index + 1;
+ if (index == pool->next_index)
+ goto err;
+ }
+ pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
+hit:
+ fc_exch_hold(ep); /* hold for exch in mp */
+ spin_lock_init(&ep->ex_lock);
+ /*
+ * Hold exch lock for caller to prevent fc_exch_reset()
+ * from releasing exch while fc_exch_alloc() caller is
+ * still working on exch.
+ */
+ spin_lock_bh(&ep->ex_lock);
+
+ fc_exch_ptr_set(pool, index, ep);
+ list_add_tail(&ep->ex_list, &pool->ex_list);
+ fc_seq_alloc(ep, ep->seq_id++);
+ pool->total_exches++;
+ spin_unlock_bh(&pool->lock);
+
+ /*
+ * update exchange
+ */
+ ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
+ ep->em = mp;
+ ep->pool = pool;
+ ep->lp = lport;
+ ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
+ ep->rxid = FC_XID_UNKNOWN;
+ ep->class = mp->class;
+ ep->resp_active = 0;
+ init_waitqueue_head(&ep->resp_wq);
+ INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
+out:
+ return ep;
+err:
+ spin_unlock_bh(&pool->lock);
+ atomic_inc(&mp->stats.no_free_exch_xid);
+ mempool_free(ep, mp->ep_pool);
+ return NULL;
+}
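+
+/*
+ * Worked example of the XID encoding above (illustrative numbers):
+ * with fc_cpu_order == 2 (up to 4 CPUs, fc_cpu_mask == 3), pool index
+ * 5 on CPU 1 with min_xid 0x10 gives:
+ *
+ *	xid = (5 << 2 | 1) + 0x10 = 0x25
+ *
+ * fc_exch_find() reverses this: cpu = xid & fc_cpu_mask, and the pool
+ * index is (xid - min_xid) >> fc_cpu_order.
+ */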
+
+/**
+ * fc_exch_alloc() - Allocate an exchange from an EM on a
+ * local port's list of EMs.
+ * @lport: The local port that will own the exchange
+ * @fp: The FC frame that the exchange will be for
+ *
+ * This function walks the list of exchange manager (EM)
+ * anchors to select an EM for a new exchange allocation. The
+ * EM is selected when a NULL match function pointer is encountered
+ * or when a call to a match function returns true.
+ */
+static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
+ struct fc_frame *fp)
+{
+ struct fc_exch_mgr_anchor *ema;
+ struct fc_exch *ep;
+
+ list_for_each_entry(ema, &lport->ema_list, ema_list) {
+ if (!ema->match || ema->match(fp)) {
+ ep = fc_exch_em_alloc(lport, ema->mp);
+ if (ep)
+ return ep;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * fc_exch_find() - Lookup and hold an exchange
+ * @mp: The exchange manager to lookup the exchange from
+ * @xid: The XID of the exchange to look up
+ */
+static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
+{
+ struct fc_lport *lport = mp->lport;
+ struct fc_exch_pool *pool;
+ struct fc_exch *ep = NULL;
+ u16 cpu = xid & fc_cpu_mask;
+
+ if (xid == FC_XID_UNKNOWN)
+ return NULL;
+
+ if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
+ pr_err("host%u: lport %6.6x: xid %d invalid CPU %d\n",
+ lport->host->host_no, lport->port_id, xid, cpu);
+ return NULL;
+ }
+
+ if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
+ pool = per_cpu_ptr(mp->pool, cpu);
+ spin_lock_bh(&pool->lock);
+ ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
+ if (ep == &fc_quarantine_exch) {
+ FC_LPORT_DBG(lport, "xid %x quarantined\n", xid);
+ ep = NULL;
+ }
+ if (ep) {
+ WARN_ON(ep->xid != xid);
+ fc_exch_hold(ep);
+ }
+ spin_unlock_bh(&pool->lock);
+ }
+ return ep;
+}
+
+/**
+ * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and
+ * the memory allocated for the related objects may be freed.
+ * @sp: The sequence that has completed
+ *
+ * Note: May sleep if invoked from outside a response handler.
+ */
+void fc_exch_done(struct fc_seq *sp)
+{
+ struct fc_exch *ep = fc_seq_exch(sp);
+ int rc;
+
+ spin_lock_bh(&ep->ex_lock);
+ rc = fc_exch_done_locked(ep);
+ spin_unlock_bh(&ep->ex_lock);
+
+ fc_seq_set_resp(sp, NULL, ep->arg);
+ if (!rc)
+ fc_exch_delete(ep);
+}
+EXPORT_SYMBOL(fc_exch_done);
+
+/**
+ * fc_exch_resp() - Allocate a new exchange for a response frame
+ * @lport: The local port that the exchange was for
+ * @mp: The exchange manager to allocate the exchange from
+ * @fp: The response frame
+ *
+ * Sets the responder ID in the frame header.
+ */
+static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
+ struct fc_exch_mgr *mp,
+ struct fc_frame *fp)
+{
+ struct fc_exch *ep;
+ struct fc_frame_header *fh;
+
+ ep = fc_exch_alloc(lport, fp);
+ if (ep) {
+ ep->class = fc_frame_class(fp);
+
+ /*
+ * Set EX_CTX indicating we're responding on this exchange.
+ */
+ ep->f_ctl |= FC_FC_EX_CTX; /* we're responding */
+ ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not new */
+ fh = fc_frame_header_get(fp);
+ ep->sid = ntoh24(fh->fh_d_id);
+ ep->did = ntoh24(fh->fh_s_id);
+ ep->oid = ep->did;
+
+ /*
+ * Allocated exchange has placed the XID in the
+ * originator field. Move it to the responder field,
+ * and set the originator XID from the frame.
+ */
+ ep->rxid = ep->xid;
+ ep->oxid = ntohs(fh->fh_ox_id);
+ ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
+ if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
+ ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+
+ fc_exch_hold(ep); /* hold for caller */
+ spin_unlock_bh(&ep->ex_lock); /* lock from fc_exch_alloc */
+ }
+ return ep;
+}
+
+/**
+ * fc_seq_lookup_recip() - Find a sequence where the other end
+ * originated the sequence
+ * @lport: The local port that the frame was sent to
+ * @mp: The Exchange Manager to lookup the exchange from
+ * @fp: The frame associated with the sequence we're looking for
+ *
+ * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
+ * on the ep that should be released by the caller.
+ */
+static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+ struct fc_exch_mgr *mp,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_exch *ep = NULL;
+ struct fc_seq *sp = NULL;
+ enum fc_pf_rjt_reason reject = FC_RJT_NONE;
+ u32 f_ctl;
+ u16 xid;
+
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);
+
+ /*
+ * Lookup or create the exchange if we will be creating the sequence.
+ */
+ if (f_ctl & FC_FC_EX_CTX) {
+ xid = ntohs(fh->fh_ox_id); /* we originated exch */
+ ep = fc_exch_find(mp, xid);
+ if (!ep) {
+ atomic_inc(&mp->stats.xid_not_found);
+ reject = FC_RJT_OX_ID;
+ goto out;
+ }
+ if (ep->rxid == FC_XID_UNKNOWN)
+ ep->rxid = ntohs(fh->fh_rx_id);
+ else if (ep->rxid != ntohs(fh->fh_rx_id)) {
+ reject = FC_RJT_OX_ID;
+ goto rel;
+ }
+ } else {
+ xid = ntohs(fh->fh_rx_id); /* we are the responder */
+
+ /*
+ * Special case for MDS issuing an ELS TEST with a
+ * bad rxid of 0.
+ * XXX take this out once we do the proper reject.
+ */
+ if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
+ fc_frame_payload_op(fp) == ELS_TEST) {
+ fh->fh_rx_id = htons(FC_XID_UNKNOWN);
+ xid = FC_XID_UNKNOWN;
+ }
+
+ /*
+ * new sequence - find the exchange
+ */
+ ep = fc_exch_find(mp, xid);
+ if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
+ if (ep) {
+ atomic_inc(&mp->stats.xid_busy);
+ reject = FC_RJT_RX_ID;
+ goto rel;
+ }
+ ep = fc_exch_resp(lport, mp, fp);
+ if (!ep) {
+ reject = FC_RJT_EXCH_EST; /* XXX */
+ goto out;
+ }
+ xid = ep->xid; /* get our XID */
+ } else if (!ep) {
+ atomic_inc(&mp->stats.xid_not_found);
+ reject = FC_RJT_RX_ID; /* XID not found */
+ goto out;
+ }
+ }
+
+ spin_lock_bh(&ep->ex_lock);
+ /*
+ * At this point, we have the exchange held.
+ * Find or create the sequence.
+ */
+ if (fc_sof_is_init(fr_sof(fp))) {
+ sp = &ep->seq;
+ sp->ssb_stat |= SSB_ST_RESP;
+ sp->id = fh->fh_seq_id;
+ } else {
+ sp = &ep->seq;
+ if (sp->id != fh->fh_seq_id) {
+ atomic_inc(&mp->stats.seq_not_found);
+ if (f_ctl & FC_FC_END_SEQ) {
+ /*
+ * Update the sequence ID from the incoming
+ * last frame of the sequence. This is needed
+ * for FC targets using DDP, where the stack
+ * only sees the header of the sequence's last
+ * frame. The seq_id in that frame header was
+ * allocated by the initiator and differs from
+ * the seq_id allocated when the target sent
+ * XFER_RDY. Without this update the mismatch
+ * would be treated as a missing sequence, no
+ * RSP would be sent, and the write request on
+ * the other end would never finish.
+ */
+ sp->ssb_stat |= SSB_ST_RESP;
+ sp->id = fh->fh_seq_id;
+ } else {
+ spin_unlock_bh(&ep->ex_lock);
+
+ /* sequence/exch should exist */
+ reject = FC_RJT_SEQ_ID;
+ goto rel;
+ }
+ }
+ }
+ WARN_ON(ep != fc_seq_exch(sp));
+
+ if (f_ctl & FC_FC_SEQ_INIT)
+ ep->esb_stat |= ESB_ST_SEQ_INIT;
+ spin_unlock_bh(&ep->ex_lock);
+
+ fr_seq(fp) = sp;
+out:
+ return reject;
+rel:
+ fc_exch_done(&ep->seq);
+ fc_exch_release(ep); /* hold from fc_exch_find/fc_exch_resp */
+ return reject;
+}
+
+/**
+ * fc_seq_lookup_orig() - Find a sequence where this end
+ * originated the sequence
+ * @mp: The Exchange Manager to lookup the exchange from
+ * @fp: The frame associated with the sequence we're looking for
+ *
+ * Does not hold the sequence for the caller.
+ */
+static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_exch *ep;
+ struct fc_seq *sp = NULL;
+ u32 f_ctl;
+ u16 xid;
+
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
+ xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
+ ep = fc_exch_find(mp, xid);
+ if (!ep)
+ return NULL;
+ if (ep->seq.id == fh->fh_seq_id) {
+ /*
+ * Save the RX_ID if we didn't previously know it.
+ */
+ sp = &ep->seq;
+ if ((f_ctl & FC_FC_EX_CTX) != 0 &&
+ ep->rxid == FC_XID_UNKNOWN) {
+ ep->rxid = ntohs(fh->fh_rx_id);
+ }
+ }
+ fc_exch_release(ep);
+ return sp;
+}
+
+/**
+ * fc_exch_set_addr() - Set the source and destination IDs for an exchange
+ * @ep: The exchange to set the addresses for
+ * @orig_id: The originator's ID
+ * @resp_id: The responder's ID
+ *
+ * Note this must be done before the first sequence of the exchange is sent.
+ */
+static void fc_exch_set_addr(struct fc_exch *ep,
+ u32 orig_id, u32 resp_id)
+{
+ ep->oid = orig_id;
+ if (ep->esb_stat & ESB_ST_RESP) {
+ ep->sid = resp_id;
+ ep->did = orig_id;
+ } else {
+ ep->sid = orig_id;
+ ep->did = resp_id;
+ }
+}
+
+/**
+ * fc_seq_els_rsp_send() - Send an ELS response using information from
+ * the existing sequence/exchange.
+ * @fp: The received frame
+ * @els_cmd: The ELS command to be sent
+ * @els_data: The ELS data to be sent
+ *
+ * The received frame is not freed.
+ */
+void fc_seq_els_rsp_send(struct fc_frame *fp, enum fc_els_cmd els_cmd,
+ struct fc_seq_els_data *els_data)
+{
+ switch (els_cmd) {
+ case ELS_LS_RJT:
+ fc_seq_ls_rjt(fp, els_data->reason, els_data->explan);
+ break;
+ case ELS_LS_ACC:
+ fc_seq_ls_acc(fp);
+ break;
+ case ELS_RRQ:
+ fc_exch_els_rrq(fp);
+ break;
+ case ELS_REC:
+ fc_exch_els_rec(fp);
+ break;
+ default:
+ FC_LPORT_DBG(fr_dev(fp), "Invalid ELS CMD:%x\n", els_cmd);
+ }
+}
+EXPORT_SYMBOL_GPL(fc_seq_els_rsp_send);
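+
+/*
+ * Minimal usage sketch (illustrative): rejecting a received ELS
+ * request with LS_RJT.
+ *
+ *	struct fc_seq_els_data rjt_data = {
+ *		.reason = ELS_RJT_UNSUP,
+ *		.explan = ELS_EXPL_NONE,
+ *	};
+ *
+ *	fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+ */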
+
+/**
+ * fc_seq_send_last() - Send a sequence that is the last in the exchange
+ * @sp: The sequence that is to be sent
+ * @fp: The frame that will be sent on the sequence
+ * @rctl: The R_CTL information to be sent
+ * @fh_type: The frame header type
+ */
+static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
+ enum fc_rctl rctl, enum fc_fh_type fh_type)
+{
+ u32 f_ctl;
+ struct fc_exch *ep = fc_seq_exch(sp);
+
+ f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
+ f_ctl |= ep->f_ctl;
+ fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
+ fc_seq_send_locked(ep->lp, sp, fp);
+}
+
+/**
+ * fc_seq_send_ack() - Send an acknowledgement that we've received a frame
+ * @sp: The sequence to send the ACK on
+ * @rx_fp: The received frame that is being acknowledged
+ *
+ * Send ACK_1 (or equiv.) indicating we received something.
+ */
+static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
+{
+ struct fc_frame *fp;
+ struct fc_frame_header *rx_fh;
+ struct fc_frame_header *fh;
+ struct fc_exch *ep = fc_seq_exch(sp);
+ struct fc_lport *lport = ep->lp;
+ unsigned int f_ctl;
+
+ /*
+ * Don't send ACKs for class 3.
+ */
+ if (fc_sof_needs_ack(fr_sof(rx_fp))) {
+ fp = fc_frame_alloc(lport, 0);
+ if (!fp) {
+ FC_EXCH_DBG(ep, "Drop ACK request, out of memory\n");
+ return;
+ }
+
+ fh = fc_frame_header_get(fp);
+ fh->fh_r_ctl = FC_RCTL_ACK_1;
+ fh->fh_type = FC_TYPE_BLS;
+
+ /*
+ * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
+ * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
+ * Bits 9-8 are meaningful (retransmitted or unidirectional).
+ * Last ACK uses bits 7-6 (continue sequence),
+ * bits 5-4 are meaningful (what kind of ACK to use).
+ */
+ rx_fh = fc_frame_header_get(rx_fp);
+ f_ctl = ntoh24(rx_fh->fh_f_ctl);
+ f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
+ FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
+ FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
+ FC_FC_RETX_SEQ | FC_FC_UNI_TX;
+ f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
+ hton24(fh->fh_f_ctl, f_ctl);
+
+ fc_exch_setup_hdr(ep, fp, f_ctl);
+ fh->fh_seq_id = rx_fh->fh_seq_id;
+ fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
+ fh->fh_parm_offset = htonl(1); /* ack single frame */
+
+ fr_sof(fp) = fr_sof(rx_fp);
+ if (f_ctl & FC_FC_END_SEQ)
+ fr_eof(fp) = FC_EOF_T;
+ else
+ fr_eof(fp) = FC_EOF_N;
+
+ lport->tt.frame_send(lport, fp);
+ }
+}
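+
+/*
+ * Worked example of the F_CTL context inversion above (illustrative):
+ * if the received frame has EX_CTX clear and SEQ_CTX set, the XOR
+ * produces an ACK with EX_CTX set and SEQ_CTX clear, i.e. the context
+ * bits are re-expressed from our side of the exchange and sequence.
+ */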
+
+/**
+ * fc_exch_send_ba_rjt() - Send BLS Reject
+ * @rx_fp: The frame being rejected
+ * @reason: The reason the frame is being rejected
+ * @explan: The explanation for the rejection
+ *
+ * This is for rejecting BA_ABTS only.
+ */
+static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
+ enum fc_ba_rjt_reason reason,
+ enum fc_ba_rjt_explan explan)
+{
+ struct fc_frame *fp;
+ struct fc_frame_header *rx_fh;
+ struct fc_frame_header *fh;
+ struct fc_ba_rjt *rp;
+ struct fc_seq *sp;
+ struct fc_lport *lport;
+ unsigned int f_ctl;
+
+ lport = fr_dev(rx_fp);
+ sp = fr_seq(rx_fp);
+ fp = fc_frame_alloc(lport, sizeof(*rp));
+ if (!fp) {
+ FC_EXCH_DBG(fc_seq_exch(sp),
+ "Drop BA_RJT request, out of memory\n");
+ return;
+ }
+ fh = fc_frame_header_get(fp);
+ rx_fh = fc_frame_header_get(rx_fp);
+
+ memset(fh, 0, sizeof(*fh) + sizeof(*rp));
+
+ rp = fc_frame_payload_get(fp, sizeof(*rp));
+ rp->br_reason = reason;
+ rp->br_explan = explan;
+
+ /*
+ * seq_id, cs_ctl, df_ctl and param/offset are zero.
+ */
+ memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
+ memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
+ fh->fh_ox_id = rx_fh->fh_ox_id;
+ fh->fh_rx_id = rx_fh->fh_rx_id;
+ fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
+ fh->fh_r_ctl = FC_RCTL_BA_RJT;
+ fh->fh_type = FC_TYPE_BLS;
+
+ /*
+ * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
+ * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
+ * Bits 9-8 are meaningful (retransmitted or unidirectional).
+ * Last ACK uses bits 7-6 (continue sequence),
+ * bits 5-4 are meaningful (what kind of ACK to use).
+ * Always set LAST_SEQ, END_SEQ.
+ */
+ f_ctl = ntoh24(rx_fh->fh_f_ctl);
+ f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
+ FC_FC_END_CONN | FC_FC_SEQ_INIT |
+ FC_FC_RETX_SEQ | FC_FC_UNI_TX;
+ f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
+ f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+ f_ctl &= ~FC_FC_FIRST_SEQ;
+ hton24(fh->fh_f_ctl, f_ctl);
+
+ fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
+ fr_eof(fp) = FC_EOF_T;
+ if (fc_sof_needs_ack(fr_sof(fp)))
+ fr_eof(fp) = FC_EOF_N;
+
+ lport->tt.frame_send(lport, fp);
+}
+
+/**
+ * fc_exch_recv_abts() - Handle an incoming ABTS
+ * @ep: The exchange the abort was on
+ * @rx_fp: The ABTS frame
+ *
+ * This would usually be for target mode, but could be due to a lost
+ * FCP transfer ready, confirm, or RRQ. We always handle this as an
+ * exchange abort, ignoring the parameter.
+ */
+static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
+{
+ struct fc_frame *fp;
+ struct fc_ba_acc *ap;
+ struct fc_frame_header *fh;
+ struct fc_seq *sp;
+
+ if (!ep)
+ goto reject;
+
+ FC_EXCH_DBG(ep, "exch: ABTS received\n");
+ fp = fc_frame_alloc(ep->lp, sizeof(*ap));
+ if (!fp) {
+ FC_EXCH_DBG(ep, "Drop ABTS request, out of memory\n");
+ goto free;
+ }
+
+ spin_lock_bh(&ep->ex_lock);
+ if (ep->esb_stat & ESB_ST_COMPLETE) {
+ spin_unlock_bh(&ep->ex_lock);
+ FC_EXCH_DBG(ep, "exch: ABTS rejected, exchange complete\n");
+ fc_frame_free(fp);
+ goto reject;
+ }
+ if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
+ ep->esb_stat |= ESB_ST_REC_QUAL;
+ fc_exch_hold(ep); /* hold for REC_QUAL */
+ }
+ fc_exch_timer_set_locked(ep, ep->r_a_tov);
+ fh = fc_frame_header_get(fp);
+ ap = fc_frame_payload_get(fp, sizeof(*ap));
+ memset(ap, 0, sizeof(*ap));
+ sp = &ep->seq;
+ ap->ba_high_seq_cnt = htons(0xffff);
+ if (sp->ssb_stat & SSB_ST_RESP) {
+ ap->ba_seq_id = sp->id;
+ ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
+ ap->ba_high_seq_cnt = fh->fh_seq_cnt;
+ ap->ba_low_seq_cnt = htons(sp->cnt);
+ }
+ sp = fc_seq_start_next_locked(sp);
+ fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
+ ep->esb_stat |= ESB_ST_ABNORMAL;
+ spin_unlock_bh(&ep->ex_lock);
+
+free:
+ fc_frame_free(rx_fp);
+ return;
+
+reject:
+ fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
+ goto free;
+}
+
+/**
+ * fc_seq_assign() - Assign exchange and sequence for incoming request
+ * @lport: The local port that received the request
+ * @fp: The request frame
+ *
+ * On success, the sequence pointer will be returned and also in fr_seq(@fp).
+ * A reference will be held on the exchange/sequence for the caller, which
+ * must call fc_seq_release().
+ */
+struct fc_seq *fc_seq_assign(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_exch_mgr_anchor *ema;
+
+ WARN_ON(lport != fr_dev(fp));
+ WARN_ON(fr_seq(fp));
+ fr_seq(fp) = NULL;
+
+ list_for_each_entry(ema, &lport->ema_list, ema_list)
+ if ((!ema->match || ema->match(fp)) &&
+ fc_seq_lookup_recip(lport, ema->mp, fp) == FC_RJT_NONE)
+ break;
+ return fr_seq(fp);
+}
+EXPORT_SYMBOL(fc_seq_assign);
+
+/**
+ * fc_seq_release() - Release the hold
+ * @sp: The sequence.
+ */
+void fc_seq_release(struct fc_seq *sp)
+{
+ fc_exch_release(fc_seq_exch(sp));
+}
+EXPORT_SYMBOL(fc_seq_release);
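+
+/*
+ * Minimal usage sketch (illustrative): a target-mode caller assigning
+ * a sequence to an unsolicited request and dropping the hold when done.
+ *
+ *	struct fc_seq *sp = fc_seq_assign(lport, fp);
+ *
+ *	if (sp) {
+ *		... process the request ...
+ *		fc_seq_release(sp);
+ *	}
+ */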
+
+/**
+ * fc_exch_recv_req() - Handler for an incoming request
+ * @lport: The local port that received the request
+ * @mp: The EM that the exchange is on
+ * @fp: The request frame
+ *
+ * This is used when the other end is originating the exchange
+ * and the sequence.
+ */
+static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
+ struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_seq *sp = NULL;
+ struct fc_exch *ep = NULL;
+ enum fc_pf_rjt_reason reject;
+
+ /*
+ * We can have the wrong fc_lport at this point with NPIV, which is a
+ * problem now that we know a new exchange needs to be allocated.
+ */
+ lport = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
+ if (!lport) {
+ fc_frame_free(fp);
+ return;
+ }
+ fr_dev(fp) = lport;
+
+ BUG_ON(fr_seq(fp)); /* XXX remove later */
+
+ /*
+ * If the RX_ID is 0xffff, don't allocate an exchange.
+ * The upper-level protocol may request one later, if needed.
+ */
+ if (fh->fh_rx_id == htons(FC_XID_UNKNOWN))
+ return fc_lport_recv(lport, fp);
+
+ reject = fc_seq_lookup_recip(lport, mp, fp);
+ if (reject == FC_RJT_NONE) {
+ sp = fr_seq(fp); /* sequence will be held */
+ ep = fc_seq_exch(sp);
+ fc_seq_send_ack(sp, fp);
+ ep->encaps = fr_encaps(fp);
+
+ /*
+ * Call the receive function.
+ *
+ * The receive function may allocate a new sequence
+ * over the old one, so we shouldn't change the
+ * sequence after this.
+ *
+ * The frame will be freed by the receive function.
+ * If the new exchange's resp handler is valid,
+ * call it first.
+ */
+ if (!fc_invoke_resp(ep, sp, fp))
+ fc_lport_recv(lport, fp);
+ fc_exch_release(ep); /* release from lookup */
+ } else {
+ FC_LPORT_DBG(lport, "exch/seq lookup failed: reject %x\n",
+ reject);
+ fc_frame_free(fp);
+ }
+}
+
+/**
+ * fc_exch_recv_seq_resp() - Handler for an incoming response where the other
+ * end is the originator of the sequence that is a
+ * response to our initial exchange
+ * @mp: The EM that the exchange is on
+ * @fp: The response frame
+ */
+static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_seq *sp;
+ struct fc_exch *ep;
+ enum fc_sof sof;
+ u32 f_ctl;
+ int rc;
+
+ ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
+ if (!ep) {
+ atomic_inc(&mp->stats.xid_not_found);
+ goto out;
+ }
+ if (ep->esb_stat & ESB_ST_COMPLETE) {
+ atomic_inc(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ if (ep->rxid == FC_XID_UNKNOWN)
+ ep->rxid = ntohs(fh->fh_rx_id);
+ if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
+ atomic_inc(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ if (ep->did != ntoh24(fh->fh_s_id) &&
+ ep->did != FC_FID_FLOGI) {
+ atomic_inc(&mp->stats.xid_not_found);
+ goto rel;
+ }
+ sof = fr_sof(fp);
+ sp = &ep->seq;
+ if (fc_sof_is_init(sof)) {
+ sp->ssb_stat |= SSB_ST_RESP;
+ sp->id = fh->fh_seq_id;
+ }
+
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ fr_seq(fp) = sp;
+
+ spin_lock_bh(&ep->ex_lock);
+ if (f_ctl & FC_FC_SEQ_INIT)
+ ep->esb_stat |= ESB_ST_SEQ_INIT;
+ spin_unlock_bh(&ep->ex_lock);
+
+ if (fc_sof_needs_ack(sof))
+ fc_seq_send_ack(sp, fp);
+
+ if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
+ (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
+ (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
+ spin_lock_bh(&ep->ex_lock);
+ rc = fc_exch_done_locked(ep);
+ WARN_ON(fc_seq_exch(sp) != ep);
+ spin_unlock_bh(&ep->ex_lock);
+ if (!rc) {
+ fc_exch_delete(ep);
+ } else {
+ FC_EXCH_DBG(ep, "ep is completed already,"
+ "hence skip calling the resp\n");
+ goto skip_resp;
+ }
+ }
+
+ /*
+ * Call the receive function.
+ * The sequence is held (has a refcnt) for us,
+ * but not for the receive function.
+ *
+ * The receive function may allocate a new sequence
+ * over the old one, so we shouldn't change the
+ * sequence after this.
+ *
+ * The frame will be freed by the receive function.
+ * If the new exchange's resp handler is valid,
+ * call it first.
+ */
+ if (!fc_invoke_resp(ep, sp, fp))
+ fc_frame_free(fp);
+
+skip_resp:
+ fc_exch_release(ep);
+ return;
+rel:
+ fc_exch_release(ep);
+out:
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_exch_recv_resp() - Handler for a sequence where the other end is
+ * responding to our sequence
+ * @mp: The EM that the exchange is on
+ * @fp: The response frame
+ */
+static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+ struct fc_seq *sp;
+
+ sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
+
+ if (!sp)
+ atomic_inc(&mp->stats.xid_not_found);
+ else
+ atomic_inc(&mp->stats.non_bls_resp);
+
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_exch_abts_resp() - Handler for a response to an ABTS
+ * @ep: The exchange that the frame is on
+ * @fp: The response frame
+ *
+ * This response would be to an ABTS cancelling an exchange or sequence.
+ * The response can be either BA_ACC or BA_RJT.
+ */
+static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh;
+ struct fc_ba_acc *ap;
+ struct fc_seq *sp;
+ u16 low;
+ u16 high;
+ int rc = 1, has_rec = 0;
+
+ fh = fc_frame_header_get(fp);
+ FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
+ fc_exch_rctl_name(fh->fh_r_ctl));
+
+ if (cancel_delayed_work_sync(&ep->timeout_work)) {
+ FC_EXCH_DBG(ep, "Exchange timer canceled due to ABTS response\n");
+ fc_exch_release(ep); /* release from pending timer hold */
+ return;
+ }
+
+ spin_lock_bh(&ep->ex_lock);
+ switch (fh->fh_r_ctl) {
+ case FC_RCTL_BA_ACC:
+ ap = fc_frame_payload_get(fp, sizeof(*ap));
+ if (!ap)
+ break;
+
+ /*
+ * Decide whether to establish a Recovery Qualifier.
+ * We do this if there is a non-empty SEQ_CNT range and
+ * SEQ_ID is the same as the one we aborted.
+ */
+ low = ntohs(ap->ba_low_seq_cnt);
+ high = ntohs(ap->ba_high_seq_cnt);
+ if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
+ (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
+ ap->ba_seq_id == ep->seq_id) && low != high) {
+ ep->esb_stat |= ESB_ST_REC_QUAL;
+ fc_exch_hold(ep); /* hold for recovery qualifier */
+ has_rec = 1;
+ }
+ break;
+ case FC_RCTL_BA_RJT:
+ break;
+ default:
+ break;
+ }
+
+ /* XXX: Do we need to do some other checks here? Can we reuse more
+ * of fc_exch_recv_seq_resp()?
+ */
+ sp = &ep->seq;
+ /*
+ * do we want to check END_SEQ as well as LAST_SEQ here?
+ */
+ if (ep->fh_type != FC_TYPE_FCP &&
+ ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
+ rc = fc_exch_done_locked(ep);
+ spin_unlock_bh(&ep->ex_lock);
+
+ fc_exch_hold(ep);
+ if (!rc)
+ fc_exch_delete(ep);
+ if (!fc_invoke_resp(ep, sp, fp))
+ fc_frame_free(fp);
+ if (has_rec)
+ fc_exch_timer_set(ep, ep->r_a_tov);
+ fc_exch_release(ep);
+}
+
+/**
+ * fc_exch_recv_bls() - Handler for a BLS sequence
+ * @mp: The EM that the exchange is on
+ * @fp: The request frame
+ *
+ * The BLS frame is always a sequence initiated by the remote side.
+ * We may be either the originator or recipient of the exchange.
+ */
+static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh;
+ struct fc_exch *ep;
+ u32 f_ctl;
+
+ fh = fc_frame_header_get(fp);
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ fr_seq(fp) = NULL;
+
+ ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
+ ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
+ if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
+ spin_lock_bh(&ep->ex_lock);
+ ep->esb_stat |= ESB_ST_SEQ_INIT;
+ spin_unlock_bh(&ep->ex_lock);
+ }
+ if (f_ctl & FC_FC_SEQ_CTX) {
+ /*
+ * A response to a sequence we initiated.
+ * This should only be ACKs for class 2 or F.
+ */
+ switch (fh->fh_r_ctl) {
+ case FC_RCTL_ACK_1:
+ case FC_RCTL_ACK_0:
+ break;
+ default:
+ if (ep)
+ FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n",
+ fh->fh_r_ctl,
+ fc_exch_rctl_name(fh->fh_r_ctl));
+ break;
+ }
+ fc_frame_free(fp);
+ } else {
+ switch (fh->fh_r_ctl) {
+ case FC_RCTL_BA_RJT:
+ case FC_RCTL_BA_ACC:
+ if (ep)
+ fc_exch_abts_resp(ep, fp);
+ else
+ fc_frame_free(fp);
+ break;
+ case FC_RCTL_BA_ABTS:
+ if (ep)
+ fc_exch_recv_abts(ep, fp);
+ else
+ fc_frame_free(fp);
+ break;
+ default: /* ignore junk */
+ fc_frame_free(fp);
+ break;
+ }
+ }
+ if (ep)
+ fc_exch_release(ep); /* release hold taken by fc_exch_find */
+}
+
+/**
+ * fc_seq_ls_acc() - Accept sequence with LS_ACC
+ * @rx_fp: The received frame, not freed here.
+ *
+ * If this fails due to allocation or transmit congestion, assume the
+ * originator will repeat the sequence.
+ */
+static void fc_seq_ls_acc(struct fc_frame *rx_fp)
+{
+ struct fc_lport *lport;
+ struct fc_els_ls_acc *acc;
+ struct fc_frame *fp;
+ struct fc_seq *sp;
+
+ lport = fr_dev(rx_fp);
+ sp = fr_seq(rx_fp);
+ fp = fc_frame_alloc(lport, sizeof(*acc));
+ if (!fp) {
+ FC_EXCH_DBG(fc_seq_exch(sp),
+ "exch: drop LS_ACC, out of memory\n");
+ return;
+ }
+ acc = fc_frame_payload_get(fp, sizeof(*acc));
+ memset(acc, 0, sizeof(*acc));
+ acc->la_cmd = ELS_LS_ACC;
+ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+}
+
+/**
+ * fc_seq_ls_rjt() - Reject a sequence with ELS LS_RJT
+ * @rx_fp: The received frame, not freed here.
+ * @reason: The reason the sequence is being rejected
+ * @explan: The explanation for the rejection
+ *
+ * If this fails due to allocation or transmit congestion, assume the
+ * originator will repeat the sequence.
+ */
+static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
+ enum fc_els_rjt_explan explan)
+{
+ struct fc_lport *lport;
+ struct fc_els_ls_rjt *rjt;
+ struct fc_frame *fp;
+ struct fc_seq *sp;
+
+ lport = fr_dev(rx_fp);
+ sp = fr_seq(rx_fp);
+ fp = fc_frame_alloc(lport, sizeof(*rjt));
+ if (!fp) {
+ FC_EXCH_DBG(fc_seq_exch(sp),
+ "exch: drop LS_ACC, out of memory\n");
+ return;
+ }
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ memset(rjt, 0, sizeof(*rjt));
+ rjt->er_cmd = ELS_LS_RJT;
+ rjt->er_reason = reason;
+ rjt->er_explan = explan;
+ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+}
+
+/**
+ * fc_exch_reset() - Reset an exchange
+ * @ep: The exchange to be reset
+ *
+ * Note: May sleep if invoked from outside a response handler.
+ */
+static void fc_exch_reset(struct fc_exch *ep)
+{
+ struct fc_seq *sp;
+ int rc = 1;
+
+ spin_lock_bh(&ep->ex_lock);
+ ep->state |= FC_EX_RST_CLEANUP;
+ fc_exch_timer_cancel(ep);
+ if (ep->esb_stat & ESB_ST_REC_QUAL)
+ atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */
+ ep->esb_stat &= ~ESB_ST_REC_QUAL;
+ sp = &ep->seq;
+ rc = fc_exch_done_locked(ep);
+ spin_unlock_bh(&ep->ex_lock);
+
+ fc_exch_hold(ep);
+
+ if (!rc) {
+ fc_exch_delete(ep);
+ } else {
+ FC_EXCH_DBG(ep, "ep is completed already,"
+ "hence skip calling the resp\n");
+ goto skip_resp;
+ }
+
+ fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
+skip_resp:
+ fc_seq_set_resp(sp, NULL, ep->arg);
+ fc_exch_release(ep);
+}
+
+/**
+ * fc_exch_pool_reset() - Reset a per cpu exchange pool
+ * @lport: The local port that the exchange pool is on
+ * @pool: The exchange pool to be reset
+ * @sid: The source ID
+ * @did: The destination ID
+ *
+ * Resets a per-CPU exchange pool, releasing all of its sequences
+ * and exchanges. If sid is non-zero then reset only exchanges
+ * we sourced from the local port's FID. If did is non-zero then
+ * only reset exchanges destined for the local port's FID.
+ */
+static void fc_exch_pool_reset(struct fc_lport *lport,
+ struct fc_exch_pool *pool,
+ u32 sid, u32 did)
+{
+ struct fc_exch *ep;
+ struct fc_exch *next;
+
+ spin_lock_bh(&pool->lock);
+restart:
+ list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
+ if ((lport == ep->lp) &&
+ (sid == 0 || sid == ep->sid) &&
+ (did == 0 || did == ep->did)) {
+ fc_exch_hold(ep);
+ spin_unlock_bh(&pool->lock);
+
+ fc_exch_reset(ep);
+
+ fc_exch_release(ep);
+ spin_lock_bh(&pool->lock);
+
+ /*
+ * must restart the loop in case multiple
+ * eps were released while the lock was down.
+ */
+ goto restart;
+ }
+ }
+ pool->next_index = 0;
+ pool->left = FC_XID_UNKNOWN;
+ pool->right = FC_XID_UNKNOWN;
+ spin_unlock_bh(&pool->lock);
+}
+
+/**
+ * fc_exch_mgr_reset() - Reset all EMs of a local port
+ * @lport: The local port whose EMs are to be reset
+ * @sid: The source ID
+ * @did: The destination ID
+ *
+ * Reset all EMs associated with a given local port. Release all
+ * sequences and exchanges. If sid is non-zero then reset only the
+ * exchanges sent from the local port's FID. If did is non-zero then
+ * reset only exchanges destined for the local port's FID.
+ */
+void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
+{
+ struct fc_exch_mgr_anchor *ema;
+ unsigned int cpu;
+
+ list_for_each_entry(ema, &lport->ema_list, ema_list) {
+ for_each_possible_cpu(cpu)
+ fc_exch_pool_reset(lport,
+ per_cpu_ptr(ema->mp->pool, cpu),
+ sid, did);
+ }
+}
+EXPORT_SYMBOL(fc_exch_mgr_reset);
+
+/**
+ * fc_exch_lookup() - Find an exchange
+ * @lport: The local port
+ * @xid: The exchange ID
+ *
+ * Returns exchange pointer with hold for caller, or NULL if not found.
+ */
+static struct fc_exch *fc_exch_lookup(struct fc_lport *lport, u32 xid)
+{
+ struct fc_exch_mgr_anchor *ema;
+
+ list_for_each_entry(ema, &lport->ema_list, ema_list)
+ if (ema->mp->min_xid <= xid && xid <= ema->mp->max_xid)
+ return fc_exch_find(ema->mp, xid);
+ return NULL;
+}
+
+/**
+ * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests
+ * @rfp: The REC frame, not freed here.
+ *
+ * Note that the requesting port may be different from the S_ID in the request.
+ */
+static void fc_exch_els_rec(struct fc_frame *rfp)
+{
+ struct fc_lport *lport;
+ struct fc_frame *fp;
+ struct fc_exch *ep;
+ struct fc_els_rec *rp;
+ struct fc_els_rec_acc *acc;
+ enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
+ enum fc_els_rjt_explan explan;
+ u32 sid;
+ u16 xid, rxid, oxid;
+
+ lport = fr_dev(rfp);
+ rp = fc_frame_payload_get(rfp, sizeof(*rp));
+ explan = ELS_EXPL_INV_LEN;
+ if (!rp)
+ goto reject;
+ sid = ntoh24(rp->rec_s_id);
+ rxid = ntohs(rp->rec_rx_id);
+ oxid = ntohs(rp->rec_ox_id);
+
+ explan = ELS_EXPL_OXID_RXID;
+ if (sid == fc_host_port_id(lport->host))
+ xid = oxid;
+ else
+ xid = rxid;
+ if (xid == FC_XID_UNKNOWN) {
+ FC_LPORT_DBG(lport,
+ "REC request from %x: invalid rxid %x oxid %x\n",
+ sid, rxid, oxid);
+ goto reject;
+ }
+ ep = fc_exch_lookup(lport, xid);
+ if (!ep) {
+ FC_LPORT_DBG(lport,
+ "REC request from %x: rxid %x oxid %x not found\n",
+ sid, rxid, oxid);
+ goto reject;
+ }
+ FC_EXCH_DBG(ep, "REC request from %x: rxid %x oxid %x\n",
+ sid, rxid, oxid);
+ if (ep->oid != sid || oxid != ep->oxid)
+ goto rel;
+ if (rxid != FC_XID_UNKNOWN && rxid != ep->rxid)
+ goto rel;
+ fp = fc_frame_alloc(lport, sizeof(*acc));
+ if (!fp) {
+ FC_EXCH_DBG(ep, "Drop REC request, out of memory\n");
+ goto out;
+ }
+
+ acc = fc_frame_payload_get(fp, sizeof(*acc));
+ memset(acc, 0, sizeof(*acc));
+ acc->reca_cmd = ELS_LS_ACC;
+ acc->reca_ox_id = rp->rec_ox_id;
+ memcpy(acc->reca_ofid, rp->rec_s_id, 3);
+ acc->reca_rx_id = htons(ep->rxid);
+ if (ep->sid == ep->oid)
+ hton24(acc->reca_rfid, ep->did);
+ else
+ hton24(acc->reca_rfid, ep->sid);
+ acc->reca_fc4value = htonl(ep->seq.rec_data);
+ acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
+ ESB_ST_SEQ_INIT |
+ ESB_ST_COMPLETE));
+ fc_fill_reply_hdr(fp, rfp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+out:
+ fc_exch_release(ep);
+ return;
+
+rel:
+ fc_exch_release(ep);
+reject:
+ fc_seq_ls_rjt(rfp, reason, explan);
+}
+
+/**
+ * fc_exch_rrq_resp() - Handler for RRQ responses
+ * @sp: The sequence that the RRQ is on
+ * @fp: The RRQ frame
+ * @arg: The exchange that the RRQ is on
+ *
+ * TODO: fix error handler.
+ */
+static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
+{
+ struct fc_exch *aborted_ep = arg;
+ unsigned int op;
+
+ if (IS_ERR(fp)) {
+ int err = PTR_ERR(fp);
+
+ if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
+ goto cleanup;
+ FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
+ "frame error %d\n", err);
+ return;
+ }
+
+ op = fc_frame_payload_op(fp);
+ fc_frame_free(fp);
+
+ switch (op) {
+ case ELS_LS_RJT:
+ FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
+ fallthrough;
+ case ELS_LS_ACC:
+ goto cleanup;
+ default:
+ FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n",
+ op);
+ return;
+ }
+
+cleanup:
+ fc_exch_done(&aborted_ep->seq);
+ /* drop hold for rec qual */
+ fc_exch_release(aborted_ep);
+}
+
+/**
+ * fc_exch_seq_send() - Send a frame using a new exchange and sequence
+ * @lport: The local port to send the frame on
+ * @fp: The frame to be sent
+ * @resp: The response handler for this request
+ * @destructor: The destructor for the exchange
+ * @arg: The argument to be passed to the response handler
+ * @timer_msec: The timeout period for the exchange
+ *
+ * This routine sets the exchange response handler to the resp()
+ * function pointer. The handler can be called in two scenarios: when
+ * a timeout occurs or when a response frame is received for the
+ * exchange. On timeout, the fc_frame pointer passed to the response
+ * handler indicates the error via the IS_ERR()-related macros.
+ *
+ * The exchange destructor handler is also set in this routine.
+ * The destructor handler is invoked by the EM layer when the exchange
+ * is about to be freed; the caller can use it to release its own
+ * resources along with the exchange.
+ *
+ * The arg is passed back to the resp and destructor handlers.
+ *
+ * The timeout value (in msec) for the exchange is set if a non-zero
+ * timer_msec argument is specified. The timer is canceled when it
+ * fires or when the exchange completes. The exchange timeout handler
+ * is registered by the EM layer.
+ *
+ * Some of the frame header's fields must be filled in before calling
+ * this routine; those fields are:
+ *
+ * - routing control
+ * - FC port did
+ * - FC port sid
+ * - FC header type
+ * - frame control
+ * - parameter or relative offset
+ */
+struct fc_seq *fc_exch_seq_send(struct fc_lport *lport,
+ struct fc_frame *fp,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *fp,
+ void *arg),
+ void (*destructor)(struct fc_seq *, void *),
+ void *arg, u32 timer_msec)
+{
+ struct fc_exch *ep;
+ struct fc_seq *sp = NULL;
+ struct fc_frame_header *fh;
+ struct fc_fcp_pkt *fsp = NULL;
+ int rc = 1;
+
+ ep = fc_exch_alloc(lport, fp);
+ if (!ep) {
+ fc_frame_free(fp);
+ return NULL;
+ }
+ ep->esb_stat |= ESB_ST_SEQ_INIT;
+ fh = fc_frame_header_get(fp);
+ fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
+ ep->resp = resp;
+ ep->destructor = destructor;
+ ep->arg = arg;
+ ep->r_a_tov = lport->r_a_tov;
+ ep->lp = lport;
+ sp = &ep->seq;
+
+	ep->fh_type = fh->fh_type; /* save for possible timeout handling */
+ ep->f_ctl = ntoh24(fh->fh_f_ctl);
+ fc_exch_setup_hdr(ep, fp, ep->f_ctl);
+ sp->cnt++;
+
+ if (ep->xid <= lport->lro_xid && fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) {
+		fsp = fr_fsp(fp);
+		fc_fcp_ddp_setup(fsp, ep->xid);
+ }
+
+ if (unlikely(lport->tt.frame_send(lport, fp)))
+ goto err;
+
+ if (timer_msec)
+ fc_exch_timer_set_locked(ep, timer_msec);
+ ep->f_ctl &= ~FC_FC_FIRST_SEQ; /* not first seq */
+
+ if (ep->f_ctl & FC_FC_SEQ_INIT)
+ ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+ spin_unlock_bh(&ep->ex_lock);
+ return sp;
+err:
+ if (fsp)
+ fc_fcp_ddp_done(fsp);
+ rc = fc_exch_done_locked(ep);
+ spin_unlock_bh(&ep->ex_lock);
+ if (!rc)
+ fc_exch_delete(ep);
+ return NULL;
+}
+EXPORT_SYMBOL(fc_exch_seq_send);
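
A caller-side sketch may help orient readers of the kernel-doc above: allocate the frame, fill the payload and the header fields listed (fc_fill_fc_hdr() covers all of them), then hand the frame off. This is a minimal sketch only; struct my_req, my_resp and my_ctx are illustrative names, not libfc symbols.

	/* Hypothetical ELS request sent via fc_exch_seq_send(). */
	static int my_send_els(struct fc_lport *lport, u32 did,
			       void (*my_resp)(struct fc_seq *,
					       struct fc_frame *, void *),
			       void *my_ctx)
	{
		struct fc_frame *fp;

		fp = fc_frame_alloc(lport, sizeof(struct my_req));
		if (!fp)
			return -ENOMEM;
		/* ... fill the payload via fc_frame_payload_get(fp, ...) ... */
		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did, lport->port_id,
			       FC_TYPE_ELS, FC_FCTL_REQ, 0);
		/* On failure the frame and exchange are cleaned up inside. */
		if (!fc_exch_seq_send(lport, fp, my_resp, NULL, my_ctx,
				      2 * lport->r_a_tov))
			return -ENOMEM;
		return 0;
	}
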
+
+/**
+ * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
+ * @ep: The exchange to send the RRQ on
+ *
+ * This tells the remote port to stop blocking the use of
+ * the exchange and the seq_cnt range.
+ */
+static void fc_exch_rrq(struct fc_exch *ep)
+{
+ struct fc_lport *lport;
+ struct fc_els_rrq *rrq;
+ struct fc_frame *fp;
+ u32 did;
+
+ lport = ep->lp;
+
+ fp = fc_frame_alloc(lport, sizeof(*rrq));
+ if (!fp)
+ goto retry;
+
+ rrq = fc_frame_payload_get(fp, sizeof(*rrq));
+ memset(rrq, 0, sizeof(*rrq));
+ rrq->rrq_cmd = ELS_RRQ;
+ hton24(rrq->rrq_s_id, ep->sid);
+ rrq->rrq_ox_id = htons(ep->oxid);
+ rrq->rrq_rx_id = htons(ep->rxid);
+
+ did = ep->did;
+ if (ep->esb_stat & ESB_ST_RESP)
+ did = ep->sid;
+
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
+ lport->port_id, FC_TYPE_ELS,
+ FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+ if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
+ lport->e_d_tov))
+ return;
+
+retry:
+ FC_EXCH_DBG(ep, "exch: RRQ send failed\n");
+ spin_lock_bh(&ep->ex_lock);
+ if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
+ spin_unlock_bh(&ep->ex_lock);
+ /* drop hold for rec qual */
+ fc_exch_release(ep);
+ return;
+ }
+ ep->esb_stat |= ESB_ST_REC_QUAL;
+ fc_exch_timer_set_locked(ep, ep->r_a_tov);
+ spin_unlock_bh(&ep->ex_lock);
+}
+
+/**
+ * fc_exch_els_rrq() - Handler for ELS RRQ (Reinstate Recovery Qualifier) requests
+ * @fp: The RRQ frame, not freed here.
+ */
+static void fc_exch_els_rrq(struct fc_frame *fp)
+{
+ struct fc_lport *lport;
+ struct fc_exch *ep = NULL; /* request or subject exchange */
+ struct fc_els_rrq *rp;
+ u32 sid;
+ u16 xid;
+ enum fc_els_rjt_explan explan;
+
+ lport = fr_dev(fp);
+ rp = fc_frame_payload_get(fp, sizeof(*rp));
+ explan = ELS_EXPL_INV_LEN;
+ if (!rp)
+ goto reject;
+
+ /*
+ * lookup subject exchange.
+ */
+ sid = ntoh24(rp->rrq_s_id); /* subject source */
+ xid = fc_host_port_id(lport->host) == sid ?
+ ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
+ ep = fc_exch_lookup(lport, xid);
+ explan = ELS_EXPL_OXID_RXID;
+ if (!ep)
+ goto reject;
+ spin_lock_bh(&ep->ex_lock);
+ FC_EXCH_DBG(ep, "RRQ request from %x: xid %x rxid %x oxid %x\n",
+ sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id));
+ if (ep->oxid != ntohs(rp->rrq_ox_id))
+ goto unlock_reject;
+ if (ep->rxid != ntohs(rp->rrq_rx_id) &&
+ ep->rxid != FC_XID_UNKNOWN)
+ goto unlock_reject;
+ explan = ELS_EXPL_SID;
+ if (ep->sid != sid)
+ goto unlock_reject;
+
+ /*
+ * Clear Recovery Qualifier state, and cancel timer if complete.
+ */
+ if (ep->esb_stat & ESB_ST_REC_QUAL) {
+ ep->esb_stat &= ~ESB_ST_REC_QUAL;
+ atomic_dec(&ep->ex_refcnt); /* drop hold for rec qual */
+ }
+ if (ep->esb_stat & ESB_ST_COMPLETE)
+ fc_exch_timer_cancel(ep);
+
+ spin_unlock_bh(&ep->ex_lock);
+
+ /*
+ * Send LS_ACC.
+ */
+ fc_seq_ls_acc(fp);
+ goto out;
+
+unlock_reject:
+ spin_unlock_bh(&ep->ex_lock);
+reject:
+ fc_seq_ls_rjt(fp, ELS_RJT_LOGIC, explan);
+out:
+ if (ep)
+ fc_exch_release(ep); /* drop hold from fc_exch_find */
+}
+
+/**
+ * fc_exch_update_stats() - Update exchange manager stats on the lport
+ * @lport: The local port whose exchange manager stats are updated
+ */
+void fc_exch_update_stats(struct fc_lport *lport)
+{
+ struct fc_host_statistics *st;
+ struct fc_exch_mgr_anchor *ema;
+ struct fc_exch_mgr *mp;
+
+ st = &lport->host_stats;
+
+ list_for_each_entry(ema, &lport->ema_list, ema_list) {
+ mp = ema->mp;
+ st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
+ st->fc_no_free_exch_xid +=
+ atomic_read(&mp->stats.no_free_exch_xid);
+ st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
+ st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
+ st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
+ st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
+ }
+}
+EXPORT_SYMBOL(fc_exch_update_stats);
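
As a usage sketch, the typical caller is the lport layer's fc_host statistics handler (fc_get_host_stats() in fc_lport.c folds these counters in); a minimal, hedged version of such a callback:

	/* Sketch of a fc_host statistics callback; other fields elided. */
	static struct fc_host_statistics *my_get_host_stats(struct Scsi_Host *shost)
	{
		struct fc_lport *lport = shost_priv(shost);

		memset(&lport->host_stats, 0, sizeof(lport->host_stats));
		/* ... fill the non-exchange fc_host_statistics fields ... */
		fc_exch_update_stats(lport);	/* add per-EM exchange counters */
		return &lport->host_stats;
	}
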
+
+/**
+ * fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs
+ * @lport: The local port to add the exchange manager to
+ * @mp: The exchange manager to be added to the local port
+ * @match: The match routine that indicates when this EM should be used
+ */
+struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
+ struct fc_exch_mgr *mp,
+ bool (*match)(struct fc_frame *))
+{
+ struct fc_exch_mgr_anchor *ema;
+
+ ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
+ if (!ema)
+ return ema;
+
+ ema->mp = mp;
+ ema->match = match;
+ /* add EM anchor to EM anchors list */
+ list_add_tail(&ema->ema_list, &lport->ema_list);
+ kref_get(&mp->kref);
+ return ema;
+}
+EXPORT_SYMBOL(fc_exch_mgr_add);
+
+/**
+ * fc_exch_mgr_destroy() - Destroy an exchange manager
+ * @kref: The reference to the EM to be destroyed
+ */
+static void fc_exch_mgr_destroy(struct kref *kref)
+{
+ struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
+
+ mempool_destroy(mp->ep_pool);
+ free_percpu(mp->pool);
+ kfree(mp);
+}
+
+/**
+ * fc_exch_mgr_del() - Delete an EM from a local port's list
+ * @ema: The exchange manager anchor identifying the EM to be deleted
+ */
+void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
+{
+ /* remove EM anchor from EM anchors list */
+ list_del(&ema->ema_list);
+ kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
+ kfree(ema);
+}
+EXPORT_SYMBOL(fc_exch_mgr_del);
+
+/**
+ * fc_exch_mgr_list_clone() - Share all exchange manager objects
+ * @src: Source lport to clone exchange managers from
+ * @dst: New lport that takes references to all the exchange managers
+ */
+int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst)
+{
+ struct fc_exch_mgr_anchor *ema, *tmp;
+
+ list_for_each_entry(ema, &src->ema_list, ema_list) {
+ if (!fc_exch_mgr_add(dst, ema->mp, ema->match))
+ goto err;
+ }
+ return 0;
+err:
+ list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list)
+ fc_exch_mgr_del(ema);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(fc_exch_mgr_list_clone);
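
This is the NPIV sharing path: a VN_Port reuses the physical N_Port's exchange managers instead of allocating its own. A hedged sketch of the call site (fc_npiv.c's vport creation does essentially this; the variable names are assumed):

	/* Share the physical port's EMs with a newly created vport. */
	if (fc_exch_mgr_list_clone(n_port, vn_port)) {
		/* -ENOMEM: any partially cloned list was already unwound */
		return NULL;
	}
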
+
+/**
+ * fc_exch_mgr_alloc() - Allocate an exchange manager
+ * @lport: The local port that the new EM will be associated with
+ * @class: The default FC class for new exchanges
+ * @min_xid: The minimum XID for exchanges from the new EM
+ * @max_xid: The maximum XID for exchanges from the new EM
+ * @match: The match routine for the new EM
+ */
+struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
+ enum fc_class class,
+ u16 min_xid, u16 max_xid,
+ bool (*match)(struct fc_frame *))
+{
+ struct fc_exch_mgr *mp;
+ u16 pool_exch_range;
+ size_t pool_size;
+ unsigned int cpu;
+ struct fc_exch_pool *pool;
+
+ if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
+ (min_xid & fc_cpu_mask) != 0) {
+		FC_LPORT_DBG(lport, "Invalid min_xid 0x%x and max_xid 0x%x\n",
+ min_xid, max_xid);
+ return NULL;
+ }
+
+ /*
+ * allocate memory for EM
+ */
+ mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
+ if (!mp)
+ return NULL;
+
+ mp->class = class;
+ mp->lport = lport;
+ /* adjust em exch xid range for offload */
+ mp->min_xid = min_xid;
+
+ /* reduce range so per cpu pool fits into PCPU_MIN_UNIT_SIZE pool */
+ pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
+ sizeof(struct fc_exch *);
+ if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) {
+ mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
+ min_xid - 1;
+ } else {
+ mp->max_xid = max_xid;
+ pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
+ (fc_cpu_mask + 1);
+ }
+
+ mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
+ if (!mp->ep_pool)
+ goto free_mp;
+
+ /*
+	 * Set up the per-cpu exchange pools with the exchange ID range
+	 * divided equally across all CPUs. The exchange pointer array
+	 * is allocated per pool, sized for that pool's share of the range.
+ */
+ mp->pool_max_index = pool_exch_range - 1;
+
+ /*
+ * Allocate and initialize per cpu exch pool
+ */
+ pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
+ mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
+ if (!mp->pool)
+ goto free_mempool;
+ for_each_possible_cpu(cpu) {
+ pool = per_cpu_ptr(mp->pool, cpu);
+ pool->next_index = 0;
+ pool->left = FC_XID_UNKNOWN;
+ pool->right = FC_XID_UNKNOWN;
+ spin_lock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->ex_list);
+ }
+
+ kref_init(&mp->kref);
+ if (!fc_exch_mgr_add(lport, mp, match)) {
+ free_percpu(mp->pool);
+ goto free_mempool;
+ }
+
+ /*
+	 * The kref_init() above set mp->kref to 1, and the call to
+	 * fc_exch_mgr_add() incremented it again, so drop that
+	 * extra reference here.
+ */
+ kref_put(&mp->kref, fc_exch_mgr_destroy);
+ return mp;
+
+free_mempool:
+ mempool_destroy(mp->ep_pool);
+free_mp:
+ kfree(mp);
+ return NULL;
+}
+EXPORT_SYMBOL(fc_exch_mgr_alloc);
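
A hedged setup sketch: an offload-capable driver typically allocates one EM with a match routine for the XID range its hardware can DDP, then a catch-all EM (match == NULL) added last, so that fc_find_ema()'s unknown-RX_ID fallback (the last EM in the list) lands on it. The names and ranges below are assumptions, not libfc requirements; lro_max_xid + 1 is assumed to be fc_cpu_mask aligned.

	/* Illustrative match routine: steer only FCP frames to this EM. */
	static bool my_em_match(struct fc_frame *fp)
	{
		return fc_frame_header_get(fp)->fh_type == FC_TYPE_FCP;
	}

	static int my_setup_ems(struct fc_lport *lport, u16 lro_max_xid)
	{
		if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, 0, lro_max_xid,
				       my_em_match))
			return -ENOMEM;
		if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, lro_max_xid + 1,
				       FC_XID_UNKNOWN - 1, NULL))
			return -ENOMEM;
		return 0;
	}
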
+
+/**
+ * fc_exch_mgr_free() - Free all exchange managers on a local port
+ * @lport: The local port whose EMs are to be freed
+ */
+void fc_exch_mgr_free(struct fc_lport *lport)
+{
+ struct fc_exch_mgr_anchor *ema, *next;
+
+ flush_workqueue(fc_exch_workqueue);
+ list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
+ fc_exch_mgr_del(ema);
+}
+EXPORT_SYMBOL(fc_exch_mgr_free);
+
+/**
+ * fc_find_ema() - Look up the exchange manager anchor for a frame's XID
+ * @f_ctl: The frame's F_CTL field, used to select OX_ID vs RX_ID
+ * @lport: The local port the frame was received on
+ * @fh: The received frame header
+ */
+static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
+ struct fc_lport *lport,
+ struct fc_frame_header *fh)
+{
+ struct fc_exch_mgr_anchor *ema;
+ u16 xid;
+
+ if (f_ctl & FC_FC_EX_CTX)
+ xid = ntohs(fh->fh_ox_id);
+ else {
+ xid = ntohs(fh->fh_rx_id);
+ if (xid == FC_XID_UNKNOWN)
+ return list_entry(lport->ema_list.prev,
+ typeof(*ema), ema_list);
+ }
+
+ list_for_each_entry(ema, &lport->ema_list, ema_list) {
+ if ((xid >= ema->mp->min_xid) &&
+ (xid <= ema->mp->max_xid))
+ return ema;
+ }
+ return NULL;
+}
+
+/**
+ * fc_exch_recv() - Handler for received frames
+ * @lport: The local port the frame was received on
+ * @fp: The received frame
+ */
+void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_exch_mgr_anchor *ema;
+ u32 f_ctl;
+
+ /* lport lock ? */
+ if (!lport || lport->state == LPORT_ST_DISABLED) {
+ FC_LIBFC_DBG("Receiving frames for an lport that "
+ "has not been initialized correctly\n");
+ fc_frame_free(fp);
+ return;
+ }
+
+ f_ctl = ntoh24(fh->fh_f_ctl);
+ ema = fc_find_ema(f_ctl, lport, fh);
+ if (!ema) {
+		FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor, "
+			     "f_ctl <0x%x>, xid <0x%x>\n",
+ f_ctl,
+ (f_ctl & FC_FC_EX_CTX) ?
+ ntohs(fh->fh_ox_id) :
+ ntohs(fh->fh_rx_id));
+ fc_frame_free(fp);
+ return;
+ }
+
+ /*
+ * If frame is marked invalid, just drop it.
+ */
+ switch (fr_eof(fp)) {
+ case FC_EOF_T:
+ if (f_ctl & FC_FC_END_SEQ)
+ skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
+ fallthrough;
+ case FC_EOF_N:
+ if (fh->fh_type == FC_TYPE_BLS)
+ fc_exch_recv_bls(ema->mp, fp);
+ else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
+ FC_FC_EX_CTX)
+ fc_exch_recv_seq_resp(ema->mp, fp);
+ else if (f_ctl & FC_FC_SEQ_CTX)
+ fc_exch_recv_resp(ema->mp, fp);
+ else /* no EX_CTX and no SEQ_CTX */
+ fc_exch_recv_req(lport, ema->mp, fp);
+ break;
+ default:
+		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
+ fr_eof(fp));
+ fc_frame_free(fp);
+ }
+}
+EXPORT_SYMBOL(fc_exch_recv);
+
+/**
+ * fc_exch_init() - Initialize the exchange layer for a local port
+ * @lport: The local port to initialize the exchange layer for
+ */
+int fc_exch_init(struct fc_lport *lport)
+{
+ if (!lport->tt.exch_mgr_reset)
+ lport->tt.exch_mgr_reset = fc_exch_mgr_reset;
+
+ return 0;
+}
+EXPORT_SYMBOL(fc_exch_init);
+
+/**
+ * fc_setup_exch_mgr() - Set up module-wide exchange management resources
+ */
+int fc_setup_exch_mgr(void)
+{
+ fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!fc_em_cachep)
+ return -ENOMEM;
+
+	/*
+	 * Initialize fc_cpu_mask and fc_cpu_order. fc_cpu_mask is
+	 * set for nr_cpu_ids rounded up to the next power of 2, and
+	 * that order (log2) is stored in fc_cpu_order; it is later
+	 * needed to map an exchange ID to an exchange array index
+	 * in the per-cpu exchange pool.
+	 *
+	 * The round up is required so that fc_cpu_mask aligns with
+	 * the exchange ID's low bits: all incoming frames of an
+	 * exchange are then delivered to the CPU on which the
+	 * exchange originated, by a simple bitwise AND between
+	 * fc_cpu_mask and the exchange ID.
+	 */
+ fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
+ fc_cpu_mask = (1 << fc_cpu_order) - 1;
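+	/*
+	 * Worked example with assumed values: with nr_cpu_ids == 6,
+	 * roundup_pow_of_two(6) == 8, so fc_cpu_order == 3 and
+	 * fc_cpu_mask == 7. Exchange ID 0x123 is then handled on
+	 * CPU (0x123 & 7) == 3, at index ((0x123 - min_xid) >> 3)
+	 * in that CPU's exchange pool.
+	 */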
+
+ fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
+ if (!fc_exch_workqueue)
+ goto err;
+ return 0;
+err:
+ kmem_cache_destroy(fc_em_cachep);
+ return -ENOMEM;
+}
+
+/**
+ * fc_destroy_exch_mgr() - Tear down module-wide exchange management resources
+ */
+void fc_destroy_exch_mgr(void)
+{
+ destroy_workqueue(fc_exch_workqueue);
+ kmem_cache_destroy(fc_em_cachep);
+}
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
new file mode 100644
index 000000000..945adca5e
--- /dev/null
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -0,0 +1,2313 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
+ * Copyright(c) 2008 Mike Christie
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/scatterlist.h>
+#include <linux/err.h>
+#include <linux/crc32.h>
+#include <linux/slab.h>
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <scsi/fc/fc_fc2.h>
+
+#include <scsi/libfc.h>
+
+#include "fc_encode.h"
+#include "fc_libfc.h"
+
+static struct kmem_cache *scsi_pkt_cachep;
+
+/* SRB state definitions */
+#define FC_SRB_FREE 0 /* cmd is free */
+#define FC_SRB_CMD_SENT (1 << 0) /* cmd has been sent */
+#define FC_SRB_RCV_STATUS (1 << 1) /* response has arrived */
+#define FC_SRB_ABORT_PENDING (1 << 2) /* cmd abort sent to device */
+#define FC_SRB_ABORTED (1 << 3) /* abort acknowledged */
+#define FC_SRB_DISCONTIG (1 << 4) /* non-sequential data recvd */
+#define FC_SRB_COMPL (1 << 5) /* fc_io_compl has been run */
+#define FC_SRB_FCP_PROCESSING_TMO (1 << 6) /* timer function processing */
+
+#define FC_SRB_READ (1 << 1)
+#define FC_SRB_WRITE (1 << 0)
+
+static struct libfc_cmd_priv *libfc_priv(struct scsi_cmnd *cmd)
+{
+ return scsi_cmd_priv(cmd);
+}
+
+/**
+ * struct fc_fcp_internal - FCP layer internal data
+ * @scsi_pkt_pool: Memory pool to draw FCP packets from
+ * @scsi_queue_lock: Protects the scsi_pkt_queue
+ * @scsi_pkt_queue: Current FCP packets
+ * @last_can_queue_ramp_down_time: ramp down time
+ * @last_can_queue_ramp_up_time: ramp up time
+ * @max_can_queue: max can_queue size
+ */
+struct fc_fcp_internal {
+ mempool_t *scsi_pkt_pool;
+ spinlock_t scsi_queue_lock;
+ struct list_head scsi_pkt_queue;
+ unsigned long last_can_queue_ramp_down_time;
+ unsigned long last_can_queue_ramp_up_time;
+ int max_can_queue;
+};
+
+#define fc_get_scsi_internal(x) ((struct fc_fcp_internal *)(x)->scsi_priv)
+
+/*
+ * function prototypes
+ * FC scsi I/O related functions
+ */
+static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
+static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_recovery(struct fc_fcp_pkt *, u8 code);
+static void fc_fcp_timeout(struct timer_list *);
+static void fc_fcp_rec(struct fc_fcp_pkt *);
+static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
+static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_io_compl(struct fc_fcp_pkt *);
+
+static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
+static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
+static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);
+
+/*
+ * command status codes
+ */
+#define FC_COMPLETE 0
+#define FC_CMD_ABORTED 1
+#define FC_CMD_RESET 2
+#define FC_CMD_PLOGO 3
+#define FC_SNS_RCV 4
+#define FC_TRANS_ERR 5
+#define FC_DATA_OVRRUN 6
+#define FC_DATA_UNDRUN 7
+#define FC_ERROR 8
+#define FC_HRD_ERROR 9
+#define FC_CRC_ERROR 10
+#define FC_TIMED_OUT 11
+#define FC_TRANS_RESET 12
+
+/*
+ * Error recovery timeout values.
+ */
+#define FC_SCSI_TM_TOV (10 * HZ)
+#define FC_HOST_RESET_TIMEOUT (30 * HZ)
+#define FC_CAN_QUEUE_PERIOD (60 * HZ)
+
+#define FC_MAX_ERROR_CNT 5
+#define FC_MAX_RECOV_RETRY 3
+
+#define FC_FCP_DFLT_QUEUE_DEPTH 32
+
+/**
+ * fc_fcp_pkt_alloc() - Allocate a fcp_pkt
+ * @lport: The local port that the FCP packet is for
+ * @gfp: GFP flags for allocation
+ *
+ * Return value: fcp_pkt structure or null on allocation failure.
+ * Context: Can be called from process context, no lock is required.
+ */
+static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+ struct fc_fcp_pkt *fsp;
+
+ fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
+ if (fsp) {
+ memset(fsp, 0, sizeof(*fsp));
+ fsp->lp = lport;
+ fsp->xfer_ddp = FC_XID_UNKNOWN;
+ refcount_set(&fsp->ref_cnt, 1);
+ timer_setup(&fsp->timer, NULL, 0);
+ INIT_LIST_HEAD(&fsp->list);
+ spin_lock_init(&fsp->scsi_pkt_lock);
+ } else {
+ this_cpu_inc(lport->stats->FcpPktAllocFails);
+ }
+ return fsp;
+}
+
+/**
+ * fc_fcp_pkt_release() - Release hold on a fcp_pkt
+ * @fsp: The FCP packet to be released
+ *
+ * Context: Can be called from process or interrupt context,
+ * no lock is required.
+ */
+static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
+{
+ if (refcount_dec_and_test(&fsp->ref_cnt)) {
+ struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);
+
+ mempool_free(fsp, si->scsi_pkt_pool);
+ }
+}
+
+/**
+ * fc_fcp_pkt_hold() - Hold a fcp_pkt
+ * @fsp: The FCP packet to be held
+ */
+static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
+{
+ refcount_inc(&fsp->ref_cnt);
+}
+
+/**
+ * fc_fcp_pkt_destroy() - Release hold on a fcp_pkt
+ * @seq: The sequence that the FCP packet is on (required by destructor API)
+ * @fsp: The FCP packet to be released
+ *
+ * This routine is called by a destructor callback in the fc_exch_seq_send()
+ * routine of the libfc Transport Template. The 'struct fc_seq' is a required
+ * argument even though it is not used by this routine.
+ *
+ * Context: No locking required.
+ */
+static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
+{
+ fc_fcp_pkt_release(fsp);
+}
+
+/**
+ * fc_fcp_lock_pkt() - Lock a fcp_pkt and increase its reference count
+ * @fsp: The FCP packet to be locked and incremented
+ *
+ * We should only return error if we return a command to SCSI-ml before
+ * getting a response. This can happen in cases where we send a abort, but
+ * do not wait for the response and the abort and command can be passing
+ * each other on the wire/network-layer.
+ *
+ * Note: this function locks the packet and gets a reference to allow
+ * callers to call the completion function while the lock is held and
+ * not have to worry about the packets refcount.
+ *
+ * TODO: Maybe we should just have callers grab/release the lock and
+ * have a function that they call to verify the fsp and grab a ref if
+ * needed.
+ */
+static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
+{
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ if (fsp->state & FC_SRB_COMPL) {
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+ return -EPERM;
+ }
+
+ fc_fcp_pkt_hold(fsp);
+ return 0;
+}
+
+/**
+ * fc_fcp_unlock_pkt() - Release a fcp_pkt's lock and decrement its
+ * reference count
+ * @fsp: The FCP packet to be unlocked and decremented
+ */
+static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
+{
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+ fc_fcp_pkt_release(fsp);
+}
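
The lock/hold pairing gives response and timeout handlers one simple idiom; a minimal sketch (the handler name is illustrative):

	/* Canonical fsp handler pattern around fc_fcp_lock_pkt(). */
	static void my_fsp_handler(struct fc_fcp_pkt *fsp)
	{
		if (fc_fcp_lock_pkt(fsp))
			return;	/* already completed; no lock or ref taken */
		/* ... work under fsp->scsi_pkt_lock; may complete the pkt ... */
		fc_fcp_unlock_pkt(fsp);	/* drops the lock, then the ref */
	}
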
+
+/**
+ * fc_fcp_timer_set() - Start a timer for a fcp_pkt
+ * @fsp: The FCP packet to start a timer for
+ * @delay: The timeout period in jiffies
+ */
+static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
+{
+ if (!(fsp->state & FC_SRB_COMPL)) {
+ mod_timer(&fsp->timer, jiffies + delay);
+ fsp->timer_delay = delay;
+ }
+}
+
+static void fc_fcp_abort_done(struct fc_fcp_pkt *fsp)
+{
+ fsp->state |= FC_SRB_ABORTED;
+ fsp->state &= ~FC_SRB_ABORT_PENDING;
+
+ if (fsp->wait_for_comp)
+ complete(&fsp->tm_done);
+ else
+ fc_fcp_complete_locked(fsp);
+}
+
+/**
+ * fc_fcp_send_abort() - Send an abort for exchanges associated with a
+ * fcp_pkt
+ * @fsp: The FCP packet to abort exchanges on
+ */
+static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
+{
+ int rc;
+
+ if (!fsp->seq_ptr)
+ return -EINVAL;
+
+ this_cpu_inc(fsp->lp->stats->FcpPktAborts);
+
+ fsp->state |= FC_SRB_ABORT_PENDING;
+ rc = fc_seq_exch_abort(fsp->seq_ptr, 0);
+ /*
+ * fc_seq_exch_abort() might return -ENXIO if
+ * the sequence is already completed
+ */
+ if (rc == -ENXIO) {
+ fc_fcp_abort_done(fsp);
+ rc = 0;
+ }
+ return rc;
+}
+
+/**
+ * fc_fcp_retry_cmd() - Retry a fcp_pkt
+ * @fsp: The FCP packet to be retried
+ * @status_code: The FCP status code to set
+ *
+ * Sets the status code to @status_code and then calls
+ * fc_fcp_complete_locked(), which in turn calls fc_io_compl().
+ * fc_io_compl() will notify the SCSI-ml that the I/O is done.
+ * The SCSI-ml will retry the command.
+ */
+static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp, int status_code)
+{
+ if (fsp->seq_ptr) {
+ fc_exch_done(fsp->seq_ptr);
+ fsp->seq_ptr = NULL;
+ }
+
+ fsp->state &= ~FC_SRB_ABORT_PENDING;
+ fsp->io_status = 0;
+ fsp->status_code = status_code;
+ fc_fcp_complete_locked(fsp);
+}
+
+/**
+ * fc_fcp_ddp_setup() - Calls a LLD's ddp_setup routine to set up DDP context
+ * @fsp: The FCP packet that will manage the DDP frames
+ * @xid: The XID that will be used for the DDP exchange
+ */
+void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
+{
+ struct fc_lport *lport;
+
+ lport = fsp->lp;
+ if ((fsp->req_flags & FC_SRB_READ) &&
+ (lport->lro_enabled) && (lport->tt.ddp_setup)) {
+ if (lport->tt.ddp_setup(lport, xid, scsi_sglist(fsp->cmd),
+ scsi_sg_count(fsp->cmd)))
+ fsp->xfer_ddp = xid;
+ }
+}
+
+/**
+ * fc_fcp_ddp_done() - Calls a LLD's ddp_done routine to release any
+ * DDP related resources for a fcp_pkt
+ * @fsp: The FCP packet that DDP had been used on
+ */
+void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
+{
+ struct fc_lport *lport;
+
+ if (!fsp)
+ return;
+
+ if (fsp->xfer_ddp == FC_XID_UNKNOWN)
+ return;
+
+ lport = fsp->lp;
+ if (lport->tt.ddp_done) {
+ fsp->xfer_len = lport->tt.ddp_done(lport, fsp->xfer_ddp);
+ fsp->xfer_ddp = FC_XID_UNKNOWN;
+ }
+}
+
+/**
+ * fc_fcp_can_queue_ramp_up() - increases can_queue
+ * @lport: lport to ramp up can_queue
+ */
+static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+ unsigned long flags;
+ int can_queue;
+
+ spin_lock_irqsave(lport->host->host_lock, flags);
+
+ if (si->last_can_queue_ramp_up_time &&
+ (time_before(jiffies, si->last_can_queue_ramp_up_time +
+ FC_CAN_QUEUE_PERIOD)))
+ goto unlock;
+
+ if (time_before(jiffies, si->last_can_queue_ramp_down_time +
+ FC_CAN_QUEUE_PERIOD))
+ goto unlock;
+
+ si->last_can_queue_ramp_up_time = jiffies;
+
+ can_queue = lport->host->can_queue << 1;
+ if (can_queue >= si->max_can_queue) {
+ can_queue = si->max_can_queue;
+ si->last_can_queue_ramp_down_time = 0;
+ }
+ lport->host->can_queue = can_queue;
+ shost_printk(KERN_ERR, lport->host, "libfc: increased "
+ "can_queue to %d.\n", can_queue);
+
+unlock:
+ spin_unlock_irqrestore(lport->host->host_lock, flags);
+}
+
+/**
+ * fc_fcp_can_queue_ramp_down() - reduces can_queue
+ * @lport: lport to reduce can_queue
+ *
+ * If we are getting memory allocation failures, then we may
+ * be trying to execute too many commands. We let the running
+ * commands complete or time out, then try again with a reduced
+ * can_queue. Eventually we will hit the point where we run
+ * entirely on the reserved (mempool) structures.
+ */
+static bool fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+ unsigned long flags;
+ int can_queue;
+ bool changed = false;
+
+ spin_lock_irqsave(lport->host->host_lock, flags);
+
+ if (si->last_can_queue_ramp_down_time &&
+ (time_before(jiffies, si->last_can_queue_ramp_down_time +
+ FC_CAN_QUEUE_PERIOD)))
+ goto unlock;
+
+ si->last_can_queue_ramp_down_time = jiffies;
+
+ can_queue = lport->host->can_queue;
+ can_queue >>= 1;
+ if (!can_queue)
+ can_queue = 1;
+ lport->host->can_queue = can_queue;
+ changed = true;
+
+unlock:
+ spin_unlock_irqrestore(lport->host->host_lock, flags);
+ return changed;
+}
+
+/**
+ * fc_fcp_frame_alloc() - Allocate an fc_frame structure and buffer
+ * @lport: The local port to allocate the frame for
+ * @len: Payload length
+ *
+ * Allocates an fc_frame structure and buffer; if the allocation fails,
+ * can_queue is ramped down.
+ */
+static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
+ size_t len)
+{
+ struct fc_frame *fp;
+
+ fp = fc_frame_alloc(lport, len);
+ if (likely(fp))
+ return fp;
+
+ this_cpu_inc(lport->stats->FcpFrameAllocFails);
+ /* error case */
+ fc_fcp_can_queue_ramp_down(lport);
+ shost_printk(KERN_ERR, lport->host,
+ "libfc: Could not allocate frame, "
+ "reducing can_queue to %d.\n", lport->host->can_queue);
+ return NULL;
+}
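
To make the ramping concrete, a worked example with assumed numbers: with max_can_queue = 256, repeated frame-allocation failures halve can_queue 256 -> 128 -> 64, at most once per FC_CAN_QUEUE_PERIOD (60 s), with a floor of 1. When fc_fcp_can_queue_ramp_up() later runs and the rate-limit period has elapsed, it doubles can_queue back 64 -> 128 -> 256 and clamps at max_can_queue.
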
+
+/**
+ * get_fsp_rec_tov() - Helper function to get REC_TOV
+ * @fsp: the FCP packet
+ *
+ * Returns REC_TOV in jiffies: the greater of rpriv->e_d_tov and
+ * FC_DEF_E_D_TOV, plus 1 second
+ */
+static inline unsigned int get_fsp_rec_tov(struct fc_fcp_pkt *fsp)
+{
+ struct fc_rport_libfc_priv *rpriv = fsp->rport->dd_data;
+ unsigned int e_d_tov = FC_DEF_E_D_TOV;
+
+ if (rpriv && rpriv->e_d_tov > e_d_tov)
+ e_d_tov = rpriv->e_d_tov;
+ return msecs_to_jiffies(e_d_tov) + HZ;
+}
+
+/**
+ * fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target
+ * @fsp: The FCP packet the data is on
+ * @fp: The data frame
+ */
+static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ struct scsi_cmnd *sc = fsp->cmd;
+ struct fc_lport *lport = fsp->lp;
+ struct fc_frame_header *fh;
+ size_t start_offset;
+ size_t offset;
+ u32 crc;
+ u32 copy_len = 0;
+ size_t len;
+ void *buf;
+ struct scatterlist *sg;
+ u32 nents;
+ u8 host_bcode = FC_COMPLETE;
+
+ fh = fc_frame_header_get(fp);
+ offset = ntohl(fh->fh_parm_offset);
+ start_offset = offset;
+ len = fr_len(fp) - sizeof(*fh);
+ buf = fc_frame_payload_get(fp, 0);
+
+ /*
+ * if this I/O is ddped then clear it and initiate recovery since data
+ * frames are expected to be placed directly in that case.
+ *
+ * Indicate error to scsi-ml because something went wrong with the
+ * ddp handling to get us here.
+ */
+ if (fsp->xfer_ddp != FC_XID_UNKNOWN) {
+ fc_fcp_ddp_done(fsp);
+ FC_FCP_DBG(fsp, "DDP I/O in fc_fcp_recv_data set ERROR\n");
+ host_bcode = FC_ERROR;
+ goto err;
+ }
+ if (offset + len > fsp->data_len) {
+ /* this should never happen */
+ if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
+ fc_frame_crc_check(fp))
+ goto crc_err;
+ FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
+ "data_len %x\n", len, offset, fsp->data_len);
+
+ /* Data is corrupted indicate scsi-ml should retry */
+ host_bcode = FC_DATA_OVRRUN;
+ goto err;
+ }
+ if (offset != fsp->xfer_len)
+ fsp->state |= FC_SRB_DISCONTIG;
+
+ sg = scsi_sglist(sc);
+ nents = scsi_sg_count(sc);
+
+ if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
+ copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
+ &offset, NULL);
+ } else {
+ crc = crc32(~0, (u8 *) fh, sizeof(*fh));
+ copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
+ &offset, &crc);
+ buf = fc_frame_payload_get(fp, 0);
+ if (len % 4)
+ crc = crc32(crc, buf + len, 4 - (len % 4));
+
+ if (~crc != le32_to_cpu(fr_crc(fp))) {
+crc_err:
+ this_cpu_inc(lport->stats->ErrorFrames);
+ /* per cpu count, not total count, but OK for limit */
+ if (this_cpu_inc_return(lport->stats->InvalidCRCCount) < FC_MAX_ERROR_CNT)
+ printk(KERN_WARNING "libfc: CRC error on data "
+ "frame for port (%6.6x)\n",
+ lport->port_id);
+ /*
+ * Assume the frame is total garbage.
+ * We may have copied it over the good part
+ * of the buffer.
+ * If so, we need to retry the entire operation.
+ * Otherwise, ignore it.
+ */
+ if (fsp->state & FC_SRB_DISCONTIG) {
+ host_bcode = FC_CRC_ERROR;
+ goto err;
+ }
+ return;
+ }
+ }
+
+ if (fsp->xfer_contig_end == start_offset)
+ fsp->xfer_contig_end += copy_len;
+ fsp->xfer_len += copy_len;
+
+ /*
+ * In the very rare event that this data arrived after the response
+ * and completes the transfer, call the completion handler.
+ */
+ if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
+ fsp->xfer_len == fsp->data_len - fsp->scsi_resid) {
+		FC_FCP_DBG(fsp, "complete out-of-order sequence\n");
+ fc_fcp_complete_locked(fsp);
+ }
+ return;
+err:
+ fc_fcp_recovery(fsp, host_bcode);
+}
+
+/**
+ * fc_fcp_send_data() - Send SCSI data to a target
+ * @fsp: The FCP packet the data is on
+ * @seq: The sequence the data is to be sent on
+ * @offset: The starting offset for this data request
+ * @seq_blen: The burst length for this data request
+ *
+ * Called after receiving a Transfer Ready data descriptor.
+ * If the LLD is capable of sequence offload then send down the
+ * seq_blen amount of data in a single frame, otherwise send
+ * multiple frames of the maximum frame payload size supported by
+ * the target port.
+ */
+static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
+ size_t offset, size_t seq_blen)
+{
+ struct fc_exch *ep;
+ struct scsi_cmnd *sc;
+ struct scatterlist *sg;
+ struct fc_frame *fp = NULL;
+ struct fc_lport *lport = fsp->lp;
+ struct page *page;
+ size_t remaining;
+ size_t t_blen;
+ size_t tlen;
+ size_t sg_bytes;
+ size_t frame_offset, fh_parm_offset;
+ size_t off;
+ int error;
+ void *data = NULL;
+ void *page_addr;
+ int using_sg = lport->sg_supp;
+ u32 f_ctl;
+
+ WARN_ON(seq_blen <= 0);
+ if (unlikely(offset + seq_blen > fsp->data_len)) {
+ /* this should never happen */
+ FC_FCP_DBG(fsp, "xfer-ready past end. seq_blen %zx "
+ "offset %zx\n", seq_blen, offset);
+ fc_fcp_send_abort(fsp);
+ return 0;
+ } else if (offset != fsp->xfer_len) {
+ /* Out of Order Data Request - no problem, but unexpected. */
+ FC_FCP_DBG(fsp, "xfer-ready non-contiguous. "
+ "seq_blen %zx offset %zx\n", seq_blen, offset);
+ }
+
+ /*
+ * if LLD is capable of seq_offload then set transport
+ * burst length (t_blen) to seq_blen, otherwise set t_blen
+ * to max FC frame payload previously set in fsp->max_payload.
+ */
+ t_blen = fsp->max_payload;
+ if (lport->seq_offload) {
+ t_blen = min(seq_blen, (size_t)lport->lso_max);
+ FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
+ fsp, seq_blen, lport->lso_max, t_blen);
+ }
+
+ if (t_blen > 512)
+ t_blen &= ~(512 - 1); /* round down to block size */
+ sc = fsp->cmd;
+
+ remaining = seq_blen;
+ fh_parm_offset = frame_offset = offset;
+ tlen = 0;
+ seq = fc_seq_start_next(seq);
+ f_ctl = FC_FC_REL_OFF;
+ WARN_ON(!seq);
+
+ sg = scsi_sglist(sc);
+
+ while (remaining > 0 && sg) {
+ if (offset >= sg->length) {
+ offset -= sg->length;
+ sg = sg_next(sg);
+ continue;
+ }
+ if (!fp) {
+ tlen = min(t_blen, remaining);
+
+ /*
+ * TODO. Temporary workaround. fc_seq_send() can't
+ * handle odd lengths in non-linear skbs.
+ * This will be the final fragment only.
+ */
+ if (tlen % 4)
+ using_sg = 0;
+ fp = fc_frame_alloc(lport, using_sg ? 0 : tlen);
+ if (!fp)
+ return -ENOMEM;
+
+ data = fc_frame_header_get(fp) + 1;
+ fh_parm_offset = frame_offset;
+ fr_max_payload(fp) = fsp->max_payload;
+ }
+
+ off = offset + sg->offset;
+ sg_bytes = min(tlen, sg->length - offset);
+ sg_bytes = min(sg_bytes,
+ (size_t) (PAGE_SIZE - (off & ~PAGE_MASK)));
+ page = sg_page(sg) + (off >> PAGE_SHIFT);
+ if (using_sg) {
+ get_page(page);
+ skb_fill_page_desc(fp_skb(fp),
+ skb_shinfo(fp_skb(fp))->nr_frags,
+ page, off & ~PAGE_MASK, sg_bytes);
+ fp_skb(fp)->data_len += sg_bytes;
+ fr_len(fp) += sg_bytes;
+ fp_skb(fp)->truesize += PAGE_SIZE;
+ } else {
+ /*
+ * The scatterlist item may be bigger than PAGE_SIZE,
+ * but we must not cross pages inside the kmap.
+ */
+ page_addr = kmap_atomic(page);
+ memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
+ sg_bytes);
+ kunmap_atomic(page_addr);
+ data += sg_bytes;
+ }
+ offset += sg_bytes;
+ frame_offset += sg_bytes;
+ tlen -= sg_bytes;
+ remaining -= sg_bytes;
+
+ if ((skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN) &&
+ (tlen))
+ continue;
+
+ /*
+		 * Send the sequence with transfer sequence initiative in
+		 * case this is the last FCP frame of the sequence.
+ */
+ if (remaining == 0)
+ f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;
+
+ ep = fc_seq_exch(seq);
+ fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
+ FC_TYPE_FCP, f_ctl, fh_parm_offset);
+
+ /*
+		 * Send the fragment on the sequence.
+ */
+ error = fc_seq_send(lport, seq, fp);
+ if (error) {
+ WARN_ON(1); /* send error should be rare */
+ return error;
+ }
+ fp = NULL;
+ }
+ fsp->xfer_len += seq_blen; /* premature count? */
+ return 0;
+}
+
+/**
+ * fc_fcp_abts_resp() - Receive an ABTS response
+ * @fsp: The FCP packet that is being aborted
+ * @fp: The response frame
+ */
+static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ int ba_done = 1;
+ struct fc_ba_rjt *brp;
+ struct fc_frame_header *fh;
+
+ fh = fc_frame_header_get(fp);
+ switch (fh->fh_r_ctl) {
+ case FC_RCTL_BA_ACC:
+ break;
+ case FC_RCTL_BA_RJT:
+ brp = fc_frame_payload_get(fp, sizeof(*brp));
+ if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
+ break;
+ fallthrough;
+ default:
+ /*
+		 * we will let the command time out
+		 * and scsi-ml recover in this case,
+		 * therefore clear the ba_done flag.
+ */
+ ba_done = 0;
+ }
+
+ if (ba_done)
+ fc_fcp_abort_done(fsp);
+}
+
+/**
+ * fc_fcp_recv() - Receive an FCP frame
+ * @seq: The sequence the frame is on
+ * @fp: The received frame
+ * @arg: The related FCP packet
+ *
+ * Context: Called from Soft IRQ context. Can not be called
+ * holding the FCP packet list lock.
+ */
+static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+ struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
+ struct fc_lport *lport = fsp->lp;
+ struct fc_frame_header *fh;
+ struct fcp_txrdy *dd;
+ u8 r_ctl;
+ int rc = 0;
+
+ if (IS_ERR(fp)) {
+ fc_fcp_error(fsp, fp);
+ return;
+ }
+
+ fh = fc_frame_header_get(fp);
+ r_ctl = fh->fh_r_ctl;
+
+ if (lport->state != LPORT_ST_READY) {
+ FC_FCP_DBG(fsp, "lport state %d, ignoring r_ctl %x\n",
+ lport->state, r_ctl);
+ goto out;
+ }
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+
+ if (fh->fh_type == FC_TYPE_BLS) {
+ fc_fcp_abts_resp(fsp, fp);
+ goto unlock;
+ }
+
+ if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING)) {
+ FC_FCP_DBG(fsp, "command aborted, ignoring r_ctl %x\n", r_ctl);
+ goto unlock;
+ }
+
+ if (r_ctl == FC_RCTL_DD_DATA_DESC) {
+ /*
+ * received XFER RDY from the target
+ * need to send data to the target
+ */
+ WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
+ dd = fc_frame_payload_get(fp, sizeof(*dd));
+ WARN_ON(!dd);
+
+ rc = fc_fcp_send_data(fsp, seq,
+ (size_t) ntohl(dd->ft_data_ro),
+ (size_t) ntohl(dd->ft_burst_len));
+ if (!rc)
+ seq->rec_data = fsp->xfer_len;
+ } else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
+ /*
+ * received a DATA frame
+ * next we will copy the data to the system buffer
+ */
+ WARN_ON(fr_len(fp) < sizeof(*fh)); /* len may be 0 */
+ fc_fcp_recv_data(fsp, fp);
+ seq->rec_data = fsp->xfer_contig_end;
+ } else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
+ WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
+
+ fc_fcp_resp(fsp, fp);
+ } else {
+ FC_FCP_DBG(fsp, "unexpected frame. r_ctl %x\n", r_ctl);
+ }
+unlock:
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_fcp_resp() - Handler for FCP responses
+ * @fsp: The FCP packet the response is for
+ * @fp: The response frame
+ */
+static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh;
+ struct fcp_resp *fc_rp;
+ struct fcp_resp_ext *rp_ex;
+ struct fcp_resp_rsp_info *fc_rp_info;
+ u32 plen;
+ u32 expected_len;
+ u32 respl = 0;
+ u32 snsl = 0;
+ u8 flags = 0;
+
+ plen = fr_len(fp);
+ fh = (struct fc_frame_header *)fr_hdr(fp);
+ if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp)))
+ goto len_err;
+ plen -= sizeof(*fh);
+ fc_rp = (struct fcp_resp *)(fh + 1);
+ fsp->cdb_status = fc_rp->fr_status;
+ flags = fc_rp->fr_flags;
+ fsp->scsi_comp_flags = flags;
+ expected_len = fsp->data_len;
+
+ /* if ddp, update xfer len */
+ fc_fcp_ddp_done(fsp);
+
+ if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
+ rp_ex = (void *)(fc_rp + 1);
+ if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
+ if (plen < sizeof(*fc_rp) + sizeof(*rp_ex))
+ goto len_err;
+ fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
+ if (flags & FCP_RSP_LEN_VAL) {
+ respl = ntohl(rp_ex->fr_rsp_len);
+ if ((respl != FCP_RESP_RSP_INFO_LEN4) &&
+ (respl != FCP_RESP_RSP_INFO_LEN8))
+ goto len_err;
+ if (fsp->wait_for_comp) {
+ /* Abuse cdb_status for rsp code */
+ fsp->cdb_status = fc_rp_info->rsp_code;
+ complete(&fsp->tm_done);
+ /*
+ * tmfs will not have any scsi cmd so
+ * exit here
+ */
+ return;
+ }
+ }
+ if (flags & FCP_SNS_LEN_VAL) {
+ snsl = ntohl(rp_ex->fr_sns_len);
+ if (snsl > SCSI_SENSE_BUFFERSIZE)
+ snsl = SCSI_SENSE_BUFFERSIZE;
+ memcpy(fsp->cmd->sense_buffer,
+ (char *)fc_rp_info + respl, snsl);
+ }
+ }
+ if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) {
+ if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid))
+ goto len_err;
+ if (flags & FCP_RESID_UNDER) {
+ fsp->scsi_resid = ntohl(rp_ex->fr_resid);
+ /*
+ * The cmnd->underflow is the minimum number of
+ * bytes that must be transferred for this
+ * command. Provided a sense condition is not
+ * present, make sure the actual amount
+ * transferred is at least the underflow value
+ * or fail.
+ */
+ if (!(flags & FCP_SNS_LEN_VAL) &&
+ (fc_rp->fr_status == 0) &&
+ (scsi_bufflen(fsp->cmd) -
+ fsp->scsi_resid) < fsp->cmd->underflow)
+ goto err;
+ expected_len -= fsp->scsi_resid;
+ } else {
+ fsp->status_code = FC_ERROR;
+ }
+ }
+ }
+ fsp->state |= FC_SRB_RCV_STATUS;
+
+ /*
+ * Check for missing or extra data frames.
+ */
+ if (unlikely(fsp->cdb_status == SAM_STAT_GOOD &&
+ fsp->xfer_len != expected_len)) {
+ if (fsp->xfer_len < expected_len) {
+ /*
+			 * Some data may be queued locally.
+			 * Wait at least one jiffy to see if it is delivered.
+			 * If this expires without data, we may do an SRR.
+ */
+ if (fsp->lp->qfull) {
+ FC_FCP_DBG(fsp, "tgt %6.6x queue busy retry\n",
+ fsp->rport->port_id);
+ return;
+ }
+ FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx data underrun "
+ "len %x, data len %x\n",
+ fsp->rport->port_id,
+ fsp->xfer_len, expected_len, fsp->data_len);
+ fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
+ return;
+ }
+ fsp->status_code = FC_DATA_OVRRUN;
+ FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx greater than expected, "
+ "len %x, data len %x\n",
+ fsp->rport->port_id,
+ fsp->xfer_len, expected_len, fsp->data_len);
+ }
+ fc_fcp_complete_locked(fsp);
+ return;
+
+len_err:
+ FC_FCP_DBG(fsp, "short FCP response. flags 0x%x len %u respl %u "
+ "snsl %u\n", flags, fr_len(fp), respl, snsl);
+err:
+ fsp->status_code = FC_ERROR;
+ fc_fcp_complete_locked(fsp);
+}
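
For reference while reading the parser above, this is the FCP_RSP payload layout it walks; a simplified sketch, with the optional parts gated by the fr_flags bits:

	/*
	 * FCP_RSP payload as parsed above (simplified):
	 *
	 *   struct fcp_resp       always: fr_flags, fr_status
	 *   struct fcp_resp_ext   if RSP_LEN_VAL, SNS_LEN_VAL or a RESID
	 *                         bit is set: fr_resid, fr_sns_len,
	 *                         fr_rsp_len
	 *   rsp_info              if FCP_RSP_LEN_VAL; fr_rsp_len bytes
	 *                         (4 or 8 accepted here)
	 *   sense data            if FCP_SNS_LEN_VAL; fr_sns_len bytes,
	 *                         immediately after rsp_info
	 */
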
+
+/**
+ * fc_fcp_complete_locked() - Complete processing of a fcp_pkt with the
+ * fcp_pkt lock held
+ * @fsp: The FCP packet to be completed
+ *
+ * This function may sleep if a timer is pending. The packet lock must be
+ * held, and the host lock must not be held.
+ */
+static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
+{
+ struct fc_lport *lport = fsp->lp;
+ struct fc_seq *seq;
+ struct fc_exch *ep;
+ u32 f_ctl;
+
+ if (fsp->state & FC_SRB_ABORT_PENDING)
+ return;
+
+ if (fsp->state & FC_SRB_ABORTED) {
+ if (!fsp->status_code)
+ fsp->status_code = FC_CMD_ABORTED;
+ } else {
+ /*
+ * Test for transport underrun, independent of response
+ * underrun status.
+ */
+ if (fsp->cdb_status == SAM_STAT_GOOD &&
+ fsp->xfer_len < fsp->data_len && !fsp->io_status &&
+ (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
+ fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
+ FC_FCP_DBG(fsp, "data underrun, xfer %zx data %x\n",
+ fsp->xfer_len, fsp->data_len);
+ fsp->status_code = FC_DATA_UNDRUN;
+ }
+ }
+
+ seq = fsp->seq_ptr;
+ if (seq) {
+ fsp->seq_ptr = NULL;
+ if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
+ struct fc_frame *conf_frame;
+ struct fc_seq *csp;
+
+ csp = fc_seq_start_next(seq);
+ conf_frame = fc_fcp_frame_alloc(fsp->lp, 0);
+ if (conf_frame) {
+ f_ctl = FC_FC_SEQ_INIT;
+ f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
+ ep = fc_seq_exch(seq);
+ fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
+ ep->did, ep->sid,
+ FC_TYPE_FCP, f_ctl, 0);
+ fc_seq_send(lport, csp, conf_frame);
+ }
+ }
+ fc_exch_done(seq);
+ }
+ /*
+ * Some resets driven by SCSI are not I/Os and do not have
+ * SCSI commands associated with the requests. We should not
+ * call I/O completion if we do not have a SCSI command.
+ */
+ if (fsp->cmd)
+ fc_io_compl(fsp);
+}
+
+/**
+ * fc_fcp_cleanup_cmd() - Cancel the active exchange on a fcp_pkt
+ * @fsp: The FCP packet whose exchanges should be canceled
+ * @error: The reason for the cancellation
+ */
+static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
+{
+ if (fsp->seq_ptr) {
+ fc_exch_done(fsp->seq_ptr);
+ fsp->seq_ptr = NULL;
+ }
+ fsp->status_code = error;
+}
+
+/**
+ * fc_fcp_cleanup_each_cmd() - Cancel all exchanges on a local port
+ * @lport: The local port whose exchanges should be canceled
+ * @id: The target's ID
+ * @lun: The LUN
+ * @error: The reason for cancellation
+ *
+ * If lun or id is -1, they are ignored.
+ */
+static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id,
+ unsigned int lun, int error)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+ struct fc_fcp_pkt *fsp;
+ struct scsi_cmnd *sc_cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
+restart:
+ list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
+ sc_cmd = fsp->cmd;
+ if (id != -1 && scmd_id(sc_cmd) != id)
+ continue;
+
+ if (lun != -1 && sc_cmd->device->lun != lun)
+ continue;
+
+ fc_fcp_pkt_hold(fsp);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ if (!(fsp->state & FC_SRB_COMPL)) {
+ fsp->state |= FC_SRB_COMPL;
+ /*
+			 * TODO: dropping scsi_pkt_lock and then reacquiring
+			 * it around fc_fcp_cleanup_cmd() is required,
+			 * since fc_fcp_cleanup_cmd() calls into
+			 * fc_seq_set_resp() and that function can schedule
+			 * (sleep). Maybe the scheduling there should be
+			 * removed instead of unlocking here, to avoid a
+			 * scheduling-while-atomic bug.
+ */
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+
+ fc_fcp_cleanup_cmd(fsp, error);
+
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ fc_io_compl(fsp);
+ }
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+
+ fc_fcp_pkt_release(fsp);
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
+ /*
+ * while we dropped the lock multiple pkts could
+ * have been released, so we have to start over.
+ */
+ goto restart;
+ }
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+}
+
+/**
+ * fc_fcp_abort_io() - Abort all FCP-SCSI exchanges on a local port
+ * @lport: The local port whose exchanges are to be aborted
+ */
+static void fc_fcp_abort_io(struct fc_lport *lport)
+{
+ fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_HRD_ERROR);
+}
+
+/**
+ * fc_fcp_pkt_send() - Send a fcp_pkt
+ * @lport: The local port to send the FCP packet on
+ * @fsp: The FCP packet to send
+ *
+ * Return: Zero for success and -1 for failure
+ * Locks: Called without locks held
+ */
+static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+ unsigned long flags;
+ int rc;
+
+ libfc_priv(fsp->cmd)->fsp = fsp;
+ fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
+ fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;
+
+ int_to_scsilun(fsp->cmd->device->lun, &fsp->cdb_cmd.fc_lun);
+ memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);
+
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
+ list_add_tail(&fsp->list, &si->scsi_pkt_queue);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+ rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
+ if (unlikely(rc)) {
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
+ libfc_priv(fsp->cmd)->fsp = NULL;
+ list_del(&fsp->list);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+ }
+
+ return rc;
+}
+
+/**
+ * fc_fcp_cmd_send() - Send a FCP command
+ * @lport: The local port to send the command on
+ * @fsp: The FCP packet the command is on
+ * @resp: The handler for the response
+ */
+static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
+ void (*resp)(struct fc_seq *,
+ struct fc_frame *fp,
+ void *arg))
+{
+ struct fc_frame *fp;
+ struct fc_seq *seq;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rpriv;
+ const size_t len = sizeof(fsp->cdb_cmd);
+ int rc = 0;
+
+ if (fc_fcp_lock_pkt(fsp))
+ return 0;
+
+ fp = fc_fcp_frame_alloc(lport, sizeof(fsp->cdb_cmd));
+ if (!fp) {
+ rc = -1;
+ goto unlock;
+ }
+
+ memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
+ fr_fsp(fp) = fsp;
+ rport = fsp->rport;
+ fsp->max_payload = rport->maxframe_size;
+ rpriv = rport->dd_data;
+
+ fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
+ rpriv->local_port->port_id, FC_TYPE_FCP,
+ FC_FCTL_REQ, 0);
+
+ seq = fc_exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy, fsp, 0);
+ if (!seq) {
+ rc = -1;
+ goto unlock;
+ }
+ fsp->seq_ptr = seq;
+ fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
+
+ fsp->timer.function = fc_fcp_timeout;
+ if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
+ fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
+
+unlock:
+ fc_fcp_unlock_pkt(fsp);
+ return rc;
+}
+
+/**
+ * fc_fcp_error() - Handler for FCP layer errors
+ * @fsp: The FCP packet the error is on
+ * @fp: The frame that has errored
+ */
+static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ int error = PTR_ERR(fp);
+
+ if (fc_fcp_lock_pkt(fsp))
+ return;
+
+ if (error == -FC_EX_CLOSED) {
+ fc_fcp_retry_cmd(fsp, FC_ERROR);
+ goto unlock;
+ }
+
+ /*
+ * clear abort pending, because the lower layer
+ * decided to force completion.
+ */
+ fsp->state &= ~FC_SRB_ABORT_PENDING;
+ fsp->status_code = FC_CMD_PLOGO;
+ fc_fcp_complete_locked(fsp);
+unlock:
+ fc_fcp_unlock_pkt(fsp);
+}
+
+/**
+ * fc_fcp_pkt_abort() - Abort a fcp_pkt
+ * @fsp: The FCP packet to abort on
+ *
+ * Called to send an abort and then wait for abort completion
+ */
+static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
+{
+ int rc = FAILED;
+ unsigned long ticks_left;
+
+ FC_FCP_DBG(fsp, "pkt abort state %x\n", fsp->state);
+ if (fc_fcp_send_abort(fsp)) {
+ FC_FCP_DBG(fsp, "failed to send abort\n");
+ return FAILED;
+ }
+
+ if (fsp->state & FC_SRB_ABORTED) {
+ FC_FCP_DBG(fsp, "target abort cmd completed\n");
+ return SUCCESS;
+ }
+
+ init_completion(&fsp->tm_done);
+ fsp->wait_for_comp = 1;
+
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+ ticks_left = wait_for_completion_timeout(&fsp->tm_done,
+ FC_SCSI_TM_TOV);
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ fsp->wait_for_comp = 0;
+
+ if (!ticks_left) {
+ FC_FCP_DBG(fsp, "target abort cmd failed\n");
+ } else if (fsp->state & FC_SRB_ABORTED) {
+ FC_FCP_DBG(fsp, "target abort cmd passed\n");
+ rc = SUCCESS;
+ fc_fcp_complete_locked(fsp);
+ }
+
+ return rc;
+}
+
+/**
+ * fc_lun_reset_send() - Send LUN reset command
+ * @t: Timer context used to fetch the FSP packet
+ */
+static void fc_lun_reset_send(struct timer_list *t)
+{
+ struct fc_fcp_pkt *fsp = from_timer(fsp, t, timer);
+ struct fc_lport *lport = fsp->lp;
+
+ if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) {
+ if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
+ return;
+ if (fc_fcp_lock_pkt(fsp))
+ return;
+ fsp->timer.function = fc_lun_reset_send;
+ fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
+ fc_fcp_unlock_pkt(fsp);
+ }
+}
+
+/**
+ * fc_lun_reset() - Send a LUN RESET command to a device
+ * and wait for the reply
+ * @lport: The local port to sent the command on
+ * @fsp: The FCP packet that identifies the LUN to be reset
+ * @id: The SCSI command ID
+ * @lun: The LUN ID to be reset
+ */
+static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
+ unsigned int id, unsigned int lun)
+{
+ int rc;
+
+ fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
+ fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
+ int_to_scsilun(lun, &fsp->cdb_cmd.fc_lun);
+
+ fsp->wait_for_comp = 1;
+ init_completion(&fsp->tm_done);
+
+ fc_lun_reset_send(&fsp->timer);
+
+ /*
+ * wait for completion of reset
+ * after that make sure all commands are terminated
+ */
+ rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
+
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ fsp->state |= FC_SRB_COMPL;
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+
+ del_timer_sync(&fsp->timer);
+
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ if (fsp->seq_ptr) {
+ fc_exch_done(fsp->seq_ptr);
+ fsp->seq_ptr = NULL;
+ }
+ fsp->wait_for_comp = 0;
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+
+ if (!rc) {
+ FC_SCSI_DBG(lport, "lun reset failed\n");
+ return FAILED;
+ }
+
+ /* cdb_status holds the tmf's rsp code */
+ if (fsp->cdb_status != FCP_TMF_CMPL)
+ return FAILED;
+
+ FC_SCSI_DBG(lport, "lun reset to lun %u completed\n", lun);
+ fc_fcp_cleanup_each_cmd(lport, id, lun, FC_CMD_ABORTED);
+ return SUCCESS;
+}
+
+/**
+ * fc_tm_done() - Task Management response handler
+ * @seq: The sequence that the response is on
+ * @fp: The response frame
+ * @arg: The FCP packet the response is for
+ */
+static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+ struct fc_fcp_pkt *fsp = arg;
+ struct fc_frame_header *fh;
+
+ if (IS_ERR(fp)) {
+ /*
+		 * If there is an error just let it time out or wait
+		 * for the TMF to be aborted if it timed out.
+		 *
+		 * scsi-eh will escalate when either happens.
+ */
+ return;
+ }
+
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+
+ /*
+ * raced with eh timeout handler.
+ */
+ if (!fsp->seq_ptr || !fsp->wait_for_comp)
+ goto out_unlock;
+
+ fh = fc_frame_header_get(fp);
+ if (fh->fh_type != FC_TYPE_BLS)
+ fc_fcp_resp(fsp, fp);
+ fsp->seq_ptr = NULL;
+ fc_exch_done(seq);
+out_unlock:
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_fcp_cleanup() - Cleanup all FCP exchanges on a local port
+ * @lport: The local port to be cleaned up
+ */
+static void fc_fcp_cleanup(struct fc_lport *lport)
+{
+ fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_ERROR);
+}
+
+/**
+ * fc_fcp_timeout() - Handler for fcp_pkt timeouts
+ * @t: Timer context used to fetch the FSP packet
+ *
+ * If REC is supported then just issue it and return. The REC exchange will
+ * complete or time out and recovery can continue at that point. Otherwise,
+ * if the response has been received without all the data, it has been at
+ * least ER_TIMEOUT since the response was received, so the command is
+ * completed. If the response has not been received, the command is
+ * aborted via the recovery path.
+ */
+static void fc_fcp_timeout(struct timer_list *t)
+{
+ struct fc_fcp_pkt *fsp = from_timer(fsp, t, timer);
+ struct fc_rport *rport = fsp->rport;
+ struct fc_rport_libfc_priv *rpriv = rport->dd_data;
+
+ if (fc_fcp_lock_pkt(fsp))
+ return;
+
+ if (fsp->cdb_cmd.fc_tm_flags)
+ goto unlock;
+
+ if (fsp->lp->qfull) {
+ FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n",
+ fsp->timer_delay);
+ fsp->timer.function = fc_fcp_timeout;
+ fc_fcp_timer_set(fsp, fsp->timer_delay);
+ goto unlock;
+ }
+ FC_FCP_DBG(fsp, "fcp timeout, delay %d flags %x state %x\n",
+ fsp->timer_delay, rpriv->flags, fsp->state);
+ fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
+
+ if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
+ fc_fcp_rec(fsp);
+ else if (fsp->state & FC_SRB_RCV_STATUS)
+ fc_fcp_complete_locked(fsp);
+ else
+ fc_fcp_recovery(fsp, FC_TIMED_OUT);
+ fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
+unlock:
+ fc_fcp_unlock_pkt(fsp);
+}
+
+/**
+ * fc_fcp_rec() - Send a REC ELS request
+ * @fsp: The FCP packet to send the REC request on
+ */
+static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
+{
+ struct fc_lport *lport;
+ struct fc_frame *fp;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rpriv;
+
+ lport = fsp->lp;
+ rport = fsp->rport;
+ rpriv = rport->dd_data;
+ if (!fsp->seq_ptr || rpriv->rp_state != RPORT_ST_READY) {
+ fsp->status_code = FC_HRD_ERROR;
+ fsp->io_status = 0;
+ fc_fcp_complete_locked(fsp);
+ return;
+ }
+
+ fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec));
+ if (!fp)
+ goto retry;
+
+ fr_seq(fp) = fsp->seq_ptr;
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
+ rpriv->local_port->port_id, FC_TYPE_ELS,
+ FC_FCTL_REQ, 0);
+ if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC,
+ fc_fcp_rec_resp, fsp,
+ 2 * lport->r_a_tov)) {
+ fc_fcp_pkt_hold(fsp); /* hold while REC outstanding */
+ return;
+ }
+retry:
+ if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+ fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
+ else
+ fc_fcp_recovery(fsp, FC_TIMED_OUT);
+}
+
+/**
+ * fc_fcp_rec_resp() - Handler for REC ELS responses
+ * @seq: The sequence the response is on
+ * @fp: The response frame
+ * @arg: The FCP packet the response is on
+ *
+ * If the response is a reject then the scsi layer will handle
+ * the timeout. If the response is an LS_ACC then: if the I/O was not
+ * completed, set the timeout and return; if the I/O was completed,
+ * complete the exchange and tell the SCSI layer.
+ */
+static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+ struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
+ struct fc_els_rec_acc *recp;
+ struct fc_els_ls_rjt *rjt;
+ u32 e_stat;
+ u8 opcode;
+ u32 offset;
+ enum dma_data_direction data_dir;
+ enum fc_rctl r_ctl;
+ struct fc_rport_libfc_priv *rpriv;
+
+ if (IS_ERR(fp)) {
+ fc_fcp_rec_error(fsp, fp);
+ return;
+ }
+
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+
+ fsp->recov_retry = 0;
+ opcode = fc_frame_payload_op(fp);
+ if (opcode == ELS_LS_RJT) {
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ switch (rjt->er_reason) {
+ default:
+ FC_FCP_DBG(fsp,
+ "device %x invalid REC reject %d/%d\n",
+ fsp->rport->port_id, rjt->er_reason,
+ rjt->er_explan);
+ fallthrough;
+ case ELS_RJT_UNSUP:
+ FC_FCP_DBG(fsp, "device does not support REC\n");
+ rpriv = fsp->rport->dd_data;
+ /*
+			 * If we do not support RECs or got some bogus
+			 * reject reason, set the timer up again so we
+			 * keep checking for progress.
+ */
+ rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
+ break;
+ case ELS_RJT_LOGIC:
+ case ELS_RJT_UNAB:
+ FC_FCP_DBG(fsp, "device %x REC reject %d/%d\n",
+ fsp->rport->port_id, rjt->er_reason,
+ rjt->er_explan);
+ /*
+ * If response got lost or is stuck in the
+ * queue somewhere we have no idea if and when
+ * the response will be received. So quarantine
+ * the xid and retry the command.
+ */
+ if (rjt->er_explan == ELS_EXPL_OXID_RXID) {
+ struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
+ ep->state |= FC_EX_QUARANTINE;
+ fsp->state |= FC_SRB_ABORTED;
+ fc_fcp_retry_cmd(fsp, FC_TRANS_RESET);
+ break;
+ }
+ fc_fcp_recovery(fsp, FC_TRANS_RESET);
+ break;
+ }
+ } else if (opcode == ELS_LS_ACC) {
+ if (fsp->state & FC_SRB_ABORTED)
+ goto unlock_out;
+
+ data_dir = fsp->cmd->sc_data_direction;
+ recp = fc_frame_payload_get(fp, sizeof(*recp));
+ offset = ntohl(recp->reca_fc4value);
+ e_stat = ntohl(recp->reca_e_stat);
+
+ if (e_stat & ESB_ST_COMPLETE) {
+
+ /*
+ * The exchange is complete.
+ *
+ * For output, we must've lost the response.
+ * For input, all data must've been sent.
+ * We may have lost the response
+ * (and a confirmation was requested) and maybe
+ * some data.
+ *
+ * If all data received, send SRR
+ * asking for response. If partial data received,
+ * or gaps, SRR requests data at start of gap.
+ * Recovery via SRR relies on in-order-delivery.
+ */
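+			/*
+			 * In short: write -> SRR for status; read with no
+			 * gaps -> SRR for status; read with a gap -> SRR
+			 * for data at the start of the gap.
+			 */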
+ if (data_dir == DMA_TO_DEVICE) {
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ } else if (fsp->xfer_contig_end == offset) {
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ } else {
+ offset = fsp->xfer_contig_end;
+ r_ctl = FC_RCTL_DD_SOL_DATA;
+ }
+ fc_fcp_srr(fsp, r_ctl, offset);
+ } else if (e_stat & ESB_ST_SEQ_INIT) {
+ /*
+ * The remote port has the initiative, so just
+ * keep waiting for it to complete.
+ */
+ fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
+ } else {
+
+ /*
+ * The exchange is incomplete, we have seq. initiative.
+ * Lost response with requested confirmation,
+ * lost confirmation, lost transfer ready or
+ * lost write data.
+ *
+ * For output, if not all data was received, ask
+ * for transfer ready to be repeated.
+ *
+ * If we received or sent all the data, send SRR to
+ * request response.
+ *
+ * If we lost a response, we may have lost some read
+ * data as well.
+ */
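+			/*
+			 * In short: write with data remaining -> SRR for
+			 * transfer ready; write complete -> SRR for status;
+			 * read with no gaps -> SRR for status; read with a
+			 * gap -> SRR for data at the start of the gap.
+			 */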
+ r_ctl = FC_RCTL_DD_SOL_DATA;
+ if (data_dir == DMA_TO_DEVICE) {
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ if (offset < fsp->data_len)
+ r_ctl = FC_RCTL_DD_DATA_DESC;
+ } else if (offset == fsp->xfer_contig_end) {
+ r_ctl = FC_RCTL_DD_CMD_STATUS;
+ } else if (fsp->xfer_contig_end < offset) {
+ offset = fsp->xfer_contig_end;
+ }
+ fc_fcp_srr(fsp, r_ctl, offset);
+ }
+ }
+unlock_out:
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_fcp_rec_error() - Handler for REC errors
+ * @fsp: The FCP packet the error is on
+ * @fp: The REC frame
+ */
+static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ int error = PTR_ERR(fp);
+
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+
+ switch (error) {
+ case -FC_EX_CLOSED:
+ FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange closed\n",
+ fsp, fsp->rport->port_id);
+ fc_fcp_retry_cmd(fsp, FC_ERROR);
+ break;
+
+ default:
+ FC_FCP_DBG(fsp, "REC %p fid %6.6x error unexpected error %d\n",
+ fsp, fsp->rport->port_id, error);
+ fsp->status_code = FC_CMD_PLOGO;
+ fallthrough;
+
+ case -FC_EX_TIMEOUT:
+ /*
+ * Assume REC or LS_ACC was lost.
+ * The exchange manager will have aborted REC, so retry.
+ */
+ FC_FCP_DBG(fsp, "REC %p fid %6.6x exchange timeout retry %d/%d\n",
+ fsp, fsp->rport->port_id, fsp->recov_retry,
+ FC_MAX_RECOV_RETRY);
+ if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+ fc_fcp_rec(fsp);
+ else
+ fc_fcp_recovery(fsp, FC_ERROR);
+ break;
+ }
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fc_fcp_pkt_release(fsp); /* drop hold for outstanding REC */
+}
+
+/**
+ * fc_fcp_recovery() - Handler for fcp_pkt recovery
+ * @fsp: The FCP pkt that needs to be aborted
+ * @code: The FCP status code to set
+ */
+static void fc_fcp_recovery(struct fc_fcp_pkt *fsp, u8 code)
+{
+ FC_FCP_DBG(fsp, "start recovery code %x\n", code);
+ fsp->status_code = code;
+ fsp->cdb_status = 0;
+ fsp->io_status = 0;
+ /*
+ * if this fails then we let the scsi command timer fire and
+ * scsi-ml escalate.
+ */
+ fc_fcp_send_abort(fsp);
+}
+
+/**
+ * fc_fcp_srr() - Send an SRR request (Sequence Retransmission Request)
+ * @fsp: The FCP packet the SRR is to be sent on
+ * @r_ctl: The R_CTL field for the SRR request
+ * @offset: The SRR relative offset
+ *
+ * This is called after receiving status but insufficient data, or
+ * when expecting status but the request has timed out.
+ */
+static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
+{
+ struct fc_lport *lport = fsp->lp;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rpriv;
+ struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
+ struct fc_seq *seq;
+ struct fcp_srr *srr;
+ struct fc_frame *fp;
+
+ rport = fsp->rport;
+ rpriv = rport->dd_data;
+
+ if (!(rpriv->flags & FC_RP_FLAGS_RETRY) ||
+ rpriv->rp_state != RPORT_ST_READY)
+ goto retry; /* shouldn't happen */
+ fp = fc_fcp_frame_alloc(lport, sizeof(*srr));
+ if (!fp)
+ goto retry;
+
+ srr = fc_frame_payload_get(fp, sizeof(*srr));
+ memset(srr, 0, sizeof(*srr));
+ srr->srr_op = ELS_SRR;
+ srr->srr_ox_id = htons(ep->oxid);
+ srr->srr_rx_id = htons(ep->rxid);
+ srr->srr_r_ctl = r_ctl;
+ srr->srr_rel_off = htonl(offset);
+
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
+ rpriv->local_port->port_id, FC_TYPE_FCP,
+ FC_FCTL_REQ, 0);
+
+ seq = fc_exch_seq_send(lport, fp, fc_fcp_srr_resp,
+ fc_fcp_pkt_destroy,
+ fsp, get_fsp_rec_tov(fsp));
+ if (!seq)
+ goto retry;
+
+ fsp->recov_seq = seq;
+ fsp->xfer_len = offset;
+ fsp->xfer_contig_end = offset;
+ fsp->state &= ~FC_SRB_RCV_STATUS;
+ fc_fcp_pkt_hold(fsp); /* hold for outstanding SRR */
+ return;
+retry:
+ fc_fcp_retry_cmd(fsp, FC_TRANS_RESET);
+}
+
+/**
+ * fc_fcp_srr_resp() - Handler for SRR response
+ * @seq: The sequence the SRR is on
+ * @fp: The SRR frame
+ * @arg: The FCP packet the SRR is on
+ */
+static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
+{
+ struct fc_fcp_pkt *fsp = arg;
+ struct fc_frame_header *fh;
+
+ if (IS_ERR(fp)) {
+ fc_fcp_srr_error(fsp, fp);
+ return;
+ }
+
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+
+ fh = fc_frame_header_get(fp);
+ /*
+ * BUG? fc_fcp_srr_error calls fc_exch_done which would release
+ * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
+ * then fc_exch_timeout would be sending an abort. The fc_exch_done
+ * call by fc_fcp_srr_error would prevent fc_exch.c from seeing
+ * an abort response though.
+ */
+ if (fh->fh_type == FC_TYPE_BLS) {
+ fc_fcp_unlock_pkt(fsp);
+ return;
+ }
+
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_LS_ACC:
+ fsp->recov_retry = 0;
+ fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
+ break;
+ case ELS_LS_RJT:
+ default:
+ fc_fcp_recovery(fsp, FC_ERROR);
+ break;
+ }
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fc_exch_done(seq);
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_fcp_srr_error() - Handler for SRR errors
+ * @fsp: The FCP packet that the SRR error is on
+ * @fp: The SRR frame
+ */
+static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
+{
+ if (fc_fcp_lock_pkt(fsp))
+ goto out;
+ switch (PTR_ERR(fp)) {
+ case -FC_EX_TIMEOUT:
+ FC_FCP_DBG(fsp, "SRR timeout, retries %d\n", fsp->recov_retry);
+ if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
+ fc_fcp_rec(fsp);
+ else
+ fc_fcp_recovery(fsp, FC_TIMED_OUT);
+ break;
+ case -FC_EX_CLOSED: /* e.g., link failure */
+ FC_FCP_DBG(fsp, "SRR error, exchange closed\n");
+ fallthrough;
+ default:
+ fc_fcp_retry_cmd(fsp, FC_ERROR);
+ break;
+ }
+ fc_fcp_unlock_pkt(fsp);
+out:
+ fc_exch_done(fsp->recov_seq);
+}
+
+/**
+ * fc_fcp_lport_queue_ready() - Determine if the lport and its queue are ready
+ * @lport: The local port to be checked
+ */
+static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport)
+{
+	/* XXX: should this check be made under the lport lock? */
+ return (lport->state == LPORT_ST_READY) &&
+ lport->link_up && !lport->qfull;
+}
+
+/**
+ * fc_queuecommand() - The queuecommand function of the SCSI template
+ * @shost: The Scsi_Host that the command was issued to
+ * @sc_cmd: The scsi_cmnd to be executed
+ *
+ * This is the I/O strategy routine, called by the SCSI layer.
+ */
+int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
+{
+ struct fc_lport *lport = shost_priv(shost);
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ struct fc_fcp_pkt *fsp;
+ int rval;
+ int rc = 0;
+
+ rval = fc_remote_port_chkready(rport);
+ if (rval) {
+ sc_cmd->result = rval;
+ scsi_done(sc_cmd);
+ return 0;
+ }
+
+ if (!*(struct fc_remote_port **)rport->dd_data) {
+ /*
+ * rport is transitioning from blocked/deleted to
+ * online
+ */
+ sc_cmd->result = DID_IMM_RETRY << 16;
+ scsi_done(sc_cmd);
+ goto out;
+ }
+
+ if (!fc_fcp_lport_queue_ready(lport)) {
+ if (lport->qfull) {
+ if (fc_fcp_can_queue_ramp_down(lport))
+ shost_printk(KERN_ERR, lport->host,
+ "libfc: queue full, "
+ "reducing can_queue to %d.\n",
+ lport->host->can_queue);
+ }
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ fsp = fc_fcp_pkt_alloc(lport, GFP_ATOMIC);
+ if (fsp == NULL) {
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
+ /*
+ * build the libfc request pkt
+ */
+ fsp->cmd = sc_cmd; /* save the cmd */
+ fsp->rport = rport; /* set the remote port ptr */
+
+ /*
+ * set up the transfer length
+ */
+ fsp->data_len = scsi_bufflen(sc_cmd);
+ fsp->xfer_len = 0;
+
+ /*
+ * setup the data direction
+ */
+ if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ fsp->req_flags = FC_SRB_READ;
+ this_cpu_inc(lport->stats->InputRequests);
+ this_cpu_add(lport->stats->InputBytes, fsp->data_len);
+ } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
+ fsp->req_flags = FC_SRB_WRITE;
+ this_cpu_inc(lport->stats->OutputRequests);
+ this_cpu_add(lport->stats->OutputBytes, fsp->data_len);
+ } else {
+ fsp->req_flags = 0;
+ this_cpu_inc(lport->stats->ControlRequests);
+ }
+
+ /*
+	 * Send it to the lower layer. If it fails, release the packet and
+	 * return SCSI_MLQUEUE_HOST_BUSY so the midlayer will requeue the
+	 * command.
+ */
+ rval = fc_fcp_pkt_send(lport, fsp);
+ if (rval != 0) {
+ fsp->state = FC_SRB_FREE;
+ fc_fcp_pkt_release(fsp);
+ rc = SCSI_MLQUEUE_HOST_BUSY;
+ }
+out:
+ return rc;
+}
+EXPORT_SYMBOL(fc_queuecommand);
+
+/**
+ * fc_io_compl() - Handle responses for completed commands
+ * @fsp: The FCP packet that is complete
+ *
+ * Translates fcp_pkt errors into Linux SCSI errors.
+ * The fcp packet lock must be held when calling.
+ */
+static void fc_io_compl(struct fc_fcp_pkt *fsp)
+{
+ struct fc_fcp_internal *si;
+ struct scsi_cmnd *sc_cmd;
+ struct fc_lport *lport;
+ unsigned long flags;
+
+ /* release outstanding ddp context */
+ fc_fcp_ddp_done(fsp);
+
+ fsp->state |= FC_SRB_COMPL;
+ if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) {
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+ del_timer_sync(&fsp->timer);
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ }
+
+ lport = fsp->lp;
+ si = fc_get_scsi_internal(lport);
+
+ /*
+ * if can_queue ramp down is done then try can_queue ramp up
+ * since commands are completing now.
+ */
+ if (si->last_can_queue_ramp_down_time)
+ fc_fcp_can_queue_ramp_up(lport);
+
+ sc_cmd = fsp->cmd;
+ libfc_priv(sc_cmd)->status = fsp->cdb_status;
+ switch (fsp->status_code) {
+ case FC_COMPLETE:
+ if (fsp->cdb_status == 0) {
+ /*
+ * good I/O status
+ */
+ sc_cmd->result = DID_OK << 16;
+ if (fsp->scsi_resid)
+ libfc_priv(sc_cmd)->resid_len = fsp->scsi_resid;
+ } else {
+ /*
+ * transport level I/O was ok but scsi
+ * has non zero status
+ */
+ sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
+ }
+ break;
+ case FC_ERROR:
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_ERROR\n");
+ sc_cmd->result = DID_ERROR << 16;
+ break;
+ case FC_DATA_UNDRUN:
+ if ((fsp->cdb_status == 0) && !(fsp->req_flags & FC_SRB_READ)) {
+ /*
+ * scsi status is good but transport level
+ * underrun.
+ */
+ if (fsp->state & FC_SRB_RCV_STATUS) {
+ sc_cmd->result = DID_OK << 16;
+ } else {
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml"
+ " due to FC_DATA_UNDRUN (trans)\n");
+ sc_cmd->result = DID_ERROR << 16;
+ }
+ } else {
+ /*
+ * scsi got underrun, this is an error
+ */
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_DATA_UNDRUN (scsi)\n");
+ libfc_priv(sc_cmd)->resid_len = fsp->scsi_resid;
+ sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
+ }
+ break;
+ case FC_DATA_OVRRUN:
+ /*
+ * overrun is an error
+ */
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_DATA_OVRRUN\n");
+ sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
+ break;
+ case FC_CMD_ABORTED:
+ if (host_byte(sc_cmd->result) == DID_TIME_OUT)
+ FC_FCP_DBG(fsp, "Returning DID_TIME_OUT to scsi-ml "
+ "due to FC_CMD_ABORTED\n");
+ else {
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to FC_CMD_ABORTED\n");
+ set_host_byte(sc_cmd, DID_ERROR);
+ }
+ sc_cmd->result |= fsp->io_status;
+ break;
+ case FC_CMD_RESET:
+ FC_FCP_DBG(fsp, "Returning DID_RESET to scsi-ml "
+ "due to FC_CMD_RESET\n");
+ sc_cmd->result = (DID_RESET << 16);
+ break;
+ case FC_TRANS_RESET:
+ FC_FCP_DBG(fsp, "Returning DID_SOFT_ERROR to scsi-ml "
+ "due to FC_TRANS_RESET\n");
+ sc_cmd->result = (DID_SOFT_ERROR << 16);
+ break;
+ case FC_HRD_ERROR:
+ FC_FCP_DBG(fsp, "Returning DID_NO_CONNECT to scsi-ml "
+ "due to FC_HRD_ERROR\n");
+ sc_cmd->result = (DID_NO_CONNECT << 16);
+ break;
+ case FC_CRC_ERROR:
+ FC_FCP_DBG(fsp, "Returning DID_PARITY to scsi-ml "
+ "due to FC_CRC_ERROR\n");
+ sc_cmd->result = (DID_PARITY << 16);
+ break;
+ case FC_TIMED_OUT:
+ FC_FCP_DBG(fsp, "Returning DID_BUS_BUSY to scsi-ml "
+ "due to FC_TIMED_OUT\n");
+ sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
+ break;
+ default:
+ FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
+ "due to unknown error\n");
+ sc_cmd->result = (DID_ERROR << 16);
+ break;
+ }
+
+ if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE)
+ sc_cmd->result = (DID_TRANSPORT_DISRUPTED << 16);
+
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
+ list_del(&fsp->list);
+ libfc_priv(sc_cmd)->fsp = NULL;
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+ scsi_done(sc_cmd);
+
+ /* release ref from initial allocation in queue command */
+ fc_fcp_pkt_release(fsp);
+}
+
+/**
+ * fc_eh_abort() - Abort a command
+ * @sc_cmd: The SCSI command to abort
+ *
+ * Called from the SCSI host template.
+ * Send an ABTS to the target device and wait for the response.
+ */
+int fc_eh_abort(struct scsi_cmnd *sc_cmd)
+{
+ struct fc_fcp_pkt *fsp;
+ struct fc_lport *lport;
+ struct fc_fcp_internal *si;
+ int rc = FAILED;
+ unsigned long flags;
+ int rval;
+
+ rval = fc_block_scsi_eh(sc_cmd);
+ if (rval)
+ return rval;
+
+ lport = shost_priv(sc_cmd->device->host);
+ if (lport->state != LPORT_ST_READY)
+ return rc;
+ else if (!lport->link_up)
+ return rc;
+
+ si = fc_get_scsi_internal(lport);
+ spin_lock_irqsave(&si->scsi_queue_lock, flags);
+ fsp = libfc_priv(sc_cmd)->fsp;
+ if (!fsp) {
+ /* command completed while scsi eh was setting up */
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+ return SUCCESS;
+ }
+ /* grab a ref so the fsp and sc_cmd cannot be released from under us */
+ fc_fcp_pkt_hold(fsp);
+ spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
+
+ if (fc_fcp_lock_pkt(fsp)) {
+ /* completed while we were waiting for timer to be deleted */
+ rc = SUCCESS;
+ goto release_pkt;
+ }
+
+ rc = fc_fcp_pkt_abort(fsp);
+ fc_fcp_unlock_pkt(fsp);
+
+release_pkt:
+ fc_fcp_pkt_release(fsp);
+ return rc;
+}
+EXPORT_SYMBOL(fc_eh_abort);
+
+/**
+ * fc_eh_device_reset() - Reset a single LUN
+ * @sc_cmd: The SCSI command which identifies the device whose
+ * LUN is to be reset
+ *
+ * Called from the SCSI host template.
+ */
+int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
+{
+ struct fc_lport *lport;
+ struct fc_fcp_pkt *fsp;
+ struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
+ int rc = FAILED;
+ int rval;
+
+ rval = fc_block_scsi_eh(sc_cmd);
+ if (rval)
+ return rval;
+
+ lport = shost_priv(sc_cmd->device->host);
+
+ if (lport->state != LPORT_ST_READY)
+ return rc;
+
+ FC_SCSI_DBG(lport, "Resetting rport (%6.6x)\n", rport->port_id);
+
+ fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO);
+ if (fsp == NULL) {
+ printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n");
+ goto out;
+ }
+
+ /*
+ * Build the libfc request pkt. Do not set the scsi cmnd, because
+ * the sc passed in is not setup for execution like when sent
+ * through the queuecommand callout.
+ */
+ fsp->rport = rport; /* set the remote port ptr */
+
+ /*
+ * flush outstanding commands
+ */
+ rc = fc_lun_reset(lport, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
+ fsp->state = FC_SRB_FREE;
+ fc_fcp_pkt_release(fsp);
+
+out:
+ return rc;
+}
+EXPORT_SYMBOL(fc_eh_device_reset);
+
+/**
+ * fc_eh_host_reset() - Reset a Scsi_Host.
+ * @sc_cmd: The SCSI command that identifies the SCSI host to be reset
+ */
+int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
+{
+ struct Scsi_Host *shost = sc_cmd->device->host;
+ struct fc_lport *lport = shost_priv(shost);
+ unsigned long wait_tmo;
+
+ FC_SCSI_DBG(lport, "Resetting host\n");
+
+ fc_lport_reset(lport);
+ wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
+ while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
+ wait_tmo))
+ msleep(1000);
+
+ if (fc_fcp_lport_queue_ready(lport)) {
+ shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded "
+ "on port (%6.6x)\n", lport->port_id);
+ return SUCCESS;
+ } else {
+ shost_printk(KERN_INFO, shost, "libfc: Host reset failed, "
+ "port (%6.6x) is not ready.\n",
+ lport->port_id);
+ return FAILED;
+ }
+}
+EXPORT_SYMBOL(fc_eh_host_reset);
+
+/**
+ * fc_slave_alloc() - Configure the queue depth of a Scsi_Host
+ * @sdev: The SCSI device that identifies the SCSI host
+ *
+ * Configures the queue depth based on the host's cmd_per_lun. If that is
+ * not set then the libfc default is used.
+ */
+int fc_slave_alloc(struct scsi_device *sdev)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+
+ if (!rport || fc_remote_port_chkready(rport))
+ return -ENXIO;
+
+ scsi_change_queue_depth(sdev, FC_FCP_DFLT_QUEUE_DEPTH);
+ return 0;
+}
+EXPORT_SYMBOL(fc_slave_alloc);
+
+/**
+ * fc_fcp_destroy() - Tear down the FCP layer for a given local port
+ * @lport: The local port that no longer needs the FCP layer
+ */
+void fc_fcp_destroy(struct fc_lport *lport)
+{
+ struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
+
+ if (!list_empty(&si->scsi_pkt_queue))
+ printk(KERN_ERR "libfc: Leaked SCSI packets when destroying "
+ "port (%6.6x)\n", lport->port_id);
+
+ mempool_destroy(si->scsi_pkt_pool);
+ kfree(si);
+ lport->scsi_priv = NULL;
+}
+EXPORT_SYMBOL(fc_fcp_destroy);
+
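+/**
+ * fc_setup_fcp() - Create the libfc FCP packet (SRB) cache
+ *
+ * Called from libfc_init() at module load time.
+ */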
+int fc_setup_fcp(void)
+{
+ int rc = 0;
+
+ scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
+ sizeof(struct fc_fcp_pkt),
+ 0, SLAB_HWCACHE_ALIGN, NULL);
+ if (!scsi_pkt_cachep) {
+ printk(KERN_ERR "libfc: Unable to allocate SRB cache, "
+ "module load failed!");
+ rc = -ENOMEM;
+ }
+
+ return rc;
+}
+
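+/**
+ * fc_destroy_fcp() - Destroy the libfc FCP packet (SRB) cache
+ *
+ * Called from libfc_exit() at module unload time.
+ */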
+void fc_destroy_fcp(void)
+{
+ kmem_cache_destroy(scsi_pkt_cachep);
+}
+
+/**
+ * fc_fcp_init() - Initialize the FCP layer for a local port
+ * @lport: The local port to initialize the FCP layer for
+ */
+int fc_fcp_init(struct fc_lport *lport)
+{
+ int rc;
+ struct fc_fcp_internal *si;
+
+ if (!lport->tt.fcp_cmd_send)
+ lport->tt.fcp_cmd_send = fc_fcp_cmd_send;
+
+ if (!lport->tt.fcp_cleanup)
+ lport->tt.fcp_cleanup = fc_fcp_cleanup;
+
+ if (!lport->tt.fcp_abort_io)
+ lport->tt.fcp_abort_io = fc_fcp_abort_io;
+
+ si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
+ if (!si)
+ return -ENOMEM;
+ lport->scsi_priv = si;
+ si->max_can_queue = lport->host->can_queue;
+ INIT_LIST_HEAD(&si->scsi_pkt_queue);
+ spin_lock_init(&si->scsi_queue_lock);
+
+ si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
+ if (!si->scsi_pkt_pool) {
+ rc = -ENOMEM;
+ goto free_internal;
+ }
+ return 0;
+
+free_internal:
+ kfree(si);
+ return rc;
+}
+EXPORT_SYMBOL(fc_fcp_init);
diff --git a/drivers/scsi/libfc/fc_frame.c b/drivers/scsi/libfc/fc_frame.c
new file mode 100644
index 000000000..f3aefb2de
--- /dev/null
+++ b/drivers/scsi/libfc/fc_frame.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * Frame allocation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/crc32.h>
+#include <linux/gfp.h>
+
+#include <scsi/fc_frame.h>
+
+/*
+ * Check the CRC in a frame.
+ */
+u32 fc_frame_crc_check(struct fc_frame *fp)
+{
+ u32 crc;
+ u32 error;
+ const u8 *bp;
+ unsigned int len;
+
+ WARN_ON(!fc_frame_is_linear(fp));
+ fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
+ len = (fr_len(fp) + 3) & ~3; /* round up length to include fill */
+ bp = (const u8 *) fr_hdr(fp);
+ crc = ~crc32(~0, bp, len);
+ error = crc ^ fr_crc(fp);
+ return error;
+}
+EXPORT_SYMBOL(fc_frame_crc_check);
+
+/*
+ * Allocate a frame intended to be sent.
+ * Get an sk_buff for the frame and set the length.
+ */
+struct fc_frame *_fc_frame_alloc(size_t len)
+{
+ struct fc_frame *fp;
+ struct sk_buff *skb;
+
+ WARN_ON((len % sizeof(u32)) != 0);
+ len += sizeof(struct fc_frame_header);
+ skb = alloc_skb_fclone(len + FC_FRAME_HEADROOM + FC_FRAME_TAILROOM +
+ NET_SKB_PAD, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+ skb_reserve(skb, NET_SKB_PAD + FC_FRAME_HEADROOM);
+ fp = (struct fc_frame *) skb;
+ fc_frame_init(fp);
+ skb_put(skb, len);
+ return fp;
+}
+EXPORT_SYMBOL(_fc_frame_alloc);
+
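+/*
+ * Allocate a frame, zero-padding the payload to a 4-byte multiple.
+ * Example (illustrative): payload_len = 13 gives fill = 3; the frame is
+ * allocated with 16 payload bytes, bytes 13..15 are zeroed, and the skb
+ * is then trimmed back to the 13-byte payload plus the frame header.
+ */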
+struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
+{
+ struct fc_frame *fp;
+ size_t fill;
+
+ fill = payload_len % 4;
+ if (fill != 0)
+ fill = 4 - fill;
+ fp = _fc_frame_alloc(payload_len + fill);
+ if (fp) {
+ memset((char *) fr_hdr(fp) + payload_len, 0, fill);
+ /* trim is OK, we just allocated it so there are no fragments */
+ skb_trim(fp_skb(fp),
+ payload_len + sizeof(struct fc_frame_header));
+ }
+ return fp;
+}
+EXPORT_SYMBOL(fc_frame_alloc_fill);
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
new file mode 100644
index 000000000..0e6a1355d
--- /dev/null
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2009 Intel Corporation. All rights reserved.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+#include <linux/crc32.h>
+#include <linux/module.h>
+
+#include <scsi/libfc.h>
+
+#include "fc_encode.h"
+#include "fc_libfc.h"
+
+MODULE_AUTHOR("Open-FCoE.org");
+MODULE_DESCRIPTION("libfc");
+MODULE_LICENSE("GPL v2");
+
+unsigned int fc_debug_logging;
+module_param_named(debug_logging, fc_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+DEFINE_MUTEX(fc_prov_mutex);
+static LIST_HEAD(fc_local_ports);
+struct blocking_notifier_head fc_lport_notifier_head =
+ BLOCKING_NOTIFIER_INIT(fc_lport_notifier_head);
+EXPORT_SYMBOL(fc_lport_notifier_head);
+
+/*
+ * Providers which primarily send requests and PRLIs.
+ */
+struct fc4_prov *fc_active_prov[FC_FC4_PROV_SIZE] = {
+ [0] = &fc_rport_t0_prov,
+ [FC_TYPE_FCP] = &fc_rport_fcp_init,
+};
+
+/*
+ * Providers which receive requests.
+ */
+struct fc4_prov *fc_passive_prov[FC_FC4_PROV_SIZE] = {
+ [FC_TYPE_ELS] = &fc_lport_els_prov,
+};
+
+/**
+ * libfc_init() - Initialize libfc.ko
+ */
+static int __init libfc_init(void)
+{
+ int rc = 0;
+
+ rc = fc_setup_fcp();
+ if (rc)
+ return rc;
+
+ rc = fc_setup_exch_mgr();
+ if (rc)
+ goto destroy_pkt_cache;
+
+ rc = fc_setup_rport();
+ if (rc)
+ goto destroy_em;
+
+ return rc;
+destroy_em:
+ fc_destroy_exch_mgr();
+destroy_pkt_cache:
+ fc_destroy_fcp();
+ return rc;
+}
+module_init(libfc_init);
+
+/**
+ * libfc_exit() - Tear down libfc.ko
+ */
+static void __exit libfc_exit(void)
+{
+ fc_destroy_fcp();
+ fc_destroy_exch_mgr();
+ fc_destroy_rport();
+}
+module_exit(libfc_exit);
+
+/**
+ * fc_copy_buffer_to_sglist() - Copy the data of a buffer into a
+ * scatter-gather list (SG list).
+ *
+ * @buf: pointer to the data buffer.
+ * @len: the byte-length of the data buffer.
+ * @sg: pointer to the pointer of the SG list.
+ * @nents: pointer to the remaining number of entries in the SG list.
+ * @offset: pointer to the current offset in the SG list.
+ * @crc: pointer to the 32-bit crc value.
+ * If crc is NULL, CRC is not calculated.
+ */
+u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
+ struct scatterlist *sg,
+ u32 *nents, size_t *offset,
+ u32 *crc)
+{
+ size_t remaining = len;
+ u32 copy_len = 0;
+
+ while (remaining > 0 && sg) {
+ size_t off, sg_bytes;
+ void *page_addr;
+
+ if (*offset >= sg->length) {
+ /*
+			 * This entry is consumed; advance to the
+			 * next scatterlist entry, unless the list
+			 * is exhausted.
+ */
+ if (!(*nents))
+ break;
+ --(*nents);
+ *offset -= sg->length;
+ sg = sg_next(sg);
+ continue;
+ }
+ sg_bytes = min(remaining, sg->length - *offset);
+
+ /*
+ * The scatterlist item may be bigger than PAGE_SIZE,
+ * but we are limited to mapping PAGE_SIZE at a time.
+ */
+ off = *offset + sg->offset;
+ sg_bytes = min(sg_bytes,
+ (size_t)(PAGE_SIZE - (off & ~PAGE_MASK)));
+ page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT));
+ if (crc)
+ *crc = crc32(*crc, buf, sg_bytes);
+ memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes);
+ kunmap_atomic(page_addr);
+ buf += sg_bytes;
+ *offset += sg_bytes;
+ remaining -= sg_bytes;
+ copy_len += sg_bytes;
+ }
+ return copy_len;
+}
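+
+/*
+ * Example (illustrative only): copying a CT/ELS response payload into a
+ * bsg job's reply scatterlist, with nents and offset tracking position
+ * across successive calls:
+ *
+ *	size_t offset = 0;
+ *	u32 nents = job->reply_payload.sg_cnt;
+ *
+ *	fc_copy_buffer_to_sglist(buf, len, job->reply_payload.sg_list,
+ *				 &nents, &offset, NULL);
+ */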
+
+/**
+ * fc_fill_hdr() - fill FC header fields based on request
+ * @fp: reply frame containing header to be filled in
+ * @in_fp: request frame containing header to use in filling in reply
+ * @r_ctl: R_CTL value for header
+ * @f_ctl: F_CTL value for header, with 0 pad
+ * @seq_cnt: sequence count for the header, ignored if frame has a sequence
+ * @parm_offset: parameter / offset value
+ */
+void fc_fill_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
+ enum fc_rctl r_ctl, u32 f_ctl, u16 seq_cnt, u32 parm_offset)
+{
+ struct fc_frame_header *fh;
+ struct fc_frame_header *in_fh;
+ struct fc_seq *sp;
+ u32 fill;
+
+ fh = __fc_frame_header_get(fp);
+ in_fh = __fc_frame_header_get(in_fp);
+
+ if (f_ctl & FC_FC_END_SEQ) {
+ fill = -fr_len(fp) & 3;
+ if (fill) {
+ /* TODO, this may be a problem with fragmented skb */
+ skb_put_zero(fp_skb(fp), fill);
+ f_ctl |= fill;
+ }
+ fr_eof(fp) = FC_EOF_T;
+ } else {
+ WARN_ON(fr_len(fp) % 4 != 0); /* no pad to non last frame */
+ fr_eof(fp) = FC_EOF_N;
+ }
+
+ fh->fh_r_ctl = r_ctl;
+ memcpy(fh->fh_d_id, in_fh->fh_s_id, sizeof(fh->fh_d_id));
+ memcpy(fh->fh_s_id, in_fh->fh_d_id, sizeof(fh->fh_s_id));
+ fh->fh_type = in_fh->fh_type;
+ hton24(fh->fh_f_ctl, f_ctl);
+ fh->fh_ox_id = in_fh->fh_ox_id;
+ fh->fh_rx_id = in_fh->fh_rx_id;
+ fh->fh_cs_ctl = 0;
+ fh->fh_df_ctl = 0;
+ fh->fh_parm_offset = htonl(parm_offset);
+
+ sp = fr_seq(in_fp);
+ if (sp) {
+ fr_seq(fp) = sp;
+ fh->fh_seq_id = sp->id;
+ seq_cnt = sp->cnt;
+ } else {
+ fh->fh_seq_id = 0;
+ }
+	fh->fh_seq_cnt = htons(seq_cnt);
+ fr_sof(fp) = seq_cnt ? FC_SOF_N3 : FC_SOF_I3;
+ fr_encaps(fp) = fr_encaps(in_fp);
+}
+EXPORT_SYMBOL(fc_fill_hdr);
+
+/**
+ * fc_fill_reply_hdr() - fill FC reply header fields based on request
+ * @fp: reply frame containing header to be filled in
+ * @in_fp: request frame containing header to use in filling in reply
+ * @r_ctl: R_CTL value for reply
+ * @parm_offset: parameter / offset value
+ */
+void fc_fill_reply_hdr(struct fc_frame *fp, const struct fc_frame *in_fp,
+ enum fc_rctl r_ctl, u32 parm_offset)
+{
+ struct fc_seq *sp;
+
+ sp = fr_seq(in_fp);
+ if (sp)
+ fr_seq(fp) = fc_seq_start_next(sp);
+ fc_fill_hdr(fp, in_fp, r_ctl, FC_FCTL_RESP, 0, parm_offset);
+}
+EXPORT_SYMBOL(fc_fill_reply_hdr);
+
+/**
+ * fc_fc4_conf_lport_params() - Modify "service_params" of specified lport
+ * if there is service provider (target provider) registered with libfc
+ * for specified "fc_ft_type"
+ * @lport: Local port which service_params needs to be modified
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ */
+void fc_fc4_conf_lport_params(struct fc_lport *lport, enum fc_fh_type type)
+{
+ struct fc4_prov *prov_entry;
+ BUG_ON(type >= FC_FC4_PROV_SIZE);
+ BUG_ON(!lport);
+ prov_entry = fc_passive_prov[type];
+ if (type == FC_TYPE_FCP) {
+ if (prov_entry && prov_entry->recv)
+ lport->service_params |= FCP_SPPF_TARG_FCN;
+ }
+}
+
+void fc_lport_iterate(void (*notify)(struct fc_lport *, void *), void *arg)
+{
+ struct fc_lport *lport;
+
+ mutex_lock(&fc_prov_mutex);
+ list_for_each_entry(lport, &fc_local_ports, lport_list)
+ notify(lport, arg);
+ mutex_unlock(&fc_prov_mutex);
+}
+EXPORT_SYMBOL(fc_lport_iterate);
+
+/**
+ * fc_fc4_register_provider() - register FC-4 upper-level provider.
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ * @prov: structure describing provider including ops vector.
+ *
+ * Returns 0 on success, negative error otherwise.
+ */
+int fc_fc4_register_provider(enum fc_fh_type type, struct fc4_prov *prov)
+{
+ struct fc4_prov **prov_entry;
+ int ret = 0;
+
+ if (type >= FC_FC4_PROV_SIZE)
+ return -EINVAL;
+ mutex_lock(&fc_prov_mutex);
+ prov_entry = (prov->recv ? fc_passive_prov : fc_active_prov) + type;
+ if (*prov_entry)
+ ret = -EBUSY;
+ else
+ *prov_entry = prov;
+ mutex_unlock(&fc_prov_mutex);
+ return ret;
+}
+EXPORT_SYMBOL(fc_fc4_register_provider);
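+
+/*
+ * Example (illustrative only): a hypothetical FC-4 target provider
+ * registering a receive handler for FCP frames:
+ *
+ *	static struct fc4_prov my_tgt_prov = {
+ *		.recv = my_tgt_recv,
+ *	};
+ *
+ *	err = fc_fc4_register_provider(FC_TYPE_FCP, &my_tgt_prov);
+ */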
+
+/**
+ * fc_fc4_deregister_provider() - deregister FC-4 upper-level provider.
+ * @type: FC-4 type, such as FC_TYPE_FCP
+ * @prov: structure describing provider including ops vector.
+ */
+void fc_fc4_deregister_provider(enum fc_fh_type type, struct fc4_prov *prov)
+{
+ BUG_ON(type >= FC_FC4_PROV_SIZE);
+ mutex_lock(&fc_prov_mutex);
+ if (prov->recv)
+ RCU_INIT_POINTER(fc_passive_prov[type], NULL);
+ else
+ RCU_INIT_POINTER(fc_active_prov[type], NULL);
+ mutex_unlock(&fc_prov_mutex);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL(fc_fc4_deregister_provider);
+
+/**
+ * fc_fc4_add_lport() - add new local port to list and run notifiers.
+ * @lport: The new local port.
+ */
+void fc_fc4_add_lport(struct fc_lport *lport)
+{
+ mutex_lock(&fc_prov_mutex);
+ list_add_tail(&lport->lport_list, &fc_local_ports);
+ blocking_notifier_call_chain(&fc_lport_notifier_head,
+ FC_LPORT_EV_ADD, lport);
+ mutex_unlock(&fc_prov_mutex);
+}
+
+/**
+ * fc_fc4_del_lport() - remove local port from list and run notifiers.
+ * @lport: The local port to be removed.
+ */
+void fc_fc4_del_lport(struct fc_lport *lport)
+{
+ mutex_lock(&fc_prov_mutex);
+ list_del(&lport->lport_list);
+ blocking_notifier_call_chain(&fc_lport_notifier_head,
+ FC_LPORT_EV_DEL, lport);
+ mutex_unlock(&fc_prov_mutex);
+}
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
new file mode 100644
index 000000000..685e3bdd0
--- /dev/null
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2009 Intel Corporation. All rights reserved.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+#ifndef _FC_LIBFC_H_
+#define _FC_LIBFC_H_
+
+#define FC_LIBFC_LOGGING 0x01 /* General logging, not categorized */
+#define FC_LPORT_LOGGING 0x02 /* lport layer logging */
+#define FC_DISC_LOGGING 0x04 /* discovery layer logging */
+#define FC_RPORT_LOGGING 0x08 /* rport layer logging */
+#define FC_FCP_LOGGING 0x10 /* I/O path logging */
+#define FC_EM_LOGGING 0x20 /* Exchange Manager logging */
+#define FC_EXCH_LOGGING 0x40 /* Exchange/Sequence logging */
+#define FC_SCSI_LOGGING 0x80 /* SCSI logging (mostly error handling) */
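+
+/*
+ * Example (illustrative): debug_logging is a bit mask of the values above,
+ * so 0x90 enables FC_FCP_LOGGING and FC_SCSI_LOGGING together. It may be
+ * set at load time ("modprobe libfc debug_logging=0x90") or, since the
+ * parameter is writable, at runtime via sysfs (path assumed:
+ * /sys/module/libfc/parameters/debug_logging).
+ */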
+
+extern unsigned int fc_debug_logging;
+
+#define FC_CHECK_LOGGING(LEVEL, CMD) \
+ do { \
+ if (unlikely(fc_debug_logging & LEVEL)) \
+ do { \
+ CMD; \
+ } while (0); \
+ } while (0)
+
+#define FC_LIBFC_DBG(fmt, args...) \
+ FC_CHECK_LOGGING(FC_LIBFC_LOGGING, \
+ pr_info("libfc: " fmt, ##args))
+
+#define FC_LPORT_DBG(lport, fmt, args...) \
+ FC_CHECK_LOGGING(FC_LPORT_LOGGING, \
+ pr_info("host%u: lport %6.6x: " fmt, \
+ (lport)->host->host_no, \
+ (lport)->port_id, ##args))
+
+#define FC_DISC_DBG(disc, fmt, args...) \
+ FC_CHECK_LOGGING(FC_DISC_LOGGING, \
+ pr_info("host%u: disc: " fmt, \
+ fc_disc_lport(disc)->host->host_no, \
+ ##args))
+
+#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \
+ FC_CHECK_LOGGING(FC_RPORT_LOGGING, \
+ pr_info("host%u: rport %6.6x: " fmt, \
+ (lport)->host->host_no, \
+ (port_id), ##args))
+
+#define FC_RPORT_DBG(rdata, fmt, args...) \
+ FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args)
+
+#define FC_FCP_DBG(pkt, fmt, args...) \
+ FC_CHECK_LOGGING(FC_FCP_LOGGING, \
+ { \
+ if ((pkt)->seq_ptr) { \
+ struct fc_exch *_ep = NULL; \
+ _ep = fc_seq_exch((pkt)->seq_ptr); \
+ pr_info("host%u: fcp: %6.6x: " \
+ "xid %04x-%04x: " fmt, \
+ (pkt)->lp->host->host_no, \
+ (pkt)->rport->port_id, \
+ (_ep)->oxid, (_ep)->rxid, ##args); \
+ } else { \
+ pr_info("host%u: fcp: %6.6x: " fmt, \
+ (pkt)->lp->host->host_no, \
+ (pkt)->rport->port_id, ##args); \
+ } \
+ })
+
+#define FC_EXCH_DBG(exch, fmt, args...) \
+ FC_CHECK_LOGGING(FC_EXCH_LOGGING, \
+ pr_info("host%u: xid %4x: " fmt, \
+ (exch)->lp->host->host_no, \
+ exch->xid, ##args))
+
+#define FC_SCSI_DBG(lport, fmt, args...) \
+ FC_CHECK_LOGGING(FC_SCSI_LOGGING, \
+ pr_info("host%u: scsi: " fmt, \
+ (lport)->host->host_no, ##args))
+
+/*
+ * FC-4 Providers.
+ */
+extern struct fc4_prov *fc_active_prov[]; /* providers without recv */
+extern struct fc4_prov *fc_passive_prov[]; /* providers with recv */
+extern struct mutex fc_prov_mutex; /* lock over table changes */
+
+extern struct fc4_prov fc_rport_t0_prov; /* type 0 provider */
+extern struct fc4_prov fc_lport_els_prov; /* ELS provider */
+extern struct fc4_prov fc_rport_fcp_init; /* FCP initiator provider */
+
+/*
+ * Set up direct-data placement for this I/O request
+ */
+void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid);
+void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp);
+
+/*
+ * Module setup functions
+ */
+int fc_setup_exch_mgr(void);
+void fc_destroy_exch_mgr(void);
+int fc_setup_rport(void);
+void fc_destroy_rport(void);
+int fc_setup_fcp(void);
+void fc_destroy_fcp(void);
+
+/*
+ * Internal libfc functions
+ */
+const char *fc_els_resp_type(struct fc_frame *);
+extern void fc_fc4_add_lport(struct fc_lport *);
+extern void fc_fc4_del_lport(struct fc_lport *);
+extern void fc_fc4_conf_lport_params(struct fc_lport *, enum fc_fh_type);
+
+/*
+ * Copies a buffer into an sg list
+ */
+u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
+ struct scatterlist *sg,
+ u32 *nents, size_t *offset,
+ u32 *crc);
+
+#endif /* _FC_LIBFC_H_ */
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
new file mode 100644
index 000000000..ab06e9aeb
--- /dev/null
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -0,0 +1,2200 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2007 Intel Corporation. All rights reserved.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * PORT LOCKING NOTES
+ *
+ * These comments only apply to the 'port code' which consists of the lport,
+ * disc and rport blocks.
+ *
+ * MOTIVATION
+ *
+ * The lport, disc and rport blocks all have mutexes that are used to protect
+ * those objects. The main motivation for these locks is to prevent an
+ * lport from being reset just before we send a frame. In that scenario the
+ * lport's FID would get set to zero and then we'd send a frame with an
+ * invalid SID. We also need to ensure that states don't change unexpectedly
+ * while processing another state.
+ *
+ * HIERARCHY
+ *
+ * The following hierarchy defines the locking rules. A greater lock
+ * may be held before acquiring a lesser lock, but a lesser lock should never
+ * be held while attempting to acquire a greater lock. Here is the hierarchy:
+ *
+ * lport > disc, lport > rport, disc > rport
+ *
+ * CALLBACKS
+ *
+ * The callbacks cause complications with this scheme. There is a callback
+ * from the rport (to either lport or disc) and a callback from disc
+ * (to the lport).
+ *
+ * As rports exit the rport state machine a callback is made to the owner of
+ * the rport to notify success or failure. Since the callback is likely to
+ * cause the lport or disc to grab its lock we cannot hold the rport lock
+ * while making the callback. To ensure that the rport is not freed while
+ * processing the callback, the rport callbacks are serialized through a
+ * single-threaded workqueue. An rport is never freed while in a
+ * callback handler because no other rport work in this queue can be executed
+ * at the same time.
+ *
+ * When discovery succeeds or fails a callback is made to the lport as
+ * notification. Currently, successful discovery causes the lport to take no
+ * action. A failure will cause the lport to reset. There is likely a circular
+ * locking problem with this implementation.
+ */
+
+/*
+ * LPORT LOCKING
+ *
+ * The critical sections protected by the lport's mutex are quite broad and
+ * may be improved upon in the future. The lport code and its locking don't
+ * influence the I/O path, so excessive locking doesn't penalize I/O
+ * performance.
+ *
+ * The strategy is to lock whenever processing a request or response. Note
+ * that every _enter_* function corresponds to a state change. They generally
+ * change the lport's state and then send a request out on the wire. We lock
+ * before calling any of these functions to protect that state change. This
+ * means that the entry points into the lport block manage the locks while
+ * the state machine can transition between states (i.e. _enter_* functions)
+ * while always staying protected.
+ *
+ * When handling responses we also hold the lport mutex broadly. When the
+ * lport receives the response frame it locks the mutex and then calls the
+ * appropriate handler for the particular response. Generally a response will
+ * trigger a state change and so the lock must already be held.
+ *
+ * Retries also have to consider the locking. The retries occur from a work
+ * context and the work function will lock the lport and then retry the state
+ * (i.e. _enter_* function).
+ */
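+
+/*
+ * Example (illustrative only): per the hierarchy above, a caller needing
+ * both the lport and disc locks must take the greater lock first:
+ *
+ *	mutex_lock(&lport->lp_mutex);
+ *	mutex_lock(&lport->disc.disc_mutex);
+ *	...
+ *	mutex_unlock(&lport->disc.disc_mutex);
+ *	mutex_unlock(&lport->lp_mutex);
+ */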
+
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#include <scsi/fc/fc_gs.h>
+
+#include <scsi/libfc.h>
+#include <linux/scatterlist.h>
+
+#include "fc_encode.h"
+#include "fc_libfc.h"
+
+/* Fabric IDs to use for point-to-point mode, chosen on whims. */
+#define FC_LOCAL_PTP_FID_LO 0x010101
+#define FC_LOCAL_PTP_FID_HI 0x010102
+
+#define DNS_DELAY 3 /* Discovery delay after RSCN (in seconds)*/
+#define MAX_CT_PAYLOAD 2048
+#define DISCOVERED_PORTS 4
+#define NUMBER_OF_PORTS 1
+
+static void fc_lport_error(struct fc_lport *, struct fc_frame *);
+
+static void fc_lport_enter_reset(struct fc_lport *);
+static void fc_lport_enter_flogi(struct fc_lport *);
+static void fc_lport_enter_dns(struct fc_lport *);
+static void fc_lport_enter_ns(struct fc_lport *, enum fc_lport_state);
+static void fc_lport_enter_scr(struct fc_lport *);
+static void fc_lport_enter_ready(struct fc_lport *);
+static void fc_lport_enter_logo(struct fc_lport *);
+static void fc_lport_enter_fdmi(struct fc_lport *lport);
+static void fc_lport_enter_ms(struct fc_lport *, enum fc_lport_state);
+
+static const char *fc_lport_state_names[] = {
+ [LPORT_ST_DISABLED] = "disabled",
+ [LPORT_ST_FLOGI] = "FLOGI",
+ [LPORT_ST_DNS] = "dNS",
+ [LPORT_ST_RNN_ID] = "RNN_ID",
+ [LPORT_ST_RSNN_NN] = "RSNN_NN",
+ [LPORT_ST_RSPN_ID] = "RSPN_ID",
+ [LPORT_ST_RFT_ID] = "RFT_ID",
+ [LPORT_ST_RFF_ID] = "RFF_ID",
+ [LPORT_ST_FDMI] = "FDMI",
+ [LPORT_ST_RHBA] = "RHBA",
+ [LPORT_ST_RPA] = "RPA",
+ [LPORT_ST_DHBA] = "DHBA",
+ [LPORT_ST_DPRT] = "DPRT",
+ [LPORT_ST_SCR] = "SCR",
+ [LPORT_ST_READY] = "Ready",
+ [LPORT_ST_LOGO] = "LOGO",
+ [LPORT_ST_RESET] = "reset",
+};
+
+/**
+ * struct fc_bsg_info - FC Passthrough management structure
+ * @job: The passthrough job
+ * @lport: The local port to pass through a command
+ * @rsp_code: The expected response code
+ * @sg: job->reply_payload.sg_list
+ * @nents: job->reply_payload.sg_cnt
+ * @offset: The offset into the response data
+ */
+struct fc_bsg_info {
+ struct bsg_job *job;
+ struct fc_lport *lport;
+ u16 rsp_code;
+ struct scatterlist *sg;
+ u32 nents;
+ size_t offset;
+};
+
+/**
+ * fc_frame_drop() - Dummy frame handler
+ * @lport: The local port the frame was received on
+ * @fp: The received frame
+ */
+static int fc_frame_drop(struct fc_lport *lport, struct fc_frame *fp)
+{
+ fc_frame_free(fp);
+ return 0;
+}
+
+/**
+ * fc_lport_rport_callback() - Event handler for rport events
+ * @lport: The lport which is receiving the event
+ * @rdata: private remote port data
+ * @event: The event that occurred
+ *
+ * Locking Note: The rport lock should not be held when calling
+ * this function.
+ */
+static void fc_lport_rport_callback(struct fc_lport *lport,
+ struct fc_rport_priv *rdata,
+ enum fc_rport_event event)
+{
+ FC_LPORT_DBG(lport, "Received a %d event for port (%6.6x)\n", event,
+ rdata->ids.port_id);
+
+ mutex_lock(&lport->lp_mutex);
+ switch (event) {
+ case RPORT_EV_READY:
+ if (lport->state == LPORT_ST_DNS) {
+ lport->dns_rdata = rdata;
+ fc_lport_enter_ns(lport, LPORT_ST_RNN_ID);
+ } else if (lport->state == LPORT_ST_FDMI) {
+ lport->ms_rdata = rdata;
+ fc_lport_enter_ms(lport, LPORT_ST_DHBA);
+ } else {
+ FC_LPORT_DBG(lport, "Received an READY event "
+ "on port (%6.6x) for the directory "
+ "server, but the lport is not "
+ "in the DNS or FDMI state, it's in the "
+ "%d state", rdata->ids.port_id,
+ lport->state);
+ fc_rport_logoff(rdata);
+ }
+ break;
+ case RPORT_EV_LOGO:
+ case RPORT_EV_FAILED:
+ case RPORT_EV_STOP:
+ if (rdata->ids.port_id == FC_FID_DIR_SERV)
+ lport->dns_rdata = NULL;
+ else if (rdata->ids.port_id == FC_FID_MGMT_SERV)
+ lport->ms_rdata = NULL;
+ break;
+ case RPORT_EV_NONE:
+ break;
+ }
+ mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_state() - Return a string which represents the lport's state
+ * @lport: The lport whose state is to be converted to a string
+ */
+static const char *fc_lport_state(struct fc_lport *lport)
+{
+ const char *cp;
+
+ cp = fc_lport_state_names[lport->state];
+ if (!cp)
+ cp = "unknown";
+ return cp;
+}
+
+/**
+ * fc_lport_ptp_setup() - Create an rport for point-to-point mode
+ * @lport: The lport to attach the ptp rport to
+ * @remote_fid: The FID of the ptp rport
+ * @remote_wwpn: The WWPN of the ptp rport
+ * @remote_wwnn: The WWNN of the ptp rport
+ */
+static void fc_lport_ptp_setup(struct fc_lport *lport,
+ u32 remote_fid, u64 remote_wwpn,
+ u64 remote_wwnn)
+{
+ lockdep_assert_held(&lport->lp_mutex);
+
+ if (lport->ptp_rdata) {
+ fc_rport_logoff(lport->ptp_rdata);
+ kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
+ }
+ mutex_lock(&lport->disc.disc_mutex);
+ lport->ptp_rdata = fc_rport_create(lport, remote_fid);
+ if (!lport->ptp_rdata) {
+ printk(KERN_WARNING "libfc: Failed to setup lport 0x%x\n",
+ lport->port_id);
+ mutex_unlock(&lport->disc.disc_mutex);
+ return;
+ }
+ kref_get(&lport->ptp_rdata->kref);
+ lport->ptp_rdata->ids.port_name = remote_wwpn;
+ lport->ptp_rdata->ids.node_name = remote_wwnn;
+ mutex_unlock(&lport->disc.disc_mutex);
+
+ fc_rport_login(lport->ptp_rdata);
+
+ fc_lport_enter_ready(lport);
+}
+
+/**
+ * fc_get_host_port_state() - Return the port state of the given Scsi_Host
+ * @shost: The SCSI host whose port state is to be determined
+ */
+void fc_get_host_port_state(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+
+ mutex_lock(&lport->lp_mutex);
+ if (!lport->link_up)
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ else
+ switch (lport->state) {
+ case LPORT_ST_READY:
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ break;
+ default:
+ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+ }
+ mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_get_host_port_state);
+
+/**
+ * fc_get_host_speed() - Return the speed of the given Scsi_Host
+ * @shost: The SCSI host whose port speed is to be determined
+ */
+void fc_get_host_speed(struct Scsi_Host *shost)
+{
+ struct fc_lport *lport = shost_priv(shost);
+
+ fc_host_speed(shost) = lport->link_speed;
+}
+EXPORT_SYMBOL(fc_get_host_speed);
+
+/**
+ * fc_get_host_stats() - Return the Scsi_Host's statistics
+ * @shost: The SCSI host whose statistics are to be returned
+ */
+struct fc_host_statistics *fc_get_host_stats(struct Scsi_Host *shost)
+{
+ struct fc_host_statistics *fc_stats;
+ struct fc_lport *lport = shost_priv(shost);
+ unsigned int cpu;
+ u64 fcp_in_bytes = 0;
+ u64 fcp_out_bytes = 0;
+
+ fc_stats = &lport->host_stats;
+ memset(fc_stats, 0, sizeof(struct fc_host_statistics));
+
+ fc_stats->seconds_since_last_reset = (jiffies - lport->boot_time) / HZ;
+
+ for_each_possible_cpu(cpu) {
+ struct fc_stats *stats;
+
+ stats = per_cpu_ptr(lport->stats, cpu);
+
+ fc_stats->tx_frames += READ_ONCE(stats->TxFrames);
+ fc_stats->tx_words += READ_ONCE(stats->TxWords);
+ fc_stats->rx_frames += READ_ONCE(stats->RxFrames);
+ fc_stats->rx_words += READ_ONCE(stats->RxWords);
+ fc_stats->error_frames += READ_ONCE(stats->ErrorFrames);
+ fc_stats->invalid_crc_count += READ_ONCE(stats->InvalidCRCCount);
+ fc_stats->fcp_input_requests += READ_ONCE(stats->InputRequests);
+ fc_stats->fcp_output_requests += READ_ONCE(stats->OutputRequests);
+ fc_stats->fcp_control_requests += READ_ONCE(stats->ControlRequests);
+ fcp_in_bytes += READ_ONCE(stats->InputBytes);
+ fcp_out_bytes += READ_ONCE(stats->OutputBytes);
+ fc_stats->fcp_packet_alloc_failures += READ_ONCE(stats->FcpPktAllocFails);
+ fc_stats->fcp_packet_aborts += READ_ONCE(stats->FcpPktAborts);
+ fc_stats->fcp_frame_alloc_failures += READ_ONCE(stats->FcpFrameAllocFails);
+ fc_stats->link_failure_count += READ_ONCE(stats->LinkFailureCount);
+ }
+ fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000);
+ fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000);
+ fc_stats->lip_count = -1;
+ fc_stats->nos_count = -1;
+ fc_stats->loss_of_sync_count = -1;
+ fc_stats->loss_of_signal_count = -1;
+ fc_stats->prim_seq_protocol_err_count = -1;
+ fc_stats->dumped_frames = -1;
+
+ /* update exches stats */
+ fc_exch_update_stats(lport);
+
+ return fc_stats;
+}
+EXPORT_SYMBOL(fc_get_host_stats);
+
+/**
+ * fc_lport_flogi_fill() - Fill in FLOGI command for request
+ * @lport: The local port the FLOGI is for
+ * @flogi: The FLOGI command
+ * @op: The opcode
+ */
+static void fc_lport_flogi_fill(struct fc_lport *lport,
+ struct fc_els_flogi *flogi,
+ unsigned int op)
+{
+ struct fc_els_csp *sp;
+ struct fc_els_cssp *cp;
+
+ memset(flogi, 0, sizeof(*flogi));
+ flogi->fl_cmd = (u8) op;
+ put_unaligned_be64(lport->wwpn, &flogi->fl_wwpn);
+ put_unaligned_be64(lport->wwnn, &flogi->fl_wwnn);
+ sp = &flogi->fl_csp;
+ sp->sp_hi_ver = 0x20;
+ sp->sp_lo_ver = 0x20;
+ sp->sp_bb_cred = htons(10); /* this gets set by gateway */
+ sp->sp_bb_data = htons((u16) lport->mfs);
+ cp = &flogi->fl_cssp[3 - 1]; /* class 3 parameters */
+ cp->cp_class = htons(FC_CPC_VALID | FC_CPC_SEQ);
+ if (op != ELS_FLOGI) {
+ sp->sp_features = htons(FC_SP_FT_CIRO);
+ sp->sp_tot_seq = htons(255); /* seq. we accept */
+ sp->sp_rel_off = htons(0x1f);
+ sp->sp_e_d_tov = htonl(lport->e_d_tov);
+
+ cp->cp_rdfs = htons((u16) lport->mfs);
+ cp->cp_con_seq = htons(255);
+ cp->cp_open_seq = 1;
+ }
+}
+
+/**
+ * fc_lport_add_fc4_type() - Add a supported FC-4 type to a local port
+ * @lport: The local port to add a new FC-4 type to
+ * @type: The new FC-4 type
+ */
+static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
+{
+ __be32 *mp;
+
+ mp = &lport->fcts.ff_type_map[type / FC_NS_BPW];
+ *mp = htonl(ntohl(*mp) | 1UL << (type % FC_NS_BPW));
+}
+
+/**
+ * fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
+ * @lport: Fibre Channel local port receiving the RLIR
+ * @fp: The RLIR request frame
+ */
+static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
+{
+ lockdep_assert_held(&lport->lp_mutex);
+
+ FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
+ fc_lport_state(lport));
+
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_lport_recv_echo_req() - Handle received ECHO request
+ * @lport: The local port receiving the ECHO
+ * @in_fp: ECHO request frame
+ */
+static void fc_lport_recv_echo_req(struct fc_lport *lport,
+ struct fc_frame *in_fp)
+{
+ struct fc_frame *fp;
+ unsigned int len;
+ void *pp;
+ void *dp;
+
+ lockdep_assert_held(&lport->lp_mutex);
+
+ FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
+ fc_lport_state(lport));
+
+ len = fr_len(in_fp) - sizeof(struct fc_frame_header);
+ pp = fc_frame_payload_get(in_fp, len);
+
+ if (len < sizeof(__be32))
+ len = sizeof(__be32);
+
+ fp = fc_frame_alloc(lport, len);
+ if (fp) {
+ dp = fc_frame_payload_get(fp, len);
+ memcpy(dp, pp, len);
+ *((__be32 *)dp) = htonl(ELS_LS_ACC << 24);
+ fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+ }
+ fc_frame_free(in_fp);
+}
+
+/**
+ * fc_lport_recv_rnid_req() - Handle received Request Node ID data request
+ * @lport: The local port receiving the RNID
+ * @in_fp: The RNID request frame
+ */
+static void fc_lport_recv_rnid_req(struct fc_lport *lport,
+ struct fc_frame *in_fp)
+{
+ struct fc_frame *fp;
+ struct fc_els_rnid *req;
+ struct {
+ struct fc_els_rnid_resp rnid;
+ struct fc_els_rnid_cid cid;
+ struct fc_els_rnid_gen gen;
+ } *rp;
+ struct fc_seq_els_data rjt_data;
+ u8 fmt;
+ size_t len;
+
+ lockdep_assert_held(&lport->lp_mutex);
+
+ FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
+ fc_lport_state(lport));
+
+ req = fc_frame_payload_get(in_fp, sizeof(*req));
+ if (!req) {
+ rjt_data.reason = ELS_RJT_LOGIC;
+ rjt_data.explan = ELS_EXPL_NONE;
+ fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+ } else {
+ fmt = req->rnid_fmt;
+ len = sizeof(*rp);
+ if (fmt != ELS_RNIDF_GEN ||
+ ntohl(lport->rnid_gen.rnid_atype) == 0) {
+ fmt = ELS_RNIDF_NONE; /* nothing to provide */
+ len -= sizeof(rp->gen);
+ }
+ fp = fc_frame_alloc(lport, len);
+ if (fp) {
+ rp = fc_frame_payload_get(fp, len);
+ memset(rp, 0, len);
+ rp->rnid.rnid_cmd = ELS_LS_ACC;
+ rp->rnid.rnid_fmt = fmt;
+ rp->rnid.rnid_cid_len = sizeof(rp->cid);
+ rp->cid.rnid_wwpn = htonll(lport->wwpn);
+ rp->cid.rnid_wwnn = htonll(lport->wwnn);
+ if (fmt == ELS_RNIDF_GEN) {
+ rp->rnid.rnid_sid_len = sizeof(rp->gen);
+ memcpy(&rp->gen, &lport->rnid_gen,
+ sizeof(rp->gen));
+ }
+ fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+ }
+ }
+ fc_frame_free(in_fp);
+}
+
+/**
+ * fc_lport_recv_logo_req() - Handle received fabric LOGO request
+ * @lport: The local port receiving the LOGO
+ * @fp: The LOGO request frame
+ */
+static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
+{
+ lockdep_assert_held(&lport->lp_mutex);
+
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+ fc_lport_enter_reset(lport);
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_fabric_login() - Start the lport state machine
+ * @lport: The local port that should log into the fabric
+ *
+ * Locking Note: This function should not be called
+ * with the lport lock held.
+ */
+int fc_fabric_login(struct fc_lport *lport)
+{
+ int rc = -1;
+
+ mutex_lock(&lport->lp_mutex);
+ if (lport->state == LPORT_ST_DISABLED ||
+ lport->state == LPORT_ST_LOGO) {
+ fc_lport_state_enter(lport, LPORT_ST_RESET);
+ fc_lport_enter_reset(lport);
+ rc = 0;
+ }
+ mutex_unlock(&lport->lp_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL(fc_fabric_login);
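+
+/*
+ * Example (illustrative only): a lower-level driver typically starts the
+ * state machine and then reports the link, at which point FLOGI is sent:
+ *
+ *	fc_fabric_login(lport);
+ *	fc_linkup(lport);
+ */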
+
+/**
+ * __fc_linkup() - Handler for transport linkup events
+ * @lport: The lport whose link is up
+ */
+void __fc_linkup(struct fc_lport *lport)
+{
+ lockdep_assert_held(&lport->lp_mutex);
+
+ if (!lport->link_up) {
+ lport->link_up = 1;
+
+ if (lport->state == LPORT_ST_RESET)
+ fc_lport_enter_flogi(lport);
+ }
+}
+
+/**
+ * fc_linkup() - Handler for transport linkup events
+ * @lport: The local port whose link is up
+ */
+void fc_linkup(struct fc_lport *lport)
+{
+ printk(KERN_INFO "host%d: libfc: Link up on port (%6.6x)\n",
+ lport->host->host_no, lport->port_id);
+
+ mutex_lock(&lport->lp_mutex);
+ __fc_linkup(lport);
+ mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_linkup);
+
+/**
+ * __fc_linkdown() - Handler for transport linkdown events
+ * @lport: The lport whose link is down
+ */
+void __fc_linkdown(struct fc_lport *lport)
+{
+ lockdep_assert_held(&lport->lp_mutex);
+
+ if (lport->link_up) {
+ lport->link_up = 0;
+ fc_lport_enter_reset(lport);
+ lport->tt.fcp_cleanup(lport);
+ }
+}
+
+/**
+ * fc_linkdown() - Handler for transport linkdown events
+ * @lport: The local port whose link is down
+ */
+void fc_linkdown(struct fc_lport *lport)
+{
+ printk(KERN_INFO "host%d: libfc: Link down on port (%6.6x)\n",
+ lport->host->host_no, lport->port_id);
+
+ mutex_lock(&lport->lp_mutex);
+ __fc_linkdown(lport);
+ mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_linkdown);
+
+/**
+ * fc_fabric_logoff() - Logout of the fabric
+ * @lport: The local port to logoff the fabric
+ *
+ * Return value:
+ * 0 for success (this implementation always succeeds)
+ */
+int fc_fabric_logoff(struct fc_lport *lport)
+{
+ lport->tt.disc_stop_final(lport);
+ mutex_lock(&lport->lp_mutex);
+ if (lport->dns_rdata)
+ fc_rport_logoff(lport->dns_rdata);
+ mutex_unlock(&lport->lp_mutex);
+ fc_rport_flush_queue();
+ mutex_lock(&lport->lp_mutex);
+ fc_lport_enter_logo(lport);
+ mutex_unlock(&lport->lp_mutex);
+ cancel_delayed_work_sync(&lport->retry_work);
+ return 0;
+}
+EXPORT_SYMBOL(fc_fabric_logoff);
+
+/**
+ * fc_lport_destroy() - Unregister a fc_lport
+ * @lport: The local port to unregister
+ *
+ * Note:
+ * This is the exit routine for an fc_lport instance. It cleans up all
+ * allocated memory and frees up other system resources.
+ *
+ */
+int fc_lport_destroy(struct fc_lport *lport)
+{
+ mutex_lock(&lport->lp_mutex);
+ lport->state = LPORT_ST_DISABLED;
+ lport->link_up = 0;
+ lport->tt.frame_send = fc_frame_drop;
+ mutex_unlock(&lport->lp_mutex);
+
+ lport->tt.fcp_abort_io(lport);
+ lport->tt.disc_stop_final(lport);
+ lport->tt.exch_mgr_reset(lport, 0, 0);
+ cancel_delayed_work_sync(&lport->retry_work);
+ fc_fc4_del_lport(lport);
+ return 0;
+}
+EXPORT_SYMBOL(fc_lport_destroy);
+
+/**
+ * fc_set_mfs() - Set the maximum frame size for a local port
+ * @lport: The local port to set the MFS for
+ * @mfs: The new MFS
+ */
+int fc_set_mfs(struct fc_lport *lport, u32 mfs)
+{
+ unsigned int old_mfs;
+ int rc = -EINVAL;
+
+ mutex_lock(&lport->lp_mutex);
+
+ old_mfs = lport->mfs;
+
+ if (mfs >= FC_MIN_MAX_FRAME) {
+ mfs &= ~3;
+ if (mfs > FC_MAX_FRAME)
+ mfs = FC_MAX_FRAME;
+ mfs -= sizeof(struct fc_frame_header);
+ lport->mfs = mfs;
+ rc = 0;
+ }
+
+ if (!rc && mfs < old_mfs)
+ fc_lport_enter_reset(lport);
+
+ mutex_unlock(&lport->lp_mutex);
+
+ return rc;
+}
+EXPORT_SYMBOL(fc_set_mfs);
+
+/**
+ * fc_lport_disc_callback() - Callback for discovery events
+ * @lport: The local port receiving the event
+ * @event: The discovery event
+ */
+static void fc_lport_disc_callback(struct fc_lport *lport,
+ enum fc_disc_event event)
+{
+ switch (event) {
+ case DISC_EV_SUCCESS:
+ FC_LPORT_DBG(lport, "Discovery succeeded\n");
+ break;
+ case DISC_EV_FAILED:
+ printk(KERN_ERR "host%d: libfc: "
+ "Discovery failed for port (%6.6x)\n",
+ lport->host->host_no, lport->port_id);
+ mutex_lock(&lport->lp_mutex);
+ fc_lport_enter_reset(lport);
+ mutex_unlock(&lport->lp_mutex);
+ break;
+ case DISC_EV_NONE:
+ WARN_ON(1);
+ break;
+ }
+}
+
+/**
+ * fc_lport_enter_ready() - Enter the ready state and start discovery
+ * @lport: The local port that is ready
+ */
+static void fc_lport_enter_ready(struct fc_lport *lport)
+{
+ lockdep_assert_held(&lport->lp_mutex);
+
+ FC_LPORT_DBG(lport, "Entered READY from state %s\n",
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_READY);
+ if (lport->vport)
+ fc_vport_set_state(lport->vport, FC_VPORT_ACTIVE);
+ fc_vports_linkchange(lport);
+
+ if (!lport->ptp_rdata)
+ lport->tt.disc_start(fc_lport_disc_callback, lport);
+}
+
+/**
+ * fc_lport_set_port_id() - set the local port Port ID
+ * @lport: The local port which will have its Port ID set.
+ * @port_id: The new port ID.
+ * @fp: The frame containing the incoming request, or NULL.
+ */
+static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
+ struct fc_frame *fp)
+{
+ lockdep_assert_held(&lport->lp_mutex);
+
+ if (port_id)
+ printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
+ lport->host->host_no, port_id);
+
+ lport->port_id = port_id;
+
+ /* Update the fc_host */
+ fc_host_port_id(lport->host) = port_id;
+
+ if (lport->tt.lport_set_port_id)
+ lport->tt.lport_set_port_id(lport, port_id, fp);
+}
+
+/**
+ * fc_lport_set_local_id() - set the local port Port ID for point-to-multipoint
+ * @lport: The local port which will have its Port ID set.
+ * @port_id: The new port ID.
+ *
+ * Called by the lower-level driver when transport sets the local port_id.
+ * This is used in VN_port to VN_port mode for FCoE, and causes FLOGI and
+ * discovery to be skipped.
+ */
+void fc_lport_set_local_id(struct fc_lport *lport, u32 port_id)
+{
+ mutex_lock(&lport->lp_mutex);
+
+ fc_lport_set_port_id(lport, port_id, NULL);
+
+ switch (lport->state) {
+ case LPORT_ST_RESET:
+ case LPORT_ST_FLOGI:
+ if (port_id)
+ fc_lport_enter_ready(lport);
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_lport_set_local_id);
+
+/**
+ * fc_lport_recv_flogi_req() - Receive a FLOGI request
+ * @lport: The local port that received the request
+ * @rx_fp: The FLOGI frame
+ *
+ * A received FLOGI request indicates a point-to-point connection.
+ * Accept it with the common service parameters indicating our N port.
+ * Set up to do a PLOGI if we have the higher-number WWPN.
+ */
+static void fc_lport_recv_flogi_req(struct fc_lport *lport,
+ struct fc_frame *rx_fp)
+{
+ struct fc_frame *fp;
+ struct fc_frame_header *fh;
+ struct fc_els_flogi *flp;
+ struct fc_els_flogi *new_flp;
+ u64 remote_wwpn;
+ u32 remote_fid;
+ u32 local_fid;
+
+ lockdep_assert_held(&lport->lp_mutex);
+
+ FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
+ fc_lport_state(lport));
+
+ remote_fid = fc_frame_sid(rx_fp);
+ flp = fc_frame_payload_get(rx_fp, sizeof(*flp));
+ if (!flp)
+ goto out;
+ remote_wwpn = get_unaligned_be64(&flp->fl_wwpn);
+ if (remote_wwpn == lport->wwpn) {
+ printk(KERN_WARNING "host%d: libfc: Received FLOGI from port "
+ "with same WWPN %16.16llx\n",
+ lport->host->host_no, remote_wwpn);
+ goto out;
+ }
+ FC_LPORT_DBG(lport, "FLOGI from port WWPN %16.16llx\n", remote_wwpn);
+
+ /*
+ * XXX what is the right thing to do for FIDs?
+ * The originator might expect our S_ID to be 0xfffffe.
+ * But if so, both of us could end up with the same FID.
+ */
+ local_fid = FC_LOCAL_PTP_FID_LO;
+ if (remote_wwpn < lport->wwpn) {
+ local_fid = FC_LOCAL_PTP_FID_HI;
+ if (!remote_fid || remote_fid == local_fid)
+ remote_fid = FC_LOCAL_PTP_FID_LO;
+ } else if (!remote_fid) {
+ remote_fid = FC_LOCAL_PTP_FID_HI;
+ }
+
+ fc_lport_set_port_id(lport, local_fid, rx_fp);
+
+ fp = fc_frame_alloc(lport, sizeof(*flp));
+ if (fp) {
+ new_flp = fc_frame_payload_get(fp, sizeof(*flp));
+ fc_lport_flogi_fill(lport, new_flp, ELS_FLOGI);
+ new_flp->fl_cmd = (u8) ELS_LS_ACC;
+
+ /*
+ * Send the response. If this fails, the originator should
+ * repeat the sequence.
+ */
+ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
+ fh = fc_frame_header_get(fp);
+ hton24(fh->fh_s_id, local_fid);
+ hton24(fh->fh_d_id, remote_fid);
+ lport->tt.frame_send(lport, fp);
+
+ } else {
+ fc_lport_error(lport, fp);
+ }
+ fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
+ get_unaligned_be64(&flp->fl_wwnn));
+out:
+ fc_frame_free(rx_fp);
+}
+
+/**
+ * fc_lport_recv_els_req() - The generic lport ELS request handler
+ * @lport: The local port that received the request
+ * @fp: The request frame
+ *
+ * This function will see if the lport handles the request or
+ * if an rport should handle the request.
+ *
+ * Locking Note: This function should not be called with the lport
+ * lock held because it will grab the lock.
+ */
+static void fc_lport_recv_els_req(struct fc_lport *lport,
+ struct fc_frame *fp)
+{
+ mutex_lock(&lport->lp_mutex);
+
+ /*
+ * Handle special ELS cases like FLOGI, LOGO, and
+ * RSCN here. These don't require a session.
+ * Even if we had a session, it might not be ready.
+ */
+ if (!lport->link_up)
+ fc_frame_free(fp);
+ else {
+ /*
+ * Check opcode.
+ */
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_FLOGI:
+ if (!lport->point_to_multipoint)
+ fc_lport_recv_flogi_req(lport, fp);
+ else
+ fc_rport_recv_req(lport, fp);
+ break;
+ case ELS_LOGO:
+ if (fc_frame_sid(fp) == FC_FID_FLOGI)
+ fc_lport_recv_logo_req(lport, fp);
+ else
+ fc_rport_recv_req(lport, fp);
+ break;
+ case ELS_RSCN:
+ lport->tt.disc_recv_req(lport, fp);
+ break;
+ case ELS_ECHO:
+ fc_lport_recv_echo_req(lport, fp);
+ break;
+ case ELS_RLIR:
+ fc_lport_recv_rlir_req(lport, fp);
+ break;
+ case ELS_RNID:
+ fc_lport_recv_rnid_req(lport, fp);
+ break;
+ default:
+ fc_rport_recv_req(lport, fp);
+ break;
+ }
+ }
+ mutex_unlock(&lport->lp_mutex);
+}
+
+static int fc_lport_els_prli(struct fc_rport_priv *rdata, u32 spp_len,
+ const struct fc_els_spp *spp_in,
+ struct fc_els_spp *spp_out)
+{
+ return FC_SPP_RESP_INVL;
+}
+
+struct fc4_prov fc_lport_els_prov = {
+ .prli = fc_lport_els_prli,
+ .recv = fc_lport_recv_els_req,
+};
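+
+/*
+ * libfc installs the ELS provider above statically via the
+ * fc_passive_prov[] table in fc_libfc.c. Layered FC-4 modules can hook in
+ * at runtime instead; a sketch of that registration (mirroring what
+ * tcm_fc does for FCP):
+ *
+ *	rc = fc_fc4_register_provider(FC_TYPE_FCP, &ft_prov);
+ */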
+
+/**
+ * fc_lport_recv() - The generic lport request handler
+ * @lport: The lport that received the request
+ * @fp: The frame the request is in
+ *
+ * Locking Note: This function should not be called with the lport
+ * lock held because it may grab the lock.
+ */
+void fc_lport_recv(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_frame_header *fh = fc_frame_header_get(fp);
+ struct fc_seq *sp = fr_seq(fp);
+ struct fc4_prov *prov;
+
+ /*
+ * Use RCU read lock and module_lock to be sure module doesn't
+ * deregister and get unloaded while we're calling it.
+ * try_module_get() is inlined and accepts a NULL parameter.
+ * Only ELSes and FCP target ops should come through here.
+ * The locking is unfortunate, and a better scheme is being sought.
+ */
+
+ rcu_read_lock();
+ if (fh->fh_type >= FC_FC4_PROV_SIZE)
+ goto drop;
+ prov = rcu_dereference(fc_passive_prov[fh->fh_type]);
+ if (!prov || !try_module_get(prov->module))
+ goto drop;
+ rcu_read_unlock();
+ prov->recv(lport, fp);
+ module_put(prov->module);
+ return;
+drop:
+ rcu_read_unlock();
+ FC_LPORT_DBG(lport, "dropping unexpected frame type %x\n", fh->fh_type);
+ fc_frame_free(fp);
+ if (sp)
+ fc_exch_done(sp);
+}
+EXPORT_SYMBOL(fc_lport_recv);
+
+/**
+ * fc_lport_reset() - Reset a local port
+ * @lport: The local port which should be reset
+ *
+ * Locking Note: This function should not be called with the
+ * lport lock held.
+ */
+int fc_lport_reset(struct fc_lport *lport)
+{
+ cancel_delayed_work_sync(&lport->retry_work);
+ mutex_lock(&lport->lp_mutex);
+ fc_lport_enter_reset(lport);
+ mutex_unlock(&lport->lp_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(fc_lport_reset);
+
+/**
+ * fc_lport_reset_locked() - Reset the local port w/ the lport lock held
+ * @lport: The local port to be reset
+ */
+static void fc_lport_reset_locked(struct fc_lport *lport)
+{
+ lockdep_assert_held(&lport->lp_mutex);
+
+ if (lport->dns_rdata) {
+ fc_rport_logoff(lport->dns_rdata);
+ lport->dns_rdata = NULL;
+ }
+
+ if (lport->ptp_rdata) {
+ fc_rport_logoff(lport->ptp_rdata);
+ kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
+ lport->ptp_rdata = NULL;
+ }
+
+ lport->tt.disc_stop(lport);
+
+ lport->tt.exch_mgr_reset(lport, 0, 0);
+ fc_host_fabric_name(lport->host) = 0;
+
+ if (lport->port_id && (!lport->point_to_multipoint || !lport->link_up))
+ fc_lport_set_port_id(lport, 0, NULL);
+}
+
+/**
+ * fc_lport_enter_reset() - Reset the local port
+ * @lport: The local port to be reset
+ */
+static void fc_lport_enter_reset(struct fc_lport *lport)
+{
+ lockdep_assert_held(&lport->lp_mutex);
+
+ FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
+ fc_lport_state(lport));
+
+ if (lport->state == LPORT_ST_DISABLED || lport->state == LPORT_ST_LOGO)
+ return;
+
+ if (lport->vport) {
+ if (lport->link_up)
+ fc_vport_set_state(lport->vport, FC_VPORT_INITIALIZING);
+ else
+ fc_vport_set_state(lport->vport, FC_VPORT_LINKDOWN);
+ }
+ fc_lport_state_enter(lport, LPORT_ST_RESET);
+ fc_host_post_event(lport->host, fc_get_event_number(),
+ FCH_EVT_LIPRESET, 0);
+ fc_vports_linkchange(lport);
+ fc_lport_reset_locked(lport);
+ if (lport->link_up)
+ fc_lport_enter_flogi(lport);
+}
+
+/**
+ * fc_lport_enter_disabled() - Disable the local port
+ * @lport: The local port to be reset
+ */
+static void fc_lport_enter_disabled(struct fc_lport *lport)
+{
+ lockdep_assert_held(&lport->lp_mutex);
+
+ FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_DISABLED);
+ fc_vports_linkchange(lport);
+ fc_lport_reset_locked(lport);
+}
+
+/**
+ * fc_lport_error() - Handler for any errors
+ * @lport: The local port that the error was on
+ * @fp: The error code encoded in a frame pointer
+ *
+ * If the error was caused by a resource allocation failure,
+ * wait for half a second and retry; otherwise retry
+ * after the e_d_tov time.
+ */
+static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
+{
+ unsigned long delay = 0;
+
+ FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
+ IS_ERR(fp) ? -PTR_ERR(fp) : 0, fc_lport_state(lport),
+ lport->retry_count);
+
+ if (PTR_ERR(fp) == -FC_EX_CLOSED)
+ return;
+
+ /*
+ * Memory allocation failure, the exchange timed out,
+ * or we received LS_RJT.
+ * Retry after a delay.
+ */
+ if (lport->retry_count < lport->max_retry_count) {
+ lport->retry_count++;
+ if (!fp)
+ delay = msecs_to_jiffies(500);
+ else
+ delay = msecs_to_jiffies(lport->e_d_tov);
+
+ schedule_delayed_work(&lport->retry_work, delay);
+ } else
+ fc_lport_enter_reset(lport);
+}
+
+/**
+ * fc_lport_ns_resp() - Handle response to a name server
+ * registration exchange
+ * @sp: current sequence in exchange
+ * @fp: response frame
+ * @lp_arg: Fibre Channel host port instance
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error()
+ * and then unlock the lport.
+ */
+static void fc_lport_ns_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *lp_arg)
+{
+ struct fc_lport *lport = lp_arg;
+ struct fc_frame_header *fh;
+ struct fc_ct_hdr *ct;
+
+ FC_LPORT_DBG(lport, "Received a ns %s\n", fc_els_resp_type(fp));
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ return;
+
+ mutex_lock(&lport->lp_mutex);
+
+ if (lport->state < LPORT_ST_RNN_ID || lport->state > LPORT_ST_RFF_ID) {
+ FC_LPORT_DBG(lport, "Received a name server response, "
+ "but in state %s\n", fc_lport_state(lport));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ fh = fc_frame_header_get(fp);
+ ct = fc_frame_payload_get(fp, sizeof(*ct));
+
+ if (fh && ct && fh->fh_type == FC_TYPE_CT &&
+ ct->ct_fs_type == FC_FST_DIR &&
+ ct->ct_fs_subtype == FC_NS_SUBTYPE &&
+ ntohs(ct->ct_cmd) == FC_FS_ACC)
+ switch (lport->state) {
+ case LPORT_ST_RNN_ID:
+ fc_lport_enter_ns(lport, LPORT_ST_RSNN_NN);
+ break;
+ case LPORT_ST_RSNN_NN:
+ fc_lport_enter_ns(lport, LPORT_ST_RSPN_ID);
+ break;
+ case LPORT_ST_RSPN_ID:
+ fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
+ break;
+ case LPORT_ST_RFT_ID:
+ fc_lport_enter_ns(lport, LPORT_ST_RFF_ID);
+ break;
+ case LPORT_ST_RFF_ID:
+ if (lport->fdmi_enabled)
+ fc_lport_enter_fdmi(lport);
+ else
+ fc_lport_enter_scr(lport);
+ break;
+ default:
+ /* should have already been caught by state checks */
+ break;
+ }
+ else
+ fc_lport_error(lport, fp);
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&lport->lp_mutex);
+}
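+
+/*
+ * For reference, the response handler above walks the registration
+ * sequence RNN_ID -> RSNN_NN -> RSPN_ID -> RFT_ID -> RFF_ID; once RFF_ID
+ * is accepted it branches to FDMI when enabled, otherwise to SCR. States
+ * needing a symbolic name are skipped in fc_lport_enter_ns() when none
+ * is set.
+ */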
+
+/**
+ * fc_lport_ms_resp() - Handle response to a management server
+ * exchange
+ * @sp: current sequence in exchange
+ * @fp: response frame
+ * @lp_arg: Fibre Channel host port instance
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error()
+ * and then unlock the lport.
+ */
+static void fc_lport_ms_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *lp_arg)
+{
+ struct fc_lport *lport = lp_arg;
+ struct fc_frame_header *fh;
+ struct fc_ct_hdr *ct;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
+
+ FC_LPORT_DBG(lport, "Received a ms %s\n", fc_els_resp_type(fp));
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ return;
+
+ mutex_lock(&lport->lp_mutex);
+
+ if (lport->state < LPORT_ST_RHBA || lport->state > LPORT_ST_DPRT) {
+ FC_LPORT_DBG(lport, "Received a management server response, "
+ "but in state %s\n", fc_lport_state(lport));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ fh = fc_frame_header_get(fp);
+ ct = fc_frame_payload_get(fp, sizeof(*ct));
+
+ if (fh && ct && fh->fh_type == FC_TYPE_CT &&
+ ct->ct_fs_type == FC_FST_MGMT &&
+ ct->ct_fs_subtype == FC_FDMI_SUBTYPE) {
+ FC_LPORT_DBG(lport, "Received a management server response, "
+ "reason=%d explain=%d\n",
+ ct->ct_reason,
+ ct->ct_explan);
+
+ switch (lport->state) {
+ case LPORT_ST_RHBA:
+ if ((ntohs(ct->ct_cmd) == FC_FS_RJT) && fc_host->fdmi_version == FDMI_V2) {
+ FC_LPORT_DBG(lport, "Error for FDMI-V2, fall back to FDMI-V1\n");
+ fc_host->fdmi_version = FDMI_V1;
+
+ fc_lport_enter_ms(lport, LPORT_ST_RHBA);
+
+ } else if (ntohs(ct->ct_cmd) == FC_FS_ACC)
+ fc_lport_enter_ms(lport, LPORT_ST_RPA);
+ else /* Error: skip RPA */
+ fc_lport_enter_scr(lport);
+ break;
+ case LPORT_ST_RPA:
+ fc_lport_enter_scr(lport);
+ break;
+ case LPORT_ST_DPRT:
+ fc_lport_enter_ms(lport, LPORT_ST_RHBA);
+ break;
+ case LPORT_ST_DHBA:
+ fc_lport_enter_ms(lport, LPORT_ST_DPRT);
+ break;
+ default:
+ /* should have already been caught by state checks */
+ break;
+ }
+ } else {
+ /* Invalid Frame? */
+ fc_lport_error(lport, fp);
+ }
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_scr_resp() - Handle response to State Change Register (SCR) request
+ * @sp: current sequence in SCR exchange
+ * @fp: response frame
+ * @lp_arg: Fibre Channel lport port instance that sent the registration request
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error
+ * and then unlock the lport.
+ */
+static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *lp_arg)
+{
+ struct fc_lport *lport = lp_arg;
+ u8 op;
+
+ FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ return;
+
+ mutex_lock(&lport->lp_mutex);
+
+ if (lport->state != LPORT_ST_SCR) {
+ FC_LPORT_DBG(lport, "Received a SCR response, but in state "
+ "%s\n", fc_lport_state(lport));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC)
+ fc_lport_enter_ready(lport);
+ else
+ fc_lport_error(lport, fp);
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_enter_scr() - Send a SCR (State Change Register) request
+ * @lport: The local port to register for state changes
+ */
+static void fc_lport_enter_scr(struct fc_lport *lport)
+{
+ struct fc_frame *fp;
+
+ lockdep_assert_held(&lport->lp_mutex);
+
+ FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_SCR);
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_scr));
+ if (!fp) {
+ fc_lport_error(lport, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
+ fc_lport_scr_resp, lport,
+ 2 * lport->r_a_tov))
+ fc_lport_error(lport, NULL);
+}
+
+/**
+ * fc_lport_enter_ns() - Send a registration request to the name server
+ * @lport: Fibre Channel local port to register
+ * @state: Local port state
+ */
+static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
+{
+ struct fc_frame *fp;
+ enum fc_ns_req cmd;
+ int size = sizeof(struct fc_ct_hdr);
+ size_t len;
+
+ lockdep_assert_held(&lport->lp_mutex);
+
+ FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
+ fc_lport_state_names[state],
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, state);
+
+ switch (state) {
+ case LPORT_ST_RNN_ID:
+ cmd = FC_NS_RNN_ID;
+ size += sizeof(struct fc_ns_rn_id);
+ break;
+ case LPORT_ST_RSNN_NN:
+ len = strnlen(fc_host_symbolic_name(lport->host), 255);
+ /* if there is no symbolic name, skip to RFT_ID */
+ if (!len)
+ return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
+ cmd = FC_NS_RSNN_NN;
+ size += sizeof(struct fc_ns_rsnn) + len;
+ break;
+ case LPORT_ST_RSPN_ID:
+ len = strnlen(fc_host_symbolic_name(lport->host), 255);
+ /* if there is no symbolic name, skip to RFT_ID */
+ if (!len)
+ return fc_lport_enter_ns(lport, LPORT_ST_RFT_ID);
+ cmd = FC_NS_RSPN_ID;
+ size += sizeof(struct fc_ns_rspn) + len;
+ break;
+ case LPORT_ST_RFT_ID:
+ cmd = FC_NS_RFT_ID;
+ size += sizeof(struct fc_ns_rft);
+ break;
+ case LPORT_ST_RFF_ID:
+ cmd = FC_NS_RFF_ID;
+ size += sizeof(struct fc_ns_rff_id);
+ break;
+ default:
+ fc_lport_error(lport, NULL);
+ return;
+ }
+
+ fp = fc_frame_alloc(lport, size);
+ if (!fp) {
+ fc_lport_error(lport, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, cmd,
+ fc_lport_ns_resp,
+ lport, 3 * lport->r_a_tov))
+ fc_lport_error(lport, fp);
+}
+
+static struct fc_rport_operations fc_lport_rport_ops = {
+ .event_callback = fc_lport_rport_callback,
+};
+
+/**
+ * fc_lport_enter_dns() - Create a fc_rport for the name server
+ * @lport: The local port requesting a remote port for the name server
+ */
+static void fc_lport_enter_dns(struct fc_lport *lport)
+{
+ struct fc_rport_priv *rdata;
+
+ lockdep_assert_held(&lport->lp_mutex);
+
+ FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_DNS);
+
+ mutex_lock(&lport->disc.disc_mutex);
+ rdata = fc_rport_create(lport, FC_FID_DIR_SERV);
+ mutex_unlock(&lport->disc.disc_mutex);
+ if (!rdata)
+ goto err;
+
+ rdata->ops = &fc_lport_rport_ops;
+ fc_rport_login(rdata);
+ return;
+
+err:
+ fc_lport_error(lport, NULL);
+}
+
+/**
+ * fc_lport_enter_ms() - management server commands
+ * @lport: Fibre Channel local port to register
+ * @state: Local port state
+ */
+static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
+{
+ struct fc_frame *fp;
+ enum fc_fdmi_req cmd;
+ int size = sizeof(struct fc_ct_hdr);
+ size_t len;
+ int numattrs;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
+
+ lockdep_assert_held(&lport->lp_mutex);
+
+ FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
+ fc_lport_state_names[state],
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, state);
+
+ switch (state) {
+ case LPORT_ST_RHBA:
+ cmd = FC_FDMI_RHBA;
+ /* Number of HBA Attributes */
+ numattrs = 11;
+ len = sizeof(struct fc_fdmi_rhba);
+ len -= sizeof(struct fc_fdmi_attr_entry);
+
+ len += FC_FDMI_HBA_ATTR_NODENAME_LEN;
+ len += FC_FDMI_HBA_ATTR_MANUFACTURER_LEN;
+ len += FC_FDMI_HBA_ATTR_SERIALNUMBER_LEN;
+ len += FC_FDMI_HBA_ATTR_MODEL_LEN;
+ len += FC_FDMI_HBA_ATTR_MODELDESCR_LEN;
+ len += FC_FDMI_HBA_ATTR_HARDWAREVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_DRIVERVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_OPTIONROMVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_FIRMWAREVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN;
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+ numattrs += 7;
+ len += FC_FDMI_HBA_ATTR_NODESYMBLNAME_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORSPECIFICINFO_LEN;
+ len += FC_FDMI_HBA_ATTR_NUMBEROFPORTS_LEN;
+ len += FC_FDMI_HBA_ATTR_FABRICNAME_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSVERSION_LEN;
+ len += FC_FDMI_HBA_ATTR_BIOSSTATE_LEN;
+ len += FC_FDMI_HBA_ATTR_VENDORIDENTIFIER_LEN;
+ }
+
+ len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+
+ size += len;
+ break;
+ case LPORT_ST_RPA:
+ cmd = FC_FDMI_RPA;
+ /* Number of Port Attributes */
+ numattrs = 6;
+ len = sizeof(struct fc_fdmi_rpa);
+ len -= sizeof(struct fc_fdmi_attr_entry);
+ len += FC_FDMI_PORT_ATTR_FC4TYPES_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN;
+ len += FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN;
+ len += FC_FDMI_PORT_ATTR_OSDEVICENAME_LEN;
+ len += FC_FDMI_PORT_ATTR_HOSTNAME_LEN;
+
+ if (fc_host->fdmi_version == FDMI_V2) {
+ numattrs += 10;
+ len += FC_FDMI_PORT_ATTR_NODENAME_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_SYMBOLICNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTTYPE_LEN;
+ len += FC_FDMI_PORT_ATTR_SUPPORTEDCLASSSRVC_LEN;
+ len += FC_FDMI_PORT_ATTR_FABRICNAME_LEN;
+ len += FC_FDMI_PORT_ATTR_CURRENTFC4TYPE_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTSTATE_LEN;
+ len += FC_FDMI_PORT_ATTR_DISCOVEREDPORTS_LEN;
+ len += FC_FDMI_PORT_ATTR_PORTID_LEN;
+ }
+
+ len += (numattrs * FC_FDMI_ATTR_ENTRY_HEADER_LEN);
+
+ size += len;
+ break;
+ case LPORT_ST_DPRT:
+ cmd = FC_FDMI_DPRT;
+ len = sizeof(struct fc_fdmi_dprt);
+ size += len;
+ break;
+ case LPORT_ST_DHBA:
+ cmd = FC_FDMI_DHBA;
+ len = sizeof(struct fc_fdmi_dhba);
+ size += len;
+ break;
+ default:
+ fc_lport_error(lport, NULL);
+ return;
+ }
+
+ FC_LPORT_DBG(lport, "Cmd=0x%x Len %d size %d\n",
+ cmd, (int)len, size);
+ fp = fc_frame_alloc(lport, size);
+ if (!fp) {
+ fc_lport_error(lport, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, FC_FID_MGMT_SERV, fp, cmd,
+ fc_lport_ms_resp,
+ lport, 3 * lport->r_a_tov))
+ fc_lport_error(lport, fp);
+}
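+
+/*
+ * For reference, the management server (FDMI) sequence driven by
+ * fc_lport_ms_resp() is DHBA -> DPRT -> RHBA -> RPA -> SCR. If RHBA is
+ * rejected while fdmi_version is FDMI_V2, the code falls back to FDMI_V1
+ * and retries RHBA; on other errors it skips RPA and goes straight to SCR.
+ */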
+
+/**
+ * fc_lport_enter_fdmi() - Create a fc_rport for the management server
+ * @lport: The local port requesting a remote port for the management server
+ */
+static void fc_lport_enter_fdmi(struct fc_lport *lport)
+{
+ struct fc_rport_priv *rdata;
+
+ lockdep_assert_held(&lport->lp_mutex);
+
+ FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n",
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_FDMI);
+
+ mutex_lock(&lport->disc.disc_mutex);
+ rdata = fc_rport_create(lport, FC_FID_MGMT_SERV);
+ mutex_unlock(&lport->disc.disc_mutex);
+ if (!rdata)
+ goto err;
+
+ rdata->ops = &fc_lport_rport_ops;
+ fc_rport_login(rdata);
+ return;
+
+err:
+ fc_lport_error(lport, NULL);
+}
+
+/**
+ * fc_lport_timeout() - Handler for the retry_work timer
+ * @work: The work struct of the local port
+ */
+static void fc_lport_timeout(struct work_struct *work)
+{
+ struct fc_lport *lport =
+ container_of(work, struct fc_lport,
+ retry_work.work);
+ struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
+
+ mutex_lock(&lport->lp_mutex);
+
+ switch (lport->state) {
+ case LPORT_ST_DISABLED:
+ break;
+ case LPORT_ST_READY:
+ break;
+ case LPORT_ST_RESET:
+ break;
+ case LPORT_ST_FLOGI:
+ fc_lport_enter_flogi(lport);
+ break;
+ case LPORT_ST_DNS:
+ fc_lport_enter_dns(lport);
+ break;
+ case LPORT_ST_RNN_ID:
+ case LPORT_ST_RSNN_NN:
+ case LPORT_ST_RSPN_ID:
+ case LPORT_ST_RFT_ID:
+ case LPORT_ST_RFF_ID:
+ fc_lport_enter_ns(lport, lport->state);
+ break;
+ case LPORT_ST_FDMI:
+ fc_lport_enter_fdmi(lport);
+ break;
+ case LPORT_ST_RHBA:
+ if (fc_host->fdmi_version == FDMI_V2) {
+ FC_LPORT_DBG(lport, "timeout for FDMI-V2 RHBA,fall back to FDMI-V1\n");
+ fc_host->fdmi_version = FDMI_V1;
+ fc_lport_enter_ms(lport, LPORT_ST_RHBA);
+ break;
+ }
+ fallthrough;
+ case LPORT_ST_RPA:
+ case LPORT_ST_DHBA:
+ case LPORT_ST_DPRT:
+ FC_LPORT_DBG(lport, "Skipping lport state %s to SCR\n",
+ fc_lport_state(lport));
+ fallthrough;
+ case LPORT_ST_SCR:
+ fc_lport_enter_scr(lport);
+ break;
+ case LPORT_ST_LOGO:
+ fc_lport_enter_logo(lport);
+ break;
+ }
+
+ mutex_unlock(&lport->lp_mutex);
+}
+
+/**
+ * fc_lport_logo_resp() - Handle response to LOGO request
+ * @sp: The sequence that the LOGO was on
+ * @fp: The LOGO frame
+ * @lp_arg: The lport port that received the LOGO request
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error()
+ * and then unlock the lport.
+ */
+void fc_lport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *lp_arg)
+{
+ struct fc_lport *lport = lp_arg;
+ u8 op;
+
+ FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ return;
+
+ mutex_lock(&lport->lp_mutex);
+
+ if (lport->state != LPORT_ST_LOGO) {
+ FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
+ "%s\n", fc_lport_state(lport));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC)
+ fc_lport_enter_disabled(lport);
+ else
+ fc_lport_error(lport, fp);
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_lport_logo_resp);
+
+/**
+ * fc_lport_enter_logo() - Logout of the fabric
+ * @lport: The local port to be logged out
+ */
+static void fc_lport_enter_logo(struct fc_lport *lport)
+{
+ struct fc_frame *fp;
+ struct fc_els_logo *logo;
+
+ lockdep_assert_held(&lport->lp_mutex);
+
+ FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_LOGO);
+ fc_vports_linkchange(lport);
+
+ fp = fc_frame_alloc(lport, sizeof(*logo));
+ if (!fp) {
+ fc_lport_error(lport, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
+ fc_lport_logo_resp, lport,
+ 2 * lport->r_a_tov))
+ fc_lport_error(lport, NULL);
+}
+
+/**
+ * fc_lport_flogi_resp() - Handle response to FLOGI request
+ * @sp: The sequence that the FLOGI was on
+ * @fp: The FLOGI response frame
+ * @lp_arg: The lport port that received the FLOGI response
+ *
+ * Locking Note: This function will be called without the lport lock
+ * held, but it will lock, call an _enter_* function or fc_lport_error()
+ * and then unlock the lport.
+ */
+void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *lp_arg)
+{
+ struct fc_lport *lport = lp_arg;
+ struct fc_frame_header *fh;
+ struct fc_els_flogi *flp;
+ u32 did;
+ u16 csp_flags;
+ unsigned int r_a_tov;
+ unsigned int e_d_tov;
+ u16 mfs;
+
+ FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ return;
+
+ mutex_lock(&lport->lp_mutex);
+
+ if (lport->state != LPORT_ST_FLOGI) {
+ FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
+ "%s\n", fc_lport_state(lport));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_lport_error(lport, fp);
+ goto err;
+ }
+
+ fh = fc_frame_header_get(fp);
+ did = fc_frame_did(fp);
+ if (fh->fh_r_ctl != FC_RCTL_ELS_REP || did == 0 ||
+ fc_frame_payload_op(fp) != ELS_LS_ACC) {
+ FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
+ fc_lport_error(lport, fp);
+ goto out;
+ }
+
+ flp = fc_frame_payload_get(fp, sizeof(*flp));
+ if (!flp) {
+ FC_LPORT_DBG(lport, "FLOGI bad response\n");
+ fc_lport_error(lport, fp);
+ goto out;
+ }
+
+ mfs = ntohs(flp->fl_csp.sp_bb_data) &
+ FC_SP_BB_DATA_MASK;
+
+ if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) {
+ FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
+ "lport->mfs:%u\n", mfs, lport->mfs);
+ fc_lport_error(lport, fp);
+ goto out;
+ }
+
+ if (mfs <= lport->mfs) {
+ lport->mfs = mfs;
+ fc_host_maxframe_size(lport->host) = mfs;
+ }
+
+ csp_flags = ntohs(flp->fl_csp.sp_features);
+ r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov);
+ e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov);
+ if (csp_flags & FC_SP_FT_EDTR)
+ e_d_tov /= 1000000;
+
+ lport->npiv_enabled = !!(csp_flags & FC_SP_FT_NPIV_ACC);
+
+ if ((csp_flags & FC_SP_FT_FPORT) == 0) {
+ if (e_d_tov > lport->e_d_tov)
+ lport->e_d_tov = e_d_tov;
+ lport->r_a_tov = 2 * lport->e_d_tov;
+ fc_lport_set_port_id(lport, did, fp);
+ printk(KERN_INFO "host%d: libfc: "
+ "Port (%6.6x) entered "
+ "point-to-point mode\n",
+ lport->host->host_no, did);
+ fc_lport_ptp_setup(lport, fc_frame_sid(fp),
+ get_unaligned_be64(
+ &flp->fl_wwpn),
+ get_unaligned_be64(
+ &flp->fl_wwnn));
+ } else {
+ if (e_d_tov > lport->e_d_tov)
+ lport->e_d_tov = e_d_tov;
+ if (r_a_tov > lport->r_a_tov)
+ lport->r_a_tov = r_a_tov;
+ fc_host_fabric_name(lport->host) =
+ get_unaligned_be64(&flp->fl_wwnn);
+ fc_lport_set_port_id(lport, did, fp);
+ fc_lport_enter_dns(lport);
+ }
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&lport->lp_mutex);
+}
+EXPORT_SYMBOL(fc_lport_flogi_resp);
+
+/**
+ * fc_lport_enter_flogi() - Send a FLOGI request to the fabric manager
+ * @lport: Fibre Channel local port to be logged in to the fabric
+ */
+static void fc_lport_enter_flogi(struct fc_lport *lport)
+{
+ struct fc_frame *fp;
+
+ lockdep_assert_held(&lport->lp_mutex);
+
+ FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
+ fc_lport_state(lport));
+
+ fc_lport_state_enter(lport, LPORT_ST_FLOGI);
+
+ if (lport->point_to_multipoint) {
+ if (lport->port_id)
+ fc_lport_enter_ready(lport);
+ return;
+ }
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
+ if (!fp)
+ return fc_lport_error(lport, fp);
+
+ if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp,
+ lport->vport ? ELS_FDISC : ELS_FLOGI,
+ fc_lport_flogi_resp, lport,
+ lport->vport ? 2 * lport->r_a_tov :
+ lport->e_d_tov))
+ fc_lport_error(lport, NULL);
+}
+
+/**
+ * fc_lport_config() - Configure a fc_lport
+ * @lport: The local port to be configured
+ */
+int fc_lport_config(struct fc_lport *lport)
+{
+ INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
+ mutex_init(&lport->lp_mutex);
+
+ fc_lport_state_enter(lport, LPORT_ST_DISABLED);
+
+ fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
+ fc_lport_add_fc4_type(lport, FC_TYPE_CT);
+ fc_fc4_conf_lport_params(lport, FC_TYPE_FCP);
+
+ return 0;
+}
+EXPORT_SYMBOL(fc_lport_config);
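+
+/*
+ * Typical bring-up order for an LLD (illustrative sketch, loosely
+ * following fcoe_libfc_config(); error handling omitted):
+ *
+ *	fc_lport_config(lport);
+ *	fc_exch_init(lport);
+ *	fc_elsct_init(lport);
+ *	fc_lport_init(lport);
+ *	fc_rport_init(lport);
+ *	fc_disc_init(lport);
+ */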
+
+/**
+ * fc_lport_init() - Initialize the lport layer for a local port
+ * @lport: The local port to initialize the exchange layer for
+ */
+int fc_lport_init(struct fc_lport *lport)
+{
+ struct fc_host_attrs *fc_host;
+
+ fc_host = shost_to_fc_host(lport->host);
+
+ /* Set FDMI version to FDMI-2 specification*/
+ fc_host->fdmi_version = FDMI_V2;
+
+ fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
+ fc_host_node_name(lport->host) = lport->wwnn;
+ fc_host_port_name(lport->host) = lport->wwpn;
+ fc_host_supported_classes(lport->host) = FC_COS_CLASS3;
+ memset(fc_host_supported_fc4s(lport->host), 0,
+ sizeof(fc_host_supported_fc4s(lport->host)));
+ fc_host_supported_fc4s(lport->host)[2] = 1;
+ fc_host_supported_fc4s(lport->host)[7] = 1;
+
+ /* This value is also unchanging */
+ memset(fc_host_active_fc4s(lport->host), 0,
+ sizeof(fc_host_active_fc4s(lport->host)));
+ fc_host_active_fc4s(lport->host)[2] = 1;
+ fc_host_active_fc4s(lport->host)[7] = 1;
+ fc_host_maxframe_size(lport->host) = lport->mfs;
+ fc_host_supported_speeds(lport->host) = 0;
+ if (lport->link_supported_speeds & FC_PORTSPEED_1GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_1GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_40GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_40GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_100GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_100GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_25GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_25GBIT;
+ if (lport->link_supported_speeds & FC_PORTSPEED_50GBIT)
+ fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_50GBIT;
+
+ fc_fc4_add_lport(lport);
+
+ fc_host_num_discovered_ports(lport->host) = DISCOVERED_PORTS;
+ fc_host_port_state(lport->host) = FC_PORTSTATE_ONLINE;
+ fc_host_max_ct_payload(lport->host) = MAX_CT_PAYLOAD;
+ fc_host_num_ports(lport->host) = NUMBER_OF_PORTS;
+ fc_host_bootbios_state(lport->host) = 0x00000000;
+ snprintf(fc_host_bootbios_version(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s", "Unknown");
+
+ return 0;
+}
+EXPORT_SYMBOL(fc_lport_init);
+
+/**
+ * fc_lport_bsg_resp() - The common response handler for FC Passthrough requests
+ * @sp: The sequence for the FC Passthrough response
+ * @fp: The response frame
+ * @info_arg: The BSG info that the response is for
+ */
+static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *info_arg)
+{
+ struct fc_bsg_info *info = info_arg;
+ struct bsg_job *job = info->job;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct fc_lport *lport = info->lport;
+ struct fc_frame_header *fh;
+ size_t len;
+ void *buf;
+
+ if (IS_ERR(fp)) {
+ bsg_reply->result = (PTR_ERR(fp) == -FC_EX_CLOSED) ?
+ -ECONNABORTED : -ETIMEDOUT;
+ job->reply_len = sizeof(uint32_t);
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ kfree(info);
+ return;
+ }
+
+ mutex_lock(&lport->lp_mutex);
+ fh = fc_frame_header_get(fp);
+ len = fr_len(fp) - sizeof(*fh);
+ buf = fc_frame_payload_get(fp, 0);
+
+ if (fr_sof(fp) == FC_SOF_I3 && !ntohs(fh->fh_seq_cnt)) {
+ /* Get the response code from the first frame payload */
+ unsigned short cmd = (info->rsp_code == FC_FS_ACC) ?
+ ntohs(((struct fc_ct_hdr *)buf)->ct_cmd) :
+ (unsigned short)fc_frame_payload_op(fp);
+
+ /* Save the reply status of the job */
+ bsg_reply->reply_data.ctels_reply.status =
+ (cmd == info->rsp_code) ?
+ FC_CTELS_STATUS_OK : FC_CTELS_STATUS_REJECT;
+ }
+
+ bsg_reply->reply_payload_rcv_len +=
+ fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
+ &info->offset, NULL);
+
+ if (fr_eof(fp) == FC_EOF_T &&
+ (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
+ (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
+ if (bsg_reply->reply_payload_rcv_len >
+ job->reply_payload.payload_len)
+ bsg_reply->reply_payload_rcv_len =
+ job->reply_payload.payload_len;
+ bsg_reply->result = 0;
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ kfree(info);
+ }
+ fc_frame_free(fp);
+ mutex_unlock(&lport->lp_mutex);
+}
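+
+/*
+ * Note: a passthrough response may span multiple frames. Each invocation
+ * above copies its frame's payload into the job's scatterlist, and only
+ * the frame carrying EOF_T with both LAST_SEQ and END_SEQ set completes
+ * the bsg job and frees the fc_bsg_info.
+ */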
+
+/**
+ * fc_lport_els_request() - Send ELS passthrough request
+ * @job: The BSG Passthrough job
+ * @lport: The local port sending the request
+ * @did: The destination port id
+ * @tov: The timeout period (in ms)
+ */
+static int fc_lport_els_request(struct bsg_job *job,
+ struct fc_lport *lport,
+ u32 did, u32 tov)
+{
+ struct fc_bsg_info *info;
+ struct fc_frame *fp;
+ struct fc_frame_header *fh;
+ char *pp;
+ int len;
+
+ lockdep_assert_held(&lport->lp_mutex);
+
+ fp = fc_frame_alloc(lport, job->request_payload.payload_len);
+ if (!fp)
+ return -ENOMEM;
+
+ len = job->request_payload.payload_len;
+ pp = fc_frame_payload_get(fp, len);
+
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ pp, len);
+
+ fh = fc_frame_header_get(fp);
+ fh->fh_r_ctl = FC_RCTL_ELS_REQ;
+ hton24(fh->fh_d_id, did);
+ hton24(fh->fh_s_id, lport->port_id);
+ fh->fh_type = FC_TYPE_ELS;
+ hton24(fh->fh_f_ctl, FC_FCTL_REQ);
+ fh->fh_cs_ctl = 0;
+ fh->fh_df_ctl = 0;
+ fh->fh_parm_offset = 0;
+
+ info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
+ if (!info) {
+ fc_frame_free(fp);
+ return -ENOMEM;
+ }
+
+ info->job = job;
+ info->lport = lport;
+ info->rsp_code = ELS_LS_ACC;
+ info->nents = job->reply_payload.sg_cnt;
+ info->sg = job->reply_payload.sg_list;
+
+ if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
+ NULL, info, tov)) {
+ kfree(info);
+ return -ECOMM;
+ }
+ return 0;
+}
+
+/**
+ * fc_lport_ct_request() - Send CT Passthrough request
+ * @job: The BSG Passthrough job
+ * @lport: The local port sending the request
+ * @did: The destination FC-ID
+ * @tov: The timeout period to wait for the response
+ */
+static int fc_lport_ct_request(struct bsg_job *job,
+ struct fc_lport *lport, u32 did, u32 tov)
+{
+ struct fc_bsg_info *info;
+ struct fc_frame *fp;
+ struct fc_frame_header *fh;
+ struct fc_ct_req *ct;
+ size_t len;
+
+ lockdep_assert_held(&lport->lp_mutex);
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
+ job->request_payload.payload_len);
+ if (!fp)
+ return -ENOMEM;
+
+ len = job->request_payload.payload_len;
+ ct = fc_frame_payload_get(fp, len);
+
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt,
+ ct, len);
+
+ fh = fc_frame_header_get(fp);
+ fh->fh_r_ctl = FC_RCTL_DD_UNSOL_CTL;
+ hton24(fh->fh_d_id, did);
+ hton24(fh->fh_s_id, lport->port_id);
+ fh->fh_type = FC_TYPE_CT;
+ hton24(fh->fh_f_ctl, FC_FCTL_REQ);
+ fh->fh_cs_ctl = 0;
+ fh->fh_df_ctl = 0;
+ fh->fh_parm_offset = 0;
+
+ info = kzalloc(sizeof(struct fc_bsg_info), GFP_KERNEL);
+ if (!info) {
+ fc_frame_free(fp);
+ return -ENOMEM;
+ }
+
+ info->job = job;
+ info->lport = lport;
+ info->rsp_code = FC_FS_ACC;
+ info->nents = job->reply_payload.sg_cnt;
+ info->sg = job->reply_payload.sg_list;
+
+ if (!fc_exch_seq_send(lport, fp, fc_lport_bsg_resp,
+ NULL, info, tov)) {
+ kfree(info);
+ return -ECOMM;
+ }
+ return 0;
+}
+
+/**
+ * fc_lport_bsg_request() - The common entry point for sending
+ * FC Passthrough requests
+ * @job: The BSG passthrough job
+ */
+int fc_lport_bsg_request(struct bsg_job *job)
+{
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct fc_lport *lport = shost_priv(shost);
+ struct fc_rport *rport;
+ struct fc_rport_priv *rdata;
+ int rc = -EINVAL;
+ u32 did, tov;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ mutex_lock(&lport->lp_mutex);
+
+ switch (bsg_request->msgcode) {
+ case FC_BSG_RPT_ELS:
+ rport = fc_bsg_to_rport(job);
+ if (!rport)
+ break;
+
+ rdata = rport->dd_data;
+ rc = fc_lport_els_request(job, lport, rport->port_id,
+ rdata->e_d_tov);
+ break;
+
+ case FC_BSG_RPT_CT:
+ rport = fc_bsg_to_rport(job);
+ if (!rport)
+ break;
+
+ rdata = rport->dd_data;
+ rc = fc_lport_ct_request(job, lport, rport->port_id,
+ rdata->e_d_tov);
+ break;
+
+ case FC_BSG_HST_CT:
+ did = ntoh24(bsg_request->rqst_data.h_ct.port_id);
+ if (did == FC_FID_DIR_SERV) {
+ rdata = lport->dns_rdata;
+ if (!rdata)
+ break;
+ tov = rdata->e_d_tov;
+ } else {
+ rdata = fc_rport_lookup(lport, did);
+ if (!rdata)
+ break;
+ tov = rdata->e_d_tov;
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+
+ rc = fc_lport_ct_request(job, lport, did, tov);
+ break;
+
+ case FC_BSG_HST_ELS_NOLOGIN:
+ did = ntoh24(bsg_request->rqst_data.h_els.port_id);
+ rc = fc_lport_els_request(job, lport, did, lport->e_d_tov);
+ break;
+ }
+
+ mutex_unlock(&lport->lp_mutex);
+ return rc;
+}
+EXPORT_SYMBOL(fc_lport_bsg_request);
diff --git a/drivers/scsi/libfc/fc_npiv.c b/drivers/scsi/libfc/fc_npiv.c
new file mode 100644
index 000000000..c045898b8
--- /dev/null
+++ b/drivers/scsi/libfc/fc_npiv.c
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2009 Intel Corporation. All rights reserved.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * NPIV VN_Port helper functions for libfc
+ */
+
+#include <scsi/libfc.h>
+#include <linux/export.h>
+
+/**
+ * libfc_vport_create() - Create a new NPIV vport instance
+ * @vport: fc_vport structure from scsi_transport_fc
+ * @privsize: driver private data size to allocate along with the Scsi_Host
+ */
+struct fc_lport *libfc_vport_create(struct fc_vport *vport, int privsize)
+{
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+ struct fc_lport *vn_port;
+
+ vn_port = libfc_host_alloc(shost->hostt, privsize);
+ if (!vn_port)
+ return vn_port;
+
+ vn_port->vport = vport;
+ vport->dd_data = vn_port;
+
+ mutex_lock(&n_port->lp_mutex);
+ list_add_tail(&vn_port->list, &n_port->vports);
+ mutex_unlock(&n_port->lp_mutex);
+
+ return vn_port;
+}
+EXPORT_SYMBOL(libfc_vport_create);
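+
+/*
+ * Usage sketch (illustrative): an NPIV-capable LLD calls this from its
+ * fc_function_template vport_create path, e.g. fcoe_if_create() does
+ * roughly:
+ *
+ *	vn_port = libfc_vport_create(vport, sizeof(struct fcoe_port));
+ */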
+
+/**
+ * fc_vport_id_lookup() - find NPIV lport that matches a given fabric ID
+ * @n_port: Top level N_Port which may have multiple NPIV VN_Ports
+ * @port_id: Fabric ID to find a match for
+ *
+ * Returns: matching lport pointer or NULL if there is no match
+ */
+struct fc_lport *fc_vport_id_lookup(struct fc_lport *n_port, u32 port_id)
+{
+ struct fc_lport *lport = NULL;
+ struct fc_lport *vn_port;
+
+ if (n_port->port_id == port_id)
+ return n_port;
+
+ if (port_id == FC_FID_FLOGI)
+ return n_port; /* for point-to-point */
+
+ mutex_lock(&n_port->lp_mutex);
+ list_for_each_entry(vn_port, &n_port->vports, list) {
+ if (vn_port->port_id == port_id) {
+ lport = vn_port;
+ break;
+ }
+ }
+ mutex_unlock(&n_port->lp_mutex);
+
+ return lport;
+}
+EXPORT_SYMBOL(fc_vport_id_lookup);
+
+/*
+ * When setting the link state of vports during an lport state change, it's
+ * necessary to hold the lp_mutex of both the N_Port and the VN_Port.
+ * This tells the lockdep engine to treat the nested locking of the VN_Port
+ * as a different lock class.
+ */
+enum libfc_lport_mutex_class {
+ LPORT_MUTEX_NORMAL = 0,
+ LPORT_MUTEX_VN_PORT = 1,
+};
+
+/**
+ * __fc_vport_setlink() - update link and status on a VN_Port
+ * @n_port: parent N_Port
+ * @vn_port: VN_Port to update
+ *
+ * Locking: must be called with both the N_Port and VN_Port lp_mutex held
+ */
+static void __fc_vport_setlink(struct fc_lport *n_port,
+ struct fc_lport *vn_port)
+{
+ struct fc_vport *vport = vn_port->vport;
+
+ if (vn_port->state == LPORT_ST_DISABLED)
+ return;
+
+ if (n_port->state == LPORT_ST_READY) {
+ if (n_port->npiv_enabled) {
+ fc_vport_set_state(vport, FC_VPORT_INITIALIZING);
+ __fc_linkup(vn_port);
+ } else {
+ fc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
+ __fc_linkdown(vn_port);
+ }
+ } else {
+ fc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+ __fc_linkdown(vn_port);
+ }
+}
+
+/**
+ * fc_vport_setlink() - update link and status on a VN_Port
+ * @vn_port: virtual port to update
+ */
+void fc_vport_setlink(struct fc_lport *vn_port)
+{
+ struct fc_vport *vport = vn_port->vport;
+ struct Scsi_Host *shost = vport_to_shost(vport);
+ struct fc_lport *n_port = shost_priv(shost);
+
+ mutex_lock(&n_port->lp_mutex);
+ mutex_lock_nested(&vn_port->lp_mutex, LPORT_MUTEX_VN_PORT);
+ __fc_vport_setlink(n_port, vn_port);
+ mutex_unlock(&vn_port->lp_mutex);
+ mutex_unlock(&n_port->lp_mutex);
+}
+EXPORT_SYMBOL(fc_vport_setlink);
+
+/**
+ * fc_vports_linkchange() - change the link state of all vports
+ * @n_port: Parent N_Port that has changed state
+ *
+ * Locking: called with the n_port lp_mutex held
+ */
+void fc_vports_linkchange(struct fc_lport *n_port)
+{
+ struct fc_lport *vn_port;
+
+ list_for_each_entry(vn_port, &n_port->vports, list) {
+ mutex_lock_nested(&vn_port->lp_mutex, LPORT_MUTEX_VN_PORT);
+ __fc_vport_setlink(n_port, vn_port);
+ mutex_unlock(&vn_port->lp_mutex);
+ }
+}
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
new file mode 100644
index 000000000..33da3c108
--- /dev/null
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -0,0 +1,2292 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ *
+ * Maintained at www.Open-FCoE.org
+ */
+
+/*
+ * RPORT GENERAL INFO
+ *
+ * This file contains all processing regarding fc_rports. It contains the
+ * rport state machine and does all rport interaction with the transport class.
+ * There should be no other places in libfc that interact directly with the
+ * transport class in regards to adding and deleting rports.
+ *
+ * fc_rports represent N_Ports within the fabric.
+ */
+
+/*
+ * RPORT LOCKING
+ *
+ * The rport should never hold the rport mutex and then attempt to acquire
+ * either the lport or disc mutexes. The rport's mutex is considered lesser
+ * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
+ * more comments on the hierarchy.
+ *
+ * The locking strategy is similar to the lport's strategy. The lock protects
+ * the rport's states and is held and released by the entry points to the rport
+ * block. All _enter_* functions correspond to rport states and expect the rport
+ * mutex to be locked before calling them. This means that rports only handle
+ * one request or response at a time; since they're not critical for the I/O
+ * path, this potential over-use of the mutex is acceptable.
+ */
+
+/*
+ * RPORT REFERENCE COUNTING
+ *
+ * An rport reference should be taken when:
+ * - an rport is allocated
+ * - a workqueue item is scheduled
+ * - an ELS request is sent
+ * The reference should be dropped when:
+ * - the workqueue function has finished
+ * - the ELS response is handled
+ * - an rport is removed
+ */
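+
+/*
+ * The canonical get/put pairing used throughout this file when scheduling
+ * work (see e.g. fc_rport_enter_delete()):
+ *
+ *	kref_get(&rdata->kref);
+ *	if (!queue_work(rport_event_queue, &rdata->event_work))
+ *		kref_put(&rdata->kref, fc_rport_destroy);
+ */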
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/export.h>
+#include <linux/rculist.h>
+
+#include <asm/unaligned.h>
+
+#include <scsi/libfc.h>
+
+#include "fc_encode.h"
+#include "fc_libfc.h"
+
+static struct workqueue_struct *rport_event_queue;
+
+static void fc_rport_enter_flogi(struct fc_rport_priv *);
+static void fc_rport_enter_plogi(struct fc_rport_priv *);
+static void fc_rport_enter_prli(struct fc_rport_priv *);
+static void fc_rport_enter_rtv(struct fc_rport_priv *);
+static void fc_rport_enter_ready(struct fc_rport_priv *);
+static void fc_rport_enter_logo(struct fc_rport_priv *);
+static void fc_rport_enter_adisc(struct fc_rport_priv *);
+
+static void fc_rport_recv_plogi_req(struct fc_lport *, struct fc_frame *);
+static void fc_rport_recv_prli_req(struct fc_rport_priv *, struct fc_frame *);
+static void fc_rport_recv_prlo_req(struct fc_rport_priv *, struct fc_frame *);
+static void fc_rport_recv_logo_req(struct fc_lport *, struct fc_frame *);
+static void fc_rport_timeout(struct work_struct *);
+static void fc_rport_error(struct fc_rport_priv *, int);
+static void fc_rport_error_retry(struct fc_rport_priv *, int);
+static void fc_rport_work(struct work_struct *);
+
+static const char *fc_rport_state_names[] = {
+ [RPORT_ST_INIT] = "Init",
+ [RPORT_ST_FLOGI] = "FLOGI",
+ [RPORT_ST_PLOGI_WAIT] = "PLOGI_WAIT",
+ [RPORT_ST_PLOGI] = "PLOGI",
+ [RPORT_ST_PRLI] = "PRLI",
+ [RPORT_ST_RTV] = "RTV",
+ [RPORT_ST_READY] = "Ready",
+ [RPORT_ST_ADISC] = "ADISC",
+ [RPORT_ST_DELETE] = "Delete",
+};
+
+/**
+ * fc_rport_lookup() - Lookup a remote port by port_id
+ * @lport: The local port to lookup the remote port on
+ * @port_id: The remote port ID to look up
+ *
+ * The reference count of the fc_rport_priv structure is
+ * increased by one.
+ */
+struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
+ u32 port_id)
+{
+ struct fc_rport_priv *rdata = NULL, *tmp_rdata;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(tmp_rdata, &lport->disc.rports, peers)
+ if (tmp_rdata->ids.port_id == port_id &&
+ kref_get_unless_zero(&tmp_rdata->kref)) {
+ rdata = tmp_rdata;
+ break;
+ }
+ rcu_read_unlock();
+ return rdata;
+}
+EXPORT_SYMBOL(fc_rport_lookup);
+
+/**
+ * fc_rport_create() - Create a new remote port
+ * @lport: The local port this remote port will be associated with
+ * @port_id: The port ID of the new remote port
+ *
+ * The remote port will start in the INIT state.
+ */
+struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
+{
+ struct fc_rport_priv *rdata;
+ size_t rport_priv_size = sizeof(*rdata);
+
+ lockdep_assert_held(&lport->disc.disc_mutex);
+
+ rdata = fc_rport_lookup(lport, port_id);
+ if (rdata) {
+ kref_put(&rdata->kref, fc_rport_destroy);
+ return rdata;
+ }
+
+ if (lport->rport_priv_size > 0)
+ rport_priv_size = lport->rport_priv_size;
+ rdata = kzalloc(rport_priv_size, GFP_KERNEL);
+ if (!rdata)
+ return NULL;
+
+ rdata->ids.node_name = -1;
+ rdata->ids.port_name = -1;
+ rdata->ids.port_id = port_id;
+ rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
+ kref_init(&rdata->kref);
+ mutex_init(&rdata->rp_mutex);
+ rdata->local_port = lport;
+ rdata->rp_state = RPORT_ST_INIT;
+ rdata->event = RPORT_EV_NONE;
+ rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
+ rdata->e_d_tov = lport->e_d_tov;
+ rdata->r_a_tov = lport->r_a_tov;
+ rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
+ INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
+ INIT_WORK(&rdata->event_work, fc_rport_work);
+ if (port_id != FC_FID_DIR_SERV) {
+ rdata->lld_event_callback = lport->tt.rport_event_callback;
+ list_add_rcu(&rdata->peers, &lport->disc.rports);
+ }
+ return rdata;
+}
+EXPORT_SYMBOL(fc_rport_create);
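+
+/*
+ * Callers must hold the disc_mutex, as asserted above; the lookup/create
+ * pattern used elsewhere in libfc (e.g. fc_lport_enter_dns()) is:
+ *
+ *	mutex_lock(&lport->disc.disc_mutex);
+ *	rdata = fc_rport_create(lport, port_id);
+ *	mutex_unlock(&lport->disc.disc_mutex);
+ */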
+
+/**
+ * fc_rport_destroy() - Free a remote port after last reference is released
+ * @kref: The remote port's kref
+ */
+void fc_rport_destroy(struct kref *kref)
+{
+ struct fc_rport_priv *rdata;
+
+ rdata = container_of(kref, struct fc_rport_priv, kref);
+ kfree_rcu(rdata, rcu);
+}
+EXPORT_SYMBOL(fc_rport_destroy);
+
+/**
+ * fc_rport_state() - Return a string identifying the remote port's state
+ * @rdata: The remote port
+ */
+static const char *fc_rport_state(struct fc_rport_priv *rdata)
+{
+ const char *cp;
+
+ cp = fc_rport_state_names[rdata->rp_state];
+ if (!cp)
+ cp = "Unknown";
+ return cp;
+}
+
+/**
+ * fc_set_rport_loss_tmo() - Set the remote port loss timeout
+ * @rport: The remote port that gets a new timeout value
+ * @timeout: The new timeout value (in seconds)
+ */
+void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
+{
+ if (timeout)
+ rport->dev_loss_tmo = timeout;
+ else
+ rport->dev_loss_tmo = 1;
+}
+EXPORT_SYMBOL(fc_set_rport_loss_tmo);
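+
+/*
+ * Note: a timeout of zero is not stored as-is; a floor of one second is
+ * imposed so the caller-visible dev_loss_tmo is always at least 1.
+ */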
+
+/**
+ * fc_plogi_get_maxframe() - Get the maximum payload from the common service
+ * parameters in a FLOGI frame
+ * @flp: The FLOGI or PLOGI payload
+ * @maxval: The maximum frame size upper limit; this may be less than what
+ * is in the service parameters
+ */
+static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
+ unsigned int maxval)
+{
+ unsigned int mfs;
+
+ /*
+ * Get max payload from the common service parameters and the
+ * class 3 receive data field size.
+ */
+ mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
+ if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
+ maxval = mfs;
+ mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
+ if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
+ maxval = mfs;
+ return maxval;
+}
+
+/**
+ * fc_rport_state_enter() - Change the state of a remote port
+ * @rdata: The remote port whose state should change
+ * @new: The new state
+ */
+static void fc_rport_state_enter(struct fc_rport_priv *rdata,
+ enum fc_rport_state new)
+{
+ lockdep_assert_held(&rdata->rp_mutex);
+
+ if (rdata->rp_state != new)
+ rdata->retries = 0;
+ rdata->rp_state = new;
+}
+
+/**
+ * fc_rport_work() - Handler for remote port events in the rport_event_queue
+ * @work: Handle to the remote port being dequeued
+ *
+ * Reference counting: drops kref on return
+ */
+static void fc_rport_work(struct work_struct *work)
+{
+ u32 port_id;
+ struct fc_rport_priv *rdata =
+ container_of(work, struct fc_rport_priv, event_work);
+ struct fc_rport_libfc_priv *rpriv;
+ enum fc_rport_event event;
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_rport_operations *rport_ops;
+ struct fc_rport_identifiers ids;
+ struct fc_rport *rport;
+ struct fc4_prov *prov;
+ u8 type;
+
+ mutex_lock(&rdata->rp_mutex);
+ event = rdata->event;
+ rport_ops = rdata->ops;
+ rport = rdata->rport;
+
+ FC_RPORT_DBG(rdata, "work event %u\n", event);
+
+ switch (event) {
+ case RPORT_EV_READY:
+ ids = rdata->ids;
+ rdata->event = RPORT_EV_NONE;
+ rdata->major_retries = 0;
+ kref_get(&rdata->kref);
+ mutex_unlock(&rdata->rp_mutex);
+
+ if (!rport) {
+ FC_RPORT_DBG(rdata, "No rport!\n");
+ rport = fc_remote_port_add(lport->host, 0, &ids);
+ }
+ if (!rport) {
+ FC_RPORT_DBG(rdata, "Failed to add the rport\n");
+ fc_rport_logoff(rdata);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ return;
+ }
+ mutex_lock(&rdata->rp_mutex);
+ if (rdata->rport)
+ FC_RPORT_DBG(rdata, "rport already allocated\n");
+ rdata->rport = rport;
+ rport->maxframe_size = rdata->maxframe_size;
+ rport->supported_classes = rdata->supported_classes;
+
+ rpriv = rport->dd_data;
+ rpriv->local_port = lport;
+ rpriv->rp_state = rdata->rp_state;
+ rpriv->flags = rdata->flags;
+ rpriv->e_d_tov = rdata->e_d_tov;
+ rpriv->r_a_tov = rdata->r_a_tov;
+ mutex_unlock(&rdata->rp_mutex);
+
+ if (rport_ops && rport_ops->event_callback) {
+ FC_RPORT_DBG(rdata, "callback ev %d\n", event);
+ rport_ops->event_callback(lport, rdata, event);
+ }
+ if (rdata->lld_event_callback) {
+ FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
+ rdata->lld_event_callback(lport, rdata, event);
+ }
+ kref_put(&rdata->kref, fc_rport_destroy);
+ break;
+
+ case RPORT_EV_FAILED:
+ case RPORT_EV_LOGO:
+ case RPORT_EV_STOP:
+ if (rdata->prli_count) {
+ mutex_lock(&fc_prov_mutex);
+ for (type = 1; type < FC_FC4_PROV_SIZE; type++) {
+ prov = fc_passive_prov[type];
+ if (prov && prov->prlo)
+ prov->prlo(rdata);
+ }
+ mutex_unlock(&fc_prov_mutex);
+ }
+ port_id = rdata->ids.port_id;
+ mutex_unlock(&rdata->rp_mutex);
+
+ if (rport_ops && rport_ops->event_callback) {
+ FC_RPORT_DBG(rdata, "callback ev %d\n", event);
+ rport_ops->event_callback(lport, rdata, event);
+ }
+ if (rdata->lld_event_callback) {
+ FC_RPORT_DBG(rdata, "lld callback ev %d\n", event);
+ rdata->lld_event_callback(lport, rdata, event);
+ }
+ if (cancel_delayed_work_sync(&rdata->retry_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
+
+ /*
+ * Reset any outstanding exchanges before freeing rport.
+ */
+ lport->tt.exch_mgr_reset(lport, 0, port_id);
+ lport->tt.exch_mgr_reset(lport, port_id, 0);
+
+ if (rport) {
+ rpriv = rport->dd_data;
+ rpriv->rp_state = RPORT_ST_DELETE;
+ mutex_lock(&rdata->rp_mutex);
+ rdata->rport = NULL;
+ mutex_unlock(&rdata->rp_mutex);
+ fc_remote_port_delete(rport);
+ }
+
+ mutex_lock(&rdata->rp_mutex);
+ if (rdata->rp_state == RPORT_ST_DELETE) {
+ if (port_id == FC_FID_DIR_SERV) {
+ rdata->event = RPORT_EV_NONE;
+ mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ } else if ((rdata->flags & FC_RP_STARTED) &&
+ rdata->major_retries <
+ lport->max_rport_retry_count) {
+ rdata->major_retries++;
+ rdata->event = RPORT_EV_NONE;
+ FC_RPORT_DBG(rdata, "work restart\n");
+ fc_rport_enter_flogi(rdata);
+ mutex_unlock(&rdata->rp_mutex);
+ } else {
+ mutex_unlock(&rdata->rp_mutex);
+ FC_RPORT_DBG(rdata, "work delete\n");
+ mutex_lock(&lport->disc.disc_mutex);
+ list_del_rcu(&rdata->peers);
+ mutex_unlock(&lport->disc.disc_mutex);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+ } else {
+ /*
+ * Re-open for events. Reissue READY event if ready.
+ */
+ rdata->event = RPORT_EV_NONE;
+ if (rdata->rp_state == RPORT_ST_READY) {
+ FC_RPORT_DBG(rdata, "work reopen\n");
+ fc_rport_enter_ready(rdata);
+ }
+ mutex_unlock(&rdata->rp_mutex);
+ }
+ break;
+
+ default:
+ mutex_unlock(&rdata->rp_mutex);
+ break;
+ }
+ kref_put(&rdata->kref, fc_rport_destroy);
+}
+
+/**
+ * fc_rport_login() - Start the remote port login state machine
+ * @rdata: The remote port to be logged in to
+ *
+ * Initiates the RP state machine. It is called from the LP module.
+ * This function will issue the following commands to the N_Port
+ * identified by the FC ID provided.
+ *
+ * - PLOGI
+ * - PRLI
+ * - RTV
+ *
+ * Locking Note: Called without the rport lock held. This
+ * function will hold the rport lock, call an _enter_*
+ * function and then unlock the rport.
+ *
+ * This indicates the intent to be logged into the remote port.
+ * If it appears we are already logged in, ADISC is used to verify
+ * the setup.
+ */
+int fc_rport_login(struct fc_rport_priv *rdata)
+{
+ mutex_lock(&rdata->rp_mutex);
+
+ if (rdata->flags & FC_RP_STARTED) {
+ FC_RPORT_DBG(rdata, "port already started\n");
+ mutex_unlock(&rdata->rp_mutex);
+ return 0;
+ }
+
+ rdata->flags |= FC_RP_STARTED;
+ switch (rdata->rp_state) {
+ case RPORT_ST_READY:
+ FC_RPORT_DBG(rdata, "ADISC port\n");
+ fc_rport_enter_adisc(rdata);
+ break;
+ case RPORT_ST_DELETE:
+ FC_RPORT_DBG(rdata, "Restart deleted port\n");
+ break;
+ case RPORT_ST_INIT:
+ FC_RPORT_DBG(rdata, "Login to port\n");
+ fc_rport_enter_flogi(rdata);
+ break;
+ default:
+ FC_RPORT_DBG(rdata, "Login in progress, state %s\n",
+ fc_rport_state(rdata));
+ break;
+ }
+ mutex_unlock(&rdata->rp_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(fc_rport_login);
+
+/**
+ * fc_rport_enter_delete() - Schedule a remote port to be deleted
+ * @rdata: The remote port to be deleted
+ * @event: The event to report as the reason for deletion
+ *
+ * Allow state change into DELETE only once.
+ *
+ * Call queue_work only if there's no event already pending.
+ * Set the new event so that the old pending event will not occur.
+ * Since we have the mutex, even if fc_rport_work() is already started,
+ * it'll see the new event.
+ *
+ * Reference counting: does not modify kref
+ */
+static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
+ enum fc_rport_event event)
+{
+ lockdep_assert_held(&rdata->rp_mutex);
+
+ if (rdata->rp_state == RPORT_ST_DELETE)
+ return;
+
+ FC_RPORT_DBG(rdata, "Delete port\n");
+
+ fc_rport_state_enter(rdata, RPORT_ST_DELETE);
+
+ if (rdata->event == RPORT_EV_NONE) {
+ kref_get(&rdata->kref);
+ if (!queue_work(rport_event_queue, &rdata->event_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+
+ rdata->event = event;
+}
+
+/**
+ * fc_rport_logoff() - Logoff and remove a remote port
+ * @rdata: The remote port to be logged off of
+ *
+ * Locking Note: Called without the rport lock held. This
+ * function will hold the rport lock, call an _enter_*
+ * function and then unlock the rport.
+ */
+int fc_rport_logoff(struct fc_rport_priv *rdata)
+{
+ struct fc_lport *lport = rdata->local_port;
+ u32 port_id = rdata->ids.port_id;
+
+ mutex_lock(&rdata->rp_mutex);
+
+ FC_RPORT_DBG(rdata, "Remove port\n");
+
+ rdata->flags &= ~FC_RP_STARTED;
+ if (rdata->rp_state == RPORT_ST_DELETE) {
+ FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
+ goto out;
+ }
+ /*
+ * FC-LS states:
+ * To explicitly Logout, the initiating Nx_Port shall terminate
+ * other open Sequences that it initiated with the destination
+ * Nx_Port prior to performing Logout.
+ */
+ lport->tt.exch_mgr_reset(lport, 0, port_id);
+ lport->tt.exch_mgr_reset(lport, port_id, 0);
+
+ fc_rport_enter_logo(rdata);
+
+ /*
+ * Change the state to Delete so that we discard
+ * the response.
+ */
+ fc_rport_enter_delete(rdata, RPORT_EV_STOP);
+out:
+ mutex_unlock(&rdata->rp_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(fc_rport_logoff);
+
+/**
+ * fc_rport_enter_ready() - Transition to the RPORT_ST_READY state
+ * @rdata: The remote port that is ready
+ *
+ * Reference counting: schedules workqueue, does not modify kref
+ */
+static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
+{
+ lockdep_assert_held(&rdata->rp_mutex);
+
+ fc_rport_state_enter(rdata, RPORT_ST_READY);
+
+ FC_RPORT_DBG(rdata, "Port is Ready\n");
+
+	/* take a reference for the work item only if we actually queue it,
+	 * matching the pattern in fc_rport_enter_delete() */
+	if (rdata->event == RPORT_EV_NONE) {
+		kref_get(&rdata->kref);
+		if (!queue_work(rport_event_queue, &rdata->event_work))
+			kref_put(&rdata->kref, fc_rport_destroy);
+	}
+
+ rdata->event = RPORT_EV_READY;
+}
+
+/**
+ * fc_rport_timeout() - Handler for the retry_work timer
+ * @work: Handle to the remote port that has timed out
+ *
+ * Locking Note: Called without the rport lock held. This
+ * function will hold the rport lock, call an _enter_*
+ * function and then unlock the rport.
+ *
+ * Reference counting: Drops kref on return.
+ */
+static void fc_rport_timeout(struct work_struct *work)
+{
+ struct fc_rport_priv *rdata =
+ container_of(work, struct fc_rport_priv, retry_work.work);
+
+ mutex_lock(&rdata->rp_mutex);
+ FC_RPORT_DBG(rdata, "Port timeout, state %s\n", fc_rport_state(rdata));
+
+ switch (rdata->rp_state) {
+ case RPORT_ST_FLOGI:
+ fc_rport_enter_flogi(rdata);
+ break;
+ case RPORT_ST_PLOGI:
+ fc_rport_enter_plogi(rdata);
+ break;
+ case RPORT_ST_PRLI:
+ fc_rport_enter_prli(rdata);
+ break;
+ case RPORT_ST_RTV:
+ fc_rport_enter_rtv(rdata);
+ break;
+ case RPORT_ST_ADISC:
+ fc_rport_enter_adisc(rdata);
+ break;
+ case RPORT_ST_PLOGI_WAIT:
+ case RPORT_ST_READY:
+ case RPORT_ST_INIT:
+ case RPORT_ST_DELETE:
+ break;
+ }
+
+ mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, fc_rport_destroy);
+}
+
+/**
+ * fc_rport_error() - Error handler, called once retries have been exhausted
+ * @rdata: The remote port on which the error occurred
+ * @err: The error code
+ *
+ * Reference counting: does not modify kref
+ */
+static void fc_rport_error(struct fc_rport_priv *rdata, int err)
+{
+ struct fc_lport *lport = rdata->local_port;
+
+ lockdep_assert_held(&rdata->rp_mutex);
+
+ FC_RPORT_DBG(rdata, "Error %d in state %s, retries %d\n",
+ -err, fc_rport_state(rdata), rdata->retries);
+
+ switch (rdata->rp_state) {
+ case RPORT_ST_FLOGI:
+ rdata->flags &= ~FC_RP_STARTED;
+ fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
+ break;
+ case RPORT_ST_PLOGI:
+ if (lport->point_to_multipoint) {
+ rdata->flags &= ~FC_RP_STARTED;
+ fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
+ } else
+ fc_rport_enter_logo(rdata);
+ break;
+ case RPORT_ST_RTV:
+ fc_rport_enter_ready(rdata);
+ break;
+ case RPORT_ST_PRLI:
+ fc_rport_enter_plogi(rdata);
+ break;
+ case RPORT_ST_ADISC:
+ fc_rport_enter_logo(rdata);
+ break;
+ case RPORT_ST_PLOGI_WAIT:
+ case RPORT_ST_DELETE:
+ case RPORT_ST_READY:
+ case RPORT_ST_INIT:
+ break;
+ }
+}
+
+/**
+ * fc_rport_error_retry() - Handler for remote port state retries
+ * @rdata: The remote port whose state is to be retried
+ * @err: The error code
+ *
+ * If the error was an exchange timeout, retry immediately;
+ * otherwise wait for E_D_TOV.
+ *
+ * Reference counting: increments kref when scheduling retry_work
+ */
+static void fc_rport_error_retry(struct fc_rport_priv *rdata, int err)
+{
+ unsigned long delay = msecs_to_jiffies(rdata->e_d_tov);
+
+ lockdep_assert_held(&rdata->rp_mutex);
+
+ /* make sure this isn't an FC_EX_CLOSED error, never retry those */
+ if (err == -FC_EX_CLOSED)
+ goto out;
+
+ if (rdata->retries < rdata->local_port->max_rport_retry_count) {
+ FC_RPORT_DBG(rdata, "Error %d in state %s, retrying\n",
+ err, fc_rport_state(rdata));
+ rdata->retries++;
+ /* no additional delay on exchange timeouts */
+ if (err == -FC_EX_TIMEOUT)
+ delay = 0;
+ kref_get(&rdata->kref);
+ if (!schedule_delayed_work(&rdata->retry_work, delay))
+ kref_put(&rdata->kref, fc_rport_destroy);
+ return;
+ }
+
+out:
+ fc_rport_error(rdata, err);
+}
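+
+/*
+ * Summary sketch of the retry policy implemented above:
+ *
+ *	-FC_EX_CLOSED		never retried, handed straight to
+ *				fc_rport_error()
+ *	-FC_EX_TIMEOUT		retried immediately, the exchange timer
+ *				has already provided the delay
+ *	any other error		retried after E_D_TOV
+ *	retries exhausted	handed to fc_rport_error()
+ */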
+
+/**
+ * fc_rport_login_complete() - Handle parameters and completion of p-mp login.
+ * @rdata: The remote port which we logged into or which logged into us.
+ * @fp: The FLOGI or PLOGI request or response frame
+ *
+ * Returns non-zero error if a problem is detected with the frame.
+ * Does not free the frame.
+ *
+ * This is only used in point-to-multipoint mode for FIP currently.
+ */
+static int fc_rport_login_complete(struct fc_rport_priv *rdata,
+ struct fc_frame *fp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_els_flogi *flogi;
+ unsigned int e_d_tov;
+ u16 csp_flags;
+
+ flogi = fc_frame_payload_get(fp, sizeof(*flogi));
+ if (!flogi)
+ return -EINVAL;
+
+ csp_flags = ntohs(flogi->fl_csp.sp_features);
+
+ if (fc_frame_payload_op(fp) == ELS_FLOGI) {
+ if (csp_flags & FC_SP_FT_FPORT) {
+ FC_RPORT_DBG(rdata, "Fabric bit set in FLOGI\n");
+ return -EINVAL;
+ }
+ } else {
+
+ /*
+ * E_D_TOV is not valid on an incoming FLOGI request.
+ */
+ e_d_tov = ntohl(flogi->fl_csp.sp_e_d_tov);
+ if (csp_flags & FC_SP_FT_EDTR)
+ e_d_tov /= 1000000;
+ if (e_d_tov > rdata->e_d_tov)
+ rdata->e_d_tov = e_d_tov;
+ }
+ rdata->maxframe_size = fc_plogi_get_maxframe(flogi, lport->mfs);
+ return 0;
+}
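+
+/*
+ * A worked example for the E_D_TOV handling above (values assumed for
+ * illustration): with FC_SP_FT_EDTR set the peer advertises E_D_TOV in
+ * nanosecond resolution, so sp_e_d_tov = 2000000000 becomes 2000 ms
+ * after the divide; without the flag the field is taken as milliseconds
+ * as-is. In either case the rport's e_d_tov is only ever raised, never
+ * lowered.
+ */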
+
+/**
+ * fc_rport_flogi_resp() - Handle response to FLOGI request for p-mp mode
+ * @sp: The sequence that the FLOGI was on
+ * @fp: The FLOGI response frame
+ * @rp_arg: The remote port that sent the FLOGI response
+ */
+static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rp_arg)
+{
+ struct fc_rport_priv *rdata = rp_arg;
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_els_flogi *flogi;
+ unsigned int r_a_tov;
+ u8 opcode;
+ int err = 0;
+
+ FC_RPORT_DBG(rdata, "Received a FLOGI %s\n",
+ IS_ERR(fp) ? "error" : fc_els_resp_type(fp));
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
+ if (rdata->rp_state != RPORT_ST_FLOGI) {
+ FC_RPORT_DBG(rdata, "Received a FLOGI response, but in state "
+ "%s\n", fc_rport_state(rdata));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_rport_error(rdata, PTR_ERR(fp));
+ goto err;
+ }
+ opcode = fc_frame_payload_op(fp);
+ if (opcode == ELS_LS_RJT) {
+ struct fc_els_ls_rjt *rjt;
+
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ FC_RPORT_DBG(rdata, "FLOGI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
+ err = -FC_EX_ELS_RJT;
+ goto bad;
+ } else if (opcode != ELS_LS_ACC) {
+ FC_RPORT_DBG(rdata, "FLOGI ELS invalid opcode %x\n", opcode);
+ err = -FC_EX_ELS_RJT;
+ goto bad;
+ }
+ if (fc_rport_login_complete(rdata, fp)) {
+ FC_RPORT_DBG(rdata, "FLOGI failed, no login\n");
+ err = -FC_EX_INV_LOGIN;
+ goto bad;
+ }
+
+ flogi = fc_frame_payload_get(fp, sizeof(*flogi));
+ if (!flogi) {
+ err = -FC_EX_ALLOC_ERR;
+ goto bad;
+ }
+ r_a_tov = ntohl(flogi->fl_csp.sp_r_a_tov);
+ if (r_a_tov > rdata->r_a_tov)
+ rdata->r_a_tov = r_a_tov;
+
+ if (rdata->ids.port_name < lport->wwpn)
+ fc_rport_enter_plogi(rdata);
+ else
+ fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&rdata->rp_mutex);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
+ return;
+bad:
+ FC_RPORT_DBG(rdata, "Bad FLOGI response\n");
+ fc_rport_error_retry(rdata, err);
+ goto out;
+}
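+
+/*
+ * The WWPN comparison above breaks the tie when both ends FLOGI each
+ * other. Example (WWPNs assumed for illustration): with a remote WWPN of
+ * 0x10000000c9000001 and a local WWPN of 0x10000000c9000002 the remote
+ * name is lower, so this end proceeds to send PLOGI; with the values
+ * swapped, this end parks in RPORT_ST_PLOGI_WAIT and lets the peer
+ * originate the PLOGI, ensuring exactly one side initiates.
+ */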
+
+/**
+ * fc_rport_enter_flogi() - Send a FLOGI request to the remote port for p-mp
+ * @rdata: The remote port to send a FLOGI to
+ *
+ * Reference counting: increments kref when sending ELS
+ */
+static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+
+ lockdep_assert_held(&rdata->rp_mutex);
+
+ if (!lport->point_to_multipoint)
+ return fc_rport_enter_plogi(rdata);
+
+ FC_RPORT_DBG(rdata, "Entered FLOGI state from %s state\n",
+ fc_rport_state(rdata));
+
+ fc_rport_state_enter(rdata, RPORT_ST_FLOGI);
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
+ if (!fp)
+ return fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
+
+ kref_get(&rdata->kref);
+ if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_FLOGI,
+ fc_rport_flogi_resp, rdata,
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+}
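+
+/*
+ * The kref handling above is the pattern every ELS sender in this file
+ * follows (sketch): take a reference for the response handler before
+ * sending; if the send fails the handler will never run, so the
+ * reference is dropped on the spot; on success the matching *_resp()
+ * callback drops it at its "put:" label once the response (or error)
+ * has been handled.
+ */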
+
+/**
+ * fc_rport_recv_flogi_req() - Handle Fabric Login (FLOGI) request in p-mp mode
+ * @lport: The local port that received the FLOGI request
+ * @rx_fp: The FLOGI request frame
+ *
+ * Reference counting: drops kref on return
+ */
+static void fc_rport_recv_flogi_req(struct fc_lport *lport,
+ struct fc_frame *rx_fp)
+{
+ struct fc_els_flogi *flp;
+ struct fc_rport_priv *rdata;
+ struct fc_frame *fp = rx_fp;
+ struct fc_seq_els_data rjt_data;
+ u32 sid;
+
+ sid = fc_frame_sid(fp);
+
+ FC_RPORT_ID_DBG(lport, sid, "Received FLOGI request\n");
+
+ if (!lport->point_to_multipoint) {
+ rjt_data.reason = ELS_RJT_UNSUP;
+ rjt_data.explan = ELS_EXPL_NONE;
+ goto reject;
+ }
+
+ flp = fc_frame_payload_get(fp, sizeof(*flp));
+ if (!flp) {
+ rjt_data.reason = ELS_RJT_LOGIC;
+ rjt_data.explan = ELS_EXPL_INV_LEN;
+ goto reject;
+ }
+
+ rdata = fc_rport_lookup(lport, sid);
+ if (!rdata) {
+ rjt_data.reason = ELS_RJT_FIP;
+ rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
+ goto reject;
+ }
+ mutex_lock(&rdata->rp_mutex);
+
+ FC_RPORT_DBG(rdata, "Received FLOGI in %s state\n",
+ fc_rport_state(rdata));
+
+ switch (rdata->rp_state) {
+ case RPORT_ST_INIT:
+		/*
+		 * A FLOGI received while the rport is still in the INIT
+		 * state means it has not yet transitioned to FLOGI:
+		 * either the fc_rport timeout has not fired, or this end
+		 * has not yet received a beacon from the other end. Only
+		 * in that case let the rport state machine continue;
+		 * otherwise fall through, which sends a reject response.
+		 * NOTE: FIP states such as VNMP_UP or VNMP_CLAIM are not
+		 * checked here; if the FIP state were not one of those,
+		 * the rport would not have been created and
+		 * fc_rport_lookup() would have failed anyway.
+		 */
+ break;
+ case RPORT_ST_DELETE:
+ mutex_unlock(&rdata->rp_mutex);
+ rjt_data.reason = ELS_RJT_FIP;
+ rjt_data.explan = ELS_EXPL_NOT_NEIGHBOR;
+ goto reject_put;
+ case RPORT_ST_FLOGI:
+ case RPORT_ST_PLOGI_WAIT:
+ case RPORT_ST_PLOGI:
+ break;
+ case RPORT_ST_PRLI:
+ case RPORT_ST_RTV:
+ case RPORT_ST_READY:
+ case RPORT_ST_ADISC:
+ /*
+ * Set the remote port to be deleted and to then restart.
+ * This queues work to be sure exchanges are reset.
+ */
+ fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
+ mutex_unlock(&rdata->rp_mutex);
+ rjt_data.reason = ELS_RJT_BUSY;
+ rjt_data.explan = ELS_EXPL_NONE;
+ goto reject_put;
+ }
+ if (fc_rport_login_complete(rdata, fp)) {
+ mutex_unlock(&rdata->rp_mutex);
+ rjt_data.reason = ELS_RJT_LOGIC;
+ rjt_data.explan = ELS_EXPL_NONE;
+ goto reject_put;
+ }
+
+ fp = fc_frame_alloc(lport, sizeof(*flp));
+ if (!fp)
+ goto out;
+
+ fc_flogi_fill(lport, fp);
+ flp = fc_frame_payload_get(fp, sizeof(*flp));
+ flp->fl_cmd = ELS_LS_ACC;
+
+ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+
+ /*
+ * Do not proceed with the state machine if our
+ * FLOGI has crossed with an FLOGI from the
+ * remote port; wait for the FLOGI response instead.
+ */
+ if (rdata->rp_state != RPORT_ST_FLOGI) {
+ if (rdata->ids.port_name < lport->wwpn)
+ fc_rport_enter_plogi(rdata);
+ else
+ fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
+ }
+out:
+ mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ fc_frame_free(rx_fp);
+ return;
+
+reject_put:
+ kref_put(&rdata->kref, fc_rport_destroy);
+reject:
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+ fc_frame_free(rx_fp);
+}
+
+/**
+ * fc_rport_plogi_resp() - Handler for ELS PLOGI responses
+ * @sp: The sequence the PLOGI is on
+ * @fp: The PLOGI response frame
+ * @rdata_arg: The remote port that sent the PLOGI response
+ *
+ * Locking Note: This function will be called without the rport lock
+ * held, but it will lock, call an _enter_* function or fc_rport_error
+ * and then unlock the rport.
+ */
+static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rdata_arg)
+{
+ struct fc_rport_priv *rdata = rdata_arg;
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_els_flogi *plp = NULL;
+ u16 csp_seq;
+ u16 cssp_seq;
+ u8 op;
+
+ FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
+ if (rdata->rp_state != RPORT_ST_PLOGI) {
+ FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
+ "%s\n", fc_rport_state(rdata));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_rport_error_retry(rdata, PTR_ERR(fp));
+ goto err;
+ }
+
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC &&
+ (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
+ rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
+ rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);
+
+ /* save plogi response sp_features for further reference */
+ rdata->sp_features = ntohs(plp->fl_csp.sp_features);
+
+ if (lport->point_to_multipoint)
+ fc_rport_login_complete(rdata, fp);
+ csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
+		/* compare against the Class 3 service parameter page */
+		cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
+ if (cssp_seq < csp_seq)
+ csp_seq = cssp_seq;
+ rdata->max_seq = csp_seq;
+ rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
+ fc_rport_enter_prli(rdata);
+ } else {
+ struct fc_els_ls_rjt *rjt;
+
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ if (!rjt)
+ FC_RPORT_DBG(rdata, "PLOGI bad response\n");
+ else
+ FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
+ fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
+ }
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&rdata->rp_mutex);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
+}
+
+static bool
+fc_rport_compatible_roles(struct fc_lport *lport, struct fc_rport_priv *rdata)
+{
+ if (rdata->ids.roles == FC_PORT_ROLE_UNKNOWN)
+ return true;
+ if ((rdata->ids.roles & FC_PORT_ROLE_FCP_TARGET) &&
+ (lport->service_params & FCP_SPPF_INIT_FCN))
+ return true;
+ if ((rdata->ids.roles & FC_PORT_ROLE_FCP_INITIATOR) &&
+ (lport->service_params & FCP_SPPF_TARG_FCN))
+ return true;
+ return false;
+}
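+
+/*
+ * The compatibility matrix implemented above, in sketch form:
+ *
+ *	remote roles unknown			-> compatible (decide later)
+ *	remote target + local initiator		-> compatible
+ *	remote initiator + local target		-> compatible
+ *	anything else				-> incompatible
+ */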
+
+/**
+ * fc_rport_enter_plogi() - Send Port Login (PLOGI) request
+ * @rdata: The remote port to send a PLOGI to
+ *
+ * Reference counting: increments kref when sending ELS
+ */
+static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+
+ lockdep_assert_held(&rdata->rp_mutex);
+
+ if (!fc_rport_compatible_roles(lport, rdata)) {
+ FC_RPORT_DBG(rdata, "PLOGI suppressed for incompatible role\n");
+ fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
+ return;
+ }
+
+ FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
+ fc_rport_state(rdata));
+
+ fc_rport_state_enter(rdata, RPORT_ST_PLOGI);
+
+ rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
+ if (!fp) {
+ FC_RPORT_DBG(rdata, "%s frame alloc failed\n", __func__);
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
+ return;
+ }
+ rdata->e_d_tov = lport->e_d_tov;
+
+ kref_get(&rdata->kref);
+ if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
+ fc_rport_plogi_resp, rdata,
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+}
+
+/**
+ * fc_rport_prli_resp() - Process Login (PRLI) response handler
+ * @sp: The sequence the PRLI response was on
+ * @fp: The PRLI response frame
+ * @rdata_arg: The remote port that sent the PRLI response
+ *
+ * Locking Note: This function will be called without the rport lock
+ * held, but it will lock, call an _enter_* function or fc_rport_error
+ * and then unlock the rport.
+ */
+static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rdata_arg)
+{
+ struct fc_rport_priv *rdata = rdata_arg;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+ struct fc_els_spp temp_spp;
+ struct fc_els_ls_rjt *rjt;
+ struct fc4_prov *prov;
+ u32 roles = FC_RPORT_ROLE_UNKNOWN;
+ u32 fcp_parm = 0;
+ u8 op;
+ enum fc_els_spp_resp resp_code;
+
+ FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
+ if (rdata->rp_state != RPORT_ST_PRLI) {
+ FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
+ "%s\n", fc_rport_state(rdata));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_rport_error_retry(rdata, PTR_ERR(fp));
+ goto err;
+ }
+
+ /* reinitialize remote port roles */
+ rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC) {
+ pp = fc_frame_payload_get(fp, sizeof(*pp));
+ if (!pp) {
+ fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
+ goto out;
+ }
+
+ resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
+ FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n",
+ pp->spp.spp_flags, pp->spp.spp_type);
+
+ rdata->spp_type = pp->spp.spp_type;
+ if (resp_code != FC_SPP_RESP_ACK) {
+ if (resp_code == FC_SPP_RESP_CONF)
+ fc_rport_error(rdata, -FC_EX_SEQ_ERR);
+ else
+ fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
+ goto out;
+ }
+ if (pp->prli.prli_spp_len < sizeof(pp->spp)) {
+ fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
+ goto out;
+ }
+
+ fcp_parm = ntohl(pp->spp.spp_params);
+ if (fcp_parm & FCP_SPPF_RETRY)
+ rdata->flags |= FC_RP_FLAGS_RETRY;
+ if (fcp_parm & FCP_SPPF_CONF_COMPL)
+ rdata->flags |= FC_RP_FLAGS_CONF_REQ;
+
+ /*
+ * Call prli provider if we should act as a target
+ */
+ if (rdata->spp_type < FC_FC4_PROV_SIZE) {
+ prov = fc_passive_prov[rdata->spp_type];
+ if (prov) {
+ memset(&temp_spp, 0, sizeof(temp_spp));
+ prov->prli(rdata, pp->prli.prli_spp_len,
+ &pp->spp, &temp_spp);
+ }
+ }
+ /*
+ * Check if the image pair could be established
+ */
+ if (rdata->spp_type != FC_TYPE_FCP ||
+ !(pp->spp.spp_flags & FC_SPP_EST_IMG_PAIR)) {
+ /*
+ * Nope; we can't use this port as a target.
+ */
+ fcp_parm &= ~FCP_SPPF_TARG_FCN;
+ }
+ rdata->supported_classes = FC_COS_CLASS3;
+ if (fcp_parm & FCP_SPPF_INIT_FCN)
+ roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (fcp_parm & FCP_SPPF_TARG_FCN)
+ roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+ rdata->ids.roles = roles;
+ fc_rport_enter_rtv(rdata);
+
+ } else {
+ rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ if (!rjt)
+ FC_RPORT_DBG(rdata, "PRLI bad response\n");
+ else {
+ FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
+ if (rjt->er_reason == ELS_RJT_UNAB &&
+ rjt->er_explan == ELS_EXPL_PLOGI_REQD) {
+ fc_rport_enter_plogi(rdata);
+ goto out;
+ }
+ }
+		fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
+ }
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&rdata->rp_mutex);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
+}
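+
+/*
+ * A worked example for the spp_params parsing above (value assumed for
+ * illustration): a target that acknowledges the image pair typically
+ * answers with FCP_SPPF_TARG_FCN set in spp_params, which adds
+ * FC_RPORT_ROLE_FCP_TARGET to the rport roles; FCP_SPPF_RETRY and
+ * FCP_SPPF_CONF_COMPL likewise map one-for-one onto the
+ * FC_RP_FLAGS_RETRY and FC_RP_FLAGS_CONF_REQ rport flags.
+ */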
+
+/**
+ * fc_rport_enter_prli() - Send Process Login (PRLI) request
+ * @rdata: The remote port to send the PRLI request to
+ *
+ * Reference counting: increments kref when sending ELS
+ */
+static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+ struct fc_frame *fp;
+ struct fc4_prov *prov;
+
+ lockdep_assert_held(&rdata->rp_mutex);
+
+ /*
+ * If the rport is one of the well known addresses
+ * we skip PRLI and RTV and go straight to READY.
+ */
+ if (rdata->ids.port_id >= FC_FID_DOM_MGR) {
+ fc_rport_enter_ready(rdata);
+ return;
+ }
+
+ /*
+ * And if the local port does not support the initiator function
+ * there's no need to send a PRLI, either.
+ */
+ if (!(lport->service_params & FCP_SPPF_INIT_FCN)) {
+ fc_rport_enter_ready(rdata);
+ return;
+ }
+
+ FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
+ fc_rport_state(rdata));
+
+ fc_rport_state_enter(rdata, RPORT_ST_PRLI);
+
+ fp = fc_frame_alloc(lport, sizeof(*pp));
+ if (!fp) {
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
+ return;
+ }
+
+ fc_prli_fill(lport, fp);
+
+ prov = fc_passive_prov[FC_TYPE_FCP];
+ if (prov) {
+ pp = fc_frame_payload_get(fp, sizeof(*pp));
+ prov->prli(rdata, sizeof(pp->spp), NULL, &pp->spp);
+ }
+
+ fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rdata->ids.port_id,
+ fc_host_port_id(lport->host), FC_TYPE_ELS,
+ FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+
+ kref_get(&rdata->kref);
+ if (!fc_exch_seq_send(lport, fp, fc_rport_prli_resp,
+ NULL, rdata, 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+}
+
+/**
+ * fc_rport_rtv_resp() - Handler for Request Timeout Value (RTV) responses
+ * @sp: The sequence the RTV was on
+ * @fp: The RTV response frame
+ * @rdata_arg: The remote port that sent the RTV response
+ *
+ * Many targets don't seem to support this.
+ *
+ * Locking Note: This function will be called without the rport lock
+ * held, but it will lock, call an _enter_* function or fc_rport_error
+ * and then unlock the rport.
+ */
+static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rdata_arg)
+{
+ struct fc_rport_priv *rdata = rdata_arg;
+ u8 op;
+
+ FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
+ if (rdata->rp_state != RPORT_ST_RTV) {
+ FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
+ "%s\n", fc_rport_state(rdata));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_rport_error(rdata, PTR_ERR(fp));
+ goto err;
+ }
+
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC) {
+ struct fc_els_rtv_acc *rtv;
+ u32 toq;
+ u32 tov;
+
+ rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+ if (rtv) {
+ toq = ntohl(rtv->rtv_toq);
+ tov = ntohl(rtv->rtv_r_a_tov);
+ if (tov == 0)
+ tov = 1;
+ if (tov > rdata->r_a_tov)
+ rdata->r_a_tov = tov;
+ tov = ntohl(rtv->rtv_e_d_tov);
+ if (toq & FC_ELS_RTV_EDRES)
+ tov /= 1000000;
+ if (tov == 0)
+ tov = 1;
+ if (tov > rdata->e_d_tov)
+ rdata->e_d_tov = tov;
+ }
+ }
+
+ fc_rport_enter_ready(rdata);
+
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&rdata->rp_mutex);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
+}
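+
+/*
+ * Sketch of the RTV unit handling above: FC_ELS_RTV_EDRES in rtv_toq
+ * indicates the returned E_D_TOV is in nanoseconds, hence the divide by
+ * 1000000 to get milliseconds. Both timeout values are clamped up to at
+ * least 1 ms and only ever raise, never lower, what the rport has
+ * already negotiated.
+ */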
+
+/**
+ * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request
+ * @rdata: The remote port to send the RTV request to
+ *
+ * Reference counting: increments kref when sending ELS
+ */
+static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
+{
+ struct fc_frame *fp;
+ struct fc_lport *lport = rdata->local_port;
+
+ lockdep_assert_held(&rdata->rp_mutex);
+
+ FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
+ fc_rport_state(rdata));
+
+ fc_rport_state_enter(rdata, RPORT_ST_RTV);
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
+ if (!fp) {
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
+ return;
+ }
+
+ kref_get(&rdata->kref);
+ if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
+ fc_rport_rtv_resp, rdata,
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+}
+
+/**
+ * fc_rport_recv_rtv_req() - Handler for Request Timeout Value (RTV) requests
+ * @rdata: The remote port that sent the RTV request
+ * @in_fp: The RTV request frame
+ */
+static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
+ struct fc_frame *in_fp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+ struct fc_els_rtv_acc *rtv;
+ struct fc_seq_els_data rjt_data;
+
+ lockdep_assert_held(&rdata->rp_mutex);
+ lockdep_assert_held(&lport->lp_mutex);
+
+ FC_RPORT_DBG(rdata, "Received RTV request\n");
+
+ fp = fc_frame_alloc(lport, sizeof(*rtv));
+ if (!fp) {
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_INSUF_RES;
+ fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+ goto drop;
+ }
+ rtv = fc_frame_payload_get(fp, sizeof(*rtv));
+ rtv->rtv_cmd = ELS_LS_ACC;
+ rtv->rtv_r_a_tov = htonl(lport->r_a_tov);
+ rtv->rtv_e_d_tov = htonl(lport->e_d_tov);
+ rtv->rtv_toq = 0;
+ fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+drop:
+ fc_frame_free(in_fp);
+}
+
+/**
+ * fc_rport_logo_resp() - Handler for logout (LOGO) responses
+ * @sp: The sequence the LOGO was on
+ * @fp: The LOGO response frame
+ * @rdata_arg: The remote port the LOGO request was sent to
+ */
+static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rdata_arg)
+{
+ struct fc_rport_priv *rdata = rdata_arg;
+ struct fc_lport *lport = rdata->local_port;
+
+ FC_RPORT_ID_DBG(lport, fc_seq_exch(sp)->did,
+ "Received a LOGO %s\n", fc_els_resp_type(fp));
+ if (!IS_ERR(fp))
+ fc_frame_free(fp);
+ kref_put(&rdata->kref, fc_rport_destroy);
+}
+
+/**
+ * fc_rport_enter_logo() - Send a logout (LOGO) request
+ * @rdata: The remote port to send the LOGO request to
+ *
+ * Reference counting: increments kref when sending ELS
+ */
+static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+
+ lockdep_assert_held(&rdata->rp_mutex);
+
+ FC_RPORT_DBG(rdata, "Port sending LOGO from %s state\n",
+ fc_rport_state(rdata));
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
+ if (!fp)
+ return;
+ kref_get(&rdata->kref);
+ if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
+ fc_rport_logo_resp, rdata, 0))
+ kref_put(&rdata->kref, fc_rport_destroy);
+}
+
+/**
+ * fc_rport_adisc_resp() - Handler for Address Discovery (ADISC) responses
+ * @sp: The sequence the ADISC response was on
+ * @fp: The ADISC response frame
+ * @rdata_arg: The remote port that sent the ADISC response
+ *
+ * Locking Note: This function will be called without the rport lock
+ * held, but it will lock, call an _enter_* function or fc_rport_error
+ * and then unlock the rport.
+ */
+static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rdata_arg)
+{
+ struct fc_rport_priv *rdata = rdata_arg;
+ struct fc_els_adisc *adisc;
+ u8 op;
+
+ FC_RPORT_DBG(rdata, "Received a ADISC response\n");
+
+ if (fp == ERR_PTR(-FC_EX_CLOSED))
+ goto put;
+
+ mutex_lock(&rdata->rp_mutex);
+
+ if (rdata->rp_state != RPORT_ST_ADISC) {
+ FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n",
+ fc_rport_state(rdata));
+ if (IS_ERR(fp))
+ goto err;
+ goto out;
+ }
+
+ if (IS_ERR(fp)) {
+ fc_rport_error(rdata, PTR_ERR(fp));
+ goto err;
+ }
+
+	/*
+	 * If address verification failed, consider us logged out of the
+	 * rport. Since the rport is still in discovery we want to be
+	 * logged in, so restart the login (fc_rport_enter_flogi() falls
+	 * back to PLOGI when not in point-to-multipoint mode). Otherwise,
+	 * go back to READY.
+	 */
+ op = fc_frame_payload_op(fp);
+ adisc = fc_frame_payload_get(fp, sizeof(*adisc));
+ if (op != ELS_LS_ACC || !adisc ||
+ ntoh24(adisc->adisc_port_id) != rdata->ids.port_id ||
+ get_unaligned_be64(&adisc->adisc_wwpn) != rdata->ids.port_name ||
+ get_unaligned_be64(&adisc->adisc_wwnn) != rdata->ids.node_name) {
+ FC_RPORT_DBG(rdata, "ADISC error or mismatch\n");
+ fc_rport_enter_flogi(rdata);
+ } else {
+ FC_RPORT_DBG(rdata, "ADISC OK\n");
+ fc_rport_enter_ready(rdata);
+ }
+out:
+ fc_frame_free(fp);
+err:
+ mutex_unlock(&rdata->rp_mutex);
+put:
+ kref_put(&rdata->kref, fc_rport_destroy);
+}
+
+/**
+ * fc_rport_enter_adisc() - Send Address Discover (ADISC) request
+ * @rdata: The remote port to send the ADISC request to
+ *
+ * Reference counting: increments kref when sending ELS
+ */
+static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+
+ lockdep_assert_held(&rdata->rp_mutex);
+
+ FC_RPORT_DBG(rdata, "sending ADISC from %s state\n",
+ fc_rport_state(rdata));
+
+ fc_rport_state_enter(rdata, RPORT_ST_ADISC);
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc));
+ if (!fp) {
+ fc_rport_error_retry(rdata, -FC_EX_ALLOC_ERR);
+ return;
+ }
+ kref_get(&rdata->kref);
+ if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
+ fc_rport_adisc_resp, rdata,
+ 2 * lport->r_a_tov)) {
+ fc_rport_error_retry(rdata, -FC_EX_XMIT_ERR);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
+}
+
+/**
+ * fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests
+ * @rdata: The remote port that sent the ADISC request
+ * @in_fp: The ADISC request frame
+ */
+static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
+ struct fc_frame *in_fp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+ struct fc_els_adisc *adisc;
+ struct fc_seq_els_data rjt_data;
+
+ lockdep_assert_held(&rdata->rp_mutex);
+ lockdep_assert_held(&lport->lp_mutex);
+
+ FC_RPORT_DBG(rdata, "Received ADISC request\n");
+
+ adisc = fc_frame_payload_get(in_fp, sizeof(*adisc));
+ if (!adisc) {
+ rjt_data.reason = ELS_RJT_PROT;
+ rjt_data.explan = ELS_EXPL_INV_LEN;
+ fc_seq_els_rsp_send(in_fp, ELS_LS_RJT, &rjt_data);
+ goto drop;
+ }
+
+ fp = fc_frame_alloc(lport, sizeof(*adisc));
+ if (!fp)
+ goto drop;
+ fc_adisc_fill(lport, fp);
+ adisc = fc_frame_payload_get(fp, sizeof(*adisc));
+ adisc->adisc_cmd = ELS_LS_ACC;
+ fc_fill_reply_hdr(fp, in_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+drop:
+ fc_frame_free(in_fp);
+}
+
+/**
+ * fc_rport_recv_rls_req() - Handle received Read Link Status request
+ * @rdata: The remote port that sent the RLS request
+ * @rx_fp: The RLS request frame
+ */
+static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
+ struct fc_frame *rx_fp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+ struct fc_els_rls *rls;
+ struct fc_els_rls_resp *rsp;
+ struct fc_els_lesb *lesb;
+ struct fc_seq_els_data rjt_data;
+ struct fc_host_statistics *hst;
+
+ lockdep_assert_held(&rdata->rp_mutex);
+
+ FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n",
+ fc_rport_state(rdata));
+
+ rls = fc_frame_payload_get(rx_fp, sizeof(*rls));
+ if (!rls) {
+ rjt_data.reason = ELS_RJT_PROT;
+ rjt_data.explan = ELS_EXPL_INV_LEN;
+ goto out_rjt;
+ }
+
+ fp = fc_frame_alloc(lport, sizeof(*rsp));
+ if (!fp) {
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_INSUF_RES;
+ goto out_rjt;
+ }
+
+ rsp = fc_frame_payload_get(fp, sizeof(*rsp));
+ memset(rsp, 0, sizeof(*rsp));
+ rsp->rls_cmd = ELS_LS_ACC;
+ lesb = &rsp->rls_lesb;
+ if (lport->tt.get_lesb) {
+ /* get LESB from LLD if it supports it */
+ lport->tt.get_lesb(lport, lesb);
+ } else {
+ fc_get_host_stats(lport->host);
+ hst = &lport->host_stats;
+ lesb->lesb_link_fail = htonl(hst->link_failure_count);
+ lesb->lesb_sync_loss = htonl(hst->loss_of_sync_count);
+ lesb->lesb_sig_loss = htonl(hst->loss_of_signal_count);
+ lesb->lesb_prim_err = htonl(hst->prim_seq_protocol_err_count);
+ lesb->lesb_inv_word = htonl(hst->invalid_tx_word_count);
+ lesb->lesb_inv_crc = htonl(hst->invalid_crc_count);
+ }
+
+ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+ goto out;
+
+out_rjt:
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+out:
+ fc_frame_free(rx_fp);
+}
+
+/**
+ * fc_rport_recv_els_req() - Handler for validated ELS requests
+ * @lport: The local port that received the ELS request
+ * @fp: The ELS request frame
+ *
+ * Handle incoming ELS requests that require port login.
+ * The ELS opcode has already been validated by the caller.
+ *
+ * Reference counting: does not modify kref
+ */
+static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_rport_priv *rdata;
+ struct fc_seq_els_data els_data;
+
+ lockdep_assert_held(&lport->lp_mutex);
+
+ rdata = fc_rport_lookup(lport, fc_frame_sid(fp));
+ if (!rdata) {
+ FC_RPORT_ID_DBG(lport, fc_frame_sid(fp),
+ "Received ELS 0x%02x from non-logged-in port\n",
+ fc_frame_payload_op(fp));
+ goto reject;
+ }
+
+ mutex_lock(&rdata->rp_mutex);
+
+ switch (rdata->rp_state) {
+ case RPORT_ST_PRLI:
+ case RPORT_ST_RTV:
+ case RPORT_ST_READY:
+ case RPORT_ST_ADISC:
+ break;
+ case RPORT_ST_PLOGI:
+ if (fc_frame_payload_op(fp) == ELS_PRLI) {
+ FC_RPORT_DBG(rdata, "Reject ELS PRLI "
+ "while in state %s\n",
+ fc_rport_state(rdata));
+ mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ goto busy;
+ }
+ fallthrough;
+ default:
+ FC_RPORT_DBG(rdata,
+ "Reject ELS 0x%02x while in state %s\n",
+ fc_frame_payload_op(fp), fc_rport_state(rdata));
+ mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ goto reject;
+ }
+
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_PRLI:
+ fc_rport_recv_prli_req(rdata, fp);
+ break;
+ case ELS_PRLO:
+ fc_rport_recv_prlo_req(rdata, fp);
+ break;
+ case ELS_ADISC:
+ fc_rport_recv_adisc_req(rdata, fp);
+ break;
+ case ELS_RRQ:
+ fc_seq_els_rsp_send(fp, ELS_RRQ, NULL);
+ fc_frame_free(fp);
+ break;
+ case ELS_REC:
+ fc_seq_els_rsp_send(fp, ELS_REC, NULL);
+ fc_frame_free(fp);
+ break;
+ case ELS_RLS:
+ fc_rport_recv_rls_req(rdata, fp);
+ break;
+ case ELS_RTV:
+ fc_rport_recv_rtv_req(rdata, fp);
+ break;
+ default:
+ fc_frame_free(fp); /* can't happen */
+ break;
+ }
+
+ mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ return;
+
+reject:
+ els_data.reason = ELS_RJT_UNAB;
+ els_data.explan = ELS_EXPL_PLOGI_REQD;
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+ fc_frame_free(fp);
+ return;
+
+busy:
+ els_data.reason = ELS_RJT_BUSY;
+ els_data.explan = ELS_EXPL_NONE;
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+ fc_frame_free(fp);
+ return;
+}
+
+/**
+ * fc_rport_recv_req() - Handler for requests
+ * @lport: The local port that received the request
+ * @fp: The request frame
+ *
+ * Reference counting: does not modify kref
+ */
+void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_seq_els_data els_data;
+
+ lockdep_assert_held(&lport->lp_mutex);
+
+ /*
+ * Handle FLOGI, PLOGI and LOGO requests separately, since they
+ * don't require prior login.
+ * Check for unsupported opcodes first and reject them.
+ * For some ops, it would be incorrect to reject with "PLOGI required".
+ */
+ switch (fc_frame_payload_op(fp)) {
+ case ELS_FLOGI:
+ fc_rport_recv_flogi_req(lport, fp);
+ break;
+ case ELS_PLOGI:
+ fc_rport_recv_plogi_req(lport, fp);
+ break;
+ case ELS_LOGO:
+ fc_rport_recv_logo_req(lport, fp);
+ break;
+ case ELS_PRLI:
+ case ELS_PRLO:
+ case ELS_ADISC:
+ case ELS_RRQ:
+ case ELS_REC:
+ case ELS_RLS:
+ case ELS_RTV:
+ fc_rport_recv_els_req(lport, fp);
+ break;
+ default:
+ els_data.reason = ELS_RJT_UNSUP;
+ els_data.explan = ELS_EXPL_NONE;
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &els_data);
+ fc_frame_free(fp);
+ break;
+ }
+}
+EXPORT_SYMBOL(fc_rport_recv_req);
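+
+/*
+ * Dispatch summary (sketch): FLOGI, PLOGI and LOGO are handled directly
+ * because they may legitimately arrive before any login exists; PRLI,
+ * PRLO, ADISC, RRQ, REC, RLS and RTV go through fc_rport_recv_els_req(),
+ * which rejects them with "PLOGI required" unless the sender is already
+ * logged in; any other opcode gets an LS_RJT with reason "unsupported".
+ */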
+
+/**
+ * fc_rport_recv_plogi_req() - Handler for Port Login (PLOGI) requests
+ * @lport: The local port that received the PLOGI request
+ * @rx_fp: The PLOGI request frame
+ *
+ * Reference counting: increments kref on return
+ */
+static void fc_rport_recv_plogi_req(struct fc_lport *lport,
+ struct fc_frame *rx_fp)
+{
+ struct fc_disc *disc;
+ struct fc_rport_priv *rdata;
+ struct fc_frame *fp = rx_fp;
+ struct fc_els_flogi *pl;
+ struct fc_seq_els_data rjt_data;
+ u32 sid;
+
+ lockdep_assert_held(&lport->lp_mutex);
+
+ sid = fc_frame_sid(fp);
+
+ FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");
+
+ pl = fc_frame_payload_get(fp, sizeof(*pl));
+ if (!pl) {
+ FC_RPORT_ID_DBG(lport, sid, "Received PLOGI too short\n");
+ rjt_data.reason = ELS_RJT_PROT;
+ rjt_data.explan = ELS_EXPL_INV_LEN;
+ goto reject;
+ }
+
+ disc = &lport->disc;
+ mutex_lock(&disc->disc_mutex);
+ rdata = fc_rport_create(lport, sid);
+ if (!rdata) {
+ mutex_unlock(&disc->disc_mutex);
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_INSUF_RES;
+ goto reject;
+ }
+
+ mutex_lock(&rdata->rp_mutex);
+ mutex_unlock(&disc->disc_mutex);
+
+ rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn);
+ rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn);
+
+ /*
+ * If the rport was just created, possibly due to the incoming PLOGI,
+ * set the state appropriately and accept the PLOGI.
+ *
+ * If we had also sent a PLOGI, and if the received PLOGI is from a
+ * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
+ * "command already in progress".
+ *
+ * XXX TBD: If the session was ready before, the PLOGI should result in
+ * all outstanding exchanges being reset.
+ */
+ switch (rdata->rp_state) {
+ case RPORT_ST_INIT:
+ FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n");
+ break;
+ case RPORT_ST_PLOGI_WAIT:
+ FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI_WAIT state\n");
+ break;
+ case RPORT_ST_PLOGI:
+ FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n");
+ if (rdata->ids.port_name < lport->wwpn) {
+ mutex_unlock(&rdata->rp_mutex);
+ rjt_data.reason = ELS_RJT_INPROG;
+ rjt_data.explan = ELS_EXPL_NONE;
+ goto reject;
+ }
+ break;
+ case RPORT_ST_PRLI:
+ case RPORT_ST_RTV:
+ case RPORT_ST_READY:
+ case RPORT_ST_ADISC:
+ FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
+ "- ignored for now\n", rdata->rp_state);
+ /* XXX TBD - should reset */
+ break;
+ case RPORT_ST_FLOGI:
+ case RPORT_ST_DELETE:
+ FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n",
+ fc_rport_state(rdata));
+ mutex_unlock(&rdata->rp_mutex);
+ rjt_data.reason = ELS_RJT_BUSY;
+ rjt_data.explan = ELS_EXPL_NONE;
+ goto reject;
+ }
+ if (!fc_rport_compatible_roles(lport, rdata)) {
+ FC_RPORT_DBG(rdata, "Received PLOGI for incompatible role\n");
+ mutex_unlock(&rdata->rp_mutex);
+ rjt_data.reason = ELS_RJT_LOGIC;
+ rjt_data.explan = ELS_EXPL_NONE;
+ goto reject;
+ }
+
+ /*
+ * Get session payload size from incoming PLOGI.
+ */
+ rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs);
+
+ /*
+ * Send LS_ACC. If this fails, the originator should retry.
+ */
+ fp = fc_frame_alloc(lport, sizeof(*pl));
+ if (!fp)
+ goto out;
+
+ fc_plogi_fill(lport, fp, ELS_LS_ACC);
+ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+ fc_rport_enter_prli(rdata);
+out:
+ mutex_unlock(&rdata->rp_mutex);
+ fc_frame_free(rx_fp);
+ return;
+
+reject:
+ fc_seq_els_rsp_send(fp, ELS_LS_RJT, &rjt_data);
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_rport_recv_prli_req() - Handler for process login (PRLI) requests
+ * @rdata: The remote port that sent the PRLI request
+ * @rx_fp: The PRLI request frame
+ */
+static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
+ struct fc_frame *rx_fp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+ struct {
+ struct fc_els_prli prli;
+ struct fc_els_spp spp;
+ } *pp;
+ struct fc_els_spp *rspp; /* request service param page */
+ struct fc_els_spp *spp; /* response spp */
+ unsigned int len;
+ unsigned int plen;
+ enum fc_els_spp_resp resp;
+ struct fc_seq_els_data rjt_data;
+ struct fc4_prov *prov;
+
+ lockdep_assert_held(&rdata->rp_mutex);
+
+ FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
+ fc_rport_state(rdata));
+
+ len = fr_len(rx_fp) - sizeof(struct fc_frame_header);
+ pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
+ if (!pp)
+ goto reject_len;
+ plen = ntohs(pp->prli.prli_len);
+ if ((plen % 4) != 0 || plen > len || plen < 16)
+ goto reject_len;
+ if (plen < len)
+ len = plen;
+ plen = pp->prli.prli_spp_len;
+ if ((plen % 4) != 0 || plen < sizeof(*spp) ||
+ plen > len || len < sizeof(*pp) || plen < 12)
+ goto reject_len;
+ rspp = &pp->spp;
+
+ fp = fc_frame_alloc(lport, len);
+ if (!fp) {
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_INSUF_RES;
+ goto reject;
+ }
+ pp = fc_frame_payload_get(fp, len);
+ WARN_ON(!pp);
+ memset(pp, 0, len);
+ pp->prli.prli_cmd = ELS_LS_ACC;
+ pp->prli.prli_spp_len = plen;
+ pp->prli.prli_len = htons(len);
+ len -= sizeof(struct fc_els_prli);
+
+ /*
+ * Go through all the service parameter pages and build
+ * response. If plen indicates longer SPP than standard,
+ * use that. The entire response has been pre-cleared above.
+ */
+ spp = &pp->spp;
+ mutex_lock(&fc_prov_mutex);
+ while (len >= plen) {
+ rdata->spp_type = rspp->spp_type;
+ spp->spp_type = rspp->spp_type;
+ spp->spp_type_ext = rspp->spp_type_ext;
+ resp = 0;
+
+ if (rspp->spp_type < FC_FC4_PROV_SIZE) {
+ enum fc_els_spp_resp active = 0, passive = 0;
+
+ prov = fc_active_prov[rspp->spp_type];
+ if (prov)
+ active = prov->prli(rdata, plen, rspp, spp);
+ prov = fc_passive_prov[rspp->spp_type];
+ if (prov)
+ passive = prov->prli(rdata, plen, rspp, spp);
+ if (!active || passive == FC_SPP_RESP_ACK)
+ resp = passive;
+ else
+ resp = active;
+ FC_RPORT_DBG(rdata, "PRLI rspp type %x "
+ "active %x passive %x\n",
+ rspp->spp_type, active, passive);
+ }
+ if (!resp) {
+ if (spp->spp_flags & FC_SPP_EST_IMG_PAIR)
+ resp |= FC_SPP_RESP_CONF;
+ else
+ resp |= FC_SPP_RESP_INVL;
+ }
+ spp->spp_flags |= resp;
+ len -= plen;
+ rspp = (struct fc_els_spp *)((char *)rspp + plen);
+ spp = (struct fc_els_spp *)((char *)spp + plen);
+ }
+ mutex_unlock(&fc_prov_mutex);
+
+ /*
+ * Send LS_ACC. If this fails, the originator should retry.
+ */
+ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+
+ goto drop;
+
+reject_len:
+ rjt_data.reason = ELS_RJT_PROT;
+ rjt_data.explan = ELS_EXPL_INV_LEN;
+reject:
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+drop:
+ fc_frame_free(rx_fp);
+}
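+
+/*
+ * A worked example for the length handling above (values assumed for
+ * illustration): a standard single-page FCP PRLI carries prli_len = 20,
+ * i.e. a 4-byte PRLI header plus one 16-byte service parameter page
+ * with prli_spp_len = 16; after subtracting sizeof(struct fc_els_prli)
+ * the page-walk loop runs exactly once, and a multi-page request simply
+ * advances rspp and spp by plen per iteration.
+ */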
+
+/**
+ * fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests
+ * @rdata: The remote port that sent the PRLO request
+ * @rx_fp: The PRLO request frame
+ */
+static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
+ struct fc_frame *rx_fp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ struct fc_frame *fp;
+ struct {
+ struct fc_els_prlo prlo;
+ struct fc_els_spp spp;
+ } *pp;
+ struct fc_els_spp *rspp; /* request service param page */
+ struct fc_els_spp *spp; /* response spp */
+ unsigned int len;
+ unsigned int plen;
+ struct fc_seq_els_data rjt_data;
+
+ lockdep_assert_held(&rdata->rp_mutex);
+
+ FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
+ fc_rport_state(rdata));
+
+ len = fr_len(rx_fp) - sizeof(struct fc_frame_header);
+ pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
+ if (!pp)
+ goto reject_len;
+ plen = ntohs(pp->prlo.prlo_len);
+ if (plen != 20)
+ goto reject_len;
+ if (plen < len)
+ len = plen;
+
+ rspp = &pp->spp;
+
+ fp = fc_frame_alloc(lport, len);
+ if (!fp) {
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_INSUF_RES;
+ goto reject;
+ }
+
+ pp = fc_frame_payload_get(fp, len);
+ WARN_ON(!pp);
+ memset(pp, 0, len);
+ pp->prlo.prlo_cmd = ELS_LS_ACC;
+ pp->prlo.prlo_obs = 0x10;
+ pp->prlo.prlo_len = htons(len);
+ spp = &pp->spp;
+ spp->spp_type = rspp->spp_type;
+ spp->spp_type_ext = rspp->spp_type_ext;
+ spp->spp_flags = FC_SPP_RESP_ACK;
+
+ fc_rport_enter_prli(rdata);
+
+ fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_ELS_REP, 0);
+ lport->tt.frame_send(lport, fp);
+ goto drop;
+
+reject_len:
+ rjt_data.reason = ELS_RJT_PROT;
+ rjt_data.explan = ELS_EXPL_INV_LEN;
+reject:
+ fc_seq_els_rsp_send(rx_fp, ELS_LS_RJT, &rjt_data);
+drop:
+ fc_frame_free(rx_fp);
+}
+
+/**
+ * fc_rport_recv_logo_req() - Handler for logout (LOGO) requests
+ * @lport: The local port that received the LOGO request
+ * @fp: The LOGO request frame
+ *
+ * Reference counting: drops kref on return
+ */
+static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
+{
+ struct fc_rport_priv *rdata;
+ u32 sid;
+
+ lockdep_assert_held(&lport->lp_mutex);
+
+ fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
+
+ sid = fc_frame_sid(fp);
+
+ rdata = fc_rport_lookup(lport, sid);
+ if (rdata) {
+ mutex_lock(&rdata->rp_mutex);
+ FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
+ fc_rport_state(rdata));
+
+ fc_rport_enter_delete(rdata, RPORT_EV_STOP);
+ mutex_unlock(&rdata->rp_mutex);
+ kref_put(&rdata->kref, fc_rport_destroy);
+ } else
+ FC_RPORT_ID_DBG(lport, sid,
+ "Received LOGO from non-logged-in port\n");
+ fc_frame_free(fp);
+}
+
+/**
+ * fc_rport_flush_queue() - Flush the rport_event_queue
+ */
+void fc_rport_flush_queue(void)
+{
+ flush_workqueue(rport_event_queue);
+}
+EXPORT_SYMBOL(fc_rport_flush_queue);
+
+/**
+ * fc_rport_fcp_prli() - Handle incoming PRLI for the FCP initiator.
+ * @rdata: remote port private
+ * @spp_len: service parameter page length
+ * @rspp: received service parameter page
+ * @spp: response service parameter page
+ *
+ * Returns the value for the response code to be placed in spp_flags,
+ * or 0 if the local port is not an initiator.
+ */
+static int fc_rport_fcp_prli(struct fc_rport_priv *rdata, u32 spp_len,
+ const struct fc_els_spp *rspp,
+ struct fc_els_spp *spp)
+{
+ struct fc_lport *lport = rdata->local_port;
+ u32 fcp_parm;
+
+ fcp_parm = ntohl(rspp->spp_params);
+ rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+ if (fcp_parm & FCP_SPPF_INIT_FCN)
+ rdata->ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (fcp_parm & FCP_SPPF_TARG_FCN)
+ rdata->ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+ if (fcp_parm & FCP_SPPF_RETRY)
+ rdata->flags |= FC_RP_FLAGS_RETRY;
+ rdata->supported_classes = FC_COS_CLASS3;
+
+ if (!(lport->service_params & FCP_SPPF_INIT_FCN))
+ return 0;
+
+ spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
+
+ /*
+ * OR in our service parameters with other providers (target), if any.
+ */
+ fcp_parm = ntohl(spp->spp_params);
+ spp->spp_params = htonl(fcp_parm | lport->service_params);
+ return FC_SPP_RESP_ACK;
+}
+
+/*
+ * FC-4 provider ops for FCP initiator.
+ */
+struct fc4_prov fc_rport_fcp_init = {
+ .prli = fc_rport_fcp_prli,
+};
+
+/**
+ * fc_rport_t0_prli() - Handle incoming PRLI parameters for type 0
+ * @rdata: remote port private
+ * @spp_len: service parameter page length
+ * @rspp: received service parameter page
+ * @spp: response service parameter page
+ *
+ * Returns FC_SPP_RESP_INVL if the request tries to establish an image
+ * pair, FC_SPP_RESP_ACK otherwise.
+ */
+static int fc_rport_t0_prli(struct fc_rport_priv *rdata, u32 spp_len,
+ const struct fc_els_spp *rspp,
+ struct fc_els_spp *spp)
+{
+ if (rspp->spp_flags & FC_SPP_EST_IMG_PAIR)
+ return FC_SPP_RESP_INVL;
+ return FC_SPP_RESP_ACK;
+}
+
+/*
+ * FC-4 provider ops for type 0 service parameters.
+ *
+ * This handles the special case of type 0 which is always successful
+ * but doesn't do anything otherwise.
+ */
+struct fc4_prov fc_rport_t0_prov = {
+ .prli = fc_rport_t0_prli,
+};
+
+/**
+ * fc_setup_rport() - Initialize the rport_event_queue
+ */
+int fc_setup_rport(void)
+{
+ rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
+ if (!rport_event_queue)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * fc_destroy_rport() - Destroy the rport_event_queue
+ */
+void fc_destroy_rport(void)
+{
+ destroy_workqueue(rport_event_queue);
+}
+
+/**
+ * fc_rport_terminate_io() - Stop all outstanding I/O on a remote port
+ * @rport: The remote port whose I/O should be terminated
+ */
+void fc_rport_terminate_io(struct fc_rport *rport)
+{
+ struct fc_rport_libfc_priv *rpriv = rport->dd_data;
+ struct fc_lport *lport = rpriv->local_port;
+
+ lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
+ lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
+}
+EXPORT_SYMBOL(fc_rport_terminate_io);