Diffstat (limited to 'drivers/s390/net')
33 files changed, 31146 insertions, 0 deletions
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
new file mode 100644
index 000000000..bf236d474
--- /dev/null
+++ b/drivers/s390/net/Kconfig
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: GPL-2.0
+menu "S/390 network device drivers"
+	depends on NETDEVICES && S390
+
+config LCS
+	def_tristate m
+	prompt "Lan Channel Station Interface"
+	depends on CCW && NETDEVICES && (ETHERNET || FDDI)
+	help
+	  Select this option if you want to use LCS networking on IBM System z.
+	  This device driver supports FDDI (IEEE 802.7) and Ethernet.
+	  To compile as a module, choose M. The module name is lcs.
+	  If you do not know what it is, it's safe to choose Y.
+
+config CTCM
+	def_tristate m
+	prompt "CTC and MPC SNA device support"
+	depends on CCW && NETDEVICES
+	help
+	  Select this option if you want to use channel-to-channel
+	  point-to-point networking on IBM System z.
+	  This device driver supports real CTC coupling using ESCON.
+	  It also supports virtual CTCs when running under VM.
+	  This driver also supports channel-to-channel MPC SNA devices.
+	  MPC is an SNA protocol device used by Communication Server for Linux.
+	  To compile as a module, choose M. The module name is ctcm.
+	  To compile into the kernel, choose Y.
+	  If you do not need any channel-to-channel connection, choose N.
+
+config NETIUCV
+	def_tristate m
+	prompt "IUCV network device support (VM only)"
+	depends on IUCV && NETDEVICES
+	help
+	  Select this option if you want to use inter-user communication
+	  vehicle networking under VM or VIF. It enables a fast communication
+	  link between VM guests. Using ifconfig a point-to-point connection
+	  can be established to the Linux on IBM System z
+	  running on the other VM guest. To compile as a module, choose M.
+	  The module name is netiucv. If unsure, choose Y.
+
+config SMSGIUCV
+	def_tristate m
+	prompt "IUCV special message support (VM only)"
+	depends on IUCV
+	help
+	  Select this option if you want to be able to receive SMSG messages
+	  from other VM guest systems.
+
+config SMSGIUCV_EVENT
+	def_tristate m
+	prompt "Deliver IUCV special messages as uevents (VM only)"
+	depends on SMSGIUCV
+	help
+	  Select this option to deliver CP special messages (SMSGs) as
+	  uevents. The driver handles only those special messages that
+	  start with "APP".
+
+	  To compile as a module, choose M. The module name is "smsgiucv_app".
+
+config QETH
+	def_tristate y
+	prompt "Gigabit Ethernet device support"
+	depends on CCW && NETDEVICES && IP_MULTICAST && QDIO && ETHERNET
+	help
+	  This driver supports IBM's OSA Express network adapters in QDIO mode,
+	  HiperSockets interfaces and z/VM virtual NICs for Guest LAN and
+	  VSWITCH.
+
+	  To compile this driver as a module, choose M.
+	  The module name is qeth.
+
+config QETH_L2
+	def_tristate y
+	prompt "qeth layer 2 device support"
+	depends on QETH
+	help
+	  Select this option to be able to run qeth devices in layer 2 mode.
+	  To compile as a module, choose M. The module name is qeth_l2.
+	  If unsure, choose y.
+
+config QETH_L3
+	def_tristate y
+	prompt "qeth layer 3 device support"
+	depends on QETH
+	help
+	  Select this option to be able to run qeth devices in layer 3 mode.
+	  To compile as a module choose M. The module name is qeth_l3.
+	  If unsure, choose Y.
+
+config QETH_OSN
+	def_bool !HAVE_MARCH_Z14_FEATURES
+	prompt "qeth OSN device support"
+	depends on QETH
+	help
+	  This enables the qeth driver to support devices in OSN mode.
+	  This feature will be removed in 2021.
+	  If unsure, choose N.
+
+config QETH_OSX
+	def_bool !HAVE_MARCH_Z15_FEATURES
+	prompt "qeth OSX device support"
+	depends on QETH
+	help
+	  This enables the qeth driver to support devices in OSX mode.
+	  If unsure, choose N.
+
+config CCWGROUP
+	tristate
+	default (LCS || CTCM || QETH || SMC)
+
+config ISM
+	tristate "Support for ISM vPCI Adapter"
+	depends on PCI && SMC
+	default n
+	help
+	  Select this option if you want to use the Internal Shared Memory
+	  vPCI Adapter.
+
+	  To compile as a module choose M. The module name is ism.
+	  If unsure, choose N.
+endmenu
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
new file mode 100644
index 000000000..bc55ec316
--- /dev/null
+++ b/drivers/s390/net/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# S/390 network devices
+#
+
+ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o
+obj-$(CONFIG_CTCM) += ctcm.o fsm.o
+obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
+obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
+obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o
+obj-$(CONFIG_LCS) += lcs.o
+qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o qeth_ethtool.o
+obj-$(CONFIG_QETH) += qeth.o
+qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o
+obj-$(CONFIG_QETH_L2) += qeth_l2.o
+qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o
+obj-$(CONFIG_QETH_L3) += qeth_l3.o
+
+ism-y := ism_drv.o
+obj-$(CONFIG_ISM) += ism.o
diff --git a/drivers/s390/net/ctcm_dbug.c b/drivers/s390/net/ctcm_dbug.c
new file mode 100644
index 000000000..f7ec51db3
--- /dev/null
+++ b/drivers/s390/net/ctcm_dbug.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2001, 2007
+ * Authors: Peter Tiedemann (ptiedem@de.ibm.com)
+ *
+ */
+
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/ctype.h>
+#include <linux/sysctl.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include "ctcm_dbug.h"
+
+/*
+ * Debug Facility Stuff
+ */
+
+struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS] = {
+	[CTCM_DBF_SETUP]	= {"ctc_setup", 8, 1, 64, CTC_DBF_INFO, NULL},
+	[CTCM_DBF_ERROR]	= {"ctc_error", 8, 1, 64, CTC_DBF_ERROR, NULL},
+	[CTCM_DBF_TRACE]	= {"ctc_trace", 8, 1, 64, CTC_DBF_ERROR, NULL},
+	[CTCM_DBF_MPC_SETUP]	= {"mpc_setup", 8, 1, 80, CTC_DBF_INFO, NULL},
+	[CTCM_DBF_MPC_ERROR]	= {"mpc_error", 8, 1, 80, CTC_DBF_ERROR, NULL},
+	[CTCM_DBF_MPC_TRACE]	= {"mpc_trace", 8, 1, 80, CTC_DBF_ERROR, NULL},
+};
+
+void ctcm_unregister_dbf_views(void)
+{
+	int x;
+	for (x = 0; x < CTCM_DBF_INFOS; x++) {
+		debug_unregister(ctcm_dbf[x].id);
+		ctcm_dbf[x].id = NULL;
+	}
+}
+
+int ctcm_register_dbf_views(void)
+{
+	int x;
+	for (x = 0; x < CTCM_DBF_INFOS; x++) {
+		/* register the areas */
+		ctcm_dbf[x].id = debug_register(ctcm_dbf[x].name,
+						ctcm_dbf[x].pages,
+						ctcm_dbf[x].areas,
+						ctcm_dbf[x].len);
+		if (ctcm_dbf[x].id == NULL) {
+			ctcm_unregister_dbf_views();
+			return -ENOMEM;
+		}
+
+		/* register a view */
+		debug_register_view(ctcm_dbf[x].id, &debug_hex_ascii_view);
+		/* set a passing level */
+		debug_set_level(ctcm_dbf[x].id, ctcm_dbf[x].level);
+	}
+
+	return 0;
+}
+
+void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *fmt, ...)
+{ + char dbf_txt_buf[64]; + va_list args; + + if (!debug_level_enabled(ctcm_dbf[dbf_nix].id, level)) + return; + va_start(args, fmt); + vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); + va_end(args); + + debug_text_event(ctcm_dbf[dbf_nix].id, level, dbf_txt_buf); +} + diff --git a/drivers/s390/net/ctcm_dbug.h b/drivers/s390/net/ctcm_dbug.h new file mode 100644 index 000000000..675575ef1 --- /dev/null +++ b/drivers/s390/net/ctcm_dbug.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2001, 2007 + * Authors: Peter Tiedemann (ptiedem@de.ibm.com) + * + */ + +#ifndef _CTCM_DBUG_H_ +#define _CTCM_DBUG_H_ + +/* + * Debug Facility stuff + */ + +#include <asm/debug.h> + +#ifdef DEBUG + #define do_debug 1 +#else + #define do_debug 0 +#endif +#ifdef DEBUGCCW + #define do_debug_ccw 1 + #define DEBUGDATA 1 +#else + #define do_debug_ccw 0 +#endif +#ifdef DEBUGDATA + #define do_debug_data 1 +#else + #define do_debug_data 0 +#endif + +/* define dbf debug levels similar to kernel msg levels */ +#define CTC_DBF_ALWAYS 0 /* always print this */ +#define CTC_DBF_EMERG 0 /* system is unusable */ +#define CTC_DBF_ALERT 1 /* action must be taken immediately */ +#define CTC_DBF_CRIT 2 /* critical conditions */ +#define CTC_DBF_ERROR 3 /* error conditions */ +#define CTC_DBF_WARN 4 /* warning conditions */ +#define CTC_DBF_NOTICE 5 /* normal but significant condition */ +#define CTC_DBF_INFO 5 /* informational */ +#define CTC_DBF_DEBUG 6 /* debug-level messages */ + +enum ctcm_dbf_names { + CTCM_DBF_SETUP, + CTCM_DBF_ERROR, + CTCM_DBF_TRACE, + CTCM_DBF_MPC_SETUP, + CTCM_DBF_MPC_ERROR, + CTCM_DBF_MPC_TRACE, + CTCM_DBF_INFOS /* must be last element */ +}; + +struct ctcm_dbf_info { + char name[DEBUG_MAX_NAME_LEN]; + int pages; + int areas; + int len; + int level; + debug_info_t *id; +}; + +extern struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS]; + +int ctcm_register_dbf_views(void); +void ctcm_unregister_dbf_views(void); +void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *text, ...); + +static inline const char *strtail(const char *s, int n) +{ + int l = strlen(s); + return (l > n) ? s + (l - n) : s; +} + +#define CTCM_FUNTAIL strtail((char *)__func__, 16) + +#define CTCM_DBF_TEXT(name, level, text) \ + do { \ + debug_text_event(ctcm_dbf[CTCM_DBF_##name].id, level, text); \ + } while (0) + +#define CTCM_DBF_HEX(name, level, addr, len) \ + do { \ + debug_event(ctcm_dbf[CTCM_DBF_##name].id, \ + level, (void *)(addr), len); \ + } while (0) + +#define CTCM_DBF_TEXT_(name, level, text...) \ + ctcm_dbf_longtext(CTCM_DBF_##name, level, text) + +/* + * cat : one of {setup, mpc_setup, trace, mpc_trace, error, mpc_error}. + * dev : netdevice with valid name field. + * text: any text string. + */ +#define CTCM_DBF_DEV_NAME(cat, dev, text) \ + do { \ + CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%s) :- %s", \ + CTCM_FUNTAIL, dev->name, text); \ + } while (0) + +#define MPC_DBF_DEV_NAME(cat, dev, text) \ + do { \ + CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%s) := %s", \ + CTCM_FUNTAIL, dev->name, text); \ + } while (0) + +#define CTCMY_DBF_DEV_NAME(cat, dev, text) \ + do { \ + if (IS_MPCDEV(dev)) \ + MPC_DBF_DEV_NAME(cat, dev, text); \ + else \ + CTCM_DBF_DEV_NAME(cat, dev, text); \ + } while (0) + +/* + * cat : one of {setup, mpc_setup, trace, mpc_trace, error, mpc_error}. + * dev : netdevice. + * text: any text string. 
+ */ +#define CTCM_DBF_DEV(cat, dev, text) \ + do { \ + CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%p) :-: %s", \ + CTCM_FUNTAIL, dev, text); \ + } while (0) + +#define MPC_DBF_DEV(cat, dev, text) \ + do { \ + CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%p) :=: %s", \ + CTCM_FUNTAIL, dev, text); \ + } while (0) + +#define CTCMY_DBF_DEV(cat, dev, text) \ + do { \ + if (IS_MPCDEV(dev)) \ + MPC_DBF_DEV(cat, dev, text); \ + else \ + CTCM_DBF_DEV(cat, dev, text); \ + } while (0) + +#endif diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c new file mode 100644 index 000000000..661d2a49b --- /dev/null +++ b/drivers/s390/net/ctcm_fsms.c @@ -0,0 +1,2291 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2001, 2007 + * Authors: Fritz Elfert (felfert@millenux.com) + * Peter Tiedemann (ptiedem@de.ibm.com) + * MPC additions : + * Belinda Thompson (belindat@us.ibm.com) + * Andy Richter (richtera@us.ibm.com) + */ + +#undef DEBUG +#undef DEBUGDATA +#undef DEBUGCCW + +#define KMSG_COMPONENT "ctcm" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/bitops.h> + +#include <linux/signal.h> +#include <linux/string.h> + +#include <linux/ip.h> +#include <linux/if_arp.h> +#include <linux/tcp.h> +#include <linux/skbuff.h> +#include <linux/ctype.h> +#include <net/dst.h> + +#include <linux/io.h> +#include <asm/ccwdev.h> +#include <asm/ccwgroup.h> +#include <linux/uaccess.h> + +#include <asm/idals.h> + +#include "fsm.h" + +#include "ctcm_dbug.h" +#include "ctcm_main.h" +#include "ctcm_fsms.h" + +const char *dev_state_names[] = { + [DEV_STATE_STOPPED] = "Stopped", + [DEV_STATE_STARTWAIT_RXTX] = "StartWait RXTX", + [DEV_STATE_STARTWAIT_RX] = "StartWait RX", + [DEV_STATE_STARTWAIT_TX] = "StartWait TX", + [DEV_STATE_STOPWAIT_RXTX] = "StopWait RXTX", + [DEV_STATE_STOPWAIT_RX] = "StopWait RX", + [DEV_STATE_STOPWAIT_TX] = "StopWait TX", + [DEV_STATE_RUNNING] = "Running", +}; + +const char *dev_event_names[] = { + [DEV_EVENT_START] = "Start", + [DEV_EVENT_STOP] = "Stop", + [DEV_EVENT_RXUP] = "RX up", + [DEV_EVENT_TXUP] = "TX up", + [DEV_EVENT_RXDOWN] = "RX down", + [DEV_EVENT_TXDOWN] = "TX down", + [DEV_EVENT_RESTART] = "Restart", +}; + +const char *ctc_ch_event_names[] = { + [CTC_EVENT_IO_SUCCESS] = "ccw_device success", + [CTC_EVENT_IO_EBUSY] = "ccw_device busy", + [CTC_EVENT_IO_ENODEV] = "ccw_device enodev", + [CTC_EVENT_IO_UNKNOWN] = "ccw_device unknown", + [CTC_EVENT_ATTNBUSY] = "Status ATTN & BUSY", + [CTC_EVENT_ATTN] = "Status ATTN", + [CTC_EVENT_BUSY] = "Status BUSY", + [CTC_EVENT_UC_RCRESET] = "Unit check remote reset", + [CTC_EVENT_UC_RSRESET] = "Unit check remote system reset", + [CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout", + [CTC_EVENT_UC_TXPARITY] = "Unit check TX parity", + [CTC_EVENT_UC_HWFAIL] = "Unit check Hardware failure", + [CTC_EVENT_UC_RXPARITY] = "Unit check RX parity", + [CTC_EVENT_UC_ZERO] = "Unit check ZERO", + [CTC_EVENT_UC_UNKNOWN] = "Unit check Unknown", + [CTC_EVENT_SC_UNKNOWN] = "SubChannel check Unknown", + [CTC_EVENT_MC_FAIL] = "Machine check failure", + [CTC_EVENT_MC_GOOD] = "Machine check operational", + [CTC_EVENT_IRQ] = "IRQ normal", + [CTC_EVENT_FINSTAT] = "IRQ final", + [CTC_EVENT_TIMER] = "Timer", + [CTC_EVENT_START] = "Start", + [CTC_EVENT_STOP] = "Stop", + /* + * additional MPC events + */ + [CTC_EVENT_SEND_XID] = "XID 
Exchange", + [CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer", +}; + +const char *ctc_ch_state_names[] = { + [CTC_STATE_IDLE] = "Idle", + [CTC_STATE_STOPPED] = "Stopped", + [CTC_STATE_STARTWAIT] = "StartWait", + [CTC_STATE_STARTRETRY] = "StartRetry", + [CTC_STATE_SETUPWAIT] = "SetupWait", + [CTC_STATE_RXINIT] = "RX init", + [CTC_STATE_TXINIT] = "TX init", + [CTC_STATE_RX] = "RX", + [CTC_STATE_TX] = "TX", + [CTC_STATE_RXIDLE] = "RX idle", + [CTC_STATE_TXIDLE] = "TX idle", + [CTC_STATE_RXERR] = "RX error", + [CTC_STATE_TXERR] = "TX error", + [CTC_STATE_TERM] = "Terminating", + [CTC_STATE_DTERM] = "Restarting", + [CTC_STATE_NOTOP] = "Not operational", + /* + * additional MPC states + */ + [CH_XID0_PENDING] = "Pending XID0 Start", + [CH_XID0_INPROGRESS] = "In XID0 Negotiations ", + [CH_XID7_PENDING] = "Pending XID7 P1 Start", + [CH_XID7_PENDING1] = "Active XID7 P1 Exchange ", + [CH_XID7_PENDING2] = "Pending XID7 P2 Start ", + [CH_XID7_PENDING3] = "Active XID7 P2 Exchange ", + [CH_XID7_PENDING4] = "XID7 Complete - Pending READY ", +}; + +static void ctcm_action_nop(fsm_instance *fi, int event, void *arg); + +/* + * ----- static ctcm actions for channel statemachine ----- + * +*/ +static void chx_txdone(fsm_instance *fi, int event, void *arg); +static void chx_rx(fsm_instance *fi, int event, void *arg); +static void chx_rxidle(fsm_instance *fi, int event, void *arg); +static void chx_firstio(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_start(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg); + +/* + * ----- static ctcmpc actions for ctcmpc channel statemachine ----- + * +*/ +static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg); +static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg); +static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg); +/* shared : +static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_start(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg); +static void 
ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg); +*/ +static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg); +static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *); +static void ctcmpc_chx_resend(fsm_instance *, int, void *); +static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg); + +/** + * Check return code of a preceding ccw_device call, halt_IO etc... + * + * ch : The channel, the error belongs to. + * Returns the error code (!= 0) to inspect. + */ +void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg) +{ + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): %s: %04x\n", + CTCM_FUNTAIL, ch->id, msg, rc); + switch (rc) { + case -EBUSY: + pr_info("%s: The communication peer is busy\n", + ch->id); + fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch); + break; + case -ENODEV: + pr_err("%s: The specified target device is not valid\n", + ch->id); + fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch); + break; + default: + pr_err("An I/O operation resulted in error %04x\n", + rc); + fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch); + } +} + +void ctcm_purge_skb_queue(struct sk_buff_head *q) +{ + struct sk_buff *skb; + + CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__); + + while ((skb = skb_dequeue(q))) { + refcount_dec(&skb->users); + dev_kfree_skb_any(skb); + } +} + +/** + * NOP action for statemachines + */ +static void ctcm_action_nop(fsm_instance *fi, int event, void *arg) +{ +} + +/* + * Actions for channel - statemachines. + */ + +/** + * Normal data has been send. Free the corresponding + * skb (it's in io_queue), reset dev->tbusy and + * revert to idle state. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. 
+ */ +static void chx_txdone(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct sk_buff *skb; + int first = 1; + int i; + unsigned long duration; + unsigned long done_stamp = jiffies; + + CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name); + + duration = done_stamp - ch->prof.send_stamp; + if (duration > ch->prof.tx_time) + ch->prof.tx_time = duration; + + if (ch->irb->scsw.cmd.count != 0) + CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, + "%s(%s): TX not complete, remaining %d bytes", + CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count); + fsm_deltimer(&ch->timer); + while ((skb = skb_dequeue(&ch->io_queue))) { + priv->stats.tx_packets++; + priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH; + if (first) { + priv->stats.tx_bytes += 2; + first = 0; + } + refcount_dec(&skb->users); + dev_kfree_skb_irq(skb); + } + spin_lock(&ch->collect_lock); + clear_normalized_cda(&ch->ccw[4]); + if (ch->collect_len > 0) { + int rc; + + if (ctcm_checkalloc_buffer(ch)) { + spin_unlock(&ch->collect_lock); + return; + } + ch->trans_skb->data = ch->trans_skb_data; + skb_reset_tail_pointer(ch->trans_skb); + ch->trans_skb->len = 0; + if (ch->prof.maxmulti < (ch->collect_len + 2)) + ch->prof.maxmulti = ch->collect_len + 2; + if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue)) + ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue); + *((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2; + i = 0; + while ((skb = skb_dequeue(&ch->collect_queue))) { + skb_copy_from_linear_data(skb, + skb_put(ch->trans_skb, skb->len), skb->len); + priv->stats.tx_packets++; + priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH; + refcount_dec(&skb->users); + dev_kfree_skb_irq(skb); + i++; + } + ch->collect_len = 0; + spin_unlock(&ch->collect_lock); + ch->ccw[1].count = ch->trans_skb->len; + fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); + ch->prof.send_stamp = jiffies; + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); + ch->prof.doios_multi++; + if (rc != 0) { + priv->stats.tx_dropped += i; + priv->stats.tx_errors += i; + fsm_deltimer(&ch->timer); + ctcm_ccw_check_rc(ch, rc, "chained TX"); + } + } else { + spin_unlock(&ch->collect_lock); + fsm_newstate(fi, CTC_STATE_TXIDLE); + } + ctcm_clear_busy_do(dev); +} + +/** + * Initial data is sent. + * Notify device statemachine that we are up and + * running. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + + CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name); + + fsm_deltimer(&ch->timer); + fsm_newstate(fi, CTC_STATE_TXIDLE); + fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev); +} + +/** + * Got normal data, check for sanity, queue it up, allocate new buffer + * trigger bottom half, and initiate next read. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. 
+ */ +static void chx_rx(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + int len = ch->max_bufsize - ch->irb->scsw.cmd.count; + struct sk_buff *skb = ch->trans_skb; + __u16 block_len = *((__u16 *)skb->data); + int check_len; + int rc; + + fsm_deltimer(&ch->timer); + if (len < 8) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, + "%s(%s): got packet with length %d < 8\n", + CTCM_FUNTAIL, dev->name, len); + priv->stats.rx_dropped++; + priv->stats.rx_length_errors++; + goto again; + } + if (len > ch->max_bufsize) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, + "%s(%s): got packet with length %d > %d\n", + CTCM_FUNTAIL, dev->name, len, ch->max_bufsize); + priv->stats.rx_dropped++; + priv->stats.rx_length_errors++; + goto again; + } + + /* + * VM TCP seems to have a bug sending 2 trailing bytes of garbage. + */ + switch (ch->protocol) { + case CTCM_PROTO_S390: + case CTCM_PROTO_OS390: + check_len = block_len + 2; + break; + default: + check_len = block_len; + break; + } + if ((len < block_len) || (len > check_len)) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, + "%s(%s): got block length %d != rx length %d\n", + CTCM_FUNTAIL, dev->name, block_len, len); + if (do_debug) + ctcmpc_dump_skb(skb, 0); + + *((__u16 *)skb->data) = len; + priv->stats.rx_dropped++; + priv->stats.rx_length_errors++; + goto again; + } + if (block_len > 2) { + *((__u16 *)skb->data) = block_len - 2; + ctcm_unpack_skb(ch, skb); + } + again: + skb->data = ch->trans_skb_data; + skb_reset_tail_pointer(skb); + skb->len = 0; + if (ctcm_checkalloc_buffer(ch)) + return; + ch->ccw[1].count = ch->max_bufsize; + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); + if (rc != 0) + ctcm_ccw_check_rc(ch, rc, "normal RX"); +} + +/** + * Initialize connection by sending a __u16 of value 0. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void chx_firstio(fsm_instance *fi, int event, void *arg) +{ + int rc; + struct channel *ch = arg; + int fsmstate = fsm_getstate(fi); + + CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, + "%s(%s) : %02x", + CTCM_FUNTAIL, ch->id, fsmstate); + + ch->sense_rc = 0; /* reset unit check report control */ + if (fsmstate == CTC_STATE_TXIDLE) + CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, + "%s(%s): remote side issued READ?, init.\n", + CTCM_FUNTAIL, ch->id); + fsm_deltimer(&ch->timer); + if (ctcm_checkalloc_buffer(ch)) + return; + if ((fsmstate == CTC_STATE_SETUPWAIT) && + (ch->protocol == CTCM_PROTO_OS390)) { + /* OS/390 resp. z/OS */ + if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { + *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN; + fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, + CTC_EVENT_TIMER, ch); + chx_rxidle(fi, event, arg); + } else { + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + fsm_newstate(fi, CTC_STATE_TXIDLE); + fsm_event(priv->fsm, DEV_EVENT_TXUP, dev); + } + return; + } + /* + * Don't setup a timer for receiving the initial RX frame + * if in compatibility mode, since VM TCP delays the initial + * frame until it has some data to send. + */ + if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) || + (ch->protocol != CTCM_PROTO_S390)) + fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); + + *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN; + ch->ccw[1].count = 2; /* Transfer only length */ + + fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) + ? 
CTC_STATE_RXINIT : CTC_STATE_TXINIT); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); + if (rc != 0) { + fsm_deltimer(&ch->timer); + fsm_newstate(fi, CTC_STATE_SETUPWAIT); + ctcm_ccw_check_rc(ch, rc, "init IO"); + } + /* + * If in compatibility mode since we don't setup a timer, we + * also signal RX channel up immediately. This enables us + * to send packets early which in turn usually triggers some + * reply from VM TCP which brings up the RX channel to it's + * final state. + */ + if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) && + (ch->protocol == CTCM_PROTO_S390)) { + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + fsm_event(priv->fsm, DEV_EVENT_RXUP, dev); + } +} + +/** + * Got initial data, check it. If OK, + * notify device statemachine that we are up and + * running. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void chx_rxidle(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + __u16 buflen; + int rc; + + fsm_deltimer(&ch->timer); + buflen = *((__u16 *)ch->trans_skb->data); + CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n", + __func__, dev->name, buflen); + + if (buflen >= CTCM_INITIAL_BLOCKLEN) { + if (ctcm_checkalloc_buffer(ch)) + return; + ch->ccw[1].count = ch->max_bufsize; + fsm_newstate(fi, CTC_STATE_RXIDLE); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); + if (rc != 0) { + fsm_newstate(fi, CTC_STATE_RXINIT); + ctcm_ccw_check_rc(ch, rc, "initial RX"); + } else + fsm_event(priv->fsm, DEV_EVENT_RXUP, dev); + } else { + CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n", + __func__, dev->name, + buflen, CTCM_INITIAL_BLOCKLEN); + chx_firstio(fi, event, arg); + } +} + +/** + * Set channel into extended mode. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + int rc; + unsigned long saveflags = 0; + int timeout = CTCM_TIME_5_SEC; + + fsm_deltimer(&ch->timer); + if (IS_MPC(ch)) { + timeout = 1500; + CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n", + __func__, smp_processor_id(), ch, ch->id); + } + fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch); + fsm_newstate(fi, CTC_STATE_SETUPWAIT); + CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2); + + if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */ + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + /* Such conditional locking is undeterministic in + * static view. => ignore sparse warnings here. */ + + rc = ccw_device_start(ch->cdev, &ch->ccw[6], 0, 0xff, 0); + if (event == CTC_EVENT_TIMER) /* see above comments */ + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); + if (rc != 0) { + fsm_deltimer(&ch->timer); + fsm_newstate(fi, CTC_STATE_STARTWAIT); + ctcm_ccw_check_rc(ch, rc, "set Mode"); + } else + ch->retry = 0; +} + +/** + * Setup channel. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. 
+ */ +static void ctcm_chx_start(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + unsigned long saveflags; + int rc; + + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s", + CTCM_FUNTAIL, ch->id, + (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX"); + + if (ch->trans_skb != NULL) { + clear_normalized_cda(&ch->ccw[1]); + dev_kfree_skb(ch->trans_skb); + ch->trans_skb = NULL; + } + if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { + ch->ccw[1].cmd_code = CCW_CMD_READ; + ch->ccw[1].flags = CCW_FLAG_SLI; + ch->ccw[1].count = 0; + } else { + ch->ccw[1].cmd_code = CCW_CMD_WRITE; + ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[1].count = 0; + } + if (ctcm_checkalloc_buffer(ch)) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, + "%s(%s): %s trans_skb alloc delayed " + "until first transfer", + CTCM_FUNTAIL, ch->id, + (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? + "RX" : "TX"); + } + ch->ccw[0].cmd_code = CCW_CMD_PREPARE; + ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[0].count = 0; + ch->ccw[0].cda = 0; + ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */ + ch->ccw[2].flags = CCW_FLAG_SLI; + ch->ccw[2].count = 0; + ch->ccw[2].cda = 0; + memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3); + ch->ccw[4].cda = 0; + ch->ccw[4].flags &= ~CCW_FLAG_IDA; + + fsm_newstate(fi, CTC_STATE_STARTWAIT); + fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch); + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + rc = ccw_device_halt(ch->cdev, 0); + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); + if (rc != 0) { + if (rc != -EBUSY) + fsm_deltimer(&ch->timer); + ctcm_ccw_check_rc(ch, rc, "initial HaltIO"); + } +} + +/** + * Shutdown a channel. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + unsigned long saveflags = 0; + int rc; + int oldstate; + + fsm_deltimer(&ch->timer); + if (IS_MPC(ch)) + fsm_deltimer(&ch->sweep_timer); + + fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); + + if (event == CTC_EVENT_STOP) /* only for STOP not yet locked */ + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + /* Such conditional locking is undeterministic in + * static view. => ignore sparse warnings here. */ + oldstate = fsm_getstate(fi); + fsm_newstate(fi, CTC_STATE_TERM); + rc = ccw_device_halt(ch->cdev, 0); + + if (event == CTC_EVENT_STOP) + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); + /* see remark above about conditional locking */ + + if (rc != 0 && rc != -EBUSY) { + fsm_deltimer(&ch->timer); + if (event != CTC_EVENT_STOP) { + fsm_newstate(fi, oldstate); + ctcm_ccw_check_rc(ch, rc, (char *)__func__); + } + } +} + +/** + * Cleanup helper for chx_fail and chx_stopped + * cleanup channels queue and notify interface statemachine. + * + * fi An instance of a channel statemachine. + * state The next state (depending on caller). + * ch The channel to operate on. 
+ */ +static void ctcm_chx_cleanup(fsm_instance *fi, int state, + struct channel *ch) +{ + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + + CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE, + "%s(%s): %s[%d]\n", + CTCM_FUNTAIL, dev->name, ch->id, state); + + fsm_deltimer(&ch->timer); + if (IS_MPC(ch)) + fsm_deltimer(&ch->sweep_timer); + + fsm_newstate(fi, state); + if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) { + clear_normalized_cda(&ch->ccw[1]); + dev_kfree_skb_any(ch->trans_skb); + ch->trans_skb = NULL; + } + + ch->th_seg = 0x00; + ch->th_seq_num = 0x00; + if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { + skb_queue_purge(&ch->io_queue); + fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); + } else { + ctcm_purge_skb_queue(&ch->io_queue); + if (IS_MPC(ch)) + ctcm_purge_skb_queue(&ch->sweep_queue); + spin_lock(&ch->collect_lock); + ctcm_purge_skb_queue(&ch->collect_queue); + ch->collect_len = 0; + spin_unlock(&ch->collect_lock); + fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); + } +} + +/** + * A channel has successfully been halted. + * Cleanup it's queue and notify interface statemachine. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg) +{ + ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg); +} + +/** + * A stop command from device statemachine arrived and we are in + * not operational mode. Set state to stopped. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg) +{ + fsm_newstate(fi, CTC_STATE_STOPPED); +} + +/** + * A machine check for no path, not operational status or gone device has + * happened. + * Cleanup queue and notify interface statemachine. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg) +{ + ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg); +} + +/** + * Handle error during setup of channel. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + + /* + * Special case: Got UC_RCRESET on setmode. + * This means that remote side isn't setup. In this case + * simply retry after some 10 secs... + */ + if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) && + ((event == CTC_EVENT_UC_RCRESET) || + (event == CTC_EVENT_UC_RSRESET))) { + fsm_newstate(fi, CTC_STATE_STARTRETRY); + fsm_deltimer(&ch->timer); + fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); + if (!IS_MPC(ch) && + (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) { + int rc = ccw_device_halt(ch->cdev, 0); + if (rc != 0) + ctcm_ccw_check_rc(ch, rc, + "HaltIO in chx_setuperr"); + } + return; + } + + CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT, + "%s(%s) : %s error during %s channel setup state=%s\n", + CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event], + (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? 
"RX" : "TX", + fsm_getstate_str(fi)); + + if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { + fsm_newstate(fi, CTC_STATE_RXERR); + fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); + } else { + fsm_newstate(fi, CTC_STATE_TXERR); + fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); + } +} + +/** + * Restart a channel after an error. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + unsigned long saveflags = 0; + int oldstate; + int rc; + + CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, + "%s: %s[%d] of %s\n", + CTCM_FUNTAIL, ch->id, event, dev->name); + + fsm_deltimer(&ch->timer); + + fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); + oldstate = fsm_getstate(fi); + fsm_newstate(fi, CTC_STATE_STARTWAIT); + if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */ + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + /* Such conditional locking is a known problem for + * sparse because its undeterministic in static view. + * Warnings should be ignored here. */ + rc = ccw_device_halt(ch->cdev, 0); + if (event == CTC_EVENT_TIMER) + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); + if (rc != 0) { + if (rc != -EBUSY) { + fsm_deltimer(&ch->timer); + fsm_newstate(fi, oldstate); + } + ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart"); + } +} + +/** + * Handle error during RX initial handshake (exchange of + * 0-length block header) + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + + if (event == CTC_EVENT_TIMER) { + if (!IS_MPCDEV(dev)) + /* TODO : check if MPC deletes timer somewhere */ + fsm_deltimer(&ch->timer); + if (ch->retry++ < 3) + ctcm_chx_restart(fi, event, arg); + else { + fsm_newstate(fi, CTC_STATE_RXERR); + fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); + } + } else { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, + ctc_ch_event_names[event], fsm_getstate_str(fi)); + + dev_warn(&dev->dev, + "Initialization failed with RX/TX init handshake " + "error %s\n", ctc_ch_event_names[event]); + } +} + +/** + * Notify device statemachine if we gave up initialization + * of RX channel. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): RX %s busy, init. fail", + CTCM_FUNTAIL, dev->name, ch->id); + fsm_newstate(fi, CTC_STATE_RXERR); + fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); +} + +/** + * Handle RX Unit check remote reset (remote disconnected) + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. 
+ */ +static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct channel *ch2; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + + CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, + "%s: %s: remote disconnect - re-init ...", + CTCM_FUNTAIL, dev->name); + fsm_deltimer(&ch->timer); + /* + * Notify device statemachine + */ + fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); + fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); + + fsm_newstate(fi, CTC_STATE_DTERM); + ch2 = priv->channel[CTCM_WRITE]; + fsm_newstate(ch2->fsm, CTC_STATE_DTERM); + + ccw_device_halt(ch->cdev, 0); + ccw_device_halt(ch2->cdev, 0); +} + +/** + * Handle error during TX channel initialization. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + + if (event == CTC_EVENT_TIMER) { + fsm_deltimer(&ch->timer); + if (ch->retry++ < 3) + ctcm_chx_restart(fi, event, arg); + else { + fsm_newstate(fi, CTC_STATE_TXERR); + fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); + } + } else { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, + ctc_ch_event_names[event], fsm_getstate_str(fi)); + + dev_warn(&dev->dev, + "Initialization failed with RX/TX init handshake " + "error %s\n", ctc_ch_event_names[event]); + } +} + +/** + * Handle TX timeout by retrying operation. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct sk_buff *skb; + + CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n", + __func__, smp_processor_id(), ch, ch->id); + + fsm_deltimer(&ch->timer); + if (ch->retry++ > 3) { + struct mpc_group *gptr = priv->mpcg; + CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO, + "%s: %s: retries exceeded", + CTCM_FUNTAIL, ch->id); + fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); + /* call restart if not MPC or if MPC and mpcg fsm is ready. + use gptr as mpc indicator */ + if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY))) + ctcm_chx_restart(fi, event, arg); + goto done; + } + + CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, + "%s : %s: retry %d", + CTCM_FUNTAIL, ch->id, ch->retry); + skb = skb_peek(&ch->io_queue); + if (skb) { + int rc = 0; + unsigned long saveflags = 0; + clear_normalized_cda(&ch->ccw[4]); + ch->ccw[4].count = skb->len; + if (set_normalized_cda(&ch->ccw[4], skb->data)) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO, + "%s: %s: IDAL alloc failed", + CTCM_FUNTAIL, ch->id); + fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); + ctcm_chx_restart(fi, event, arg); + goto done; + } + fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch); + if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */ + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + /* Such conditional locking is a known problem for + * sparse because its undeterministic in static view. + * Warnings should be ignored here. 
*/ + if (do_debug_ccw) + ctcmpc_dumpit((char *)&ch->ccw[3], + sizeof(struct ccw1) * 3); + + rc = ccw_device_start(ch->cdev, &ch->ccw[3], 0, 0xff, 0); + if (event == CTC_EVENT_TIMER) + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), + saveflags); + if (rc != 0) { + fsm_deltimer(&ch->timer); + ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry"); + ctcm_purge_skb_queue(&ch->io_queue); + } + } +done: + return; +} + +/** + * Handle fatal errors during an I/O command. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + int rd = CHANNEL_DIRECTION(ch->flags); + + fsm_deltimer(&ch->timer); + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s: %s: %s unrecoverable channel error", + CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX"); + + if (IS_MPC(ch)) { + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + } + if (rd == CTCM_READ) { + fsm_newstate(fi, CTC_STATE_RXERR); + fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); + } else { + fsm_newstate(fi, CTC_STATE_TXERR); + fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); + } +} + +/* + * The ctcm statemachine for a channel. + */ +const fsm_node ch_fsm[] = { + { CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop }, + { CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start }, + { CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop }, + { CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop }, + + { CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop }, + { CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop }, + { CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop }, + { CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start }, + + { CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode }, + { CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr }, + { CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode }, + { CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_action_nop }, + { CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, chx_firstio }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, chx_rxidle }, + { CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr }, + { CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr }, + { CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr }, + { CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail }, + { CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_RXINIT, 
CTC_EVENT_UC_ZERO, chx_firstio }, + { CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, chx_rx }, + { CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc }, + { CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, chx_rx }, + + { CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle }, + { CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr }, + { CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr }, + { CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr }, + { CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, chx_firstio }, + { CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_action_nop }, + { CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_action_nop }, + { CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop }, + { CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart }, + { CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped }, + { CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop }, + { CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop }, + { CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart }, + { CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode }, + { CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop }, + { CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop }, + { CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_TX, CTC_EVENT_FINSTAT, chx_txdone }, + { CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_txretry }, + { CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_txretry }, + { CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry }, + { CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, +}; + +int ch_fsm_len = ARRAY_SIZE(ch_fsm); + +/* + * MPC actions for mpc channel statemachine + * handling of MPC protocol requires extra + * statemachine and actions which are prefixed ctcmpc_ . + * The ctc_ch_states and ctc_ch_state_names, + * ctc_ch_events and ctc_ch_event_names share the ctcm definitions + * which are expanded by some elements. + */ + +/* + * Actions for mpc channel statemachine. + */ + +/** + * Normal data has been send. Free the corresponding + * skb (it's in io_queue), reset dev->tbusy and + * revert to idle state. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. 
+ */ +static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct sk_buff *skb; + int first = 1; + int i; + __u32 data_space; + unsigned long duration; + struct sk_buff *peekskb; + int rc; + struct th_header *header; + struct pdu *p_header; + unsigned long done_stamp = jiffies; + + CTCM_PR_DEBUG("Enter %s: %s cp:%i\n", + __func__, dev->name, smp_processor_id()); + + duration = done_stamp - ch->prof.send_stamp; + if (duration > ch->prof.tx_time) + ch->prof.tx_time = duration; + + if (ch->irb->scsw.cmd.count != 0) + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, + "%s(%s): TX not complete, remaining %d bytes", + CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count); + fsm_deltimer(&ch->timer); + while ((skb = skb_dequeue(&ch->io_queue))) { + priv->stats.tx_packets++; + priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH; + if (first) { + priv->stats.tx_bytes += 2; + first = 0; + } + refcount_dec(&skb->users); + dev_kfree_skb_irq(skb); + } + spin_lock(&ch->collect_lock); + clear_normalized_cda(&ch->ccw[4]); + if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) { + spin_unlock(&ch->collect_lock); + fsm_newstate(fi, CTC_STATE_TXIDLE); + goto done; + } + + if (ctcm_checkalloc_buffer(ch)) { + spin_unlock(&ch->collect_lock); + goto done; + } + ch->trans_skb->data = ch->trans_skb_data; + skb_reset_tail_pointer(ch->trans_skb); + ch->trans_skb->len = 0; + if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH)) + ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH; + if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue)) + ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue); + i = 0; + p_header = NULL; + data_space = grp->group_max_buflen - TH_HEADER_LENGTH; + + CTCM_PR_DBGDATA("%s: building trans_skb from collect_q" + " data_space:%04x\n", + __func__, data_space); + + while ((skb = skb_dequeue(&ch->collect_queue))) { + skb_put_data(ch->trans_skb, skb->data, skb->len); + p_header = (struct pdu *) + (skb_tail_pointer(ch->trans_skb) - skb->len); + p_header->pdu_flag = 0x00; + if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) + p_header->pdu_flag |= 0x60; + else + p_header->pdu_flag |= 0x20; + + CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n", + __func__, ch->trans_skb->len); + CTCM_PR_DBGDATA("%s: pdu header and data for up" + " to 32 bytes sent to vtam\n", __func__); + CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32)); + + ch->collect_len -= skb->len; + data_space -= skb->len; + priv->stats.tx_packets++; + priv->stats.tx_bytes += skb->len; + refcount_dec(&skb->users); + dev_kfree_skb_any(skb); + peekskb = skb_peek(&ch->collect_queue); + if (peekskb->len > data_space) + break; + i++; + } + /* p_header points to the last one we handled */ + if (p_header) + p_header->pdu_flag |= PDU_LAST; /*Say it's the last one*/ + header = kzalloc(TH_HEADER_LENGTH, gfp_type()); + if (!header) { + spin_unlock(&ch->collect_lock); + fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); + goto done; + } + header->th_ch_flag = TH_HAS_PDU; /* Normal data */ + ch->th_seq_num++; + header->th_seq_num = ch->th_seq_num; + + CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" , + __func__, ch->th_seq_num); + + memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header, + TH_HEADER_LENGTH); /* put the TH on the packet */ + + kfree(header); + + CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n", + __func__, ch->trans_skb->len); + CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb " + 
"data to vtam from collect_q\n", __func__); + CTCM_D3_DUMP((char *)ch->trans_skb->data, + min_t(int, ch->trans_skb->len, 50)); + + spin_unlock(&ch->collect_lock); + clear_normalized_cda(&ch->ccw[1]); + + CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n", + (void *)(unsigned long)ch->ccw[1].cda, + ch->trans_skb->data); + ch->ccw[1].count = ch->max_bufsize; + + if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) { + dev_kfree_skb_any(ch->trans_skb); + ch->trans_skb = NULL; + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR, + "%s: %s: IDAL alloc failed", + CTCM_FUNTAIL, ch->id); + fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); + return; + } + + CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n", + (void *)(unsigned long)ch->ccw[1].cda, + ch->trans_skb->data); + + ch->ccw[1].count = ch->trans_skb->len; + fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); + ch->prof.send_stamp = jiffies; + if (do_debug_ccw) + ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); + ch->prof.doios_multi++; + if (rc != 0) { + priv->stats.tx_dropped += i; + priv->stats.tx_errors += i; + fsm_deltimer(&ch->timer); + ctcm_ccw_check_rc(ch, rc, "chained TX"); + } +done: + ctcm_clear_busy(dev); + return; +} + +/** + * Got normal data, check for sanity, queue it up, allocate new buffer + * trigger bottom half, and initiate next read. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct sk_buff *skb = ch->trans_skb; + struct sk_buff *new_skb; + unsigned long saveflags = 0; /* avoids compiler warning */ + int len = ch->max_bufsize - ch->irb->scsw.cmd.count; + + CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n", + CTCM_FUNTAIL, dev->name, smp_processor_id(), + ch->id, ch->max_bufsize, len); + fsm_deltimer(&ch->timer); + + if (skb == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): TRANS_SKB = NULL", + CTCM_FUNTAIL, dev->name); + goto again; + } + + if (len < TH_HEADER_LENGTH) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): packet length %d to short", + CTCM_FUNTAIL, dev->name, len); + priv->stats.rx_dropped++; + priv->stats.rx_length_errors++; + } else { + /* must have valid th header or game over */ + __u32 block_len = len; + len = TH_HEADER_LENGTH + XID2_LENGTH + 4; + new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC); + + if (new_skb == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%d): skb allocation failed", + CTCM_FUNTAIL, dev->name); + fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); + goto again; + } + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_RESET: + case MPCG_STATE_INOP: + dev_kfree_skb_any(new_skb); + break; + case MPCG_STATE_FLOWC: + case MPCG_STATE_READY: + skb_put_data(new_skb, skb->data, block_len); + skb_queue_tail(&ch->io_queue, new_skb); + tasklet_schedule(&ch->ch_tasklet); + break; + default: + skb_put_data(new_skb, skb->data, len); + skb_queue_tail(&ch->io_queue, new_skb); + tasklet_hi_schedule(&ch->ch_tasklet); + break; + } + } + +again: + switch (fsm_getstate(grp->fsm)) { + int rc, dolock; + case MPCG_STATE_FLOWC: + case MPCG_STATE_READY: + if (ctcm_checkalloc_buffer(ch)) + break; + ch->trans_skb->data = ch->trans_skb_data; + skb_reset_tail_pointer(ch->trans_skb); + 
ch->trans_skb->len = 0; + ch->ccw[1].count = ch->max_bufsize; + if (do_debug_ccw) + ctcmpc_dumpit((char *)&ch->ccw[0], + sizeof(struct ccw1) * 3); + dolock = !in_irq(); + if (dolock) + spin_lock_irqsave( + get_ccwdev_lock(ch->cdev), saveflags); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); + if (dolock) /* see remark about conditional locking */ + spin_unlock_irqrestore( + get_ccwdev_lock(ch->cdev), saveflags); + if (rc != 0) + ctcm_ccw_check_rc(ch, rc, "normal RX"); + default: + break; + } + + CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n", + __func__, dev->name, ch, ch->id); + +} + +/** + * Initialize connection by sending a __u16 of value 0. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *gptr = priv->mpcg; + + CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n", + __func__, ch->id, ch); + + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO, + "%s: %s: chstate:%i, grpstate:%i, prot:%i\n", + CTCM_FUNTAIL, ch->id, fsm_getstate(fi), + fsm_getstate(gptr->fsm), ch->protocol); + + if (fsm_getstate(fi) == CTC_STATE_TXIDLE) + MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? "); + + fsm_deltimer(&ch->timer); + if (ctcm_checkalloc_buffer(ch)) + goto done; + + switch (fsm_getstate(fi)) { + case CTC_STATE_STARTRETRY: + case CTC_STATE_SETUPWAIT: + if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { + ctcmpc_chx_rxidle(fi, event, arg); + } else { + fsm_newstate(fi, CTC_STATE_TXIDLE); + fsm_event(priv->fsm, DEV_EVENT_TXUP, dev); + } + goto done; + default: + break; + } + + fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) + ? CTC_STATE_RXINIT : CTC_STATE_TXINIT); + +done: + CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n", + __func__, ch->id, ch); + return; +} + +/** + * Got initial data, check it. If OK, + * notify device statemachine that we are up and + * running. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. 
+ */ +void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + int rc; + unsigned long saveflags = 0; /* avoids compiler warning */ + + fsm_deltimer(&ch->timer); + CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n", + __func__, ch->id, dev->name, smp_processor_id(), + fsm_getstate(fi), fsm_getstate(grp->fsm)); + + fsm_newstate(fi, CTC_STATE_RXIDLE); + /* XID processing complete */ + + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_FLOWC: + case MPCG_STATE_READY: + if (ctcm_checkalloc_buffer(ch)) + goto done; + ch->trans_skb->data = ch->trans_skb_data; + skb_reset_tail_pointer(ch->trans_skb); + ch->trans_skb->len = 0; + ch->ccw[1].count = ch->max_bufsize; + CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3); + if (event == CTC_EVENT_START) + /* see remark about conditional locking */ + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0); + if (event == CTC_EVENT_START) + spin_unlock_irqrestore( + get_ccwdev_lock(ch->cdev), saveflags); + if (rc != 0) { + fsm_newstate(fi, CTC_STATE_RXINIT); + ctcm_ccw_check_rc(ch, rc, "initial RX"); + goto done; + } + break; + default: + break; + } + + fsm_event(priv->fsm, DEV_EVENT_RXUP, dev); +done: + return; +} + +/* + * ctcmpc channel FSM action + * called from several points in ctcmpc_ch_fsm + * ctcmpc only + */ +static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n", + __func__, dev->name, ch->id, ch, smp_processor_id(), + fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm)); + + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_XID2INITW: + /* ok..start yside xid exchanges */ + if (!ch->in_mpcgroup) + break; + if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) { + fsm_deltimer(&grp->timer); + fsm_addtimer(&grp->timer, + MPC_XID_TIMEOUT_VALUE, + MPCG_EVENT_TIMER, dev); + fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch); + + } else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1) + /* attn rcvd before xid0 processed via bh */ + fsm_newstate(ch->fsm, CH_XID7_PENDING1); + break; + case MPCG_STATE_XID2INITX: + case MPCG_STATE_XID0IOWAIT: + case MPCG_STATE_XID0IOWAIX: + /* attn rcvd before xid0 processed on ch + but mid-xid0 processing for group */ + if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1) + fsm_newstate(ch->fsm, CH_XID7_PENDING1); + break; + case MPCG_STATE_XID7INITW: + case MPCG_STATE_XID7INITX: + case MPCG_STATE_XID7INITI: + case MPCG_STATE_XID7INITZ: + switch (fsm_getstate(ch->fsm)) { + case CH_XID7_PENDING: + fsm_newstate(ch->fsm, CH_XID7_PENDING1); + break; + case CH_XID7_PENDING2: + fsm_newstate(ch->fsm, CH_XID7_PENDING3); + break; + } + fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev); + break; + } + + return; +} + +/* + * ctcmpc channel FSM action + * called from one point in ctcmpc_ch_fsm + * ctcmpc only + */ +static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n", + __func__, dev->name, ch->id, + fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm)); + + 
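+	/*
+	 * Illustrative note: an ATTN-BUSY at this stage typically signals
+	 * an XID "collision", i.e. the partner also wants to drive the
+	 * XID0 exchange. Depending on the group state, the code below
+	 * either switches this side to the passive (y-side) role and
+	 * restarts the exchange, or declares the group inoperative when
+	 * the busy condition cannot be explained by such a collision.
+	 */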
fsm_deltimer(&ch->timer); + + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_XID0IOWAIT: + /* vtam wants to be primary.start yside xid exchanges*/ + /* only receive one attn-busy at a time so must not */ + /* change state each time */ + grp->changed_side = 1; + fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW); + break; + case MPCG_STATE_XID2INITW: + if (grp->changed_side == 1) { + grp->changed_side = 2; + break; + } + /* process began via call to establish_conn */ + /* so must report failure instead of reverting */ + /* back to ready-for-xid passive state */ + if (grp->estconnfunc) + goto done; + /* this attnbusy is NOT the result of xside xid */ + /* collisions so yside must have been triggered */ + /* by an ATTN that was not intended to start XID */ + /* processing. Revert back to ready-for-xid and */ + /* wait for ATTN interrupt to signal xid start */ + if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) { + fsm_newstate(ch->fsm, CH_XID0_PENDING) ; + fsm_deltimer(&grp->timer); + goto done; + } + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + goto done; + case MPCG_STATE_XID2INITX: + /* XID2 was received before ATTN Busy for second + channel.Send yside xid for second channel. + */ + if (grp->changed_side == 1) { + grp->changed_side = 2; + break; + } + fallthrough; + case MPCG_STATE_XID0IOWAIX: + case MPCG_STATE_XID7INITW: + case MPCG_STATE_XID7INITX: + case MPCG_STATE_XID7INITI: + case MPCG_STATE_XID7INITZ: + default: + /* multiple attn-busy indicates too out-of-sync */ + /* and they are certainly not being received as part */ + /* of valid mpc group negotiations.. */ + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + goto done; + } + + if (grp->changed_side == 1) { + fsm_deltimer(&grp->timer); + fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE, + MPCG_EVENT_TIMER, dev); + } + if (ch->in_mpcgroup) + fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch); + else + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): channel %s not added to group", + CTCM_FUNTAIL, dev->name, ch->id); + +done: + return; +} + +/* + * ctcmpc channel FSM action + * called from several points in ctcmpc_ch_fsm + * ctcmpc only + */ +static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch); + return; +} + +/* + * ctcmpc channel FSM action + * called from several points in ctcmpc_ch_fsm + * ctcmpc only + */ +static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg) +{ + struct channel *ach = arg; + struct net_device *dev = ach->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct channel *wch = priv->channel[CTCM_WRITE]; + struct channel *rch = priv->channel[CTCM_READ]; + struct sk_buff *skb; + struct th_sweep *header; + int rc = 0; + unsigned long saveflags = 0; + + CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n", + __func__, smp_processor_id(), ach, ach->id); + + if (grp->in_sweep == 0) + goto done; + + CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n" , + __func__, wch->th_seq_num); + CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n" , + __func__, rch->th_seq_num); + + if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) { + /* give the previous IO time to complete */ + fsm_addtimer(&wch->sweep_timer, + 200, CTC_EVENT_RSWEEP_TIMER, wch); + goto done; + } + + skb = skb_dequeue(&wch->sweep_queue); + if (!skb) + goto done; + + if (set_normalized_cda(&wch->ccw[4], 
skb->data)) { + grp->in_sweep = 0; + ctcm_clear_busy_do(dev); + dev_kfree_skb_any(skb); + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + goto done; + } else { + refcount_inc(&skb->users); + skb_queue_tail(&wch->io_queue, skb); + } + + /* send out the sweep */ + wch->ccw[4].count = skb->len; + + header = (struct th_sweep *)skb->data; + switch (header->th.th_ch_flag) { + case TH_SWEEP_REQ: + grp->sweep_req_pend_num--; + break; + case TH_SWEEP_RESP: + grp->sweep_rsp_pend_num--; + break; + } + + header->sw.th_last_seq = wch->th_seq_num; + + CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3); + CTCM_PR_DBGDATA("%s: sweep packet\n", __func__); + CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH); + + fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch); + fsm_newstate(wch->fsm, CTC_STATE_TX); + + spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags); + wch->prof.send_stamp = jiffies; + rc = ccw_device_start(wch->cdev, &wch->ccw[3], 0, 0xff, 0); + spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags); + + if ((grp->sweep_req_pend_num == 0) && + (grp->sweep_rsp_pend_num == 0)) { + grp->in_sweep = 0; + rch->th_seq_num = 0x00; + wch->th_seq_num = 0x00; + ctcm_clear_busy_do(dev); + } + + CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n" , + __func__, wch->th_seq_num, rch->th_seq_num); + + if (rc != 0) + ctcm_ccw_check_rc(wch, rc, "send sweep"); + +done: + return; +} + + +/* + * The ctcmpc statemachine for a channel. + */ + +const fsm_node ctcmpc_ch_fsm[] = { + { CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop }, + { CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start }, + { CTC_STATE_STOPPED, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop }, + { CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop }, + + { CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop }, + { CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop }, + { CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop }, + { CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start }, + { CTC_STATE_NOTOP, CTC_EVENT_UC_RCRESET, ctcm_chx_stop }, + { CTC_STATE_NOTOP, CTC_EVENT_UC_RSRESET, ctcm_chx_stop }, + { CTC_STATE_NOTOP, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + + { CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode }, + { CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr }, + { CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode }, + { CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_chx_setmode }, + { CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_STARTRETRY, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + + { CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio }, + { 
CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, ctcmpc_chx_rxidle }, + { CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr }, + { CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr }, + { CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr }, + { CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail }, + { CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, ctcmpc_chx_firstio }, + { CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CH_XID0_PENDING, CTC_EVENT_FINSTAT, ctcm_action_nop }, + { CH_XID0_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn }, + { CH_XID0_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CH_XID0_PENDING, CTC_EVENT_START, ctcm_action_nop }, + { CH_XID0_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CH_XID0_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CH_XID0_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, + { CH_XID0_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CH_XID0_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CH_XID0_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal }, + + { CH_XID0_INPROGRESS, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, + { CH_XID0_INPROGRESS, CTC_EVENT_ATTN, ctcmpc_chx_attn }, + { CH_XID0_INPROGRESS, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CH_XID0_INPROGRESS, CTC_EVENT_START, ctcm_action_nop }, + { CH_XID0_INPROGRESS, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CH_XID0_INPROGRESS, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CH_XID0_INPROGRESS, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, + { CH_XID0_INPROGRESS, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, + { CH_XID0_INPROGRESS, CTC_EVENT_ATTNBUSY, ctcmpc_chx_attnbusy }, + { CH_XID0_INPROGRESS, CTC_EVENT_TIMER, ctcmpc_chx_resend }, + { CH_XID0_INPROGRESS, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, + + { CH_XID7_PENDING, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, + { CH_XID7_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn }, + { CH_XID7_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CH_XID7_PENDING, CTC_EVENT_START, ctcm_action_nop }, + { CH_XID7_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CH_XID7_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CH_XID7_PENDING, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, + { CH_XID7_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal }, + { CH_XID7_PENDING, CTC_EVENT_TIMER, ctcmpc_chx_resend }, + { CH_XID7_PENDING, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, + + { CH_XID7_PENDING1, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, + { CH_XID7_PENDING1, CTC_EVENT_ATTN, ctcmpc_chx_attn }, + { CH_XID7_PENDING1, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CH_XID7_PENDING1, CTC_EVENT_START, ctcm_action_nop }, + { CH_XID7_PENDING1, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CH_XID7_PENDING1, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CH_XID7_PENDING1, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, + { CH_XID7_PENDING1, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING1, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING1, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal }, + { CH_XID7_PENDING1, CTC_EVENT_TIMER, ctcmpc_chx_resend }, + { CH_XID7_PENDING1, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, + + { CH_XID7_PENDING2, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, + { CH_XID7_PENDING2, CTC_EVENT_ATTN, ctcmpc_chx_attn }, + { CH_XID7_PENDING2, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CH_XID7_PENDING2, CTC_EVENT_START, 
ctcm_action_nop }, + { CH_XID7_PENDING2, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CH_XID7_PENDING2, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CH_XID7_PENDING2, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, + { CH_XID7_PENDING2, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING2, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING2, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal }, + { CH_XID7_PENDING2, CTC_EVENT_TIMER, ctcmpc_chx_resend }, + { CH_XID7_PENDING2, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, + + { CH_XID7_PENDING3, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, + { CH_XID7_PENDING3, CTC_EVENT_ATTN, ctcmpc_chx_attn }, + { CH_XID7_PENDING3, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CH_XID7_PENDING3, CTC_EVENT_START, ctcm_action_nop }, + { CH_XID7_PENDING3, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CH_XID7_PENDING3, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CH_XID7_PENDING3, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, + { CH_XID7_PENDING3, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING3, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING3, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal }, + { CH_XID7_PENDING3, CTC_EVENT_TIMER, ctcmpc_chx_resend }, + { CH_XID7_PENDING3, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, + + { CH_XID7_PENDING4, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, + { CH_XID7_PENDING4, CTC_EVENT_ATTN, ctcmpc_chx_attn }, + { CH_XID7_PENDING4, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CH_XID7_PENDING4, CTC_EVENT_START, ctcm_action_nop }, + { CH_XID7_PENDING4, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CH_XID7_PENDING4, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CH_XID7_PENDING4, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, + { CH_XID7_PENDING4, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING4, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING4, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal }, + { CH_XID7_PENDING4, CTC_EVENT_TIMER, ctcmpc_chx_resend }, + { CH_XID7_PENDING4, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, + + { CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, + { CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc }, + { CTC_STATE_RXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail }, + { CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, + + { CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle }, + { CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr }, + { CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr }, + { CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr }, + { CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_TXINIT, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep }, + + { CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio }, + { CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_fail }, + { CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail }, + { CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_TXIDLE, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep }, + + { CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop }, + { 
CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart }, + { CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped }, + { CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop }, + { CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop }, + { CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_TERM, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, + { CTC_STATE_TERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + + { CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart }, + { CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode }, + { CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop }, + { CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop }, + { CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_DTERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + + { CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_TX, CTC_EVENT_FINSTAT, ctcmpc_chx_txdone }, + { CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_fail }, + { CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_fail }, + { CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry }, + { CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_TX, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep }, + { CTC_STATE_TX, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, + + { CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TXERR, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, +}; + +int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm); + +/* + * Actions for interface - statemachine. + */ + +/** + * Startup channels by sending CTC_EVENT_START to each channel. + * + * fi An instance of an interface statemachine. + * event The event, just happened. + * arg Generic pointer, casted from struct net_device * upon call. + */ +static void dev_action_start(fsm_instance *fi, int event, void *arg) +{ + struct net_device *dev = arg; + struct ctcm_priv *priv = dev->ml_priv; + int direction; + + CTCMY_DBF_DEV_NAME(SETUP, dev, ""); + + fsm_deltimer(&priv->restart_timer); + fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX); + if (IS_MPC(priv)) + priv->mpcg->channels_terminating = 0; + for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) { + struct channel *ch = priv->channel[direction]; + fsm_event(ch->fsm, CTC_EVENT_START, ch); + } +} + +/** + * Shutdown channels by sending CTC_EVENT_STOP to each channel. + * + * fi An instance of an interface statemachine. + * event The event, just happened. + * arg Generic pointer, casted from struct net_device * upon call. 
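+ *
+ * Illustrative sketch (mirrors the loop in the function body): the
+ * stop action, like dev_action_start above, just fans the event out
+ * to the read and the write channel state machine:
+ *
+ *	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++)
+ *		fsm_event(priv->channel[direction]->fsm,
+ *			  CTC_EVENT_STOP, priv->channel[direction]);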
+ */ +static void dev_action_stop(fsm_instance *fi, int event, void *arg) +{ + int direction; + struct net_device *dev = arg; + struct ctcm_priv *priv = dev->ml_priv; + + CTCMY_DBF_DEV_NAME(SETUP, dev, ""); + + fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX); + for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) { + struct channel *ch = priv->channel[direction]; + fsm_event(ch->fsm, CTC_EVENT_STOP, ch); + ch->th_seq_num = 0x00; + CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n", + __func__, ch->th_seq_num); + } + if (IS_MPC(priv)) + fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET); +} + +static void dev_action_restart(fsm_instance *fi, int event, void *arg) +{ + int restart_timer; + struct net_device *dev = arg; + struct ctcm_priv *priv = dev->ml_priv; + + CTCMY_DBF_DEV_NAME(TRACE, dev, ""); + + if (IS_MPC(priv)) { + restart_timer = CTCM_TIME_1_SEC; + } else { + restart_timer = CTCM_TIME_5_SEC; + } + dev_info(&dev->dev, "Restarting device\n"); + + dev_action_stop(fi, event, arg); + fsm_event(priv->fsm, DEV_EVENT_STOP, dev); + if (IS_MPC(priv)) + fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET); + + /* going back into start sequence too quickly can */ + /* result in the other side becoming unreachable due */ + /* to sense reported when IO is aborted */ + fsm_addtimer(&priv->restart_timer, restart_timer, + DEV_EVENT_START, dev); +} + +/** + * Called from channel statemachine + * when a channel is up and running. + * + * fi An instance of an interface statemachine. + * event The event, just happened. + * arg Generic pointer, casted from struct net_device * upon call. + */ +static void dev_action_chup(fsm_instance *fi, int event, void *arg) +{ + struct net_device *dev = arg; + struct ctcm_priv *priv = dev->ml_priv; + int dev_stat = fsm_getstate(fi); + + CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE, + "%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL, + dev->name, dev->ml_priv, dev_stat, event); + + switch (fsm_getstate(fi)) { + case DEV_STATE_STARTWAIT_RXTX: + if (event == DEV_EVENT_RXUP) + fsm_newstate(fi, DEV_STATE_STARTWAIT_TX); + else + fsm_newstate(fi, DEV_STATE_STARTWAIT_RX); + break; + case DEV_STATE_STARTWAIT_RX: + if (event == DEV_EVENT_RXUP) { + fsm_newstate(fi, DEV_STATE_RUNNING); + dev_info(&dev->dev, + "Connected with remote side\n"); + ctcm_clear_busy(dev); + } + break; + case DEV_STATE_STARTWAIT_TX: + if (event == DEV_EVENT_TXUP) { + fsm_newstate(fi, DEV_STATE_RUNNING); + dev_info(&dev->dev, + "Connected with remote side\n"); + ctcm_clear_busy(dev); + } + break; + case DEV_STATE_STOPWAIT_TX: + if (event == DEV_EVENT_RXUP) + fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX); + break; + case DEV_STATE_STOPWAIT_RX: + if (event == DEV_EVENT_TXUP) + fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX); + break; + } + + if (IS_MPC(priv)) { + if (event == DEV_EVENT_RXUP) + mpc_channel_action(priv->channel[CTCM_READ], + CTCM_READ, MPC_CHANNEL_ADD); + else + mpc_channel_action(priv->channel[CTCM_WRITE], + CTCM_WRITE, MPC_CHANNEL_ADD); + } +} + +/** + * Called from device statemachine + * when a channel has been shutdown. + * + * fi An instance of an interface statemachine. + * event The event, just happened. + * arg Generic pointer, casted from struct net_device * upon call. 
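+ *
+ * Illustrative note: chup and chdown mirror each other - every
+ * RXUP/TXUP moves the device one step from STARTWAIT_RXTX towards
+ * RUNNING, while every RXDOWN/TXDOWN moves it back towards
+ * STARTWAIT_RXTX (when starting) or towards STOPPED (when stopping).
+ * For MPC devices both actions additionally add or remove the channel
+ * from the MPC group via mpc_channel_action().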
+ */ +static void dev_action_chdown(fsm_instance *fi, int event, void *arg) +{ + + struct net_device *dev = arg; + struct ctcm_priv *priv = dev->ml_priv; + + CTCMY_DBF_DEV_NAME(SETUP, dev, ""); + + switch (fsm_getstate(fi)) { + case DEV_STATE_RUNNING: + if (event == DEV_EVENT_TXDOWN) + fsm_newstate(fi, DEV_STATE_STARTWAIT_TX); + else + fsm_newstate(fi, DEV_STATE_STARTWAIT_RX); + break; + case DEV_STATE_STARTWAIT_RX: + if (event == DEV_EVENT_TXDOWN) + fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX); + break; + case DEV_STATE_STARTWAIT_TX: + if (event == DEV_EVENT_RXDOWN) + fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX); + break; + case DEV_STATE_STOPWAIT_RXTX: + if (event == DEV_EVENT_TXDOWN) + fsm_newstate(fi, DEV_STATE_STOPWAIT_RX); + else + fsm_newstate(fi, DEV_STATE_STOPWAIT_TX); + break; + case DEV_STATE_STOPWAIT_RX: + if (event == DEV_EVENT_RXDOWN) + fsm_newstate(fi, DEV_STATE_STOPPED); + break; + case DEV_STATE_STOPWAIT_TX: + if (event == DEV_EVENT_TXDOWN) + fsm_newstate(fi, DEV_STATE_STOPPED); + break; + } + if (IS_MPC(priv)) { + if (event == DEV_EVENT_RXDOWN) + mpc_channel_action(priv->channel[CTCM_READ], + CTCM_READ, MPC_CHANNEL_REMOVE); + else + mpc_channel_action(priv->channel[CTCM_WRITE], + CTCM_WRITE, MPC_CHANNEL_REMOVE); + } +} + +const fsm_node dev_fsm[] = { + { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start }, + { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start }, + { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown }, + { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown }, + { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart }, + { DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start }, + { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup }, + { DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup }, + { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown }, + { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart }, + { DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start }, + { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup }, + { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup }, + { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown }, + { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart }, + { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop }, + { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup }, + { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup }, + { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown }, + { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown }, + { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart }, + { DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop }, + { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup }, + { DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup }, + { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown }, + { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart }, + { DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop }, + { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup }, + { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup }, + { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown }, + { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart }, + { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop }, + { DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown }, + { DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown }, + { DEV_STATE_RUNNING, DEV_EVENT_TXUP, ctcm_action_nop }, 
+ { DEV_STATE_RUNNING, DEV_EVENT_RXUP, ctcm_action_nop }, + { DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart }, +}; + +int dev_fsm_len = ARRAY_SIZE(dev_fsm); + +/* --- This is the END my friend --- */ + diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h new file mode 100644 index 000000000..d98c48672 --- /dev/null +++ b/drivers/s390/net/ctcm_fsms.h @@ -0,0 +1,356 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2001, 2007 + * Authors: Fritz Elfert (felfert@millenux.com) + * Peter Tiedemann (ptiedem@de.ibm.com) + * MPC additions : + * Belinda Thompson (belindat@us.ibm.com) + * Andy Richter (richtera@us.ibm.com) + */ +#ifndef _CTCM_FSMS_H_ +#define _CTCM_FSMS_H_ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/bitops.h> + +#include <linux/signal.h> +#include <linux/string.h> + +#include <linux/ip.h> +#include <linux/if_arp.h> +#include <linux/tcp.h> +#include <linux/skbuff.h> +#include <linux/ctype.h> +#include <net/dst.h> + +#include <linux/io.h> +#include <asm/ccwdev.h> +#include <asm/ccwgroup.h> +#include <linux/uaccess.h> + +#include <asm/idals.h> + +#include "fsm.h" +#include "ctcm_main.h" + +/* + * Definitions for the channel statemachine(s) for ctc and ctcmpc + * + * To allow better kerntyping, prefix-less definitions for channel states + * and channel events have been replaced : + * ch_event... -> ctc_ch_event... + * CH_EVENT... -> CTC_EVENT... + * ch_state... -> ctc_ch_state... + * CH_STATE... -> CTC_STATE... + */ +/* + * Events of the channel statemachine(s) for ctc and ctcmpc + */ +enum ctc_ch_events { + /* + * Events, representing return code of + * I/O operations (ccw_device_start, ccw_device_halt et al.) + */ + CTC_EVENT_IO_SUCCESS, + CTC_EVENT_IO_EBUSY, + CTC_EVENT_IO_ENODEV, + CTC_EVENT_IO_UNKNOWN, + + CTC_EVENT_ATTNBUSY, + CTC_EVENT_ATTN, + CTC_EVENT_BUSY, + /* + * Events, representing unit-check + */ + CTC_EVENT_UC_RCRESET, + CTC_EVENT_UC_RSRESET, + CTC_EVENT_UC_TXTIMEOUT, + CTC_EVENT_UC_TXPARITY, + CTC_EVENT_UC_HWFAIL, + CTC_EVENT_UC_RXPARITY, + CTC_EVENT_UC_ZERO, + CTC_EVENT_UC_UNKNOWN, + /* + * Events, representing subchannel-check + */ + CTC_EVENT_SC_UNKNOWN, + /* + * Events, representing machine checks + */ + CTC_EVENT_MC_FAIL, + CTC_EVENT_MC_GOOD, + /* + * Event, representing normal IRQ + */ + CTC_EVENT_IRQ, + CTC_EVENT_FINSTAT, + /* + * Event, representing timer expiry. + */ + CTC_EVENT_TIMER, + /* + * Events, representing commands from upper levels. + */ + CTC_EVENT_START, + CTC_EVENT_STOP, + CTC_NR_EVENTS, + /* + * additional MPC events + */ + CTC_EVENT_SEND_XID = CTC_NR_EVENTS, + CTC_EVENT_RSWEEP_TIMER, + /* + * MUST be always the last element!! + */ + CTC_MPC_NR_EVENTS, +}; + +/* + * States of the channel statemachine(s) for ctc and ctcmpc. 
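+ *
+ * Illustrative note: the MPC-only states further down extend the plain
+ * CTC set rather than duplicating it - CH_XID0_PENDING is aliased to
+ * CTC_NR_STATES - so one array of state names (ctc_ch_state_names[])
+ * can presumably serve both the ctcm and the ctcmpc channel FSM.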
+ */ +enum ctc_ch_states { + /* + * Channel not assigned to any device, + * initial state, direction invalid + */ + CTC_STATE_IDLE, + /* + * Channel assigned but not operating + */ + CTC_STATE_STOPPED, + CTC_STATE_STARTWAIT, + CTC_STATE_STARTRETRY, + CTC_STATE_SETUPWAIT, + CTC_STATE_RXINIT, + CTC_STATE_TXINIT, + CTC_STATE_RX, + CTC_STATE_TX, + CTC_STATE_RXIDLE, + CTC_STATE_TXIDLE, + CTC_STATE_RXERR, + CTC_STATE_TXERR, + CTC_STATE_TERM, + CTC_STATE_DTERM, + CTC_STATE_NOTOP, + CTC_NR_STATES, /* MUST be the last element of non-expanded states */ + /* + * additional MPC states + */ + CH_XID0_PENDING = CTC_NR_STATES, + CH_XID0_INPROGRESS, + CH_XID7_PENDING, + CH_XID7_PENDING1, + CH_XID7_PENDING2, + CH_XID7_PENDING3, + CH_XID7_PENDING4, + CTC_MPC_NR_STATES, /* MUST be the last element of expanded mpc states */ +}; + +extern const char *ctc_ch_event_names[]; + +extern const char *ctc_ch_state_names[]; + +void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg); +void ctcm_purge_skb_queue(struct sk_buff_head *q); + +/* + * ----- non-static actions for ctcm channel statemachine ----- + * + */ +void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg); + +/* + * ----- FSM (state/event/action) of the ctcm channel statemachine ----- + */ +extern const fsm_node ch_fsm[]; +extern int ch_fsm_len; + + +/* + * ----- non-static actions for ctcmpc channel statemachine ---- + * + */ +/* shared : +void ctcm_chx_txidle(fsm_instance * fi, int event, void *arg); + */ +void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg); + +/* + * ----- FSM (state/event/action) of the ctcmpc channel statemachine ----- + */ +extern const fsm_node ctcmpc_ch_fsm[]; +extern int mpc_ch_fsm_len; + +/* + * Definitions for the device interface statemachine for ctc and mpc + */ + +/* + * States of the device interface statemachine. + */ +enum dev_states { + DEV_STATE_STOPPED, + DEV_STATE_STARTWAIT_RXTX, + DEV_STATE_STARTWAIT_RX, + DEV_STATE_STARTWAIT_TX, + DEV_STATE_STOPWAIT_RXTX, + DEV_STATE_STOPWAIT_RX, + DEV_STATE_STOPWAIT_TX, + DEV_STATE_RUNNING, + /* + * MUST be always the last element!! + */ + CTCM_NR_DEV_STATES +}; + +extern const char *dev_state_names[]; + +/* + * Events of the device interface statemachine. + * ctcm and ctcmpc + */ +enum dev_events { + DEV_EVENT_START, + DEV_EVENT_STOP, + DEV_EVENT_RXUP, + DEV_EVENT_TXUP, + DEV_EVENT_RXDOWN, + DEV_EVENT_TXDOWN, + DEV_EVENT_RESTART, + /* + * MUST be always the last element!! + */ + CTCM_NR_DEV_EVENTS +}; + +extern const char *dev_event_names[]; + +/* + * Actions for the device interface statemachine. + * ctc and ctcmpc + */ +/* +static void dev_action_start(fsm_instance * fi, int event, void *arg); +static void dev_action_stop(fsm_instance * fi, int event, void *arg); +static void dev_action_restart(fsm_instance *fi, int event, void *arg); +static void dev_action_chup(fsm_instance * fi, int event, void *arg); +static void dev_action_chdown(fsm_instance * fi, int event, void *arg); +*/ + +/* + * The (state/event/action) fsm table of the device interface statemachine. + * ctcm and ctcmpc + */ +extern const fsm_node dev_fsm[]; +extern int dev_fsm_len; + + +/* + * Definitions for the MPC Group statemachine + */ + +/* + * MPC Group Station FSM States + +State Name When In This State +====================== ======================================= +MPCG_STATE_RESET Initial State When Driver Loaded + We receive and send NOTHING + +MPCG_STATE_INOP INOP Received. 
+ Group level non-recoverable error + +MPCG_STATE_READY XID exchanges for at least 1 write and + 1 read channel have completed. + Group is ready for data transfer. + +States from ctc_mpc_alloc_channel +============================================================== +MPCG_STATE_XID2INITW Awaiting XID2(0) Initiation + ATTN from other side will start + XID negotiations. + Y-side protocol only. + +MPCG_STATE_XID2INITX XID2(0) negotiations are in progress. + At least 1, but not all, XID2(0)'s + have been received from partner. + +MPCG_STATE_XID7INITW XID2(0) complete + No XID2(7)'s have yet been received. + XID2(7) negotiations pending. + +MPCG_STATE_XID7INITX XID2(7) negotiations in progress. + At least 1, but not all, XID2(7)'s + have been received from partner. + +MPCG_STATE_XID7INITF XID2(7) negotiations complete. + Transitioning to READY. + +MPCG_STATE_READY Ready for Data Transfer. + + +States from ctc_mpc_establish_connectivity call +============================================================== +MPCG_STATE_XID0IOWAIT Initiating XID2(0) negotiations. + X-side protocol only. + ATTN-BUSY from other side will convert + this to Y-side protocol and the + ctc_mpc_alloc_channel flow will begin. + +MPCG_STATE_XID0IOWAIX XID2(0) negotiations are in progress. + At least 1, but not all, XID2(0)'s + have been received from partner. + +MPCG_STATE_XID7INITI XID2(0) complete + No XID2(7)'s have yet been received. + XID2(7) negotiations pending. + +MPCG_STATE_XID7INITZ XID2(7) negotiations in progress. + At least 1, but not all, XID2(7)'s + have been received from partner. + +MPCG_STATE_XID7INITF XID2(7) negotiations complete. + Transitioning to READY. + +MPCG_STATE_READY Ready for Data Transfer. + +*/ + +enum mpcg_events { + MPCG_EVENT_INOP, + MPCG_EVENT_DISCONC, + MPCG_EVENT_XID0DO, + MPCG_EVENT_XID2, + MPCG_EVENT_XID2DONE, + MPCG_EVENT_XID7DONE, + MPCG_EVENT_TIMER, + MPCG_EVENT_DOIO, + MPCG_NR_EVENTS, +}; + +enum mpcg_states { + MPCG_STATE_RESET, + MPCG_STATE_INOP, + MPCG_STATE_XID2INITW, + MPCG_STATE_XID2INITX, + MPCG_STATE_XID7INITW, + MPCG_STATE_XID7INITX, + MPCG_STATE_XID0IOWAIT, + MPCG_STATE_XID0IOWAIX, + MPCG_STATE_XID7INITI, + MPCG_STATE_XID7INITZ, + MPCG_STATE_XID7INITF, + MPCG_STATE_FLOWC, + MPCG_STATE_READY, + MPCG_NR_STATES, +}; + +#endif +/* --- This is the END my friend --- */ diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c new file mode 100644 index 000000000..fb0e8f1ca --- /dev/null +++ b/drivers/s390/net/ctcm_main.c @@ -0,0 +1,1821 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 
2001, 2009 + * Author(s): + * Original CTC driver(s): + * Fritz Elfert (felfert@millenux.com) + * Dieter Wellerdiek (wel@de.ibm.com) + * Martin Schwidefsky (schwidefsky@de.ibm.com) + * Denis Joseph Barrow (barrow_dj@yahoo.com) + * Jochen Roehrig (roehrig@de.ibm.com) + * Cornelia Huck <cornelia.huck@de.ibm.com> + * MPC additions: + * Belinda Thompson (belindat@us.ibm.com) + * Andy Richter (richtera@us.ibm.com) + * Revived by: + * Peter Tiedemann (ptiedem@de.ibm.com) + */ + +#undef DEBUG +#undef DEBUGDATA +#undef DEBUGCCW + +#define KMSG_COMPONENT "ctcm" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/bitops.h> + +#include <linux/signal.h> +#include <linux/string.h> + +#include <linux/ip.h> +#include <linux/if_arp.h> +#include <linux/tcp.h> +#include <linux/skbuff.h> +#include <linux/ctype.h> +#include <net/dst.h> + +#include <linux/io.h> +#include <asm/ccwdev.h> +#include <asm/ccwgroup.h> +#include <linux/uaccess.h> + +#include <asm/idals.h> + +#include "ctcm_fsms.h" +#include "ctcm_main.h" + +/* Some common global variables */ + +/** + * The root device for ctcm group devices + */ +static struct device *ctcm_root_dev; + +/* + * Linked list of all detected channels. + */ +struct channel *channels; + +/** + * Unpack a just received skb and hand it over to + * upper layers. + * + * ch The channel where this skb has been received. + * pskb The received skb. + */ +void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb) +{ + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + __u16 len = *((__u16 *) pskb->data); + + skb_put(pskb, 2 + LL_HEADER_LENGTH); + skb_pull(pskb, 2); + pskb->dev = dev; + pskb->ip_summed = CHECKSUM_UNNECESSARY; + while (len > 0) { + struct sk_buff *skb; + int skblen; + struct ll_header *header = (struct ll_header *)pskb->data; + + skb_pull(pskb, LL_HEADER_LENGTH); + if ((ch->protocol == CTCM_PROTO_S390) && + (header->type != ETH_P_IP)) { + if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) { + ch->logflags |= LOG_FLAG_ILLEGALPKT; + /* + * Check packet type only if we stick strictly + * to S/390's protocol of OS390. This only + * supports IP. Otherwise allow any packet + * type. 
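+				 *
+				 * Illustrative aside: the frame being
+				 * unpacked here is a 2-byte block length
+				 * followed by one or more ll_header plus
+				 * payload pairs; the type checked above is
+				 * the protocol id from that ll_header.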
+ */ + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): Illegal packet type 0x%04x" + " - dropping", + CTCM_FUNTAIL, dev->name, header->type); + } + priv->stats.rx_dropped++; + priv->stats.rx_frame_errors++; + return; + } + pskb->protocol = cpu_to_be16(header->type); + if ((header->length <= LL_HEADER_LENGTH) || + (len <= LL_HEADER_LENGTH)) { + if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): Illegal packet size %d(%d,%d)" + "- dropping", + CTCM_FUNTAIL, dev->name, + header->length, dev->mtu, len); + ch->logflags |= LOG_FLAG_ILLEGALSIZE; + } + + priv->stats.rx_dropped++; + priv->stats.rx_length_errors++; + return; + } + header->length -= LL_HEADER_LENGTH; + len -= LL_HEADER_LENGTH; + if ((header->length > skb_tailroom(pskb)) || + (header->length > len)) { + if (!(ch->logflags & LOG_FLAG_OVERRUN)) { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): Packet size %d (overrun)" + " - dropping", CTCM_FUNTAIL, + dev->name, header->length); + ch->logflags |= LOG_FLAG_OVERRUN; + } + + priv->stats.rx_dropped++; + priv->stats.rx_length_errors++; + return; + } + skb_put(pskb, header->length); + skb_reset_mac_header(pskb); + len -= header->length; + skb = dev_alloc_skb(pskb->len); + if (!skb) { + if (!(ch->logflags & LOG_FLAG_NOMEM)) { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): MEMORY allocation error", + CTCM_FUNTAIL, dev->name); + ch->logflags |= LOG_FLAG_NOMEM; + } + priv->stats.rx_dropped++; + return; + } + skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len), + pskb->len); + skb_reset_mac_header(skb); + skb->dev = pskb->dev; + skb->protocol = pskb->protocol; + pskb->ip_summed = CHECKSUM_UNNECESSARY; + skblen = skb->len; + /* + * reset logflags + */ + ch->logflags = 0; + priv->stats.rx_packets++; + priv->stats.rx_bytes += skblen; + netif_rx_ni(skb); + if (len > 0) { + skb_pull(pskb, header->length); + if (skb_tailroom(pskb) < LL_HEADER_LENGTH) { + CTCM_DBF_DEV_NAME(TRACE, dev, + "Overrun in ctcm_unpack_skb"); + ch->logflags |= LOG_FLAG_OVERRUN; + return; + } + skb_put(pskb, LL_HEADER_LENGTH); + } + } +} + +/** + * Release a specific channel in the channel list. + * + * ch Pointer to channel struct to be released. + */ +static void channel_free(struct channel *ch) +{ + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s)", CTCM_FUNTAIL, ch->id); + ch->flags &= ~CHANNEL_FLAGS_INUSE; + fsm_newstate(ch->fsm, CTC_STATE_IDLE); +} + +/** + * Remove a specific channel in the channel list. + * + * ch Pointer to channel struct to be released. + */ +static void channel_remove(struct channel *ch) +{ + struct channel **c = &channels; + char chid[CTCM_ID_SIZE+1]; + int ok = 0; + + if (ch == NULL) + return; + else + strncpy(chid, ch->id, CTCM_ID_SIZE); + + channel_free(ch); + while (*c) { + if (*c == ch) { + *c = ch->next; + fsm_deltimer(&ch->timer); + if (IS_MPC(ch)) + fsm_deltimer(&ch->sweep_timer); + + kfree_fsm(ch->fsm); + clear_normalized_cda(&ch->ccw[4]); + if (ch->trans_skb != NULL) { + clear_normalized_cda(&ch->ccw[1]); + dev_kfree_skb_any(ch->trans_skb); + } + if (IS_MPC(ch)) { + tasklet_kill(&ch->ch_tasklet); + tasklet_kill(&ch->ch_disc_tasklet); + kfree(ch->discontact_th); + } + kfree(ch->ccw); + kfree(ch->irb); + kfree(ch); + ok = 1; + break; + } + c = &((*c)->next); + } + + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s) %s", CTCM_FUNTAIL, + chid, ok ? "OK" : "failed"); +} + +/** + * Get a specific channel from the channel list. + * + * type Type of channel we are interested in. + * id Id of channel we are interested in. 
+ * direction Direction we want to use this channel for. + * + * returns Pointer to a channel or NULL if no matching channel available. + */ +static struct channel *channel_get(enum ctcm_channel_types type, + char *id, int direction) +{ + struct channel *ch = channels; + + while (ch && (strncmp(ch->id, id, CTCM_ID_SIZE) || (ch->type != type))) + ch = ch->next; + if (!ch) { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%d, %s, %d) not found in channel list\n", + CTCM_FUNTAIL, type, id, direction); + } else { + if (ch->flags & CHANNEL_FLAGS_INUSE) + ch = NULL; + else { + ch->flags |= CHANNEL_FLAGS_INUSE; + ch->flags &= ~CHANNEL_FLAGS_RWMASK; + ch->flags |= (direction == CTCM_WRITE) + ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ; + fsm_newstate(ch->fsm, CTC_STATE_STOPPED); + } + } + return ch; +} + +static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb) +{ + if (!IS_ERR(irb)) + return 0; + + CTCM_DBF_TEXT_(ERROR, CTC_DBF_WARN, + "irb error %ld on device %s\n", + PTR_ERR(irb), dev_name(&cdev->dev)); + + switch (PTR_ERR(irb)) { + case -EIO: + dev_err(&cdev->dev, + "An I/O-error occurred on the CTCM device\n"); + break; + case -ETIMEDOUT: + dev_err(&cdev->dev, + "An adapter hardware operation timed out\n"); + break; + default: + dev_err(&cdev->dev, + "An error occurred on the adapter hardware\n"); + } + return PTR_ERR(irb); +} + + +/** + * Check sense of a unit check. + * + * ch The channel, the sense code belongs to. + * sense The sense code to inspect. + */ +static void ccw_unit_check(struct channel *ch, __u8 sense) +{ + CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, + "%s(%s): %02x", + CTCM_FUNTAIL, ch->id, sense); + + if (sense & SNS0_INTERVENTION_REQ) { + if (sense & 0x01) { + if (ch->sense_rc != 0x01) { + pr_notice( + "%s: The communication peer has " + "disconnected\n", ch->id); + ch->sense_rc = 0x01; + } + fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch); + } else { + if (ch->sense_rc != SNS0_INTERVENTION_REQ) { + pr_notice( + "%s: The remote operating system is " + "not available\n", ch->id); + ch->sense_rc = SNS0_INTERVENTION_REQ; + } + fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch); + } + } else if (sense & SNS0_EQUIPMENT_CHECK) { + if (sense & SNS0_BUS_OUT_CHECK) { + if (ch->sense_rc != SNS0_BUS_OUT_CHECK) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, + "%s(%s): remote HW error %02x", + CTCM_FUNTAIL, ch->id, sense); + ch->sense_rc = SNS0_BUS_OUT_CHECK; + } + fsm_event(ch->fsm, CTC_EVENT_UC_HWFAIL, ch); + } else { + if (ch->sense_rc != SNS0_EQUIPMENT_CHECK) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, + "%s(%s): remote read parity error %02x", + CTCM_FUNTAIL, ch->id, sense); + ch->sense_rc = SNS0_EQUIPMENT_CHECK; + } + fsm_event(ch->fsm, CTC_EVENT_UC_RXPARITY, ch); + } + } else if (sense & SNS0_BUS_OUT_CHECK) { + if (ch->sense_rc != SNS0_BUS_OUT_CHECK) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, + "%s(%s): BUS OUT error %02x", + CTCM_FUNTAIL, ch->id, sense); + ch->sense_rc = SNS0_BUS_OUT_CHECK; + } + if (sense & 0x04) /* data-streaming timeout */ + fsm_event(ch->fsm, CTC_EVENT_UC_TXTIMEOUT, ch); + else /* Data-transfer parity error */ + fsm_event(ch->fsm, CTC_EVENT_UC_TXPARITY, ch); + } else if (sense & SNS0_CMD_REJECT) { + if (ch->sense_rc != SNS0_CMD_REJECT) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, + "%s(%s): Command rejected", + CTCM_FUNTAIL, ch->id); + ch->sense_rc = SNS0_CMD_REJECT; + } + } else if (sense == 0) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, + "%s(%s): Unit check ZERO", + CTCM_FUNTAIL, ch->id); + fsm_event(ch->fsm, CTC_EVENT_UC_ZERO, ch); + } else { + CTCM_DBF_TEXT_(TRACE, 
CTC_DBF_WARN, + "%s(%s): Unit check code %02x unknown", + CTCM_FUNTAIL, ch->id, sense); + fsm_event(ch->fsm, CTC_EVENT_UC_UNKNOWN, ch); + } +} + +int ctcm_ch_alloc_buffer(struct channel *ch) +{ + clear_normalized_cda(&ch->ccw[1]); + ch->trans_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC | GFP_DMA); + if (ch->trans_skb == NULL) { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): %s trans_skb allocation error", + CTCM_FUNTAIL, ch->id, + (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? + "RX" : "TX"); + return -ENOMEM; + } + + ch->ccw[1].count = ch->max_bufsize; + if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) { + dev_kfree_skb(ch->trans_skb); + ch->trans_skb = NULL; + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): %s set norm_cda failed", + CTCM_FUNTAIL, ch->id, + (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? + "RX" : "TX"); + return -ENOMEM; + } + + ch->ccw[1].count = 0; + ch->trans_skb_data = ch->trans_skb->data; + ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED; + return 0; +} + +/* + * Interface API for upper network layers + */ + +/** + * Open an interface. + * Called from generic network layer when ifconfig up is run. + * + * dev Pointer to interface struct. + * + * returns 0 on success, -ERRNO on failure. (Never fails.) + */ +int ctcm_open(struct net_device *dev) +{ + struct ctcm_priv *priv = dev->ml_priv; + + CTCMY_DBF_DEV_NAME(SETUP, dev, ""); + if (!IS_MPC(priv)) + fsm_event(priv->fsm, DEV_EVENT_START, dev); + return 0; +} + +/** + * Close an interface. + * Called from generic network layer when ifconfig down is run. + * + * dev Pointer to interface struct. + * + * returns 0 on success, -ERRNO on failure. (Never fails.) + */ +int ctcm_close(struct net_device *dev) +{ + struct ctcm_priv *priv = dev->ml_priv; + + CTCMY_DBF_DEV_NAME(SETUP, dev, ""); + if (!IS_MPC(priv)) + fsm_event(priv->fsm, DEV_EVENT_STOP, dev); + return 0; +} + + +/** + * Transmit a packet. + * This is a helper function for ctcm_tx(). + * + * ch Channel to be used for sending. + * skb Pointer to struct sk_buff of packet to send. + * The linklevel header has already been set up + * by ctcm_tx(). + * + * returns 0 on success, -ERRNO on failure. (Never fails.) + */ +static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb) +{ + unsigned long saveflags; + struct ll_header header; + int rc = 0; + __u16 block_len; + int ccw_idx; + struct sk_buff *nskb; + unsigned long hi; + + /* we need to acquire the lock for testing the state + * otherwise we can have an IRQ changing the state to + * TXIDLE after the test but before acquiring the lock. + */ + spin_lock_irqsave(&ch->collect_lock, saveflags); + if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) { + int l = skb->len + LL_HEADER_LENGTH; + + if (ch->collect_len + l > ch->max_bufsize - 2) { + spin_unlock_irqrestore(&ch->collect_lock, saveflags); + return -EBUSY; + } else { + refcount_inc(&skb->users); + header.length = l; + header.type = be16_to_cpu(skb->protocol); + header.unused = 0; + memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, + LL_HEADER_LENGTH); + skb_queue_tail(&ch->collect_queue, skb); + ch->collect_len += l; + } + spin_unlock_irqrestore(&ch->collect_lock, saveflags); + goto done; + } + spin_unlock_irqrestore(&ch->collect_lock, saveflags); + /* + * Protect skb against beeing free'd by upper + * layers. 
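+	 *
+	 * Illustrative note: refcount_inc(&skb->users) makes sure a
+	 * dev_kfree_skb() issued by the stack cannot free the skb while
+	 * it is still queued for channel I/O; the refcount_dec() and
+	 * dev_kfree_skb_irq() calls on the copy and error paths below
+	 * give that reference back.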
+ */ + refcount_inc(&skb->users); + ch->prof.txlen += skb->len; + header.length = skb->len + LL_HEADER_LENGTH; + header.type = be16_to_cpu(skb->protocol); + header.unused = 0; + memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH); + block_len = skb->len + 2; + *((__u16 *)skb_push(skb, 2)) = block_len; + + /* + * IDAL support in CTCM is broken, so we have to + * care about skb's above 2G ourselves. + */ + hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31; + if (hi) { + nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); + if (!nskb) { + refcount_dec(&skb->users); + skb_pull(skb, LL_HEADER_LENGTH + 2); + ctcm_clear_busy(ch->netdev); + return -ENOMEM; + } else { + skb_put_data(nskb, skb->data, skb->len); + refcount_inc(&nskb->users); + refcount_dec(&skb->users); + dev_kfree_skb_irq(skb); + skb = nskb; + } + } + + ch->ccw[4].count = block_len; + if (set_normalized_cda(&ch->ccw[4], skb->data)) { + /* + * idal allocation failed, try via copying to + * trans_skb. trans_skb usually has a pre-allocated + * idal. + */ + if (ctcm_checkalloc_buffer(ch)) { + /* + * Remove our header. It gets added + * again on retransmit. + */ + refcount_dec(&skb->users); + skb_pull(skb, LL_HEADER_LENGTH + 2); + ctcm_clear_busy(ch->netdev); + return -ENOMEM; + } + + skb_reset_tail_pointer(ch->trans_skb); + ch->trans_skb->len = 0; + ch->ccw[1].count = skb->len; + skb_copy_from_linear_data(skb, + skb_put(ch->trans_skb, skb->len), skb->len); + refcount_dec(&skb->users); + dev_kfree_skb_irq(skb); + ccw_idx = 0; + } else { + skb_queue_tail(&ch->io_queue, skb); + ccw_idx = 3; + } + if (do_debug_ccw) + ctcmpc_dumpit((char *)&ch->ccw[ccw_idx], + sizeof(struct ccw1) * 3); + ch->retry = 0; + fsm_newstate(ch->fsm, CTC_STATE_TX); + fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + ch->prof.send_stamp = jiffies; + rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], 0, 0xff, 0); + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); + if (ccw_idx == 3) + ch->prof.doios_single++; + if (rc != 0) { + fsm_deltimer(&ch->timer); + ctcm_ccw_check_rc(ch, rc, "single skb TX"); + if (ccw_idx == 3) + skb_dequeue_tail(&ch->io_queue); + /* + * Remove our header. It gets added + * again on retransmit. 
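+		 *
+		 * Illustrative note: "our header" is the 2-byte block
+		 * length plus the struct ll_header pushed at the top of
+		 * this function, hence the skb_pull(skb,
+		 * LL_HEADER_LENGTH + 2) that follows.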
+ */ + skb_pull(skb, LL_HEADER_LENGTH + 2); + } else if (ccw_idx == 0) { + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + priv->stats.tx_packets++; + priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH; + } +done: + ctcm_clear_busy(ch->netdev); + return rc; +} + +static void ctcmpc_send_sweep_req(struct channel *rch) +{ + struct net_device *dev = rch->netdev; + struct ctcm_priv *priv; + struct mpc_group *grp; + struct th_sweep *header; + struct sk_buff *sweep_skb; + struct channel *ch; + /* int rc = 0; */ + + priv = dev->ml_priv; + grp = priv->mpcg; + ch = priv->channel[CTCM_WRITE]; + + /* sweep processing is not complete until response and request */ + /* has completed for all read channels in group */ + if (grp->in_sweep == 0) { + grp->in_sweep = 1; + grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ]; + grp->sweep_req_pend_num = grp->active_channels[CTCM_READ]; + } + + sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA); + + if (sweep_skb == NULL) { + /* rc = -ENOMEM; */ + goto nomem; + } + + header = kmalloc(TH_SWEEP_LENGTH, gfp_type()); + + if (!header) { + dev_kfree_skb_any(sweep_skb); + /* rc = -ENOMEM; */ + goto nomem; + } + + header->th.th_seg = 0x00 ; + header->th.th_ch_flag = TH_SWEEP_REQ; /* 0x0f */ + header->th.th_blk_flag = 0x00; + header->th.th_is_xid = 0x00; + header->th.th_seq_num = 0x00; + header->sw.th_last_seq = ch->th_seq_num; + + skb_put_data(sweep_skb, header, TH_SWEEP_LENGTH); + + kfree(header); + + netif_trans_update(dev); + skb_queue_tail(&ch->sweep_queue, sweep_skb); + + fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch); + + return; + +nomem: + grp->in_sweep = 0; + ctcm_clear_busy(dev); + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + + return; +} + +/* + * MPC mode version of transmit_skb + */ +static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) +{ + struct pdu *p_header; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct th_header *header; + struct sk_buff *nskb; + int rc = 0; + int ccw_idx; + unsigned long hi; + unsigned long saveflags = 0; /* avoids compiler warning */ + + CTCM_PR_DEBUG("Enter %s: %s, cp=%i ch=0x%p id=%s state=%s\n", + __func__, dev->name, smp_processor_id(), ch, + ch->id, fsm_getstate_str(ch->fsm)); + + if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) { + spin_lock_irqsave(&ch->collect_lock, saveflags); + refcount_inc(&skb->users); + p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type()); + + if (!p_header) { + spin_unlock_irqrestore(&ch->collect_lock, saveflags); + goto nomem_exit; + } + + p_header->pdu_offset = skb->len; + p_header->pdu_proto = 0x01; + p_header->pdu_flag = 0x00; + if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) { + p_header->pdu_flag |= PDU_FIRST | PDU_CNTL; + } else { + p_header->pdu_flag |= PDU_FIRST; + } + p_header->pdu_seq = 0; + memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header, + PDU_HEADER_LENGTH); + + CTCM_PR_DEBUG("%s(%s): Put on collect_q - skb len: %04x \n" + "pdu header and data for up to 32 bytes:\n", + __func__, dev->name, skb->len); + CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len)); + + skb_queue_tail(&ch->collect_queue, skb); + ch->collect_len += skb->len; + kfree(p_header); + + spin_unlock_irqrestore(&ch->collect_lock, saveflags); + goto done; + } + + /* + * Protect skb against beeing free'd by upper + * layers. 
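+	 *
+	 * Illustrative aside: in MPC mode every outbound skb ends up
+	 * framed as
+	 *	struct th_header | struct pdu | payload
+	 * where th_seq_num carries the per-channel send sequence number
+	 * and pdu_offset records the payload length at the time the PDU
+	 * header is pushed.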
+ */ + refcount_inc(&skb->users); + + /* + * IDAL support in CTCM is broken, so we have to + * care about skb's above 2G ourselves. + */ + hi = ((unsigned long)skb->tail + TH_HEADER_LENGTH) >> 31; + if (hi) { + nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); + if (!nskb) { + goto nomem_exit; + } else { + skb_put_data(nskb, skb->data, skb->len); + refcount_inc(&nskb->users); + refcount_dec(&skb->users); + dev_kfree_skb_irq(skb); + skb = nskb; + } + } + + p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type()); + + if (!p_header) + goto nomem_exit; + + p_header->pdu_offset = skb->len; + p_header->pdu_proto = 0x01; + p_header->pdu_flag = 0x00; + p_header->pdu_seq = 0; + if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) { + p_header->pdu_flag |= PDU_FIRST | PDU_CNTL; + } else { + p_header->pdu_flag |= PDU_FIRST; + } + memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header, PDU_HEADER_LENGTH); + + kfree(p_header); + + if (ch->collect_len > 0) { + spin_lock_irqsave(&ch->collect_lock, saveflags); + skb_queue_tail(&ch->collect_queue, skb); + ch->collect_len += skb->len; + skb = skb_dequeue(&ch->collect_queue); + ch->collect_len -= skb->len; + spin_unlock_irqrestore(&ch->collect_lock, saveflags); + } + + p_header = (struct pdu *)skb->data; + p_header->pdu_flag |= PDU_LAST; + + ch->prof.txlen += skb->len - PDU_HEADER_LENGTH; + + header = kmalloc(TH_HEADER_LENGTH, gfp_type()); + if (!header) + goto nomem_exit; + + header->th_seg = 0x00; + header->th_ch_flag = TH_HAS_PDU; /* Normal data */ + header->th_blk_flag = 0x00; + header->th_is_xid = 0x00; /* Just data here */ + ch->th_seq_num++; + header->th_seq_num = ch->th_seq_num; + + CTCM_PR_DBGDATA("%s(%s) ToVTAM_th_seq= %08x\n" , + __func__, dev->name, ch->th_seq_num); + + /* put the TH on the packet */ + memcpy(skb_push(skb, TH_HEADER_LENGTH), header, TH_HEADER_LENGTH); + + kfree(header); + + CTCM_PR_DBGDATA("%s(%s): skb len: %04x\n - pdu header and data for " + "up to 32 bytes sent to vtam:\n", + __func__, dev->name, skb->len); + CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len)); + + ch->ccw[4].count = skb->len; + if (set_normalized_cda(&ch->ccw[4], skb->data)) { + /* + * idal allocation failed, try via copying to trans_skb. + * trans_skb usually has a pre-allocated idal. + */ + if (ctcm_checkalloc_buffer(ch)) { + /* + * Remove our header. + * It gets added again on retransmit. 
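+			 *
+			 * Illustrative note: on this MPC path the header is
+			 * not actually un-pushed - nomem_exit below drops
+			 * our reference, frees the skb outright and reports
+			 * -ENOMEM.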
+ */ + goto nomem_exit; + } + + skb_reset_tail_pointer(ch->trans_skb); + ch->trans_skb->len = 0; + ch->ccw[1].count = skb->len; + skb_put_data(ch->trans_skb, skb->data, skb->len); + refcount_dec(&skb->users); + dev_kfree_skb_irq(skb); + ccw_idx = 0; + CTCM_PR_DBGDATA("%s(%s): trans_skb len: %04x\n" + "up to 32 bytes sent to vtam:\n", + __func__, dev->name, ch->trans_skb->len); + CTCM_D3_DUMP((char *)ch->trans_skb->data, + min_t(int, 32, ch->trans_skb->len)); + } else { + skb_queue_tail(&ch->io_queue, skb); + ccw_idx = 3; + } + ch->retry = 0; + fsm_newstate(ch->fsm, CTC_STATE_TX); + fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); + + if (do_debug_ccw) + ctcmpc_dumpit((char *)&ch->ccw[ccw_idx], + sizeof(struct ccw1) * 3); + + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + ch->prof.send_stamp = jiffies; + rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], 0, 0xff, 0); + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); + if (ccw_idx == 3) + ch->prof.doios_single++; + if (rc != 0) { + fsm_deltimer(&ch->timer); + ctcm_ccw_check_rc(ch, rc, "single skb TX"); + if (ccw_idx == 3) + skb_dequeue_tail(&ch->io_queue); + } else if (ccw_idx == 0) { + priv->stats.tx_packets++; + priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH; + } + if (ch->th_seq_num > 0xf0000000) /* Chose at random. */ + ctcmpc_send_sweep_req(ch); + + goto done; +nomem_exit: + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_CRIT, + "%s(%s): MEMORY allocation ERROR\n", + CTCM_FUNTAIL, ch->id); + rc = -ENOMEM; + refcount_dec(&skb->users); + dev_kfree_skb_any(skb); + fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); +done: + CTCM_PR_DEBUG("Exit %s(%s)\n", __func__, dev->name); + return rc; +} + +/** + * Start transmission of a packet. + * Called from generic network device layer. + */ +/* first merge version - leaving both functions separated */ +static netdev_tx_t ctcm_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct ctcm_priv *priv = dev->ml_priv; + + if (skb == NULL) { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): NULL sk_buff passed", + CTCM_FUNTAIL, dev->name); + priv->stats.tx_dropped++; + return NETDEV_TX_OK; + } + if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): Got sk_buff with head room < %ld bytes", + CTCM_FUNTAIL, dev->name, LL_HEADER_LENGTH + 2); + dev_kfree_skb(skb); + priv->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + /* + * If channels are not running, try to restart them + * and throw away packet. + */ + if (fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) { + fsm_event(priv->fsm, DEV_EVENT_START, dev); + dev_kfree_skb(skb); + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + priv->stats.tx_carrier_errors++; + return NETDEV_TX_OK; + } + + if (ctcm_test_and_set_busy(dev)) + return NETDEV_TX_BUSY; + + netif_trans_update(dev); + if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) + return NETDEV_TX_BUSY; + return NETDEV_TX_OK; +} + +/* unmerged MPC variant of ctcm_tx */ +static netdev_tx_t ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) +{ + int len = 0; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct sk_buff *newskb = NULL; + + /* + * Some sanity checks ... 
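+ * - a NULL skb is only counted as tx_dropped
+ * - an skb without room for the TH and PDU headers is copied into a
+ *   freshly allocated skb with sufficient headroom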
+ */ + if (skb == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): NULL sk_buff passed", + CTCM_FUNTAIL, dev->name); + priv->stats.tx_dropped++; + goto done; + } + if (skb_headroom(skb) < (TH_HEADER_LENGTH + PDU_HEADER_LENGTH)) { + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR, + "%s(%s): Got sk_buff with head room < %ld bytes", + CTCM_FUNTAIL, dev->name, + TH_HEADER_LENGTH + PDU_HEADER_LENGTH); + + CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len)); + + len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH; + newskb = __dev_alloc_skb(len, gfp_type() | GFP_DMA); + + if (!newskb) { + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR, + "%s: %s: __dev_alloc_skb failed", + __func__, dev->name); + + dev_kfree_skb_any(skb); + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + priv->stats.tx_carrier_errors++; + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + goto done; + } + newskb->protocol = skb->protocol; + skb_reserve(newskb, TH_HEADER_LENGTH + PDU_HEADER_LENGTH); + skb_put_data(newskb, skb->data, skb->len); + dev_kfree_skb_any(skb); + skb = newskb; + } + + /* + * If channels are not running, + * notify anybody about a link failure and throw + * away packet. + */ + if ((fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) || + (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) { + dev_kfree_skb_any(skb); + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): inactive MPCGROUP - dropped", + CTCM_FUNTAIL, dev->name); + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + priv->stats.tx_carrier_errors++; + goto done; + } + + if (ctcm_test_and_set_busy(dev)) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): device busy - dropped", + CTCM_FUNTAIL, dev->name); + dev_kfree_skb_any(skb); + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + priv->stats.tx_carrier_errors++; + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + goto done; + } + + netif_trans_update(dev); + if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): device error - dropped", + CTCM_FUNTAIL, dev->name); + dev_kfree_skb_any(skb); + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + priv->stats.tx_carrier_errors++; + ctcm_clear_busy(dev); + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + goto done; + } + ctcm_clear_busy(dev); +done: + if (do_debug) + MPC_DBF_DEV_NAME(TRACE, dev, "exit"); + + return NETDEV_TX_OK; /* handle freeing of skb here */ +} + + +/** + * Sets MTU of an interface. + * + * dev Pointer to interface struct. + * new_mtu The new MTU to use for this interface. + * + * returns 0 on success, -EINVAL if MTU is out of valid range. + * (valid range is 576 .. 65527). If VM is on the + * remote side, maximum MTU is 32760, however this is + * not checked here. + */ +static int ctcm_change_mtu(struct net_device *dev, int new_mtu) +{ + struct ctcm_priv *priv; + int max_bufsize; + + priv = dev->ml_priv; + max_bufsize = priv->channel[CTCM_READ]->max_bufsize; + + if (IS_MPC(priv)) { + if (new_mtu > max_bufsize - TH_HEADER_LENGTH) + return -EINVAL; + dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH; + } else { + if (new_mtu > max_bufsize - LL_HEADER_LENGTH - 2) + return -EINVAL; + dev->hard_header_len = LL_HEADER_LENGTH + 2; + } + dev->mtu = new_mtu; + return 0; +} + +/** + * Returns interface statistics of a device. + * + * dev Pointer to interface struct. + * + * returns Pointer to stats struct of this interface. 
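+ * The counters live in struct ctcm_priv and are updated by the
+ * transmit and receive paths.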
+ */ +static struct net_device_stats *ctcm_stats(struct net_device *dev) +{ + return &((struct ctcm_priv *)dev->ml_priv)->stats; +} + +static void ctcm_free_netdevice(struct net_device *dev) +{ + struct ctcm_priv *priv; + struct mpc_group *grp; + + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, + "%s(%s)", CTCM_FUNTAIL, dev->name); + priv = dev->ml_priv; + if (priv) { + grp = priv->mpcg; + if (grp) { + if (grp->fsm) + kfree_fsm(grp->fsm); + dev_kfree_skb(grp->xid_skb); + dev_kfree_skb(grp->rcvd_xid_skb); + tasklet_kill(&grp->mpc_tasklet2); + kfree(grp); + priv->mpcg = NULL; + } + if (priv->fsm) { + kfree_fsm(priv->fsm); + priv->fsm = NULL; + } + kfree(priv->xid); + priv->xid = NULL; + /* + * Note: kfree(priv); is done in "opposite" function of + * allocator function probe_device which is remove_device. + */ + } +#ifdef MODULE + free_netdev(dev); +#endif +} + +struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv); + +static const struct net_device_ops ctcm_netdev_ops = { + .ndo_open = ctcm_open, + .ndo_stop = ctcm_close, + .ndo_get_stats = ctcm_stats, + .ndo_change_mtu = ctcm_change_mtu, + .ndo_start_xmit = ctcm_tx, +}; + +static const struct net_device_ops ctcm_mpc_netdev_ops = { + .ndo_open = ctcm_open, + .ndo_stop = ctcm_close, + .ndo_get_stats = ctcm_stats, + .ndo_change_mtu = ctcm_change_mtu, + .ndo_start_xmit = ctcmpc_tx, +}; + +static void ctcm_dev_setup(struct net_device *dev) +{ + dev->type = ARPHRD_SLIP; + dev->tx_queue_len = 100; + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->min_mtu = 576; + dev->max_mtu = 65527; +} + +/* + * Initialize everything of the net device except the name and the + * channel structs. + */ +static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv) +{ + struct net_device *dev; + struct mpc_group *grp; + if (!priv) + return NULL; + + if (IS_MPC(priv)) + dev = alloc_netdev(0, MPC_DEVICE_GENE, NET_NAME_UNKNOWN, + ctcm_dev_setup); + else + dev = alloc_netdev(0, CTC_DEVICE_GENE, NET_NAME_UNKNOWN, + ctcm_dev_setup); + + if (!dev) { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT, + "%s: MEMORY allocation ERROR", + CTCM_FUNTAIL); + return NULL; + } + dev->ml_priv = priv; + priv->fsm = init_fsm("ctcmdev", dev_state_names, dev_event_names, + CTCM_NR_DEV_STATES, CTCM_NR_DEV_EVENTS, + dev_fsm, dev_fsm_len, GFP_KERNEL); + if (priv->fsm == NULL) { + CTCMY_DBF_DEV(SETUP, dev, "init_fsm error"); + free_netdev(dev); + return NULL; + } + fsm_newstate(priv->fsm, DEV_STATE_STOPPED); + fsm_settimer(priv->fsm, &priv->restart_timer); + + if (IS_MPC(priv)) { + /* MPC Group Initializations */ + grp = ctcmpc_init_mpc_group(priv); + if (grp == NULL) { + MPC_DBF_DEV(SETUP, dev, "init_mpc_group error"); + free_netdev(dev); + return NULL; + } + tasklet_init(&grp->mpc_tasklet2, + mpc_group_ready, (unsigned long)dev); + dev->mtu = MPC_BUFSIZE_DEFAULT - + TH_HEADER_LENGTH - PDU_HEADER_LENGTH; + + dev->netdev_ops = &ctcm_mpc_netdev_ops; + dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH; + priv->buffer_size = MPC_BUFSIZE_DEFAULT; + } else { + dev->mtu = CTCM_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2; + dev->netdev_ops = &ctcm_netdev_ops; + dev->hard_header_len = LL_HEADER_LENGTH + 2; + } + + CTCMY_DBF_DEV(SETUP, dev, "finished"); + + return dev; +} + +/** + * Main IRQ handler. + * + * cdev The ccw_device the interrupt is for. + * intparm interruption parameter. + * irb interruption response block. 
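+ * Maps the ccw_device back to its read or write channel, copies the
+ * irb and translates the device/subchannel status into CTC_EVENT_*
+ * events for the channel's finite state machine.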
+ */ +static void ctcm_irq_handler(struct ccw_device *cdev, + unsigned long intparm, struct irb *irb) +{ + struct channel *ch; + struct net_device *dev; + struct ctcm_priv *priv; + struct ccwgroup_device *cgdev; + int cstat; + int dstat; + + CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, + "Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev)); + + if (ctcm_check_irb_error(cdev, irb)) + return; + + cgdev = dev_get_drvdata(&cdev->dev); + + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; + + /* Check for unsolicited interrupts. */ + if (cgdev == NULL) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_ERROR, + "%s(%s) unsolicited irq: c-%02x d-%02x\n", + CTCM_FUNTAIL, dev_name(&cdev->dev), cstat, dstat); + dev_warn(&cdev->dev, + "The adapter received a non-specific IRQ\n"); + return; + } + + priv = dev_get_drvdata(&cgdev->dev); + + /* Try to extract channel from driver data. */ + if (priv->channel[CTCM_READ]->cdev == cdev) + ch = priv->channel[CTCM_READ]; + else if (priv->channel[CTCM_WRITE]->cdev == cdev) + ch = priv->channel[CTCM_WRITE]; + else { + dev_err(&cdev->dev, + "%s: Internal error: Can't determine channel for " + "interrupt device %s\n", + __func__, dev_name(&cdev->dev)); + /* Explain: inconsistent internal structures */ + return; + } + + dev = ch->netdev; + if (dev == NULL) { + dev_err(&cdev->dev, + "%s Internal error: net_device is NULL, ch = 0x%p\n", + __func__, ch); + /* Explain: inconsistent internal structures */ + return; + } + + /* Copy interruption response block. */ + memcpy(ch->irb, irb, sizeof(struct irb)); + + /* Issue error message and return on subchannel error code */ + if (irb->scsw.cmd.cstat) { + fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); + CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, + "%s(%s): sub-ch check %s: cs=%02x ds=%02x", + CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat); + dev_warn(&cdev->dev, + "A check occurred on the subchannel\n"); + return; + } + + /* Check the reason-code of a unit check */ + if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { + if ((irb->ecw[0] & ch->sense_rc) == 0) + /* print it only once */ + CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, + "%s(%s): sense=%02x, ds=%02x", + CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat); + ccw_unit_check(ch, irb->ecw[0]); + return; + } + if (irb->scsw.cmd.dstat & DEV_STAT_BUSY) { + if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) + fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch); + else + fsm_event(ch->fsm, CTC_EVENT_BUSY, ch); + return; + } + if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { + fsm_event(ch->fsm, CTC_EVENT_ATTN, ch); + return; + } + if ((irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || + (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || + (irb->scsw.cmd.stctl == + (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) + fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch); + else + fsm_event(ch->fsm, CTC_EVENT_IRQ, ch); + +} + +static const struct device_type ctcm_devtype = { + .name = "ctcm", + .groups = ctcm_attr_groups, +}; + +/** + * Add ctcm specific attributes. + * Add ctcm private data. + * + * cgdev pointer to ccwgroup_device just added + * + * returns 0 on success, !0 on failure. 
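+ * Allocates the struct ctcm_priv, installs ctcm_irq_handler on both
+ * ccw devices of the group and stores the private data as drvdata.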
+ */ +static int ctcm_probe_device(struct ccwgroup_device *cgdev) +{ + struct ctcm_priv *priv; + + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, + "%s %p", + __func__, cgdev); + + if (!get_device(&cgdev->dev)) + return -ENODEV; + + priv = kzalloc(sizeof(struct ctcm_priv), GFP_KERNEL); + if (!priv) { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s: memory allocation failure", + CTCM_FUNTAIL); + put_device(&cgdev->dev); + return -ENOMEM; + } + priv->buffer_size = CTCM_BUFSIZE_DEFAULT; + cgdev->cdev[0]->handler = ctcm_irq_handler; + cgdev->cdev[1]->handler = ctcm_irq_handler; + dev_set_drvdata(&cgdev->dev, priv); + cgdev->dev.type = &ctcm_devtype; + + return 0; +} + +/** + * Add a new channel to the list of channels. + * Keeps the channel list sorted. + * + * cdev The ccw_device to be added. + * type The type class of the new channel. + * priv Points to the private data of the ccwgroup_device. + * + * returns 0 on success, !0 on error. + */ +static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type, + struct ctcm_priv *priv) +{ + struct channel **c = &channels; + struct channel *ch; + int ccw_num; + int rc = 0; + + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, + "%s(%s), type %d, proto %d", + __func__, dev_name(&cdev->dev), type, priv->protocol); + + ch = kzalloc(sizeof(struct channel), GFP_KERNEL); + if (ch == NULL) + return -ENOMEM; + + ch->protocol = priv->protocol; + if (IS_MPC(priv)) { + ch->discontact_th = kzalloc(TH_HEADER_LENGTH, gfp_type()); + if (ch->discontact_th == NULL) + goto nomem_return; + + ch->discontact_th->th_blk_flag = TH_DISCONTACT; + tasklet_init(&ch->ch_disc_tasklet, + mpc_action_send_discontact, (unsigned long)ch); + + tasklet_init(&ch->ch_tasklet, ctcmpc_bh, (unsigned long)ch); + ch->max_bufsize = (MPC_BUFSIZE_DEFAULT - 35); + ccw_num = 17; + } else + ccw_num = 8; + + ch->ccw = kcalloc(ccw_num, sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); + if (ch->ccw == NULL) + goto nomem_return; + + ch->cdev = cdev; + snprintf(ch->id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev->dev)); + ch->type = type; + + /** + * "static" ccws are used in the following way: + * + * ccw[0..2] (Channel program for generic I/O): + * 0: prepare + * 1: read or write (depending on direction) with fixed + * buffer (idal allocated once when buffer is allocated) + * 2: nop + * ccw[3..5] (Channel program for direct write of packets) + * 3: prepare + * 4: write (idal allocated on every write). + * 5: nop + * ccw[6..7] (Channel program for initial channel setup): + * 6: set extended mode + * 7: nop + * + * ch->ccw[0..5] are initialized in ch_action_start because + * the channel's direction is yet unknown here. 
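+ * ccw[6..7] do not depend on the direction and are therefore set up
+ * right below.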
+ * + * ccws used for xid2 negotiations + * ch-ccw[8-14] need to be used for the XID exchange either + * X side XID2 Processing + * 8: write control + * 9: write th + * 10: write XID + * 11: read th from secondary + * 12: read XID from secondary + * 13: read 4 byte ID + * 14: nop + * Y side XID Processing + * 8: sense + * 9: read th + * 10: read XID + * 11: write th + * 12: write XID + * 13: write 4 byte ID + * 14: nop + * + * ccws used for double noop due to VM timing issues + * which result in unrecoverable Busy on channel + * 15: nop + * 16: nop + */ + ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED; + ch->ccw[6].flags = CCW_FLAG_SLI; + + ch->ccw[7].cmd_code = CCW_CMD_NOOP; + ch->ccw[7].flags = CCW_FLAG_SLI; + + if (IS_MPC(priv)) { + ch->ccw[15].cmd_code = CCW_CMD_WRITE; + ch->ccw[15].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[15].count = TH_HEADER_LENGTH; + ch->ccw[15].cda = virt_to_phys(ch->discontact_th); + + ch->ccw[16].cmd_code = CCW_CMD_NOOP; + ch->ccw[16].flags = CCW_FLAG_SLI; + + ch->fsm = init_fsm(ch->id, ctc_ch_state_names, + ctc_ch_event_names, CTC_MPC_NR_STATES, + CTC_MPC_NR_EVENTS, ctcmpc_ch_fsm, + mpc_ch_fsm_len, GFP_KERNEL); + } else { + ch->fsm = init_fsm(ch->id, ctc_ch_state_names, + ctc_ch_event_names, CTC_NR_STATES, + CTC_NR_EVENTS, ch_fsm, + ch_fsm_len, GFP_KERNEL); + } + if (ch->fsm == NULL) + goto nomem_return; + + fsm_newstate(ch->fsm, CTC_STATE_IDLE); + + ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL); + if (ch->irb == NULL) + goto nomem_return; + + while (*c && ctcm_less_than((*c)->id, ch->id)) + c = &(*c)->next; + + if (*c && (!strncmp((*c)->id, ch->id, CTCM_ID_SIZE))) { + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, + "%s (%s) already in list, using old entry", + __func__, (*c)->id); + + goto free_return; + } + + spin_lock_init(&ch->collect_lock); + + fsm_settimer(ch->fsm, &ch->timer); + skb_queue_head_init(&ch->io_queue); + skb_queue_head_init(&ch->collect_queue); + + if (IS_MPC(priv)) { + fsm_settimer(ch->fsm, &ch->sweep_timer); + skb_queue_head_init(&ch->sweep_queue); + } + ch->next = *c; + *c = ch; + return 0; + +nomem_return: + rc = -ENOMEM; + +free_return: /* note that all channel pointers are 0 or valid */ + kfree(ch->ccw); + kfree(ch->discontact_th); + kfree_fsm(ch->fsm); + kfree(ch->irb); + kfree(ch); + return rc; +} + +/* + * Return type of a detected device. + */ +static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id) +{ + enum ctcm_channel_types type; + type = (enum ctcm_channel_types)id->driver_info; + + if (type == ctcm_channel_type_ficon) + type = ctcm_channel_type_escon; + + return type; +} + +/** + * + * Setup an interface. + * + * cgdev Device to be setup. + * + * returns 0 on success, !0 on failure. 
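+ * Adds both channels, sets the two ccw devices online, allocates the
+ * net_device, attaches the read and write channels to it and finally
+ * registers it; each step is unwound through the out_* labels on
+ * failure.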
+ */ +static int ctcm_new_device(struct ccwgroup_device *cgdev) +{ + char read_id[CTCM_ID_SIZE]; + char write_id[CTCM_ID_SIZE]; + int direction; + enum ctcm_channel_types type; + struct ctcm_priv *priv; + struct net_device *dev; + struct ccw_device *cdev0; + struct ccw_device *cdev1; + struct channel *readc; + struct channel *writec; + int ret; + int result; + + priv = dev_get_drvdata(&cgdev->dev); + if (!priv) { + result = -ENODEV; + goto out_err_result; + } + + cdev0 = cgdev->cdev[0]; + cdev1 = cgdev->cdev[1]; + + type = get_channel_type(&cdev0->id); + + snprintf(read_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev0->dev)); + snprintf(write_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev1->dev)); + + ret = add_channel(cdev0, type, priv); + if (ret) { + result = ret; + goto out_err_result; + } + ret = add_channel(cdev1, type, priv); + if (ret) { + result = ret; + goto out_remove_channel1; + } + + ret = ccw_device_set_online(cdev0); + if (ret != 0) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, + "%s(%s) set_online rc=%d", + CTCM_FUNTAIL, read_id, ret); + result = -EIO; + goto out_remove_channel2; + } + + ret = ccw_device_set_online(cdev1); + if (ret != 0) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, + "%s(%s) set_online rc=%d", + CTCM_FUNTAIL, write_id, ret); + + result = -EIO; + goto out_ccw1; + } + + dev = ctcm_init_netdevice(priv); + if (dev == NULL) { + result = -ENODEV; + goto out_ccw2; + } + + for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) { + priv->channel[direction] = + channel_get(type, direction == CTCM_READ ? + read_id : write_id, direction); + if (priv->channel[direction] == NULL) { + if (direction == CTCM_WRITE) + channel_free(priv->channel[CTCM_READ]); + result = -ENODEV; + goto out_dev; + } + priv->channel[direction]->netdev = dev; + priv->channel[direction]->protocol = priv->protocol; + priv->channel[direction]->max_bufsize = priv->buffer_size; + } + /* sysfs magic */ + SET_NETDEV_DEV(dev, &cgdev->dev); + + if (register_netdev(dev)) { + result = -ENODEV; + goto out_dev; + } + + strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name)); + + dev_info(&dev->dev, + "setup OK : r/w = %s/%s, protocol : %d\n", + priv->channel[CTCM_READ]->id, + priv->channel[CTCM_WRITE]->id, priv->protocol); + + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, + "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name, + priv->channel[CTCM_READ]->id, + priv->channel[CTCM_WRITE]->id, priv->protocol); + + return 0; +out_dev: + ctcm_free_netdevice(dev); +out_ccw2: + ccw_device_set_offline(cgdev->cdev[1]); +out_ccw1: + ccw_device_set_offline(cgdev->cdev[0]); +out_remove_channel2: + readc = channel_get(type, read_id, CTCM_READ); + channel_remove(readc); +out_remove_channel1: + writec = channel_get(type, write_id, CTCM_WRITE); + channel_remove(writec); +out_err_result: + return result; +} + +/** + * Shutdown an interface. + * + * cgdev Device to be shut down. + * + * returns 0 on success, !0 on failure. 
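+ * Closes and unregisters the net_device, releases both channels and
+ * sets the two ccw devices offline again.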
+ */ +static int ctcm_shutdown_device(struct ccwgroup_device *cgdev) +{ + struct ctcm_priv *priv; + struct net_device *dev; + + priv = dev_get_drvdata(&cgdev->dev); + if (!priv) + return -ENODEV; + + if (priv->channel[CTCM_READ]) { + dev = priv->channel[CTCM_READ]->netdev; + CTCM_DBF_DEV(SETUP, dev, ""); + /* Close the device */ + ctcm_close(dev); + dev->flags &= ~IFF_RUNNING; + channel_free(priv->channel[CTCM_READ]); + } else + dev = NULL; + + if (priv->channel[CTCM_WRITE]) + channel_free(priv->channel[CTCM_WRITE]); + + if (dev) { + unregister_netdev(dev); + ctcm_free_netdevice(dev); + } + + if (priv->fsm) + kfree_fsm(priv->fsm); + + ccw_device_set_offline(cgdev->cdev[1]); + ccw_device_set_offline(cgdev->cdev[0]); + channel_remove(priv->channel[CTCM_READ]); + channel_remove(priv->channel[CTCM_WRITE]); + priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL; + + return 0; + +} + + +static void ctcm_remove_device(struct ccwgroup_device *cgdev) +{ + struct ctcm_priv *priv = dev_get_drvdata(&cgdev->dev); + + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, + "removing device %p, proto : %d", + cgdev, priv->protocol); + + if (cgdev->state == CCWGROUP_ONLINE) + ctcm_shutdown_device(cgdev); + dev_set_drvdata(&cgdev->dev, NULL); + kfree(priv); + put_device(&cgdev->dev); +} + +static struct ccw_device_id ctcm_ids[] = { + {CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel}, + {CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon}, + {CCW_DEVICE(0x3088, 0x1f), .driver_info = ctcm_channel_type_escon}, + {}, +}; +MODULE_DEVICE_TABLE(ccw, ctcm_ids); + +static struct ccw_driver ctcm_ccw_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "ctcm", + }, + .ids = ctcm_ids, + .probe = ccwgroup_probe_ccwdev, + .remove = ccwgroup_remove_ccwdev, + .int_class = IRQIO_CTC, +}; + +static struct ccwgroup_driver ctcm_group_driver = { + .driver = { + .owner = THIS_MODULE, + .name = CTC_DRIVER_NAME, + }, + .ccw_driver = &ctcm_ccw_driver, + .setup = ctcm_probe_device, + .remove = ctcm_remove_device, + .set_online = ctcm_new_device, + .set_offline = ctcm_shutdown_device, +}; + +static ssize_t group_store(struct device_driver *ddrv, const char *buf, + size_t count) +{ + int err; + + err = ccwgroup_create_dev(ctcm_root_dev, &ctcm_group_driver, 2, buf); + return err ? err : count; +} +static DRIVER_ATTR_WO(group); + +static struct attribute *ctcm_drv_attrs[] = { + &driver_attr_group.attr, + NULL, +}; +static struct attribute_group ctcm_drv_attr_group = { + .attrs = ctcm_drv_attrs, +}; +static const struct attribute_group *ctcm_drv_attr_groups[] = { + &ctcm_drv_attr_group, + NULL, +}; + +/* + * Module related routines + */ + +/* + * Prepare to be unloaded. Free IRQ's and release all resources. + * This is called just before this module is unloaded. It is + * not called, if the usage count is !0, so we don't need to check + * for that. + */ +static void __exit ctcm_exit(void) +{ + ccwgroup_driver_unregister(&ctcm_group_driver); + ccw_driver_unregister(&ctcm_ccw_driver); + root_device_unregister(ctcm_root_dev); + ctcm_unregister_dbf_views(); + pr_info("CTCM driver unloaded\n"); +} + +/* + * Print Banner. + */ +static void print_banner(void) +{ + pr_info("CTCM driver initialized\n"); +} + +/** + * Initialize module. + * This is called just after the module is loaded. + * + * returns 0 on success, !0 on error. 
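+ * Registers the debug facility views, the ctcm root device and the
+ * ccw and ccwgroup drivers; already completed steps are rolled back
+ * in reverse order if any of them fails.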
+ */ +static int __init ctcm_init(void) +{ + int ret; + + channels = NULL; + + ret = ctcm_register_dbf_views(); + if (ret) + goto out_err; + ctcm_root_dev = root_device_register("ctcm"); + ret = PTR_ERR_OR_ZERO(ctcm_root_dev); + if (ret) + goto register_err; + ret = ccw_driver_register(&ctcm_ccw_driver); + if (ret) + goto ccw_err; + ctcm_group_driver.driver.groups = ctcm_drv_attr_groups; + ret = ccwgroup_driver_register(&ctcm_group_driver); + if (ret) + goto ccwgroup_err; + print_banner(); + return 0; + +ccwgroup_err: + ccw_driver_unregister(&ctcm_ccw_driver); +ccw_err: + root_device_unregister(ctcm_root_dev); +register_err: + ctcm_unregister_dbf_views(); +out_err: + pr_err("%s / Initializing the ctcm device driver failed, ret = %d\n", + __func__, ret); + return ret; +} + +module_init(ctcm_init); +module_exit(ctcm_exit); + +MODULE_AUTHOR("Peter Tiedemann <ptiedem@de.ibm.com>"); +MODULE_DESCRIPTION("Network driver for S/390 CTC + CTCMPC (SNA)"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h new file mode 100644 index 000000000..16bdf23ee --- /dev/null +++ b/drivers/s390/net/ctcm_main.h @@ -0,0 +1,316 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2001, 2007 + * Authors: Fritz Elfert (felfert@millenux.com) + * Peter Tiedemann (ptiedem@de.ibm.com) + */ + +#ifndef _CTCM_MAIN_H_ +#define _CTCM_MAIN_H_ + +#include <asm/ccwdev.h> +#include <asm/ccwgroup.h> + +#include <linux/skbuff.h> +#include <linux/netdevice.h> + +#include "fsm.h" +#include "ctcm_dbug.h" +#include "ctcm_mpc.h" + +#define CTC_DRIVER_NAME "ctcm" +#define CTC_DEVICE_NAME "ctc" +#define MPC_DEVICE_NAME "mpc" +#define CTC_DEVICE_GENE CTC_DEVICE_NAME "%d" +#define MPC_DEVICE_GENE MPC_DEVICE_NAME "%d" + +#define CHANNEL_FLAGS_READ 0 +#define CHANNEL_FLAGS_WRITE 1 +#define CHANNEL_FLAGS_INUSE 2 +#define CHANNEL_FLAGS_BUFSIZE_CHANGED 4 +#define CHANNEL_FLAGS_FAILED 8 +#define CHANNEL_FLAGS_WAITIRQ 16 +#define CHANNEL_FLAGS_RWMASK 1 +#define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK) + +#define LOG_FLAG_ILLEGALPKT 1 +#define LOG_FLAG_ILLEGALSIZE 2 +#define LOG_FLAG_OVERRUN 4 +#define LOG_FLAG_NOMEM 8 + +#define ctcm_pr_debug(fmt, arg...) printk(KERN_DEBUG fmt, ##arg) + +#define CTCM_PR_DEBUG(fmt, arg...) \ + do { \ + if (do_debug) \ + printk(KERN_DEBUG fmt, ##arg); \ + } while (0) + +#define CTCM_PR_DBGDATA(fmt, arg...) \ + do { \ + if (do_debug_data) \ + printk(KERN_DEBUG fmt, ##arg); \ + } while (0) + +#define CTCM_D3_DUMP(buf, len) \ + do { \ + if (do_debug_data) \ + ctcmpc_dumpit(buf, len); \ + } while (0) + +#define CTCM_CCW_DUMP(buf, len) \ + do { \ + if (do_debug_ccw) \ + ctcmpc_dumpit(buf, len); \ + } while (0) + +/** + * Enum for classifying detected devices + */ +enum ctcm_channel_types { + /* Device is not a channel */ + ctcm_channel_type_none, + + /* Device is a CTC/A */ + ctcm_channel_type_parallel, + + /* Device is a FICON channel */ + ctcm_channel_type_ficon, + + /* Device is a ESCON channel */ + ctcm_channel_type_escon +}; + +/* + * CCW commands, used in this driver. 
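+ * These opcodes populate the static channel programs built in
+ * add_channel(), e.g. CCW_CMD_SET_EXTENDED in ccw[6] and CCW_CMD_NOOP
+ * in ccw[7].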
+ */ +#define CCW_CMD_WRITE 0x01 +#define CCW_CMD_READ 0x02 +#define CCW_CMD_NOOP 0x03 +#define CCW_CMD_TIC 0x08 +#define CCW_CMD_SENSE_CMD 0x14 +#define CCW_CMD_WRITE_CTL 0x17 +#define CCW_CMD_SET_EXTENDED 0xc3 +#define CCW_CMD_PREPARE 0xe3 + +#define CTCM_PROTO_S390 0 +#define CTCM_PROTO_LINUX 1 +#define CTCM_PROTO_LINUX_TTY 2 +#define CTCM_PROTO_OS390 3 +#define CTCM_PROTO_MPC 4 +#define CTCM_PROTO_MAX 4 + +#define CTCM_BUFSIZE_LIMIT 65535 +#define CTCM_BUFSIZE_DEFAULT 32768 +#define MPC_BUFSIZE_DEFAULT CTCM_BUFSIZE_LIMIT + +#define CTCM_TIME_1_SEC 1000 +#define CTCM_TIME_5_SEC 5000 +#define CTCM_TIME_10_SEC 10000 + +#define CTCM_INITIAL_BLOCKLEN 2 + +#define CTCM_READ 0 +#define CTCM_WRITE 1 + +#define CTCM_ID_SIZE 20+3 + +struct ctcm_profile { + unsigned long maxmulti; + unsigned long maxcqueue; + unsigned long doios_single; + unsigned long doios_multi; + unsigned long txlen; + unsigned long tx_time; + unsigned long send_stamp; +}; + +/* + * Definition of one channel + */ +struct channel { + struct channel *next; + char id[CTCM_ID_SIZE]; + struct ccw_device *cdev; + /* + * Type of this channel. + * CTC/A or Escon for valid channels. + */ + enum ctcm_channel_types type; + /* + * Misc. flags. See CHANNEL_FLAGS_... below + */ + __u32 flags; + __u16 protocol; /* protocol of this channel (4 = MPC) */ + /* + * I/O and irq related stuff + */ + struct ccw1 *ccw; + struct irb *irb; + /* + * RX/TX buffer size + */ + int max_bufsize; + struct sk_buff *trans_skb; /* transmit/receive buffer */ + struct sk_buff_head io_queue; /* universal I/O queue */ + struct tasklet_struct ch_tasklet; /* MPC ONLY */ + /* + * TX queue for collecting skb's during busy. + */ + struct sk_buff_head collect_queue; + /* + * Amount of data in collect_queue. + */ + int collect_len; + /* + * spinlock for collect_queue and collect_len + */ + spinlock_t collect_lock; + /* + * Timer for detecting unresposive + * I/O operations. + */ + fsm_timer timer; + /* MPC ONLY section begin */ + __u32 th_seq_num; /* SNA TH seq number */ + __u8 th_seg; + __u32 pdu_seq; + struct sk_buff *xid_skb; + char *xid_skb_data; + struct th_header *xid_th; + struct xid2 *xid; + char *xid_id; + struct th_header *rcvd_xid_th; + struct xid2 *rcvd_xid; + char *rcvd_xid_id; + __u8 in_mpcgroup; + fsm_timer sweep_timer; + struct sk_buff_head sweep_queue; + struct th_header *discontact_th; + struct tasklet_struct ch_disc_tasklet; + /* MPC ONLY section end */ + + int retry; /* retry counter for misc. operations */ + fsm_instance *fsm; /* finite state machine of this channel */ + struct net_device *netdev; /* corresponding net_device */ + struct ctcm_profile prof; + __u8 *trans_skb_data; + __u16 logflags; + __u8 sense_rc; /* last unit check sense code report control */ +}; + +struct ctcm_priv { + struct net_device_stats stats; + unsigned long tbusy; + + /* The MPC group struct of this interface */ + struct mpc_group *mpcg; /* MPC only */ + struct xid2 *xid; /* MPC only */ + + /* The finite state machine of this interface */ + fsm_instance *fsm; + + /* The protocol of this device */ + __u16 protocol; + + /* Timer for restarting after I/O Errors */ + fsm_timer restart_timer; + + int buffer_size; /* ctc only */ + + struct channel *channel[2]; +}; + +int ctcm_open(struct net_device *dev); +int ctcm_close(struct net_device *dev); + +extern const struct attribute_group *ctcm_attr_groups[]; + +/* + * Compatibility macros for busy handling + * of network devices. 
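+ * Bit 0 of ctcm_priv->tbusy is the busy flag: it is taken via
+ * test_and_set_bit() while the queue is stopped and cleared together
+ * with netif_wake_queue() once the device may transmit again.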
+ */ +static inline void ctcm_clear_busy_do(struct net_device *dev) +{ + clear_bit(0, &(((struct ctcm_priv *)dev->ml_priv)->tbusy)); + netif_wake_queue(dev); +} + +static inline void ctcm_clear_busy(struct net_device *dev) +{ + struct mpc_group *grp; + grp = ((struct ctcm_priv *)dev->ml_priv)->mpcg; + + if (!(grp && grp->in_sweep)) + ctcm_clear_busy_do(dev); +} + + +static inline int ctcm_test_and_set_busy(struct net_device *dev) +{ + netif_stop_queue(dev); + return test_and_set_bit(0, + &(((struct ctcm_priv *)dev->ml_priv)->tbusy)); +} + +extern int loglevel; +extern struct channel *channels; + +void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb); + +/* + * Functions related to setup and device detection. + */ + +static inline int ctcm_less_than(char *id1, char *id2) +{ + unsigned long dev1, dev2; + + id1 = id1 + 5; + id2 = id2 + 5; + + dev1 = simple_strtoul(id1, &id1, 16); + dev2 = simple_strtoul(id2, &id2, 16); + + return (dev1 < dev2); +} + +int ctcm_ch_alloc_buffer(struct channel *ch); + +static inline int ctcm_checkalloc_buffer(struct channel *ch) +{ + if (ch->trans_skb == NULL) + return ctcm_ch_alloc_buffer(ch); + if (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED) { + dev_kfree_skb(ch->trans_skb); + return ctcm_ch_alloc_buffer(ch); + } + return 0; +} + +struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv); + +/* test if protocol attribute (of struct ctcm_priv or struct channel) + * has MPC protocol setting. Type is not checked + */ +#define IS_MPC(p) ((p)->protocol == CTCM_PROTO_MPC) + +/* test if struct ctcm_priv of struct net_device has MPC protocol setting */ +#define IS_MPCDEV(dev) IS_MPC((struct ctcm_priv *)dev->ml_priv) + +static inline gfp_t gfp_type(void) +{ + return in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; +} + +/* + * Definition of our link level header. + */ +struct ll_header { + __u16 length; + __u16 type; + __u16 unused; +}; +#define LL_HEADER_LENGTH (sizeof(struct ll_header)) + +#endif diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c new file mode 100644 index 000000000..20a6097e1 --- /dev/null +++ b/drivers/s390/net/ctcm_mpc.c @@ -0,0 +1,2152 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2004, 2007 + * Authors: Belinda Thompson (belindat@us.ibm.com) + * Andy Richter (richtera@us.ibm.com) + * Peter Tiedemann (ptiedem@de.ibm.com) + */ + +/* + This module exports functions to be used by CCS: + EXPORT_SYMBOL(ctc_mpc_alloc_channel); + EXPORT_SYMBOL(ctc_mpc_establish_connectivity); + EXPORT_SYMBOL(ctc_mpc_dealloc_ch); + EXPORT_SYMBOL(ctc_mpc_flow_control); +*/ + +#undef DEBUG +#undef DEBUGDATA +#undef DEBUGCCW + +#define KMSG_COMPONENT "ctcm" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/sched.h> + +#include <linux/signal.h> +#include <linux/string.h> +#include <linux/proc_fs.h> + +#include <linux/ip.h> +#include <linux/if_arp.h> +#include <linux/tcp.h> +#include <linux/skbuff.h> +#include <linux/ctype.h> +#include <linux/netdevice.h> +#include <net/dst.h> + +#include <linux/io.h> /* instead of <asm/io.h> ok ? */ +#include <asm/ccwdev.h> +#include <asm/ccwgroup.h> +#include <linux/bitops.h> /* instead of <asm/bitops.h> ok ? */ +#include <linux/uaccess.h> /* instead of <asm/uaccess.h> ok ? 
*/ +#include <linux/wait.h> +#include <linux/moduleparam.h> +#include <asm/idals.h> + +#include "ctcm_main.h" +#include "ctcm_mpc.h" +#include "ctcm_fsms.h" + +static const struct xid2 init_xid = { + .xid2_type_id = XID_FM2, + .xid2_len = 0x45, + .xid2_adj_id = 0, + .xid2_rlen = 0x31, + .xid2_resv1 = 0, + .xid2_flag1 = 0, + .xid2_fmtt = 0, + .xid2_flag4 = 0x80, + .xid2_resv2 = 0, + .xid2_tgnum = 0, + .xid2_sender_id = 0, + .xid2_flag2 = 0, + .xid2_option = XID2_0, + .xid2_resv3 = "\x00", + .xid2_resv4 = 0, + .xid2_dlc_type = XID2_READ_SIDE, + .xid2_resv5 = 0, + .xid2_mpc_flag = 0, + .xid2_resv6 = 0, + .xid2_buf_len = (MPC_BUFSIZE_DEFAULT - 35), +}; + +static const struct th_header thnorm = { + .th_seg = 0x00, + .th_ch_flag = TH_IS_XID, + .th_blk_flag = TH_DATA_IS_XID, + .th_is_xid = 0x01, + .th_seq_num = 0x00000000, +}; + +static const struct th_header thdummy = { + .th_seg = 0x00, + .th_ch_flag = 0x00, + .th_blk_flag = TH_DATA_IS_XID, + .th_is_xid = 0x01, + .th_seq_num = 0x00000000, +}; + +/* + * Definition of one MPC group + */ + +/* + * Compatibility macros for busy handling + * of network devices. + */ + +static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb); + +/* + * MPC Group state machine actions (static prototypes) + */ +static void mpc_action_nop(fsm_instance *fsm, int event, void *arg); +static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg); +static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg); +static void mpc_action_timeout(fsm_instance *fi, int event, void *arg); +static int mpc_validate_xid(struct mpcg_info *mpcginfo); +static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg); +static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg); +static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg); +static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg); +static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg); +static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg); + +#ifdef DEBUGDATA +/*-------------------------------------------------------------------* +* Dump buffer format * +* * +*--------------------------------------------------------------------*/ +void ctcmpc_dumpit(char *buf, int len) +{ + __u32 ct, sw, rm, dup; + char *ptr, *rptr; + char tbuf[82], tdup[82]; + char addr[22]; + char boff[12]; + char bhex[82], duphex[82]; + char basc[40]; + + sw = 0; + rptr = ptr = buf; + rm = 16; + duphex[0] = 0x00; + dup = 0; + + for (ct = 0; ct < len; ct++, ptr++, rptr++) { + if (sw == 0) { + sprintf(addr, "%16.16llx", (__u64)rptr); + + sprintf(boff, "%4.4X", (__u32)ct); + bhex[0] = '\0'; + basc[0] = '\0'; + } + if ((sw == 4) || (sw == 12)) + strcat(bhex, " "); + if (sw == 8) + strcat(bhex, " "); + + sprintf(tbuf, "%2.2llX", (__u64)*ptr); + + tbuf[2] = '\0'; + strcat(bhex, tbuf); + if ((0 != isprint(*ptr)) && (*ptr >= 0x20)) + basc[sw] = *ptr; + else + basc[sw] = '.'; + + basc[sw+1] = '\0'; + sw++; + rm--; + if (sw != 16) + continue; + if ((strcmp(duphex, bhex)) != 0) { + if (dup != 0) { + sprintf(tdup, + "Duplicate as above to %s", addr); + ctcm_pr_debug(" --- %s ---\n", + tdup); + } + ctcm_pr_debug(" %s (+%s) : %s [%s]\n", + addr, boff, bhex, basc); + dup = 0; + strcpy(duphex, bhex); + } else + dup++; + + sw = 0; + rm = 16; + } /* endfor */ + + if (sw != 0) { + for ( ; rm > 0; rm--, sw++) { + if ((sw == 4) || (sw == 12)) + strcat(bhex, " "); + if (sw == 8) + strcat(bhex, " "); + strcat(bhex, " "); + strcat(basc, " "); + } + if 
(dup != 0) { + sprintf(tdup, "Duplicate as above to %s", addr); + ctcm_pr_debug(" --- %s ---\n", tdup); + } + ctcm_pr_debug(" %s (+%s) : %s [%s]\n", + addr, boff, bhex, basc); + } else { + if (dup >= 1) { + sprintf(tdup, "Duplicate as above to %s", addr); + ctcm_pr_debug(" --- %s ---\n", tdup); + } + if (dup != 0) { + ctcm_pr_debug(" %s (+%s) : %s [%s]\n", + addr, boff, bhex, basc); + } + } + + return; + +} /* end of ctcmpc_dumpit */ +#endif + +#ifdef DEBUGDATA +/* + * Dump header and first 16 bytes of an sk_buff for debugging purposes. + * + * skb The sk_buff to dump. + * offset Offset relative to skb-data, where to start the dump. + */ +void ctcmpc_dump_skb(struct sk_buff *skb, int offset) +{ + __u8 *p = skb->data; + struct th_header *header; + struct pdu *pheader; + int bl = skb->len; + int i; + + if (p == NULL) + return; + + p += offset; + header = (struct th_header *)p; + + ctcm_pr_debug("dump:\n"); + ctcm_pr_debug("skb len=%d \n", skb->len); + if (skb->len > 2) { + switch (header->th_ch_flag) { + case TH_HAS_PDU: + break; + case 0x00: + case TH_IS_XID: + if ((header->th_blk_flag == TH_DATA_IS_XID) && + (header->th_is_xid == 0x01)) + goto dumpth; + case TH_SWEEP_REQ: + goto dumpth; + case TH_SWEEP_RESP: + goto dumpth; + default: + break; + } + + pheader = (struct pdu *)p; + ctcm_pr_debug("pdu->offset: %d hex: %04x\n", + pheader->pdu_offset, pheader->pdu_offset); + ctcm_pr_debug("pdu->flag : %02x\n", pheader->pdu_flag); + ctcm_pr_debug("pdu->proto : %02x\n", pheader->pdu_proto); + ctcm_pr_debug("pdu->seq : %02x\n", pheader->pdu_seq); + goto dumpdata; + +dumpth: + ctcm_pr_debug("th->seg : %02x\n", header->th_seg); + ctcm_pr_debug("th->ch : %02x\n", header->th_ch_flag); + ctcm_pr_debug("th->blk_flag: %02x\n", header->th_blk_flag); + ctcm_pr_debug("th->type : %s\n", + (header->th_is_xid) ? "DATA" : "XID"); + ctcm_pr_debug("th->seqnum : %04x\n", header->th_seq_num); + + } +dumpdata: + if (bl > 32) + bl = 32; + ctcm_pr_debug("data: "); + for (i = 0; i < bl; i++) + ctcm_pr_debug("%02x%s", *p++, (i % 16) ? 
" " : "\n"); + ctcm_pr_debug("\n"); +} +#endif + +static struct net_device *ctcmpc_get_dev(int port_num) +{ + char device[20]; + struct net_device *dev; + struct ctcm_priv *priv; + + sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num); + + dev = __dev_get_by_name(&init_net, device); + + if (dev == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s: Device not found by name: %s", + CTCM_FUNTAIL, device); + return NULL; + } + priv = dev->ml_priv; + if (priv == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): dev->ml_priv is NULL", + CTCM_FUNTAIL, device); + return NULL; + } + if (priv->mpcg == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): priv->mpcg is NULL", + CTCM_FUNTAIL, device); + return NULL; + } + return dev; +} + +/* + * ctc_mpc_alloc_channel + * (exported interface) + * + * Device Initialization : + * ACTPATH driven IO operations + */ +int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int)) +{ + struct net_device *dev; + struct mpc_group *grp; + struct ctcm_priv *priv; + + dev = ctcmpc_get_dev(port_num); + if (dev == NULL) + return 1; + priv = dev->ml_priv; + grp = priv->mpcg; + + grp->allochanfunc = callback; + grp->port_num = port_num; + grp->port_persist = 1; + + CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO, + "%s(%s): state=%s", + CTCM_FUNTAIL, dev->name, fsm_getstate_str(grp->fsm)); + + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_INOP: + /* Group is in the process of terminating */ + grp->alloc_called = 1; + break; + case MPCG_STATE_RESET: + /* MPC Group will transition to state */ + /* MPCG_STATE_XID2INITW iff the minimum number */ + /* of 1 read and 1 write channel have successfully*/ + /* activated */ + /*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/ + if (callback) + grp->send_qllc_disc = 1; + fallthrough; + case MPCG_STATE_XID0IOWAIT: + fsm_deltimer(&grp->timer); + grp->outstanding_xid2 = 0; + grp->outstanding_xid7 = 0; + grp->outstanding_xid7_p2 = 0; + grp->saved_xid2 = NULL; + if (callback) + ctcm_open(dev); + fsm_event(priv->fsm, DEV_EVENT_START, dev); + break; + case MPCG_STATE_READY: + /* XID exchanges completed after PORT was activated */ + /* Link station already active */ + /* Maybe timing issue...retry callback */ + grp->allocchan_callback_retries++; + if (grp->allocchan_callback_retries < 4) { + if (grp->allochanfunc) + grp->allochanfunc(grp->port_num, + grp->group_max_buflen); + } else { + /* there are problems...bail out */ + /* there may be a state mismatch so restart */ + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + grp->allocchan_callback_retries = 0; + } + break; + } + + return 0; +} +EXPORT_SYMBOL(ctc_mpc_alloc_channel); + +/* + * ctc_mpc_establish_connectivity + * (exported interface) + */ +void ctc_mpc_establish_connectivity(int port_num, + void (*callback)(int, int, int)) +{ + struct net_device *dev; + struct mpc_group *grp; + struct ctcm_priv *priv; + struct channel *rch, *wch; + + dev = ctcmpc_get_dev(port_num); + if (dev == NULL) + return; + priv = dev->ml_priv; + grp = priv->mpcg; + rch = priv->channel[CTCM_READ]; + wch = priv->channel[CTCM_WRITE]; + + CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO, + "%s(%s): state=%s", + CTCM_FUNTAIL, dev->name, fsm_getstate_str(grp->fsm)); + + grp->estconnfunc = callback; + grp->port_num = port_num; + + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_READY: + /* XID exchanges completed after PORT was activated */ + /* Link station already active */ + /* Maybe timing issue...retry callback */ + fsm_deltimer(&grp->timer); + grp->estconn_callback_retries++; + if 
(grp->estconn_callback_retries < 4) { + if (grp->estconnfunc) { + grp->estconnfunc(grp->port_num, 0, + grp->group_max_buflen); + grp->estconnfunc = NULL; + } + } else { + /* there are problems...bail out */ + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + grp->estconn_callback_retries = 0; + } + break; + case MPCG_STATE_INOP: + case MPCG_STATE_RESET: + /* MPC Group is not ready to start XID - min num of */ + /* 1 read and 1 write channel have not been acquired*/ + + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): REJECTED - inactive channels", + CTCM_FUNTAIL, dev->name); + if (grp->estconnfunc) { + grp->estconnfunc(grp->port_num, -1, 0); + grp->estconnfunc = NULL; + } + break; + case MPCG_STATE_XID2INITW: + /* alloc channel was called but no XID exchange */ + /* has occurred. initiate xside XID exchange */ + /* make sure yside XID0 processing has not started */ + + if ((fsm_getstate(rch->fsm) > CH_XID0_PENDING) || + (fsm_getstate(wch->fsm) > CH_XID0_PENDING)) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): ABORT - PASSIVE XID", + CTCM_FUNTAIL, dev->name); + break; + } + grp->send_qllc_disc = 1; + fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIT); + fsm_deltimer(&grp->timer); + fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE, + MPCG_EVENT_TIMER, dev); + grp->outstanding_xid7 = 0; + grp->outstanding_xid7_p2 = 0; + grp->saved_xid2 = NULL; + if ((rch->in_mpcgroup) && + (fsm_getstate(rch->fsm) == CH_XID0_PENDING)) + fsm_event(grp->fsm, MPCG_EVENT_XID0DO, rch); + else { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): RX-%s not ready for ACTIVE XID0", + CTCM_FUNTAIL, dev->name, rch->id); + if (grp->estconnfunc) { + grp->estconnfunc(grp->port_num, -1, 0); + grp->estconnfunc = NULL; + } + fsm_deltimer(&grp->timer); + goto done; + } + if ((wch->in_mpcgroup) && + (fsm_getstate(wch->fsm) == CH_XID0_PENDING)) + fsm_event(grp->fsm, MPCG_EVENT_XID0DO, wch); + else { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): WX-%s not ready for ACTIVE XID0", + CTCM_FUNTAIL, dev->name, wch->id); + if (grp->estconnfunc) { + grp->estconnfunc(grp->port_num, -1, 0); + grp->estconnfunc = NULL; + } + fsm_deltimer(&grp->timer); + goto done; + } + break; + case MPCG_STATE_XID0IOWAIT: + /* already in active XID negotiations */ + default: + break; + } + +done: + CTCM_PR_DEBUG("Exit %s()\n", __func__); + return; +} +EXPORT_SYMBOL(ctc_mpc_establish_connectivity); + +/* + * ctc_mpc_dealloc_ch + * (exported interface) + */ +void ctc_mpc_dealloc_ch(int port_num) +{ + struct net_device *dev; + struct ctcm_priv *priv; + struct mpc_group *grp; + + dev = ctcmpc_get_dev(port_num); + if (dev == NULL) + return; + priv = dev->ml_priv; + grp = priv->mpcg; + + CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG, + "%s: %s: refcount = %d\n", + CTCM_FUNTAIL, dev->name, netdev_refcnt_read(dev)); + + fsm_deltimer(&priv->restart_timer); + grp->channels_terminating = 0; + fsm_deltimer(&grp->timer); + grp->allochanfunc = NULL; + grp->estconnfunc = NULL; + grp->port_persist = 0; + grp->send_qllc_disc = 0; + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + + ctcm_close(dev); + return; +} +EXPORT_SYMBOL(ctc_mpc_dealloc_ch); + +/* + * ctc_mpc_flow_control + * (exported interface) + */ +void ctc_mpc_flow_control(int port_num, int flowc) +{ + struct ctcm_priv *priv; + struct mpc_group *grp; + struct net_device *dev; + struct channel *rch; + int mpcg_state; + + dev = ctcmpc_get_dev(port_num); + if (dev == NULL) + return; + priv = dev->ml_priv; + grp = priv->mpcg; + + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, + "%s: %s: flowc = %d", + CTCM_FUNTAIL, 
dev->name, flowc); + + rch = priv->channel[CTCM_READ]; + + mpcg_state = fsm_getstate(grp->fsm); + switch (flowc) { + case 1: + if (mpcg_state == MPCG_STATE_FLOWC) + break; + if (mpcg_state == MPCG_STATE_READY) { + if (grp->flow_off_called == 1) + grp->flow_off_called = 0; + else + fsm_newstate(grp->fsm, MPCG_STATE_FLOWC); + break; + } + break; + case 0: + if (mpcg_state == MPCG_STATE_FLOWC) { + fsm_newstate(grp->fsm, MPCG_STATE_READY); + /* ensure any data that has accumulated */ + /* on the io_queue will now be sen t */ + tasklet_schedule(&rch->ch_tasklet); + } + /* possible race condition */ + if (mpcg_state == MPCG_STATE_READY) { + grp->flow_off_called = 1; + break; + } + break; + } + +} +EXPORT_SYMBOL(ctc_mpc_flow_control); + +static int mpc_send_qllc_discontact(struct net_device *); + +/* + * helper function of ctcmpc_unpack_skb +*/ +static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo) +{ + struct channel *rch = mpcginfo->ch; + struct net_device *dev = rch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct channel *ch = priv->channel[CTCM_WRITE]; + + CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id); + CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH); + + grp->sweep_rsp_pend_num--; + + if ((grp->sweep_req_pend_num == 0) && + (grp->sweep_rsp_pend_num == 0)) { + fsm_deltimer(&ch->sweep_timer); + grp->in_sweep = 0; + rch->th_seq_num = 0x00; + ch->th_seq_num = 0x00; + ctcm_clear_busy_do(dev); + } + + return; + +} + +/* + * helper function of mpc_rcvd_sweep_req + * which is a helper of ctcmpc_unpack_skb + */ +static void ctcmpc_send_sweep_resp(struct channel *rch) +{ + struct net_device *dev = rch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct th_sweep *header; + struct sk_buff *sweep_skb; + struct channel *ch = priv->channel[CTCM_WRITE]; + + CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id); + + sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA); + if (sweep_skb == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): sweep_skb allocation ERROR\n", + CTCM_FUNTAIL, rch->id); + goto done; + } + + header = kmalloc(sizeof(struct th_sweep), gfp_type()); + + if (!header) { + dev_kfree_skb_any(sweep_skb); + goto done; + } + + header->th.th_seg = 0x00 ; + header->th.th_ch_flag = TH_SWEEP_RESP; + header->th.th_blk_flag = 0x00; + header->th.th_is_xid = 0x00; + header->th.th_seq_num = 0x00; + header->sw.th_last_seq = ch->th_seq_num; + + skb_put_data(sweep_skb, header, TH_SWEEP_LENGTH); + + kfree(header); + + netif_trans_update(dev); + skb_queue_tail(&ch->sweep_queue, sweep_skb); + + fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch); + + return; + +done: + grp->in_sweep = 0; + ctcm_clear_busy_do(dev); + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + + return; +} + +/* + * helper function of ctcmpc_unpack_skb + */ +static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo) +{ + struct channel *rch = mpcginfo->ch; + struct net_device *dev = rch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct channel *ch = priv->channel[CTCM_WRITE]; + + if (do_debug) + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, + " %s(): ch=0x%p id=%s\n", __func__, ch, ch->id); + + if (grp->in_sweep == 0) { + grp->in_sweep = 1; + ctcm_test_and_set_busy(dev); + grp->sweep_req_pend_num = grp->active_channels[CTCM_READ]; + grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ]; + } + + CTCM_D3_DUMP((char *)mpcginfo->sweep, 
TH_SWEEP_LENGTH); + + grp->sweep_req_pend_num--; + ctcmpc_send_sweep_resp(ch); + kfree(mpcginfo); + return; +} + +/* + * MPC Group Station FSM definitions + */ +static const char *mpcg_event_names[] = { + [MPCG_EVENT_INOP] = "INOP Condition", + [MPCG_EVENT_DISCONC] = "Discontact Received", + [MPCG_EVENT_XID0DO] = "Channel Active - Start XID", + [MPCG_EVENT_XID2] = "XID2 Received", + [MPCG_EVENT_XID2DONE] = "XID0 Complete", + [MPCG_EVENT_XID7DONE] = "XID7 Complete", + [MPCG_EVENT_TIMER] = "XID Setup Timer", + [MPCG_EVENT_DOIO] = "XID DoIO", +}; + +static const char *mpcg_state_names[] = { + [MPCG_STATE_RESET] = "Reset", + [MPCG_STATE_INOP] = "INOP", + [MPCG_STATE_XID2INITW] = "Passive XID- XID0 Pending Start", + [MPCG_STATE_XID2INITX] = "Passive XID- XID0 Pending Complete", + [MPCG_STATE_XID7INITW] = "Passive XID- XID7 Pending P1 Start", + [MPCG_STATE_XID7INITX] = "Passive XID- XID7 Pending P2 Complete", + [MPCG_STATE_XID0IOWAIT] = "Active XID- XID0 Pending Start", + [MPCG_STATE_XID0IOWAIX] = "Active XID- XID0 Pending Complete", + [MPCG_STATE_XID7INITI] = "Active XID- XID7 Pending Start", + [MPCG_STATE_XID7INITZ] = "Active XID- XID7 Pending Complete ", + [MPCG_STATE_XID7INITF] = "XID - XID7 Complete ", + [MPCG_STATE_FLOWC] = "FLOW CONTROL ON", + [MPCG_STATE_READY] = "READY", +}; + +/* + * The MPC Group Station FSM + * 22 events + */ +static const fsm_node mpcg_fsm[] = { + { MPCG_STATE_RESET, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_INOP, MPCG_EVENT_INOP, mpc_action_nop }, + { MPCG_STATE_FLOWC, MPCG_EVENT_INOP, mpc_action_go_inop }, + + { MPCG_STATE_READY, MPCG_EVENT_DISCONC, mpc_action_discontact }, + { MPCG_STATE_READY, MPCG_EVENT_INOP, mpc_action_go_inop }, + + { MPCG_STATE_XID2INITW, MPCG_EVENT_XID0DO, mpc_action_doxid0 }, + { MPCG_STATE_XID2INITW, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 }, + { MPCG_STATE_XID2INITW, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_XID2INITW, MPCG_EVENT_TIMER, mpc_action_timeout }, + { MPCG_STATE_XID2INITW, MPCG_EVENT_DOIO, mpc_action_yside_xid }, + + { MPCG_STATE_XID2INITX, MPCG_EVENT_XID0DO, mpc_action_doxid0 }, + { MPCG_STATE_XID2INITX, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 }, + { MPCG_STATE_XID2INITX, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_XID2INITX, MPCG_EVENT_TIMER, mpc_action_timeout }, + { MPCG_STATE_XID2INITX, MPCG_EVENT_DOIO, mpc_action_yside_xid }, + + { MPCG_STATE_XID7INITW, MPCG_EVENT_XID2DONE, mpc_action_doxid7 }, + { MPCG_STATE_XID7INITW, MPCG_EVENT_DISCONC, mpc_action_discontact }, + { MPCG_STATE_XID7INITW, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 }, + { MPCG_STATE_XID7INITW, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_XID7INITW, MPCG_EVENT_TIMER, mpc_action_timeout }, + { MPCG_STATE_XID7INITW, MPCG_EVENT_XID7DONE, mpc_action_doxid7 }, + { MPCG_STATE_XID7INITW, MPCG_EVENT_DOIO, mpc_action_yside_xid }, + + { MPCG_STATE_XID7INITX, MPCG_EVENT_DISCONC, mpc_action_discontact }, + { MPCG_STATE_XID7INITX, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 }, + { MPCG_STATE_XID7INITX, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_XID7INITX, MPCG_EVENT_XID7DONE, mpc_action_doxid7 }, + { MPCG_STATE_XID7INITX, MPCG_EVENT_TIMER, mpc_action_timeout }, + { MPCG_STATE_XID7INITX, MPCG_EVENT_DOIO, mpc_action_yside_xid }, + + { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID0DO, mpc_action_doxid0 }, + { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DISCONC, mpc_action_discontact }, + { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 }, + { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_XID0IOWAIT, 
MPCG_EVENT_TIMER, mpc_action_timeout }, + { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DOIO, mpc_action_xside_xid }, + + { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID0DO, mpc_action_doxid0 }, + { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DISCONC, mpc_action_discontact }, + { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 }, + { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_TIMER, mpc_action_timeout }, + { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DOIO, mpc_action_xside_xid }, + + { MPCG_STATE_XID7INITI, MPCG_EVENT_XID2DONE, mpc_action_doxid7 }, + { MPCG_STATE_XID7INITI, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 }, + { MPCG_STATE_XID7INITI, MPCG_EVENT_DISCONC, mpc_action_discontact }, + { MPCG_STATE_XID7INITI, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_XID7INITI, MPCG_EVENT_TIMER, mpc_action_timeout }, + { MPCG_STATE_XID7INITI, MPCG_EVENT_XID7DONE, mpc_action_doxid7 }, + { MPCG_STATE_XID7INITI, MPCG_EVENT_DOIO, mpc_action_xside_xid }, + + { MPCG_STATE_XID7INITZ, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 }, + { MPCG_STATE_XID7INITZ, MPCG_EVENT_XID7DONE, mpc_action_doxid7 }, + { MPCG_STATE_XID7INITZ, MPCG_EVENT_DISCONC, mpc_action_discontact }, + { MPCG_STATE_XID7INITZ, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_XID7INITZ, MPCG_EVENT_TIMER, mpc_action_timeout }, + { MPCG_STATE_XID7INITZ, MPCG_EVENT_DOIO, mpc_action_xside_xid }, + + { MPCG_STATE_XID7INITF, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_XID7INITF, MPCG_EVENT_XID7DONE, mpc_action_go_ready }, +}; + +static int mpcg_fsm_len = ARRAY_SIZE(mpcg_fsm); + +/* + * MPC Group Station FSM action + * CTCM_PROTO_MPC only + */ +static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg) +{ + struct net_device *dev = arg; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + if (grp == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): No MPC group", + CTCM_FUNTAIL, dev->name); + return; + } + + fsm_deltimer(&grp->timer); + + if (grp->saved_xid2->xid2_flag2 == 0x40) { + priv->xid->xid2_flag2 = 0x00; + if (grp->estconnfunc) { + grp->estconnfunc(grp->port_num, 1, + grp->group_max_buflen); + grp->estconnfunc = NULL; + } else if (grp->allochanfunc) + grp->send_qllc_disc = 1; + + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): fails", + CTCM_FUNTAIL, dev->name); + return; + } + + grp->port_persist = 1; + grp->out_of_sequence = 0; + grp->estconn_called = 0; + + tasklet_hi_schedule(&grp->mpc_tasklet2); + + return; +} + +/* + * helper of ctcm_init_netdevice + * CTCM_PROTO_MPC only + */ +void mpc_group_ready(unsigned long adev) +{ + struct net_device *dev = (struct net_device *)adev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct channel *ch = NULL; + + if (grp == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): No MPC group", + CTCM_FUNTAIL, dev->name); + return; + } + + CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE, + "%s: %s: GROUP TRANSITIONED TO READY, maxbuf = %d\n", + CTCM_FUNTAIL, dev->name, grp->group_max_buflen); + + fsm_newstate(grp->fsm, MPCG_STATE_READY); + + /* Put up a read on the channel */ + ch = priv->channel[CTCM_READ]; + ch->pdu_seq = 0; + CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" , + __func__, ch->pdu_seq); + + ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch); + /* Put the write channel in idle state */ + ch = priv->channel[CTCM_WRITE]; + if (ch->collect_len > 0) { + spin_lock(&ch->collect_lock); + 
ctcm_purge_skb_queue(&ch->collect_queue); + ch->collect_len = 0; + spin_unlock(&ch->collect_lock); + } + ctcm_chx_txidle(ch->fsm, CTC_EVENT_START, ch); + ctcm_clear_busy(dev); + + if (grp->estconnfunc) { + grp->estconnfunc(grp->port_num, 0, + grp->group_max_buflen); + grp->estconnfunc = NULL; + } else if (grp->allochanfunc) + grp->allochanfunc(grp->port_num, grp->group_max_buflen); + + grp->send_qllc_disc = 1; + grp->changed_side = 0; + + return; + +} + +/* + * Increment the MPC Group Active Channel Counts + * helper of dev_action (called from channel fsm) + */ +void mpc_channel_action(struct channel *ch, int direction, int action) +{ + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + if (grp == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): No MPC group", + CTCM_FUNTAIL, dev->name); + return; + } + + CTCM_PR_DEBUG("enter %s: ch=0x%p id=%s\n", __func__, ch, ch->id); + + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, + "%s: %i / Grp:%s total_channels=%i, active_channels: " + "read=%i, write=%i\n", __func__, action, + fsm_getstate_str(grp->fsm), grp->num_channel_paths, + grp->active_channels[CTCM_READ], + grp->active_channels[CTCM_WRITE]); + + if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) { + grp->num_channel_paths++; + grp->active_channels[direction]++; + grp->outstanding_xid2++; + ch->in_mpcgroup = 1; + + if (ch->xid_skb != NULL) + dev_kfree_skb_any(ch->xid_skb); + + ch->xid_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, + GFP_ATOMIC | GFP_DMA); + if (ch->xid_skb == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): Couldn't alloc ch xid_skb\n", + CTCM_FUNTAIL, dev->name); + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + return; + } + ch->xid_skb_data = ch->xid_skb->data; + ch->xid_th = (struct th_header *)ch->xid_skb->data; + skb_put(ch->xid_skb, TH_HEADER_LENGTH); + ch->xid = (struct xid2 *)skb_tail_pointer(ch->xid_skb); + skb_put(ch->xid_skb, XID2_LENGTH); + ch->xid_id = skb_tail_pointer(ch->xid_skb); + ch->xid_skb->data = ch->xid_skb_data; + skb_reset_tail_pointer(ch->xid_skb); + ch->xid_skb->len = 0; + + skb_put_data(ch->xid_skb, grp->xid_skb->data, + grp->xid_skb->len); + + ch->xid->xid2_dlc_type = + ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) + ? 
XID2_READ_SIDE : XID2_WRITE_SIDE); + + if (CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) + ch->xid->xid2_buf_len = 0x00; + + ch->xid_skb->data = ch->xid_skb_data; + skb_reset_tail_pointer(ch->xid_skb); + ch->xid_skb->len = 0; + + fsm_newstate(ch->fsm, CH_XID0_PENDING); + + if ((grp->active_channels[CTCM_READ] > 0) && + (grp->active_channels[CTCM_WRITE] > 0) && + (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) { + fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW); + CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE, + "%s: %s: MPC GROUP CHANNELS ACTIVE\n", + __func__, dev->name); + } + } else if ((action == MPC_CHANNEL_REMOVE) && + (ch->in_mpcgroup == 1)) { + ch->in_mpcgroup = 0; + grp->num_channel_paths--; + grp->active_channels[direction]--; + + if (ch->xid_skb != NULL) + dev_kfree_skb_any(ch->xid_skb); + ch->xid_skb = NULL; + + if (grp->channels_terminating) + goto done; + + if (((grp->active_channels[CTCM_READ] == 0) && + (grp->active_channels[CTCM_WRITE] > 0)) + || ((grp->active_channels[CTCM_WRITE] == 0) && + (grp->active_channels[CTCM_READ] > 0))) + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + } +done: + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, + "exit %s: %i / Grp:%s total_channels=%i, active_channels: " + "read=%i, write=%i\n", __func__, action, + fsm_getstate_str(grp->fsm), grp->num_channel_paths, + grp->active_channels[CTCM_READ], + grp->active_channels[CTCM_WRITE]); + + CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id); +} + +/** + * Unpack a just received skb and hand it over to + * upper layers. + * special MPC version of unpack_skb. + * + * ch The channel where this skb has been received. + * pskb The received skb. + */ +static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb) +{ + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct pdu *curr_pdu; + struct mpcg_info *mpcginfo; + struct th_header *header = NULL; + struct th_sweep *sweep = NULL; + int pdu_last_seen = 0; + __u32 new_len; + struct sk_buff *skb; + int skblen; + int sendrc = 0; + + CTCM_PR_DEBUG("ctcmpc enter: %s() %s cp:%i ch:%s\n", + __func__, dev->name, smp_processor_id(), ch->id); + + header = (struct th_header *)pskb->data; + if ((header->th_seg == 0) && + (header->th_ch_flag == 0) && + (header->th_blk_flag == 0) && + (header->th_seq_num == 0)) + /* nothing for us */ goto done; + + CTCM_PR_DBGDATA("%s: th_header\n", __func__); + CTCM_D3_DUMP((char *)header, TH_HEADER_LENGTH); + CTCM_PR_DBGDATA("%s: pskb len: %04x \n", __func__, pskb->len); + + pskb->dev = dev; + pskb->ip_summed = CHECKSUM_UNNECESSARY; + skb_pull(pskb, TH_HEADER_LENGTH); + + if (likely(header->th_ch_flag == TH_HAS_PDU)) { + CTCM_PR_DBGDATA("%s: came into th_has_pdu\n", __func__); + if ((fsm_getstate(grp->fsm) == MPCG_STATE_FLOWC) || + ((fsm_getstate(grp->fsm) == MPCG_STATE_READY) && + (header->th_seq_num != ch->th_seq_num + 1) && + (ch->th_seq_num != 0))) { + /* This is NOT the next segment * + * we are not the correct race winner * + * go away and let someone else win * + * BUT..this only applies if xid negot * + * is done * + */ + grp->out_of_sequence += 1; + __skb_push(pskb, TH_HEADER_LENGTH); + skb_queue_tail(&ch->io_queue, pskb); + CTCM_PR_DBGDATA("%s: th_seq_num expect:%08x " + "got:%08x\n", __func__, + ch->th_seq_num + 1, header->th_seq_num); + + return; + } + grp->out_of_sequence = 0; + ch->th_seq_num = header->th_seq_num; + + CTCM_PR_DBGDATA("ctcmpc: %s() FromVTAM_th_seq=%08x\n", + __func__, ch->th_seq_num); + + if (unlikely(fsm_getstate(grp->fsm) != 
MPCG_STATE_READY)) + goto done; + while ((pskb->len > 0) && !pdu_last_seen) { + curr_pdu = (struct pdu *)pskb->data; + + CTCM_PR_DBGDATA("%s: pdu_header\n", __func__); + CTCM_D3_DUMP((char *)pskb->data, PDU_HEADER_LENGTH); + CTCM_PR_DBGDATA("%s: pskb len: %04x \n", + __func__, pskb->len); + + skb_pull(pskb, PDU_HEADER_LENGTH); + + if (curr_pdu->pdu_flag & PDU_LAST) + pdu_last_seen = 1; + if (curr_pdu->pdu_flag & PDU_CNTL) + pskb->protocol = htons(ETH_P_SNAP); + else + pskb->protocol = htons(ETH_P_SNA_DIX); + + if ((pskb->len <= 0) || (pskb->len > ch->max_bufsize)) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): Dropping packet with " + "illegal siize %d", + CTCM_FUNTAIL, dev->name, pskb->len); + + priv->stats.rx_dropped++; + priv->stats.rx_length_errors++; + goto done; + } + skb_reset_mac_header(pskb); + new_len = curr_pdu->pdu_offset; + CTCM_PR_DBGDATA("%s: new_len: %04x \n", + __func__, new_len); + if ((new_len == 0) || (new_len > pskb->len)) { + /* should never happen */ + /* pskb len must be hosed...bail out */ + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): non valid pdu_offset: %04x", + /* "data may be lost", */ + CTCM_FUNTAIL, dev->name, new_len); + goto done; + } + skb = __dev_alloc_skb(new_len+4, GFP_ATOMIC); + + if (!skb) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): MEMORY allocation error", + CTCM_FUNTAIL, dev->name); + priv->stats.rx_dropped++; + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + goto done; + } + skb_put_data(skb, pskb->data, new_len); + + skb_reset_mac_header(skb); + skb->dev = pskb->dev; + skb->protocol = pskb->protocol; + skb->ip_summed = CHECKSUM_UNNECESSARY; + *((__u32 *) skb_push(skb, 4)) = ch->pdu_seq; + ch->pdu_seq++; + + if (do_debug_data) { + ctcm_pr_debug("%s: ToDCM_pdu_seq= %08x\n", + __func__, ch->pdu_seq); + ctcm_pr_debug("%s: skb:%0lx " + "skb len: %d \n", __func__, + (unsigned long)skb, skb->len); + ctcm_pr_debug("%s: up to 32 bytes " + "of pdu_data sent\n", __func__); + ctcmpc_dump32((char *)skb->data, skb->len); + } + + skblen = skb->len; + sendrc = netif_rx(skb); + priv->stats.rx_packets++; + priv->stats.rx_bytes += skblen; + skb_pull(pskb, new_len); /* point to next PDU */ + } + } else { + mpcginfo = kmalloc(sizeof(struct mpcg_info), gfp_type()); + if (mpcginfo == NULL) + goto done; + + mpcginfo->ch = ch; + mpcginfo->th = header; + mpcginfo->skb = pskb; + CTCM_PR_DEBUG("%s: Not PDU - may be control pkt\n", + __func__); + /* it's a sweep? 
*/ + sweep = (struct th_sweep *)pskb->data; + mpcginfo->sweep = sweep; + if (header->th_ch_flag == TH_SWEEP_REQ) + mpc_rcvd_sweep_req(mpcginfo); + else if (header->th_ch_flag == TH_SWEEP_RESP) + mpc_rcvd_sweep_resp(mpcginfo); + else if (header->th_blk_flag == TH_DATA_IS_XID) { + struct xid2 *thisxid = (struct xid2 *)pskb->data; + skb_pull(pskb, XID2_LENGTH); + mpcginfo->xid = thisxid; + fsm_event(grp->fsm, MPCG_EVENT_XID2, mpcginfo); + } else if (header->th_blk_flag == TH_DISCONTACT) + fsm_event(grp->fsm, MPCG_EVENT_DISCONC, mpcginfo); + else if (header->th_seq_num != 0) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): control pkt expected\n", + CTCM_FUNTAIL, dev->name); + priv->stats.rx_dropped++; + /* mpcginfo only used for non-data transfers */ + if (do_debug_data) + ctcmpc_dump_skb(pskb, -8); + } + kfree(mpcginfo); + } +done: + + dev_kfree_skb_any(pskb); + if (sendrc == NET_RX_DROP) { + dev_warn(&dev->dev, + "The network backlog for %s is exceeded, " + "package dropped\n", __func__); + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + } + + CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n", + __func__, dev->name, ch, ch->id); +} + +/** + * tasklet helper for mpc's skb unpacking. + * + * ch The channel to work on. + * Allow flow control back pressure to occur here. + * Throttling back channel can result in excessive + * channel inactivity and system deact of channel + */ +void ctcmpc_bh(unsigned long thischan) +{ + struct channel *ch = (struct channel *)thischan; + struct sk_buff *skb; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + CTCM_PR_DEBUG("%s cp:%i enter: %s() %s\n", + dev->name, smp_processor_id(), __func__, ch->id); + /* caller has requested driver to throttle back */ + while ((fsm_getstate(grp->fsm) != MPCG_STATE_FLOWC) && + (skb = skb_dequeue(&ch->io_queue))) { + ctcmpc_unpack_skb(ch, skb); + if (grp->out_of_sequence > 20) { + /* assume data loss has occurred if */ + /* missing seq_num for extended */ + /* period of time */ + grp->out_of_sequence = 0; + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + break; + } + if (skb == skb_peek(&ch->io_queue)) + break; + } + CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n", + __func__, dev->name, ch, ch->id); + return; +} + +/* + * MPC Group Initializations + */ +struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv) +{ + struct mpc_group *grp; + + CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO, + "Enter %s(%p)", CTCM_FUNTAIL, priv); + + grp = kzalloc(sizeof(struct mpc_group), GFP_KERNEL); + if (grp == NULL) + return NULL; + + grp->fsm = init_fsm("mpcg", mpcg_state_names, mpcg_event_names, + MPCG_NR_STATES, MPCG_NR_EVENTS, mpcg_fsm, + mpcg_fsm_len, GFP_KERNEL); + if (grp->fsm == NULL) { + kfree(grp); + return NULL; + } + + fsm_newstate(grp->fsm, MPCG_STATE_RESET); + fsm_settimer(grp->fsm, &grp->timer); + + grp->xid_skb = + __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA); + if (grp->xid_skb == NULL) { + kfree_fsm(grp->fsm); + kfree(grp); + return NULL; + } + /* base xid for all channels in group */ + grp->xid_skb_data = grp->xid_skb->data; + grp->xid_th = (struct th_header *)grp->xid_skb->data; + skb_put_data(grp->xid_skb, &thnorm, TH_HEADER_LENGTH); + + grp->xid = (struct xid2 *)skb_tail_pointer(grp->xid_skb); + skb_put_data(grp->xid_skb, &init_xid, XID2_LENGTH); + grp->xid->xid2_adj_id = jiffies | 0xfff00000; + grp->xid->xid2_sender_id = jiffies; + + grp->xid_id = skb_tail_pointer(grp->xid_skb); + skb_put_data(grp->xid_skb, "VTAM", 4); + + grp->rcvd_xid_skb = + 
__dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA); + if (grp->rcvd_xid_skb == NULL) { + kfree_fsm(grp->fsm); + dev_kfree_skb(grp->xid_skb); + kfree(grp); + return NULL; + } + grp->rcvd_xid_data = grp->rcvd_xid_skb->data; + grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data; + skb_put_data(grp->rcvd_xid_skb, &thnorm, TH_HEADER_LENGTH); + grp->saved_xid2 = NULL; + priv->xid = grp->xid; + priv->mpcg = grp; + return grp; +} + +/* + * The MPC Group Station FSM + */ + +/* + * MPC Group Station FSM actions + * CTCM_PROTO_MPC only + */ + +/** + * NOP action for statemachines + */ +static void mpc_action_nop(fsm_instance *fi, int event, void *arg) +{ +} + +/* + * invoked when the device transitions to dev_stopped + * MPC will stop each individual channel if a single XID failure + * occurs, or will intitiate all channels be stopped if a GROUP + * level failure occurs. + */ +static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg) +{ + struct net_device *dev = arg; + struct ctcm_priv *priv; + struct mpc_group *grp; + struct channel *wch; + + CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name); + + priv = dev->ml_priv; + grp = priv->mpcg; + grp->flow_off_called = 0; + fsm_deltimer(&grp->timer); + if (grp->channels_terminating) + return; + + grp->channels_terminating = 1; + grp->saved_state = fsm_getstate(grp->fsm); + fsm_newstate(grp->fsm, MPCG_STATE_INOP); + if (grp->saved_state > MPCG_STATE_XID7INITF) + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, + "%s(%s): MPC GROUP INOPERATIVE", + CTCM_FUNTAIL, dev->name); + if ((grp->saved_state != MPCG_STATE_RESET) || + /* dealloc_channel has been called */ + (grp->port_persist == 0)) + fsm_deltimer(&priv->restart_timer); + + wch = priv->channel[CTCM_WRITE]; + + switch (grp->saved_state) { + case MPCG_STATE_RESET: + case MPCG_STATE_INOP: + case MPCG_STATE_XID2INITW: + case MPCG_STATE_XID0IOWAIT: + case MPCG_STATE_XID2INITX: + case MPCG_STATE_XID7INITW: + case MPCG_STATE_XID7INITX: + case MPCG_STATE_XID0IOWAIX: + case MPCG_STATE_XID7INITI: + case MPCG_STATE_XID7INITZ: + case MPCG_STATE_XID7INITF: + break; + case MPCG_STATE_FLOWC: + case MPCG_STATE_READY: + default: + tasklet_hi_schedule(&wch->ch_disc_tasklet); + } + + grp->xid2_tgnum = 0; + grp->group_max_buflen = 0; /*min of all received */ + grp->outstanding_xid2 = 0; + grp->outstanding_xid7 = 0; + grp->outstanding_xid7_p2 = 0; + grp->saved_xid2 = NULL; + grp->xidnogood = 0; + grp->changed_side = 0; + + grp->rcvd_xid_skb->data = grp->rcvd_xid_data; + skb_reset_tail_pointer(grp->rcvd_xid_skb); + grp->rcvd_xid_skb->len = 0; + grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data; + skb_put_data(grp->rcvd_xid_skb, &thnorm, TH_HEADER_LENGTH); + + if (grp->send_qllc_disc == 1) { + grp->send_qllc_disc = 0; + mpc_send_qllc_discontact(dev); + } + + /* DO NOT issue DEV_EVENT_STOP directly out of this code */ + /* This can result in INOP of VTAM PU due to halting of */ + /* outstanding IO which causes a sense to be returned */ + /* Only about 3 senses are allowed and then IOS/VTAM will*/ + /* become unreachable without manual intervention */ + if ((grp->port_persist == 1) || (grp->alloc_called)) { + grp->alloc_called = 0; + fsm_deltimer(&priv->restart_timer); + fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_RESTART, dev); + fsm_newstate(grp->fsm, MPCG_STATE_RESET); + if (grp->saved_state > MPCG_STATE_XID7INITF) + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS, + "%s(%s): MPC GROUP RECOVERY SCHEDULED", + CTCM_FUNTAIL, dev->name); + } else { + fsm_deltimer(&priv->restart_timer); + 
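The restart handling here relies on the timer helpers declared in fsm.h later in this patch: a timer is bound to an FSM instance once with fsm_settimer(), armed with fsm_addtimer() (duration in milliseconds plus the event to raise on expiry), and cancelled with fsm_deltimer(). A minimal sketch of that life cycle follows; that the restart timer is bound to the device FSM (priv->fsm) is an assumption of the sketch, not something this hunk shows.

	/* Hedged sketch of the fsm timer life cycle (not driver code):
	 * bind once, arm with an expiry event, cancel when no longer needed.
	 */
	static void example_schedule_restart(struct ctcm_priv *priv,
					     struct net_device *dev)
	{
		fsm_settimer(priv->fsm, &priv->restart_timer);	/* bind once */
		fsm_addtimer(&priv->restart_timer, 500,		/* 500 ms */
			     DEV_EVENT_RESTART, dev);
		/* ... */
		fsm_deltimer(&priv->restart_timer);		/* cancel */
	}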
fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_STOP, dev); + fsm_newstate(grp->fsm, MPCG_STATE_RESET); + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS, + "%s(%s): NO MPC GROUP RECOVERY ATTEMPTED", + CTCM_FUNTAIL, dev->name); + } +} + +/** + * Handle mpc group action timeout. + * MPC Group Station FSM action + * CTCM_PROTO_MPC only + * + * fi An instance of an mpc_group fsm. + * event The event, just happened. + * arg Generic pointer, casted from net_device * upon call. + */ +static void mpc_action_timeout(fsm_instance *fi, int event, void *arg) +{ + struct net_device *dev = arg; + struct ctcm_priv *priv; + struct mpc_group *grp; + struct channel *wch; + struct channel *rch; + + priv = dev->ml_priv; + grp = priv->mpcg; + wch = priv->channel[CTCM_WRITE]; + rch = priv->channel[CTCM_READ]; + + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_XID2INITW: + /* Unless there is outstanding IO on the */ + /* channel just return and wait for ATTN */ + /* interrupt to begin XID negotiations */ + if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) && + (fsm_getstate(wch->fsm) == CH_XID0_PENDING)) + break; + fallthrough; + default: + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + } + + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, + "%s: dev=%s exit", + CTCM_FUNTAIL, dev->name); + return; +} + +/* + * MPC Group Station FSM action + * CTCM_PROTO_MPC only + */ +void mpc_action_discontact(fsm_instance *fi, int event, void *arg) +{ + struct mpcg_info *mpcginfo = arg; + struct channel *ch = mpcginfo->ch; + struct net_device *dev; + struct ctcm_priv *priv; + struct mpc_group *grp; + + if (ch) { + dev = ch->netdev; + if (dev) { + priv = dev->ml_priv; + if (priv) { + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, + "%s: %s: %s\n", + CTCM_FUNTAIL, dev->name, ch->id); + grp = priv->mpcg; + grp->send_qllc_disc = 1; + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + } + } + } + + return; +} + +/* + * MPC Group Station - not part of FSM + * CTCM_PROTO_MPC only + * called from add_channel in ctcm_main.c + */ +void mpc_action_send_discontact(unsigned long thischan) +{ + int rc; + struct channel *ch = (struct channel *)thischan; + unsigned long saveflags = 0; + + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + rc = ccw_device_start(ch->cdev, &ch->ccw[15], 0, 0xff, 0); + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); + + if (rc != 0) { + ctcm_ccw_check_rc(ch, rc, (char *)__func__); + } + + return; +} + + +/* + * helper function of mpc FSM + * CTCM_PROTO_MPC only + * mpc_action_rcvd_xid7 +*/ +static int mpc_validate_xid(struct mpcg_info *mpcginfo) +{ + struct channel *ch = mpcginfo->ch; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct xid2 *xid = mpcginfo->xid; + int rc = 0; + __u64 our_id = 0; + __u64 their_id = 0; + int len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH; + + CTCM_PR_DEBUG("Enter %s: xid=%p\n", __func__, xid); + + if (xid == NULL) { + rc = 1; + /* XID REJECTED: xid == NULL */ + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): xid = NULL", + CTCM_FUNTAIL, ch->id); + goto done; + } + + CTCM_D3_DUMP((char *)xid, XID2_LENGTH); + + /*the received direction should be the opposite of ours */ + if (((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? 
XID2_WRITE_SIDE : + XID2_READ_SIDE) != xid->xid2_dlc_type) { + rc = 2; + /* XID REJECTED: r/w channel pairing mismatch */ + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): r/w channel pairing mismatch", + CTCM_FUNTAIL, ch->id); + goto done; + } + + if (xid->xid2_dlc_type == XID2_READ_SIDE) { + CTCM_PR_DEBUG("%s: grpmaxbuf:%d xid2buflen:%d\n", __func__, + grp->group_max_buflen, xid->xid2_buf_len); + + if (grp->group_max_buflen == 0 || grp->group_max_buflen > + xid->xid2_buf_len - len) + grp->group_max_buflen = xid->xid2_buf_len - len; + } + + if (grp->saved_xid2 == NULL) { + grp->saved_xid2 = + (struct xid2 *)skb_tail_pointer(grp->rcvd_xid_skb); + + skb_put_data(grp->rcvd_xid_skb, xid, XID2_LENGTH); + grp->rcvd_xid_skb->data = grp->rcvd_xid_data; + + skb_reset_tail_pointer(grp->rcvd_xid_skb); + grp->rcvd_xid_skb->len = 0; + + /* convert two 32 bit numbers into 1 64 bit for id compare */ + our_id = (__u64)priv->xid->xid2_adj_id; + our_id = our_id << 32; + our_id = our_id + priv->xid->xid2_sender_id; + their_id = (__u64)xid->xid2_adj_id; + their_id = their_id << 32; + their_id = their_id + xid->xid2_sender_id; + /* lower id assume the xside role */ + if (our_id < their_id) { + grp->roll = XSIDE; + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, + "%s(%s): WE HAVE LOW ID - TAKE XSIDE", + CTCM_FUNTAIL, ch->id); + } else { + grp->roll = YSIDE; + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, + "%s(%s): WE HAVE HIGH ID - TAKE YSIDE", + CTCM_FUNTAIL, ch->id); + } + + } else { + if (xid->xid2_flag4 != grp->saved_xid2->xid2_flag4) { + rc = 3; + /* XID REJECTED: xid flag byte4 mismatch */ + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): xid flag byte4 mismatch", + CTCM_FUNTAIL, ch->id); + } + if (xid->xid2_flag2 == 0x40) { + rc = 4; + /* XID REJECTED - xid NOGOOD */ + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): xid NOGOOD", + CTCM_FUNTAIL, ch->id); + } + if (xid->xid2_adj_id != grp->saved_xid2->xid2_adj_id) { + rc = 5; + /* XID REJECTED - Adjacent Station ID Mismatch */ + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): Adjacent Station ID Mismatch", + CTCM_FUNTAIL, ch->id); + } + if (xid->xid2_sender_id != grp->saved_xid2->xid2_sender_id) { + rc = 6; + /* XID REJECTED - Sender Address Mismatch */ + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): Sender Address Mismatch", + CTCM_FUNTAIL, ch->id); + } + } +done: + if (rc) { + dev_warn(&dev->dev, + "The XID used in the MPC protocol is not valid, " + "rc = %d\n", rc); + priv->xid->xid2_flag2 = 0x40; + grp->saved_xid2->xid2_flag2 = 0x40; + } + + return rc; +} + +/* + * MPC Group Station FSM action + * CTCM_PROTO_MPC only + */ +static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side) +{ + struct channel *ch = arg; + int rc = 0; + int gotlock = 0; + unsigned long saveflags = 0; /* avoids compiler warning with + spin_unlock_irqrestore */ + + CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", + __func__, smp_processor_id(), ch, ch->id); + + if (ctcm_checkalloc_buffer(ch)) + goto done; + + /* + * skb data-buffer referencing: + */ + ch->trans_skb->data = ch->trans_skb_data; + skb_reset_tail_pointer(ch->trans_skb); + ch->trans_skb->len = 0; + /* result of the previous 3 statements is NOT always + * already set after ctcm_checkalloc_buffer + * because of possible reuse of the trans_skb + */ + memset(ch->trans_skb->data, 0, 16); + ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data; + /* check is main purpose here: */ + skb_put(ch->trans_skb, TH_HEADER_LENGTH); + ch->rcvd_xid = (struct xid2 *)skb_tail_pointer(ch->trans_skb); + /* 
check is main purpose here: */ + skb_put(ch->trans_skb, XID2_LENGTH); + ch->rcvd_xid_id = skb_tail_pointer(ch->trans_skb); + /* cleanup back to startpoint */ + ch->trans_skb->data = ch->trans_skb_data; + skb_reset_tail_pointer(ch->trans_skb); + ch->trans_skb->len = 0; + + /* non-checking rewrite of above skb data-buffer referencing: */ + /* + memset(ch->trans_skb->data, 0, 16); + ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data; + ch->rcvd_xid = (struct xid2 *)(ch->trans_skb_data + TH_HEADER_LENGTH); + ch->rcvd_xid_id = ch->trans_skb_data + TH_HEADER_LENGTH + XID2_LENGTH; + */ + + ch->ccw[8].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[8].count = 0; + ch->ccw[8].cda = 0x00; + + if (!(ch->xid_th && ch->xid && ch->xid_id)) + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO, + "%s(%s): xid_th=%p, xid=%p, xid_id=%p", + CTCM_FUNTAIL, ch->id, ch->xid_th, ch->xid, ch->xid_id); + + if (side == XSIDE) { + /* mpc_action_xside_xid */ + if (ch->xid_th == NULL) + goto done; + ch->ccw[9].cmd_code = CCW_CMD_WRITE; + ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[9].count = TH_HEADER_LENGTH; + ch->ccw[9].cda = virt_to_phys(ch->xid_th); + + if (ch->xid == NULL) + goto done; + ch->ccw[10].cmd_code = CCW_CMD_WRITE; + ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[10].count = XID2_LENGTH; + ch->ccw[10].cda = virt_to_phys(ch->xid); + + ch->ccw[11].cmd_code = CCW_CMD_READ; + ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[11].count = TH_HEADER_LENGTH; + ch->ccw[11].cda = virt_to_phys(ch->rcvd_xid_th); + + ch->ccw[12].cmd_code = CCW_CMD_READ; + ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[12].count = XID2_LENGTH; + ch->ccw[12].cda = virt_to_phys(ch->rcvd_xid); + + ch->ccw[13].cmd_code = CCW_CMD_READ; + ch->ccw[13].cda = virt_to_phys(ch->rcvd_xid_id); + + } else { /* side == YSIDE : mpc_action_yside_xid */ + ch->ccw[9].cmd_code = CCW_CMD_READ; + ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[9].count = TH_HEADER_LENGTH; + ch->ccw[9].cda = virt_to_phys(ch->rcvd_xid_th); + + ch->ccw[10].cmd_code = CCW_CMD_READ; + ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[10].count = XID2_LENGTH; + ch->ccw[10].cda = virt_to_phys(ch->rcvd_xid); + + if (ch->xid_th == NULL) + goto done; + ch->ccw[11].cmd_code = CCW_CMD_WRITE; + ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[11].count = TH_HEADER_LENGTH; + ch->ccw[11].cda = virt_to_phys(ch->xid_th); + + if (ch->xid == NULL) + goto done; + ch->ccw[12].cmd_code = CCW_CMD_WRITE; + ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[12].count = XID2_LENGTH; + ch->ccw[12].cda = virt_to_phys(ch->xid); + + if (ch->xid_id == NULL) + goto done; + ch->ccw[13].cmd_code = CCW_CMD_WRITE; + ch->ccw[13].cda = virt_to_phys(ch->xid_id); + + } + ch->ccw[13].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[13].count = 4; + + ch->ccw[14].cmd_code = CCW_CMD_NOOP; + ch->ccw[14].flags = CCW_FLAG_SLI; + ch->ccw[14].count = 0; + ch->ccw[14].cda = 0; + + CTCM_CCW_DUMP((char *)&ch->ccw[8], sizeof(struct ccw1) * 7); + CTCM_D3_DUMP((char *)ch->xid_th, TH_HEADER_LENGTH); + CTCM_D3_DUMP((char *)ch->xid, XID2_LENGTH); + CTCM_D3_DUMP((char *)ch->xid_id, 4); + + if (!in_irq()) { + /* Such conditional locking is a known problem for + * sparse because its static undeterministic. + * Warnings should be ignored here. 
*/ + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + gotlock = 1; + } + + fsm_addtimer(&ch->timer, 5000 , CTC_EVENT_TIMER, ch); + rc = ccw_device_start(ch->cdev, &ch->ccw[8], 0, 0xff, 0); + + if (gotlock) /* see remark above about conditional locking */ + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); + + if (rc != 0) { + ctcm_ccw_check_rc(ch, rc, + (side == XSIDE) ? "x-side XID" : "y-side XID"); + } + +done: + CTCM_PR_DEBUG("Exit %s: ch=0x%p id=%s\n", + __func__, ch, ch->id); + return; + +} + +/* + * MPC Group Station FSM action + * CTCM_PROTO_MPC only + */ +static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg) +{ + mpc_action_side_xid(fsm, arg, XSIDE); +} + +/* + * MPC Group Station FSM action + * CTCM_PROTO_MPC only + */ +static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg) +{ + mpc_action_side_xid(fsm, arg, YSIDE); +} + +/* + * MPC Group Station FSM action + * CTCM_PROTO_MPC only + */ +static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", + __func__, smp_processor_id(), ch, ch->id); + + if (ch->xid == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): ch->xid == NULL", + CTCM_FUNTAIL, dev->name); + return; + } + + fsm_newstate(ch->fsm, CH_XID0_INPROGRESS); + + ch->xid->xid2_option = XID2_0; + + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_XID2INITW: + case MPCG_STATE_XID2INITX: + ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD; + break; + case MPCG_STATE_XID0IOWAIT: + case MPCG_STATE_XID0IOWAIX: + ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL; + break; + } + + fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch); + + return; +} + +/* + * MPC Group Station FSM action + * CTCM_PROTO_MPC only +*/ +static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg) +{ + struct net_device *dev = arg; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = NULL; + int direction; + int send = 0; + + if (priv) + grp = priv->mpcg; + if (grp == NULL) + return; + + for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) { + struct channel *ch = priv->channel[direction]; + struct xid2 *thisxid = ch->xid; + ch->xid_skb->data = ch->xid_skb_data; + skb_reset_tail_pointer(ch->xid_skb); + ch->xid_skb->len = 0; + thisxid->xid2_option = XID2_7; + send = 0; + + /* xid7 phase 1 */ + if (grp->outstanding_xid7_p2 > 0) { + if (grp->roll == YSIDE) { + if (fsm_getstate(ch->fsm) == CH_XID7_PENDING1) { + fsm_newstate(ch->fsm, CH_XID7_PENDING2); + ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD; + skb_put_data(ch->xid_skb, &thdummy, + TH_HEADER_LENGTH); + send = 1; + } + } else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING2) { + fsm_newstate(ch->fsm, CH_XID7_PENDING2); + ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL; + skb_put_data(ch->xid_skb, &thnorm, + TH_HEADER_LENGTH); + send = 1; + } + } else { + /* xid7 phase 2 */ + if (grp->roll == YSIDE) { + if (fsm_getstate(ch->fsm) < CH_XID7_PENDING4) { + fsm_newstate(ch->fsm, CH_XID7_PENDING4); + skb_put_data(ch->xid_skb, &thnorm, + TH_HEADER_LENGTH); + ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL; + send = 1; + } + } else if (fsm_getstate(ch->fsm) == CH_XID7_PENDING3) { + fsm_newstate(ch->fsm, CH_XID7_PENDING4); + ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD; + skb_put_data(ch->xid_skb, &thdummy, + TH_HEADER_LENGTH); + send = 1; + } + } + + if (send) + fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch); + } 
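The grp->roll checks in mpc_action_doxid7(), and the choice between the x-side and y-side CCW programs above, follow from the decision made in mpc_validate_xid(): each side concatenates its 32-bit adjacent-node id and sender id into one 64-bit value, and the numerically lower side takes the active XSIDE role. A stand-alone illustration of just that comparison (plain C, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	enum { YSIDE = 0, XSIDE = 1 };	/* same values as ctcm_mpc.h */

	/* Combine adjacent id and sender id as mpc_validate_xid() does and
	 * let the numerically lower 64-bit value take the x-side role.
	 */
	static int pick_role(uint32_t our_adj, uint32_t our_sender,
			     uint32_t their_adj, uint32_t their_sender)
	{
		uint64_t ours   = ((uint64_t)our_adj   << 32) + our_sender;
		uint64_t theirs = ((uint64_t)their_adj << 32) + their_sender;

		return (ours < theirs) ? XSIDE : YSIDE;
	}

	int main(void)
	{
		printf("%s\n",
		       pick_role(1, 2, 1, 3) == XSIDE ? "XSIDE" : "YSIDE");
		return 0;
	}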
+ + return; +} + +/* + * MPC Group Station FSM action + * CTCM_PROTO_MPC only + */ +static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg) +{ + + struct mpcg_info *mpcginfo = arg; + struct channel *ch = mpcginfo->ch; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + CTCM_PR_DEBUG("%s: ch-id:%s xid2:%i xid7:%i xidt_p2:%i \n", + __func__, ch->id, grp->outstanding_xid2, + grp->outstanding_xid7, grp->outstanding_xid7_p2); + + if (fsm_getstate(ch->fsm) < CH_XID7_PENDING) + fsm_newstate(ch->fsm, CH_XID7_PENDING); + + grp->outstanding_xid2--; + grp->outstanding_xid7++; + grp->outstanding_xid7_p2++; + + /* must change state before validating xid to */ + /* properly handle interim interrupts received*/ + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_XID2INITW: + fsm_newstate(grp->fsm, MPCG_STATE_XID2INITX); + mpc_validate_xid(mpcginfo); + break; + case MPCG_STATE_XID0IOWAIT: + fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIX); + mpc_validate_xid(mpcginfo); + break; + case MPCG_STATE_XID2INITX: + if (grp->outstanding_xid2 == 0) { + fsm_newstate(grp->fsm, MPCG_STATE_XID7INITW); + mpc_validate_xid(mpcginfo); + fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev); + } + break; + case MPCG_STATE_XID0IOWAIX: + if (grp->outstanding_xid2 == 0) { + fsm_newstate(grp->fsm, MPCG_STATE_XID7INITI); + mpc_validate_xid(mpcginfo); + fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev); + } + break; + } + + CTCM_PR_DEBUG("ctcmpc:%s() %s xid2:%i xid7:%i xidt_p2:%i \n", + __func__, ch->id, grp->outstanding_xid2, + grp->outstanding_xid7, grp->outstanding_xid7_p2); + CTCM_PR_DEBUG("ctcmpc:%s() %s grpstate: %s chanstate: %s \n", + __func__, ch->id, + fsm_getstate_str(grp->fsm), fsm_getstate_str(ch->fsm)); + return; + +} + + +/* + * MPC Group Station FSM action + * CTCM_PROTO_MPC only + */ +static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg) +{ + struct mpcg_info *mpcginfo = arg; + struct channel *ch = mpcginfo->ch; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", + __func__, smp_processor_id(), ch, ch->id); + CTCM_PR_DEBUG("%s: outstanding_xid7: %i, outstanding_xid7_p2: %i\n", + __func__, grp->outstanding_xid7, grp->outstanding_xid7_p2); + + grp->outstanding_xid7--; + ch->xid_skb->data = ch->xid_skb_data; + skb_reset_tail_pointer(ch->xid_skb); + ch->xid_skb->len = 0; + + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_XID7INITI: + fsm_newstate(grp->fsm, MPCG_STATE_XID7INITZ); + mpc_validate_xid(mpcginfo); + break; + case MPCG_STATE_XID7INITW: + fsm_newstate(grp->fsm, MPCG_STATE_XID7INITX); + mpc_validate_xid(mpcginfo); + break; + case MPCG_STATE_XID7INITZ: + case MPCG_STATE_XID7INITX: + if (grp->outstanding_xid7 == 0) { + if (grp->outstanding_xid7_p2 > 0) { + grp->outstanding_xid7 = + grp->outstanding_xid7_p2; + grp->outstanding_xid7_p2 = 0; + } else + fsm_newstate(grp->fsm, MPCG_STATE_XID7INITF); + + mpc_validate_xid(mpcginfo); + fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev); + break; + } + mpc_validate_xid(mpcginfo); + break; + } + return; +} + +/* + * mpc_action helper of an MPC Group Station FSM action + * CTCM_PROTO_MPC only + */ +static int mpc_send_qllc_discontact(struct net_device *dev) +{ + __u32 new_len = 0; + struct sk_buff *skb; + struct qllc *qllcptr; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + CTCM_PR_DEBUG("%s: GROUP STATE: %s\n", + __func__, 
mpcg_state_names[grp->saved_state]); + + switch (grp->saved_state) { + /* + * establish conn callback function is + * preferred method to report failure + */ + case MPCG_STATE_XID0IOWAIT: + case MPCG_STATE_XID0IOWAIX: + case MPCG_STATE_XID7INITI: + case MPCG_STATE_XID7INITZ: + case MPCG_STATE_XID2INITW: + case MPCG_STATE_XID2INITX: + case MPCG_STATE_XID7INITW: + case MPCG_STATE_XID7INITX: + if (grp->estconnfunc) { + grp->estconnfunc(grp->port_num, -1, 0); + grp->estconnfunc = NULL; + break; + } + fallthrough; + case MPCG_STATE_FLOWC: + case MPCG_STATE_READY: + grp->send_qllc_disc = 2; + new_len = sizeof(struct qllc); + qllcptr = kzalloc(new_len, gfp_type() | GFP_DMA); + if (qllcptr == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): qllcptr allocation error", + CTCM_FUNTAIL, dev->name); + return -ENOMEM; + } + + qllcptr->qllc_address = 0xcc; + qllcptr->qllc_commands = 0x03; + + skb = __dev_alloc_skb(new_len, GFP_ATOMIC); + + if (skb == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): skb allocation error", + CTCM_FUNTAIL, dev->name); + priv->stats.rx_dropped++; + kfree(qllcptr); + return -ENOMEM; + } + + skb_put_data(skb, qllcptr, new_len); + kfree(qllcptr); + + if (skb_headroom(skb) < 4) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): skb_headroom error", + CTCM_FUNTAIL, dev->name); + dev_kfree_skb_any(skb); + return -ENOMEM; + } + + *((__u32 *)skb_push(skb, 4)) = + priv->channel[CTCM_READ]->pdu_seq; + priv->channel[CTCM_READ]->pdu_seq++; + CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n", + __func__, priv->channel[CTCM_READ]->pdu_seq); + + /* receipt of CC03 resets anticipated sequence number on + receiving side */ + priv->channel[CTCM_READ]->pdu_seq = 0x00; + skb_reset_mac_header(skb); + skb->dev = dev; + skb->protocol = htons(ETH_P_SNAP); + skb->ip_summed = CHECKSUM_UNNECESSARY; + + CTCM_D3_DUMP(skb->data, (sizeof(struct qllc) + 4)); + + netif_rx(skb); + break; + default: + break; + + } + + return 0; +} +/* --- This is the END my friend --- */ + diff --git a/drivers/s390/net/ctcm_mpc.h b/drivers/s390/net/ctcm_mpc.h new file mode 100644 index 000000000..da41b26f7 --- /dev/null +++ b/drivers/s390/net/ctcm_mpc.h @@ -0,0 +1,238 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2007 + * Authors: Peter Tiedemann (ptiedem@de.ibm.com) + * + * MPC additions: + * Belinda Thompson (belindat@us.ibm.com) + * Andy Richter (richtera@us.ibm.com) + */ + +#ifndef _CTC_MPC_H_ +#define _CTC_MPC_H_ + +#include <linux/interrupt.h> +#include <linux/skbuff.h> +#include "fsm.h" + +/* + * MPC external interface + * Note that ctc_mpc_xyz are called with a lock on ................ 
+ */ + +/* port_number is the mpc device 0, 1, 2 etc mpc2 is port_number 2 */ + +/* passive open Just wait for XID2 exchange */ +extern int ctc_mpc_alloc_channel(int port, + void (*callback)(int port_num, int max_write_size)); +/* active open Alloc then send XID2 */ +extern void ctc_mpc_establish_connectivity(int port, + void (*callback)(int port_num, int rc, int max_write_size)); + +extern void ctc_mpc_dealloc_ch(int port); +extern void ctc_mpc_flow_control(int port, int flowc); + +/* + * other MPC Group prototypes and structures + */ + +#define ETH_P_SNA_DIX 0x80D5 + +/* + * Declaration of an XID2 + * + */ +#define ALLZEROS 0x0000000000000000 + +#define XID_FM2 0x20 +#define XID2_0 0x00 +#define XID2_7 0x07 +#define XID2_WRITE_SIDE 0x04 +#define XID2_READ_SIDE 0x05 + +struct xid2 { + __u8 xid2_type_id; + __u8 xid2_len; + __u32 xid2_adj_id; + __u8 xid2_rlen; + __u8 xid2_resv1; + __u8 xid2_flag1; + __u8 xid2_fmtt; + __u8 xid2_flag4; + __u16 xid2_resv2; + __u8 xid2_tgnum; + __u32 xid2_sender_id; + __u8 xid2_flag2; + __u8 xid2_option; + char xid2_resv3[8]; + __u16 xid2_resv4; + __u8 xid2_dlc_type; + __u16 xid2_resv5; + __u8 xid2_mpc_flag; + __u8 xid2_resv6; + __u16 xid2_buf_len; + char xid2_buffer[255 - (13 * sizeof(__u8) + + 2 * sizeof(__u32) + + 4 * sizeof(__u16) + + 8 * sizeof(char))]; +} __attribute__ ((packed)); + +#define XID2_LENGTH (sizeof(struct xid2)) + +struct th_header { + __u8 th_seg; + __u8 th_ch_flag; +#define TH_HAS_PDU 0xf0 +#define TH_IS_XID 0x01 +#define TH_SWEEP_REQ 0xfe +#define TH_SWEEP_RESP 0xff + __u8 th_blk_flag; +#define TH_DATA_IS_XID 0x80 +#define TH_RETRY 0x40 +#define TH_DISCONTACT 0xc0 +#define TH_SEG_BLK 0x20 +#define TH_LAST_SEG 0x10 +#define TH_PDU_PART 0x08 + __u8 th_is_xid; /* is 0x01 if this is XID */ + __u32 th_seq_num; +} __attribute__ ((packed)); + +struct th_addon { + __u32 th_last_seq; + __u32 th_resvd; +} __attribute__ ((packed)); + +struct th_sweep { + struct th_header th; + struct th_addon sw; +} __attribute__ ((packed)); + +#define TH_HEADER_LENGTH (sizeof(struct th_header)) +#define TH_SWEEP_LENGTH (sizeof(struct th_sweep)) + +#define PDU_LAST 0x80 +#define PDU_CNTL 0x40 +#define PDU_FIRST 0x20 + +struct pdu { + __u32 pdu_offset; + __u8 pdu_flag; + __u8 pdu_proto; /* 0x01 is APPN SNA */ + __u16 pdu_seq; +} __attribute__ ((packed)); + +#define PDU_HEADER_LENGTH (sizeof(struct pdu)) + +struct qllc { + __u8 qllc_address; +#define QLLC_REQ 0xFF +#define QLLC_RESP 0x00 + __u8 qllc_commands; +#define QLLC_DISCONNECT 0x53 +#define QLLC_UNSEQACK 0x73 +#define QLLC_SETMODE 0x93 +#define QLLC_EXCHID 0xBF +} __attribute__ ((packed)); + + +/* + * Definition of one MPC group + */ + +#define MAX_MPCGCHAN 10 +#define MPC_XID_TIMEOUT_VALUE 10000 +#define MPC_CHANNEL_ADD 0 +#define MPC_CHANNEL_REMOVE 1 +#define MPC_CHANNEL_ATTN 2 +#define XSIDE 1 +#define YSIDE 0 + +struct mpcg_info { + struct sk_buff *skb; + struct channel *ch; + struct xid2 *xid; + struct th_sweep *sweep; + struct th_header *th; +}; + +struct mpc_group { + struct tasklet_struct mpc_tasklet; + struct tasklet_struct mpc_tasklet2; + int changed_side; + int saved_state; + int channels_terminating; + int out_of_sequence; + int flow_off_called; + int port_num; + int port_persist; + int alloc_called; + __u32 xid2_adj_id; + __u8 xid2_tgnum; + __u32 xid2_sender_id; + int num_channel_paths; + int active_channels[2]; + __u16 group_max_buflen; + int outstanding_xid2; + int outstanding_xid7; + int outstanding_xid7_p2; + int sweep_req_pend_num; + int sweep_rsp_pend_num; + struct sk_buff *xid_skb; + char 
*xid_skb_data; + struct th_header *xid_th; + struct xid2 *xid; + char *xid_id; + struct th_header *rcvd_xid_th; + struct sk_buff *rcvd_xid_skb; + char *rcvd_xid_data; + __u8 in_sweep; + __u8 roll; + struct xid2 *saved_xid2; + void (*allochanfunc)(int, int); + int allocchan_callback_retries; + void (*estconnfunc)(int, int, int); + int estconn_callback_retries; + int estconn_called; + int xidnogood; + int send_qllc_disc; + fsm_timer timer; + fsm_instance *fsm; /* group xid fsm */ +}; + +#ifdef DEBUGDATA +void ctcmpc_dumpit(char *buf, int len); +#else +static inline void ctcmpc_dumpit(char *buf, int len) +{ +} +#endif + +#ifdef DEBUGDATA +/* + * Dump header and first 16 bytes of an sk_buff for debugging purposes. + * + * skb The struct sk_buff to dump. + * offset Offset relative to skb-data, where to start the dump. + */ +void ctcmpc_dump_skb(struct sk_buff *skb, int offset); +#else +static inline void ctcmpc_dump_skb(struct sk_buff *skb, int offset) +{} +#endif + +static inline void ctcmpc_dump32(char *buf, int len) +{ + if (len < 32) + ctcmpc_dumpit(buf, len); + else + ctcmpc_dumpit(buf, 32); +} + +void ctcm_ccw_check_rc(struct channel *, int, char *); +void mpc_group_ready(unsigned long adev); +void mpc_channel_action(struct channel *ch, int direction, int action); +void mpc_action_send_discontact(unsigned long thischan); +void mpc_action_discontact(fsm_instance *fi, int event, void *arg); +void ctcmpc_bh(unsigned long thischan); +#endif +/* --- This is the END my friend --- */ diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c new file mode 100644 index 000000000..e3813a7aa --- /dev/null +++ b/drivers/s390/net/ctcm_sysfs.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2007, 2007 + * Authors: Peter Tiedemann (ptiedem@de.ibm.com) + * + */ + +#undef DEBUG +#undef DEBUGDATA +#undef DEBUGCCW + +#define KMSG_COMPONENT "ctcm" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/device.h> +#include <linux/sysfs.h> +#include <linux/slab.h> +#include "ctcm_main.h" + +/* + * sysfs attributes + */ + +static ssize_t ctcm_buffer_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctcm_priv *priv = dev_get_drvdata(dev); + + if (!priv) + return -ENODEV; + return sprintf(buf, "%d\n", priv->buffer_size); +} + +static ssize_t ctcm_buffer_write(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct net_device *ndev; + unsigned int bs1; + struct ctcm_priv *priv = dev_get_drvdata(dev); + int rc; + + if (!(priv && priv->channel[CTCM_READ] && + priv->channel[CTCM_READ]->netdev)) { + CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev"); + return -ENODEV; + } + ndev = priv->channel[CTCM_READ]->netdev; + + rc = kstrtouint(buf, 0, &bs1); + if (rc) + goto einval; + if (bs1 > CTCM_BUFSIZE_LIMIT) + goto einval; + if (bs1 < (576 + LL_HEADER_LENGTH + 2)) + goto einval; + priv->buffer_size = bs1; /* just to overwrite the default */ + + if ((ndev->flags & IFF_RUNNING) && + (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2))) + goto einval; + + priv->channel[CTCM_READ]->max_bufsize = bs1; + priv->channel[CTCM_WRITE]->max_bufsize = bs1; + if (!(ndev->flags & IFF_RUNNING)) + ndev->mtu = bs1 - LL_HEADER_LENGTH - 2; + priv->channel[CTCM_READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; + priv->channel[CTCM_WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; + + CTCM_DBF_DEV(SETUP, ndev, buf); + return count; + +einval: + CTCM_DBF_DEV(SETUP, ndev, "buff_err"); + return -EINVAL; +} + +static void 
ctcm_print_statistics(struct ctcm_priv *priv) +{ + char *sbuf; + char *p; + + if (!priv) + return; + sbuf = kmalloc(2048, GFP_KERNEL); + if (sbuf == NULL) + return; + p = sbuf; + + p += sprintf(p, " Device FSM state: %s\n", + fsm_getstate_str(priv->fsm)); + p += sprintf(p, " RX channel FSM state: %s\n", + fsm_getstate_str(priv->channel[CTCM_READ]->fsm)); + p += sprintf(p, " TX channel FSM state: %s\n", + fsm_getstate_str(priv->channel[CTCM_WRITE]->fsm)); + p += sprintf(p, " Max. TX buffer used: %ld\n", + priv->channel[WRITE]->prof.maxmulti); + p += sprintf(p, " Max. chained SKBs: %ld\n", + priv->channel[WRITE]->prof.maxcqueue); + p += sprintf(p, " TX single write ops: %ld\n", + priv->channel[WRITE]->prof.doios_single); + p += sprintf(p, " TX multi write ops: %ld\n", + priv->channel[WRITE]->prof.doios_multi); + p += sprintf(p, " Netto bytes written: %ld\n", + priv->channel[WRITE]->prof.txlen); + p += sprintf(p, " Max. TX IO-time: %u\n", + jiffies_to_usecs(priv->channel[WRITE]->prof.tx_time)); + + printk(KERN_INFO "Statistics for %s:\n%s", + priv->channel[CTCM_WRITE]->netdev->name, sbuf); + kfree(sbuf); + return; +} + +static ssize_t stats_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + struct ctcm_priv *priv = dev_get_drvdata(dev); + + if (!priv || gdev->state != CCWGROUP_ONLINE) + return -ENODEV; + ctcm_print_statistics(priv); + return sprintf(buf, "0\n"); +} + +static ssize_t stats_write(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ctcm_priv *priv = dev_get_drvdata(dev); + if (!priv) + return -ENODEV; + /* Reset statistics */ + memset(&priv->channel[WRITE]->prof, 0, + sizeof(priv->channel[CTCM_WRITE]->prof)); + return count; +} + +static ssize_t ctcm_proto_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctcm_priv *priv = dev_get_drvdata(dev); + if (!priv) + return -ENODEV; + + return sprintf(buf, "%d\n", priv->protocol); +} + +static ssize_t ctcm_proto_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int value, rc; + struct ctcm_priv *priv = dev_get_drvdata(dev); + + if (!priv) + return -ENODEV; + rc = kstrtoint(buf, 0, &value); + if (rc || + !((value == CTCM_PROTO_S390) || + (value == CTCM_PROTO_LINUX) || + (value == CTCM_PROTO_MPC) || + (value == CTCM_PROTO_OS390))) + return -EINVAL; + priv->protocol = value; + CTCM_DBF_DEV(SETUP, dev, buf); + + return count; +} + +static const char *ctcm_type[] = { + "not a channel", + "CTC/A", + "FICON channel", + "ESCON channel", + "unknown channel type", + "unsupported channel type", +}; + +static ssize_t ctcm_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ccwgroup_device *cgdev; + + cgdev = to_ccwgroupdev(dev); + if (!cgdev) + return -ENODEV; + + return sprintf(buf, "%s\n", + ctcm_type[cgdev->cdev[0]->id.driver_info]); +} + +static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write); +static DEVICE_ATTR(protocol, 0644, ctcm_proto_show, ctcm_proto_store); +static DEVICE_ATTR(type, 0444, ctcm_type_show, NULL); +static DEVICE_ATTR(stats, 0644, stats_show, stats_write); + +static struct attribute *ctcm_attr[] = { + &dev_attr_protocol.attr, + &dev_attr_type.attr, + &dev_attr_buffer.attr, + &dev_attr_stats.attr, + NULL, +}; + +static struct attribute_group ctcm_attr_group = { + .attrs = ctcm_attr, +}; +const struct attribute_group *ctcm_attr_groups[] = { + &ctcm_attr_group, + NULL, +}; diff --git 
a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c new file mode 100644 index 000000000..eb07862bd --- /dev/null +++ b/drivers/s390/net/fsm.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * A generic FSM based on fsm used in isdn4linux + * + */ + +#include "fsm.h" +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/timer.h> + +MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)"); +MODULE_DESCRIPTION("Finite state machine helper functions"); +MODULE_LICENSE("GPL"); + +fsm_instance * +init_fsm(char *name, const char **state_names, const char **event_names, int nr_states, + int nr_events, const fsm_node *tmpl, int tmpl_len, gfp_t order) +{ + int i; + fsm_instance *this; + fsm_function_t *m; + fsm *f; + + this = kzalloc(sizeof(fsm_instance), order); + if (this == NULL) { + printk(KERN_WARNING + "fsm(%s): init_fsm: Couldn't alloc instance\n", name); + return NULL; + } + strlcpy(this->name, name, sizeof(this->name)); + init_waitqueue_head(&this->wait_q); + + f = kzalloc(sizeof(fsm), order); + if (f == NULL) { + printk(KERN_WARNING + "fsm(%s): init_fsm: Couldn't alloc fsm\n", name); + kfree_fsm(this); + return NULL; + } + f->nr_events = nr_events; + f->nr_states = nr_states; + f->event_names = event_names; + f->state_names = state_names; + this->f = f; + + m = kcalloc(nr_states*nr_events, sizeof(fsm_function_t), order); + if (m == NULL) { + printk(KERN_WARNING + "fsm(%s): init_fsm: Couldn't alloc jumptable\n", name); + kfree_fsm(this); + return NULL; + } + f->jumpmatrix = m; + + for (i = 0; i < tmpl_len; i++) { + if ((tmpl[i].cond_state >= nr_states) || + (tmpl[i].cond_event >= nr_events) ) { + printk(KERN_ERR + "fsm(%s): init_fsm: Bad template l=%d st(%ld/%ld) ev(%ld/%ld)\n", + name, i, (long)tmpl[i].cond_state, (long)f->nr_states, + (long)tmpl[i].cond_event, (long)f->nr_events); + kfree_fsm(this); + return NULL; + } else + m[nr_states * tmpl[i].cond_event + tmpl[i].cond_state] = + tmpl[i].function; + } + return this; +} + +void +kfree_fsm(fsm_instance *this) +{ + if (this) { + if (this->f) { + kfree(this->f->jumpmatrix); + kfree(this->f); + } + kfree(this); + } else + printk(KERN_WARNING + "fsm: kfree_fsm called with NULL argument\n"); +} + +#if FSM_DEBUG_HISTORY +void +fsm_print_history(fsm_instance *fi) +{ + int idx = 0; + int i; + + if (fi->history_size >= FSM_HISTORY_SIZE) + idx = fi->history_index; + + printk(KERN_DEBUG "fsm(%s): History:\n", fi->name); + for (i = 0; i < fi->history_size; i++) { + int e = fi->history[idx].event; + int s = fi->history[idx++].state; + idx %= FSM_HISTORY_SIZE; + if (e == -1) + printk(KERN_DEBUG " S=%s\n", + fi->f->state_names[s]); + else + printk(KERN_DEBUG " S=%s E=%s\n", + fi->f->state_names[s], + fi->f->event_names[e]); + } + fi->history_size = fi->history_index = 0; +} + +void +fsm_record_history(fsm_instance *fi, int state, int event) +{ + fi->history[fi->history_index].state = state; + fi->history[fi->history_index++].event = event; + fi->history_index %= FSM_HISTORY_SIZE; + if (fi->history_size < FSM_HISTORY_SIZE) + fi->history_size++; +} +#endif + +const char * +fsm_getstate_str(fsm_instance *fi) +{ + int st = atomic_read(&fi->state); + if (st >= fi->f->nr_states) + return "Invalid"; + return fi->f->state_names[st]; +} + +static void +fsm_expire_timer(struct timer_list *t) +{ + fsm_timer *this = from_timer(this, t, tl); +#if FSM_TIMER_DEBUG + printk(KERN_DEBUG "fsm(%s): Timer %p expired\n", + this->fi->name, this); +#endif + fsm_event(this->fi, this->expire_event, this->event_arg); +} + +void 
+fsm_settimer(fsm_instance *fi, fsm_timer *this) +{ + this->fi = fi; +#if FSM_TIMER_DEBUG + printk(KERN_DEBUG "fsm(%s): Create timer %p\n", fi->name, + this); +#endif + timer_setup(&this->tl, fsm_expire_timer, 0); +} + +void +fsm_deltimer(fsm_timer *this) +{ +#if FSM_TIMER_DEBUG + printk(KERN_DEBUG "fsm(%s): Delete timer %p\n", this->fi->name, + this); +#endif + del_timer(&this->tl); +} + +int +fsm_addtimer(fsm_timer *this, int millisec, int event, void *arg) +{ + +#if FSM_TIMER_DEBUG + printk(KERN_DEBUG "fsm(%s): Add timer %p %dms\n", + this->fi->name, this, millisec); +#endif + + timer_setup(&this->tl, fsm_expire_timer, 0); + this->expire_event = event; + this->event_arg = arg; + this->tl.expires = jiffies + (millisec * HZ) / 1000; + add_timer(&this->tl); + return 0; +} + +/* FIXME: this function is never used, why */ +void +fsm_modtimer(fsm_timer *this, int millisec, int event, void *arg) +{ + +#if FSM_TIMER_DEBUG + printk(KERN_DEBUG "fsm(%s): Restart timer %p %dms\n", + this->fi->name, this, millisec); +#endif + + del_timer(&this->tl); + timer_setup(&this->tl, fsm_expire_timer, 0); + this->expire_event = event; + this->event_arg = arg; + this->tl.expires = jiffies + (millisec * HZ) / 1000; + add_timer(&this->tl); +} + +EXPORT_SYMBOL(init_fsm); +EXPORT_SYMBOL(kfree_fsm); +EXPORT_SYMBOL(fsm_settimer); +EXPORT_SYMBOL(fsm_deltimer); +EXPORT_SYMBOL(fsm_addtimer); +EXPORT_SYMBOL(fsm_modtimer); +EXPORT_SYMBOL(fsm_getstate_str); + +#if FSM_DEBUG_HISTORY +EXPORT_SYMBOL(fsm_print_history); +EXPORT_SYMBOL(fsm_record_history); +#endif diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h new file mode 100644 index 000000000..16dc071a2 --- /dev/null +++ b/drivers/s390/net/fsm.h @@ -0,0 +1,266 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FSM_H_ +#define _FSM_H_ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/timer.h> +#include <linux/time.h> +#include <linux/slab.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/atomic.h> + +/** + * Define this to get debugging messages. + */ +#define FSM_DEBUG 0 + +/** + * Define this to get debugging massages for + * timer handling. + */ +#define FSM_TIMER_DEBUG 0 + +/** + * Define these to record a history of + * Events/Statechanges and print it if a + * action_function is not found. + */ +#define FSM_DEBUG_HISTORY 0 +#define FSM_HISTORY_SIZE 40 + +struct fsm_instance_t; + +/** + * Definition of an action function, called by a FSM + */ +typedef void (*fsm_function_t)(struct fsm_instance_t *, int, void *); + +/** + * Internal jump table for a FSM + */ +typedef struct { + fsm_function_t *jumpmatrix; + int nr_events; + int nr_states; + const char **event_names; + const char **state_names; +} fsm; + +#if FSM_DEBUG_HISTORY +/** + * Element of State/Event history used for debugging. + */ +typedef struct { + int state; + int event; +} fsm_history; +#endif + +/** + * Representation of a FSM + */ +typedef struct fsm_instance_t { + fsm *f; + atomic_t state; + char name[16]; + void *userdata; + int userint; + wait_queue_head_t wait_q; +#if FSM_DEBUG_HISTORY + int history_index; + int history_size; + fsm_history history[FSM_HISTORY_SIZE]; +#endif +} fsm_instance; + +/** + * Description of a state-event combination + */ +typedef struct { + int cond_state; + int cond_event; + fsm_function_t function; +} fsm_node; + +/** + * Description of a FSM Timer. 
+ */ +typedef struct { + fsm_instance *fi; + struct timer_list tl; + int expire_event; + void *event_arg; +} fsm_timer; + +/** + * Creates an FSM + * + * @param name Name of this instance for logging purposes. + * @param state_names An array of names for all states for logging purposes. + * @param event_names An array of names for all events for logging purposes. + * @param nr_states Number of states for this instance. + * @param nr_events Number of events for this instance. + * @param tmpl An array of fsm_nodes, describing this FSM. + * @param tmpl_len Length of the describing array. + * @param order Parameter for allocation of the FSM data structs. + */ +extern fsm_instance * +init_fsm(char *name, const char **state_names, + const char **event_names, + int nr_states, int nr_events, const fsm_node *tmpl, + int tmpl_len, gfp_t order); + +/** + * Releases an FSM + * + * @param fi Pointer to an FSM, previously created with init_fsm. + */ +extern void kfree_fsm(fsm_instance *fi); + +#if FSM_DEBUG_HISTORY +extern void +fsm_print_history(fsm_instance *fi); + +extern void +fsm_record_history(fsm_instance *fi, int state, int event); +#endif + +/** + * Emits an event to a FSM. + * If an action function is defined for the current state/event combination, + * this function is called. + * + * @param fi Pointer to FSM which should receive the event. + * @param event The event do be delivered. + * @param arg A generic argument, handed to the action function. + * + * @return 0 on success, + * 1 if current state or event is out of range + * !0 if state and event in range, but no action defined. + */ +static inline int +fsm_event(fsm_instance *fi, int event, void *arg) +{ + fsm_function_t r; + int state = atomic_read(&fi->state); + + if ((state >= fi->f->nr_states) || + (event >= fi->f->nr_events) ) { + printk(KERN_ERR "fsm(%s): Invalid state st(%ld/%ld) ev(%d/%ld)\n", + fi->name, (long)state,(long)fi->f->nr_states, event, + (long)fi->f->nr_events); +#if FSM_DEBUG_HISTORY + fsm_print_history(fi); +#endif + return 1; + } + r = fi->f->jumpmatrix[fi->f->nr_states * event + state]; + if (r) { +#if FSM_DEBUG + printk(KERN_DEBUG "fsm(%s): state %s event %s\n", + fi->name, fi->f->state_names[state], + fi->f->event_names[event]); +#endif +#if FSM_DEBUG_HISTORY + fsm_record_history(fi, state, event); +#endif + r(fi, event, arg); + return 0; + } else { +#if FSM_DEBUG || FSM_DEBUG_HISTORY + printk(KERN_DEBUG "fsm(%s): no function for event %s in state %s\n", + fi->name, fi->f->event_names[event], + fi->f->state_names[state]); +#endif +#if FSM_DEBUG_HISTORY + fsm_print_history(fi); +#endif + return !0; + } +} + +/** + * Modifies the state of an FSM. + * This does <em>not</em> trigger an event or calls an action function. + * + * @param fi Pointer to FSM + * @param state The new state for this FSM. + */ +static inline void +fsm_newstate(fsm_instance *fi, int newstate) +{ + atomic_set(&fi->state,newstate); +#if FSM_DEBUG_HISTORY + fsm_record_history(fi, newstate, -1); +#endif +#if FSM_DEBUG + printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name, + fi->f->state_names[newstate]); +#endif + wake_up(&fi->wait_q); +} + +/** + * Retrieves the state of an FSM + * + * @param fi Pointer to FSM + * + * @return The current state of the FSM. + */ +static inline int +fsm_getstate(fsm_instance *fi) +{ + return atomic_read(&fi->state); +} + +/** + * Retrieves the name of the state of an FSM + * + * @param fi Pointer to FSM + * + * @return The current state of the FSM in a human readable form. 
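The lookup performed by fsm_event() is a flat jump table: init_fsm() stores each fsm_node action at index nr_states * cond_event + cond_state, and a NULL cell means no transition is defined for that state/event pair. A stand-alone miniature of the same indexing (plain C, compilable outside the kernel, names invented for the example):

	#include <stdio.h>

	/* Miniature of the init_fsm()/fsm_event() jump table: the action for
	 * (state, event) lives at nr_states * event + state; NULL means no
	 * transition is defined.
	 */
	typedef void (*action_t)(int event);

	enum { ST_IDLE, ST_RUN, NR_STATES };
	enum { EV_START, EV_STOP, NR_EVENTS };

	static void do_start(int event) { printf("start on event %d\n", event); }

	int main(void)
	{
		action_t jump[NR_STATES * NR_EVENTS] = { 0 };
		int state = ST_IDLE;

		/* equivalent of one fsm_node { ST_IDLE, EV_START, do_start } */
		jump[NR_STATES * EV_START + ST_IDLE] = do_start;

		action_t a = jump[NR_STATES * EV_START + state];
		if (a)
			a(EV_START);
		else
			printf("no action defined\n");
		return 0;
	}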
+ */ +extern const char *fsm_getstate_str(fsm_instance *fi); + +/** + * Initializes a timer for an FSM. + * This prepares an fsm_timer for usage with fsm_addtimer. + * + * @param fi Pointer to FSM + * @param timer The timer to be initialized. + */ +extern void fsm_settimer(fsm_instance *fi, fsm_timer *); + +/** + * Clears a pending timer of an FSM instance. + * + * @param timer The timer to clear. + */ +extern void fsm_deltimer(fsm_timer *timer); + +/** + * Adds and starts a timer to an FSM instance. + * + * @param timer The timer to be added. The field fi of that timer + * must have been set to point to the instance. + * @param millisec Duration, after which the timer should expire. + * @param event Event, to trigger if timer expires. + * @param arg Generic argument, provided to expiry function. + * + * @return 0 on success, -1 if timer is already active. + */ +extern int fsm_addtimer(fsm_timer *timer, int millisec, int event, void *arg); + +/** + * Modifies a timer of an FSM. + * + * @param timer The timer to modify. + * @param millisec Duration, after which the timer should expire. + * @param event Event, to trigger if timer expires. + * @param arg Generic argument, provided to expiry function. + */ +extern void fsm_modtimer(fsm_timer *timer, int millisec, int event, void *arg); + +#endif /* _FSM_H_ */ diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h new file mode 100644 index 000000000..38fe90c25 --- /dev/null +++ b/drivers/s390/net/ism.h @@ -0,0 +1,249 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef S390_ISM_H +#define S390_ISM_H + +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <net/smc.h> +#include <asm/pci_insn.h> + +#define UTIL_STR_LEN 16 + +/* + * Do not use the first word of the DMB bits to ensure 8 byte aligned access. 
+ */ +#define ISM_DMB_WORD_OFFSET 1 +#define ISM_DMB_BIT_OFFSET (ISM_DMB_WORD_OFFSET * 32) +#define ISM_NR_DMBS 1920 +#define ISM_IDENT_MASK 0x00FFFF + +#define ISM_REG_SBA 0x1 +#define ISM_REG_IEQ 0x2 +#define ISM_READ_GID 0x3 +#define ISM_ADD_VLAN_ID 0x4 +#define ISM_DEL_VLAN_ID 0x5 +#define ISM_SET_VLAN 0x6 +#define ISM_RESET_VLAN 0x7 +#define ISM_QUERY_INFO 0x8 +#define ISM_QUERY_RGID 0x9 +#define ISM_REG_DMB 0xA +#define ISM_UNREG_DMB 0xB +#define ISM_SIGNAL_IEQ 0xE +#define ISM_UNREG_SBA 0x11 +#define ISM_UNREG_IEQ 0x12 + +struct ism_req_hdr { + u32 cmd; + u16 : 16; + u16 len; +}; + +struct ism_resp_hdr { + u32 cmd; + u16 ret; + u16 len; +}; + +union ism_reg_sba { + struct { + struct ism_req_hdr hdr; + u64 sba; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +union ism_reg_ieq { + struct { + struct ism_req_hdr hdr; + u64 ieq; + u64 len; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +union ism_read_gid { + struct { + struct ism_req_hdr hdr; + } request; + struct { + struct ism_resp_hdr hdr; + u64 gid; + } response; +} __aligned(16); + +union ism_qi { + struct { + struct ism_req_hdr hdr; + } request; + struct { + struct ism_resp_hdr hdr; + u32 version; + u32 max_len; + u64 ism_state; + u64 my_gid; + u64 sba; + u64 ieq; + u32 ieq_len; + u32 : 32; + u32 dmbs_owned; + u32 dmbs_used; + u32 vlan_required; + u32 vlan_nr_ids; + u16 vlan_id[64]; + } response; +} __aligned(64); + +union ism_query_rgid { + struct { + struct ism_req_hdr hdr; + u64 rgid; + u32 vlan_valid; + u32 vlan_id; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +union ism_reg_dmb { + struct { + struct ism_req_hdr hdr; + u64 dmb; + u32 dmb_len; + u32 sba_idx; + u32 vlan_valid; + u32 vlan_id; + u64 rgid; + } request; + struct { + struct ism_resp_hdr hdr; + u64 dmb_tok; + } response; +} __aligned(32); + +union ism_sig_ieq { + struct { + struct ism_req_hdr hdr; + u64 rgid; + u32 trigger_irq; + u32 event_code; + u64 info; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(32); + +union ism_unreg_dmb { + struct { + struct ism_req_hdr hdr; + u64 dmb_tok; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +union ism_cmd_simple { + struct { + struct ism_req_hdr hdr; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(8); + +union ism_set_vlan_id { + struct { + struct ism_req_hdr hdr; + u64 vlan_id; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +struct ism_eq_header { + u64 idx; + u64 ieq_len; + u64 entry_len; + u64 : 64; +}; + +struct ism_eq { + struct ism_eq_header header; + struct smcd_event entry[15]; +}; + +struct ism_sba { + u32 s : 1; /* summary bit */ + u32 e : 1; /* event bit */ + u32 : 30; + u32 dmb_bits[ISM_NR_DMBS / 32]; + u32 reserved[3]; + u16 dmbe_mask[ISM_NR_DMBS]; +}; + +struct ism_dev { + spinlock_t lock; + struct pci_dev *pdev; + struct smcd_dev *smcd; + + struct ism_sba *sba; + dma_addr_t sba_dma_addr; + DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS); + + struct ism_eq *ieq; + dma_addr_t ieq_dma_addr; + + int ieq_idx; +}; + +#define ISM_CREATE_REQ(dmb, idx, sf, offset) \ + ((dmb) | (idx) << 24 | (sf) << 23 | (offset)) + +struct ism_systemeid { + u8 seid_string[24]; + u8 serial_number[4]; + u8 type[4]; +}; + +static inline void __ism_read_cmd(struct ism_dev *ism, void *data, + unsigned long offset, unsigned long len) +{ + struct zpci_dev *zdev = to_zpci(ism->pdev); + u64 req = 
ZPCI_CREATE_REQ(zdev->fh, 2, 8); + + while (len > 0) { + __zpci_load(data, req, offset); + offset += 8; + data += 8; + len -= 8; + } +} + +static inline void __ism_write_cmd(struct ism_dev *ism, void *data, + unsigned long offset, unsigned long len) +{ + struct zpci_dev *zdev = to_zpci(ism->pdev); + u64 req = ZPCI_CREATE_REQ(zdev->fh, 2, len); + + if (len) + __zpci_store_block(data, req, offset); +} + +static inline int __ism_move(struct ism_dev *ism, u64 dmb_req, void *data, + unsigned int size) +{ + struct zpci_dev *zdev = to_zpci(ism->pdev); + u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, size); + + return __zpci_store_block(data, req, dmb_req); +} + +#endif /* S390_ISM_H */ diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c new file mode 100644 index 000000000..26cc943d2 --- /dev/null +++ b/drivers/s390/net/ism_drv.c @@ -0,0 +1,649 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ISM driver for s390. + * + * Copyright IBM Corp. 2018 + */ +#define KMSG_COMPONENT "ism" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/err.h> +#include <linux/ctype.h> +#include <linux/processor.h> +#include <net/smc.h> + +#include <asm/debug.h> + +#include "ism.h" + +MODULE_DESCRIPTION("ISM driver for s390"); +MODULE_LICENSE("GPL"); + +#define PCI_DEVICE_ID_IBM_ISM 0x04ED +#define DRV_NAME "ism" + +static const struct pci_device_id ism_device_table[] = { + { PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, ism_device_table); + +static debug_info_t *ism_debug_info; + +static int ism_cmd(struct ism_dev *ism, void *cmd) +{ + struct ism_req_hdr *req = cmd; + struct ism_resp_hdr *resp = cmd; + + __ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req)); + __ism_write_cmd(ism, req, 0, sizeof(*req)); + + WRITE_ONCE(resp->ret, ISM_ERROR); + + __ism_read_cmd(ism, resp, 0, sizeof(*resp)); + if (resp->ret) { + debug_text_event(ism_debug_info, 0, "cmd failure"); + debug_event(ism_debug_info, 0, resp, sizeof(*resp)); + goto out; + } + __ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp)); +out: + return resp->ret; +} + +static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code) +{ + union ism_cmd_simple cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = cmd_code; + cmd.request.hdr.len = sizeof(cmd.request); + + return ism_cmd(ism, &cmd); +} + +static int query_info(struct ism_dev *ism) +{ + union ism_qi cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_QUERY_INFO; + cmd.request.hdr.len = sizeof(cmd.request); + + if (ism_cmd(ism, &cmd)) + goto out; + + debug_text_event(ism_debug_info, 3, "query info"); + debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response)); +out: + return 0; +} + +static int register_sba(struct ism_dev *ism) +{ + union ism_reg_sba cmd; + dma_addr_t dma_handle; + struct ism_sba *sba; + + sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle, + GFP_KERNEL); + if (!sba) + return -ENOMEM; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_REG_SBA; + cmd.request.hdr.len = sizeof(cmd.request); + cmd.request.sba = dma_handle; + + if (ism_cmd(ism, &cmd)) { + dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle); + return -EIO; + } + + ism->sba = sba; + ism->sba_dma_addr = dma_handle; + + return 0; +} + +static int register_ieq(struct ism_dev *ism) +{ + union ism_reg_ieq cmd; + dma_addr_t dma_handle; + struct ism_eq 
*ieq; + + ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle, + GFP_KERNEL); + if (!ieq) + return -ENOMEM; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_REG_IEQ; + cmd.request.hdr.len = sizeof(cmd.request); + cmd.request.ieq = dma_handle; + cmd.request.len = sizeof(*ieq); + + if (ism_cmd(ism, &cmd)) { + dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle); + return -EIO; + } + + ism->ieq = ieq; + ism->ieq_idx = -1; + ism->ieq_dma_addr = dma_handle; + + return 0; +} + +static int unregister_sba(struct ism_dev *ism) +{ + int ret; + + if (!ism->sba) + return 0; + + ret = ism_cmd_simple(ism, ISM_UNREG_SBA); + if (ret && ret != ISM_ERROR) + return -EIO; + + dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, + ism->sba, ism->sba_dma_addr); + + ism->sba = NULL; + ism->sba_dma_addr = 0; + + return 0; +} + +static int unregister_ieq(struct ism_dev *ism) +{ + int ret; + + if (!ism->ieq) + return 0; + + ret = ism_cmd_simple(ism, ISM_UNREG_IEQ); + if (ret && ret != ISM_ERROR) + return -EIO; + + dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, + ism->ieq, ism->ieq_dma_addr); + + ism->ieq = NULL; + ism->ieq_dma_addr = 0; + + return 0; +} + +static int ism_read_local_gid(struct ism_dev *ism) +{ + union ism_read_gid cmd; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_READ_GID; + cmd.request.hdr.len = sizeof(cmd.request); + + ret = ism_cmd(ism, &cmd); + if (ret) + goto out; + + ism->smcd->local_gid = cmd.response.gid; +out: + return ret; +} + +static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid, + u32 vid) +{ + struct ism_dev *ism = smcd->priv; + union ism_query_rgid cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_QUERY_RGID; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.rgid = rgid; + cmd.request.vlan_valid = vid_valid; + cmd.request.vlan_id = vid; + + return ism_cmd(ism, &cmd); +} + +static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb) +{ + clear_bit(dmb->sba_idx, ism->sba_bitmap); + dma_free_coherent(&ism->pdev->dev, dmb->dmb_len, + dmb->cpu_addr, dmb->dma_addr); +} + +static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb) +{ + unsigned long bit; + + if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev)) + return -EINVAL; + + if (!dmb->sba_idx) { + bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS, + ISM_DMB_BIT_OFFSET); + if (bit == ISM_NR_DMBS) + return -ENOSPC; + + dmb->sba_idx = bit; + } + if (dmb->sba_idx < ISM_DMB_BIT_OFFSET || + test_and_set_bit(dmb->sba_idx, ism->sba_bitmap)) + return -EINVAL; + + dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len, + &dmb->dma_addr, + GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC | __GFP_COMP | __GFP_NORETRY); + if (!dmb->cpu_addr) + clear_bit(dmb->sba_idx, ism->sba_bitmap); + + return dmb->cpu_addr ? 
0 : -ENOMEM; +} + +static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb) +{ + struct ism_dev *ism = smcd->priv; + union ism_reg_dmb cmd; + int ret; + + ret = ism_alloc_dmb(ism, dmb); + if (ret) + goto out; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_REG_DMB; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.dmb = dmb->dma_addr; + cmd.request.dmb_len = dmb->dmb_len; + cmd.request.sba_idx = dmb->sba_idx; + cmd.request.vlan_valid = dmb->vlan_valid; + cmd.request.vlan_id = dmb->vlan_id; + cmd.request.rgid = dmb->rgid; + + ret = ism_cmd(ism, &cmd); + if (ret) { + ism_free_dmb(ism, dmb); + goto out; + } + dmb->dmb_tok = cmd.response.dmb_tok; +out: + return ret; +} + +static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb) +{ + struct ism_dev *ism = smcd->priv; + union ism_unreg_dmb cmd; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_UNREG_DMB; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.dmb_tok = dmb->dmb_tok; + + ret = ism_cmd(ism, &cmd); + if (ret && ret != ISM_ERROR) + goto out; + + ism_free_dmb(ism, dmb); +out: + return ret; +} + +static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id) +{ + struct ism_dev *ism = smcd->priv; + union ism_set_vlan_id cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_ADD_VLAN_ID; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.vlan_id = vlan_id; + + return ism_cmd(ism, &cmd); +} + +static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id) +{ + struct ism_dev *ism = smcd->priv; + union ism_set_vlan_id cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_DEL_VLAN_ID; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.vlan_id = vlan_id; + + return ism_cmd(ism, &cmd); +} + +static int ism_set_vlan_required(struct smcd_dev *smcd) +{ + return ism_cmd_simple(smcd->priv, ISM_SET_VLAN); +} + +static int ism_reset_vlan_required(struct smcd_dev *smcd) +{ + return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN); +} + +static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq, + u32 event_code, u64 info) +{ + struct ism_dev *ism = smcd->priv; + union ism_sig_ieq cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_SIGNAL_IEQ; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.rgid = rgid; + cmd.request.trigger_irq = trigger_irq; + cmd.request.event_code = event_code; + cmd.request.info = info; + + return ism_cmd(ism, &cmd); +} + +static unsigned int max_bytes(unsigned int start, unsigned int len, + unsigned int boundary) +{ + return min(boundary - (start & (boundary - 1)), len); +} + +static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx, + bool sf, unsigned int offset, void *data, unsigned int size) +{ + struct ism_dev *ism = smcd->priv; + unsigned int bytes; + u64 dmb_req; + int ret; + + while (size) { + bytes = max_bytes(offset, size, PAGE_SIZE); + dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? 
sf : 0, + offset); + + ret = __ism_move(ism, dmb_req, data, bytes); + if (ret) + return ret; + + size -= bytes; + data += bytes; + offset += bytes; + } + + return 0; +} + +static struct ism_systemeid SYSTEM_EID = { + .seid_string = "IBM-SYSZ-ISMSEID00000000", + .serial_number = "0000", + .type = "0000", +}; + +static void ism_create_system_eid(void) +{ + struct cpuid id; + u16 ident_tail; + char tmp[5]; + + get_cpu_id(&id); + ident_tail = (u16)(id.ident & ISM_IDENT_MASK); + snprintf(tmp, 5, "%04X", ident_tail); + memcpy(&SYSTEM_EID.serial_number, tmp, 4); + snprintf(tmp, 5, "%04X", id.machine); + memcpy(&SYSTEM_EID.type, tmp, 4); +} + +static void ism_get_system_eid(struct smcd_dev *smcd, u8 **eid) +{ + *eid = &SYSTEM_EID.seid_string[0]; +} + +static u16 ism_get_chid(struct smcd_dev *smcd) +{ + struct ism_dev *ismdev; + + ismdev = (struct ism_dev *)smcd->priv; + if (!ismdev || !ismdev->pdev) + return 0; + + return to_zpci(ismdev->pdev)->pchid; +} + +static void ism_handle_event(struct ism_dev *ism) +{ + struct smcd_event *entry; + + while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) { + if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry)) + ism->ieq_idx = 0; + + entry = &ism->ieq->entry[ism->ieq_idx]; + debug_event(ism_debug_info, 2, entry, sizeof(*entry)); + smcd_handle_event(ism->smcd, entry); + } +} + +static irqreturn_t ism_handle_irq(int irq, void *data) +{ + struct ism_dev *ism = data; + unsigned long bit, end; + unsigned long *bv; + + bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET]; + end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET; + + spin_lock(&ism->lock); + ism->sba->s = 0; + barrier(); + for (bit = 0;;) { + bit = find_next_bit_inv(bv, end, bit); + if (bit >= end) + break; + + clear_bit_inv(bit, bv); + ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0; + barrier(); + smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET); + } + + if (ism->sba->e) { + ism->sba->e = 0; + barrier(); + ism_handle_event(ism); + } + spin_unlock(&ism->lock); + return IRQ_HANDLED; +} + +static const struct smcd_ops ism_ops = { + .query_remote_gid = ism_query_rgid, + .register_dmb = ism_register_dmb, + .unregister_dmb = ism_unregister_dmb, + .add_vlan_id = ism_add_vlan_id, + .del_vlan_id = ism_del_vlan_id, + .set_vlan_required = ism_set_vlan_required, + .reset_vlan_required = ism_reset_vlan_required, + .signal_event = ism_signal_ieq, + .move_data = ism_move, + .get_system_eid = ism_get_system_eid, + .get_chid = ism_get_chid, +}; + +static int ism_dev_init(struct ism_dev *ism) +{ + struct pci_dev *pdev = ism->pdev; + int ret; + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (ret <= 0) + goto out; + + ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0, + pci_name(pdev), ism); + if (ret) + goto free_vectors; + + ret = register_sba(ism); + if (ret) + goto free_irq; + + ret = register_ieq(ism); + if (ret) + goto unreg_sba; + + ret = ism_read_local_gid(ism); + if (ret) + goto unreg_ieq; + + if (!ism_add_vlan_id(ism->smcd, ISM_RESERVED_VLANID)) + /* hardware is V2 capable */ + ism_create_system_eid(); + + ret = smcd_register_dev(ism->smcd); + if (ret) + goto unreg_ieq; + + query_info(ism); + return 0; + +unreg_ieq: + unregister_ieq(ism); +unreg_sba: + unregister_sba(ism); +free_irq: + free_irq(pci_irq_vector(pdev, 0), ism); +free_vectors: + pci_free_irq_vectors(pdev); +out: + return ret; +} + +static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct ism_dev *ism; + int ret; + + ism = kzalloc(sizeof(*ism), GFP_KERNEL); + 
if (!ism) + return -ENOMEM; + + spin_lock_init(&ism->lock); + dev_set_drvdata(&pdev->dev, ism); + ism->pdev = pdev; + + ret = pci_enable_device_mem(pdev); + if (ret) + goto err; + + ret = pci_request_mem_regions(pdev, DRV_NAME); + if (ret) + goto err_disable; + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) + goto err_resource; + + dma_set_seg_boundary(&pdev->dev, SZ_1M - 1); + dma_set_max_seg_size(&pdev->dev, SZ_1M); + pci_set_master(pdev); + + ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops, + ISM_NR_DMBS); + if (!ism->smcd) { + ret = -ENOMEM; + goto err_resource; + } + + ism->smcd->priv = ism; + ret = ism_dev_init(ism); + if (ret) + goto err_free; + + return 0; + +err_free: + smcd_free_dev(ism->smcd); +err_resource: + pci_release_mem_regions(pdev); +err_disable: + pci_disable_device(pdev); +err: + kfree(ism); + dev_set_drvdata(&pdev->dev, NULL); + return ret; +} + +static void ism_dev_exit(struct ism_dev *ism) +{ + struct pci_dev *pdev = ism->pdev; + + smcd_unregister_dev(ism->smcd); + if (SYSTEM_EID.serial_number[0] != '0' || + SYSTEM_EID.type[0] != '0') + ism_del_vlan_id(ism->smcd, ISM_RESERVED_VLANID); + unregister_ieq(ism); + unregister_sba(ism); + free_irq(pci_irq_vector(pdev, 0), ism); + pci_free_irq_vectors(pdev); +} + +static void ism_remove(struct pci_dev *pdev) +{ + struct ism_dev *ism = dev_get_drvdata(&pdev->dev); + + ism_dev_exit(ism); + + smcd_free_dev(ism->smcd); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); + dev_set_drvdata(&pdev->dev, NULL); + kfree(ism); +} + +static struct pci_driver ism_driver = { + .name = DRV_NAME, + .id_table = ism_device_table, + .probe = ism_probe, + .remove = ism_remove, +}; + +static int __init ism_init(void) +{ + int ret; + + ism_debug_info = debug_register("ism", 2, 1, 16); + if (!ism_debug_info) + return -ENODEV; + + debug_register_view(ism_debug_info, &debug_hex_ascii_view); + ret = pci_register_driver(&ism_driver); + if (ret) + debug_unregister(ism_debug_info); + + return ret; +} + +static void __exit ism_exit(void) +{ + pci_unregister_driver(&ism_driver); + debug_unregister(ism_debug_info); +} + +module_init(ism_init); +module_exit(ism_exit); diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c new file mode 100644 index 000000000..7e743f471 --- /dev/null +++ b/drivers/s390/net/lcs.c @@ -0,0 +1,2410 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Linux for S/390 Lan Channel Station Network Driver + * + * Copyright IBM Corp. 1999, 2009 + * Author(s): Original Code written by + * DJ Barrow <djbarrow@de.ibm.com,barrow_dj@yahoo.com> + * Rewritten by + * Frank Pavlic <fpavlic@de.ibm.com> and + * Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#define KMSG_COMPONENT "lcs" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/if.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/fddidevice.h> +#include <linux/inetdevice.h> +#include <linux/in.h> +#include <linux/igmp.h> +#include <linux/delay.h> +#include <linux/kthread.h> +#include <linux/slab.h> +#include <net/arp.h> +#include <net/ip.h> + +#include <asm/debug.h> +#include <asm/idals.h> +#include <asm/timex.h> +#include <linux/device.h> +#include <asm/ccwgroup.h> + +#include "lcs.h" + + +#if !defined(CONFIG_ETHERNET) && !defined(CONFIG_FDDI) +#error Cannot compile lcs.c without some net devices switched on. 
+#endif + +/** + * initialization string for output + */ + +static char version[] __initdata = "LCS driver"; + +/** + * the root device for lcs group devices + */ +static struct device *lcs_root_dev; + +/** + * Some prototypes. + */ +static void lcs_tasklet(unsigned long); +static void lcs_start_kernel_thread(struct work_struct *); +static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *); +#ifdef CONFIG_IP_MULTICAST +static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *); +#endif /* CONFIG_IP_MULTICAST */ +static int lcs_recovery(void *ptr); + +/** + * Debug Facility Stuff + */ +static char debug_buffer[255]; +static debug_info_t *lcs_dbf_setup; +static debug_info_t *lcs_dbf_trace; + +/** + * LCS Debug Facility functions + */ +static void +lcs_unregister_debug_facility(void) +{ + debug_unregister(lcs_dbf_setup); + debug_unregister(lcs_dbf_trace); +} + +static int +lcs_register_debug_facility(void) +{ + lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8); + lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8); + if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) { + pr_err("Not enough memory for debug facility.\n"); + lcs_unregister_debug_facility(); + return -ENOMEM; + } + debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view); + debug_set_level(lcs_dbf_setup, 2); + debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view); + debug_set_level(lcs_dbf_trace, 2); + return 0; +} + +/** + * Allocate io buffers. + */ +static int +lcs_alloc_channel(struct lcs_channel *channel) +{ + int cnt; + + LCS_DBF_TEXT(2, setup, "ichalloc"); + for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { + /* alloc memory fo iobuffer */ + channel->iob[cnt].data = + kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL); + if (channel->iob[cnt].data == NULL) + break; + channel->iob[cnt].state = LCS_BUF_STATE_EMPTY; + } + if (cnt < LCS_NUM_BUFFS) { + /* Not all io buffers could be allocated. */ + LCS_DBF_TEXT(2, setup, "echalloc"); + while (cnt-- > 0) + kfree(channel->iob[cnt].data); + return -ENOMEM; + } + return 0; +} + +/** + * Free io buffers. + */ +static void +lcs_free_channel(struct lcs_channel *channel) +{ + int cnt; + + LCS_DBF_TEXT(2, setup, "ichfree"); + for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { + kfree(channel->iob[cnt].data); + channel->iob[cnt].data = NULL; + } +} + +/* + * Cleanup channel. + */ +static void +lcs_cleanup_channel(struct lcs_channel *channel) +{ + LCS_DBF_TEXT(3, setup, "cleanch"); + /* Kill write channel tasklets. */ + tasklet_kill(&channel->irq_tasklet); + /* Free channel buffers. */ + lcs_free_channel(channel); +} + +/** + * LCS free memory for card and channels. + */ +static void +lcs_free_card(struct lcs_card *card) +{ + LCS_DBF_TEXT(2, setup, "remcard"); + LCS_DBF_HEX(2, setup, &card, sizeof(void*)); + kfree(card); +} + +/** + * LCS alloc memory for card and channels + */ +static struct lcs_card * +lcs_alloc_card(void) +{ + struct lcs_card *card; + int rc; + + LCS_DBF_TEXT(2, setup, "alloclcs"); + + card = kzalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA); + if (card == NULL) + return NULL; + card->lan_type = LCS_FRAME_TYPE_AUTO; + card->pkt_seq = 0; + card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT; + /* Allocate io buffers for the read channel. */ + rc = lcs_alloc_channel(&card->read); + if (rc){ + LCS_DBF_TEXT(2, setup, "iccwerr"); + lcs_free_card(card); + return NULL; + } + /* Allocate io buffers for the write channel. 
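+	 * If this second allocation fails, the read channel buffers
+	 * allocated above are released again via lcs_cleanup_channel()
+	 * before the card itself is freed, so no half-initialized card
+	 * ever leaves this function.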
*/ + rc = lcs_alloc_channel(&card->write); + if (rc) { + LCS_DBF_TEXT(2, setup, "iccwerr"); + lcs_cleanup_channel(&card->read); + lcs_free_card(card); + return NULL; + } + +#ifdef CONFIG_IP_MULTICAST + INIT_LIST_HEAD(&card->ipm_list); +#endif + LCS_DBF_HEX(2, setup, &card, sizeof(void*)); + return card; +} + +/* + * Setup read channel. + */ +static void +lcs_setup_read_ccws(struct lcs_card *card) +{ + int cnt; + + LCS_DBF_TEXT(2, setup, "ireadccw"); + /* Setup read ccws. */ + memset(card->read.ccws, 0, sizeof (struct ccw1) * (LCS_NUM_BUFFS + 1)); + for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { + card->read.ccws[cnt].cmd_code = LCS_CCW_READ; + card->read.ccws[cnt].count = LCS_IOBUFFERSIZE; + card->read.ccws[cnt].flags = + CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI; + /* + * Note: we have allocated the buffer with GFP_DMA, so + * we do not need to do set_normalized_cda. + */ + card->read.ccws[cnt].cda = + (__u32) __pa(card->read.iob[cnt].data); + ((struct lcs_header *) + card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET; + card->read.iob[cnt].callback = lcs_get_frames_cb; + card->read.iob[cnt].state = LCS_BUF_STATE_READY; + card->read.iob[cnt].count = LCS_IOBUFFERSIZE; + } + card->read.ccws[0].flags &= ~CCW_FLAG_PCI; + card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI; + card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND; + /* Last ccw is a tic (transfer in channel). */ + card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER; + card->read.ccws[LCS_NUM_BUFFS].cda = + (__u32) __pa(card->read.ccws); + /* Setg initial state of the read channel. */ + card->read.state = LCS_CH_STATE_INIT; + + card->read.io_idx = 0; + card->read.buf_idx = 0; +} + +static void +lcs_setup_read(struct lcs_card *card) +{ + LCS_DBF_TEXT(3, setup, "initread"); + + lcs_setup_read_ccws(card); + /* Initialize read channel tasklet. */ + card->read.irq_tasklet.data = (unsigned long) &card->read; + card->read.irq_tasklet.func = lcs_tasklet; + /* Initialize waitqueue. */ + init_waitqueue_head(&card->read.wait_q); +} + +/* + * Setup write channel. + */ +static void +lcs_setup_write_ccws(struct lcs_card *card) +{ + int cnt; + + LCS_DBF_TEXT(3, setup, "iwritccw"); + /* Setup write ccws. */ + memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1)); + for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { + card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE; + card->write.ccws[cnt].count = 0; + card->write.ccws[cnt].flags = + CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI; + /* + * Note: we have allocated the buffer with GFP_DMA, so + * we do not need to do set_normalized_cda. + */ + card->write.ccws[cnt].cda = + (__u32) __pa(card->write.iob[cnt].data); + } + /* Last ccw is a tic (transfer in channel). */ + card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER; + card->write.ccws[LCS_NUM_BUFFS].cda = + (__u32) __pa(card->write.ccws); + /* Set initial state of the write channel. */ + card->read.state = LCS_CH_STATE_INIT; + + card->write.io_idx = 0; + card->write.buf_idx = 0; +} + +static void +lcs_setup_write(struct lcs_card *card) +{ + LCS_DBF_TEXT(3, setup, "initwrit"); + + lcs_setup_write_ccws(card); + /* Initialize write channel tasklet. */ + card->write.irq_tasklet.data = (unsigned long) &card->write; + card->write.irq_tasklet.func = lcs_tasklet; + /* Initialize waitqueue. 
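+	 * lcs_get_lancmd() sleeps on this queue until a write buffer
+	 * becomes free again, and the multicast worker uses it to wait
+	 * for the write channel to drain; both are woken from
+	 * lcs_tasklet() once buffers have been processed.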
*/ + init_waitqueue_head(&card->write.wait_q); +} + +static void +lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads) +{ + unsigned long flags; + + spin_lock_irqsave(&card->mask_lock, flags); + card->thread_allowed_mask = threads; + spin_unlock_irqrestore(&card->mask_lock, flags); + wake_up(&card->wait_q); +} +static int lcs_threads_running(struct lcs_card *card, unsigned long threads) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->mask_lock, flags); + rc = (card->thread_running_mask & threads); + spin_unlock_irqrestore(&card->mask_lock, flags); + return rc; +} + +static int +lcs_wait_for_threads(struct lcs_card *card, unsigned long threads) +{ + return wait_event_interruptible(card->wait_q, + lcs_threads_running(card, threads) == 0); +} + +static int lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread) +{ + unsigned long flags; + + spin_lock_irqsave(&card->mask_lock, flags); + if ( !(card->thread_allowed_mask & thread) || + (card->thread_start_mask & thread) ) { + spin_unlock_irqrestore(&card->mask_lock, flags); + return -EPERM; + } + card->thread_start_mask |= thread; + spin_unlock_irqrestore(&card->mask_lock, flags); + return 0; +} + +static void +lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread) +{ + unsigned long flags; + + spin_lock_irqsave(&card->mask_lock, flags); + card->thread_running_mask &= ~thread; + spin_unlock_irqrestore(&card->mask_lock, flags); + wake_up(&card->wait_q); +} + +static int __lcs_do_run_thread(struct lcs_card *card, unsigned long thread) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->mask_lock, flags); + if (card->thread_start_mask & thread){ + if ((card->thread_allowed_mask & thread) && + !(card->thread_running_mask & thread)){ + rc = 1; + card->thread_start_mask &= ~thread; + card->thread_running_mask |= thread; + } else + rc = -EPERM; + } + spin_unlock_irqrestore(&card->mask_lock, flags); + return rc; +} + +static int +lcs_do_run_thread(struct lcs_card *card, unsigned long thread) +{ + int rc = 0; + wait_event(card->wait_q, + (rc = __lcs_do_run_thread(card, thread)) >= 0); + return rc; +} + +static int +lcs_do_start_thread(struct lcs_card *card, unsigned long thread) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->mask_lock, flags); + LCS_DBF_TEXT_(4, trace, " %02x%02x%02x", + (u8) card->thread_start_mask, + (u8) card->thread_allowed_mask, + (u8) card->thread_running_mask); + rc = (card->thread_start_mask & thread); + spin_unlock_irqrestore(&card->mask_lock, flags); + return rc; +} + +/** + * Initialize channels,card and state machines. + */ +static void +lcs_setup_card(struct lcs_card *card) +{ + LCS_DBF_TEXT(2, setup, "initcard"); + LCS_DBF_HEX(2, setup, &card, sizeof(void*)); + + lcs_setup_read(card); + lcs_setup_write(card); + /* Set cards initial state. */ + card->state = DEV_STATE_DOWN; + card->tx_buffer = NULL; + card->tx_emitted = 0; + + init_waitqueue_head(&card->wait_q); + spin_lock_init(&card->lock); + spin_lock_init(&card->ipm_lock); + spin_lock_init(&card->mask_lock); +#ifdef CONFIG_IP_MULTICAST + INIT_LIST_HEAD(&card->ipm_list); +#endif + INIT_LIST_HEAD(&card->lancmd_waiters); +} + +static void lcs_clear_multicast_list(struct lcs_card *card) +{ +#ifdef CONFIG_IP_MULTICAST + struct lcs_ipm_list *ipm; + unsigned long flags; + + /* Free multicast list. 
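+	 * Entries that already made it to the card (no longer in state
+	 * SET_REQUIRED) are deregistered with a DELIPM command first;
+	 * ipm_lock is dropped around lcs_send_delipm() because that call
+	 * sleeps until the card answers or the command times out.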
*/ + LCS_DBF_TEXT(3, setup, "clmclist"); + spin_lock_irqsave(&card->ipm_lock, flags); + while (!list_empty(&card->ipm_list)){ + ipm = list_entry(card->ipm_list.next, + struct lcs_ipm_list, list); + list_del(&ipm->list); + if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED){ + spin_unlock_irqrestore(&card->ipm_lock, flags); + lcs_send_delipm(card, ipm); + spin_lock_irqsave(&card->ipm_lock, flags); + } + kfree(ipm); + } + spin_unlock_irqrestore(&card->ipm_lock, flags); +#endif +} +/** + * Cleanup channels,card and state machines. + */ +static void +lcs_cleanup_card(struct lcs_card *card) +{ + + LCS_DBF_TEXT(3, setup, "cleancrd"); + LCS_DBF_HEX(2,setup,&card,sizeof(void*)); + + if (card->dev != NULL) + free_netdev(card->dev); + /* Cleanup channels. */ + lcs_cleanup_channel(&card->write); + lcs_cleanup_channel(&card->read); +} + +/** + * Start channel. + */ +static int +lcs_start_channel(struct lcs_channel *channel) +{ + unsigned long flags; + int rc; + + LCS_DBF_TEXT_(4, trace,"ssch%s", dev_name(&channel->ccwdev->dev)); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_start(channel->ccwdev, + channel->ccws + channel->io_idx, 0, 0, + DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND); + if (rc == 0) + channel->state = LCS_CH_STATE_RUNNING; + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + if (rc) { + LCS_DBF_TEXT_(4,trace,"essh%s", + dev_name(&channel->ccwdev->dev)); + dev_err(&channel->ccwdev->dev, + "Starting an LCS device resulted in an error," + " rc=%d!\n", rc); + } + return rc; +} + +static int +lcs_clear_channel(struct lcs_channel *channel) +{ + unsigned long flags; + int rc; + + LCS_DBF_TEXT(4,trace,"clearch"); + LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev)); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_clear(channel->ccwdev, 0); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + if (rc) { + LCS_DBF_TEXT_(4, trace, "ecsc%s", + dev_name(&channel->ccwdev->dev)); + return rc; + } + wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED)); + channel->state = LCS_CH_STATE_STOPPED; + return rc; +} + + +/** + * Stop channel. + */ +static int +lcs_stop_channel(struct lcs_channel *channel) +{ + unsigned long flags; + int rc; + + if (channel->state == LCS_CH_STATE_STOPPED) + return 0; + LCS_DBF_TEXT(4,trace,"haltsch"); + LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev)); + channel->state = LCS_CH_STATE_INIT; + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_halt(channel->ccwdev, 0); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + if (rc) { + LCS_DBF_TEXT_(4, trace, "ehsc%s", + dev_name(&channel->ccwdev->dev)); + return rc; + } + /* Asynchronous halt initialted. Wait for its completion. */ + wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_HALTED)); + lcs_clear_channel(channel); + return 0; +} + +/** + * start read and write channel + */ +static int +lcs_start_channels(struct lcs_card *card) +{ + int rc; + + LCS_DBF_TEXT(2, trace, "chstart"); + /* start read channel */ + rc = lcs_start_channel(&card->read); + if (rc) + return rc; + /* start write channel */ + rc = lcs_start_channel(&card->write); + if (rc) + lcs_stop_channel(&card->read); + return rc; +} + +/** + * stop read and write channel + */ +static int +lcs_stop_channels(struct lcs_card *card) +{ + LCS_DBF_TEXT(2, trace, "chhalt"); + lcs_stop_channel(&card->read); + lcs_stop_channel(&card->write); + return 0; +} + +/** + * Get empty buffer. 
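+ *
+ * Scans the ring of I/O buffers starting at io_idx for one in state
+ * EMPTY, marks it LOCKED and returns it, or returns NULL if all buffers
+ * are in use.  The index arithmetic wraps with &(LCS_NUM_BUFFS - 1),
+ * which relies on LCS_NUM_BUFFS being a power of two.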
+ */ +static struct lcs_buffer * +__lcs_get_buffer(struct lcs_channel *channel) +{ + int index; + + LCS_DBF_TEXT(5, trace, "_getbuff"); + index = channel->io_idx; + do { + if (channel->iob[index].state == LCS_BUF_STATE_EMPTY) { + channel->iob[index].state = LCS_BUF_STATE_LOCKED; + return channel->iob + index; + } + index = (index + 1) & (LCS_NUM_BUFFS - 1); + } while (index != channel->io_idx); + return NULL; +} + +static struct lcs_buffer * +lcs_get_buffer(struct lcs_channel *channel) +{ + struct lcs_buffer *buffer; + unsigned long flags; + + LCS_DBF_TEXT(5, trace, "getbuff"); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + buffer = __lcs_get_buffer(channel); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + return buffer; +} + +/** + * Resume channel program if the channel is suspended. + */ +static int +__lcs_resume_channel(struct lcs_channel *channel) +{ + int rc; + + if (channel->state != LCS_CH_STATE_SUSPENDED) + return 0; + if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND) + return 0; + LCS_DBF_TEXT_(5, trace, "rsch%s", dev_name(&channel->ccwdev->dev)); + rc = ccw_device_resume(channel->ccwdev); + if (rc) { + LCS_DBF_TEXT_(4, trace, "ersc%s", + dev_name(&channel->ccwdev->dev)); + dev_err(&channel->ccwdev->dev, + "Sending data from the LCS device to the LAN failed" + " with rc=%d\n",rc); + } else + channel->state = LCS_CH_STATE_RUNNING; + return rc; + +} + +/** + * Make a buffer ready for processing. + */ +static void __lcs_ready_buffer_bits(struct lcs_channel *channel, int index) +{ + int prev, next; + + LCS_DBF_TEXT(5, trace, "rdybits"); + prev = (index - 1) & (LCS_NUM_BUFFS - 1); + next = (index + 1) & (LCS_NUM_BUFFS - 1); + /* Check if we may clear the suspend bit of this buffer. */ + if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) { + /* Check if we have to set the PCI bit. */ + if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND)) + /* Suspend bit of the previous buffer is not set. */ + channel->ccws[index].flags |= CCW_FLAG_PCI; + /* Suspend bit of the next buffer is set. */ + channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND; + } +} + +static int +lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) +{ + unsigned long flags; + int index, rc; + + LCS_DBF_TEXT(5, trace, "rdybuff"); + BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED && + buffer->state != LCS_BUF_STATE_PROCESSED); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + buffer->state = LCS_BUF_STATE_READY; + index = buffer - channel->iob; + /* Set length. */ + channel->ccws[index].count = buffer->count; + /* Check relevant PCI/suspend bits. */ + __lcs_ready_buffer_bits(channel, index); + rc = __lcs_resume_channel(channel); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + return rc; +} + +/** + * Mark the buffer as processed. Take care of the suspend bit + * of the previous buffer. This function is called from + * interrupt context, so the lock must not be taken. + */ +static int +__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) +{ + int index, prev, next; + + LCS_DBF_TEXT(5, trace, "prcsbuff"); + BUG_ON(buffer->state != LCS_BUF_STATE_READY); + buffer->state = LCS_BUF_STATE_PROCESSED; + index = buffer - channel->iob; + prev = (index - 1) & (LCS_NUM_BUFFS - 1); + next = (index + 1) & (LCS_NUM_BUFFS - 1); + /* Set the suspend bit and clear the PCI bit of this buffer. 
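+	 * Re-arming the suspend bit makes the circular channel program
+	 * park on this buffer instead of running into data that has not
+	 * been refilled yet; lcs_ready_buffer() clears it again through
+	 * __lcs_ready_buffer_bits() once the buffer is handed back.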
*/ + channel->ccws[index].flags |= CCW_FLAG_SUSPEND; + channel->ccws[index].flags &= ~CCW_FLAG_PCI; + /* Check the suspend bit of the previous buffer. */ + if (channel->iob[prev].state == LCS_BUF_STATE_READY) { + /* + * Previous buffer is in state ready. It might have + * happened in lcs_ready_buffer that the suspend bit + * has not been cleared to avoid an endless loop. + * Do it now. + */ + __lcs_ready_buffer_bits(channel, prev); + } + /* Clear PCI bit of next buffer. */ + channel->ccws[next].flags &= ~CCW_FLAG_PCI; + return __lcs_resume_channel(channel); +} + +/** + * Put a processed buffer back to state empty. + */ +static void +lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) +{ + unsigned long flags; + + LCS_DBF_TEXT(5, trace, "relbuff"); + BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED && + buffer->state != LCS_BUF_STATE_PROCESSED); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + buffer->state = LCS_BUF_STATE_EMPTY; + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); +} + +/** + * Get buffer for a lan command. + */ +static struct lcs_buffer * +lcs_get_lancmd(struct lcs_card *card, int count) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(4, trace, "getlncmd"); + /* Get buffer and wait if none is available. */ + wait_event(card->write.wait_q, + ((buffer = lcs_get_buffer(&card->write)) != NULL)); + count += sizeof(struct lcs_header); + *(__u16 *)(buffer->data + count) = 0; + buffer->count = count + sizeof(__u16); + buffer->callback = lcs_release_buffer; + cmd = (struct lcs_cmd *) buffer->data; + cmd->offset = count; + cmd->type = LCS_FRAME_TYPE_CONTROL; + cmd->slot = 0; + return buffer; +} + + +static void +lcs_get_reply(struct lcs_reply *reply) +{ + refcount_inc(&reply->refcnt); +} + +static void +lcs_put_reply(struct lcs_reply *reply) +{ + if (refcount_dec_and_test(&reply->refcnt)) + kfree(reply); +} + +static struct lcs_reply * +lcs_alloc_reply(struct lcs_cmd *cmd) +{ + struct lcs_reply *reply; + + LCS_DBF_TEXT(4, trace, "getreply"); + + reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC); + if (!reply) + return NULL; + refcount_set(&reply->refcnt, 1); + reply->sequence_no = cmd->sequence_no; + reply->received = 0; + reply->rc = 0; + init_waitqueue_head(&reply->wait_q); + + return reply; +} + +/** + * Notifier function for lancmd replies. Called from read irq. + */ +static void +lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd) +{ + struct list_head *l, *n; + struct lcs_reply *reply; + + LCS_DBF_TEXT(4, trace, "notiwait"); + spin_lock(&card->lock); + list_for_each_safe(l, n, &card->lancmd_waiters) { + reply = list_entry(l, struct lcs_reply, list); + if (reply->sequence_no == cmd->sequence_no) { + lcs_get_reply(reply); + list_del_init(&reply->list); + if (reply->callback != NULL) + reply->callback(card, cmd); + reply->received = 1; + reply->rc = cmd->return_code; + wake_up(&reply->wait_q); + lcs_put_reply(reply); + break; + } + } + spin_unlock(&card->lock); +} + +/** + * Emit buffer of a lan command. 
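+ *
+ * lcs_send_lancmd() queues a reply object keyed by the command's
+ * sequence_no on lancmd_waiters, emits the buffer on the write channel
+ * and sleeps until either lcs_notify_lancmd_waiters() matches the
+ * answer coming in on the read channel or the timer below expires and
+ * fails the request with -ETIME.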
+ */ +static void +lcs_lancmd_timeout(struct timer_list *t) +{ + struct lcs_reply *reply = from_timer(reply, t, timer); + struct lcs_reply *list_reply, *r; + unsigned long flags; + + LCS_DBF_TEXT(4, trace, "timeout"); + spin_lock_irqsave(&reply->card->lock, flags); + list_for_each_entry_safe(list_reply, r, + &reply->card->lancmd_waiters,list) { + if (reply == list_reply) { + lcs_get_reply(reply); + list_del_init(&reply->list); + spin_unlock_irqrestore(&reply->card->lock, flags); + reply->received = 1; + reply->rc = -ETIME; + wake_up(&reply->wait_q); + lcs_put_reply(reply); + return; + } + } + spin_unlock_irqrestore(&reply->card->lock, flags); +} + +static int +lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer, + void (*reply_callback)(struct lcs_card *, struct lcs_cmd *)) +{ + struct lcs_reply *reply; + struct lcs_cmd *cmd; + unsigned long flags; + int rc; + + LCS_DBF_TEXT(4, trace, "sendcmd"); + cmd = (struct lcs_cmd *) buffer->data; + cmd->return_code = 0; + cmd->sequence_no = card->sequence_no++; + reply = lcs_alloc_reply(cmd); + if (!reply) + return -ENOMEM; + reply->callback = reply_callback; + reply->card = card; + spin_lock_irqsave(&card->lock, flags); + list_add_tail(&reply->list, &card->lancmd_waiters); + spin_unlock_irqrestore(&card->lock, flags); + + buffer->callback = lcs_release_buffer; + rc = lcs_ready_buffer(&card->write, buffer); + if (rc) + return rc; + timer_setup(&reply->timer, lcs_lancmd_timeout, 0); + mod_timer(&reply->timer, jiffies + HZ * card->lancmd_timeout); + wait_event(reply->wait_q, reply->received); + del_timer_sync(&reply->timer); + LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc); + rc = reply->rc; + lcs_put_reply(reply); + return rc ? -EIO : 0; +} + +/** + * LCS startup command + */ +static int +lcs_send_startup(struct lcs_card *card, __u8 initiator) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2, trace, "startup"); + buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_STARTUP; + cmd->initiator = initiator; + cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE; + return lcs_send_lancmd(card, buffer, NULL); +} + +/** + * LCS shutdown command + */ +static int +lcs_send_shutdown(struct lcs_card *card) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2, trace, "shutdown"); + buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_SHUTDOWN; + cmd->initiator = LCS_INITIATOR_TCPIP; + return lcs_send_lancmd(card, buffer, NULL); +} + +/** + * LCS lanstat command + */ +static void +__lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd) +{ + LCS_DBF_TEXT(2, trace, "statcb"); + memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr, LCS_MAC_LENGTH); +} + +static int +lcs_send_lanstat(struct lcs_card *card) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2,trace, "cmdstat"); + buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + /* Setup lanstat command. 
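+	 * LANSTAT follows the same request/reply pattern as the other LAN
+	 * commands; its reply callback __lcs_lanstat_cb() copies the MAC
+	 * address reported by the card into card->mac.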
*/ + cmd->cmd_code = LCS_CMD_LANSTAT; + cmd->initiator = LCS_INITIATOR_TCPIP; + cmd->cmd.lcs_std_cmd.lan_type = card->lan_type; + cmd->cmd.lcs_std_cmd.portno = card->portno; + return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb); +} + +/** + * send stoplan command + */ +static int +lcs_send_stoplan(struct lcs_card *card, __u8 initiator) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2, trace, "cmdstpln"); + buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_STOPLAN; + cmd->initiator = initiator; + cmd->cmd.lcs_std_cmd.lan_type = card->lan_type; + cmd->cmd.lcs_std_cmd.portno = card->portno; + return lcs_send_lancmd(card, buffer, NULL); +} + +/** + * send startlan command + */ +static void +__lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd) +{ + LCS_DBF_TEXT(2, trace, "srtlancb"); + card->lan_type = cmd->cmd.lcs_std_cmd.lan_type; + card->portno = cmd->cmd.lcs_std_cmd.portno; +} + +static int +lcs_send_startlan(struct lcs_card *card, __u8 initiator) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2, trace, "cmdstaln"); + buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_STARTLAN; + cmd->initiator = initiator; + cmd->cmd.lcs_std_cmd.lan_type = card->lan_type; + cmd->cmd.lcs_std_cmd.portno = card->portno; + return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb); +} + +#ifdef CONFIG_IP_MULTICAST +/** + * send setipm command (Multicast) + */ +static int +lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2, trace, "cmdsetim"); + buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_SETIPM; + cmd->initiator = LCS_INITIATOR_TCPIP; + cmd->cmd.lcs_qipassist.lan_type = card->lan_type; + cmd->cmd.lcs_qipassist.portno = card->portno; + cmd->cmd.lcs_qipassist.version = 4; + cmd->cmd.lcs_qipassist.num_ip_pairs = 1; + memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair, + &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair)); + LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr); + return lcs_send_lancmd(card, buffer, NULL); +} + +/** + * send delipm command (Multicast) + */ +static int +lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2, trace, "cmddelim"); + buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_DELIPM; + cmd->initiator = LCS_INITIATOR_TCPIP; + cmd->cmd.lcs_qipassist.lan_type = card->lan_type; + cmd->cmd.lcs_qipassist.portno = card->portno; + cmd->cmd.lcs_qipassist.version = 4; + cmd->cmd.lcs_qipassist.num_ip_pairs = 1; + memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair, + &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair)); + LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr); + return lcs_send_lancmd(card, buffer, NULL); +} + +/** + * check if multicast is supported by LCS + */ +static void +__lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd) +{ + LCS_DBF_TEXT(2, trace, "chkmccb"); + card->ip_assists_supported = + cmd->cmd.lcs_qipassist.ip_assists_supported; + card->ip_assists_enabled = + cmd->cmd.lcs_qipassist.ip_assists_enabled; +} + +static int +lcs_check_multicast_support(struct lcs_card *card) +{ + struct lcs_buffer *buffer; + struct lcs_cmd 
*cmd; + int rc; + + LCS_DBF_TEXT(2, trace, "cmdqipa"); + /* Send query ipassist. */ + buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_QIPASSIST; + cmd->initiator = LCS_INITIATOR_TCPIP; + cmd->cmd.lcs_qipassist.lan_type = card->lan_type; + cmd->cmd.lcs_qipassist.portno = card->portno; + cmd->cmd.lcs_qipassist.version = 4; + cmd->cmd.lcs_qipassist.num_ip_pairs = 1; + rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb); + if (rc != 0) { + pr_err("Query IPAssist failed. Assuming unsupported!\n"); + return -EOPNOTSUPP; + } + if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) + return 0; + return -EOPNOTSUPP; +} + +/** + * set or del multicast address on LCS card + */ +static void +lcs_fix_multicast_list(struct lcs_card *card) +{ + struct list_head failed_list; + struct lcs_ipm_list *ipm, *tmp; + unsigned long flags; + int rc; + + LCS_DBF_TEXT(4,trace, "fixipm"); + INIT_LIST_HEAD(&failed_list); + spin_lock_irqsave(&card->ipm_lock, flags); +list_modified: + list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){ + switch (ipm->ipm_state) { + case LCS_IPM_STATE_SET_REQUIRED: + /* del from ipm_list so no one else can tamper with + * this entry */ + list_del_init(&ipm->list); + spin_unlock_irqrestore(&card->ipm_lock, flags); + rc = lcs_send_setipm(card, ipm); + spin_lock_irqsave(&card->ipm_lock, flags); + if (rc) { + pr_info("Adding multicast address failed." + " Table possibly full!\n"); + /* store ipm in failed list -> will be added + * to ipm_list again, so a retry will be done + * during the next call of this function */ + list_add_tail(&ipm->list, &failed_list); + } else { + ipm->ipm_state = LCS_IPM_STATE_ON_CARD; + /* re-insert into ipm_list */ + list_add_tail(&ipm->list, &card->ipm_list); + } + goto list_modified; + case LCS_IPM_STATE_DEL_REQUIRED: + list_del(&ipm->list); + spin_unlock_irqrestore(&card->ipm_lock, flags); + lcs_send_delipm(card, ipm); + spin_lock_irqsave(&card->ipm_lock, flags); + kfree(ipm); + goto list_modified; + case LCS_IPM_STATE_ON_CARD: + break; + } + } + /* re-insert all entries from the failed_list into ipm_list */ + list_for_each_entry_safe(ipm, tmp, &failed_list, list) + list_move_tail(&ipm->list, &card->ipm_list); + + spin_unlock_irqrestore(&card->ipm_lock, flags); +} + +/** + * get mac address for the relevant Multicast address + */ +static void +lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev) +{ + LCS_DBF_TEXT(4,trace, "getmac"); + ip_eth_mc_map(ipm, mac); +} + +/** + * function called by net device to handle multicast address relevant things + */ +static void lcs_remove_mc_addresses(struct lcs_card *card, + struct in_device *in4_dev) +{ + struct ip_mc_list *im4; + struct list_head *l; + struct lcs_ipm_list *ipm; + unsigned long flags; + char buf[MAX_ADDR_LEN]; + + LCS_DBF_TEXT(4, trace, "remmclst"); + spin_lock_irqsave(&card->ipm_lock, flags); + list_for_each(l, &card->ipm_list) { + ipm = list_entry(l, struct lcs_ipm_list, list); + for (im4 = rcu_dereference(in4_dev->mc_list); + im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) { + lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev); + if ( (ipm->ipm.ip_addr == im4->multiaddr) && + (memcmp(buf, &ipm->ipm.mac_addr, + LCS_MAC_LENGTH) == 0) ) + break; + } + if (im4 == NULL) + ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED; + } + spin_unlock_irqrestore(&card->ipm_lock, flags); +} + +static struct lcs_ipm_list *lcs_check_addr_entry(struct lcs_card *card, + struct ip_mc_list *im4, + char *buf) +{ + struct 
lcs_ipm_list *tmp, *ipm = NULL; + struct list_head *l; + unsigned long flags; + + LCS_DBF_TEXT(4, trace, "chkmcent"); + spin_lock_irqsave(&card->ipm_lock, flags); + list_for_each(l, &card->ipm_list) { + tmp = list_entry(l, struct lcs_ipm_list, list); + if ( (tmp->ipm.ip_addr == im4->multiaddr) && + (memcmp(buf, &tmp->ipm.mac_addr, + LCS_MAC_LENGTH) == 0) ) { + ipm = tmp; + break; + } + } + spin_unlock_irqrestore(&card->ipm_lock, flags); + return ipm; +} + +static void lcs_set_mc_addresses(struct lcs_card *card, + struct in_device *in4_dev) +{ + + struct ip_mc_list *im4; + struct lcs_ipm_list *ipm; + char buf[MAX_ADDR_LEN]; + unsigned long flags; + + LCS_DBF_TEXT(4, trace, "setmclst"); + for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL; + im4 = rcu_dereference(im4->next_rcu)) { + lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev); + ipm = lcs_check_addr_entry(card, im4, buf); + if (ipm != NULL) + continue; /* Address already in list. */ + ipm = kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC); + if (ipm == NULL) { + pr_info("Not enough memory to add" + " new multicast entry!\n"); + break; + } + memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH); + ipm->ipm.ip_addr = im4->multiaddr; + ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED; + spin_lock_irqsave(&card->ipm_lock, flags); + LCS_DBF_HEX(2,trace,&ipm->ipm.ip_addr,4); + list_add(&ipm->list, &card->ipm_list); + spin_unlock_irqrestore(&card->ipm_lock, flags); + } +} + +static int +lcs_register_mc_addresses(void *data) +{ + struct lcs_card *card; + struct in_device *in4_dev; + + card = (struct lcs_card *) data; + + if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD)) + return 0; + LCS_DBF_TEXT(4, trace, "regmulti"); + + in4_dev = in_dev_get(card->dev); + if (in4_dev == NULL) + goto out; + rcu_read_lock(); + lcs_remove_mc_addresses(card,in4_dev); + lcs_set_mc_addresses(card, in4_dev); + rcu_read_unlock(); + in_dev_put(in4_dev); + + netif_carrier_off(card->dev); + netif_tx_disable(card->dev); + wait_event(card->write.wait_q, + (card->write.state != LCS_CH_STATE_RUNNING)); + lcs_fix_multicast_list(card); + if (card->state == DEV_STATE_UP) { + netif_carrier_on(card->dev); + netif_wake_queue(card->dev); + } +out: + lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD); + return 0; +} +#endif /* CONFIG_IP_MULTICAST */ + +/** + * function called by net device to + * handle multicast address relevant things + */ +static void +lcs_set_multicast_list(struct net_device *dev) +{ +#ifdef CONFIG_IP_MULTICAST + struct lcs_card *card; + + LCS_DBF_TEXT(4, trace, "setmulti"); + card = (struct lcs_card *) dev->ml_priv; + + if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) + schedule_work(&card->kernel_thread_starter); +#endif /* CONFIG_IP_MULTICAST */ +} + +static long +lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb) +{ + if (!IS_ERR(irb)) + return 0; + + switch (PTR_ERR(irb)) { + case -EIO: + dev_warn(&cdev->dev, + "An I/O-error occurred on the LCS device\n"); + LCS_DBF_TEXT(2, trace, "ckirberr"); + LCS_DBF_TEXT_(2, trace, " rc%d", -EIO); + break; + case -ETIMEDOUT: + dev_warn(&cdev->dev, + "A command timed out on the LCS device\n"); + LCS_DBF_TEXT(2, trace, "ckirberr"); + LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT); + break; + default: + dev_warn(&cdev->dev, + "An error occurred on the LCS device, rc=%ld\n", + PTR_ERR(irb)); + LCS_DBF_TEXT(2, trace, "ckirberr"); + LCS_DBF_TEXT(2, trace, " rc???"); + } + return PTR_ERR(irb); +} + +static int +lcs_get_problem(struct ccw_device *cdev, struct irb *irb) +{ + int dstat, cstat; + char *sense; + + 
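+	/*
+	 * Classify the interrupt status: channel checks, resetting-event
+	 * unit checks and unit checks with other non-zero sense data make
+	 * this function return 1, so that lcs_irq() puts the channel into
+	 * error state and schedules recovery; command rejects and unit
+	 * checks without sense data return 0 and are handled without
+	 * restarting the card.
+	 */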
sense = (char *) irb->ecw; + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; + + if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | + SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | + SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { + LCS_DBF_TEXT(2, trace, "CGENCHK"); + return 1; + } + if (dstat & DEV_STAT_UNIT_CHECK) { + if (sense[LCS_SENSE_BYTE_1] & + LCS_SENSE_RESETTING_EVENT) { + LCS_DBF_TEXT(2, trace, "REVIND"); + return 1; + } + if (sense[LCS_SENSE_BYTE_0] & + LCS_SENSE_CMD_REJECT) { + LCS_DBF_TEXT(2, trace, "CMDREJ"); + return 0; + } + if ((!sense[LCS_SENSE_BYTE_0]) && + (!sense[LCS_SENSE_BYTE_1]) && + (!sense[LCS_SENSE_BYTE_2]) && + (!sense[LCS_SENSE_BYTE_3])) { + LCS_DBF_TEXT(2, trace, "ZEROSEN"); + return 0; + } + LCS_DBF_TEXT(2, trace, "DGENCHK"); + return 1; + } + return 0; +} + +static void +lcs_schedule_recovery(struct lcs_card *card) +{ + LCS_DBF_TEXT(2, trace, "startrec"); + if (!lcs_set_thread_start_bit(card, LCS_RECOVERY_THREAD)) + schedule_work(&card->kernel_thread_starter); +} + +/** + * IRQ Handler for LCS channels + */ +static void +lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) +{ + struct lcs_card *card; + struct lcs_channel *channel; + int rc, index; + int cstat, dstat; + + if (lcs_check_irb_error(cdev, irb)) + return; + + card = CARD_FROM_DEV(cdev); + if (card->read.ccwdev == cdev) + channel = &card->read; + else + channel = &card->write; + + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; + LCS_DBF_TEXT_(5, trace, "Rint%s", dev_name(&cdev->dev)); + LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat, + irb->scsw.cmd.dstat); + LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl, + irb->scsw.cmd.actl); + + /* Check for channel and device errors presented */ + rc = lcs_get_problem(cdev, irb); + if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) { + dev_warn(&cdev->dev, + "The LCS device stopped because of an error," + " dstat=0x%X, cstat=0x%X \n", + dstat, cstat); + if (rc) { + channel->state = LCS_CH_STATE_ERROR; + } + } + if (channel->state == LCS_CH_STATE_ERROR) { + lcs_schedule_recovery(card); + wake_up(&card->wait_q); + return; + } + /* How far in the ccw chain have we processed? */ + if ((channel->state != LCS_CH_STATE_INIT) && + (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) && + (irb->scsw.cmd.cpa != 0)) { + index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa) + - channel->ccws; + if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) || + (irb->scsw.cmd.cstat & SCHN_STAT_PCI)) + /* Bloody io subsystem tells us lies about cpa... */ + index = (index - 1) & (LCS_NUM_BUFFS - 1); + while (channel->io_idx != index) { + __lcs_processed_buffer(channel, + channel->iob + channel->io_idx); + channel->io_idx = + (channel->io_idx + 1) & (LCS_NUM_BUFFS - 1); + } + } + + if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) || + (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) || + (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) + /* Mark channel as stopped. */ + channel->state = LCS_CH_STATE_STOPPED; + else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) + /* CCW execution stopped on a suspend bit. */ + channel->state = LCS_CH_STATE_SUSPENDED; + if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) { + if (irb->scsw.cmd.cc != 0) { + ccw_device_halt(channel->ccwdev, 0); + return; + } + /* The channel has been stopped by halt_IO. */ + channel->state = LCS_CH_STATE_HALTED; + } + if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) + channel->state = LCS_CH_STATE_CLEARED; + /* Do the rest in the tasklet. 
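+	 * The tasklet runs the callbacks of the buffers that were marked
+	 * processed above, restarts a stopped channel, resumes a suspended
+	 * one if its next buffer is ready and finally wakes up everybody
+	 * waiting on the channel.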
*/ + tasklet_schedule(&channel->irq_tasklet); +} + +/** + * Tasklet for IRQ handler + */ +static void +lcs_tasklet(unsigned long data) +{ + unsigned long flags; + struct lcs_channel *channel; + struct lcs_buffer *iob; + int buf_idx; + + channel = (struct lcs_channel *) data; + LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev)); + + /* Check for processed buffers. */ + iob = channel->iob; + buf_idx = channel->buf_idx; + while (iob[buf_idx].state == LCS_BUF_STATE_PROCESSED) { + /* Do the callback thing. */ + if (iob[buf_idx].callback != NULL) + iob[buf_idx].callback(channel, iob + buf_idx); + buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1); + } + channel->buf_idx = buf_idx; + + if (channel->state == LCS_CH_STATE_STOPPED) + lcs_start_channel(channel); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + if (channel->state == LCS_CH_STATE_SUSPENDED && + channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY) + __lcs_resume_channel(channel); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + + /* Something happened on the channel. Wake up waiters. */ + wake_up(&channel->wait_q); +} + +/** + * Finish current tx buffer and make it ready for transmit. + */ +static void +__lcs_emit_txbuffer(struct lcs_card *card) +{ + LCS_DBF_TEXT(5, trace, "emittx"); + *(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0; + card->tx_buffer->count += 2; + lcs_ready_buffer(&card->write, card->tx_buffer); + card->tx_buffer = NULL; + card->tx_emitted++; +} + +/** + * Callback for finished tx buffers. + */ +static void +lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer) +{ + struct lcs_card *card; + + LCS_DBF_TEXT(5, trace, "txbuffcb"); + /* Put buffer back to pool. */ + lcs_release_buffer(channel, buffer); + card = container_of(channel, struct lcs_card, write); + if (netif_queue_stopped(card->dev) && netif_carrier_ok(card->dev)) + netif_wake_queue(card->dev); + spin_lock(&card->lock); + card->tx_emitted--; + if (card->tx_emitted <= 0 && card->tx_buffer != NULL) + /* + * Last running tx buffer has finished. Submit partially + * filled current buffer. + */ + __lcs_emit_txbuffer(card); + spin_unlock(&card->lock); +} + +/** + * Packet transmit function called by network stack + */ +static netdev_tx_t __lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, + struct net_device *dev) +{ + struct lcs_header *header; + int rc = NETDEV_TX_OK; + + LCS_DBF_TEXT(5, trace, "hardxmit"); + if (skb == NULL) { + card->stats.tx_dropped++; + card->stats.tx_errors++; + return NETDEV_TX_OK; + } + if (card->state != DEV_STATE_UP) { + dev_kfree_skb(skb); + card->stats.tx_dropped++; + card->stats.tx_errors++; + card->stats.tx_carrier_errors++; + return NETDEV_TX_OK; + } + if (skb->protocol == htons(ETH_P_IPV6)) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + netif_stop_queue(card->dev); + spin_lock(&card->lock); + if (card->tx_buffer != NULL && + card->tx_buffer->count + sizeof(struct lcs_header) + + skb->len + sizeof(u16) > LCS_IOBUFFERSIZE) + /* skb too big for current tx buffer. 
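+		 * Emit the partially filled buffer now so that a fresh
+		 * tx buffer can be allocated for this skb below.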
*/ + __lcs_emit_txbuffer(card); + if (card->tx_buffer == NULL) { + /* Get new tx buffer */ + card->tx_buffer = lcs_get_buffer(&card->write); + if (card->tx_buffer == NULL) { + card->stats.tx_dropped++; + rc = NETDEV_TX_BUSY; + goto out; + } + card->tx_buffer->callback = lcs_txbuffer_cb; + card->tx_buffer->count = 0; + } + header = (struct lcs_header *) + (card->tx_buffer->data + card->tx_buffer->count); + card->tx_buffer->count += skb->len + sizeof(struct lcs_header); + header->offset = card->tx_buffer->count; + header->type = card->lan_type; + header->slot = card->portno; + skb_copy_from_linear_data(skb, header + 1, skb->len); + spin_unlock(&card->lock); + card->stats.tx_bytes += skb->len; + card->stats.tx_packets++; + dev_kfree_skb(skb); + netif_wake_queue(card->dev); + spin_lock(&card->lock); + if (card->tx_emitted <= 0 && card->tx_buffer != NULL) + /* If this is the first tx buffer emit it immediately. */ + __lcs_emit_txbuffer(card); +out: + spin_unlock(&card->lock); + return rc; +} + +static netdev_tx_t lcs_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct lcs_card *card; + int rc; + + LCS_DBF_TEXT(5, trace, "pktxmit"); + card = (struct lcs_card *) dev->ml_priv; + rc = __lcs_start_xmit(card, skb, dev); + return rc; +} + +/** + * send startlan and lanstat command to make LCS device ready + */ +static int +lcs_startlan_auto(struct lcs_card *card) +{ + int rc; + + LCS_DBF_TEXT(2, trace, "strtauto"); +#ifdef CONFIG_ETHERNET + card->lan_type = LCS_FRAME_TYPE_ENET; + rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); + if (rc == 0) + return 0; + +#endif +#ifdef CONFIG_FDDI + card->lan_type = LCS_FRAME_TYPE_FDDI; + rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); + if (rc == 0) + return 0; +#endif + return -EIO; +} + +static int +lcs_startlan(struct lcs_card *card) +{ + int rc, i; + + LCS_DBF_TEXT(2, trace, "startlan"); + rc = 0; + if (card->portno != LCS_INVALID_PORT_NO) { + if (card->lan_type == LCS_FRAME_TYPE_AUTO) + rc = lcs_startlan_auto(card); + else + rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); + } else { + for (i = 0; i <= 16; i++) { + card->portno = i; + if (card->lan_type != LCS_FRAME_TYPE_AUTO) + rc = lcs_send_startlan(card, + LCS_INITIATOR_TCPIP); + else + /* autodetecting lan type */ + rc = lcs_startlan_auto(card); + if (rc == 0) + break; + } + } + if (rc == 0) + return lcs_send_lanstat(card); + return rc; +} + +/** + * LCS detect function + * setup channels and make them I/O ready + */ +static int +lcs_detect(struct lcs_card *card) +{ + int rc = 0; + + LCS_DBF_TEXT(2, setup, "lcsdetct"); + /* start/reset card */ + if (card->dev) + netif_stop_queue(card->dev); + rc = lcs_stop_channels(card); + if (rc == 0) { + rc = lcs_start_channels(card); + if (rc == 0) { + rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP); + if (rc == 0) + rc = lcs_startlan(card); + } + } + if (rc == 0) { + card->state = DEV_STATE_UP; + } else { + card->state = DEV_STATE_DOWN; + card->write.state = LCS_CH_STATE_INIT; + card->read.state = LCS_CH_STATE_INIT; + } + return rc; +} + +/** + * LCS Stop card + */ +static int +lcs_stopcard(struct lcs_card *card) +{ + int rc; + + LCS_DBF_TEXT(3, setup, "stopcard"); + + if (card->read.state != LCS_CH_STATE_STOPPED && + card->write.state != LCS_CH_STATE_STOPPED && + card->read.state != LCS_CH_STATE_ERROR && + card->write.state != LCS_CH_STATE_ERROR && + card->state == DEV_STATE_UP) { + lcs_clear_multicast_list(card); + rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP); + rc = lcs_send_shutdown(card); + } + rc = lcs_stop_channels(card); + 
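+	/* The card is marked down even if stopping the channels failed. */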
card->state = DEV_STATE_DOWN; + + return rc; +} + +/** + * Kernel Thread helper functions for LGW initiated commands + */ +static void +lcs_start_kernel_thread(struct work_struct *work) +{ + struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter); + LCS_DBF_TEXT(5, trace, "krnthrd"); + if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD)) + kthread_run(lcs_recovery, card, "lcs_recover"); +#ifdef CONFIG_IP_MULTICAST + if (lcs_do_start_thread(card, LCS_SET_MC_THREAD)) + kthread_run(lcs_register_mc_addresses, card, "regipm"); +#endif +} + +/** + * Process control frames. + */ +static void +lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd) +{ + LCS_DBF_TEXT(5, trace, "getctrl"); + if (cmd->initiator == LCS_INITIATOR_LGW) { + switch(cmd->cmd_code) { + case LCS_CMD_STARTUP: + case LCS_CMD_STARTLAN: + lcs_schedule_recovery(card); + break; + case LCS_CMD_STOPLAN: + if (card->dev) { + pr_warn("Stoplan for %s initiated by LGW\n", + card->dev->name); + netif_carrier_off(card->dev); + } + break; + default: + LCS_DBF_TEXT(5, trace, "noLGWcmd"); + break; + } + } else + lcs_notify_lancmd_waiters(card, cmd); +} + +/** + * Unpack network packet. + */ +static void +lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len) +{ + struct sk_buff *skb; + + LCS_DBF_TEXT(5, trace, "getskb"); + if (card->dev == NULL || + card->state != DEV_STATE_UP) + /* The card isn't up. Ignore the packet. */ + return; + + skb = dev_alloc_skb(skb_len); + if (skb == NULL) { + dev_err(&card->dev->dev, + " Allocating a socket buffer to interface %s failed\n", + card->dev->name); + card->stats.rx_dropped++; + return; + } + skb_put_data(skb, skb_data, skb_len); + skb->protocol = card->lan_type_trans(skb, card->dev); + card->stats.rx_bytes += skb_len; + card->stats.rx_packets++; + if (skb->protocol == htons(ETH_P_802_2)) + *((__u32 *)skb->cb) = ++card->pkt_seq; + netif_rx(skb); +} + +/** + * LCS main routine to get packets and lancmd replies from the buffers + */ +static void +lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer) +{ + struct lcs_card *card; + struct lcs_header *lcs_hdr; + __u16 offset; + + LCS_DBF_TEXT(5, trace, "lcsgtpkt"); + lcs_hdr = (struct lcs_header *) buffer->data; + if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) { + LCS_DBF_TEXT(4, trace, "-eiogpkt"); + return; + } + card = container_of(channel, struct lcs_card, read); + offset = 0; + while (lcs_hdr->offset != 0) { + if (lcs_hdr->offset <= 0 || + lcs_hdr->offset > LCS_IOBUFFERSIZE || + lcs_hdr->offset < offset) { + /* Offset invalid. */ + card->stats.rx_length_errors++; + card->stats.rx_errors++; + return; + } + /* What kind of frame is it? */ + if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL) + /* Control frame. */ + lcs_get_control(card, (struct lcs_cmd *) lcs_hdr); + else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET || + lcs_hdr->type == LCS_FRAME_TYPE_TR || + lcs_hdr->type == LCS_FRAME_TYPE_FDDI) + /* Normal network packet. */ + lcs_get_skb(card, (char *)(lcs_hdr + 1), + lcs_hdr->offset - offset - + sizeof(struct lcs_header)); + else + /* Unknown frame type. */ + ; // FIXME: error message ? + /* Proceed to next frame. */ + offset = lcs_hdr->offset; + lcs_hdr->offset = LCS_ILLEGAL_OFFSET; + lcs_hdr = (struct lcs_header *) (buffer->data + offset); + } + /* The buffer is now empty. Make it ready again. 
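+	 * lcs_ready_buffer() hands it back to the read channel so it can
+	 * be filled with further incoming frames.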
*/ + lcs_ready_buffer(&card->read, buffer); +} + +/** + * get network statistics for ifconfig and other user programs + */ +static struct net_device_stats * +lcs_getstats(struct net_device *dev) +{ + struct lcs_card *card; + + LCS_DBF_TEXT(4, trace, "netstats"); + card = (struct lcs_card *) dev->ml_priv; + return &card->stats; +} + +/** + * stop lcs device + * This function will be called by user doing ifconfig xxx down + */ +static int +lcs_stop_device(struct net_device *dev) +{ + struct lcs_card *card; + int rc; + + LCS_DBF_TEXT(2, trace, "stopdev"); + card = (struct lcs_card *) dev->ml_priv; + netif_carrier_off(dev); + netif_tx_disable(dev); + dev->flags &= ~IFF_UP; + wait_event(card->write.wait_q, + (card->write.state != LCS_CH_STATE_RUNNING)); + rc = lcs_stopcard(card); + if (rc) + dev_err(&card->dev->dev, + " Shutting down the LCS device failed\n"); + return rc; +} + +/** + * start lcs device and make it runnable + * This function will be called by user doing ifconfig xxx up + */ +static int +lcs_open_device(struct net_device *dev) +{ + struct lcs_card *card; + int rc; + + LCS_DBF_TEXT(2, trace, "opendev"); + card = (struct lcs_card *) dev->ml_priv; + /* initialize statistics */ + rc = lcs_detect(card); + if (rc) { + pr_err("Error in opening device!\n"); + + } else { + dev->flags |= IFF_UP; + netif_carrier_on(dev); + netif_wake_queue(dev); + card->state = DEV_STATE_UP; + } + return rc; +} + +/** + * show function for portno called by cat or similar things + */ +static ssize_t +lcs_portno_show (struct device *dev, struct device_attribute *attr, char *buf) +{ + struct lcs_card *card; + + card = dev_get_drvdata(dev); + + if (!card) + return 0; + + return sprintf(buf, "%d\n", card->portno); +} + +/** + * store the value which is piped to file portno + */ +static ssize_t +lcs_portno_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct lcs_card *card; + int rc; + s16 value; + + card = dev_get_drvdata(dev); + + if (!card) + return 0; + + rc = kstrtos16(buf, 0, &value); + if (rc) + return -EINVAL; + /* TODO: sanity checks */ + card->portno = value; + if (card->dev) + card->dev->dev_port = card->portno; + + return count; + +} + +static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store); + +static const char *lcs_type[] = { + "not a channel", + "2216 parallel", + "2216 channel", + "OSA LCS card", + "unknown channel type", + "unsupported channel type", +}; + +static ssize_t +lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ccwgroup_device *cgdev; + + cgdev = to_ccwgroupdev(dev); + if (!cgdev) + return -ENODEV; + + return sprintf(buf, "%s\n", lcs_type[cgdev->cdev[0]->id.driver_info]); +} + +static DEVICE_ATTR(type, 0444, lcs_type_show, NULL); + +static ssize_t +lcs_timeout_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct lcs_card *card; + + card = dev_get_drvdata(dev); + + return card ? 
sprintf(buf, "%u\n", card->lancmd_timeout) : 0; +} + +static ssize_t +lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct lcs_card *card; + unsigned int value; + int rc; + + card = dev_get_drvdata(dev); + + if (!card) + return 0; + + rc = kstrtouint(buf, 0, &value); + if (rc) + return -EINVAL; + /* TODO: sanity checks */ + card->lancmd_timeout = value; + + return count; + +} + +static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); + +static ssize_t +lcs_dev_recover_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct lcs_card *card = dev_get_drvdata(dev); + char *tmp; + int i; + + if (!card) + return -EINVAL; + if (card->state != DEV_STATE_UP) + return -EPERM; + i = simple_strtoul(buf, &tmp, 16); + if (i == 1) + lcs_schedule_recovery(card); + return count; +} + +static DEVICE_ATTR(recover, 0200, NULL, lcs_dev_recover_store); + +static struct attribute * lcs_attrs[] = { + &dev_attr_portno.attr, + &dev_attr_type.attr, + &dev_attr_lancmd_timeout.attr, + &dev_attr_recover.attr, + NULL, +}; +static struct attribute_group lcs_attr_group = { + .attrs = lcs_attrs, +}; +static const struct attribute_group *lcs_attr_groups[] = { + &lcs_attr_group, + NULL, +}; +static const struct device_type lcs_devtype = { + .name = "lcs", + .groups = lcs_attr_groups, +}; + +/** + * lcs_probe_device is called on establishing a new ccwgroup_device. + */ +static int +lcs_probe_device(struct ccwgroup_device *ccwgdev) +{ + struct lcs_card *card; + + if (!get_device(&ccwgdev->dev)) + return -ENODEV; + + LCS_DBF_TEXT(2, setup, "add_dev"); + card = lcs_alloc_card(); + if (!card) { + LCS_DBF_TEXT_(2, setup, " rc%d", -ENOMEM); + put_device(&ccwgdev->dev); + return -ENOMEM; + } + dev_set_drvdata(&ccwgdev->dev, card); + ccwgdev->cdev[0]->handler = lcs_irq; + ccwgdev->cdev[1]->handler = lcs_irq; + card->gdev = ccwgdev; + INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread); + card->thread_start_mask = 0; + card->thread_allowed_mask = 0; + card->thread_running_mask = 0; + ccwgdev->dev.type = &lcs_devtype; + + return 0; +} + +static int +lcs_register_netdev(struct ccwgroup_device *ccwgdev) +{ + struct lcs_card *card; + + LCS_DBF_TEXT(2, setup, "regnetdv"); + card = dev_get_drvdata(&ccwgdev->dev); + if (card->dev->reg_state != NETREG_UNINITIALIZED) + return 0; + SET_NETDEV_DEV(card->dev, &ccwgdev->dev); + return register_netdev(card->dev); +} + +/** + * lcs_new_device will be called by setting the group device online. 
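+ * It sets both CCW devices online, runs lcs_detect() to determine the
+ * LAN type and, on the first activation, allocates and registers the
+ * matching Ethernet or FDDI net_device.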
+ */ +static const struct net_device_ops lcs_netdev_ops = { + .ndo_open = lcs_open_device, + .ndo_stop = lcs_stop_device, + .ndo_get_stats = lcs_getstats, + .ndo_start_xmit = lcs_start_xmit, +}; + +static const struct net_device_ops lcs_mc_netdev_ops = { + .ndo_open = lcs_open_device, + .ndo_stop = lcs_stop_device, + .ndo_get_stats = lcs_getstats, + .ndo_start_xmit = lcs_start_xmit, + .ndo_set_rx_mode = lcs_set_multicast_list, +}; + +static int +lcs_new_device(struct ccwgroup_device *ccwgdev) +{ + struct lcs_card *card; + struct net_device *dev=NULL; + enum lcs_dev_states recover_state; + int rc; + + card = dev_get_drvdata(&ccwgdev->dev); + if (!card) + return -ENODEV; + + LCS_DBF_TEXT(2, setup, "newdev"); + LCS_DBF_HEX(3, setup, &card, sizeof(void*)); + card->read.ccwdev = ccwgdev->cdev[0]; + card->write.ccwdev = ccwgdev->cdev[1]; + + recover_state = card->state; + rc = ccw_device_set_online(card->read.ccwdev); + if (rc) + goto out_err; + rc = ccw_device_set_online(card->write.ccwdev); + if (rc) + goto out_werr; + + LCS_DBF_TEXT(3, setup, "lcsnewdv"); + + lcs_setup_card(card); + rc = lcs_detect(card); + if (rc) { + LCS_DBF_TEXT(2, setup, "dtctfail"); + dev_err(&ccwgdev->dev, + "Detecting a network adapter for LCS devices" + " failed with rc=%d (0x%x)\n", rc, rc); + lcs_stopcard(card); + goto out; + } + if (card->dev) { + LCS_DBF_TEXT(2, setup, "samedev"); + LCS_DBF_HEX(3, setup, &card, sizeof(void*)); + goto netdev_out; + } + switch (card->lan_type) { +#ifdef CONFIG_ETHERNET + case LCS_FRAME_TYPE_ENET: + card->lan_type_trans = eth_type_trans; + dev = alloc_etherdev(0); + break; +#endif +#ifdef CONFIG_FDDI + case LCS_FRAME_TYPE_FDDI: + card->lan_type_trans = fddi_type_trans; + dev = alloc_fddidev(0); + break; +#endif + default: + LCS_DBF_TEXT(3, setup, "errinit"); + pr_err(" Initialization failed\n"); + goto out; + } + if (!dev) + goto out; + card->dev = dev; + card->dev->ml_priv = card; + card->dev->netdev_ops = &lcs_netdev_ops; + card->dev->dev_port = card->portno; + memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH); +#ifdef CONFIG_IP_MULTICAST + if (!lcs_check_multicast_support(card)) + card->dev->netdev_ops = &lcs_mc_netdev_ops; +#endif +netdev_out: + lcs_set_allowed_threads(card,0xffffffff); + if (recover_state == DEV_STATE_RECOVER) { + lcs_set_multicast_list(card->dev); + card->dev->flags |= IFF_UP; + netif_carrier_on(card->dev); + netif_wake_queue(card->dev); + card->state = DEV_STATE_UP; + } else { + lcs_stopcard(card); + } + + if (lcs_register_netdev(ccwgdev) != 0) + goto out; + + /* Print out supported assists: IPv6 */ + pr_info("LCS device %s %s IPv6 support\n", card->dev->name, + (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ? + "with" : "without"); + /* Print out supported assist: Multicast */ + pr_info("LCS device %s %s Multicast support\n", card->dev->name, + (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ? + "with" : "without"); + return 0; +out: + + ccw_device_set_offline(card->write.ccwdev); +out_werr: + ccw_device_set_offline(card->read.ccwdev); +out_err: + return -ENODEV; +} + +/** + * lcs_shutdown_device, called when setting the group device offline. 
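+ * The net_device is stopped, both CCW devices are set offline and a
+ * card that was up is put into DEV_STATE_RECOVER so that a later
+ * set_online can bring the interface back up.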
+ */ +static int +__lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode) +{ + struct lcs_card *card; + enum lcs_dev_states recover_state; + int ret = 0, ret2 = 0, ret3 = 0; + + LCS_DBF_TEXT(3, setup, "shtdndev"); + card = dev_get_drvdata(&ccwgdev->dev); + if (!card) + return -ENODEV; + if (recovery_mode == 0) { + lcs_set_allowed_threads(card, 0); + if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD)) + return -ERESTARTSYS; + } + LCS_DBF_HEX(3, setup, &card, sizeof(void*)); + recover_state = card->state; + + ret = lcs_stop_device(card->dev); + ret2 = ccw_device_set_offline(card->read.ccwdev); + ret3 = ccw_device_set_offline(card->write.ccwdev); + if (!ret) + ret = (ret2) ? ret2 : ret3; + if (ret) + LCS_DBF_TEXT_(3, setup, "1err:%d", ret); + if (recover_state == DEV_STATE_UP) { + card->state = DEV_STATE_RECOVER; + } + return 0; +} + +static int +lcs_shutdown_device(struct ccwgroup_device *ccwgdev) +{ + return __lcs_shutdown_device(ccwgdev, 0); +} + +/** + * drive lcs recovery after startup and startlan initiated by Lan Gateway + */ +static int +lcs_recovery(void *ptr) +{ + struct lcs_card *card; + struct ccwgroup_device *gdev; + int rc; + + card = (struct lcs_card *) ptr; + + LCS_DBF_TEXT(4, trace, "recover1"); + if (!lcs_do_run_thread(card, LCS_RECOVERY_THREAD)) + return 0; + LCS_DBF_TEXT(4, trace, "recover2"); + gdev = card->gdev; + dev_warn(&gdev->dev, + "A recovery process has been started for the LCS device\n"); + rc = __lcs_shutdown_device(gdev, 1); + rc = lcs_new_device(gdev); + if (!rc) + pr_info("Device %s successfully recovered!\n", + card->dev->name); + else + pr_info("Device %s could not be recovered!\n", + card->dev->name); + lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD); + return 0; +} + +/** + * lcs_remove_device, free buffers and card + */ +static void +lcs_remove_device(struct ccwgroup_device *ccwgdev) +{ + struct lcs_card *card; + + card = dev_get_drvdata(&ccwgdev->dev); + if (!card) + return; + + LCS_DBF_TEXT(3, setup, "remdev"); + LCS_DBF_HEX(3, setup, &card, sizeof(void*)); + if (ccwgdev->state == CCWGROUP_ONLINE) { + lcs_shutdown_device(ccwgdev); + } + if (card->dev) + unregister_netdev(card->dev); + lcs_cleanup_card(card); + lcs_free_card(card); + dev_set_drvdata(&ccwgdev->dev, NULL); + put_device(&ccwgdev->dev); +} + +static struct ccw_device_id lcs_ids[] = { + {CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel}, + {CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216}, + {CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2}, + {}, +}; +MODULE_DEVICE_TABLE(ccw, lcs_ids); + +static struct ccw_driver lcs_ccw_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "lcs", + }, + .ids = lcs_ids, + .probe = ccwgroup_probe_ccwdev, + .remove = ccwgroup_remove_ccwdev, + .int_class = IRQIO_LCS, +}; + +/** + * LCS ccwgroup driver registration + */ +static struct ccwgroup_driver lcs_group_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "lcs", + }, + .ccw_driver = &lcs_ccw_driver, + .setup = lcs_probe_device, + .remove = lcs_remove_device, + .set_online = lcs_new_device, + .set_offline = lcs_shutdown_device, +}; + +static ssize_t group_store(struct device_driver *ddrv, const char *buf, + size_t count) +{ + int err; + err = ccwgroup_create_dev(lcs_root_dev, &lcs_group_driver, 2, buf); + return err ? 
err : count; +} +static DRIVER_ATTR_WO(group); + +static struct attribute *lcs_drv_attrs[] = { + &driver_attr_group.attr, + NULL, +}; +static struct attribute_group lcs_drv_attr_group = { + .attrs = lcs_drv_attrs, +}; +static const struct attribute_group *lcs_drv_attr_groups[] = { + &lcs_drv_attr_group, + NULL, +}; + +/** + * LCS Module/Kernel initialization function + */ +static int +__init lcs_init_module(void) +{ + int rc; + + pr_info("Loading %s\n", version); + rc = lcs_register_debug_facility(); + LCS_DBF_TEXT(0, setup, "lcsinit"); + if (rc) + goto out_err; + lcs_root_dev = root_device_register("lcs"); + rc = PTR_ERR_OR_ZERO(lcs_root_dev); + if (rc) + goto register_err; + rc = ccw_driver_register(&lcs_ccw_driver); + if (rc) + goto ccw_err; + lcs_group_driver.driver.groups = lcs_drv_attr_groups; + rc = ccwgroup_driver_register(&lcs_group_driver); + if (rc) + goto ccwgroup_err; + return 0; + +ccwgroup_err: + ccw_driver_unregister(&lcs_ccw_driver); +ccw_err: + root_device_unregister(lcs_root_dev); +register_err: + lcs_unregister_debug_facility(); +out_err: + pr_err("Initializing the lcs device driver failed\n"); + return rc; +} + + +/** + * LCS module cleanup function + */ +static void +__exit lcs_cleanup_module(void) +{ + pr_info("Terminating lcs module.\n"); + LCS_DBF_TEXT(0, trace, "cleanup"); + ccwgroup_driver_unregister(&lcs_group_driver); + ccw_driver_unregister(&lcs_ccw_driver); + root_device_unregister(lcs_root_dev); + lcs_unregister_debug_facility(); +} + +module_init(lcs_init_module); +module_exit(lcs_cleanup_module); + +MODULE_AUTHOR("Frank Pavlic <fpavlic@de.ibm.com>"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h new file mode 100644 index 000000000..bd52caa3b --- /dev/null +++ b/drivers/s390/net/lcs.h @@ -0,0 +1,342 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/*lcs.h*/ + +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/workqueue.h> +#include <linux/refcount.h> +#include <asm/ccwdev.h> + +#define LCS_DBF_TEXT(level, name, text) \ + do { \ + debug_text_event(lcs_dbf_##name, level, text); \ + } while (0) + +#define LCS_DBF_HEX(level,name,addr,len) \ +do { \ + debug_event(lcs_dbf_##name,level,(void*)(addr),len); \ +} while (0) + +#define LCS_DBF_TEXT_(level,name,text...) \ + do { \ + if (debug_level_enabled(lcs_dbf_##name, level)) { \ + sprintf(debug_buffer, text); \ + debug_text_event(lcs_dbf_##name, level, debug_buffer); \ + } \ + } while (0) + +/** + * sysfs related stuff + */ +#define CARD_FROM_DEV(cdev) \ + (struct lcs_card *) dev_get_drvdata( \ + &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev); + +/** + * Enum for classifying detected devices. 
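+ * The values index the lcs_type[] strings shown by the "type" sysfs
+ * attribute and correspond to the driver_info of the matching entry
+ * in lcs_ids[].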
+ */ +enum lcs_channel_types { + /* Device is not a channel */ + lcs_channel_type_none, + + /* Device is a 2216 channel */ + lcs_channel_type_parallel, + + /* Device is a 2216 channel */ + lcs_channel_type_2216, + + /* Device is a OSA2 card */ + lcs_channel_type_osa2 +}; + +/** + * CCW commands used in this driver + */ +#define LCS_CCW_WRITE 0x01 +#define LCS_CCW_READ 0x02 +#define LCS_CCW_TRANSFER 0x08 + +/** + * LCS device status primitives + */ +#define LCS_CMD_STARTLAN 0x01 +#define LCS_CMD_STOPLAN 0x02 +#define LCS_CMD_LANSTAT 0x04 +#define LCS_CMD_STARTUP 0x07 +#define LCS_CMD_SHUTDOWN 0x08 +#define LCS_CMD_QIPASSIST 0xb2 +#define LCS_CMD_SETIPM 0xb4 +#define LCS_CMD_DELIPM 0xb5 + +#define LCS_INITIATOR_TCPIP 0x00 +#define LCS_INITIATOR_LGW 0x01 +#define LCS_STD_CMD_SIZE 16 +#define LCS_MULTICAST_CMD_SIZE 404 + +/** + * LCS IPASSIST MASKS,only used when multicast is switched on + */ +/* Not supported by LCS */ +#define LCS_IPASS_ARP_PROCESSING 0x0001 +#define LCS_IPASS_IN_CHECKSUM_SUPPORT 0x0002 +#define LCS_IPASS_OUT_CHECKSUM_SUPPORT 0x0004 +#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008 +#define LCS_IPASS_IP_FILTERING 0x0010 +/* Supported by lcs 3172 */ +#define LCS_IPASS_IPV6_SUPPORT 0x0020 +#define LCS_IPASS_MULTICAST_SUPPORT 0x0040 + +/** + * LCS sense byte definitions + */ +#define LCS_SENSE_BYTE_0 0 +#define LCS_SENSE_BYTE_1 1 +#define LCS_SENSE_BYTE_2 2 +#define LCS_SENSE_BYTE_3 3 +#define LCS_SENSE_INTERFACE_DISCONNECT 0x01 +#define LCS_SENSE_EQUIPMENT_CHECK 0x10 +#define LCS_SENSE_BUS_OUT_CHECK 0x20 +#define LCS_SENSE_INTERVENTION_REQUIRED 0x40 +#define LCS_SENSE_CMD_REJECT 0x80 +#define LCS_SENSE_RESETTING_EVENT 0x80 +#define LCS_SENSE_DEVICE_ONLINE 0x20 + +/** + * LCS packet type definitions + */ +#define LCS_FRAME_TYPE_CONTROL 0 +#define LCS_FRAME_TYPE_ENET 1 +#define LCS_FRAME_TYPE_TR 2 +#define LCS_FRAME_TYPE_FDDI 7 +#define LCS_FRAME_TYPE_AUTO -1 + +/** + * some more definitions,we will sort them later + */ +#define LCS_ILLEGAL_OFFSET 0xffff +#define LCS_IOBUFFERSIZE 0x5000 +#define LCS_NUM_BUFFS 32 /* needs to be power of 2 */ +#define LCS_MAC_LENGTH 6 +#define LCS_INVALID_PORT_NO -1 +#define LCS_LANCMD_TIMEOUT_DEFAULT 5 + +/** + * Multicast state + */ +#define LCS_IPM_STATE_SET_REQUIRED 0 +#define LCS_IPM_STATE_DEL_REQUIRED 1 +#define LCS_IPM_STATE_ON_CARD 2 + +/** + * LCS IP Assist declarations + * seems to be only used for multicast + */ +#define LCS_IPASS_ARP_PROCESSING 0x0001 +#define LCS_IPASS_INBOUND_CSUM_SUPP 0x0002 +#define LCS_IPASS_OUTBOUND_CSUM_SUPP 0x0004 +#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008 +#define LCS_IPASS_IP_FILTERING 0x0010 +#define LCS_IPASS_IPV6_SUPPORT 0x0020 +#define LCS_IPASS_MULTICAST_SUPPORT 0x0040 + +/** + * LCS Buffer states + */ +enum lcs_buffer_states { + LCS_BUF_STATE_EMPTY, /* buffer is empty */ + LCS_BUF_STATE_LOCKED, /* buffer is locked, don't touch */ + LCS_BUF_STATE_READY, /* buffer is ready for read/write */ + LCS_BUF_STATE_PROCESSED, +}; + +/** + * LCS Channel State Machine declarations + */ +enum lcs_channel_states { + LCS_CH_STATE_INIT, + LCS_CH_STATE_HALTED, + LCS_CH_STATE_STOPPED, + LCS_CH_STATE_RUNNING, + LCS_CH_STATE_SUSPENDED, + LCS_CH_STATE_CLEARED, + LCS_CH_STATE_ERROR, +}; + +/** + * LCS device state machine + */ +enum lcs_dev_states { + DEV_STATE_DOWN, + DEV_STATE_UP, + DEV_STATE_RECOVER, +}; + +enum lcs_threads { + LCS_SET_MC_THREAD = 1, + LCS_RECOVERY_THREAD = 2, +}; + +/** + * LCS struct declarations + */ +struct lcs_header { + __u16 offset; + __u8 type; + __u8 slot; +} __attribute__ ((packed)); + +struct 
lcs_ip_mac_pair { + __be32 ip_addr; + __u8 mac_addr[LCS_MAC_LENGTH]; + __u8 reserved[2]; +} __attribute__ ((packed)); + +struct lcs_ipm_list { + struct list_head list; + struct lcs_ip_mac_pair ipm; + __u8 ipm_state; +}; + +struct lcs_cmd { + __u16 offset; + __u8 type; + __u8 slot; + __u8 cmd_code; + __u8 initiator; + __u16 sequence_no; + __u16 return_code; + union { + struct { + __u8 lan_type; + __u8 portno; + __u16 parameter_count; + __u8 operator_flags[3]; + __u8 reserved[3]; + } lcs_std_cmd; + struct { + __u16 unused1; + __u16 buff_size; + __u8 unused2[6]; + } lcs_startup; + struct { + __u8 lan_type; + __u8 portno; + __u8 unused[10]; + __u8 mac_addr[LCS_MAC_LENGTH]; + __u32 num_packets_deblocked; + __u32 num_packets_blocked; + __u32 num_packets_tx_on_lan; + __u32 num_tx_errors_detected; + __u32 num_tx_packets_disgarded; + __u32 num_packets_rx_from_lan; + __u32 num_rx_errors_detected; + __u32 num_rx_discarded_nobuffs_avail; + __u32 num_rx_packets_too_large; + } lcs_lanstat_cmd; +#ifdef CONFIG_IP_MULTICAST + struct { + __u8 lan_type; + __u8 portno; + __u16 num_ip_pairs; + __u16 ip_assists_supported; + __u16 ip_assists_enabled; + __u16 version; + struct { + struct lcs_ip_mac_pair + ip_mac_pair[32]; + __u32 response_data; + } lcs_ipass_ctlmsg __attribute ((packed)); + } lcs_qipassist __attribute__ ((packed)); +#endif /*CONFIG_IP_MULTICAST */ + } cmd __attribute__ ((packed)); +} __attribute__ ((packed)); + +/** + * Forward declarations. + */ +struct lcs_card; +struct lcs_channel; + +/** + * Definition of an lcs buffer. + */ +struct lcs_buffer { + enum lcs_buffer_states state; + void *data; + int count; + /* Callback for completion notification. */ + void (*callback)(struct lcs_channel *, struct lcs_buffer *); +}; + +struct lcs_reply { + struct list_head list; + __u16 sequence_no; + refcount_t refcnt; + /* Callback for completion notification. 
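+	 * Called with the card and the reply command when the matching
+	 * reply arrives, see lcs_notify_lancmd_waiters().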
*/ + void (*callback)(struct lcs_card *, struct lcs_cmd *); + wait_queue_head_t wait_q; + struct lcs_card *card; + struct timer_list timer; + int received; + int rc; +}; + +/** + * Definition of an lcs channel + */ +struct lcs_channel { + enum lcs_channel_states state; + struct ccw_device *ccwdev; + struct ccw1 ccws[LCS_NUM_BUFFS + 1]; + wait_queue_head_t wait_q; + struct tasklet_struct irq_tasklet; + struct lcs_buffer iob[LCS_NUM_BUFFS]; + int io_idx; + int buf_idx; +}; + + +/** + * definition of the lcs card + */ +struct lcs_card { + spinlock_t lock; + spinlock_t ipm_lock; + enum lcs_dev_states state; + struct net_device *dev; + struct net_device_stats stats; + __be16 (*lan_type_trans)(struct sk_buff *skb, + struct net_device *dev); + struct ccwgroup_device *gdev; + struct lcs_channel read; + struct lcs_channel write; + struct lcs_buffer *tx_buffer; + int tx_emitted; + struct list_head lancmd_waiters; + int lancmd_timeout; + + struct work_struct kernel_thread_starter; + spinlock_t mask_lock; + unsigned long thread_start_mask; + unsigned long thread_running_mask; + unsigned long thread_allowed_mask; + wait_queue_head_t wait_q; + +#ifdef CONFIG_IP_MULTICAST + struct list_head ipm_list; +#endif + __u8 mac[LCS_MAC_LENGTH]; + __u16 ip_assists_supported; + __u16 ip_assists_enabled; + __s8 lan_type; + __u32 pkt_seq; + __u16 sequence_no; + __s16 portno; + /* Some info copied from probeinfo */ + u8 device_forced; + u8 max_port_no; + u8 hint_port_no; + s16 port_protocol_no; +} __attribute__ ((aligned(8))); + diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c new file mode 100644 index 000000000..a2f403c4e --- /dev/null +++ b/drivers/s390/net/netiucv.c @@ -0,0 +1,2107 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * IUCV network driver + * + * Copyright IBM Corp. 2001, 2009 + * + * Author(s): + * Original netiucv driver: + * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) + * Sysfs integration and all bugs therein: + * Cornelia Huck (cornelia.huck@de.ibm.com) + * PM functions: + * Ursula Braun (ursula.braun@de.ibm.com) + * + * Documentation used: + * the source of the original IUCV driver by: + * Stefan Hegewald <hegewald@de.ibm.com> + * Hartmut Penner <hpenner@de.ibm.com> + * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) + * Martin Schwidefsky (schwidefsky@de.ibm.com) + * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 
2000 + */ + +#define KMSG_COMPONENT "netiucv" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#undef DEBUG + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/bitops.h> + +#include <linux/signal.h> +#include <linux/string.h> +#include <linux/device.h> + +#include <linux/ip.h> +#include <linux/if_arp.h> +#include <linux/tcp.h> +#include <linux/skbuff.h> +#include <linux/ctype.h> +#include <net/dst.h> + +#include <asm/io.h> +#include <linux/uaccess.h> +#include <asm/ebcdic.h> + +#include <net/iucv/iucv.h> +#include "fsm.h" + +MODULE_AUTHOR + ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)"); +MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); + +/** + * Debug Facility stuff + */ +#define IUCV_DBF_SETUP_NAME "iucv_setup" +#define IUCV_DBF_SETUP_LEN 64 +#define IUCV_DBF_SETUP_PAGES 2 +#define IUCV_DBF_SETUP_NR_AREAS 1 +#define IUCV_DBF_SETUP_LEVEL 3 + +#define IUCV_DBF_DATA_NAME "iucv_data" +#define IUCV_DBF_DATA_LEN 128 +#define IUCV_DBF_DATA_PAGES 2 +#define IUCV_DBF_DATA_NR_AREAS 1 +#define IUCV_DBF_DATA_LEVEL 2 + +#define IUCV_DBF_TRACE_NAME "iucv_trace" +#define IUCV_DBF_TRACE_LEN 16 +#define IUCV_DBF_TRACE_PAGES 4 +#define IUCV_DBF_TRACE_NR_AREAS 1 +#define IUCV_DBF_TRACE_LEVEL 3 + +#define IUCV_DBF_TEXT(name,level,text) \ + do { \ + debug_text_event(iucv_dbf_##name,level,text); \ + } while (0) + +#define IUCV_DBF_HEX(name,level,addr,len) \ + do { \ + debug_event(iucv_dbf_##name,level,(void*)(addr),len); \ + } while (0) + +DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf); + +#define IUCV_DBF_TEXT_(name, level, text...) \ + do { \ + if (debug_level_enabled(iucv_dbf_##name, level)) { \ + char* __buf = get_cpu_var(iucv_dbf_txt_buf); \ + sprintf(__buf, text); \ + debug_text_event(iucv_dbf_##name, level, __buf); \ + put_cpu_var(iucv_dbf_txt_buf); \ + } \ + } while (0) + +#define IUCV_DBF_SPRINTF(name,level,text...) 
\ + do { \ + debug_sprintf_event(iucv_dbf_trace, level, ##text ); \ + debug_sprintf_event(iucv_dbf_trace, level, text ); \ + } while (0) + +/** + * some more debug stuff + */ +#define PRINTK_HEADER " iucv: " /* for debugging */ + +static struct device_driver netiucv_driver = { + .owner = THIS_MODULE, + .name = "netiucv", + .bus = &iucv_bus, +}; + +static int netiucv_callback_connreq(struct iucv_path *, u8 *, u8 *); +static void netiucv_callback_connack(struct iucv_path *, u8 *); +static void netiucv_callback_connrej(struct iucv_path *, u8 *); +static void netiucv_callback_connsusp(struct iucv_path *, u8 *); +static void netiucv_callback_connres(struct iucv_path *, u8 *); +static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *); +static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *); + +static struct iucv_handler netiucv_handler = { + .path_pending = netiucv_callback_connreq, + .path_complete = netiucv_callback_connack, + .path_severed = netiucv_callback_connrej, + .path_quiesced = netiucv_callback_connsusp, + .path_resumed = netiucv_callback_connres, + .message_pending = netiucv_callback_rx, + .message_complete = netiucv_callback_txdone +}; + +/** + * Per connection profiling data + */ +struct connection_profile { + unsigned long maxmulti; + unsigned long maxcqueue; + unsigned long doios_single; + unsigned long doios_multi; + unsigned long txlen; + unsigned long tx_time; + unsigned long send_stamp; + unsigned long tx_pending; + unsigned long tx_max_pending; +}; + +/** + * Representation of one iucv connection + */ +struct iucv_connection { + struct list_head list; + struct iucv_path *path; + struct sk_buff *rx_buff; + struct sk_buff *tx_buff; + struct sk_buff_head collect_queue; + struct sk_buff_head commit_queue; + spinlock_t collect_lock; + int collect_len; + int max_buffsize; + fsm_timer timer; + fsm_instance *fsm; + struct net_device *netdev; + struct connection_profile prof; + char userid[9]; + char userdata[17]; +}; + +/** + * Linked list of all connection structs. + */ +static LIST_HEAD(iucv_connection_list); +static DEFINE_RWLOCK(iucv_connection_rwlock); + +/** + * Representation of event-data for the + * connection state machine. + */ +struct iucv_event { + struct iucv_connection *conn; + void *data; +}; + +/** + * Private part of the network device structure + */ +struct netiucv_priv { + struct net_device_stats stats; + unsigned long tbusy; + fsm_instance *fsm; + struct iucv_connection *conn; + struct device *dev; +}; + +/** + * Link level header for a packet. + */ +struct ll_header { + u16 next; +}; + +#define NETIUCV_HDRLEN (sizeof(struct ll_header)) +#define NETIUCV_BUFSIZE_MAX 65537 +#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX +#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN) +#define NETIUCV_MTU_DEFAULT 9216 +#define NETIUCV_QUEUELEN_DEFAULT 50 +#define NETIUCV_TIMEOUT_5SEC 5000 + +/** + * Compatibility macros for busy handling + * of network devices. 
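+ * The tbusy bit in struct netiucv_priv serializes transmission:
+ * netiucv_tx() returns NETDEV_TX_BUSY while it is set and
+ * netiucv_clear_busy() reopens the queue.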
+ */ +static void netiucv_clear_busy(struct net_device *dev) +{ + struct netiucv_priv *priv = netdev_priv(dev); + clear_bit(0, &priv->tbusy); + netif_wake_queue(dev); +} + +static int netiucv_test_and_set_busy(struct net_device *dev) +{ + struct netiucv_priv *priv = netdev_priv(dev); + netif_stop_queue(dev); + return test_and_set_bit(0, &priv->tbusy); +} + +static u8 iucvMagic_ascii[16] = { + 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20 +}; + +static u8 iucvMagic_ebcdic[16] = { + 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, + 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 +}; + +/** + * Convert an iucv userId to its printable + * form (strip whitespace at end). + * + * @param An iucv userId + * + * @returns The printable string (static data!!) + */ +static char *netiucv_printname(char *name, int len) +{ + static char tmp[17]; + char *p = tmp; + memcpy(tmp, name, len); + tmp[len] = '\0'; + while (*p && ((p - tmp) < len) && (!isspace(*p))) + p++; + *p = '\0'; + return tmp; +} + +static char *netiucv_printuser(struct iucv_connection *conn) +{ + static char tmp_uid[9]; + static char tmp_udat[17]; + static char buf[100]; + + if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) { + tmp_uid[8] = '\0'; + tmp_udat[16] = '\0'; + memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8); + memcpy(tmp_udat, conn->userdata, 16); + EBCASC(tmp_udat, 16); + memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16); + sprintf(buf, "%s.%s", tmp_uid, tmp_udat); + return buf; + } else + return netiucv_printname(conn->userid, 8); +} + +/** + * States of the interface statemachine. + */ +enum dev_states { + DEV_STATE_STOPPED, + DEV_STATE_STARTWAIT, + DEV_STATE_STOPWAIT, + DEV_STATE_RUNNING, + /** + * MUST be always the last element!! + */ + NR_DEV_STATES +}; + +static const char *dev_state_names[] = { + "Stopped", + "StartWait", + "StopWait", + "Running", +}; + +/** + * Events of the interface statemachine. + */ +enum dev_events { + DEV_EVENT_START, + DEV_EVENT_STOP, + DEV_EVENT_CONUP, + DEV_EVENT_CONDOWN, + /** + * MUST be always the last element!! + */ + NR_DEV_EVENTS +}; + +static const char *dev_event_names[] = { + "Start", + "Stop", + "Connection up", + "Connection down", +}; + +/** + * Events of the connection statemachine + */ +enum conn_events { + /** + * Events, representing callbacks from + * lowlevel iucv layer) + */ + CONN_EVENT_CONN_REQ, + CONN_EVENT_CONN_ACK, + CONN_EVENT_CONN_REJ, + CONN_EVENT_CONN_SUS, + CONN_EVENT_CONN_RES, + CONN_EVENT_RX, + CONN_EVENT_TXDONE, + + /** + * Events, representing errors return codes from + * calls to lowlevel iucv layer + */ + + /** + * Event, representing timer expiry. + */ + CONN_EVENT_TIMER, + + /** + * Events, representing commands from upper levels. + */ + CONN_EVENT_START, + CONN_EVENT_STOP, + + /** + * MUST be always the last element!! + */ + NR_CONN_EVENTS, +}; + +static const char *conn_event_names[] = { + "Remote connection request", + "Remote connection acknowledge", + "Remote connection reject", + "Connection suspended", + "Connection resumed", + "Data received", + "Data sent", + + "Timer", + + "Start", + "Stop", +}; + +/** + * States of the connection statemachine. 
+ */ +enum conn_states { + /** + * Connection not assigned to any device, + * initial state, invalid + */ + CONN_STATE_INVALID, + + /** + * Userid assigned but not operating + */ + CONN_STATE_STOPPED, + + /** + * Connection registered, + * no connection request sent yet, + * no connection request received + */ + CONN_STATE_STARTWAIT, + + /** + * Connection registered and connection request sent, + * no acknowledge and no connection request received yet. + */ + CONN_STATE_SETUPWAIT, + + /** + * Connection up and running idle + */ + CONN_STATE_IDLE, + + /** + * Data sent, awaiting CONN_EVENT_TXDONE + */ + CONN_STATE_TX, + + /** + * Error during registration. + */ + CONN_STATE_REGERR, + + /** + * Error during registration. + */ + CONN_STATE_CONNERR, + + /** + * MUST be always the last element!! + */ + NR_CONN_STATES, +}; + +static const char *conn_state_names[] = { + "Invalid", + "Stopped", + "StartWait", + "SetupWait", + "Idle", + "TX", + "Terminating", + "Registration error", + "Connect error", +}; + + +/** + * Debug Facility Stuff + */ +static debug_info_t *iucv_dbf_setup = NULL; +static debug_info_t *iucv_dbf_data = NULL; +static debug_info_t *iucv_dbf_trace = NULL; + +DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf); + +static void iucv_unregister_dbf_views(void) +{ + debug_unregister(iucv_dbf_setup); + debug_unregister(iucv_dbf_data); + debug_unregister(iucv_dbf_trace); +} +static int iucv_register_dbf_views(void) +{ + iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME, + IUCV_DBF_SETUP_PAGES, + IUCV_DBF_SETUP_NR_AREAS, + IUCV_DBF_SETUP_LEN); + iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME, + IUCV_DBF_DATA_PAGES, + IUCV_DBF_DATA_NR_AREAS, + IUCV_DBF_DATA_LEN); + iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME, + IUCV_DBF_TRACE_PAGES, + IUCV_DBF_TRACE_NR_AREAS, + IUCV_DBF_TRACE_LEN); + + if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) || + (iucv_dbf_trace == NULL)) { + iucv_unregister_dbf_views(); + return -ENOMEM; + } + debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view); + debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL); + + debug_register_view(iucv_dbf_data, &debug_hex_ascii_view); + debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL); + + debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view); + debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL); + + return 0; +} + +/* + * Callback-wrappers, called from lowlevel iucv layer. 
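+ * Each wrapper translates an iucv_handler callback into the
+ * corresponding CONN_EVENT_* event for the connection state machine.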
+ */ + +static void netiucv_callback_rx(struct iucv_path *path, + struct iucv_message *msg) +{ + struct iucv_connection *conn = path->private; + struct iucv_event ev; + + ev.conn = conn; + ev.data = msg; + fsm_event(conn->fsm, CONN_EVENT_RX, &ev); +} + +static void netiucv_callback_txdone(struct iucv_path *path, + struct iucv_message *msg) +{ + struct iucv_connection *conn = path->private; + struct iucv_event ev; + + ev.conn = conn; + ev.data = msg; + fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev); +} + +static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16]) +{ + struct iucv_connection *conn = path->private; + + fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn); +} + +static int netiucv_callback_connreq(struct iucv_path *path, u8 *ipvmid, + u8 *ipuser) +{ + struct iucv_connection *conn = path->private; + struct iucv_event ev; + static char tmp_user[9]; + static char tmp_udat[17]; + int rc; + + rc = -EINVAL; + memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8); + memcpy(tmp_udat, ipuser, 16); + EBCASC(tmp_udat, 16); + read_lock_bh(&iucv_connection_rwlock); + list_for_each_entry(conn, &iucv_connection_list, list) { + if (strncmp(ipvmid, conn->userid, 8) || + strncmp(ipuser, conn->userdata, 16)) + continue; + /* Found a matching connection for this path. */ + conn->path = path; + ev.conn = conn; + ev.data = path; + fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev); + rc = 0; + } + IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n", + tmp_user, netiucv_printname(tmp_udat, 16)); + read_unlock_bh(&iucv_connection_rwlock); + return rc; +} + +static void netiucv_callback_connrej(struct iucv_path *path, u8 *ipuser) +{ + struct iucv_connection *conn = path->private; + + fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn); +} + +static void netiucv_callback_connsusp(struct iucv_path *path, u8 *ipuser) +{ + struct iucv_connection *conn = path->private; + + fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn); +} + +static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser) +{ + struct iucv_connection *conn = path->private; + + fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn); +} + +/** + * NOP action for statemachines + */ +static void netiucv_action_nop(fsm_instance *fi, int event, void *arg) +{ +} + +/* + * Actions of the connection statemachine + */ + +/** + * netiucv_unpack_skb + * @conn: The connection where this skb has been received. + * @pskb: The received skb. + * + * Unpack a just received skb and hand it over to upper layers. + * Helper function for conn_action_rx. 
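+ *
+ * The receive buffer holds a chain of packets, each preceded by a
+ * struct ll_header whose 16-bit "next" field is the offset of the
+ * following header; a header with next == 0 terminates the chain:
+ *
+ *   ll_header | packet | ll_header | packet | ... | ll_header(next=0)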
+ */ +static void netiucv_unpack_skb(struct iucv_connection *conn, + struct sk_buff *pskb) +{ + struct net_device *dev = conn->netdev; + struct netiucv_priv *privptr = netdev_priv(dev); + u16 offset = 0; + + skb_put(pskb, NETIUCV_HDRLEN); + pskb->dev = dev; + pskb->ip_summed = CHECKSUM_NONE; + pskb->protocol = cpu_to_be16(ETH_P_IP); + + while (1) { + struct sk_buff *skb; + struct ll_header *header = (struct ll_header *) pskb->data; + + if (!header->next) + break; + + skb_pull(pskb, NETIUCV_HDRLEN); + header->next -= offset; + offset += header->next; + header->next -= NETIUCV_HDRLEN; + if (skb_tailroom(pskb) < header->next) { + IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n", + header->next, skb_tailroom(pskb)); + return; + } + skb_put(pskb, header->next); + skb_reset_mac_header(pskb); + skb = dev_alloc_skb(pskb->len); + if (!skb) { + IUCV_DBF_TEXT(data, 2, + "Out of memory in netiucv_unpack_skb\n"); + privptr->stats.rx_dropped++; + return; + } + skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len), + pskb->len); + skb_reset_mac_header(skb); + skb->dev = pskb->dev; + skb->protocol = pskb->protocol; + pskb->ip_summed = CHECKSUM_UNNECESSARY; + privptr->stats.rx_packets++; + privptr->stats.rx_bytes += skb->len; + /* + * Since receiving is always initiated from a tasklet (in iucv.c), + * we must use netif_rx_ni() instead of netif_rx() + */ + netif_rx_ni(skb); + skb_pull(pskb, header->next); + skb_put(pskb, NETIUCV_HDRLEN); + } +} + +static void conn_action_rx(fsm_instance *fi, int event, void *arg) +{ + struct iucv_event *ev = arg; + struct iucv_connection *conn = ev->conn; + struct iucv_message *msg = ev->data; + struct netiucv_priv *privptr = netdev_priv(conn->netdev); + int rc; + + IUCV_DBF_TEXT(trace, 4, __func__); + + if (!conn->netdev) { + iucv_message_reject(conn->path, msg); + IUCV_DBF_TEXT(data, 2, + "Received data for unlinked connection\n"); + return; + } + if (msg->length > conn->max_buffsize) { + iucv_message_reject(conn->path, msg); + privptr->stats.rx_dropped++; + IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", + msg->length, conn->max_buffsize); + return; + } + conn->rx_buff->data = conn->rx_buff->head; + skb_reset_tail_pointer(conn->rx_buff); + conn->rx_buff->len = 0; + rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data, + msg->length, NULL); + if (rc || msg->length < 5) { + privptr->stats.rx_errors++; + IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); + return; + } + netiucv_unpack_skb(conn, conn->rx_buff); +} + +static void conn_action_txdone(fsm_instance *fi, int event, void *arg) +{ + struct iucv_event *ev = arg; + struct iucv_connection *conn = ev->conn; + struct iucv_message *msg = ev->data; + struct iucv_message txmsg; + struct netiucv_priv *privptr = NULL; + u32 single_flag = msg->tag; + u32 txbytes = 0; + u32 txpackets = 0; + u32 stat_maxcq = 0; + struct sk_buff *skb; + unsigned long saveflags; + struct ll_header header; + int rc; + + IUCV_DBF_TEXT(trace, 4, __func__); + + if (!conn || !conn->netdev) { + IUCV_DBF_TEXT(data, 2, + "Send confirmation for unlinked connection\n"); + return; + } + privptr = netdev_priv(conn->netdev); + conn->prof.tx_pending--; + if (single_flag) { + if ((skb = skb_dequeue(&conn->commit_queue))) { + refcount_dec(&skb->users); + if (privptr) { + privptr->stats.tx_packets++; + privptr->stats.tx_bytes += + (skb->len - NETIUCV_HDRLEN + - NETIUCV_HDRLEN); + } + dev_kfree_skb_any(skb); + } + } + conn->tx_buff->data = conn->tx_buff->head; + skb_reset_tail_pointer(conn->tx_buff); + conn->tx_buff->len = 0; + 
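+	/*
+	 * Refill the reset tx buffer from the collect queue and, if any
+	 * packets were collected, send them as one multi-packet message.
+	 */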
spin_lock_irqsave(&conn->collect_lock, saveflags); + while ((skb = skb_dequeue(&conn->collect_queue))) { + header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN; + skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN); + skb_copy_from_linear_data(skb, + skb_put(conn->tx_buff, skb->len), + skb->len); + txbytes += skb->len; + txpackets++; + stat_maxcq++; + refcount_dec(&skb->users); + dev_kfree_skb_any(skb); + } + if (conn->collect_len > conn->prof.maxmulti) + conn->prof.maxmulti = conn->collect_len; + conn->collect_len = 0; + spin_unlock_irqrestore(&conn->collect_lock, saveflags); + if (conn->tx_buff->len == 0) { + fsm_newstate(fi, CONN_STATE_IDLE); + return; + } + + header.next = 0; + skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN); + conn->prof.send_stamp = jiffies; + txmsg.class = 0; + txmsg.tag = 0; + rc = iucv_message_send(conn->path, &txmsg, 0, 0, + conn->tx_buff->data, conn->tx_buff->len); + conn->prof.doios_multi++; + conn->prof.txlen += conn->tx_buff->len; + conn->prof.tx_pending++; + if (conn->prof.tx_pending > conn->prof.tx_max_pending) + conn->prof.tx_max_pending = conn->prof.tx_pending; + if (rc) { + conn->prof.tx_pending--; + fsm_newstate(fi, CONN_STATE_IDLE); + if (privptr) + privptr->stats.tx_errors += txpackets; + IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); + } else { + if (privptr) { + privptr->stats.tx_packets += txpackets; + privptr->stats.tx_bytes += txbytes; + } + if (stat_maxcq > conn->prof.maxcqueue) + conn->prof.maxcqueue = stat_maxcq; + } +} + +static void conn_action_connaccept(fsm_instance *fi, int event, void *arg) +{ + struct iucv_event *ev = arg; + struct iucv_connection *conn = ev->conn; + struct iucv_path *path = ev->data; + struct net_device *netdev = conn->netdev; + struct netiucv_priv *privptr = netdev_priv(netdev); + int rc; + + IUCV_DBF_TEXT(trace, 3, __func__); + + conn->path = path; + path->msglim = NETIUCV_QUEUELEN_DEFAULT; + path->flags = 0; + rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn); + if (rc) { + IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc); + return; + } + fsm_newstate(fi, CONN_STATE_IDLE); + netdev->tx_queue_len = conn->path->msglim; + fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); +} + +static void conn_action_connreject(fsm_instance *fi, int event, void *arg) +{ + struct iucv_event *ev = arg; + struct iucv_path *path = ev->data; + + IUCV_DBF_TEXT(trace, 3, __func__); + iucv_path_sever(path, NULL); +} + +static void conn_action_connack(fsm_instance *fi, int event, void *arg) +{ + struct iucv_connection *conn = arg; + struct net_device *netdev = conn->netdev; + struct netiucv_priv *privptr = netdev_priv(netdev); + + IUCV_DBF_TEXT(trace, 3, __func__); + fsm_deltimer(&conn->timer); + fsm_newstate(fi, CONN_STATE_IDLE); + netdev->tx_queue_len = conn->path->msglim; + fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); +} + +static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg) +{ + struct iucv_connection *conn = arg; + + IUCV_DBF_TEXT(trace, 3, __func__); + fsm_deltimer(&conn->timer); + iucv_path_sever(conn->path, conn->userdata); + fsm_newstate(fi, CONN_STATE_STARTWAIT); +} + +static void conn_action_connsever(fsm_instance *fi, int event, void *arg) +{ + struct iucv_connection *conn = arg; + struct net_device *netdev = conn->netdev; + struct netiucv_priv *privptr = netdev_priv(netdev); + + IUCV_DBF_TEXT(trace, 3, __func__); + + fsm_deltimer(&conn->timer); + iucv_path_sever(conn->path, conn->userdata); + dev_info(privptr->dev, "The peer z/VM guest %s has closed the " + 
"connection\n", netiucv_printuser(conn)); + IUCV_DBF_TEXT(data, 2, + "conn_action_connsever: Remote dropped connection\n"); + fsm_newstate(fi, CONN_STATE_STARTWAIT); + fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); +} + +static void conn_action_start(fsm_instance *fi, int event, void *arg) +{ + struct iucv_connection *conn = arg; + struct net_device *netdev = conn->netdev; + struct netiucv_priv *privptr = netdev_priv(netdev); + int rc; + + IUCV_DBF_TEXT(trace, 3, __func__); + + fsm_newstate(fi, CONN_STATE_STARTWAIT); + + /* + * We must set the state before calling iucv_connect because the + * callback handler could be called at any point after the connection + * request is sent + */ + + fsm_newstate(fi, CONN_STATE_SETUPWAIT); + conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL); + IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n", + netdev->name, netiucv_printuser(conn)); + + rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid, + NULL, conn->userdata, conn); + switch (rc) { + case 0: + netdev->tx_queue_len = conn->path->msglim; + fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, + CONN_EVENT_TIMER, conn); + return; + case 11: + dev_warn(privptr->dev, + "The IUCV device failed to connect to z/VM guest %s\n", + netiucv_printname(conn->userid, 8)); + fsm_newstate(fi, CONN_STATE_STARTWAIT); + break; + case 12: + dev_warn(privptr->dev, + "The IUCV device failed to connect to the peer on z/VM" + " guest %s\n", netiucv_printname(conn->userid, 8)); + fsm_newstate(fi, CONN_STATE_STARTWAIT); + break; + case 13: + dev_err(privptr->dev, + "Connecting the IUCV device would exceed the maximum" + " number of IUCV connections\n"); + fsm_newstate(fi, CONN_STATE_CONNERR); + break; + case 14: + dev_err(privptr->dev, + "z/VM guest %s has too many IUCV connections" + " to connect with the IUCV device\n", + netiucv_printname(conn->userid, 8)); + fsm_newstate(fi, CONN_STATE_CONNERR); + break; + case 15: + dev_err(privptr->dev, + "The IUCV device cannot connect to a z/VM guest with no" + " IUCV authorization\n"); + fsm_newstate(fi, CONN_STATE_CONNERR); + break; + default: + dev_err(privptr->dev, + "Connecting the IUCV device failed with error %d\n", + rc); + fsm_newstate(fi, CONN_STATE_CONNERR); + break; + } + IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc); + kfree(conn->path); + conn->path = NULL; +} + +static void netiucv_purge_skb_queue(struct sk_buff_head *q) +{ + struct sk_buff *skb; + + while ((skb = skb_dequeue(q))) { + refcount_dec(&skb->users); + dev_kfree_skb_any(skb); + } +} + +static void conn_action_stop(fsm_instance *fi, int event, void *arg) +{ + struct iucv_event *ev = arg; + struct iucv_connection *conn = ev->conn; + struct net_device *netdev = conn->netdev; + struct netiucv_priv *privptr = netdev_priv(netdev); + + IUCV_DBF_TEXT(trace, 3, __func__); + + fsm_deltimer(&conn->timer); + fsm_newstate(fi, CONN_STATE_STOPPED); + netiucv_purge_skb_queue(&conn->collect_queue); + if (conn->path) { + IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n"); + iucv_path_sever(conn->path, conn->userdata); + kfree(conn->path); + conn->path = NULL; + } + netiucv_purge_skb_queue(&conn->commit_queue); + fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); +} + +static void conn_action_inval(fsm_instance *fi, int event, void *arg) +{ + struct iucv_connection *conn = arg; + struct net_device *netdev = conn->netdev; + + IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n", + netdev->name, conn->userid); +} + +static const fsm_node conn_fsm[] = { + { 
CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval }, + { CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start }, + + { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop }, + { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop }, + { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop }, + { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop }, + { CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop }, + { CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop }, + { CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop }, + + { CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject }, + { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept }, + { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept }, + { CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject }, + { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject }, + + { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack }, + { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev }, + + { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever }, + { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever }, + { CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever }, + + { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx }, + { CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx }, + + { CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone }, + { CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone }, +}; + +static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node); + + +/* + * Actions for interface - statemachine. + */ + +/** + * dev_action_start + * @fi: An instance of an interface statemachine. + * @event: The event, just happened. + * @arg: Generic pointer, casted from struct net_device * upon call. + * + * Startup connection by sending CONN_EVENT_START to it. + */ +static void dev_action_start(fsm_instance *fi, int event, void *arg) +{ + struct net_device *dev = arg; + struct netiucv_priv *privptr = netdev_priv(dev); + + IUCV_DBF_TEXT(trace, 3, __func__); + + fsm_newstate(fi, DEV_STATE_STARTWAIT); + fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn); +} + +/** + * Shutdown connection by sending CONN_EVENT_STOP to it. + * + * @param fi An instance of an interface statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from struct net_device * upon call. + */ +static void +dev_action_stop(fsm_instance *fi, int event, void *arg) +{ + struct net_device *dev = arg; + struct netiucv_priv *privptr = netdev_priv(dev); + struct iucv_event ev; + + IUCV_DBF_TEXT(trace, 3, __func__); + + ev.conn = privptr->conn; + + fsm_newstate(fi, DEV_STATE_STOPWAIT); + fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev); +} + +/** + * Called from connection statemachine + * when a connection is up and running. + * + * @param fi An instance of an interface statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from struct net_device * upon call. 
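+ * In DEV_STATE_STARTWAIT the device moves to DEV_STATE_RUNNING and the
+ * peer is reported; in DEV_STATE_STOPWAIT the event is only logged.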
+ */ +static void +dev_action_connup(fsm_instance *fi, int event, void *arg) +{ + struct net_device *dev = arg; + struct netiucv_priv *privptr = netdev_priv(dev); + + IUCV_DBF_TEXT(trace, 3, __func__); + + switch (fsm_getstate(fi)) { + case DEV_STATE_STARTWAIT: + fsm_newstate(fi, DEV_STATE_RUNNING); + dev_info(privptr->dev, + "The IUCV device has been connected" + " successfully to %s\n", + netiucv_printuser(privptr->conn)); + IUCV_DBF_TEXT(setup, 3, + "connection is up and running\n"); + break; + case DEV_STATE_STOPWAIT: + IUCV_DBF_TEXT(data, 2, + "dev_action_connup: in DEV_STATE_STOPWAIT\n"); + break; + } +} + +/** + * Called from connection statemachine + * when a connection has been shutdown. + * + * @param fi An instance of an interface statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from struct net_device * upon call. + */ +static void +dev_action_conndown(fsm_instance *fi, int event, void *arg) +{ + IUCV_DBF_TEXT(trace, 3, __func__); + + switch (fsm_getstate(fi)) { + case DEV_STATE_RUNNING: + fsm_newstate(fi, DEV_STATE_STARTWAIT); + break; + case DEV_STATE_STOPWAIT: + fsm_newstate(fi, DEV_STATE_STOPPED); + IUCV_DBF_TEXT(setup, 3, "connection is down\n"); + break; + } +} + +static const fsm_node dev_fsm[] = { + { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start }, + + { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start }, + { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown }, + + { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop }, + { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup }, + + { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop }, + { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown }, + { DEV_STATE_RUNNING, DEV_EVENT_CONUP, netiucv_action_nop }, +}; + +static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node); + +/** + * Transmit a packet. + * This is a helper function for netiucv_tx(). + * + * @param conn Connection to be used for sending. + * @param skb Pointer to struct sk_buff of packet to send. + * The linklevel header has already been set up + * by netiucv_tx(). + * + * @return 0 on success, -ERRNO on failure. (Never fails.) + */ +static int netiucv_transmit_skb(struct iucv_connection *conn, + struct sk_buff *skb) +{ + struct iucv_message msg; + unsigned long saveflags; + struct ll_header header; + int rc; + + if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) { + int l = skb->len + NETIUCV_HDRLEN; + + spin_lock_irqsave(&conn->collect_lock, saveflags); + if (conn->collect_len + l > + (conn->max_buffsize - NETIUCV_HDRLEN)) { + rc = -EBUSY; + IUCV_DBF_TEXT(data, 2, + "EBUSY from netiucv_transmit_skb\n"); + } else { + refcount_inc(&skb->users); + skb_queue_tail(&conn->collect_queue, skb); + conn->collect_len += l; + rc = 0; + } + spin_unlock_irqrestore(&conn->collect_lock, saveflags); + } else { + struct sk_buff *nskb = skb; + /** + * Copy the skb to a new allocated skb in lowmem only if the + * data is located above 2G in memory or tailroom is < 2. + */ + unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) + + NETIUCV_HDRLEN)) >> 31; + int copied = 0; + if (hi || (skb_tailroom(skb) < 2)) { + nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + + NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA); + if (!nskb) { + IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n"); + rc = -ENOMEM; + return rc; + } else { + skb_reserve(nskb, NETIUCV_HDRLEN); + skb_put_data(nskb, skb->data, skb->len); + } + copied = 1; + } + /** + * skb now is below 2G and has enough room. Add headers. 
+ */ + header.next = nskb->len + NETIUCV_HDRLEN; + memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN); + header.next = 0; + skb_put_data(nskb, &header, NETIUCV_HDRLEN); + + fsm_newstate(conn->fsm, CONN_STATE_TX); + conn->prof.send_stamp = jiffies; + + msg.tag = 1; + msg.class = 0; + rc = iucv_message_send(conn->path, &msg, 0, 0, + nskb->data, nskb->len); + conn->prof.doios_single++; + conn->prof.txlen += skb->len; + conn->prof.tx_pending++; + if (conn->prof.tx_pending > conn->prof.tx_max_pending) + conn->prof.tx_max_pending = conn->prof.tx_pending; + if (rc) { + struct netiucv_priv *privptr; + fsm_newstate(conn->fsm, CONN_STATE_IDLE); + conn->prof.tx_pending--; + privptr = netdev_priv(conn->netdev); + if (privptr) + privptr->stats.tx_errors++; + if (copied) + dev_kfree_skb(nskb); + else { + /** + * Remove our headers. They get added + * again on retransmit. + */ + skb_pull(skb, NETIUCV_HDRLEN); + skb_trim(skb, skb->len - NETIUCV_HDRLEN); + } + IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); + } else { + if (copied) + dev_kfree_skb(skb); + refcount_inc(&nskb->users); + skb_queue_tail(&conn->commit_queue, nskb); + } + } + + return rc; +} + +/* + * Interface API for upper network layers + */ + +/** + * Open an interface. + * Called from generic network layer when ifconfig up is run. + * + * @param dev Pointer to interface struct. + * + * @return 0 on success, -ERRNO on failure. (Never fails.) + */ +static int netiucv_open(struct net_device *dev) +{ + struct netiucv_priv *priv = netdev_priv(dev); + + fsm_event(priv->fsm, DEV_EVENT_START, dev); + return 0; +} + +/** + * Close an interface. + * Called from generic network layer when ifconfig down is run. + * + * @param dev Pointer to interface struct. + * + * @return 0 on success, -ERRNO on failure. (Never fails.) + */ +static int netiucv_close(struct net_device *dev) +{ + struct netiucv_priv *priv = netdev_priv(dev); + + fsm_event(priv->fsm, DEV_EVENT_STOP, dev); + return 0; +} + +/** + * Start transmission of a packet. + * Called from generic network device layer. + */ +static netdev_tx_t netiucv_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct netiucv_priv *privptr = netdev_priv(dev); + int rc; + + IUCV_DBF_TEXT(trace, 4, __func__); + /** + * Some sanity checks ... + */ + if (skb == NULL) { + IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n"); + privptr->stats.tx_dropped++; + return NETDEV_TX_OK; + } + if (skb_headroom(skb) < NETIUCV_HDRLEN) { + IUCV_DBF_TEXT(data, 2, + "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n"); + dev_kfree_skb(skb); + privptr->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + /** + * If connection is not running, try to restart it + * and throw away packet. + */ + if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) { + dev_kfree_skb(skb); + privptr->stats.tx_dropped++; + privptr->stats.tx_errors++; + privptr->stats.tx_carrier_errors++; + return NETDEV_TX_OK; + } + + if (netiucv_test_and_set_busy(dev)) { + IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n"); + return NETDEV_TX_BUSY; + } + netif_trans_update(dev); + rc = netiucv_transmit_skb(privptr->conn, skb); + netiucv_clear_busy(dev); + return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK; +} + +/** + * netiucv_stats + * @dev: Pointer to interface struct. + * + * Returns interface statistics of a device. + * + * Returns pointer to stats struct of this interface. 
+ */ +static struct net_device_stats *netiucv_stats (struct net_device * dev) +{ + struct netiucv_priv *priv = netdev_priv(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return &priv->stats; +} + +/* + * attributes in sysfs + */ + +static ssize_t user_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%s\n", netiucv_printuser(priv->conn)); +} + +static int netiucv_check_user(const char *buf, size_t count, char *username, + char *userdata) +{ + const char *p; + int i; + + p = strchr(buf, '.'); + if ((p && ((count > 26) || + ((p - buf) > 8) || + (buf + count - p > 18))) || + (!p && (count > 9))) { + IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); + return -EINVAL; + } + + for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) { + if (isalnum(*p) || *p == '$') { + username[i] = toupper(*p); + continue; + } + if (*p == '\n') + /* trailing lf, grr */ + break; + IUCV_DBF_TEXT_(setup, 2, + "conn_write: invalid character %02x\n", *p); + return -EINVAL; + } + while (i < 8) + username[i++] = ' '; + username[8] = '\0'; + + if (*p == '.') { + p++; + for (i = 0; i < 16 && *p; i++, p++) { + if (*p == '\n') + break; + userdata[i] = toupper(*p); + } + while (i > 0 && i < 16) + userdata[i++] = ' '; + } else + memcpy(userdata, iucvMagic_ascii, 16); + userdata[16] = '\0'; + ASCEBC(userdata, 16); + + return 0; +} + +static ssize_t user_write(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->conn->netdev; + char username[9]; + char userdata[17]; + int rc; + struct iucv_connection *cp; + + IUCV_DBF_TEXT(trace, 3, __func__); + rc = netiucv_check_user(buf, count, username, userdata); + if (rc) + return rc; + + if (memcmp(username, priv->conn->userid, 9) && + (ndev->flags & (IFF_UP | IFF_RUNNING))) { + /* username changed while the interface is active. 
*/ + IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); + return -EPERM; + } + read_lock_bh(&iucv_connection_rwlock); + list_for_each_entry(cp, &iucv_connection_list, list) { + if (!strncmp(username, cp->userid, 9) && + !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) { + read_unlock_bh(&iucv_connection_rwlock); + IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s " + "already exists\n", netiucv_printuser(cp)); + return -EEXIST; + } + } + read_unlock_bh(&iucv_connection_rwlock); + memcpy(priv->conn->userid, username, 9); + memcpy(priv->conn->userdata, userdata, 17); + return count; +} + +static DEVICE_ATTR(user, 0644, user_show, user_write); + +static ssize_t buffer_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%d\n", priv->conn->max_buffsize); +} + +static ssize_t buffer_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->conn->netdev; + unsigned int bs1; + int rc; + + IUCV_DBF_TEXT(trace, 3, __func__); + if (count >= 39) + return -EINVAL; + + rc = kstrtouint(buf, 0, &bs1); + + if (rc == -EINVAL) { + IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %s\n", + buf); + return -EINVAL; + } + if ((rc == -ERANGE) || (bs1 > NETIUCV_BUFSIZE_MAX)) { + IUCV_DBF_TEXT_(setup, 2, + "buffer_write: buffer size %d too large\n", + bs1); + return -EINVAL; + } + if ((ndev->flags & IFF_RUNNING) && + (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) { + IUCV_DBF_TEXT_(setup, 2, + "buffer_write: buffer size %d too small\n", + bs1); + return -EINVAL; + } + if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) { + IUCV_DBF_TEXT_(setup, 2, + "buffer_write: buffer size %d too small\n", + bs1); + return -EINVAL; + } + + priv->conn->max_buffsize = bs1; + if (!(ndev->flags & IFF_RUNNING)) + ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN; + + return count; + +} + +static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write); + +static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm)); +} + +static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL); + +static ssize_t conn_fsm_show (struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm)); +} + +static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL); + +static ssize_t maxmulti_show (struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti); +} + +static ssize_t maxmulti_write (struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 4, __func__); + priv->conn->prof.maxmulti = 0; + return count; +} + +static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write); + +static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + 
return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue); +} + +static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 4, __func__); + priv->conn->prof.maxcqueue = 0; + return count; +} + +static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write); + +static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%ld\n", priv->conn->prof.doios_single); +} + +static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 4, __func__); + priv->conn->prof.doios_single = 0; + return count; +} + +static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write); + +static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi); +} + +static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + priv->conn->prof.doios_multi = 0; + return count; +} + +static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write); + +static ssize_t txlen_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%ld\n", priv->conn->prof.txlen); +} + +static ssize_t txlen_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 4, __func__); + priv->conn->prof.txlen = 0; + return count; +} + +static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write); + +static ssize_t txtime_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%ld\n", priv->conn->prof.tx_time); +} + +static ssize_t txtime_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 4, __func__); + priv->conn->prof.tx_time = 0; + return count; +} + +static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write); + +static ssize_t txpend_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending); +} + +static ssize_t txpend_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 4, __func__); + priv->conn->prof.tx_pending = 0; + return count; +} + +static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write); + +static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%ld\n", 
priv->conn->prof.tx_max_pending); +} + +static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 4, __func__); + priv->conn->prof.tx_max_pending = 0; + return count; +} + +static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write); + +static struct attribute *netiucv_attrs[] = { + &dev_attr_buffer.attr, + &dev_attr_user.attr, + NULL, +}; + +static struct attribute_group netiucv_attr_group = { + .attrs = netiucv_attrs, +}; + +static struct attribute *netiucv_stat_attrs[] = { + &dev_attr_device_fsm_state.attr, + &dev_attr_connection_fsm_state.attr, + &dev_attr_max_tx_buffer_used.attr, + &dev_attr_max_chained_skbs.attr, + &dev_attr_tx_single_write_ops.attr, + &dev_attr_tx_multi_write_ops.attr, + &dev_attr_netto_bytes.attr, + &dev_attr_max_tx_io_time.attr, + &dev_attr_tx_pending.attr, + &dev_attr_tx_max_pending.attr, + NULL, +}; + +static struct attribute_group netiucv_stat_attr_group = { + .name = "stats", + .attrs = netiucv_stat_attrs, +}; + +static const struct attribute_group *netiucv_attr_groups[] = { + &netiucv_stat_attr_group, + &netiucv_attr_group, + NULL, +}; + +static int netiucv_register_device(struct net_device *ndev) +{ + struct netiucv_priv *priv = netdev_priv(ndev); + struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL); + int ret; + + IUCV_DBF_TEXT(trace, 3, __func__); + + if (dev) { + dev_set_name(dev, "net%s", ndev->name); + dev->bus = &iucv_bus; + dev->parent = iucv_root; + dev->groups = netiucv_attr_groups; + /* + * The release function could be called after the + * module has been unloaded. It's _only_ task is to + * free the struct. Therefore, we specify kfree() + * directly here. (Probably a little bit obfuscating + * but legitime ...). + */ + dev->release = (void (*)(struct device *))kfree; + dev->driver = &netiucv_driver; + } else + return -ENOMEM; + + ret = device_register(dev); + if (ret) { + put_device(dev); + return ret; + } + priv->dev = dev; + dev_set_drvdata(dev, priv); + return 0; +} + +static void netiucv_unregister_device(struct device *dev) +{ + IUCV_DBF_TEXT(trace, 3, __func__); + device_unregister(dev); +} + +/** + * Allocate and initialize a new connection structure. 
+ * Add it to the list of netiucv connections; + */ +static struct iucv_connection *netiucv_new_connection(struct net_device *dev, + char *username, + char *userdata) +{ + struct iucv_connection *conn; + + conn = kzalloc(sizeof(*conn), GFP_KERNEL); + if (!conn) + goto out; + skb_queue_head_init(&conn->collect_queue); + skb_queue_head_init(&conn->commit_queue); + spin_lock_init(&conn->collect_lock); + conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT; + conn->netdev = dev; + + conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA); + if (!conn->rx_buff) + goto out_conn; + conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA); + if (!conn->tx_buff) + goto out_rx; + conn->fsm = init_fsm("netiucvconn", conn_state_names, + conn_event_names, NR_CONN_STATES, + NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN, + GFP_KERNEL); + if (!conn->fsm) + goto out_tx; + + fsm_settimer(conn->fsm, &conn->timer); + fsm_newstate(conn->fsm, CONN_STATE_INVALID); + + if (userdata) + memcpy(conn->userdata, userdata, 17); + if (username) { + memcpy(conn->userid, username, 9); + fsm_newstate(conn->fsm, CONN_STATE_STOPPED); + } + + write_lock_bh(&iucv_connection_rwlock); + list_add_tail(&conn->list, &iucv_connection_list); + write_unlock_bh(&iucv_connection_rwlock); + return conn; + +out_tx: + kfree_skb(conn->tx_buff); +out_rx: + kfree_skb(conn->rx_buff); +out_conn: + kfree(conn); +out: + return NULL; +} + +/** + * Release a connection structure and remove it from the + * list of netiucv connections. + */ +static void netiucv_remove_connection(struct iucv_connection *conn) +{ + + IUCV_DBF_TEXT(trace, 3, __func__); + write_lock_bh(&iucv_connection_rwlock); + list_del_init(&conn->list); + write_unlock_bh(&iucv_connection_rwlock); + fsm_deltimer(&conn->timer); + netiucv_purge_skb_queue(&conn->collect_queue); + if (conn->path) { + iucv_path_sever(conn->path, conn->userdata); + kfree(conn->path); + conn->path = NULL; + } + netiucv_purge_skb_queue(&conn->commit_queue); + kfree_fsm(conn->fsm); + kfree_skb(conn->rx_buff); + kfree_skb(conn->tx_buff); +} + +/** + * Release everything of a net device. + */ +static void netiucv_free_netdevice(struct net_device *dev) +{ + struct netiucv_priv *privptr = netdev_priv(dev); + + IUCV_DBF_TEXT(trace, 3, __func__); + + if (!dev) + return; + + if (privptr) { + if (privptr->conn) + netiucv_remove_connection(privptr->conn); + if (privptr->fsm) + kfree_fsm(privptr->fsm); + privptr->conn = NULL; privptr->fsm = NULL; + /* privptr gets freed by free_netdev() */ + } +} + +/** + * Initialize a net device. (Called from kernel in alloc_netdev()) + */ +static const struct net_device_ops netiucv_netdev_ops = { + .ndo_open = netiucv_open, + .ndo_stop = netiucv_close, + .ndo_get_stats = netiucv_stats, + .ndo_start_xmit = netiucv_tx, +}; + +static void netiucv_setup_netdevice(struct net_device *dev) +{ + dev->mtu = NETIUCV_MTU_DEFAULT; + dev->min_mtu = 576; + dev->max_mtu = NETIUCV_MTU_MAX; + dev->needs_free_netdev = true; + dev->priv_destructor = netiucv_free_netdevice; + dev->hard_header_len = NETIUCV_HDRLEN; + dev->addr_len = 0; + dev->type = ARPHRD_SLIP; + dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT; + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->netdev_ops = &netiucv_netdev_ops; +} + +/** + * Allocate and initialize everything of a net device. 
+ */ +static struct net_device *netiucv_init_netdevice(char *username, char *userdata) +{ + struct netiucv_priv *privptr; + struct net_device *dev; + + dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d", + NET_NAME_UNKNOWN, netiucv_setup_netdevice); + if (!dev) + return NULL; + rtnl_lock(); + if (dev_alloc_name(dev, dev->name) < 0) + goto out_netdev; + + privptr = netdev_priv(dev); + privptr->fsm = init_fsm("netiucvdev", dev_state_names, + dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS, + dev_fsm, DEV_FSM_LEN, GFP_KERNEL); + if (!privptr->fsm) + goto out_netdev; + + privptr->conn = netiucv_new_connection(dev, username, userdata); + if (!privptr->conn) { + IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n"); + goto out_fsm; + } + fsm_newstate(privptr->fsm, DEV_STATE_STOPPED); + return dev; + +out_fsm: + kfree_fsm(privptr->fsm); +out_netdev: + rtnl_unlock(); + free_netdev(dev); + return NULL; +} + +static ssize_t connection_store(struct device_driver *drv, const char *buf, + size_t count) +{ + char username[9]; + char userdata[17]; + int rc; + struct net_device *dev; + struct netiucv_priv *priv; + struct iucv_connection *cp; + + IUCV_DBF_TEXT(trace, 3, __func__); + rc = netiucv_check_user(buf, count, username, userdata); + if (rc) + return rc; + + read_lock_bh(&iucv_connection_rwlock); + list_for_each_entry(cp, &iucv_connection_list, list) { + if (!strncmp(username, cp->userid, 9) && + !strncmp(userdata, cp->userdata, 17)) { + read_unlock_bh(&iucv_connection_rwlock); + IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s " + "already exists\n", netiucv_printuser(cp)); + return -EEXIST; + } + } + read_unlock_bh(&iucv_connection_rwlock); + + dev = netiucv_init_netdevice(username, userdata); + if (!dev) { + IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); + return -ENODEV; + } + + rc = netiucv_register_device(dev); + if (rc) { + rtnl_unlock(); + IUCV_DBF_TEXT_(setup, 2, + "ret %d from netiucv_register_device\n", rc); + goto out_free_ndev; + } + + /* sysfs magic */ + priv = netdev_priv(dev); + SET_NETDEV_DEV(dev, priv->dev); + + rc = register_netdevice(dev); + rtnl_unlock(); + if (rc) + goto out_unreg; + + dev_info(priv->dev, "The IUCV interface to %s has been established " + "successfully\n", + netiucv_printuser(priv->conn)); + + return count; + +out_unreg: + netiucv_unregister_device(priv->dev); +out_free_ndev: + netiucv_free_netdevice(dev); + return rc; +} +static DRIVER_ATTR_WO(connection); + +static ssize_t remove_store(struct device_driver *drv, const char *buf, + size_t count) +{ + struct iucv_connection *cp; + struct net_device *ndev; + struct netiucv_priv *priv; + struct device *dev; + char name[IFNAMSIZ]; + const char *p; + int i; + + IUCV_DBF_TEXT(trace, 3, __func__); + + if (count >= IFNAMSIZ) + count = IFNAMSIZ - 1; + + for (i = 0, p = buf; i < count && *p; i++, p++) { + if (*p == '\n' || *p == ' ') + /* trailing lf, grr */ + break; + name[i] = *p; + } + name[i] = '\0'; + + read_lock_bh(&iucv_connection_rwlock); + list_for_each_entry(cp, &iucv_connection_list, list) { + ndev = cp->netdev; + priv = netdev_priv(ndev); + dev = priv->dev; + if (strncmp(name, ndev->name, count)) + continue; + read_unlock_bh(&iucv_connection_rwlock); + if (ndev->flags & (IFF_UP | IFF_RUNNING)) { + dev_warn(dev, "The IUCV device is connected" + " to %s and cannot be removed\n", + priv->conn->userid); + IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); + return -EPERM; + } + unregister_netdev(ndev); + netiucv_unregister_device(dev); + return count; + } + 
read_unlock_bh(&iucv_connection_rwlock); + IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); + return -EINVAL; +} +static DRIVER_ATTR_WO(remove); + +static struct attribute * netiucv_drv_attrs[] = { + &driver_attr_connection.attr, + &driver_attr_remove.attr, + NULL, +}; + +static struct attribute_group netiucv_drv_attr_group = { + .attrs = netiucv_drv_attrs, +}; + +static const struct attribute_group *netiucv_drv_attr_groups[] = { + &netiucv_drv_attr_group, + NULL, +}; + +static void netiucv_banner(void) +{ + pr_info("driver initialized\n"); +} + +static void __exit netiucv_exit(void) +{ + struct iucv_connection *cp; + struct net_device *ndev; + struct netiucv_priv *priv; + struct device *dev; + + IUCV_DBF_TEXT(trace, 3, __func__); + while (!list_empty(&iucv_connection_list)) { + cp = list_entry(iucv_connection_list.next, + struct iucv_connection, list); + ndev = cp->netdev; + priv = netdev_priv(ndev); + dev = priv->dev; + + unregister_netdev(ndev); + netiucv_unregister_device(dev); + } + + driver_unregister(&netiucv_driver); + iucv_unregister(&netiucv_handler, 1); + iucv_unregister_dbf_views(); + + pr_info("driver unloaded\n"); + return; +} + +static int __init netiucv_init(void) +{ + int rc; + + rc = iucv_register_dbf_views(); + if (rc) + goto out; + rc = iucv_register(&netiucv_handler, 1); + if (rc) + goto out_dbf; + IUCV_DBF_TEXT(trace, 3, __func__); + netiucv_driver.groups = netiucv_drv_attr_groups; + rc = driver_register(&netiucv_driver); + if (rc) { + IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); + goto out_iucv; + } + + netiucv_banner(); + return rc; + +out_iucv: + iucv_unregister(&netiucv_handler, 1); +out_dbf: + iucv_unregister_dbf_views(); +out: + return rc; +} + +module_init(netiucv_init); +module_exit(netiucv_exit); +MODULE_LICENSE("GPL"); diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h new file mode 100644 index 000000000..2544edd4d --- /dev/null +++ b/drivers/s390/net/qeth_core.h @@ -0,0 +1,1154 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 
2007 + * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, + * Frank Pavlic <fpavlic@de.ibm.com>, + * Thomas Spatzier <tspat@de.ibm.com>, + * Frank Blaschka <frank.blaschka@de.ibm.com> + */ + +#ifndef __QETH_CORE_H__ +#define __QETH_CORE_H__ + +#include <linux/completion.h> +#include <linux/debugfs.h> +#include <linux/if.h> +#include <linux/if_arp.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/ctype.h> +#include <linux/in6.h> +#include <linux/bitops.h> +#include <linux/seq_file.h> +#include <linux/hashtable.h> +#include <linux/ip.h> +#include <linux/rcupdate.h> +#include <linux/refcount.h> +#include <linux/timer.h> +#include <linux/types.h> +#include <linux/wait.h> +#include <linux/workqueue.h> + +#include <net/dst.h> +#include <net/ip6_fib.h> +#include <net/ipv6.h> +#include <net/if_inet6.h> +#include <net/addrconf.h> +#include <net/route.h> +#include <net/sch_generic.h> +#include <net/tcp.h> + +#include <asm/debug.h> +#include <asm/qdio.h> +#include <asm/ccwdev.h> +#include <asm/ccwgroup.h> +#include <asm/sysinfo.h> + +#include <uapi/linux/if_link.h> + +#include "qeth_core_mpc.h" + +/** + * Debug Facility stuff + */ +enum qeth_dbf_names { + QETH_DBF_SETUP, + QETH_DBF_MSG, + QETH_DBF_CTRL, + QETH_DBF_INFOS /* must be last element */ +}; + +struct qeth_dbf_info { + char name[DEBUG_MAX_NAME_LEN]; + int pages; + int areas; + int len; + int level; + struct debug_view *view; + debug_info_t *id; +}; + +#define QETH_DBF_CTRL_LEN 256U + +#define QETH_DBF_TEXT(name, level, text) \ + debug_text_event(qeth_dbf[QETH_DBF_##name].id, level, text) + +#define QETH_DBF_HEX(name, level, addr, len) \ + debug_event(qeth_dbf[QETH_DBF_##name].id, level, (void *)(addr), len) + +#define QETH_DBF_MESSAGE(level, text...) \ + debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text) + +#define QETH_DBF_TEXT_(name, level, text...) \ + qeth_dbf_longtext(qeth_dbf[QETH_DBF_##name].id, level, text) + +#define QETH_CARD_TEXT(card, level, text) \ + debug_text_event(card->debug, level, text) + +#define QETH_CARD_HEX(card, level, addr, len) \ + debug_event(card->debug, level, (void *)(addr), len) + +#define QETH_CARD_MESSAGE(card, text...) \ + debug_sprintf_event(card->debug, level, text) + +#define QETH_CARD_TEXT_(card, level, text...) 
\ + qeth_dbf_longtext(card->debug, level, text) + +#define SENSE_COMMAND_REJECT_BYTE 0 +#define SENSE_COMMAND_REJECT_FLAG 0x80 +#define SENSE_RESETTING_EVENT_BYTE 1 +#define SENSE_RESETTING_EVENT_FLAG 0x80 + +static inline u32 qeth_get_device_id(struct ccw_device *cdev) +{ + struct ccw_dev_id dev_id; + u32 id; + + ccw_device_get_id(cdev, &dev_id); + id = dev_id.devno; + id |= (u32) (dev_id.ssid << 16); + + return id; +} + +/* + * Common IO related definitions + */ +#define CARD_RDEV(card) card->read.ccwdev +#define CARD_WDEV(card) card->write.ccwdev +#define CARD_DDEV(card) card->data.ccwdev +#define CARD_BUS_ID(card) dev_name(&card->gdev->dev) +#define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev) +#define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev) +#define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev) +#define CCW_DEVID(cdev) (qeth_get_device_id(cdev)) +#define CARD_DEVID(card) (CCW_DEVID(CARD_RDEV(card))) + +/* Routing stuff */ +struct qeth_routing_info { + enum qeth_routing_types type; +}; + +/* SETBRIDGEPORT stuff */ +enum qeth_sbp_roles { + QETH_SBP_ROLE_NONE = 0, + QETH_SBP_ROLE_PRIMARY = 1, + QETH_SBP_ROLE_SECONDARY = 2, +}; + +enum qeth_sbp_states { + QETH_SBP_STATE_INACTIVE = 0, + QETH_SBP_STATE_STANDBY = 1, + QETH_SBP_STATE_ACTIVE = 2, +}; + +#define QETH_SBP_HOST_NOTIFICATION 1 + +struct qeth_sbp_info { + __u32 supported_funcs; + enum qeth_sbp_roles role; + __u32 hostnotification:1; + __u32 reflect_promisc:1; + __u32 reflect_promisc_primary:1; +}; + +struct qeth_vnicc_info { + /* supported/currently configured VNICCs; updated in IPA exchanges */ + u32 sup_chars; + u32 cur_chars; + /* supported commands: bitmasks which VNICCs support respective cmd */ + u32 set_char_sup; + u32 getset_timeout_sup; + /* timeout value for the learning characteristic */ + u32 learning_timeout; + /* characteristics wanted/configured by user */ + u32 wanted_chars; + /* has user explicitly enabled rx_bcast while online? */ + bool rx_bcast_enabled; +}; + +#define QETH_IDX_FUNC_LEVEL_OSD 0x0101 +#define QETH_IDX_FUNC_LEVEL_IQD 0x4108 + +#define QETH_BUFSIZE 4096 +#define CCW_CMD_WRITE 0x01 +#define CCW_CMD_READ 0x02 + +/** + * some more defs + */ +#define QETH_TX_TIMEOUT (100 * HZ) +#define QETH_RCD_TIMEOUT (60 * HZ) +#define QETH_RECLAIM_WORK_TIME HZ +#define QETH_MAX_PORTNO 15 + +/*****************************************************************************/ +/* QDIO queue and buffer handling */ +/*****************************************************************************/ +#define QETH_MAX_OUT_QUEUES 4 +#define QETH_IQD_MIN_TXQ 2 /* One for ucast, one for mcast. 
*/ +#define QETH_IQD_MCAST_TXQ 0 +#define QETH_IQD_MIN_UCAST_TXQ 1 + +#define QETH_MAX_IN_QUEUES 2 +#define QETH_RX_COPYBREAK (PAGE_SIZE >> 1) +#define QETH_IN_BUF_SIZE_DEFAULT 65536 +#define QETH_IN_BUF_COUNT_DEFAULT 64 +#define QETH_IN_BUF_COUNT_HSDEFAULT 128 +#define QETH_IN_BUF_COUNT_MIN 8U +#define QETH_IN_BUF_COUNT_MAX 128U +#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12) +#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \ + ((card)->qdio.in_buf_pool.buf_count / 2) + +/* buffers we have to be behind before we get a PCI */ +#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1) +/*enqueued free buffers left before we get a PCI*/ +#define QETH_PCI_THRESHOLD_B(card) 0 +/*not used unless the microcode gets patched*/ +#define QETH_PCI_TIMER_VALUE(card) 3 + +/* priority queing */ +#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING +#define QETH_DEFAULT_QUEUE 2 +#define QETH_NO_PRIO_QUEUEING 0 +#define QETH_PRIO_Q_ING_PREC 1 +#define QETH_PRIO_Q_ING_TOS 2 +#define QETH_PRIO_Q_ING_SKB 3 +#define QETH_PRIO_Q_ING_VLAN 4 +#define QETH_PRIO_Q_ING_FIXED 5 + +/* Packing */ +#define QETH_LOW_WATERMARK_PACK 2 +#define QETH_HIGH_WATERMARK_PACK 5 +#define QETH_WATERMARK_PACK_FUZZ 1 + +struct qeth_hdr_layer3 { + __u8 id; + __u8 flags; + __u16 inbound_checksum; /*TSO:__u16 seqno */ + __u32 token; /*TSO: __u32 reserved */ + __u16 length; + __u8 vlan_prio; + __u8 ext_flags; + __u16 vlan_id; + __u16 frame_offset; + union { + /* TX: */ + struct in6_addr addr; + /* RX: */ + struct rx { + u8 res1[2]; + u8 src_mac[6]; + u8 res2[4]; + u16 vlan_id; + u8 res3[2]; + } rx; + } next_hop; +}; + +struct qeth_hdr_layer2 { + __u8 id; + __u8 flags[3]; + __u8 port_no; + __u8 hdr_length; + __u16 pkt_length; + __u16 seq_no; + __u16 vlan_id; + __u32 reserved; + __u8 reserved2[16]; +} __attribute__ ((packed)); + +struct qeth_hdr_osn { + __u8 id; + __u8 reserved; + __u16 seq_no; + __u16 reserved2; + __u16 control_flags; + __u16 pdu_length; + __u8 reserved3[18]; + __u32 ccid; +} __attribute__ ((packed)); + +struct qeth_hdr { + union { + struct qeth_hdr_layer2 l2; + struct qeth_hdr_layer3 l3; + struct qeth_hdr_osn osn; + } hdr; +} __attribute__ ((packed)); + +#define QETH_QIB_PQUE_ORDER_RR 0 +#define QETH_QIB_PQUE_UNITS_SBAL 2 +#define QETH_QIB_PQUE_PRIO_DEFAULT 4 + +struct qeth_qib_parms { + char pcit_magic[4]; + u32 pcit_a; + u32 pcit_b; + u32 pcit_c; + char blkt_magic[4]; + u32 blkt_total; + u32 blkt_inter_packet; + u32 blkt_inter_packet_jumbo; + char pque_magic[4]; + u8 pque_order; + u8 pque_units; + u16 reserved; + u32 pque_priority[4]; +}; + +/*TCP Segmentation Offload header*/ +struct qeth_hdr_ext_tso { + __u16 hdr_tot_len; + __u8 imb_hdr_no; + __u8 reserved; + __u8 hdr_type; + __u8 hdr_version; + __u16 hdr_len; + __u32 payload_len; + __u16 mss; + __u16 dg_hdr_len; + __u8 padding[16]; +} __attribute__ ((packed)); + +struct qeth_hdr_tso { + struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/ + struct qeth_hdr_ext_tso ext; +} __attribute__ ((packed)); + + +/* flags for qeth_hdr.flags */ +#define QETH_HDR_PASSTHRU 0x10 +#define QETH_HDR_IPV6 0x80 +#define QETH_HDR_CAST_MASK 0x07 +enum qeth_cast_flags { + QETH_CAST_UNICAST = 0x06, + QETH_CAST_MULTICAST = 0x04, + QETH_CAST_BROADCAST = 0x05, + QETH_CAST_ANYCAST = 0x07, + QETH_CAST_NOCAST = 0x00, +}; + +enum qeth_layer2_frame_flags { + QETH_LAYER2_FLAG_MULTICAST = 0x01, + QETH_LAYER2_FLAG_BROADCAST = 0x02, + QETH_LAYER2_FLAG_UNICAST = 0x04, + QETH_LAYER2_FLAG_VLAN = 0x10, +}; + +enum qeth_header_ids { + QETH_HEADER_TYPE_LAYER3 = 0x01, + 
QETH_HEADER_TYPE_LAYER2 = 0x02, + QETH_HEADER_TYPE_L3_TSO = 0x03, + QETH_HEADER_TYPE_OSN = 0x04, + QETH_HEADER_TYPE_L2_TSO = 0x06, + QETH_HEADER_MASK_INVAL = 0x80, +}; +/* flags for qeth_hdr.ext_flags */ +#define QETH_HDR_EXT_VLAN_FRAME 0x01 +#define QETH_HDR_EXT_TOKEN_ID 0x02 +#define QETH_HDR_EXT_INCLUDE_VLAN_TAG 0x04 +#define QETH_HDR_EXT_SRC_MAC_ADDR 0x08 +#define QETH_HDR_EXT_CSUM_HDR_REQ 0x10 +#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20 +#define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/ + +static inline bool qeth_l2_same_vlan(struct qeth_hdr_layer2 *h1, + struct qeth_hdr_layer2 *h2) +{ + return !((h1->flags[2] ^ h2->flags[2]) & QETH_LAYER2_FLAG_VLAN) && + h1->vlan_id == h2->vlan_id; +} + +static inline bool qeth_l3_iqd_same_vlan(struct qeth_hdr_layer3 *h1, + struct qeth_hdr_layer3 *h2) +{ + return !((h1->ext_flags ^ h2->ext_flags) & QETH_HDR_EXT_VLAN_FRAME) && + h1->vlan_id == h2->vlan_id; +} + +static inline bool qeth_l3_same_next_hop(struct qeth_hdr_layer3 *h1, + struct qeth_hdr_layer3 *h2) +{ + return !((h1->flags ^ h2->flags) & QETH_HDR_IPV6) && + ipv6_addr_equal(&h1->next_hop.addr, &h2->next_hop.addr); +} + +struct qeth_local_addr { + struct hlist_node hnode; + struct rcu_head rcu; + struct in6_addr addr; +}; + +enum qeth_qdio_info_states { + QETH_QDIO_UNINITIALIZED, + QETH_QDIO_ALLOCATED, + QETH_QDIO_ESTABLISHED, + QETH_QDIO_CLEANING +}; + +struct qeth_buffer_pool_entry { + struct list_head list; + struct list_head init_list; + struct page *elements[QDIO_MAX_ELEMENTS_PER_BUFFER]; +}; + +struct qeth_qdio_buffer_pool { + struct list_head entry_list; + int buf_count; +}; + +struct qeth_qdio_buffer { + struct qdio_buffer *buffer; + /* the buffer pool entry currently associated to this buffer */ + struct qeth_buffer_pool_entry *pool_entry; + struct sk_buff *rx_skb; +}; + +struct qeth_qdio_q { + struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; + struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q]; + int next_buf_to_init; +}; + +enum qeth_qdio_out_buffer_state { + /* Owned by driver, in order to be filled. */ + QETH_QDIO_BUF_EMPTY, + /* Filled by driver; owned by hardware in order to be sent. 
*/ + QETH_QDIO_BUF_PRIMED, + /* Discovered by the TX completion code: */ + QETH_QDIO_BUF_PENDING, + /* Finished by the TX completion code: */ + QETH_QDIO_BUF_NEED_QAOB, + /* Received QAOB notification on CQ: */ + QETH_QDIO_BUF_QAOB_OK, + QETH_QDIO_BUF_QAOB_ERROR, +}; + +struct qeth_qdio_out_buffer { + struct qdio_buffer *buffer; + atomic_t state; + int next_element_to_fill; + unsigned int frames; + unsigned int bytes; + struct sk_buff_head skb_list; + int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER]; + + struct qeth_qdio_out_q *q; + struct list_head list_entry; +}; + +struct qeth_card; + +#define QETH_CARD_STAT_ADD(_c, _stat, _val) ((_c)->stats._stat += (_val)) +#define QETH_CARD_STAT_INC(_c, _stat) QETH_CARD_STAT_ADD(_c, _stat, 1) + +#define QETH_TXQ_STAT_ADD(_q, _stat, _val) ((_q)->stats._stat += (_val)) +#define QETH_TXQ_STAT_INC(_q, _stat) QETH_TXQ_STAT_ADD(_q, _stat, 1) + +struct qeth_card_stats { + u64 rx_bufs; + u64 rx_skb_csum; + u64 rx_sg_skbs; + u64 rx_sg_frags; + u64 rx_sg_alloc_page; + + u64 rx_dropped_nomem; + u64 rx_dropped_notsupp; + u64 rx_dropped_runt; + + /* rtnl_link_stats64 */ + u64 rx_packets; + u64 rx_bytes; + u64 rx_multicast; + u64 rx_length_errors; + u64 rx_frame_errors; + u64 rx_fifo_errors; +}; + +struct qeth_out_q_stats { + u64 bufs; + u64 bufs_pack; + u64 buf_elements; + u64 skbs_pack; + u64 skbs_sg; + u64 skbs_csum; + u64 skbs_tso; + u64 skbs_linearized; + u64 skbs_linearized_fail; + u64 tso_bytes; + u64 packing_mode_switch; + u64 stopped; + u64 doorbell; + u64 coal_frames; + u64 completion_yield; + u64 completion_timer; + + /* rtnl_link_stats64 */ + u64 tx_packets; + u64 tx_bytes; + u64 tx_errors; + u64 tx_dropped; +}; + +#define QETH_TX_MAX_COALESCED_FRAMES 1 +#define QETH_TX_COALESCE_USECS 25 +#define QETH_TX_TIMER_USECS 500 + +struct qeth_qdio_out_q { + struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; + struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q]; + struct qdio_outbuf_state *bufstates; /* convenience pointer */ + struct list_head pending_bufs; + struct qeth_out_q_stats stats; + spinlock_t lock; + unsigned int priority; + u8 next_buf_to_fill; + u8 max_elements; + u8 queue_no; + u8 do_pack; + struct qeth_card *card; + /* + * number of buffers that are currently filled (PRIMED) + * -> these buffers are hardware-owned + */ + atomic_t used_buffers; + /* indicates whether PCI flag must be set (or if one is outstanding) */ + atomic_t set_pci_flags_count; + struct napi_struct napi; + struct timer_list timer; + struct qeth_hdr *prev_hdr; + unsigned int coalesced_frames; + u8 bulk_start; + u8 bulk_count; + u8 bulk_max; + + unsigned int coalesce_usecs; + unsigned int max_coalesced_frames; +}; + +#define qeth_for_each_output_queue(card, q, i) \ + for (i = 0; i < card->qdio.no_out_queues && \ + (q = card->qdio.out_qs[i]); i++) + +#define qeth_napi_to_out_queue(n) container_of(n, struct qeth_qdio_out_q, napi) + +static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue, + unsigned long usecs) +{ + timer_reduce(&queue->timer, usecs_to_jiffies(usecs) + jiffies); +} + +static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue) +{ + return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q; +} + +static inline bool qeth_out_queue_is_empty(struct qeth_qdio_out_q *queue) +{ + return atomic_read(&queue->used_buffers) == 0; +} + +struct qeth_qdio_info { + atomic_t state; + /* input */ + int no_in_queues; + struct qeth_qdio_q *in_q; + struct qeth_qdio_q *c_q; + struct qeth_qdio_buffer_pool in_buf_pool; + struct qeth_qdio_buffer_pool 
init_pool; + int in_buf_size; + + /* output */ + unsigned int no_out_queues; + struct qeth_qdio_out_q *out_qs[QETH_MAX_OUT_QUEUES]; + struct qdio_outbuf_state *out_bufstates; + + /* priority queueing */ + int do_prio_queueing; + int default_out_queue; +}; + +/** + * channel state machine + */ +enum qeth_channel_states { + CH_STATE_UP, + CH_STATE_DOWN, + CH_STATE_HALTED, + CH_STATE_STOPPED, +}; +/** + * card state machine + */ +enum qeth_card_states { + CARD_STATE_DOWN, + CARD_STATE_SOFTSETUP, +}; + +/** + * Protocol versions + */ +enum qeth_prot_versions { + QETH_PROT_NONE = 0x0000, + QETH_PROT_IPV4 = 0x0004, + QETH_PROT_IPV6 = 0x0006, +}; + +enum qeth_cq { + QETH_CQ_DISABLED = 0, + QETH_CQ_ENABLED = 1, + QETH_CQ_NOTAVAILABLE = 2, +}; + +struct qeth_ipato { + bool enabled; + bool invert4; + bool invert6; + struct list_head entries; +}; + +struct qeth_channel { + struct ccw_device *ccwdev; + struct qeth_cmd_buffer *active_cmd; + enum qeth_channel_states state; + atomic_t irq_pending; +}; + +struct qeth_reply { + int (*callback)(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data); + void *param; +}; + +struct qeth_cmd_buffer { + struct list_head list; + struct completion done; + spinlock_t lock; + unsigned int length; + refcount_t ref_count; + struct qeth_channel *channel; + struct qeth_reply reply; + long timeout; + unsigned char *data; + void (*finalize)(struct qeth_card *card, struct qeth_cmd_buffer *iob); + bool (*match)(struct qeth_cmd_buffer *iob, + struct qeth_cmd_buffer *reply); + void (*callback)(struct qeth_card *card, struct qeth_cmd_buffer *iob, + unsigned int data_length); + int rc; +}; + +static inline void qeth_get_cmd(struct qeth_cmd_buffer *iob) +{ + refcount_inc(&iob->ref_count); +} + +static inline struct qeth_ipa_cmd *__ipa_reply(struct qeth_cmd_buffer *iob) +{ + if (!IS_IPA(iob->data)) + return NULL; + + return (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); +} + +static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob) +{ + return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); +} + +static inline struct ccw1 *__ccw_from_cmd(struct qeth_cmd_buffer *iob) +{ + return (struct ccw1 *)(iob->data + ALIGN(iob->length, 8)); +} + +static inline bool qeth_trylock_channel(struct qeth_channel *channel) +{ + return atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0; +} + +/** + * OSA card related definitions + */ +struct qeth_token { + __u32 issuer_rm_w; + __u32 issuer_rm_r; + __u32 cm_filter_w; + __u32 cm_filter_r; + __u32 cm_connection_w; + __u32 cm_connection_r; + __u32 ulp_filter_w; + __u32 ulp_filter_r; + __u32 ulp_connection_w; + __u32 ulp_connection_r; +}; + +struct qeth_seqno { + __u32 trans_hdr; + __u32 pdu_hdr; + __u32 pdu_hdr_ack; + __u16 ipa; +}; + +struct qeth_card_blkt { + int time_total; + int inter_packet; + int inter_packet_jumbo; +}; + +enum qeth_pnso_mode { + QETH_PNSO_NONE, + QETH_PNSO_BRIDGEPORT, + QETH_PNSO_ADDR_INFO, +}; + +#define QETH_BROADCAST_WITH_ECHO 0x01 +#define QETH_BROADCAST_WITHOUT_ECHO 0x02 +struct qeth_card_info { + unsigned short unit_addr2; + unsigned short cula; + __u16 func_level; + char mcl_level[QETH_MCL_LENGTH + 1]; + /* doubleword below corresponds to net_if_token */ + u16 ddev_devno; + u8 cssid; + u8 iid; + u8 ssid; + u8 chpid; + u16 chid; + u8 ids_valid:1; /* cssid,iid,chid */ + u8 dev_addr_is_registered:1; + u8 promisc_mode:1; + u8 use_v1_blkt:1; + u8 is_vm_nic:1; + /* no bitfield, we take a pointer on these two: */ + u8 has_lp2lp_cso_v6; + u8 has_lp2lp_cso_v4; + enum qeth_pnso_mode 
pnso_mode; + enum qeth_card_types type; + enum qeth_link_types link_type; + int broadcast_capable; + bool layer_enforced; + struct qeth_card_blkt blkt; + __u32 diagass_support; + __u32 hwtrap; +}; + +enum qeth_discipline_id { + QETH_DISCIPLINE_UNDETERMINED = -1, + QETH_DISCIPLINE_LAYER3 = 0, + QETH_DISCIPLINE_LAYER2 = 1, +}; + +struct qeth_card_options { + struct qeth_ipa_caps ipa4; + struct qeth_ipa_caps ipa6; + struct qeth_routing_info route4; + struct qeth_routing_info route6; + struct qeth_ipa_caps adp; /* Adapter parameters */ + struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */ + struct qeth_vnicc_info vnicc; /* VNICC options */ + enum qeth_discipline_id layer; + enum qeth_ipa_isolation_modes isolation; + int sniffer; + enum qeth_cq cq; + char hsuid[9]; +}; + +#define IS_LAYER2(card) ((card)->options.layer == QETH_DISCIPLINE_LAYER2) +#define IS_LAYER3(card) ((card)->options.layer == QETH_DISCIPLINE_LAYER3) + +/* + * thread bits for qeth_card thread masks + */ +enum qeth_threads { + QETH_RECOVER_THREAD = 1, +}; + +struct qeth_osn_info { + int (*assist_cb)(struct net_device *dev, void *data); + int (*data_cb)(struct sk_buff *skb); +}; + +struct qeth_discipline { + const struct device_type *devtype; + int (*setup) (struct ccwgroup_device *); + void (*remove) (struct ccwgroup_device *); + int (*set_online)(struct qeth_card *card, bool carrier_ok); + void (*set_offline)(struct qeth_card *card); + int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd); + int (*control_event_handler)(struct qeth_card *card, + struct qeth_ipa_cmd *cmd); +}; + +enum qeth_addr_disposition { + QETH_DISP_ADDR_DELETE = 0, + QETH_DISP_ADDR_DO_NOTHING = 1, + QETH_DISP_ADDR_ADD = 2, +}; + +struct qeth_rx { + int b_count; + int b_index; + u8 buf_element; + int e_offset; + int qdio_err; + u8 bufs_refill; +}; + +struct carrier_info { + __u8 card_type; + __u16 port_mode; + __u32 port_speed; +}; + +struct qeth_switch_info { + __u32 capabilities; + __u32 settings; +}; + +struct qeth_priv { + unsigned int rx_copybreak; + unsigned int tx_wanted_queues; + u32 brport_hw_features; + u32 brport_features; +}; + +#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT + +struct qeth_card { + enum qeth_card_states state; + spinlock_t lock; + struct ccwgroup_device *gdev; + struct qeth_cmd_buffer *read_cmd; + struct qeth_channel read; + struct qeth_channel write; + struct qeth_channel data; + + struct net_device *dev; + struct dentry *debugfs; + struct qeth_card_stats stats; + struct qeth_card_info info; + struct qeth_token token; + struct qeth_seqno seqno; + struct qeth_card_options options; + + struct workqueue_struct *event_wq; + struct workqueue_struct *cmd_wq; + wait_queue_head_t wait_q; + + struct mutex ip_lock; + /* protected by ip_lock: */ + DECLARE_HASHTABLE(ip_htable, 4); + struct qeth_ipato ipato; + + DECLARE_HASHTABLE(local_addrs4, 4); + DECLARE_HASHTABLE(local_addrs6, 4); + spinlock_t local_addrs4_lock; + spinlock_t local_addrs6_lock; + DECLARE_HASHTABLE(rx_mode_addrs, 4); + struct work_struct rx_mode_work; + struct work_struct kernel_thread_starter; + spinlock_t thread_mask_lock; + unsigned long thread_start_mask; + unsigned long thread_allowed_mask; + unsigned long thread_running_mask; + struct list_head cmd_waiter_list; + /* QDIO buffer handling */ + struct qeth_qdio_info qdio; + int read_or_write_problem; + struct qeth_osn_info osn_info; + const struct qeth_discipline *discipline; + atomic_t force_alloc_skb; + struct service_level qeth_service_level; + struct qdio_ssqd_desc ssqd; + debug_info_t *debug; + struct 
mutex sbp_lock; + struct mutex conf_mutex; + struct mutex discipline_mutex; + struct napi_struct napi; + struct qeth_rx rx; + struct delayed_work buffer_reclaim_work; + struct work_struct close_dev_work; +}; + +static inline bool qeth_card_hw_is_reachable(struct qeth_card *card) +{ + return card->state == CARD_STATE_SOFTSETUP; +} + +static inline void qeth_unlock_channel(struct qeth_card *card, + struct qeth_channel *channel) +{ + atomic_set(&channel->irq_pending, 0); + wake_up(&card->wait_q); +} + +struct qeth_trap_id { + __u16 lparnr; + char vmname[8]; + __u8 chpid; + __u8 ssid; + __u16 devno; +} __packed; + +static inline bool qeth_uses_tx_prio_queueing(struct qeth_card *card) +{ + return card->qdio.do_prio_queueing != QETH_NO_PRIO_QUEUEING; +} + +static inline unsigned int qeth_tx_actual_queues(struct qeth_card *card) +{ + struct qeth_priv *priv = netdev_priv(card->dev); + + if (qeth_uses_tx_prio_queueing(card)) + return min(card->dev->num_tx_queues, card->qdio.no_out_queues); + + return min(priv->tx_wanted_queues, card->qdio.no_out_queues); +} + +static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq) +{ + if (txq == QETH_IQD_MCAST_TXQ) + return dev->num_tx_queues - 1; + if (txq == dev->num_tx_queues - 1) + return QETH_IQD_MCAST_TXQ; + return txq; +} + +static inline bool qeth_iqd_is_mcast_queue(struct qeth_card *card, + struct qeth_qdio_out_q *queue) +{ + return qeth_iqd_translate_txq(card->dev, queue->queue_no) == + QETH_IQD_MCAST_TXQ; +} + +static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf, + unsigned int elements) +{ + unsigned int i; + + for (i = 0; i < elements; i++) + memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element)); + buf->element[14].sflags = 0; + buf->element[15].sflags = 0; +} + +/** + * qeth_get_elements_for_range() - find number of SBALEs to cover range. + * @start: Start of the address range. + * @end: Address after the end of the range. + * + * Returns the number of pages, and thus QDIO buffer elements, needed to cover + * the specified address range. + */ +static inline int qeth_get_elements_for_range(addr_t start, addr_t end) +{ + return PFN_UP(end) - PFN_DOWN(start); +} + +static inline int qeth_get_ip_version(struct sk_buff *skb) +{ + struct vlan_ethhdr *veth = vlan_eth_hdr(skb); + __be16 prot = veth->h_vlan_proto; + + if (prot == htons(ETH_P_8021Q)) + prot = veth->h_vlan_encapsulated_proto; + + switch (prot) { + case htons(ETH_P_IPV6): + return 6; + case htons(ETH_P_IP): + return 4; + default: + return 0; + } +} + +static inline int qeth_get_ether_cast_type(struct sk_buff *skb) +{ + u8 *addr = eth_hdr(skb)->h_dest; + + if (is_multicast_ether_addr(addr)) + return is_broadcast_ether_addr(addr) ? RTN_BROADCAST : + RTN_MULTICAST; + return RTN_UNICAST; +} + +static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, int ipv) +{ + struct dst_entry *dst = skb_dst(skb); + struct rt6_info *rt; + + rt = (struct rt6_info *) dst; + if (dst) + dst = dst_check(dst, (ipv == 6) ? rt6_get_cookie(rt) : 0); + return dst; +} + +static inline __be32 qeth_next_hop_v4_rcu(struct sk_buff *skb, + struct dst_entry *dst) +{ + struct rtable *rt = (struct rtable *) dst; + + return (rt) ? 
rt_nexthop(rt, ip_hdr(skb)->daddr) : ip_hdr(skb)->daddr; +} + +static inline struct in6_addr *qeth_next_hop_v6_rcu(struct sk_buff *skb, + struct dst_entry *dst) +{ + struct rt6_info *rt = (struct rt6_info *) dst; + + if (rt && !ipv6_addr_any(&rt->rt6i_gateway)) + return &rt->rt6i_gateway; + else + return &ipv6_hdr(skb)->daddr; +} + +static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv) +{ + *flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ; + if ((ipv == 4 && ip_hdr(skb)->protocol == IPPROTO_UDP) || + (ipv == 6 && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) + *flags |= QETH_HDR_EXT_UDP; +} + +static inline void qeth_put_buffer_pool_entry(struct qeth_card *card, + struct qeth_buffer_pool_entry *entry) +{ + list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list); +} + +static inline int qeth_is_diagass_supported(struct qeth_card *card, + enum qeth_diags_cmds cmd) +{ + return card->info.diagass_support & (__u32)cmd; +} + +int qeth_send_simple_setassparms_prot(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + u16 cmd_code, u32 *data, + enum qeth_prot_versions prot); +/* IPv4 variant */ +static inline int qeth_send_simple_setassparms(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + u16 cmd_code, u32 *data) +{ + return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code, + data, QETH_PROT_IPV4); +} + +static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + u16 cmd_code, u32 *data) +{ + return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code, + data, QETH_PROT_IPV6); +} + +int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb); + +extern const struct qeth_discipline qeth_l2_discipline; +extern const struct qeth_discipline qeth_l3_discipline; +extern const struct ethtool_ops qeth_ethtool_ops; +extern const struct ethtool_ops qeth_osn_ethtool_ops; +extern const struct attribute_group *qeth_generic_attr_groups[]; +extern const struct attribute_group *qeth_osn_attr_groups[]; +extern const struct attribute_group qeth_device_attr_group; +extern const struct attribute_group qeth_device_blkt_group; +extern const struct device_type qeth_generic_devtype; + +const char *qeth_get_cardname_short(struct qeth_card *); +int qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count); +int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id); +void qeth_core_free_discipline(struct qeth_card *); + +/* exports for qeth discipline device drivers */ +extern struct kmem_cache *qeth_core_header_cache; +extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; + +struct net_device *qeth_clone_netdev(struct net_device *orig); +struct qeth_card *qeth_get_card_by_busid(char *bus_id); +void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, + int clear_start_mask); +int qeth_threads_running(struct qeth_card *, unsigned long); +int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc, + bool resetting); + +int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, + int (*reply_cb) + (struct qeth_card *, struct qeth_reply *, unsigned long), + void *); +struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card, + enum qeth_ipa_cmds cmd_code, + enum qeth_prot_versions prot, + unsigned int data_length); +struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel, + unsigned int length, unsigned int ccws, + long timeout); +struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, + enum qeth_ipa_funcs 
ipa_func, + u16 cmd_code, + unsigned int data_length, + enum qeth_prot_versions prot); +struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card, + enum qeth_diags_cmds sub_cmd, + unsigned int data_length); +void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason); +void qeth_put_cmd(struct qeth_cmd_buffer *iob); + +int qeth_schedule_recovery(struct qeth_card *card); +int qeth_poll(struct napi_struct *napi, int budget); +void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable); +int qeth_setadpparms_change_macaddr(struct qeth_card *); +void qeth_tx_timeout(struct net_device *, unsigned int txqueue); +void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, + u16 cmd_length, + bool (*match)(struct qeth_cmd_buffer *iob, + struct qeth_cmd_buffer *reply)); +int qeth_query_switch_attributes(struct qeth_card *card, + struct qeth_switch_info *sw_info); +int qeth_query_card_info(struct qeth_card *card, + struct carrier_info *carrier_info); +int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, + enum qeth_ipa_isolation_modes mode); + +unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset); +int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned int offset, unsigned int hd_len, + int elements_needed); +int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...); +int qeth_configure_cq(struct qeth_card *, enum qeth_cq); +int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action); +int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long); +int qeth_set_features(struct net_device *, netdev_features_t); +void qeth_enable_hw_features(struct net_device *dev); +netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); +netdev_features_t qeth_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); +void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats); +int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count); +u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, + u8 cast_type, struct net_device *sb_dev); +int qeth_open(struct net_device *dev); +int qeth_stop(struct net_device *dev); + +int qeth_vm_request_mac(struct qeth_card *card); +int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int ipv, + void (*fill_header)(struct qeth_qdio_out_q *queue, + struct qeth_hdr *hdr, struct sk_buff *skb, + int ipv, unsigned int data_len)); + +/* exports for OSN */ +int qeth_osn_assist(struct net_device *, void *, int); +int qeth_osn_register(unsigned char *read_dev_no, struct net_device **, + int (*assist_cb)(struct net_device *, void *), + int (*data_cb)(struct sk_buff *)); +void qeth_osn_deregister(struct net_device *); + +#endif /* __QETH_CORE_H__ */ diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c new file mode 100644 index 000000000..73d564906 --- /dev/null +++ b/drivers/s390/net/qeth_core_main.c @@ -0,0 +1,7147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 
2007, 2009 + * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, + * Frank Pavlic <fpavlic@de.ibm.com>, + * Thomas Spatzier <tspat@de.ibm.com>, + * Frank Blaschka <frank.blaschka@de.ibm.com> + */ + +#define KMSG_COMPONENT "qeth" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/compat.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/log2.h> +#include <linux/io.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/mii.h> +#include <linux/mm.h> +#include <linux/kthread.h> +#include <linux/slab.h> +#include <linux/if_vlan.h> +#include <linux/netdevice.h> +#include <linux/netdev_features.h> +#include <linux/rcutree.h> +#include <linux/skbuff.h> +#include <linux/vmalloc.h> + +#include <net/iucv/af_iucv.h> +#include <net/dsfield.h> +#include <net/sock.h> + +#include <asm/ebcdic.h> +#include <asm/chpid.h> +#include <asm/sysinfo.h> +#include <asm/diag.h> +#include <asm/cio.h> +#include <asm/ccwdev.h> +#include <asm/cpcmd.h> + +#include "qeth_core.h" + +struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { + /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */ + /* N P A M L V H */ + [QETH_DBF_SETUP] = {"qeth_setup", + 8, 1, 8, 5, &debug_hex_ascii_view, NULL}, + [QETH_DBF_MSG] = {"qeth_msg", 8, 1, 11 * sizeof(long), 3, + &debug_sprintf_view, NULL}, + [QETH_DBF_CTRL] = {"qeth_control", + 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL}, +}; +EXPORT_SYMBOL_GPL(qeth_dbf); + +struct kmem_cache *qeth_core_header_cache; +EXPORT_SYMBOL_GPL(qeth_core_header_cache); +static struct kmem_cache *qeth_qdio_outbuf_cache; + +static struct device *qeth_core_root_dev; +static struct dentry *qeth_debugfs_root; +static struct lock_class_key qdio_out_skb_queue_key; + +static void qeth_issue_next_read_cb(struct qeth_card *card, + struct qeth_cmd_buffer *iob, + unsigned int data_length); +static int qeth_qdio_establish(struct qeth_card *); +static void qeth_free_qdio_queues(struct qeth_card *card); +static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, + struct qeth_qdio_out_buffer *buf, + enum iucv_tx_notify notification); + +static void qeth_close_dev_handler(struct work_struct *work) +{ + struct qeth_card *card; + + card = container_of(work, struct qeth_card, close_dev_work); + QETH_CARD_TEXT(card, 2, "cldevhdl"); + ccwgroup_set_offline(card->gdev); +} + +static const char *qeth_get_cardname(struct qeth_card *card) +{ + if (IS_VM_NIC(card)) { + switch (card->info.type) { + case QETH_CARD_TYPE_OSD: + return " Virtual NIC QDIO"; + case QETH_CARD_TYPE_IQD: + return " Virtual NIC Hiper"; + case QETH_CARD_TYPE_OSM: + return " Virtual NIC QDIO - OSM"; + case QETH_CARD_TYPE_OSX: + return " Virtual NIC QDIO - OSX"; + default: + return " unknown"; + } + } else { + switch (card->info.type) { + case QETH_CARD_TYPE_OSD: + return " OSD Express"; + case QETH_CARD_TYPE_IQD: + return " HiperSockets"; + case QETH_CARD_TYPE_OSN: + return " OSN QDIO"; + case QETH_CARD_TYPE_OSM: + return " OSM QDIO"; + case QETH_CARD_TYPE_OSX: + return " OSX QDIO"; + default: + return " unknown"; + } + } + return " n/a"; +} + +/* max length to be returned: 14 */ +const char *qeth_get_cardname_short(struct qeth_card *card) +{ + if (IS_VM_NIC(card)) { + switch (card->info.type) { + case QETH_CARD_TYPE_OSD: + return "Virt.NIC QDIO"; + case QETH_CARD_TYPE_IQD: + return "Virt.NIC Hiper"; + case QETH_CARD_TYPE_OSM: + return "Virt.NIC OSM"; + case QETH_CARD_TYPE_OSX: + return "Virt.NIC OSX"; + default: + 
return "unknown"; + } + } else { + switch (card->info.type) { + case QETH_CARD_TYPE_OSD: + switch (card->info.link_type) { + case QETH_LINK_TYPE_FAST_ETH: + return "OSD_100"; + case QETH_LINK_TYPE_HSTR: + return "HSTR"; + case QETH_LINK_TYPE_GBIT_ETH: + return "OSD_1000"; + case QETH_LINK_TYPE_10GBIT_ETH: + return "OSD_10GIG"; + case QETH_LINK_TYPE_25GBIT_ETH: + return "OSD_25GIG"; + case QETH_LINK_TYPE_LANE_ETH100: + return "OSD_FE_LANE"; + case QETH_LINK_TYPE_LANE_TR: + return "OSD_TR_LANE"; + case QETH_LINK_TYPE_LANE_ETH1000: + return "OSD_GbE_LANE"; + case QETH_LINK_TYPE_LANE: + return "OSD_ATM_LANE"; + default: + return "OSD_Express"; + } + case QETH_CARD_TYPE_IQD: + return "HiperSockets"; + case QETH_CARD_TYPE_OSN: + return "OSN"; + case QETH_CARD_TYPE_OSM: + return "OSM_1000"; + case QETH_CARD_TYPE_OSX: + return "OSX_10GIG"; + default: + return "unknown"; + } + } + return "n/a"; +} + +void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, + int clear_start_mask) +{ + unsigned long flags; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + card->thread_allowed_mask = threads; + if (clear_start_mask) + card->thread_start_mask &= threads; + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + wake_up(&card->wait_q); +} +EXPORT_SYMBOL_GPL(qeth_set_allowed_threads); + +int qeth_threads_running(struct qeth_card *card, unsigned long threads) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + rc = (card->thread_running_mask & threads); + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_threads_running); + +static void qeth_clear_working_pool_list(struct qeth_card *card) +{ + struct qeth_buffer_pool_entry *pool_entry, *tmp; + struct qeth_qdio_q *queue = card->qdio.in_q; + unsigned int i; + + QETH_CARD_TEXT(card, 5, "clwrklst"); + list_for_each_entry_safe(pool_entry, tmp, + &card->qdio.in_buf_pool.entry_list, list) + list_del(&pool_entry->list); + + if (!queue) + return; + + for (i = 0; i < ARRAY_SIZE(queue->bufs); i++) + queue->bufs[i].pool_entry = NULL; +} + +static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(entry->elements); i++) { + if (entry->elements[i]) + __free_page(entry->elements[i]); + } + + kfree(entry); +} + +static void qeth_free_buffer_pool(struct qeth_card *card) +{ + struct qeth_buffer_pool_entry *entry, *tmp; + + list_for_each_entry_safe(entry, tmp, &card->qdio.init_pool.entry_list, + init_list) { + list_del(&entry->init_list); + qeth_free_pool_entry(entry); + } +} + +static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages) +{ + struct qeth_buffer_pool_entry *entry; + unsigned int i; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return NULL; + + for (i = 0; i < pages; i++) { + entry->elements[i] = __dev_alloc_page(GFP_KERNEL); + + if (!entry->elements[i]) { + qeth_free_pool_entry(entry); + return NULL; + } + } + + return entry; +} + +static int qeth_alloc_buffer_pool(struct qeth_card *card) +{ + unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card); + unsigned int i; + + QETH_CARD_TEXT(card, 5, "alocpool"); + for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { + struct qeth_buffer_pool_entry *entry; + + entry = qeth_alloc_pool_entry(buf_elements); + if (!entry) { + qeth_free_buffer_pool(card); + return -ENOMEM; + } + + list_add(&entry->init_list, &card->qdio.init_pool.entry_list); + } + return 0; +} + +int 
qeth_resize_buffer_pool(struct qeth_card *card, unsigned int count) +{ + unsigned int buf_elements = QETH_MAX_BUFFER_ELEMENTS(card); + struct qeth_qdio_buffer_pool *pool = &card->qdio.init_pool; + struct qeth_buffer_pool_entry *entry, *tmp; + int delta = count - pool->buf_count; + LIST_HEAD(entries); + + QETH_CARD_TEXT(card, 2, "realcbp"); + + /* Defer until queue is allocated: */ + if (!card->qdio.in_q) + goto out; + + /* Remove entries from the pool: */ + while (delta < 0) { + entry = list_first_entry(&pool->entry_list, + struct qeth_buffer_pool_entry, + init_list); + list_del(&entry->init_list); + qeth_free_pool_entry(entry); + + delta++; + } + + /* Allocate additional entries: */ + while (delta > 0) { + entry = qeth_alloc_pool_entry(buf_elements); + if (!entry) { + list_for_each_entry_safe(entry, tmp, &entries, + init_list) { + list_del(&entry->init_list); + qeth_free_pool_entry(entry); + } + + return -ENOMEM; + } + + list_add(&entry->init_list, &entries); + + delta--; + } + + list_splice(&entries, &pool->entry_list); + +out: + card->qdio.in_buf_pool.buf_count = count; + pool->buf_count = count; + return 0; +} +EXPORT_SYMBOL_GPL(qeth_resize_buffer_pool); + +static void qeth_free_qdio_queue(struct qeth_qdio_q *q) +{ + if (!q) + return; + + qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); + kfree(q); +} + +static struct qeth_qdio_q *qeth_alloc_qdio_queue(void) +{ + struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL); + int i; + + if (!q) + return NULL; + + if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) { + kfree(q); + return NULL; + } + + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) + q->bufs[i].buffer = q->qdio_bufs[i]; + + QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *)); + return q; +} + +static int qeth_cq_init(struct qeth_card *card) +{ + int rc; + + if (card->options.cq == QETH_CQ_ENABLED) { + QETH_CARD_TEXT(card, 2, "cqinit"); + qdio_reset_buffers(card->qdio.c_q->qdio_bufs, + QDIO_MAX_BUFFERS_PER_Q); + card->qdio.c_q->next_buf_to_init = 127; + rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, + card->qdio.no_in_queues - 1, 0, + 127); + if (rc) { + QETH_CARD_TEXT_(card, 2, "1err%d", rc); + goto out; + } + } + rc = 0; +out: + return rc; +} + +static int qeth_alloc_cq(struct qeth_card *card) +{ + int rc; + + if (card->options.cq == QETH_CQ_ENABLED) { + int i; + struct qdio_outbuf_state *outbuf_states; + + QETH_CARD_TEXT(card, 2, "cqon"); + card->qdio.c_q = qeth_alloc_qdio_queue(); + if (!card->qdio.c_q) { + rc = -1; + goto kmsg_out; + } + card->qdio.no_in_queues = 2; + card->qdio.out_bufstates = + kcalloc(card->qdio.no_out_queues * + QDIO_MAX_BUFFERS_PER_Q, + sizeof(struct qdio_outbuf_state), + GFP_KERNEL); + outbuf_states = card->qdio.out_bufstates; + if (outbuf_states == NULL) { + rc = -1; + goto free_cq_out; + } + for (i = 0; i < card->qdio.no_out_queues; ++i) { + card->qdio.out_qs[i]->bufstates = outbuf_states; + outbuf_states += QDIO_MAX_BUFFERS_PER_Q; + } + } else { + QETH_CARD_TEXT(card, 2, "nocq"); + card->qdio.c_q = NULL; + card->qdio.no_in_queues = 1; + } + QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues); + rc = 0; +out: + return rc; +free_cq_out: + qeth_free_qdio_queue(card->qdio.c_q); + card->qdio.c_q = NULL; +kmsg_out: + dev_err(&card->gdev->dev, "Failed to create completion queue\n"); + goto out; +} + +static void qeth_free_cq(struct qeth_card *card) +{ + if (card->qdio.c_q) { + --card->qdio.no_in_queues; + qeth_free_qdio_queue(card->qdio.c_q); + card->qdio.c_q = NULL; + } + kfree(card->qdio.out_bufstates); + card->qdio.out_bufstates = 
NULL; +} + +static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, + int delayed) +{ + enum iucv_tx_notify n; + + switch (sbalf15) { + case 0: + n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK; + break; + case 4: + case 16: + case 17: + case 18: + n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE : + TX_NOTIFY_UNREACHABLE; + break; + default: + n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR : + TX_NOTIFY_GENERALERROR; + break; + } + + return n; +} + +static void qeth_qdio_handle_aob(struct qeth_card *card, + unsigned long phys_aob_addr) +{ + enum qeth_qdio_out_buffer_state new_state = QETH_QDIO_BUF_QAOB_OK; + struct qaob *aob; + struct qeth_qdio_out_buffer *buffer; + enum iucv_tx_notify notification; + struct qeth_qdio_out_q *queue; + unsigned int i; + + aob = (struct qaob *) phys_to_virt(phys_aob_addr); + QETH_CARD_TEXT(card, 5, "haob"); + QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr); + buffer = (struct qeth_qdio_out_buffer *) aob->user1; + QETH_CARD_TEXT_(card, 5, "%lx", aob->user1); + + if (aob->aorc) { + QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc); + new_state = QETH_QDIO_BUF_QAOB_ERROR; + } + + switch (atomic_xchg(&buffer->state, new_state)) { + case QETH_QDIO_BUF_PRIMED: + /* Faster than TX completion code, let it handle the async + * completion for us. + */ + break; + case QETH_QDIO_BUF_PENDING: + /* TX completion code is active and will handle the async + * completion for us. + */ + break; + case QETH_QDIO_BUF_NEED_QAOB: + /* TX completion code is already finished. */ + notification = qeth_compute_cq_notification(aob->aorc, 1); + qeth_notify_skbs(buffer->q, buffer, notification); + + /* Free dangling allocations. The attached skbs are handled by + * qeth_tx_complete_pending_bufs(). + */ + for (i = 0; + i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card); + i++) { + void *data = phys_to_virt(aob->sba[i]); + + if (data && buffer->is_header[i]) + kmem_cache_free(qeth_core_header_cache, data); + } + + queue = buffer->q; + atomic_set(&buffer->state, QETH_QDIO_BUF_EMPTY); + napi_schedule(&queue->napi); + break; + default: + WARN_ON_ONCE(1); + } + + qdio_release_aob(aob); +} + +static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len, + void *data) +{ + ccw->cmd_code = cmd_code; + ccw->flags = flags | CCW_FLAG_SLI; + ccw->count = len; + ccw->cda = (__u32) __pa(data); +} + +static int __qeth_issue_next_read(struct qeth_card *card) +{ + struct qeth_cmd_buffer *iob = card->read_cmd; + struct qeth_channel *channel = iob->channel; + struct ccw1 *ccw = __ccw_from_cmd(iob); + int rc; + + QETH_CARD_TEXT(card, 5, "issnxrd"); + if (channel->state != CH_STATE_UP) + return -EIO; + + memset(iob->data, 0, iob->length); + qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data); + iob->callback = qeth_issue_next_read_cb; + /* keep the cmd alive after completion: */ + qeth_get_cmd(iob); + + QETH_CARD_TEXT(card, 6, "noirqpnd"); + rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0); + if (!rc) { + channel->active_cmd = iob; + } else { + QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", + rc, CARD_DEVID(card)); + qeth_unlock_channel(card, channel); + qeth_put_cmd(iob); + card->read_or_write_problem = 1; + qeth_schedule_recovery(card); + } + return rc; +} + +static int qeth_issue_next_read(struct qeth_card *card) +{ + int ret; + + spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card))); + ret = __qeth_issue_next_read(card); + spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card))); + + return ret; +} + +static void qeth_enqueue_cmd(struct 
qeth_card *card, + struct qeth_cmd_buffer *iob) +{ + spin_lock_irq(&card->lock); + list_add_tail(&iob->list, &card->cmd_waiter_list); + spin_unlock_irq(&card->lock); +} + +static void qeth_dequeue_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob) +{ + spin_lock_irq(&card->lock); + list_del(&iob->list); + spin_unlock_irq(&card->lock); +} + +void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason) +{ + iob->rc = reason; + complete(&iob->done); +} +EXPORT_SYMBOL_GPL(qeth_notify_cmd); + +static void qeth_flush_local_addrs4(struct qeth_card *card) +{ + struct qeth_local_addr *addr; + struct hlist_node *tmp; + unsigned int i; + + spin_lock_irq(&card->local_addrs4_lock); + hash_for_each_safe(card->local_addrs4, i, tmp, addr, hnode) { + hash_del_rcu(&addr->hnode); + kfree_rcu(addr, rcu); + } + spin_unlock_irq(&card->local_addrs4_lock); +} + +static void qeth_flush_local_addrs6(struct qeth_card *card) +{ + struct qeth_local_addr *addr; + struct hlist_node *tmp; + unsigned int i; + + spin_lock_irq(&card->local_addrs6_lock); + hash_for_each_safe(card->local_addrs6, i, tmp, addr, hnode) { + hash_del_rcu(&addr->hnode); + kfree_rcu(addr, rcu); + } + spin_unlock_irq(&card->local_addrs6_lock); +} + +static void qeth_flush_local_addrs(struct qeth_card *card) +{ + qeth_flush_local_addrs4(card); + qeth_flush_local_addrs6(card); +} + +static void qeth_add_local_addrs4(struct qeth_card *card, + struct qeth_ipacmd_local_addrs4 *cmd) +{ + unsigned int i; + + if (cmd->addr_length != + sizeof_field(struct qeth_ipacmd_local_addr4, addr)) { + dev_err_ratelimited(&card->gdev->dev, + "Dropped IPv4 ADD LOCAL ADDR event with bad length %u\n", + cmd->addr_length); + return; + } + + spin_lock(&card->local_addrs4_lock); + for (i = 0; i < cmd->count; i++) { + unsigned int key = ipv4_addr_hash(cmd->addrs[i].addr); + struct qeth_local_addr *addr; + bool duplicate = false; + + hash_for_each_possible(card->local_addrs4, addr, hnode, key) { + if (addr->addr.s6_addr32[3] == cmd->addrs[i].addr) { + duplicate = true; + break; + } + } + + if (duplicate) + continue; + + addr = kmalloc(sizeof(*addr), GFP_ATOMIC); + if (!addr) { + dev_err(&card->gdev->dev, + "Failed to allocate local addr object. Traffic to %pI4 might suffer.\n", + &cmd->addrs[i].addr); + continue; + } + + ipv6_addr_set(&addr->addr, 0, 0, 0, cmd->addrs[i].addr); + hash_add_rcu(card->local_addrs4, &addr->hnode, key); + } + spin_unlock(&card->local_addrs4_lock); +} + +static void qeth_add_local_addrs6(struct qeth_card *card, + struct qeth_ipacmd_local_addrs6 *cmd) +{ + unsigned int i; + + if (cmd->addr_length != + sizeof_field(struct qeth_ipacmd_local_addr6, addr)) { + dev_err_ratelimited(&card->gdev->dev, + "Dropped IPv6 ADD LOCAL ADDR event with bad length %u\n", + cmd->addr_length); + return; + } + + spin_lock(&card->local_addrs6_lock); + for (i = 0; i < cmd->count; i++) { + u32 key = ipv6_addr_hash(&cmd->addrs[i].addr); + struct qeth_local_addr *addr; + bool duplicate = false; + + hash_for_each_possible(card->local_addrs6, addr, hnode, key) { + if (ipv6_addr_equal(&addr->addr, &cmd->addrs[i].addr)) { + duplicate = true; + break; + } + } + + if (duplicate) + continue; + + addr = kmalloc(sizeof(*addr), GFP_ATOMIC); + if (!addr) { + dev_err(&card->gdev->dev, + "Failed to allocate local addr object. 
Traffic to %pI6c might suffer.\n", + &cmd->addrs[i].addr); + continue; + } + + addr->addr = cmd->addrs[i].addr; + hash_add_rcu(card->local_addrs6, &addr->hnode, key); + } + spin_unlock(&card->local_addrs6_lock); +} + +static void qeth_del_local_addrs4(struct qeth_card *card, + struct qeth_ipacmd_local_addrs4 *cmd) +{ + unsigned int i; + + if (cmd->addr_length != + sizeof_field(struct qeth_ipacmd_local_addr4, addr)) { + dev_err_ratelimited(&card->gdev->dev, + "Dropped IPv4 DEL LOCAL ADDR event with bad length %u\n", + cmd->addr_length); + return; + } + + spin_lock(&card->local_addrs4_lock); + for (i = 0; i < cmd->count; i++) { + struct qeth_ipacmd_local_addr4 *addr = &cmd->addrs[i]; + unsigned int key = ipv4_addr_hash(addr->addr); + struct qeth_local_addr *tmp; + + hash_for_each_possible(card->local_addrs4, tmp, hnode, key) { + if (tmp->addr.s6_addr32[3] == addr->addr) { + hash_del_rcu(&tmp->hnode); + kfree_rcu(tmp, rcu); + break; + } + } + } + spin_unlock(&card->local_addrs4_lock); +} + +static void qeth_del_local_addrs6(struct qeth_card *card, + struct qeth_ipacmd_local_addrs6 *cmd) +{ + unsigned int i; + + if (cmd->addr_length != + sizeof_field(struct qeth_ipacmd_local_addr6, addr)) { + dev_err_ratelimited(&card->gdev->dev, + "Dropped IPv6 DEL LOCAL ADDR event with bad length %u\n", + cmd->addr_length); + return; + } + + spin_lock(&card->local_addrs6_lock); + for (i = 0; i < cmd->count; i++) { + struct qeth_ipacmd_local_addr6 *addr = &cmd->addrs[i]; + u32 key = ipv6_addr_hash(&addr->addr); + struct qeth_local_addr *tmp; + + hash_for_each_possible(card->local_addrs6, tmp, hnode, key) { + if (ipv6_addr_equal(&tmp->addr, &addr->addr)) { + hash_del_rcu(&tmp->hnode); + kfree_rcu(tmp, rcu); + break; + } + } + } + spin_unlock(&card->local_addrs6_lock); +} + +static bool qeth_next_hop_is_local_v4(struct qeth_card *card, + struct sk_buff *skb) +{ + struct qeth_local_addr *tmp; + bool is_local = false; + unsigned int key; + __be32 next_hop; + + if (hash_empty(card->local_addrs4)) + return false; + + rcu_read_lock(); + next_hop = qeth_next_hop_v4_rcu(skb, qeth_dst_check_rcu(skb, 4)); + key = ipv4_addr_hash(next_hop); + + hash_for_each_possible_rcu(card->local_addrs4, tmp, hnode, key) { + if (tmp->addr.s6_addr32[3] == next_hop) { + is_local = true; + break; + } + } + rcu_read_unlock(); + + return is_local; +} + +static bool qeth_next_hop_is_local_v6(struct qeth_card *card, + struct sk_buff *skb) +{ + struct qeth_local_addr *tmp; + struct in6_addr *next_hop; + bool is_local = false; + u32 key; + + if (hash_empty(card->local_addrs6)) + return false; + + rcu_read_lock(); + next_hop = qeth_next_hop_v6_rcu(skb, qeth_dst_check_rcu(skb, 6)); + key = ipv6_addr_hash(next_hop); + + hash_for_each_possible_rcu(card->local_addrs6, tmp, hnode, key) { + if (ipv6_addr_equal(&tmp->addr, next_hop)) { + is_local = true; + break; + } + } + rcu_read_unlock(); + + return is_local; +} + +static int qeth_debugfs_local_addr_show(struct seq_file *m, void *v) +{ + struct qeth_card *card = m->private; + struct qeth_local_addr *tmp; + unsigned int i; + + rcu_read_lock(); + hash_for_each_rcu(card->local_addrs4, i, tmp, hnode) + seq_printf(m, "%pI4\n", &tmp->addr.s6_addr32[3]); + hash_for_each_rcu(card->local_addrs6, i, tmp, hnode) + seq_printf(m, "%pI6c\n", &tmp->addr); + rcu_read_unlock(); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(qeth_debugfs_local_addr); + +static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, + struct qeth_card *card) +{ + const char *ipa_name; + int com = cmd->hdr.command; + + ipa_name = 
qeth_get_ipa_cmd_name(com); + + if (rc) + QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n", + ipa_name, com, CARD_DEVID(card), rc, + qeth_get_ipa_msg(rc)); + else + QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n", + ipa_name, com, CARD_DEVID(card)); +} + +static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, + struct qeth_ipa_cmd *cmd) +{ + QETH_CARD_TEXT(card, 5, "chkipad"); + + if (IS_IPA_REPLY(cmd)) { + if (cmd->hdr.command != IPA_CMD_SETCCID && + cmd->hdr.command != IPA_CMD_DELCCID && + cmd->hdr.command != IPA_CMD_MODCCID && + cmd->hdr.command != IPA_CMD_SET_DIAG_ASS) + qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); + return cmd; + } + + /* handle unsolicited event: */ + switch (cmd->hdr.command) { + case IPA_CMD_STOPLAN: + if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) { + dev_err(&card->gdev->dev, + "Interface %s is down because the adjacent port is no longer in reflective relay mode\n", + netdev_name(card->dev)); + schedule_work(&card->close_dev_work); + } else { + dev_warn(&card->gdev->dev, + "The link for interface %s on CHPID 0x%X failed\n", + netdev_name(card->dev), card->info.chpid); + qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card); + netif_carrier_off(card->dev); + } + return NULL; + case IPA_CMD_STARTLAN: + dev_info(&card->gdev->dev, + "The link for %s on CHPID 0x%X has been restored\n", + netdev_name(card->dev), card->info.chpid); + if (card->info.hwtrap) + card->info.hwtrap = 2; + qeth_schedule_recovery(card); + return NULL; + case IPA_CMD_SETBRIDGEPORT_IQD: + case IPA_CMD_SETBRIDGEPORT_OSA: + case IPA_CMD_ADDRESS_CHANGE_NOTIF: + if (card->discipline->control_event_handler(card, cmd)) + return cmd; + return NULL; + case IPA_CMD_MODCCID: + return cmd; + case IPA_CMD_REGISTER_LOCAL_ADDR: + if (cmd->hdr.prot_version == QETH_PROT_IPV4) + qeth_add_local_addrs4(card, &cmd->data.local_addrs4); + else if (cmd->hdr.prot_version == QETH_PROT_IPV6) + qeth_add_local_addrs6(card, &cmd->data.local_addrs6); + + QETH_CARD_TEXT(card, 3, "irla"); + return NULL; + case IPA_CMD_UNREGISTER_LOCAL_ADDR: + if (cmd->hdr.prot_version == QETH_PROT_IPV4) + qeth_del_local_addrs4(card, &cmd->data.local_addrs4); + else if (cmd->hdr.prot_version == QETH_PROT_IPV6) + qeth_del_local_addrs6(card, &cmd->data.local_addrs6); + + QETH_CARD_TEXT(card, 3, "urla"); + return NULL; + default: + QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n"); + return cmd; + } +} + +static void qeth_clear_ipacmd_list(struct qeth_card *card) +{ + struct qeth_cmd_buffer *iob; + unsigned long flags; + + QETH_CARD_TEXT(card, 4, "clipalst"); + + spin_lock_irqsave(&card->lock, flags); + list_for_each_entry(iob, &card->cmd_waiter_list, list) + qeth_notify_cmd(iob, -ECANCELED); + spin_unlock_irqrestore(&card->lock, flags); +} + +static int qeth_check_idx_response(struct qeth_card *card, + unsigned char *buffer) +{ + QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); + if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) { + QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n", + buffer[4]); + QETH_CARD_TEXT(card, 2, "ckidxres"); + QETH_CARD_TEXT(card, 2, " idxterm"); + QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]); + if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT || + buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) { + dev_err(&card->gdev->dev, + "The device does not support the configured transport mode\n"); + return -EPROTONOSUPPORT; + } + return -EIO; + } + return 0; +} + +void qeth_put_cmd(struct qeth_cmd_buffer *iob) +{ + if 
(refcount_dec_and_test(&iob->ref_count)) { + kfree(iob->data); + kfree(iob); + } +} +EXPORT_SYMBOL_GPL(qeth_put_cmd); + +static void qeth_release_buffer_cb(struct qeth_card *card, + struct qeth_cmd_buffer *iob, + unsigned int data_length) +{ + qeth_put_cmd(iob); +} + +static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc) +{ + qeth_notify_cmd(iob, rc); + qeth_put_cmd(iob); +} + +struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel, + unsigned int length, unsigned int ccws, + long timeout) +{ + struct qeth_cmd_buffer *iob; + + if (length > QETH_BUFSIZE) + return NULL; + + iob = kzalloc(sizeof(*iob), GFP_KERNEL); + if (!iob) + return NULL; + + iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1), + GFP_KERNEL | GFP_DMA); + if (!iob->data) { + kfree(iob); + return NULL; + } + + init_completion(&iob->done); + spin_lock_init(&iob->lock); + INIT_LIST_HEAD(&iob->list); + refcount_set(&iob->ref_count, 1); + iob->channel = channel; + iob->timeout = timeout; + iob->length = length; + return iob; +} +EXPORT_SYMBOL_GPL(qeth_alloc_cmd); + +static void qeth_issue_next_read_cb(struct qeth_card *card, + struct qeth_cmd_buffer *iob, + unsigned int data_length) +{ + struct qeth_cmd_buffer *request = NULL; + struct qeth_ipa_cmd *cmd = NULL; + struct qeth_reply *reply = NULL; + struct qeth_cmd_buffer *tmp; + unsigned long flags; + int rc = 0; + + QETH_CARD_TEXT(card, 4, "sndctlcb"); + rc = qeth_check_idx_response(card, iob->data); + switch (rc) { + case 0: + break; + case -EIO: + qeth_schedule_recovery(card); + fallthrough; + default: + qeth_clear_ipacmd_list(card); + goto err_idx; + } + + cmd = __ipa_reply(iob); + if (cmd) { + cmd = qeth_check_ipa_data(card, cmd); + if (!cmd) + goto out; + if (IS_OSN(card) && card->osn_info.assist_cb && + cmd->hdr.command != IPA_CMD_STARTLAN) { + card->osn_info.assist_cb(card->dev, cmd); + goto out; + } + } + + /* match against pending cmd requests */ + spin_lock_irqsave(&card->lock, flags); + list_for_each_entry(tmp, &card->cmd_waiter_list, list) { + if (tmp->match && tmp->match(tmp, iob)) { + request = tmp; + /* take the object outside the lock */ + qeth_get_cmd(request); + break; + } + } + spin_unlock_irqrestore(&card->lock, flags); + + if (!request) + goto out; + + reply = &request->reply; + if (!reply->callback) { + rc = 0; + goto no_callback; + } + + spin_lock_irqsave(&request->lock, flags); + if (request->rc) + /* Bail out when the requestor has already left: */ + rc = request->rc; + else + rc = reply->callback(card, reply, cmd ? 
(unsigned long)cmd : + (unsigned long)iob); + spin_unlock_irqrestore(&request->lock, flags); + +no_callback: + if (rc <= 0) + qeth_notify_cmd(request, rc); + qeth_put_cmd(request); +out: + memcpy(&card->seqno.pdu_hdr_ack, + QETH_PDU_HEADER_SEQ_NO(iob->data), + QETH_SEQ_NO_LENGTH); + __qeth_issue_next_read(card); +err_idx: + qeth_put_cmd(iob); +} + +static int qeth_set_thread_start_bit(struct qeth_card *card, + unsigned long thread) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + if (!(card->thread_allowed_mask & thread)) + rc = -EPERM; + else if (card->thread_start_mask & thread) + rc = -EBUSY; + else + card->thread_start_mask |= thread; + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + + return rc; +} + +static void qeth_clear_thread_start_bit(struct qeth_card *card, + unsigned long thread) +{ + unsigned long flags; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + card->thread_start_mask &= ~thread; + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + wake_up(&card->wait_q); +} + +static void qeth_clear_thread_running_bit(struct qeth_card *card, + unsigned long thread) +{ + unsigned long flags; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + card->thread_running_mask &= ~thread; + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + wake_up_all(&card->wait_q); +} + +static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + if (card->thread_start_mask & thread) { + if ((card->thread_allowed_mask & thread) && + !(card->thread_running_mask & thread)) { + rc = 1; + card->thread_start_mask &= ~thread; + card->thread_running_mask |= thread; + } else + rc = -EPERM; + } + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + return rc; +} + +static int qeth_do_run_thread(struct qeth_card *card, unsigned long thread) +{ + int rc = 0; + + wait_event(card->wait_q, + (rc = __qeth_do_run_thread(card, thread)) >= 0); + return rc; +} + +int qeth_schedule_recovery(struct qeth_card *card) +{ + int rc; + + QETH_CARD_TEXT(card, 2, "startrec"); + + rc = qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD); + if (!rc) + schedule_work(&card->kernel_thread_starter); + + return rc; +} + +static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev, + struct irb *irb) +{ + int dstat, cstat; + char *sense; + + sense = (char *) irb->ecw; + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; + + if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | + SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | + SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { + QETH_CARD_TEXT(card, 2, "CGENCHK"); + dev_warn(&cdev->dev, "The qeth device driver " + "failed to recover an error on the device\n"); + QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n", + CCW_DEVID(cdev), dstat, cstat); + print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, + 16, 1, irb, 64, 1); + return -EIO; + } + + if (dstat & DEV_STAT_UNIT_CHECK) { + if (sense[SENSE_RESETTING_EVENT_BYTE] & + SENSE_RESETTING_EVENT_FLAG) { + QETH_CARD_TEXT(card, 2, "REVIND"); + return -EIO; + } + if (sense[SENSE_COMMAND_REJECT_BYTE] & + SENSE_COMMAND_REJECT_FLAG) { + QETH_CARD_TEXT(card, 2, "CMDREJi"); + return -EIO; + } + if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { + QETH_CARD_TEXT(card, 2, "AFFE"); + return -EIO; + } + if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { + QETH_CARD_TEXT(card, 2, 
"ZEROSEN"); + return 0; + } + QETH_CARD_TEXT(card, 2, "DGENCHK"); + return -EIO; + } + return 0; +} + +static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev, + struct irb *irb) +{ + if (!IS_ERR(irb)) + return 0; + + switch (PTR_ERR(irb)) { + case -EIO: + QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n", + CCW_DEVID(cdev)); + QETH_CARD_TEXT(card, 2, "ckirberr"); + QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); + return -EIO; + case -ETIMEDOUT: + dev_warn(&cdev->dev, "A hardware operation timed out" + " on the device\n"); + QETH_CARD_TEXT(card, 2, "ckirberr"); + QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT); + return -ETIMEDOUT; + default: + QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n", + PTR_ERR(irb), CCW_DEVID(cdev)); + QETH_CARD_TEXT(card, 2, "ckirberr"); + QETH_CARD_TEXT(card, 2, " rc???"); + return PTR_ERR(irb); + } +} + +static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, + struct irb *irb) +{ + int rc; + int cstat, dstat; + struct qeth_cmd_buffer *iob = NULL; + struct ccwgroup_device *gdev; + struct qeth_channel *channel; + struct qeth_card *card; + + /* while we hold the ccwdev lock, this stays valid: */ + gdev = dev_get_drvdata(&cdev->dev); + card = dev_get_drvdata(&gdev->dev); + + QETH_CARD_TEXT(card, 5, "irq"); + + if (card->read.ccwdev == cdev) { + channel = &card->read; + QETH_CARD_TEXT(card, 5, "read"); + } else if (card->write.ccwdev == cdev) { + channel = &card->write; + QETH_CARD_TEXT(card, 5, "write"); + } else { + channel = &card->data; + QETH_CARD_TEXT(card, 5, "data"); + } + + if (intparm == 0) { + QETH_CARD_TEXT(card, 5, "irqunsol"); + } else if ((addr_t)intparm != (addr_t)channel->active_cmd) { + QETH_CARD_TEXT(card, 5, "irqunexp"); + + dev_err(&cdev->dev, + "Received IRQ with intparm %lx, expected %px\n", + intparm, channel->active_cmd); + if (channel->active_cmd) + qeth_cancel_cmd(channel->active_cmd, -EIO); + } else { + iob = (struct qeth_cmd_buffer *) (addr_t)intparm; + } + + channel->active_cmd = NULL; + qeth_unlock_channel(card, channel); + + rc = qeth_check_irb_error(card, cdev, irb); + if (rc) { + /* IO was terminated, free its resources. 
*/ + if (iob) + qeth_cancel_cmd(iob, rc); + return; + } + + if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) { + channel->state = CH_STATE_STOPPED; + wake_up(&card->wait_q); + } + + if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) { + channel->state = CH_STATE_HALTED; + wake_up(&card->wait_q); + } + + if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC | + SCSW_FCTL_HALT_FUNC))) { + qeth_cancel_cmd(iob, -ECANCELED); + iob = NULL; + } + + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; + + if ((dstat & DEV_STAT_UNIT_EXCEP) || + (dstat & DEV_STAT_UNIT_CHECK) || + (cstat)) { + if (irb->esw.esw0.erw.cons) { + dev_warn(&channel->ccwdev->dev, + "The qeth device driver failed to recover " + "an error on the device\n"); + QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n", + CCW_DEVID(channel->ccwdev), cstat, + dstat); + print_hex_dump(KERN_WARNING, "qeth: irb ", + DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1); + print_hex_dump(KERN_WARNING, "qeth: sense data ", + DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1); + } + + rc = qeth_get_problem(card, cdev, irb); + if (rc) { + card->read_or_write_problem = 1; + if (iob) + qeth_cancel_cmd(iob, rc); + qeth_clear_ipacmd_list(card); + qeth_schedule_recovery(card); + return; + } + } + + if (iob) { + /* sanity check: */ + if (irb->scsw.cmd.count > iob->length) { + qeth_cancel_cmd(iob, -EIO); + return; + } + if (iob->callback) + iob->callback(card, iob, + iob->length - irb->scsw.cmd.count); + } +} + +static void qeth_notify_skbs(struct qeth_qdio_out_q *q, + struct qeth_qdio_out_buffer *buf, + enum iucv_tx_notify notification) +{ + struct sk_buff *skb; + + skb_queue_walk(&buf->skb_list, skb) { + QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification); + QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb); + if (skb->sk && skb->sk->sk_family == PF_IUCV) + iucv_sk(skb->sk)->sk_txnotify(skb, notification); + } +} + +static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error, + int budget) +{ + struct qeth_qdio_out_q *queue = buf->q; + struct sk_buff *skb; + + /* Empty buffer? */ + if (buf->next_element_to_fill == 0) + return; + + QETH_TXQ_STAT_INC(queue, bufs); + QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill); + if (error) { + QETH_TXQ_STAT_ADD(queue, tx_errors, buf->frames); + } else { + QETH_TXQ_STAT_ADD(queue, tx_packets, buf->frames); + QETH_TXQ_STAT_ADD(queue, tx_bytes, buf->bytes); + } + + while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) { + unsigned int bytes = qdisc_pkt_len(skb); + bool is_tso = skb_is_gso(skb); + unsigned int packets; + + packets = is_tso ? skb_shinfo(skb)->gso_segs : 1; + if (!error) { + if (skb->ip_summed == CHECKSUM_PARTIAL) + QETH_TXQ_STAT_ADD(queue, skbs_csum, packets); + if (skb_is_nonlinear(skb)) + QETH_TXQ_STAT_INC(queue, skbs_sg); + if (is_tso) { + QETH_TXQ_STAT_INC(queue, skbs_tso); + QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes); + } + } + + napi_consume_skb(skb, budget); + } +} + +static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, + struct qeth_qdio_out_buffer *buf, + bool error, int budget) +{ + int i; + + /* is PCI flag set on buffer? 
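If so, this buffer no longer holds an outstanding PCI interrupt request, so drop it from the queue's set_pci_flags_count below.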
*/ + if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) + atomic_dec(&queue->set_pci_flags_count); + + qeth_tx_complete_buf(buf, error, budget); + + for (i = 0; i < queue->max_elements; ++i) { + void *data = phys_to_virt(buf->buffer->element[i].addr); + + if (data && buf->is_header[i]) + kmem_cache_free(qeth_core_header_cache, data); + buf->is_header[i] = 0; + } + + qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements); + buf->next_element_to_fill = 0; + buf->frames = 0; + buf->bytes = 0; + atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); +} + +static void qeth_tx_complete_pending_bufs(struct qeth_card *card, + struct qeth_qdio_out_q *queue, + bool drain) +{ + struct qeth_qdio_out_buffer *buf, *tmp; + + list_for_each_entry_safe(buf, tmp, &queue->pending_bufs, list_entry) { + if (drain || atomic_read(&buf->state) == QETH_QDIO_BUF_EMPTY) { + QETH_CARD_TEXT(card, 5, "fp"); + QETH_CARD_TEXT_(card, 5, "%lx", (long) buf); + + if (drain) + qeth_notify_skbs(queue, buf, + TX_NOTIFY_GENERALERROR); + qeth_tx_complete_buf(buf, drain, 0); + + list_del(&buf->list_entry); + kmem_cache_free(qeth_qdio_outbuf_cache, buf); + } + } +} + +static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free) +{ + int j; + + qeth_tx_complete_pending_bufs(q->card, q, true); + + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { + if (!q->bufs[j]) + continue; + + qeth_clear_output_buffer(q, q->bufs[j], true, 0); + if (free) { + kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]); + q->bufs[j] = NULL; + } + } +} + +static void qeth_drain_output_queues(struct qeth_card *card) +{ + int i; + + QETH_CARD_TEXT(card, 2, "clearqdbf"); + /* clear outbound buffers to free skbs */ + for (i = 0; i < card->qdio.no_out_queues; ++i) { + if (card->qdio.out_qs[i]) + qeth_drain_output_queue(card->qdio.out_qs[i], false); + } +} + +static void qeth_osa_set_output_queues(struct qeth_card *card, bool single) +{ + unsigned int max = single ? 
1 : card->dev->num_tx_queues; + + if (card->qdio.no_out_queues == max) + return; + + if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) + qeth_free_qdio_queues(card); + + if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT) + dev_info(&card->gdev->dev, "Priority Queueing not supported\n"); + + card->qdio.no_out_queues = max; +} + +static int qeth_update_from_chp_desc(struct qeth_card *card) +{ + struct ccw_device *ccwdev; + struct channel_path_desc_fmt0 *chp_dsc; + + QETH_CARD_TEXT(card, 2, "chp_desc"); + + ccwdev = card->data.ccwdev; + chp_dsc = ccw_device_get_chp_desc(ccwdev, 0); + if (!chp_dsc) + return -ENOMEM; + + card->info.func_level = 0x4100 + chp_dsc->desc; + + if (IS_OSD(card) || IS_OSX(card)) + /* CHPP field bit 6 == 1 -> single queue */ + qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02); + + kfree(chp_dsc); + QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues); + QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level); + return 0; +} + +static void qeth_init_qdio_info(struct qeth_card *card) +{ + QETH_CARD_TEXT(card, 4, "intqdinf"); + atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); + card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; + + /* inbound */ + card->qdio.no_in_queues = 1; + card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; + if (IS_IQD(card)) + card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT; + else + card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; + card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count; + INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list); + INIT_LIST_HEAD(&card->qdio.init_pool.entry_list); +} + +static void qeth_set_initial_options(struct qeth_card *card) +{ + card->options.route4.type = NO_ROUTER; + card->options.route6.type = NO_ROUTER; + card->options.isolation = ISOLATION_MODE_NONE; + card->options.cq = QETH_CQ_DISABLED; + card->options.layer = QETH_DISCIPLINE_UNDETERMINED; +} + +static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + QETH_CARD_TEXT_(card, 4, " %02x%02x%02x", + (u8) card->thread_start_mask, + (u8) card->thread_allowed_mask, + (u8) card->thread_running_mask); + rc = (card->thread_start_mask & thread); + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + return rc; +} + +static int qeth_do_reset(void *data); +static void qeth_start_kernel_thread(struct work_struct *work) +{ + struct task_struct *ts; + struct qeth_card *card = container_of(work, struct qeth_card, + kernel_thread_starter); + QETH_CARD_TEXT(card, 2, "strthrd"); + + if (card->read.state != CH_STATE_UP && + card->write.state != CH_STATE_UP) + return; + if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) { + ts = kthread_run(qeth_do_reset, card, "qeth_recover"); + if (IS_ERR(ts)) { + qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); + qeth_clear_thread_running_bit(card, + QETH_RECOVER_THREAD); + } + } +} + +static void qeth_buffer_reclaim_work(struct work_struct *); +static void qeth_setup_card(struct qeth_card *card) +{ + QETH_CARD_TEXT(card, 2, "setupcrd"); + + card->info.type = CARD_RDEV(card)->id.driver_info; + card->state = CARD_STATE_DOWN; + spin_lock_init(&card->lock); + spin_lock_init(&card->thread_mask_lock); + mutex_init(&card->conf_mutex); + mutex_init(&card->discipline_mutex); + INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread); + INIT_LIST_HEAD(&card->cmd_waiter_list); + 
init_waitqueue_head(&card->wait_q); + qeth_set_initial_options(card); + /* IP address takeover */ + INIT_LIST_HEAD(&card->ipato.entries); + qeth_init_qdio_info(card); + INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); + INIT_WORK(&card->close_dev_work, qeth_close_dev_handler); + hash_init(card->rx_mode_addrs); + hash_init(card->local_addrs4); + hash_init(card->local_addrs6); + spin_lock_init(&card->local_addrs4_lock); + spin_lock_init(&card->local_addrs6_lock); +} + +static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr) +{ + struct qeth_card *card = container_of(slr, struct qeth_card, + qeth_service_level); + if (card->info.mcl_level[0]) + seq_printf(m, "qeth: %s firmware level %s\n", + CARD_BUS_ID(card), card->info.mcl_level); +} + +static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev) +{ + struct qeth_card *card; + + QETH_DBF_TEXT(SETUP, 2, "alloccrd"); + card = kzalloc(sizeof(*card), GFP_KERNEL); + if (!card) + goto out; + QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); + + card->gdev = gdev; + dev_set_drvdata(&gdev->dev, card); + CARD_RDEV(card) = gdev->cdev[0]; + CARD_WDEV(card) = gdev->cdev[1]; + CARD_DDEV(card) = gdev->cdev[2]; + + card->event_wq = alloc_ordered_workqueue("%s_event", 0, + dev_name(&gdev->dev)); + if (!card->event_wq) + goto out_wq; + + card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0); + if (!card->read_cmd) + goto out_read_cmd; + + card->debugfs = debugfs_create_dir(dev_name(&gdev->dev), + qeth_debugfs_root); + debugfs_create_file("local_addrs", 0400, card->debugfs, card, + &qeth_debugfs_local_addr_fops); + + card->qeth_service_level.seq_print = qeth_core_sl_print; + register_service_level(&card->qeth_service_level); + return card; + +out_read_cmd: + destroy_workqueue(card->event_wq); +out_wq: + dev_set_drvdata(&gdev->dev, NULL); + kfree(card); +out: + return NULL; +} + +static int qeth_clear_channel(struct qeth_card *card, + struct qeth_channel *channel) +{ + int rc; + + QETH_CARD_TEXT(card, 3, "clearch"); + spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); + rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd); + spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); + + if (rc) + return rc; + rc = wait_event_interruptible_timeout(card->wait_q, + channel->state == CH_STATE_STOPPED, QETH_TIMEOUT); + if (rc == -ERESTARTSYS) + return rc; + if (channel->state != CH_STATE_STOPPED) + return -ETIME; + channel->state = CH_STATE_DOWN; + return 0; +} + +static int qeth_halt_channel(struct qeth_card *card, + struct qeth_channel *channel) +{ + int rc; + + QETH_CARD_TEXT(card, 3, "haltch"); + spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); + rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd); + spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); + + if (rc) + return rc; + rc = wait_event_interruptible_timeout(card->wait_q, + channel->state == CH_STATE_HALTED, QETH_TIMEOUT); + if (rc == -ERESTARTSYS) + return rc; + if (channel->state != CH_STATE_HALTED) + return -ETIME; + return 0; +} + +static int qeth_stop_channel(struct qeth_channel *channel) +{ + struct ccw_device *cdev = channel->ccwdev; + int rc; + + rc = ccw_device_set_offline(cdev); + + spin_lock_irq(get_ccwdev_lock(cdev)); + if (channel->active_cmd) { + dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n", + channel->active_cmd); + channel->active_cmd = NULL; + } + cdev->handler = NULL; + spin_unlock_irq(get_ccwdev_lock(cdev)); + + return rc; +} + +static int qeth_start_channel(struct qeth_channel 
*channel) +{ + struct ccw_device *cdev = channel->ccwdev; + int rc; + + channel->state = CH_STATE_DOWN; + atomic_set(&channel->irq_pending, 0); + + spin_lock_irq(get_ccwdev_lock(cdev)); + cdev->handler = qeth_irq; + spin_unlock_irq(get_ccwdev_lock(cdev)); + + rc = ccw_device_set_online(cdev); + if (rc) + goto err; + + return 0; + +err: + spin_lock_irq(get_ccwdev_lock(cdev)); + cdev->handler = NULL; + spin_unlock_irq(get_ccwdev_lock(cdev)); + return rc; +} + +static int qeth_halt_channels(struct qeth_card *card) +{ + int rc1 = 0, rc2 = 0, rc3 = 0; + + QETH_CARD_TEXT(card, 3, "haltchs"); + rc1 = qeth_halt_channel(card, &card->read); + rc2 = qeth_halt_channel(card, &card->write); + rc3 = qeth_halt_channel(card, &card->data); + if (rc1) + return rc1; + if (rc2) + return rc2; + return rc3; +} + +static int qeth_clear_channels(struct qeth_card *card) +{ + int rc1 = 0, rc2 = 0, rc3 = 0; + + QETH_CARD_TEXT(card, 3, "clearchs"); + rc1 = qeth_clear_channel(card, &card->read); + rc2 = qeth_clear_channel(card, &card->write); + rc3 = qeth_clear_channel(card, &card->data); + if (rc1) + return rc1; + if (rc2) + return rc2; + return rc3; +} + +static int qeth_clear_halt_card(struct qeth_card *card, int halt) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 3, "clhacrd"); + + if (halt) + rc = qeth_halt_channels(card); + if (rc) + return rc; + return qeth_clear_channels(card); +} + +static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 3, "qdioclr"); + switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED, + QETH_QDIO_CLEANING)) { + case QETH_QDIO_ESTABLISHED: + if (IS_IQD(card)) + rc = qdio_shutdown(CARD_DDEV(card), + QDIO_FLAG_CLEANUP_USING_HALT); + else + rc = qdio_shutdown(CARD_DDEV(card), + QDIO_FLAG_CLEANUP_USING_CLEAR); + if (rc) + QETH_CARD_TEXT_(card, 3, "1err%d", rc); + atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); + break; + case QETH_QDIO_CLEANING: + return rc; + default: + break; + } + rc = qeth_clear_halt_card(card, use_halt); + if (rc) + QETH_CARD_TEXT_(card, 3, "2err%d", rc); + return rc; +} + +static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card) +{ + enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED; + struct diag26c_vnic_resp *response = NULL; + struct diag26c_vnic_req *request = NULL; + struct ccw_dev_id id; + char userid[80]; + int rc = 0; + + QETH_CARD_TEXT(card, 2, "vmlayer"); + + cpcmd("QUERY USERID", userid, sizeof(userid), &rc); + if (rc) + goto out; + + request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); + response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); + if (!request || !response) { + rc = -ENOMEM; + goto out; + } + + ccw_device_get_id(CARD_RDEV(card), &id); + request->resp_buf_len = sizeof(*response); + request->resp_version = DIAG26C_VERSION6_VM65918; + request->req_format = DIAG26C_VNIC_INFO; + ASCEBC(userid, 8); + memcpy(&request->sys_name, userid, 8); + request->devno = id.devno; + + QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); + rc = diag26c(request, response, DIAG26C_PORT_VNIC); + QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); + if (rc) + goto out; + QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); + + if (request->resp_buf_len < sizeof(*response) || + response->version != request->resp_version) { + rc = -EIO; + goto out; + } + + if (response->protocol == VNIC_INFO_PROT_L2) + disc = QETH_DISCIPLINE_LAYER2; + else if (response->protocol == VNIC_INFO_PROT_L3) + disc = QETH_DISCIPLINE_LAYER3; + +out: + kfree(response); + kfree(request); + if (rc) + 
QETH_CARD_TEXT_(card, 2, "err%x", rc); + return disc; +} + +/* Determine whether the device requires a specific layer discipline */ +static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card) +{ + enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED; + + if (IS_OSM(card) || IS_OSN(card)) + disc = QETH_DISCIPLINE_LAYER2; + else if (IS_VM_NIC(card)) + disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : + qeth_vm_detect_layer(card); + + switch (disc) { + case QETH_DISCIPLINE_LAYER2: + QETH_CARD_TEXT(card, 3, "force l2"); + break; + case QETH_DISCIPLINE_LAYER3: + QETH_CARD_TEXT(card, 3, "force l3"); + break; + default: + QETH_CARD_TEXT(card, 3, "force no"); + } + + return disc; +} + +static void qeth_set_blkt_defaults(struct qeth_card *card) +{ + QETH_CARD_TEXT(card, 2, "cfgblkt"); + + if (card->info.use_v1_blkt) { + card->info.blkt.time_total = 0; + card->info.blkt.inter_packet = 0; + card->info.blkt.inter_packet_jumbo = 0; + } else { + card->info.blkt.time_total = 250; + card->info.blkt.inter_packet = 5; + card->info.blkt.inter_packet_jumbo = 15; + } +} + +static void qeth_idx_init(struct qeth_card *card) +{ + memset(&card->seqno, 0, sizeof(card->seqno)); + + card->token.issuer_rm_w = 0x00010103UL; + card->token.cm_filter_w = 0x00010108UL; + card->token.cm_connection_w = 0x0001010aUL; + card->token.ulp_filter_w = 0x0001010bUL; + card->token.ulp_connection_w = 0x0001010dUL; + + switch (card->info.type) { + case QETH_CARD_TYPE_IQD: + card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD; + break; + case QETH_CARD_TYPE_OSD: + case QETH_CARD_TYPE_OSN: + card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD; + break; + default: + break; + } +} + +static void qeth_idx_finalize_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob) +{ + memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr, + QETH_SEQ_NO_LENGTH); + if (iob->channel == &card->write) + card->seqno.trans_hdr++; +} + +static int qeth_peer_func_level(int level) +{ + if ((level & 0xff) == 8) + return (level & 0xff) + 0x400; + if (((level >> 8) & 3) == 1) + return (level & 0xff) + 0x200; + return level; +} + +static void qeth_mpc_finalize_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob) +{ + qeth_idx_finalize_cmd(card, iob); + + memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data), + &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH); + card->seqno.pdu_hdr++; + memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), + &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); + + iob->callback = qeth_release_buffer_cb; +} + +static bool qeth_mpc_match_reply(struct qeth_cmd_buffer *iob, + struct qeth_cmd_buffer *reply) +{ + /* MPC cmds are issued strictly in sequence. 
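Any reply that is not an IPA reply can therefore only belong to the oldest pending MPC command, so no further matching on sequence numbers is needed.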
*/ + return !IS_IPA(reply->data); +} + +static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card, + const void *data, + unsigned int data_length) +{ + struct qeth_cmd_buffer *iob; + + iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT); + if (!iob) + return NULL; + + memcpy(iob->data, data, data_length); + qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length, + iob->data); + iob->finalize = qeth_mpc_finalize_cmd; + iob->match = qeth_mpc_match_reply; + return iob; +} + +/** + * qeth_send_control_data() - send control command to the card + * @card: qeth_card structure pointer + * @iob: qeth_cmd_buffer pointer + * @reply_cb: callback function pointer + * @cb_card: pointer to the qeth_card structure + * @cb_reply: pointer to the qeth_reply structure + * @cb_cmd: pointer to the original iob for non-IPA + * commands, or to the qeth_ipa_cmd structure + * for the IPA commands. + * @reply_param: private pointer passed to the callback + * + * Callback function gets called one or more times, with cb_cmd + * pointing to the response returned by the hardware. Callback + * function must return + * > 0 if more reply blocks are expected, + * 0 if the last or only reply block is received, and + * < 0 on error. + * Callback function can get the value of the reply_param pointer from the + * field 'param' of the structure qeth_reply. + */ + +static int qeth_send_control_data(struct qeth_card *card, + struct qeth_cmd_buffer *iob, + int (*reply_cb)(struct qeth_card *cb_card, + struct qeth_reply *cb_reply, + unsigned long cb_cmd), + void *reply_param) +{ + struct qeth_channel *channel = iob->channel; + struct qeth_reply *reply = &iob->reply; + long timeout = iob->timeout; + int rc; + + QETH_CARD_TEXT(card, 2, "sendctl"); + + reply->callback = reply_cb; + reply->param = reply_param; + + timeout = wait_event_interruptible_timeout(card->wait_q, + qeth_trylock_channel(channel), + timeout); + if (timeout <= 0) { + qeth_put_cmd(iob); + return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME; + } + + if (iob->finalize) + iob->finalize(card, iob); + QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN)); + + qeth_enqueue_cmd(card, iob); + + /* This pairs with iob->callback, and keeps the iob alive after IO: */ + qeth_get_cmd(iob); + + QETH_CARD_TEXT(card, 6, "noirqpnd"); + spin_lock_irq(get_ccwdev_lock(channel->ccwdev)); + rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob), + (addr_t) iob, 0, 0, timeout); + if (!rc) + channel->active_cmd = iob; + spin_unlock_irq(get_ccwdev_lock(channel->ccwdev)); + if (rc) { + QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n", + CARD_DEVID(card), rc); + QETH_CARD_TEXT_(card, 2, " err%d", rc); + qeth_dequeue_cmd(card, iob); + qeth_put_cmd(iob); + qeth_unlock_channel(card, channel); + goto out; + } + + timeout = wait_for_completion_interruptible_timeout(&iob->done, + timeout); + if (timeout <= 0) + rc = (timeout == -ERESTARTSYS) ? 
-EINTR : -ETIME; + + qeth_dequeue_cmd(card, iob); + + if (reply_cb) { + /* Wait until the callback for a late reply has completed: */ + spin_lock_irq(&iob->lock); + if (rc) + /* Zap any callback that's still pending: */ + iob->rc = rc; + spin_unlock_irq(&iob->lock); + } + + if (!rc) + rc = iob->rc; + +out: + qeth_put_cmd(iob); + return rc; +} + +struct qeth_node_desc { + struct node_descriptor nd1; + struct node_descriptor nd2; + struct node_descriptor nd3; +}; + +static void qeth_read_conf_data_cb(struct qeth_card *card, + struct qeth_cmd_buffer *iob, + unsigned int data_length) +{ + struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data; + int rc = 0; + u8 *tag; + + QETH_CARD_TEXT(card, 2, "cfgunit"); + + if (data_length < sizeof(*nd)) { + rc = -EINVAL; + goto out; + } + + card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] && + nd->nd1.plant[1] == _ascebc['M']; + tag = (u8 *)&nd->nd1.tag; + card->info.chpid = tag[0]; + card->info.unit_addr2 = tag[1]; + + tag = (u8 *)&nd->nd2.tag; + card->info.cula = tag[1]; + + card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 && + nd->nd3.model[1] == 0xF0 && + nd->nd3.model[2] >= 0xF1 && + nd->nd3.model[2] <= 0xF4; + +out: + qeth_notify_cmd(iob, rc); + qeth_put_cmd(iob); +} + +static int qeth_read_conf_data(struct qeth_card *card) +{ + struct qeth_channel *channel = &card->data; + struct qeth_cmd_buffer *iob; + struct ciw *ciw; + + /* scan for RCD command in extended SenseID data */ + ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD); + if (!ciw || ciw->cmd == 0) + return -EOPNOTSUPP; + if (ciw->count < sizeof(struct qeth_node_desc)) + return -EINVAL; + + iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT); + if (!iob) + return -ENOMEM; + + iob->callback = qeth_read_conf_data_cb; + qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length, + iob->data); + + return qeth_send_control_data(card, iob, NULL, NULL); +} + +static int qeth_idx_check_activate_response(struct qeth_card *card, + struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) +{ + int rc; + + rc = qeth_check_idx_response(card, iob->data); + if (rc) + return rc; + + if (QETH_IS_IDX_ACT_POS_REPLY(iob->data)) + return 0; + + /* negative reply: */ + QETH_CARD_TEXT_(card, 2, "idxneg%c", + QETH_IDX_ACT_CAUSE_CODE(iob->data)); + + switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) { + case QETH_IDX_ACT_ERR_EXCL: + dev_err(&channel->ccwdev->dev, + "The adapter is used exclusively by another host\n"); + return -EBUSY; + case QETH_IDX_ACT_ERR_AUTH: + case QETH_IDX_ACT_ERR_AUTH_USER: + dev_err(&channel->ccwdev->dev, + "Setting the device online failed because of insufficient authorization\n"); + return -EPERM; + default: + QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n", + CCW_DEVID(channel->ccwdev)); + return -EIO; + } +} + +static void qeth_idx_activate_read_channel_cb(struct qeth_card *card, + struct qeth_cmd_buffer *iob, + unsigned int data_length) +{ + struct qeth_channel *channel = iob->channel; + u16 peer_level; + int rc; + + QETH_CARD_TEXT(card, 2, "idxrdcb"); + + rc = qeth_idx_check_activate_response(card, channel, iob); + if (rc) + goto out; + + memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); + if (peer_level != qeth_peer_func_level(card->info.func_level)) { + QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", + CCW_DEVID(channel->ccwdev), + card->info.func_level, peer_level); + rc = -EINVAL; + goto out; + } + + memcpy(&card->token.issuer_rm_r, + 
QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), + QETH_MPC_TOKEN_LENGTH); + memcpy(&card->info.mcl_level[0], + QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); + +out: + qeth_notify_cmd(iob, rc); + qeth_put_cmd(iob); +} + +static void qeth_idx_activate_write_channel_cb(struct qeth_card *card, + struct qeth_cmd_buffer *iob, + unsigned int data_length) +{ + struct qeth_channel *channel = iob->channel; + u16 peer_level; + int rc; + + QETH_CARD_TEXT(card, 2, "idxwrcb"); + + rc = qeth_idx_check_activate_response(card, channel, iob); + if (rc) + goto out; + + memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); + if ((peer_level & ~0x0100) != + qeth_peer_func_level(card->info.func_level)) { + QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n", + CCW_DEVID(channel->ccwdev), + card->info.func_level, peer_level); + rc = -EINVAL; + } + +out: + qeth_notify_cmd(iob, rc); + qeth_put_cmd(iob); +} + +static void qeth_idx_setup_activate_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob) +{ + u16 addr = (card->info.cula << 8) + card->info.unit_addr2; + u8 port = ((u8)card->dev->dev_port) | 0x80; + struct ccw1 *ccw = __ccw_from_cmd(iob); + + qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE, + iob->data); + qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data); + iob->finalize = qeth_idx_finalize_cmd; + + port |= QETH_IDX_ACT_INVAL_FRAME; + memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1); + memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), + &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), + &card->info.func_level, 2); + memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2); + memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2); +} + +static int qeth_idx_activate_read_channel(struct qeth_card *card) +{ + struct qeth_channel *channel = &card->read; + struct qeth_cmd_buffer *iob; + int rc; + + QETH_CARD_TEXT(card, 2, "idxread"); + + iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT); + if (!iob) + return -ENOMEM; + + memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE); + qeth_idx_setup_activate_cmd(card, iob); + iob->callback = qeth_idx_activate_read_channel_cb; + + rc = qeth_send_control_data(card, iob, NULL, NULL); + if (rc) + return rc; + + channel->state = CH_STATE_UP; + return 0; +} + +static int qeth_idx_activate_write_channel(struct qeth_card *card) +{ + struct qeth_channel *channel = &card->write; + struct qeth_cmd_buffer *iob; + int rc; + + QETH_CARD_TEXT(card, 2, "idxwrite"); + + iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT); + if (!iob) + return -ENOMEM; + + memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE); + qeth_idx_setup_activate_cmd(card, iob); + iob->callback = qeth_idx_activate_write_channel_cb; + + rc = qeth_send_control_data(card, iob, NULL, NULL); + if (rc) + return rc; + + channel->state = CH_STATE_UP; + return 0; +} + +static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "cmenblcb"); + + iob = (struct qeth_cmd_buffer *) data; + memcpy(&card->token.cm_filter_r, + QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), + QETH_MPC_TOKEN_LENGTH); + return 0; +} + +static int qeth_cm_enable(struct qeth_card *card) +{ + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "cmenable"); + + iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE); + if (!iob) + return -ENOMEM; + + 
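The CM_ENABLE PDU is built from a static template and then patched in place at fixed byte offsets, as the memcpy() calls just below show. The following self-contained sketch mirrors that build-then-patch pattern; TOKEN_OFFSET and TOKEN_LEN are hypothetical stand-ins, not the real QETH_CM_ENABLE_* offsets or QETH_MPC_TOKEN_LENGTH.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOKEN_LEN	4	/* illustrative stand-in for the MPC token length */
#define TOKEN_OFFSET	8	/* hypothetical offset of the token field in the PDU */

/* Copy a protocol template into a scratch buffer and patch a token field at
 * a fixed offset, the same build-then-patch pattern used for CM_ENABLE.
 */
static void build_cmd(uint8_t *pdu, const uint8_t *tmpl, size_t len,
		      const uint8_t *token)
{
	memcpy(pdu, tmpl, len);
	memcpy(pdu + TOKEN_OFFSET, token, TOKEN_LEN);
}

int main(void)
{
	static const uint8_t tmpl[16];
	static const uint8_t token[TOKEN_LEN] = { 0xde, 0xad, 0xbe, 0xef };
	uint8_t pdu[sizeof(tmpl)];

	build_cmd(pdu, tmpl, sizeof(tmpl), token);
	printf("patched token starts with %#x\n", pdu[TOKEN_OFFSET]);
	return 0;
}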
memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data), + &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data), + &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH); + + return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL); +} + +static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "cmsetpcb"); + + iob = (struct qeth_cmd_buffer *) data; + memcpy(&card->token.cm_connection_r, + QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), + QETH_MPC_TOKEN_LENGTH); + return 0; +} + +static int qeth_cm_setup(struct qeth_card *card) +{ + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "cmsetup"); + + iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE); + if (!iob) + return -ENOMEM; + + memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data), + &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data), + &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data), + &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH); + return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL); +} + +static bool qeth_is_supported_link_type(struct qeth_card *card, u8 link_type) +{ + if (link_type == QETH_LINK_TYPE_LANE_TR || + link_type == QETH_LINK_TYPE_HSTR) { + dev_err(&card->gdev->dev, "Unsupported Token Ring device\n"); + return false; + } + + return true; +} + +static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu) +{ + struct net_device *dev = card->dev; + unsigned int new_mtu; + + if (!max_mtu) { + /* IQD needs accurate max MTU to set up its RX buffers: */ + if (IS_IQD(card)) + return -EINVAL; + /* tolerate quirky HW: */ + max_mtu = ETH_MAX_MTU; + } + + rtnl_lock(); + if (IS_IQD(card)) { + /* move any device with default MTU to new max MTU: */ + new_mtu = (dev->mtu == dev->max_mtu) ? 
max_mtu : dev->mtu; + + /* adjust RX buffer size to new max MTU: */ + card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE; + if (dev->max_mtu && dev->max_mtu != max_mtu) + qeth_free_qdio_queues(card); + } else { + if (dev->mtu) + new_mtu = dev->mtu; + /* default MTUs for first setup: */ + else if (IS_LAYER2(card)) + new_mtu = ETH_DATA_LEN; + else + new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */ + } + + dev->max_mtu = max_mtu; + dev->mtu = min(new_mtu, max_mtu); + rtnl_unlock(); + return 0; +} + +static int qeth_get_mtu_outof_framesize(int framesize) +{ + switch (framesize) { + case 0x4000: + return 8192; + case 0x6000: + return 16384; + case 0xa000: + return 32768; + case 0xffff: + return 57344; + default: + return 0; + } +} + +static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + __u16 mtu, framesize; + __u16 len; + struct qeth_cmd_buffer *iob; + u8 link_type = 0; + + QETH_CARD_TEXT(card, 2, "ulpenacb"); + + iob = (struct qeth_cmd_buffer *) data; + memcpy(&card->token.ulp_filter_r, + QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), + QETH_MPC_TOKEN_LENGTH); + if (IS_IQD(card)) { + memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); + mtu = qeth_get_mtu_outof_framesize(framesize); + } else { + mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data); + } + *(u16 *)reply->param = mtu; + + memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2); + if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) { + memcpy(&link_type, + QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1); + if (!qeth_is_supported_link_type(card, link_type)) + return -EPROTONOSUPPORT; + } + + card->info.link_type = link_type; + QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type); + return 0; +} + +static u8 qeth_mpc_select_prot_type(struct qeth_card *card) +{ + if (IS_OSN(card)) + return QETH_PROT_OSN2; + return IS_LAYER2(card) ? 
QETH_PROT_LAYER2 : QETH_PROT_TCPIP; +} + +static int qeth_ulp_enable(struct qeth_card *card) +{ + u8 prot_type = qeth_mpc_select_prot_type(card); + struct qeth_cmd_buffer *iob; + u16 max_mtu; + int rc; + + QETH_CARD_TEXT(card, 2, "ulpenabl"); + + iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE); + if (!iob) + return -ENOMEM; + + *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port; + memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1); + memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data), + &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data), + &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH); + rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu); + if (rc) + return rc; + return qeth_update_max_mtu(card, max_mtu); +} + +static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "ulpstpcb"); + + iob = (struct qeth_cmd_buffer *) data; + memcpy(&card->token.ulp_connection_r, + QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), + QETH_MPC_TOKEN_LENGTH); + if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), + 3)) { + QETH_CARD_TEXT(card, 2, "olmlimit"); + dev_err(&card->gdev->dev, "A connection could not be " + "established because of an OLM limit\n"); + return -EMLINK; + } + return 0; +} + +static int qeth_ulp_setup(struct qeth_card *card) +{ + __u16 temp; + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "ulpsetup"); + + iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE); + if (!iob) + return -ENOMEM; + + memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data), + &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data), + &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data), + &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH); + + memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2); + temp = (card->info.cula << 8) + card->info.unit_addr2; + memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2); + return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL); +} + +static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) +{ + struct qeth_qdio_out_buffer *newbuf; + + newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC); + if (!newbuf) + return -ENOMEM; + + newbuf->buffer = q->qdio_bufs[bidx]; + skb_queue_head_init(&newbuf->skb_list); + lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); + newbuf->q = q; + atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); + q->bufs[bidx] = newbuf; + return 0; +} + +static void qeth_free_output_queue(struct qeth_qdio_out_q *q) +{ + if (!q) + return; + + qeth_drain_output_queue(q, true); + qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); + kfree(q); +} + +static struct qeth_qdio_out_q *qeth_alloc_output_queue(void) +{ + struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL); + unsigned int i; + + if (!q) + return NULL; + + if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) + goto err_qdio_bufs; + + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { + if (qeth_init_qdio_out_buf(q, i)) + goto err_out_bufs; + } + + return q; + +err_out_bufs: + while (i > 0) + kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[--i]); + qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); +err_qdio_bufs: + kfree(q); + return NULL; +} + +static void qeth_tx_completion_timer(struct timer_list *timer) 
+{ + struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer); + + napi_schedule(&queue->napi); + QETH_TXQ_STAT_INC(queue, completion_timer); +} + +static int qeth_alloc_qdio_queues(struct qeth_card *card) +{ + unsigned int i; + + QETH_CARD_TEXT(card, 2, "allcqdbf"); + + if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, + QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) + return 0; + + QETH_CARD_TEXT(card, 2, "inq"); + card->qdio.in_q = qeth_alloc_qdio_queue(); + if (!card->qdio.in_q) + goto out_nomem; + + /* inbound buffer pool */ + if (qeth_alloc_buffer_pool(card)) + goto out_freeinq; + + /* outbound */ + for (i = 0; i < card->qdio.no_out_queues; ++i) { + struct qeth_qdio_out_q *queue; + + queue = qeth_alloc_output_queue(); + if (!queue) + goto out_freeoutq; + QETH_CARD_TEXT_(card, 2, "outq %i", i); + QETH_CARD_HEX(card, 2, &queue, sizeof(void *)); + card->qdio.out_qs[i] = queue; + queue->card = card; + queue->queue_no = i; + INIT_LIST_HEAD(&queue->pending_bufs); + spin_lock_init(&queue->lock); + timer_setup(&queue->timer, qeth_tx_completion_timer, 0); + queue->coalesce_usecs = QETH_TX_COALESCE_USECS; + queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES; + queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT; + } + + /* completion */ + if (qeth_alloc_cq(card)) + goto out_freeoutq; + + return 0; + +out_freeoutq: + while (i > 0) { + qeth_free_output_queue(card->qdio.out_qs[--i]); + card->qdio.out_qs[i] = NULL; + } + qeth_free_buffer_pool(card); +out_freeinq: + qeth_free_qdio_queue(card->qdio.in_q); + card->qdio.in_q = NULL; +out_nomem: + atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); + return -ENOMEM; +} + +static void qeth_free_qdio_queues(struct qeth_card *card) +{ + int i, j; + + if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == + QETH_QDIO_UNINITIALIZED) + return; + + qeth_free_cq(card); + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { + if (card->qdio.in_q->bufs[j].rx_skb) + dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); + } + qeth_free_qdio_queue(card->qdio.in_q); + card->qdio.in_q = NULL; + /* inbound buffer pool */ + qeth_free_buffer_pool(card); + /* free outbound qdio_qs */ + for (i = 0; i < card->qdio.no_out_queues; i++) { + qeth_free_output_queue(card->qdio.out_qs[i]); + card->qdio.out_qs[i] = NULL; + } +} + +static void qeth_fill_qib_parms(struct qeth_card *card, + struct qeth_qib_parms *parms) +{ + struct qeth_qdio_out_q *queue; + unsigned int i; + + parms->pcit_magic[0] = 'P'; + parms->pcit_magic[1] = 'C'; + parms->pcit_magic[2] = 'I'; + parms->pcit_magic[3] = 'T'; + ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic)); + parms->pcit_a = QETH_PCI_THRESHOLD_A(card); + parms->pcit_b = QETH_PCI_THRESHOLD_B(card); + parms->pcit_c = QETH_PCI_TIMER_VALUE(card); + + parms->blkt_magic[0] = 'B'; + parms->blkt_magic[1] = 'L'; + parms->blkt_magic[2] = 'K'; + parms->blkt_magic[3] = 'T'; + ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic)); + parms->blkt_total = card->info.blkt.time_total; + parms->blkt_inter_packet = card->info.blkt.inter_packet; + parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo; + + /* Prio-queueing implicitly uses the default priorities: */ + if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1) + return; + + parms->pque_magic[0] = 'P'; + parms->pque_magic[1] = 'Q'; + parms->pque_magic[2] = 'U'; + parms->pque_magic[3] = 'E'; + ASCEBC(parms->pque_magic, sizeof(parms->pque_magic)); + parms->pque_order = QETH_QIB_PQUE_ORDER_RR; + parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL; + + 
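The four-character magics filled in above are converted to EBCDIC via ASCEBC() before the QIB parameters are handed to the hardware. A minimal sketch of that conversion for the uppercase letters involved, assuming the standard EBCDIC code points (A-I = 0xC1..0xC9, J-R = 0xD1..0xD9, S-Z = 0xE2..0xE9); this is not the kernel's ASCEBC() implementation, only an illustration of the mapping.

#include <stdint.h>
#include <stdio.h>

static uint8_t ascii_to_ebcdic_upper(char c)
{
	/* Uppercase letters sit in three EBCDIC ranges. */
	if (c >= 'A' && c <= 'I')
		return 0xC1 + (c - 'A');
	if (c >= 'J' && c <= 'R')
		return 0xD1 + (c - 'J');
	if (c >= 'S' && c <= 'Z')
		return 0xE2 + (c - 'S');
	return '?';
}

int main(void)
{
	const char magic[] = "PCIT";	/* same letters as the pcit_magic above */
	int i;

	for (i = 0; magic[i]; i++)
		printf("%c -> %#x\n", magic[i],
		       (unsigned int)ascii_to_ebcdic_upper(magic[i]));
	return 0;
}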
qeth_for_each_output_queue(card, queue, i) + parms->pque_priority[i] = queue->priority; +} + +static int qeth_qdio_activate(struct qeth_card *card) +{ + QETH_CARD_TEXT(card, 3, "qdioact"); + return qdio_activate(CARD_DDEV(card)); +} + +static int qeth_dm_act(struct qeth_card *card) +{ + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "dmact"); + + iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE); + if (!iob) + return -ENOMEM; + + memcpy(QETH_DM_ACT_DEST_ADDR(iob->data), + &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data), + &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); + return qeth_send_control_data(card, iob, NULL, NULL); +} + +static int qeth_mpc_initialize(struct qeth_card *card) +{ + int rc; + + QETH_CARD_TEXT(card, 2, "mpcinit"); + + rc = qeth_issue_next_read(card); + if (rc) { + QETH_CARD_TEXT_(card, 2, "1err%d", rc); + return rc; + } + rc = qeth_cm_enable(card); + if (rc) { + QETH_CARD_TEXT_(card, 2, "2err%d", rc); + return rc; + } + rc = qeth_cm_setup(card); + if (rc) { + QETH_CARD_TEXT_(card, 2, "3err%d", rc); + return rc; + } + rc = qeth_ulp_enable(card); + if (rc) { + QETH_CARD_TEXT_(card, 2, "4err%d", rc); + return rc; + } + rc = qeth_ulp_setup(card); + if (rc) { + QETH_CARD_TEXT_(card, 2, "5err%d", rc); + return rc; + } + rc = qeth_alloc_qdio_queues(card); + if (rc) { + QETH_CARD_TEXT_(card, 2, "5err%d", rc); + return rc; + } + rc = qeth_qdio_establish(card); + if (rc) { + QETH_CARD_TEXT_(card, 2, "6err%d", rc); + qeth_free_qdio_queues(card); + return rc; + } + rc = qeth_qdio_activate(card); + if (rc) { + QETH_CARD_TEXT_(card, 2, "7err%d", rc); + return rc; + } + rc = qeth_dm_act(card); + if (rc) { + QETH_CARD_TEXT_(card, 2, "8err%d", rc); + return rc; + } + + return 0; +} + +static void qeth_print_status_message(struct qeth_card *card) +{ + switch (card->info.type) { + case QETH_CARD_TYPE_OSD: + case QETH_CARD_TYPE_OSM: + case QETH_CARD_TYPE_OSX: + /* VM will use a non-zero first character + * to indicate a HiperSockets like reporting + * of the level OSA sets the first character to zero + * */ + if (!card->info.mcl_level[0]) { + sprintf(card->info.mcl_level, "%02x%02x", + card->info.mcl_level[2], + card->info.mcl_level[3]); + break; + } + fallthrough; + case QETH_CARD_TYPE_IQD: + if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) { + card->info.mcl_level[0] = (char) _ebcasc[(__u8) + card->info.mcl_level[0]]; + card->info.mcl_level[1] = (char) _ebcasc[(__u8) + card->info.mcl_level[1]]; + card->info.mcl_level[2] = (char) _ebcasc[(__u8) + card->info.mcl_level[2]]; + card->info.mcl_level[3] = (char) _ebcasc[(__u8) + card->info.mcl_level[3]]; + card->info.mcl_level[QETH_MCL_LENGTH] = 0; + } + break; + default: + memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1); + } + dev_info(&card->gdev->dev, + "Device is a%s card%s%s%s\nwith link type %s.\n", + qeth_get_cardname(card), + (card->info.mcl_level[0]) ? " (level: " : "", + (card->info.mcl_level[0]) ? card->info.mcl_level : "", + (card->info.mcl_level[0]) ? 
")" : "", + qeth_get_cardname_short(card)); +} + +static void qeth_initialize_working_pool_list(struct qeth_card *card) +{ + struct qeth_buffer_pool_entry *entry; + + QETH_CARD_TEXT(card, 5, "inwrklst"); + + list_for_each_entry(entry, + &card->qdio.init_pool.entry_list, init_list) { + qeth_put_buffer_pool_entry(card, entry); + } +} + +static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( + struct qeth_card *card) +{ + struct qeth_buffer_pool_entry *entry; + int i, free; + + if (list_empty(&card->qdio.in_buf_pool.entry_list)) + return NULL; + + list_for_each_entry(entry, &card->qdio.in_buf_pool.entry_list, list) { + free = 1; + for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { + if (page_count(entry->elements[i]) > 1) { + free = 0; + break; + } + } + if (free) { + list_del_init(&entry->list); + return entry; + } + } + + /* no free buffer in pool so take first one and swap pages */ + entry = list_first_entry(&card->qdio.in_buf_pool.entry_list, + struct qeth_buffer_pool_entry, list); + for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { + if (page_count(entry->elements[i]) > 1) { + struct page *page = dev_alloc_page(); + + if (!page) + return NULL; + + __free_page(entry->elements[i]); + entry->elements[i] = page; + QETH_CARD_STAT_INC(card, rx_sg_alloc_page); + } + } + list_del_init(&entry->list); + return entry; +} + +static int qeth_init_input_buffer(struct qeth_card *card, + struct qeth_qdio_buffer *buf) +{ + struct qeth_buffer_pool_entry *pool_entry = buf->pool_entry; + int i; + + if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) { + buf->rx_skb = netdev_alloc_skb(card->dev, + ETH_HLEN + + sizeof(struct ipv6hdr)); + if (!buf->rx_skb) + return -ENOMEM; + } + + if (!pool_entry) { + pool_entry = qeth_find_free_buffer_pool_entry(card); + if (!pool_entry) + return -ENOBUFS; + + buf->pool_entry = pool_entry; + } + + /* + * since the buffer is accessed only from the input_tasklet + * there shouldn't be a need to synchronize; also, since we use + * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off + * buffers + */ + for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { + buf->buffer->element[i].length = PAGE_SIZE; + buf->buffer->element[i].addr = + page_to_phys(pool_entry->elements[i]); + if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) + buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY; + else + buf->buffer->element[i].eflags = 0; + buf->buffer->element[i].sflags = 0; + } + return 0; +} + +static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card, + struct qeth_qdio_out_q *queue) +{ + if (!IS_IQD(card) || + qeth_iqd_is_mcast_queue(card, queue) || + card->options.cq == QETH_CQ_ENABLED || + qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd)) + return 1; + + return card->ssqd.mmwc ? 
card->ssqd.mmwc : 1; +} + +static int qeth_init_qdio_queues(struct qeth_card *card) +{ + unsigned int rx_bufs = card->qdio.in_buf_pool.buf_count; + unsigned int i; + int rc; + + QETH_CARD_TEXT(card, 2, "initqdqs"); + + /* inbound queue */ + qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); + memset(&card->rx, 0, sizeof(struct qeth_rx)); + + qeth_initialize_working_pool_list(card); + /*give only as many buffers to hardware as we have buffer pool entries*/ + for (i = 0; i < rx_bufs; i++) { + rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); + if (rc) + return rc; + } + + card->qdio.in_q->next_buf_to_init = QDIO_BUFNR(rx_bufs); + rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, rx_bufs); + if (rc) { + QETH_CARD_TEXT_(card, 2, "1err%d", rc); + return rc; + } + + /* completion */ + rc = qeth_cq_init(card); + if (rc) { + return rc; + } + + /* outbound queue */ + for (i = 0; i < card->qdio.no_out_queues; ++i) { + struct qeth_qdio_out_q *queue = card->qdio.out_qs[i]; + + qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); + queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card); + queue->next_buf_to_fill = 0; + queue->do_pack = 0; + queue->prev_hdr = NULL; + queue->coalesced_frames = 0; + queue->bulk_start = 0; + queue->bulk_count = 0; + queue->bulk_max = qeth_tx_select_bulk_max(card, queue); + atomic_set(&queue->used_buffers, 0); + atomic_set(&queue->set_pci_flags_count, 0); + netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i)); + } + return 0; +} + +static void qeth_ipa_finalize_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob) +{ + qeth_mpc_finalize_cmd(card, iob); + + /* override with IPA-specific values: */ + __ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++; +} + +void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, + u16 cmd_length, + bool (*match)(struct qeth_cmd_buffer *iob, + struct qeth_cmd_buffer *reply)) +{ + u8 prot_type = qeth_mpc_select_prot_type(card); + u16 total_length = iob->length; + + qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length, + iob->data); + iob->finalize = qeth_ipa_finalize_cmd; + iob->match = match; + + memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); + memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2); + memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1); + memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2); + memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2); + memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), + &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2); +} +EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); + +static bool qeth_ipa_match_reply(struct qeth_cmd_buffer *iob, + struct qeth_cmd_buffer *reply) +{ + struct qeth_ipa_cmd *ipa_reply = __ipa_reply(reply); + + return ipa_reply && (__ipa_cmd(iob)->hdr.seqno == ipa_reply->hdr.seqno); +} + +struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card, + enum qeth_ipa_cmds cmd_code, + enum qeth_prot_versions prot, + unsigned int data_length) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipacmd_hdr *hdr; + + data_length += offsetof(struct qeth_ipa_cmd, data); + iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1, + QETH_IPA_TIMEOUT); + if (!iob) + return NULL; + + qeth_prepare_ipa_cmd(card, iob, data_length, qeth_ipa_match_reply); + + hdr = &__ipa_cmd(iob)->hdr; + hdr->command = cmd_code; + hdr->initiator = IPA_CMD_INITIATOR_HOST; + /* hdr->seqno is set by qeth_send_control_data() */ + 
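Replies are paired with their originating command by the sequence number carried in the IPA header, as qeth_ipa_match_reply() above does. A toy, self-contained model of that matching follows; the struct layout and names are illustrative only, not the driver's actual bookkeeping.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pending_req {
	uint32_t seqno;		/* sequence number the request was sent with */
	bool     done;
};

/* Find the still-pending request whose seqno the reply echoes back. */
static struct pending_req *match_reply(struct pending_req *reqs, int n,
				       uint32_t reply_seqno)
{
	int i;

	for (i = 0; i < n; i++)
		if (!reqs[i].done && reqs[i].seqno == reply_seqno)
			return &reqs[i];
	return NULL;
}

int main(void)
{
	struct pending_req reqs[] = { { 1, false }, { 2, false }, { 3, false } };
	struct pending_req *hit = match_reply(reqs, 3, 2);

	printf("matched seqno %u\n", (unsigned int)(hit ? hit->seqno : 0));
	return 0;
}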
hdr->adapter_type = QETH_LINK_TYPE_FAST_ETH; + hdr->rel_adapter_no = (u8) card->dev->dev_port; + hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1; + hdr->param_count = 1; + hdr->prot_version = prot; + return iob; +} +EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd); + +static int qeth_send_ipa_cmd_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + return (cmd->hdr.return_code) ? -EIO : 0; +} + +/** + * qeth_send_ipa_cmd() - send an IPA command + * + * See qeth_send_control_data() for explanation of the arguments. + */ + +int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, + int (*reply_cb)(struct qeth_card *, struct qeth_reply*, + unsigned long), + void *reply_param) +{ + int rc; + + QETH_CARD_TEXT(card, 4, "sendipa"); + + if (card->read_or_write_problem) { + qeth_put_cmd(iob); + return -EIO; + } + + if (reply_cb == NULL) + reply_cb = qeth_send_ipa_cmd_cb; + rc = qeth_send_control_data(card, iob, reply_cb, reply_param); + if (rc == -ETIME) { + qeth_clear_ipacmd_list(card); + qeth_schedule_recovery(card); + } + return rc; +} +EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); + +static int qeth_send_startlan_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE) + return -ENETDOWN; + + return (cmd->hdr.return_code) ? -EIO : 0; +} + +static int qeth_send_startlan(struct qeth_card *card) +{ + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "strtlan"); + + iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0); + if (!iob) + return -ENOMEM; + return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL); +} + +static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) +{ + if (!cmd->hdr.return_code) + cmd->hdr.return_code = + cmd->data.setadapterparms.hdr.return_code; + return cmd->hdr.return_code; +} + +static int qeth_query_setadapterparms_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_query_cmds_supp *query_cmd; + + QETH_CARD_TEXT(card, 3, "quyadpcb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return -EIO; + + query_cmd = &cmd->data.setadapterparms.data.query_cmds_supp; + if (query_cmd->lan_type & 0x7f) { + if (!qeth_is_supported_link_type(card, query_cmd->lan_type)) + return -EPROTONOSUPPORT; + + card->info.link_type = query_cmd->lan_type; + QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type); + } + + card->options.adp.supported = query_cmd->supported_cmds; + return 0; +} + +static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, + enum qeth_ipa_setadp_cmd adp_cmd, + unsigned int data_length) +{ + struct qeth_ipacmd_setadpparms_hdr *hdr; + struct qeth_cmd_buffer *iob; + + iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4, + data_length + + offsetof(struct qeth_ipacmd_setadpparms, + data)); + if (!iob) + return NULL; + + hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr; + hdr->cmdlength = sizeof(*hdr) + data_length; + hdr->command_code = adp_cmd; + hdr->used_total = 1; + hdr->seq_no = 1; + return iob; +} + +static int qeth_query_setadapterparms(struct qeth_card *card) +{ + int rc; + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 3, "queryadp"); + iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, + SETADP_DATA_SIZEOF(query_cmds_supp)); + if (!iob) + return -ENOMEM; + rc = 
qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); + return rc; +} + +static int qeth_query_ipassists_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + + QETH_CARD_TEXT(card, 2, "qipasscb"); + + cmd = (struct qeth_ipa_cmd *) data; + + switch (cmd->hdr.return_code) { + case IPA_RC_SUCCESS: + break; + case IPA_RC_NOTSUPP: + case IPA_RC_L2_UNSUPPORTED_CMD: + QETH_CARD_TEXT(card, 2, "ipaunsup"); + card->options.ipa4.supported |= IPA_SETADAPTERPARMS; + card->options.ipa6.supported |= IPA_SETADAPTERPARMS; + return -EOPNOTSUPP; + default: + QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n", + CARD_DEVID(card), cmd->hdr.return_code); + return -EIO; + } + + if (cmd->hdr.prot_version == QETH_PROT_IPV4) + card->options.ipa4 = cmd->hdr.assists; + else if (cmd->hdr.prot_version == QETH_PROT_IPV6) + card->options.ipa6 = cmd->hdr.assists; + else + QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n", + CARD_DEVID(card)); + return 0; +} + +static int qeth_query_ipassists(struct qeth_card *card, + enum qeth_prot_versions prot) +{ + int rc; + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT_(card, 2, "qipassi%i", prot); + iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0); + if (!iob) + return -ENOMEM; + rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); + return rc; +} + +static int qeth_query_switch_attributes_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_query_switch_attributes *attrs; + struct qeth_switch_info *sw_info; + + QETH_CARD_TEXT(card, 2, "qswiatcb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return -EIO; + + sw_info = (struct qeth_switch_info *)reply->param; + attrs = &cmd->data.setadapterparms.data.query_switch_attributes; + sw_info->capabilities = attrs->capabilities; + sw_info->settings = attrs->settings; + QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, + sw_info->settings); + return 0; +} + +int qeth_query_switch_attributes(struct qeth_card *card, + struct qeth_switch_info *sw_info) +{ + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "qswiattr"); + if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES)) + return -EOPNOTSUPP; + if (!netif_carrier_ok(card->dev)) + return -ENOMEDIUM; + iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0); + if (!iob) + return -ENOMEM; + return qeth_send_ipa_cmd(card, iob, + qeth_query_switch_attributes_cb, sw_info); +} + +struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card, + enum qeth_diags_cmds sub_cmd, + unsigned int data_length) +{ + struct qeth_ipacmd_diagass *cmd; + struct qeth_cmd_buffer *iob; + + iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE, + DIAG_HDR_LEN + data_length); + if (!iob) + return NULL; + + cmd = &__ipa_cmd(iob)->data.diagass; + cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length; + cmd->subcmd = sub_cmd; + return iob; +} +EXPORT_SYMBOL_GPL(qeth_get_diag_cmd); + +static int qeth_query_setdiagass_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + u16 rc = cmd->hdr.return_code; + + if (rc) { + QETH_CARD_TEXT_(card, 2, "diagq:%x", rc); + return -EIO; + } + + card->info.diagass_support = cmd->data.diagass.ext; + return 0; +} + +static int qeth_query_setdiagass(struct qeth_card *card) +{ + struct qeth_cmd_buffer *iob; + + 
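The assist masks captured by qeth_query_ipassists_cb() above are plain bitmasks that later get tested feature by feature. A minimal sketch of such a supported/enabled bitmask check; the flag values here are made up and do not correspond to real IPA assist bits.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ASSIST_CHECKSUM	0x0002u		/* hypothetical flag value */
#define ASSIST_TSO	0x0800u		/* hypothetical flag value */

struct assists {
	uint32_t supported;
	uint32_t enabled;
};

static bool assist_usable(const struct assists *a, uint32_t flag)
{
	/* A feature is usable only if the card reports it and it was enabled. */
	return (a->supported & flag) && (a->enabled & flag);
}

int main(void)
{
	struct assists ipa4 = { .supported = ASSIST_CHECKSUM | ASSIST_TSO,
				.enabled   = ASSIST_CHECKSUM };

	printf("checksum usable: %d\n", assist_usable(&ipa4, ASSIST_CHECKSUM));
	printf("tso usable:      %d\n", assist_usable(&ipa4, ASSIST_TSO));
	return 0;
}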
QETH_CARD_TEXT(card, 2, "qdiagass"); + iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0); + if (!iob) + return -ENOMEM; + return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL); +} + +static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid) +{ + unsigned long info = get_zeroed_page(GFP_KERNEL); + struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info; + struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info; + struct ccw_dev_id ccwid; + int level; + + tid->chpid = card->info.chpid; + ccw_device_get_id(CARD_RDEV(card), &ccwid); + tid->ssid = ccwid.ssid; + tid->devno = ccwid.devno; + if (!info) + return; + level = stsi(NULL, 0, 0, 0); + if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0)) + tid->lparnr = info222->lpar_number; + if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) { + EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name)); + memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname)); + } + free_page(info); +} + +static int qeth_hw_trap_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + u16 rc = cmd->hdr.return_code; + + if (rc) { + QETH_CARD_TEXT_(card, 2, "trapc:%x", rc); + return -EIO; + } + return 0; +} + +int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_CARD_TEXT(card, 2, "diagtrap"); + iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + cmd->data.diagass.type = 1; + cmd->data.diagass.action = action; + switch (action) { + case QETH_DIAGS_TRAP_ARM: + cmd->data.diagass.options = 0x0003; + cmd->data.diagass.ext = 0x00010000 + + sizeof(struct qeth_trap_id); + qeth_get_trap_id(card, + (struct qeth_trap_id *)cmd->data.diagass.cdata); + break; + case QETH_DIAGS_TRAP_DISARM: + cmd->data.diagass.options = 0x0001; + break; + case QETH_DIAGS_TRAP_CAPTURE: + break; + } + return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL); +} + +static int qeth_check_qdio_errors(struct qeth_card *card, + struct qdio_buffer *buf, + unsigned int qdio_error, + const char *dbftext) +{ + if (qdio_error) { + QETH_CARD_TEXT(card, 2, dbftext); + QETH_CARD_TEXT_(card, 2, " F15=%02X", + buf->element[15].sflags); + QETH_CARD_TEXT_(card, 2, " F14=%02X", + buf->element[14].sflags); + QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); + if ((buf->element[15].sflags) == 0x12) { + QETH_CARD_STAT_INC(card, rx_fifo_errors); + return 0; + } else + return 1; + } + return 0; +} + +static unsigned int qeth_rx_refill_queue(struct qeth_card *card, + unsigned int count) +{ + struct qeth_qdio_q *queue = card->qdio.in_q; + struct list_head *lh; + int i; + int rc; + int newcount = 0; + + /* only requeue at a certain threshold to avoid SIGAs */ + if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) { + for (i = queue->next_buf_to_init; + i < queue->next_buf_to_init + count; ++i) { + if (qeth_init_input_buffer(card, + &queue->bufs[QDIO_BUFNR(i)])) { + break; + } else { + newcount++; + } + } + + if (newcount < count) { + /* we are in memory shortage so we switch back to + traditional skb allocation and drop packages */ + atomic_set(&card->force_alloc_skb, 3); + count = newcount; + } else { + atomic_add_unless(&card->force_alloc_skb, -1, 0); + } + + if (!count) { + i = 0; + list_for_each(lh, &card->qdio.in_buf_pool.entry_list) + i++; + if (i == card->qdio.in_buf_pool.buf_count) { + QETH_CARD_TEXT(card, 2, "qsarbw"); + 
schedule_delayed_work( + &card->buffer_reclaim_work, + QETH_RECLAIM_WORK_TIME); + } + return 0; + } + + rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, + queue->next_buf_to_init, count); + if (rc) { + QETH_CARD_TEXT(card, 2, "qinberr"); + } + queue->next_buf_to_init = QDIO_BUFNR(queue->next_buf_to_init + + count); + return count; + } + + return 0; +} + +static void qeth_buffer_reclaim_work(struct work_struct *work) +{ + struct qeth_card *card = container_of(to_delayed_work(work), + struct qeth_card, + buffer_reclaim_work); + + local_bh_disable(); + napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); +} + +static void qeth_handle_send_error(struct qeth_card *card, + struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) +{ + int sbalf15 = buffer->buffer->element[15].sflags; + + QETH_CARD_TEXT(card, 6, "hdsnderr"); + qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr"); + + if (!qdio_err) + return; + + if ((sbalf15 >= 15) && (sbalf15 <= 31)) + return; + + QETH_CARD_TEXT(card, 1, "lnkfail"); + QETH_CARD_TEXT_(card, 1, "%04x %02x", + (u16)qdio_err, (u8)sbalf15); +} + +/** + * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer. + * @queue: queue to check for packing buffer + * + * Returns number of buffers that were prepared for flush. + */ +static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue) +{ + struct qeth_qdio_out_buffer *buffer; + + buffer = queue->bufs[queue->next_buf_to_fill]; + if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) && + (buffer->next_element_to_fill > 0)) { + /* it's a packing buffer */ + atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); + queue->next_buf_to_fill = + QDIO_BUFNR(queue->next_buf_to_fill + 1); + return 1; + } + return 0; +} + +/* + * Switched to packing state if the number of used buffers on a queue + * reaches a certain limit. + */ +static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) +{ + if (!queue->do_pack) { + if (atomic_read(&queue->used_buffers) + >= QETH_HIGH_WATERMARK_PACK){ + /* switch non-PACKING -> PACKING */ + QETH_CARD_TEXT(queue->card, 6, "np->pack"); + QETH_TXQ_STAT_INC(queue, packing_mode_switch); + queue->do_pack = 1; + } + } +} + +/* + * Switches from packing to non-packing mode. If there is a packing + * buffer on the queue this buffer will be prepared to be flushed. + * In that case 1 is returned to inform the caller. If no buffer + * has to be flushed, zero is returned. 
+ */ +static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) +{ + if (queue->do_pack) { + if (atomic_read(&queue->used_buffers) + <= QETH_LOW_WATERMARK_PACK) { + /* switch PACKING -> non-PACKING */ + QETH_CARD_TEXT(queue->card, 6, "pack->np"); + QETH_TXQ_STAT_INC(queue, packing_mode_switch); + queue->do_pack = 0; + return qeth_prep_flush_pack_buffer(queue); + } + } + return 0; +} + +static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, + int count) +{ + struct qeth_qdio_out_buffer *buf = queue->bufs[index]; + unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT; + struct qeth_card *card = queue->card; + int rc; + int i; + + for (i = index; i < index + count; ++i) { + unsigned int bidx = QDIO_BUFNR(i); + struct sk_buff *skb; + + buf = queue->bufs[bidx]; + buf->buffer->element[buf->next_element_to_fill - 1].eflags |= + SBAL_EFLAGS_LAST_ENTRY; + queue->coalesced_frames += buf->frames; + + if (queue->bufstates) + queue->bufstates[bidx].user = buf; + + if (IS_IQD(card)) { + skb_queue_walk(&buf->skb_list, skb) + skb_tx_timestamp(skb); + } + } + + if (!IS_IQD(card)) { + if (!queue->do_pack) { + if ((atomic_read(&queue->used_buffers) >= + (QETH_HIGH_WATERMARK_PACK - + QETH_WATERMARK_PACK_FUZZ)) && + !atomic_read(&queue->set_pci_flags_count)) { + /* it's likely that we'll go to packing + * mode soon */ + atomic_inc(&queue->set_pci_flags_count); + buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; + } + } else { + if (!atomic_read(&queue->set_pci_flags_count)) { + /* + * there's no outstanding PCI any more, so we + * have to request a PCI to be sure the the PCI + * will wake at some time in the future then we + * can flush packed buffers that might still be + * hanging around, which can happen if no + * further send was requested by the stack + */ + atomic_inc(&queue->set_pci_flags_count); + buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; + } + } + + if (atomic_read(&queue->set_pci_flags_count)) + qdio_flags |= QDIO_FLAG_PCI_OUT; + } + + QETH_TXQ_STAT_INC(queue, doorbell); + rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, + queue->queue_no, index, count); + + /* Fake the TX completion interrupt: */ + if (IS_IQD(card)) { + unsigned int frames = READ_ONCE(queue->max_coalesced_frames); + unsigned int usecs = READ_ONCE(queue->coalesce_usecs); + + if (frames && queue->coalesced_frames >= frames) { + napi_schedule(&queue->napi); + queue->coalesced_frames = 0; + QETH_TXQ_STAT_INC(queue, coal_frames); + } else if (usecs) { + qeth_tx_arm_timer(queue, usecs); + } + } + + if (rc) { + /* ignore temporary SIGA errors without busy condition */ + if (rc == -ENOBUFS) + return; + QETH_CARD_TEXT(queue->card, 2, "flushbuf"); + QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no); + QETH_CARD_TEXT_(queue->card, 2, " idx%d", index); + QETH_CARD_TEXT_(queue->card, 2, " c%d", count); + QETH_CARD_TEXT_(queue->card, 2, " err%d", rc); + + /* this must not happen under normal circumstances. 
if it + * happens something is really wrong -> recover */ + qeth_schedule_recovery(queue->card); + return; + } +} + +static void qeth_flush_queue(struct qeth_qdio_out_q *queue) +{ + qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count); + + queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count); + queue->prev_hdr = NULL; + queue->bulk_count = 0; +} + +static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) +{ + /* + * check if weed have to switch to non-packing mode or if + * we have to get a pci flag out on the queue + */ + if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) || + !atomic_read(&queue->set_pci_flags_count)) { + unsigned int index, flush_cnt; + bool q_was_packing; + + spin_lock(&queue->lock); + + index = queue->next_buf_to_fill; + q_was_packing = queue->do_pack; + + flush_cnt = qeth_switch_to_nonpacking_if_needed(queue); + if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count)) + flush_cnt = qeth_prep_flush_pack_buffer(queue); + + if (flush_cnt) { + qeth_flush_buffers(queue, index, flush_cnt); + if (q_was_packing) + QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt); + } + + spin_unlock(&queue->lock); + } +} + +static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr) +{ + struct qeth_card *card = (struct qeth_card *)card_ptr; + + napi_schedule_irqoff(&card->napi); +} + +int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) +{ + int rc; + + if (card->options.cq == QETH_CQ_NOTAVAILABLE) { + rc = -1; + goto out; + } else { + if (card->options.cq == cq) { + rc = 0; + goto out; + } + + qeth_free_qdio_queues(card); + card->options.cq = cq; + rc = 0; + } +out: + return rc; + +} +EXPORT_SYMBOL_GPL(qeth_configure_cq); + +static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, + unsigned int queue, int first_element, + int count) +{ + struct qeth_qdio_q *cq = card->qdio.c_q; + int i; + int rc; + + QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element); + QETH_CARD_TEXT_(card, 5, "qcqhc%d", count); + QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err); + + if (qdio_err) { + netif_tx_stop_all_queues(card->dev); + qeth_schedule_recovery(card); + return; + } + + for (i = first_element; i < first_element + count; ++i) { + struct qdio_buffer *buffer = cq->qdio_bufs[QDIO_BUFNR(i)]; + int e = 0; + + while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && + buffer->element[e].addr) { + unsigned long phys_aob_addr = buffer->element[e].addr; + + qeth_qdio_handle_aob(card, phys_aob_addr); + ++e; + } + qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER); + } + rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue, + card->qdio.c_q->next_buf_to_init, + count); + if (rc) { + dev_warn(&card->gdev->dev, + "QDIO reported an error, rc=%i\n", rc); + QETH_CARD_TEXT(card, 2, "qcqherr"); + } + + cq->next_buf_to_init = QDIO_BUFNR(cq->next_buf_to_init + count); +} + +static void qeth_qdio_input_handler(struct ccw_device *ccwdev, + unsigned int qdio_err, int queue, + int first_elem, int count, + unsigned long card_ptr) +{ + struct qeth_card *card = (struct qeth_card *)card_ptr; + + QETH_CARD_TEXT_(card, 2, "qihq%d", queue); + QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err); + + if (qdio_err) + qeth_schedule_recovery(card); +} + +static void qeth_qdio_output_handler(struct ccw_device *ccwdev, + unsigned int qdio_error, int __queue, + int first_element, int count, + unsigned long card_ptr) +{ + struct qeth_card *card = (struct qeth_card *) card_ptr; + struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; + 
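The do_pack handling above is a simple hysteresis: packing starts once the number of used buffers crosses a high watermark and stops again only after it drops below a low watermark, so the queue does not flap around a single threshold. A self-contained sketch of that state machine; the threshold values are made up and differ from the driver's QETH_HIGH/LOW_WATERMARK_PACK.

#include <stdbool.h>
#include <stdio.h>

#define HIGH_WATERMARK	45	/* hypothetical "switch to packing" level */
#define LOW_WATERMARK	10	/* hypothetical "switch back" level */

struct txq {
	int  used_buffers;
	bool do_pack;
};

/* Enter packing above the high mark, leave it only below the low mark. */
static void update_packing(struct txq *q)
{
	if (!q->do_pack && q->used_buffers >= HIGH_WATERMARK)
		q->do_pack = true;
	else if (q->do_pack && q->used_buffers <= LOW_WATERMARK)
		q->do_pack = false;
}

int main(void)
{
	struct txq q = { 0, false };
	int fill[] = { 20, 50, 30, 12, 9 };
	unsigned int i;

	for (i = 0; i < sizeof(fill) / sizeof(fill[0]); i++) {
		q.used_buffers = fill[i];
		update_packing(&q);
		printf("used=%2d do_pack=%d\n", q.used_buffers, q.do_pack);
	}
	return 0;
}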
struct net_device *dev = card->dev; + struct netdev_queue *txq; + int i; + + QETH_CARD_TEXT(card, 6, "qdouhdl"); + if (qdio_error & QDIO_ERROR_FATAL) { + QETH_CARD_TEXT(card, 2, "achkcond"); + netif_tx_stop_all_queues(dev); + qeth_schedule_recovery(card); + return; + } + + for (i = first_element; i < (first_element + count); ++i) { + struct qeth_qdio_out_buffer *buf = queue->bufs[QDIO_BUFNR(i)]; + + qeth_handle_send_error(card, buf, qdio_error); + qeth_clear_output_buffer(queue, buf, qdio_error, 0); + } + + atomic_sub(count, &queue->used_buffers); + qeth_check_outbound_queue(queue); + + txq = netdev_get_tx_queue(dev, __queue); + /* xmit may have observed the full-condition, but not yet stopped the + * txq. In which case the code below won't trigger. So before returning, + * xmit will re-check the txq's fill level and wake it up if needed. + */ + if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue)) + netif_tx_wake_queue(txq); +} + +/** + * Note: Function assumes that we have 4 outbound queues. + */ +int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb) +{ + struct vlan_ethhdr *veth = vlan_eth_hdr(skb); + u8 tos; + + switch (card->qdio.do_prio_queueing) { + case QETH_PRIO_Q_ING_TOS: + case QETH_PRIO_Q_ING_PREC: + switch (qeth_get_ip_version(skb)) { + case 4: + tos = ipv4_get_dsfield(ip_hdr(skb)); + break; + case 6: + tos = ipv6_get_dsfield(ipv6_hdr(skb)); + break; + default: + return card->qdio.default_out_queue; + } + if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) + return ~tos >> 6 & 3; + if (tos & IPTOS_MINCOST) + return 3; + if (tos & IPTOS_RELIABILITY) + return 2; + if (tos & IPTOS_THROUGHPUT) + return 1; + if (tos & IPTOS_LOWDELAY) + return 0; + break; + case QETH_PRIO_Q_ING_SKB: + if (skb->priority > 5) + return 0; + return ~skb->priority >> 1 & 3; + case QETH_PRIO_Q_ING_VLAN: + if (veth->h_vlan_proto == htons(ETH_P_8021Q)) + return ~ntohs(veth->h_vlan_TCI) >> + (VLAN_PRIO_SHIFT + 1) & 3; + break; + case QETH_PRIO_Q_ING_FIXED: + return card->qdio.default_out_queue; + default: + break; + } + return card->qdio.default_out_queue; +} +EXPORT_SYMBOL_GPL(qeth_get_priority_queue); + +/** + * qeth_get_elements_for_frags() - find number of SBALEs for skb frags. + * @skb: SKB address + * + * Returns the number of pages, and thus QDIO buffer elements, needed to cover + * fragmented part of the SKB. Returns zero for linear SKB. + */ +static int qeth_get_elements_for_frags(struct sk_buff *skb) +{ + int cnt, elements = 0; + + for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; + + elements += qeth_get_elements_for_range( + (addr_t)skb_frag_address(frag), + (addr_t)skb_frag_address(frag) + skb_frag_size(frag)); + } + return elements; +} + +/** + * qeth_count_elements() - Counts the number of QDIO buffer elements needed + * to transmit an skb. + * @skb: the skb to operate on. + * @data_offset: skip this part of the skb's linear data + * + * Returns the number of pages, and thus QDIO buffer elements, needed to map the + * skb's data (both its linear part and paged fragments). 
+ */ +unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset) +{ + unsigned int elements = qeth_get_elements_for_frags(skb); + addr_t end = (addr_t)skb->data + skb_headlen(skb); + addr_t start = (addr_t)skb->data + data_offset; + + if (start != end) + elements += qeth_get_elements_for_range(start, end); + return elements; +} +EXPORT_SYMBOL_GPL(qeth_count_elements); + +#define QETH_HDR_CACHE_OBJ_SIZE (sizeof(struct qeth_hdr_tso) + \ + MAX_TCP_HEADER) + +/** + * qeth_add_hw_header() - add a HW header to an skb. + * @skb: skb that the HW header should be added to. + * @hdr: double pointer to a qeth_hdr. When returning with >= 0, + * it contains a valid pointer to a qeth_hdr. + * @hdr_len: length of the HW header. + * @proto_len: length of protocol headers that need to be in same page as the + * HW header. + * + * Returns the pushed length. If the header can't be pushed on + * (eg. because it would cross a page boundary), it is allocated from + * the cache instead and 0 is returned. + * The number of needed buffer elements is returned in @elements. + * Error to create the hdr is indicated by returning with < 0. + */ +static int qeth_add_hw_header(struct qeth_qdio_out_q *queue, + struct sk_buff *skb, struct qeth_hdr **hdr, + unsigned int hdr_len, unsigned int proto_len, + unsigned int *elements) +{ + gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0); + const unsigned int contiguous = proto_len ? proto_len : 1; + const unsigned int max_elements = queue->max_elements; + unsigned int __elements; + addr_t start, end; + bool push_ok; + int rc; + +check_layout: + start = (addr_t)skb->data - hdr_len; + end = (addr_t)skb->data; + + if (qeth_get_elements_for_range(start, end + contiguous) == 1) { + /* Push HW header into same page as first protocol header. */ + push_ok = true; + /* ... but TSO always needs a separate element for headers: */ + if (skb_is_gso(skb)) + __elements = 1 + qeth_count_elements(skb, proto_len); + else + __elements = qeth_count_elements(skb, 0); + } else if (!proto_len && PAGE_ALIGNED(skb->data)) { + /* Push HW header into preceding page, flush with skb->data. */ + push_ok = true; + __elements = 1 + qeth_count_elements(skb, 0); + } else { + /* Use header cache, copy protocol headers up. */ + push_ok = false; + __elements = 1 + qeth_count_elements(skb, proto_len); + } + + /* Compress skb to fit into one IO buffer: */ + if (__elements > max_elements) { + if (!skb_is_nonlinear(skb)) { + /* Drop it, no easy way of shrinking it further. 
*/ + QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n", + max_elements, __elements, skb->len); + return -E2BIG; + } + + rc = skb_linearize(skb); + if (rc) { + QETH_TXQ_STAT_INC(queue, skbs_linearized_fail); + return rc; + } + + QETH_TXQ_STAT_INC(queue, skbs_linearized); + /* Linearization changed the layout, re-evaluate: */ + goto check_layout; + } + + *elements = __elements; + /* Add the header: */ + if (push_ok) { + *hdr = skb_push(skb, hdr_len); + return hdr_len; + } + + /* Fall back to cache element with known-good alignment: */ + if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE) + return -E2BIG; + *hdr = kmem_cache_alloc(qeth_core_header_cache, gfp); + if (!*hdr) + return -ENOMEM; + /* Copy protocol headers behind HW header: */ + skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len); + return 0; +} + +static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue, + struct sk_buff *curr_skb, + struct qeth_hdr *curr_hdr) +{ + struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start]; + struct qeth_hdr *prev_hdr = queue->prev_hdr; + + if (!prev_hdr) + return true; + + /* All packets must have the same target: */ + if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) { + struct sk_buff *prev_skb = skb_peek(&buffer->skb_list); + + return ether_addr_equal(eth_hdr(prev_skb)->h_dest, + eth_hdr(curr_skb)->h_dest) && + qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2); + } + + return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) && + qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3); +} + +/** + * qeth_fill_buffer() - map skb into an output buffer + * @buf: buffer to transport the skb + * @skb: skb to map into the buffer + * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated + * from qeth_core_header_cache. 
+ * @offset: when mapping the skb, start at skb->data + offset + * @hd_len: if > 0, build a dedicated header element of this size + */ +static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf, + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned int offset, unsigned int hd_len) +{ + struct qdio_buffer *buffer = buf->buffer; + int element = buf->next_element_to_fill; + int length = skb_headlen(skb) - offset; + char *data = skb->data + offset; + unsigned int elem_length, cnt; + bool is_first_elem = true; + + __skb_queue_tail(&buf->skb_list, skb); + + /* build dedicated element for HW Header */ + if (hd_len) { + is_first_elem = false; + + buffer->element[element].addr = virt_to_phys(hdr); + buffer->element[element].length = hd_len; + buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; + + /* HW header is allocated from cache: */ + if ((void *)hdr != skb->data) + buf->is_header[element] = 1; + /* HW header was pushed and is contiguous with linear part: */ + else if (length > 0 && !PAGE_ALIGNED(data) && + (data == (char *)hdr + hd_len)) + buffer->element[element].eflags |= + SBAL_EFLAGS_CONTIGUOUS; + + element++; + } + + /* map linear part into buffer element(s) */ + while (length > 0) { + elem_length = min_t(unsigned int, length, + PAGE_SIZE - offset_in_page(data)); + + buffer->element[element].addr = virt_to_phys(data); + buffer->element[element].length = elem_length; + length -= elem_length; + if (is_first_elem) { + is_first_elem = false; + if (length || skb_is_nonlinear(skb)) + /* skb needs additional elements */ + buffer->element[element].eflags = + SBAL_EFLAGS_FIRST_FRAG; + else + buffer->element[element].eflags = 0; + } else { + buffer->element[element].eflags = + SBAL_EFLAGS_MIDDLE_FRAG; + } + + data += elem_length; + element++; + } + + /* map page frags into buffer element(s) */ + for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; + + data = skb_frag_address(frag); + length = skb_frag_size(frag); + while (length > 0) { + elem_length = min_t(unsigned int, length, + PAGE_SIZE - offset_in_page(data)); + + buffer->element[element].addr = virt_to_phys(data); + buffer->element[element].length = elem_length; + buffer->element[element].eflags = + SBAL_EFLAGS_MIDDLE_FRAG; + + length -= elem_length; + data += elem_length; + element++; + } + } + + if (buffer->element[element - 1].eflags) + buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; + buf->next_element_to_fill = element; + return element; +} + +static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue, + struct sk_buff *skb, unsigned int elements, + struct qeth_hdr *hdr, unsigned int offset, + unsigned int hd_len) +{ + unsigned int bytes = qdisc_pkt_len(skb); + struct qeth_qdio_out_buffer *buffer; + unsigned int next_element; + struct netdev_queue *txq; + bool stopped = false; + bool flush; + + buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)]; + txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); + + /* Just a sanity check, the wake/stop logic should ensure that we always + * get a free buffer. 
+ */ + if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) + return -EBUSY; + + flush = !qeth_iqd_may_bulk(queue, skb, hdr); + + if (flush || + (buffer->next_element_to_fill + elements > queue->max_elements)) { + if (buffer->next_element_to_fill > 0) { + atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); + queue->bulk_count++; + } + + if (queue->bulk_count >= queue->bulk_max) + flush = true; + + if (flush) + qeth_flush_queue(queue); + + buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + + queue->bulk_count)]; + + /* Sanity-check again: */ + if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) + return -EBUSY; + } + + if (buffer->next_element_to_fill == 0 && + atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { + /* If a TX completion happens right _here_ and misses to wake + * the txq, then our re-check below will catch the race. + */ + QETH_TXQ_STAT_INC(queue, stopped); + netif_tx_stop_queue(txq); + stopped = true; + } + + next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); + buffer->bytes += bytes; + buffer->frames += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1; + queue->prev_hdr = hdr; + + flush = __netdev_tx_sent_queue(txq, bytes, + !stopped && netdev_xmit_more()); + + if (flush || next_element >= queue->max_elements) { + atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); + queue->bulk_count++; + + if (queue->bulk_count >= queue->bulk_max) + flush = true; + + if (flush) + qeth_flush_queue(queue); + } + + if (stopped && !qeth_out_queue_is_full(queue)) + netif_tx_start_queue(txq); + return 0; +} + +int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned int offset, unsigned int hd_len, + int elements_needed) +{ + unsigned int start_index = queue->next_buf_to_fill; + struct qeth_qdio_out_buffer *buffer; + unsigned int next_element; + struct netdev_queue *txq; + bool stopped = false; + int flush_count = 0; + int do_pack = 0; + int rc = 0; + + buffer = queue->bufs[queue->next_buf_to_fill]; + + /* Just a sanity check, the wake/stop logic should ensure that we always + * get a free buffer. + */ + if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) + return -EBUSY; + + txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb)); + + /* check if we need to switch packing state of this queue */ + qeth_switch_to_packing_if_needed(queue); + if (queue->do_pack) { + do_pack = 1; + /* does packet fit in current buffer? */ + if (buffer->next_element_to_fill + elements_needed > + queue->max_elements) { + /* ... no -> set state PRIMED */ + atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); + flush_count++; + queue->next_buf_to_fill = + QDIO_BUFNR(queue->next_buf_to_fill + 1); + buffer = queue->bufs[queue->next_buf_to_fill]; + + /* We stepped forward, so sanity-check again: */ + if (atomic_read(&buffer->state) != + QETH_QDIO_BUF_EMPTY) { + qeth_flush_buffers(queue, start_index, + flush_count); + rc = -EBUSY; + goto out; + } + } + } + + if (buffer->next_element_to_fill == 0 && + atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) { + /* If a TX completion happens right _here_ and misses to wake + * the txq, then our re-check below will catch the race. + */ + QETH_TXQ_STAT_INC(queue, stopped); + netif_tx_stop_queue(txq); + stopped = true; + } + + next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len); + buffer->bytes += qdisc_pkt_len(skb); + buffer->frames += skb_is_gso(skb) ? 
skb_shinfo(skb)->gso_segs : 1; + + if (queue->do_pack) + QETH_TXQ_STAT_INC(queue, skbs_pack); + if (!queue->do_pack || stopped || next_element >= queue->max_elements) { + flush_count++; + atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); + queue->next_buf_to_fill = + QDIO_BUFNR(queue->next_buf_to_fill + 1); + } + + if (flush_count) + qeth_flush_buffers(queue, start_index, flush_count); + +out: + if (do_pack) + QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count); + + if (stopped && !qeth_out_queue_is_full(queue)) + netif_tx_start_queue(txq); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_do_send_packet); + +static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr, + unsigned int payload_len, struct sk_buff *skb, + unsigned int proto_len) +{ + struct qeth_hdr_ext_tso *ext = &hdr->ext; + + ext->hdr_tot_len = sizeof(*ext); + ext->imb_hdr_no = 1; + ext->hdr_type = 1; + ext->hdr_version = 1; + ext->hdr_len = 28; + ext->payload_len = payload_len; + ext->mss = skb_shinfo(skb)->gso_size; + ext->dg_hdr_len = proto_len; +} + +int qeth_xmit(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int ipv, + void (*fill_header)(struct qeth_qdio_out_q *queue, + struct qeth_hdr *hdr, struct sk_buff *skb, + int ipv, unsigned int data_len)) +{ + unsigned int proto_len, hw_hdr_len; + unsigned int frame_len = skb->len; + bool is_tso = skb_is_gso(skb); + unsigned int data_offset = 0; + struct qeth_hdr *hdr = NULL; + unsigned int hd_len = 0; + unsigned int elements; + int push_len, rc; + + if (is_tso) { + hw_hdr_len = sizeof(struct qeth_hdr_tso); + proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + } else { + hw_hdr_len = sizeof(struct qeth_hdr); + proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0; + } + + rc = skb_cow_head(skb, hw_hdr_len); + if (rc) + return rc; + + push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len, + &elements); + if (push_len < 0) + return push_len; + if (is_tso || !push_len) { + /* HW header needs its own buffer element. */ + hd_len = hw_hdr_len + proto_len; + data_offset = push_len + proto_len; + } + memset(hdr, 0, hw_hdr_len); + fill_header(queue, hdr, skb, ipv, frame_len); + if (is_tso) + qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr, + frame_len - proto_len, skb, proto_len); + + if (IS_IQD(card)) { + rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset, + hd_len); + } else { + /* TODO: drop skb_orphan() once TX completion is fast enough */ + skb_orphan(skb); + spin_lock(&queue->lock); + rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, + hd_len, elements); + spin_unlock(&queue->lock); + } + + if (rc && !push_len) + kmem_cache_free(qeth_core_header_cache, hdr); + + return rc; +} +EXPORT_SYMBOL_GPL(qeth_xmit); + +static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_ipacmd_setadpparms *setparms; + + QETH_CARD_TEXT(card, 4, "prmadpcb"); + + setparms = &(cmd->data.setadapterparms); + if (qeth_setadpparms_inspect_rc(cmd)) { + QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); + setparms->data.mode = SET_PROMISC_MODE_OFF; + } + card->info.promisc_mode = setparms->data.mode; + return (cmd->hdr.return_code) ? -EIO : 0; +} + +void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable) +{ + enum qeth_ipa_promisc_modes mode = enable ? 
SET_PROMISC_MODE_ON : + SET_PROMISC_MODE_OFF; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_CARD_TEXT(card, 4, "setprom"); + QETH_CARD_TEXT_(card, 4, "mode:%x", mode); + + iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, + SETADP_DATA_SIZEOF(mode)); + if (!iob) + return; + cmd = __ipa_cmd(iob); + cmd->data.setadapterparms.data.mode = mode; + qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); +} +EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode); + +static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_ipacmd_setadpparms *adp_cmd; + + QETH_CARD_TEXT(card, 4, "chgmaccb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return -EIO; + + adp_cmd = &cmd->data.setadapterparms; + if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr)) + return -EADDRNOTAVAIL; + + if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) && + !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC)) + return -EADDRNOTAVAIL; + + ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr); + return 0; +} + +int qeth_setadpparms_change_macaddr(struct qeth_card *card) +{ + int rc; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_CARD_TEXT(card, 4, "chgmac"); + + iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, + SETADP_DATA_SIZEOF(change_addr)); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; + cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN; + ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr, + card->dev->dev_addr); + rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb, + NULL); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); + +static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_set_access_ctrl *access_ctrl_req; + + QETH_CARD_TEXT(card, 4, "setaccb"); + + access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; + QETH_CARD_TEXT_(card, 2, "rc=%d", + cmd->data.setadapterparms.hdr.return_code); + if (cmd->data.setadapterparms.hdr.return_code != + SET_ACCESS_CTRL_RC_SUCCESS) + QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n", + access_ctrl_req->subcmd_code, CARD_DEVID(card), + cmd->data.setadapterparms.hdr.return_code); + switch (qeth_setadpparms_inspect_rc(cmd)) { + case SET_ACCESS_CTRL_RC_SUCCESS: + if (access_ctrl_req->subcmd_code == ISOLATION_MODE_NONE) + dev_info(&card->gdev->dev, + "QDIO data connection isolation is deactivated\n"); + else + dev_info(&card->gdev->dev, + "QDIO data connection isolation is activated\n"); + return 0; + case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: + QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n", + CARD_DEVID(card)); + return 0; + case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: + QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n", + CARD_DEVID(card)); + return 0; + case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: + dev_err(&card->gdev->dev, "Adapter does not " + "support QDIO data connection isolation\n"); + return -EOPNOTSUPP; + case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: + dev_err(&card->gdev->dev, + "Adapter is dedicated. 
" + "QDIO data connection isolation not supported\n"); + return -EOPNOTSUPP; + case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: + dev_err(&card->gdev->dev, + "TSO does not permit QDIO data connection isolation\n"); + return -EPERM; + case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED: + dev_err(&card->gdev->dev, "The adjacent switch port does not " + "support reflective relay mode\n"); + return -EOPNOTSUPP; + case SET_ACCESS_CTRL_RC_REFLREL_FAILED: + dev_err(&card->gdev->dev, "The reflective relay mode cannot be " + "enabled at the adjacent switch port"); + return -EREMOTEIO; + case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED: + dev_warn(&card->gdev->dev, "Turning off reflective relay mode " + "at the adjacent switch failed\n"); + /* benign error while disabling ISOLATION_MODE_FWD */ + return 0; + default: + return -EIO; + } +} + +int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, + enum qeth_ipa_isolation_modes mode) +{ + int rc; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + struct qeth_set_access_ctrl *access_ctrl_req; + + QETH_CARD_TEXT(card, 4, "setacctl"); + + if (!qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { + dev_err(&card->gdev->dev, + "Adapter does not support QDIO data connection isolation\n"); + return -EOPNOTSUPP; + } + + iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, + SETADP_DATA_SIZEOF(set_access_ctrl)); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; + access_ctrl_req->subcmd_code = mode; + + rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb, + NULL); + if (rc) { + QETH_CARD_TEXT_(card, 2, "rc=%d", rc); + QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n", + rc, CARD_DEVID(card)); + } + + return rc; +} + +void qeth_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct qeth_card *card; + + card = dev->ml_priv; + QETH_CARD_TEXT(card, 4, "txtimeo"); + qeth_schedule_recovery(card); +} +EXPORT_SYMBOL_GPL(qeth_tx_timeout); + +static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) +{ + struct qeth_card *card = dev->ml_priv; + int rc = 0; + + switch (regnum) { + case MII_BMCR: /* Basic mode control register */ + rc = BMCR_FULLDPLX; + if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) && + (card->info.link_type != QETH_LINK_TYPE_OSN) && + (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) && + (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH)) + rc |= BMCR_SPEED100; + break; + case MII_BMSR: /* Basic mode status register */ + rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS | + BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL | + BMSR_100BASE4; + break; + case MII_PHYSID1: /* PHYS ID 1 */ + rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) | + dev->dev_addr[2]; + rc = (rc >> 5) & 0xFFFF; + break; + case MII_PHYSID2: /* PHYS ID 2 */ + rc = (dev->dev_addr[2] << 10) & 0xFFFF; + break; + case MII_ADVERTISE: /* Advertisement control reg */ + rc = ADVERTISE_ALL; + break; + case MII_LPA: /* Link partner ability reg */ + rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL | + LPA_100BASE4 | LPA_LPACK; + break; + case MII_EXPANSION: /* Expansion register */ + break; + case MII_DCOUNTER: /* disconnect counter */ + break; + case MII_FCSCOUNTER: /* false carrier counter */ + break; + case MII_NWAYTEST: /* N-way auto-neg test register */ + break; + case MII_RERRCOUNTER: /* rx error counter */ + rc = card->stats.rx_length_errors + + card->stats.rx_frame_errors + + 
card->stats.rx_fifo_errors; + break; + case MII_SREVISION: /* silicon revision */ + break; + case MII_RESV1: /* reserved 1 */ + break; + case MII_LBRERROR: /* loopback, rx, bypass error */ + break; + case MII_PHYADDR: /* physical address */ + break; + case MII_RESV2: /* reserved 2 */ + break; + case MII_TPISTATUS: /* TPI status for 10mbps */ + break; + case MII_NCONFIG: /* network interface config */ + break; + default: + break; + } + return rc; +} + +static int qeth_snmp_command_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_arp_query_info *qinfo = reply->param; + struct qeth_ipacmd_setadpparms *adp_cmd; + unsigned int data_len; + void *snmp_data; + + QETH_CARD_TEXT(card, 3, "snpcmdcb"); + + if (cmd->hdr.return_code) { + QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); + return -EIO; + } + if (cmd->data.setadapterparms.hdr.return_code) { + cmd->hdr.return_code = + cmd->data.setadapterparms.hdr.return_code; + QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); + return -EIO; + } + + adp_cmd = &cmd->data.setadapterparms; + data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr); + if (adp_cmd->hdr.seq_no == 1) { + snmp_data = &adp_cmd->data.snmp; + } else { + snmp_data = &adp_cmd->data.snmp.request; + data_len -= offsetof(struct qeth_snmp_cmd, request); + } + + /* check if there is enough room in userspace */ + if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { + QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC); + return -ENOSPC; + } + QETH_CARD_TEXT_(card, 4, "snore%i", + cmd->data.setadapterparms.hdr.used_total); + QETH_CARD_TEXT_(card, 4, "sseqn%i", + cmd->data.setadapterparms.hdr.seq_no); + /*copy entries to user buffer*/ + memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len); + qinfo->udata_offset += data_len; + + if (cmd->data.setadapterparms.hdr.seq_no < + cmd->data.setadapterparms.hdr.used_total) + return 1; + return 0; +} + +static int qeth_snmp_command(struct qeth_card *card, char __user *udata) +{ + struct qeth_snmp_ureq __user *ureq; + struct qeth_cmd_buffer *iob; + unsigned int req_len; + struct qeth_arp_query_info qinfo = {0, }; + int rc = 0; + + QETH_CARD_TEXT(card, 3, "snmpcmd"); + + if (IS_VM_NIC(card)) + return -EOPNOTSUPP; + + if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && + IS_LAYER3(card)) + return -EOPNOTSUPP; + + ureq = (struct qeth_snmp_ureq __user *) udata; + if (get_user(qinfo.udata_len, &ureq->hdr.data_len) || + get_user(req_len, &ureq->hdr.req_len)) + return -EFAULT; + + /* Sanitize user input, to avoid overflows in iob size calculation: */ + if (req_len > QETH_BUFSIZE) + return -EINVAL; + + iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len); + if (!iob) + return -ENOMEM; + + if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp, + &ureq->cmd, req_len)) { + qeth_put_cmd(iob); + return -EFAULT; + } + + qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); + if (!qinfo.udata) { + qeth_put_cmd(iob); + return -ENOMEM; + } + qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); + + rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo); + if (rc) + QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n", + CARD_DEVID(card), rc); + else { + if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) + rc = -EFAULT; + } + + kfree(qinfo.udata); + return rc; +} + +static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) 
+{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; + struct qeth_qoat_priv *priv = reply->param; + int resdatalen; + + QETH_CARD_TEXT(card, 3, "qoatcb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return -EIO; + + resdatalen = cmd->data.setadapterparms.hdr.cmdlength; + + if (resdatalen > (priv->buffer_len - priv->response_len)) + return -ENOSPC; + + memcpy(priv->buffer + priv->response_len, + &cmd->data.setadapterparms.hdr, resdatalen); + priv->response_len += resdatalen; + + if (cmd->data.setadapterparms.hdr.seq_no < + cmd->data.setadapterparms.hdr.used_total) + return 1; + return 0; +} + +static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) +{ + int rc = 0; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + struct qeth_query_oat *oat_req; + struct qeth_query_oat_data oat_data; + struct qeth_qoat_priv priv; + void __user *tmp; + + QETH_CARD_TEXT(card, 3, "qoatcmd"); + + if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) + return -EOPNOTSUPP; + + if (copy_from_user(&oat_data, udata, sizeof(oat_data))) + return -EFAULT; + + priv.buffer_len = oat_data.buffer_len; + priv.response_len = 0; + priv.buffer = vzalloc(oat_data.buffer_len); + if (!priv.buffer) + return -ENOMEM; + + iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, + SETADP_DATA_SIZEOF(query_oat)); + if (!iob) { + rc = -ENOMEM; + goto out_free; + } + cmd = __ipa_cmd(iob); + oat_req = &cmd->data.setadapterparms.data.query_oat; + oat_req->subcmd_code = oat_data.command; + + rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, &priv); + if (!rc) { + tmp = is_compat_task() ? compat_ptr(oat_data.ptr) : + u64_to_user_ptr(oat_data.ptr); + oat_data.response_len = priv.response_len; + + if (copy_to_user(tmp, priv.buffer, priv.response_len) || + copy_to_user(udata, &oat_data, sizeof(oat_data))) + rc = -EFAULT; + } + +out_free: + vfree(priv.buffer); + return rc; +} + +static int qeth_query_card_info_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct carrier_info *carrier_info = (struct carrier_info *)reply->param; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; + struct qeth_query_card_info *card_info; + + QETH_CARD_TEXT(card, 2, "qcrdincb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return -EIO; + + card_info = &cmd->data.setadapterparms.data.card_info; + carrier_info->card_type = card_info->card_type; + carrier_info->port_mode = card_info->port_mode; + carrier_info->port_speed = card_info->port_speed; + return 0; +} + +int qeth_query_card_info(struct qeth_card *card, + struct carrier_info *carrier_info) +{ + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "qcrdinfo"); + if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO)) + return -EOPNOTSUPP; + iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0); + if (!iob) + return -ENOMEM; + return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, + (void *)carrier_info); +} + +/** + * qeth_vm_request_mac() - Request a hypervisor-managed MAC address + * @card: pointer to a qeth_card + * + * Returns + * 0, if a MAC address has been set for the card's netdevice + * a return code, for various error conditions + */ +int qeth_vm_request_mac(struct qeth_card *card) +{ + struct diag26c_mac_resp *response; + struct diag26c_mac_req *request; + int rc; + + QETH_CARD_TEXT(card, 2, "vmreqmac"); + + request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); + response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); + if (!request || !response) { + rc = -ENOMEM; + goto out; + } 
+ + request->resp_buf_len = sizeof(*response); + request->resp_version = DIAG26C_VERSION2; + request->op_code = DIAG26C_GET_MAC; + request->devno = card->info.ddev_devno; + + QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); + rc = diag26c(request, response, DIAG26C_MAC_SERVICES); + QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); + if (rc) + goto out; + QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); + + if (request->resp_buf_len < sizeof(*response) || + response->version != request->resp_version) { + rc = -EIO; + QETH_CARD_TEXT(card, 2, "badresp"); + QETH_CARD_HEX(card, 2, &request->resp_buf_len, + sizeof(request->resp_buf_len)); + } else if (!is_valid_ether_addr(response->mac)) { + rc = -EINVAL; + QETH_CARD_TEXT(card, 2, "badmac"); + QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN); + } else { + ether_addr_copy(card->dev->dev_addr, response->mac); + } + +out: + kfree(response); + kfree(request); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_vm_request_mac); + +static void qeth_determine_capabilities(struct qeth_card *card) +{ + struct qeth_channel *channel = &card->data; + struct ccw_device *ddev = channel->ccwdev; + int rc; + int ddev_offline = 0; + + QETH_CARD_TEXT(card, 2, "detcapab"); + if (!ddev->online) { + ddev_offline = 1; + rc = qeth_start_channel(channel); + if (rc) { + QETH_CARD_TEXT_(card, 2, "3err%d", rc); + goto out; + } + } + + rc = qeth_read_conf_data(card); + if (rc) { + QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n", + CARD_DEVID(card), rc); + QETH_CARD_TEXT_(card, 2, "5err%d", rc); + goto out_offline; + } + + rc = qdio_get_ssqd_desc(ddev, &card->ssqd); + if (rc) + QETH_CARD_TEXT_(card, 2, "6err%d", rc); + + QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt); + QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1); + QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2); + QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3); + QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt); + if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) || + ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) || + ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) { + dev_info(&card->gdev->dev, + "Completion Queueing supported\n"); + } else { + card->options.cq = QETH_CQ_NOTAVAILABLE; + } + +out_offline: + if (ddev_offline == 1) + qeth_stop_channel(channel); +out: + return; +} + +static void qeth_read_ccw_conf_data(struct qeth_card *card) +{ + struct qeth_card_info *info = &card->info; + struct ccw_device *cdev = CARD_DDEV(card); + struct ccw_dev_id dev_id; + + QETH_CARD_TEXT(card, 2, "ccwconfd"); + ccw_device_get_id(cdev, &dev_id); + + info->ddev_devno = dev_id.devno; + info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) && + !ccw_device_get_iid(cdev, &info->iid) && + !ccw_device_get_chid(cdev, 0, &info->chid); + info->ssid = dev_id.ssid; + + dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n", + info->chid, info->chpid); + + QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno); + QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid); + QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid); + QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid); + QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid); + QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid); + QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid); +} + +static int qeth_qdio_establish(struct qeth_card *card) +{ + struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES]; + struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES]; + struct qeth_qib_parms *qib_parms = NULL; + struct qdio_initialize init_data; + 
unsigned int i; + int rc = 0; + + QETH_CARD_TEXT(card, 2, "qdioest"); + + if (!IS_IQD(card) && !IS_VM_NIC(card)) { + qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL); + if (!qib_parms) + return -ENOMEM; + + qeth_fill_qib_parms(card, qib_parms); + } + + in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs; + if (card->options.cq == QETH_CQ_ENABLED) + in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs; + + for (i = 0; i < card->qdio.no_out_queues; i++) + out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs; + + memset(&init_data, 0, sizeof(struct qdio_initialize)); + init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT : + QDIO_QETH_QFMT; + init_data.qib_param_field_format = 0; + init_data.qib_param_field = (void *)qib_parms; + init_data.no_input_qs = card->qdio.no_in_queues; + init_data.no_output_qs = card->qdio.no_out_queues; + init_data.input_handler = qeth_qdio_input_handler; + init_data.output_handler = qeth_qdio_output_handler; + init_data.irq_poll = qeth_qdio_poll; + init_data.int_parm = (unsigned long) card; + init_data.input_sbal_addr_array = in_sbal_ptrs; + init_data.output_sbal_addr_array = out_sbal_ptrs; + init_data.output_sbal_state_array = card->qdio.out_bufstates; + init_data.scan_threshold = IS_IQD(card) ? 0 : 32; + + if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, + QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { + rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs, + init_data.no_output_qs); + if (rc) { + atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); + goto out; + } + rc = qdio_establish(CARD_DDEV(card), &init_data); + if (rc) { + atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); + qdio_free(CARD_DDEV(card)); + } + } + + switch (card->options.cq) { + case QETH_CQ_ENABLED: + dev_info(&card->gdev->dev, "Completion Queue support enabled"); + break; + case QETH_CQ_DISABLED: + dev_info(&card->gdev->dev, "Completion Queue support disabled"); + break; + default: + break; + } + +out: + kfree(qib_parms); + return rc; +} + +static void qeth_core_free_card(struct qeth_card *card) +{ + QETH_CARD_TEXT(card, 2, "freecrd"); + + unregister_service_level(&card->qeth_service_level); + debugfs_remove_recursive(card->debugfs); + qeth_put_cmd(card->read_cmd); + destroy_workqueue(card->event_wq); + dev_set_drvdata(&card->gdev->dev, NULL); + kfree(card); +} + +static void qeth_trace_features(struct qeth_card *card) +{ + QETH_CARD_TEXT(card, 2, "features"); + QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4)); + QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6)); + QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp)); + QETH_CARD_HEX(card, 2, &card->info.diagass_support, + sizeof(card->info.diagass_support)); +} + +static struct ccw_device_id qeth_ids[] = { + {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01), + .driver_info = QETH_CARD_TYPE_OSD}, + {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05), + .driver_info = QETH_CARD_TYPE_IQD}, +#ifdef CONFIG_QETH_OSN + {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06), + .driver_info = QETH_CARD_TYPE_OSN}, +#endif + {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03), + .driver_info = QETH_CARD_TYPE_OSM}, +#ifdef CONFIG_QETH_OSX + {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02), + .driver_info = QETH_CARD_TYPE_OSX}, +#endif + {}, +}; +MODULE_DEVICE_TABLE(ccw, qeth_ids); + +static struct ccw_driver qeth_ccw_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "qeth", + }, + .ids = qeth_ids, + .probe = ccwgroup_probe_ccwdev, + .remove = ccwgroup_remove_ccwdev, +}; + +static int 
qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok) +{ + int retries = 3; + int rc; + + QETH_CARD_TEXT(card, 2, "hrdsetup"); + atomic_set(&card->force_alloc_skb, 0); + rc = qeth_update_from_chp_desc(card); + if (rc) + return rc; +retry: + if (retries < 3) + QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n", + CARD_DEVID(card)); + rc = qeth_qdio_clear_card(card, !IS_IQD(card)); + qeth_stop_channel(&card->data); + qeth_stop_channel(&card->write); + qeth_stop_channel(&card->read); + qdio_free(CARD_DDEV(card)); + + rc = qeth_start_channel(&card->read); + if (rc) + goto retriable; + rc = qeth_start_channel(&card->write); + if (rc) + goto retriable; + rc = qeth_start_channel(&card->data); + if (rc) + goto retriable; +retriable: + if (rc == -ERESTARTSYS) { + QETH_CARD_TEXT(card, 2, "break1"); + return rc; + } else if (rc) { + QETH_CARD_TEXT_(card, 2, "1err%d", rc); + if (--retries < 0) + goto out; + else + goto retry; + } + + qeth_determine_capabilities(card); + qeth_read_ccw_conf_data(card); + qeth_idx_init(card); + + rc = qeth_idx_activate_read_channel(card); + if (rc == -EINTR) { + QETH_CARD_TEXT(card, 2, "break2"); + return rc; + } else if (rc) { + QETH_CARD_TEXT_(card, 2, "3err%d", rc); + if (--retries < 0) + goto out; + else + goto retry; + } + + rc = qeth_idx_activate_write_channel(card); + if (rc == -EINTR) { + QETH_CARD_TEXT(card, 2, "break3"); + return rc; + } else if (rc) { + QETH_CARD_TEXT_(card, 2, "4err%d", rc); + if (--retries < 0) + goto out; + else + goto retry; + } + card->read_or_write_problem = 0; + rc = qeth_mpc_initialize(card); + if (rc) { + QETH_CARD_TEXT_(card, 2, "5err%d", rc); + goto out; + } + + rc = qeth_send_startlan(card); + if (rc) { + QETH_CARD_TEXT_(card, 2, "6err%d", rc); + if (rc == -ENETDOWN) { + dev_warn(&card->gdev->dev, "The LAN is offline\n"); + *carrier_ok = false; + } else { + goto out; + } + } else { + *carrier_ok = true; + } + + card->options.ipa4.supported = 0; + card->options.ipa6.supported = 0; + card->options.adp.supported = 0; + card->options.sbp.supported_funcs = 0; + card->info.diagass_support = 0; + rc = qeth_query_ipassists(card, QETH_PROT_IPV4); + if (rc == -ENOMEM) + goto out; + if (qeth_is_supported(card, IPA_IPV6)) { + rc = qeth_query_ipassists(card, QETH_PROT_IPV6); + if (rc == -ENOMEM) + goto out; + } + if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { + rc = qeth_query_setadapterparms(card); + if (rc < 0) { + QETH_CARD_TEXT_(card, 2, "7err%d", rc); + goto out; + } + } + if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { + rc = qeth_query_setdiagass(card); + if (rc) + QETH_CARD_TEXT_(card, 2, "8err%d", rc); + } + + qeth_trace_features(card); + + if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) || + (card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM))) + card->info.hwtrap = 0; + + if (card->options.isolation != ISOLATION_MODE_NONE) { + rc = qeth_setadpparms_set_access_ctrl(card, + card->options.isolation); + if (rc) + goto out; + } + + rc = qeth_init_qdio_queues(card); + if (rc) { + QETH_CARD_TEXT_(card, 2, "9err%d", rc); + goto out; + } + + return 0; +out: + dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " + "an error on the device\n"); + QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! 
rc=%d\n", + CARD_DEVID(card), rc); + return rc; +} + +static int qeth_set_online(struct qeth_card *card, + const struct qeth_discipline *disc) +{ + bool carrier_ok; + int rc; + + mutex_lock(&card->conf_mutex); + QETH_CARD_TEXT(card, 2, "setonlin"); + + rc = qeth_hardsetup_card(card, &carrier_ok); + if (rc) { + QETH_CARD_TEXT_(card, 2, "2err%04x", rc); + rc = -ENODEV; + goto err_hardsetup; + } + + qeth_print_status_message(card); + + if (card->dev->reg_state != NETREG_REGISTERED) + /* no need for locking / error handling at this early stage: */ + qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card)); + + rc = disc->set_online(card, carrier_ok); + if (rc) + goto err_online; + + /* let user_space know that device is online */ + kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); + + mutex_unlock(&card->conf_mutex); + return 0; + +err_online: +err_hardsetup: + qeth_qdio_clear_card(card, 0); + qeth_clear_working_pool_list(card); + qeth_flush_local_addrs(card); + + qeth_stop_channel(&card->data); + qeth_stop_channel(&card->write); + qeth_stop_channel(&card->read); + qdio_free(CARD_DDEV(card)); + + mutex_unlock(&card->conf_mutex); + return rc; +} + +int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc, + bool resetting) +{ + int rc, rc2, rc3; + + mutex_lock(&card->conf_mutex); + QETH_CARD_TEXT(card, 3, "setoffl"); + + if ((!resetting && card->info.hwtrap) || card->info.hwtrap == 2) { + qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); + card->info.hwtrap = 1; + } + + /* cancel any stalled cmd that might block the rtnl: */ + qeth_clear_ipacmd_list(card); + + rtnl_lock(); + netif_device_detach(card->dev); + netif_carrier_off(card->dev); + rtnl_unlock(); + + cancel_work_sync(&card->rx_mode_work); + + disc->set_offline(card); + + qeth_qdio_clear_card(card, 0); + qeth_drain_output_queues(card); + qeth_clear_working_pool_list(card); + qeth_flush_local_addrs(card); + card->info.promisc_mode = 0; + + rc = qeth_stop_channel(&card->data); + rc2 = qeth_stop_channel(&card->write); + rc3 = qeth_stop_channel(&card->read); + if (!rc) + rc = (rc2) ? rc2 : rc3; + if (rc) + QETH_CARD_TEXT_(card, 2, "1err%d", rc); + qdio_free(CARD_DDEV(card)); + + /* let user_space know that device is offline */ + kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE); + + mutex_unlock(&card->conf_mutex); + return 0; +} +EXPORT_SYMBOL_GPL(qeth_set_offline); + +static int qeth_do_reset(void *data) +{ + const struct qeth_discipline *disc; + struct qeth_card *card = data; + int rc; + + /* Lock-free, other users will block until we are done. 
*/ + disc = card->discipline; + + QETH_CARD_TEXT(card, 2, "recover1"); + if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) + return 0; + QETH_CARD_TEXT(card, 2, "recover2"); + dev_warn(&card->gdev->dev, + "A recovery process has been started for the device\n"); + + qeth_set_offline(card, disc, true); + rc = qeth_set_online(card, disc); + if (!rc) { + dev_info(&card->gdev->dev, + "Device successfully recovered!\n"); + } else { + ccwgroup_set_offline(card->gdev); + dev_warn(&card->gdev->dev, + "The qeth device driver failed to recover an error on the device\n"); + } + qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); + qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); + return 0; +} + +#if IS_ENABLED(CONFIG_QETH_L3) +static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, + struct qeth_hdr *hdr) +{ + struct af_iucv_trans_hdr *iucv = (struct af_iucv_trans_hdr *) skb->data; + struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3; + struct net_device *dev = skb->dev; + + if (IS_IQD(card) && iucv->magic == ETH_P_AF_IUCV) { + dev_hard_header(skb, dev, ETH_P_AF_IUCV, dev->dev_addr, + "FAKELL", skb->len); + return; + } + + if (!(l3_hdr->flags & QETH_HDR_PASSTHRU)) { + u16 prot = (l3_hdr->flags & QETH_HDR_IPV6) ? ETH_P_IPV6 : + ETH_P_IP; + unsigned char tg_addr[ETH_ALEN]; + + skb_reset_network_header(skb); + switch (l3_hdr->flags & QETH_HDR_CAST_MASK) { + case QETH_CAST_MULTICAST: + if (prot == ETH_P_IP) + ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr); + else + ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr); + QETH_CARD_STAT_INC(card, rx_multicast); + break; + case QETH_CAST_BROADCAST: + ether_addr_copy(tg_addr, dev->broadcast); + QETH_CARD_STAT_INC(card, rx_multicast); + break; + default: + if (card->options.sniffer) + skb->pkt_type = PACKET_OTHERHOST; + ether_addr_copy(tg_addr, dev->dev_addr); + } + + if (l3_hdr->ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR) + dev_hard_header(skb, dev, prot, tg_addr, + &l3_hdr->next_hop.rx.src_mac, skb->len); + else + dev_hard_header(skb, dev, prot, tg_addr, "FAKELL", + skb->len); + } + + /* copy VLAN tag from hdr into skb */ + if (!card->options.sniffer && + (l3_hdr->ext_flags & (QETH_HDR_EXT_VLAN_FRAME | + QETH_HDR_EXT_INCLUDE_VLAN_TAG))) { + u16 tag = (l3_hdr->ext_flags & QETH_HDR_EXT_VLAN_FRAME) ? 
+ l3_hdr->vlan_id : + l3_hdr->next_hop.rx.vlan_id; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); + } +} +#endif + +static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb, + struct qeth_hdr *hdr, bool uses_frags) +{ + struct napi_struct *napi = &card->napi; + bool is_cso; + + switch (hdr->hdr.l2.id) { + case QETH_HEADER_TYPE_OSN: + skb_push(skb, sizeof(*hdr)); + skb_copy_to_linear_data(skb, hdr, sizeof(*hdr)); + QETH_CARD_STAT_ADD(card, rx_bytes, skb->len); + QETH_CARD_STAT_INC(card, rx_packets); + + card->osn_info.data_cb(skb); + return; +#if IS_ENABLED(CONFIG_QETH_L3) + case QETH_HEADER_TYPE_LAYER3: + qeth_l3_rebuild_skb(card, skb, hdr); + is_cso = hdr->hdr.l3.ext_flags & QETH_HDR_EXT_CSUM_TRANSP_REQ; + break; +#endif + case QETH_HEADER_TYPE_LAYER2: + is_cso = hdr->hdr.l2.flags[1] & QETH_HDR_EXT_CSUM_TRANSP_REQ; + break; + default: + /* never happens */ + if (uses_frags) + napi_free_frags(napi); + else + dev_kfree_skb_any(skb); + return; + } + + if (is_cso && (card->dev->features & NETIF_F_RXCSUM)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + QETH_CARD_STAT_INC(card, rx_skb_csum); + } else { + skb->ip_summed = CHECKSUM_NONE; + } + + QETH_CARD_STAT_ADD(card, rx_bytes, skb->len); + QETH_CARD_STAT_INC(card, rx_packets); + if (skb_is_nonlinear(skb)) { + QETH_CARD_STAT_INC(card, rx_sg_skbs); + QETH_CARD_STAT_ADD(card, rx_sg_frags, + skb_shinfo(skb)->nr_frags); + } + + if (uses_frags) { + napi_gro_frags(napi); + } else { + skb->protocol = eth_type_trans(skb, skb->dev); + napi_gro_receive(napi, skb); + } +} + +static void qeth_create_skb_frag(struct sk_buff *skb, char *data, int data_len) +{ + struct page *page = virt_to_page(data); + unsigned int next_frag; + + next_frag = skb_shinfo(skb)->nr_frags; + get_page(page); + skb_add_rx_frag(skb, next_frag, page, offset_in_page(data), data_len, + data_len); +} + +static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) +{ + return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); +} + +static int qeth_extract_skb(struct qeth_card *card, + struct qeth_qdio_buffer *qethbuffer, u8 *element_no, + int *__offset) +{ + struct qeth_priv *priv = netdev_priv(card->dev); + struct qdio_buffer *buffer = qethbuffer->buffer; + struct napi_struct *napi = &card->napi; + struct qdio_buffer_element *element; + unsigned int linear_len = 0; + bool uses_frags = false; + int offset = *__offset; + bool use_rx_sg = false; + unsigned int headroom; + struct qeth_hdr *hdr; + struct sk_buff *skb; + int skb_len = 0; + + element = &buffer->element[*element_no]; + +next_packet: + /* qeth_hdr must not cross element boundaries */ + while (element->length < offset + sizeof(struct qeth_hdr)) { + if (qeth_is_last_sbale(element)) + return -ENODATA; + element++; + offset = 0; + } + + hdr = phys_to_virt(element->addr) + offset; + offset += sizeof(*hdr); + skb = NULL; + + switch (hdr->hdr.l2.id) { + case QETH_HEADER_TYPE_LAYER2: + skb_len = hdr->hdr.l2.pkt_length; + linear_len = ETH_HLEN; + headroom = 0; + break; + case QETH_HEADER_TYPE_LAYER3: + skb_len = hdr->hdr.l3.length; + if (!IS_LAYER3(card)) { + QETH_CARD_STAT_INC(card, rx_dropped_notsupp); + goto walk_packet; + } + + if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) { + linear_len = ETH_HLEN; + headroom = 0; + break; + } + + if (hdr->hdr.l3.flags & QETH_HDR_IPV6) + linear_len = sizeof(struct ipv6hdr); + else + linear_len = sizeof(struct iphdr); + headroom = ETH_HLEN; + break; + case QETH_HEADER_TYPE_OSN: + skb_len = hdr->hdr.osn.pdu_length; + if (!IS_OSN(card)) { + QETH_CARD_STAT_INC(card, 
rx_dropped_notsupp); + goto walk_packet; + } + + linear_len = skb_len; + headroom = sizeof(struct qeth_hdr); + break; + default: + if (hdr->hdr.l2.id & QETH_HEADER_MASK_INVAL) + QETH_CARD_STAT_INC(card, rx_frame_errors); + else + QETH_CARD_STAT_INC(card, rx_dropped_notsupp); + + /* Can't determine packet length, drop the whole buffer. */ + return -EPROTONOSUPPORT; + } + + if (skb_len < linear_len) { + QETH_CARD_STAT_INC(card, rx_dropped_runt); + goto walk_packet; + } + + use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) || + (skb_len > READ_ONCE(priv->rx_copybreak) && + !atomic_read(&card->force_alloc_skb) && + !IS_OSN(card)); + + if (use_rx_sg) { + /* QETH_CQ_ENABLED only: */ + if (qethbuffer->rx_skb && + skb_tailroom(qethbuffer->rx_skb) >= linear_len + headroom) { + skb = qethbuffer->rx_skb; + qethbuffer->rx_skb = NULL; + goto use_skb; + } + + skb = napi_get_frags(napi); + if (!skb) { + /* -ENOMEM, no point in falling back further. */ + QETH_CARD_STAT_INC(card, rx_dropped_nomem); + goto walk_packet; + } + + if (skb_tailroom(skb) >= linear_len + headroom) { + uses_frags = true; + goto use_skb; + } + + netdev_info_once(card->dev, + "Insufficient linear space in NAPI frags skb, need %u but have %u\n", + linear_len + headroom, skb_tailroom(skb)); + /* Shouldn't happen. Don't optimize, fall back to linear skb. */ + } + + linear_len = skb_len; + skb = napi_alloc_skb(napi, linear_len + headroom); + if (!skb) { + QETH_CARD_STAT_INC(card, rx_dropped_nomem); + goto walk_packet; + } + +use_skb: + if (headroom) + skb_reserve(skb, headroom); +walk_packet: + while (skb_len) { + int data_len = min(skb_len, (int)(element->length - offset)); + char *data = phys_to_virt(element->addr) + offset; + + skb_len -= data_len; + offset += data_len; + + /* Extract data from current element: */ + if (skb && data_len) { + if (linear_len) { + unsigned int copy_len; + + copy_len = min_t(unsigned int, linear_len, + data_len); + + skb_put_data(skb, data, copy_len); + linear_len -= copy_len; + data_len -= copy_len; + data += copy_len; + } + + if (data_len) + qeth_create_skb_frag(skb, data, data_len); + } + + /* Step forward to next element: */ + if (skb_len) { + if (qeth_is_last_sbale(element)) { + QETH_CARD_TEXT(card, 4, "unexeob"); + QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); + if (skb) { + if (uses_frags) + napi_free_frags(napi); + else + dev_kfree_skb_any(skb); + QETH_CARD_STAT_INC(card, + rx_length_errors); + } + return -EMSGSIZE; + } + element++; + offset = 0; + } + } + + /* This packet was skipped, go get another one: */ + if (!skb) + goto next_packet; + + *element_no = element - &buffer->element[0]; + *__offset = offset; + + qeth_receive_skb(card, skb, hdr, uses_frags); + return 0; +} + +static unsigned int qeth_extract_skbs(struct qeth_card *card, int budget, + struct qeth_qdio_buffer *buf, bool *done) +{ + unsigned int work_done = 0; + + while (budget) { + if (qeth_extract_skb(card, buf, &card->rx.buf_element, + &card->rx.e_offset)) { + *done = true; + break; + } + + work_done++; + budget--; + } + + return work_done; +} + +static unsigned int qeth_rx_poll(struct qeth_card *card, int budget) +{ + struct qeth_rx *ctx = &card->rx; + unsigned int work_done = 0; + + while (budget > 0) { + struct qeth_qdio_buffer *buffer; + unsigned int skbs_done = 0; + bool done = false; + + /* Fetch completed RX buffers: */ + if (!card->rx.b_count) { + card->rx.qdio_err = 0; + card->rx.b_count = qdio_get_next_buffers( + card->data.ccwdev, 0, &card->rx.b_index, + &card->rx.qdio_err); + if (card->rx.b_count <= 0) { + 
card->rx.b_count = 0; + break; + } + } + + /* Process one completed RX buffer: */ + buffer = &card->qdio.in_q->bufs[card->rx.b_index]; + if (!(card->rx.qdio_err && + qeth_check_qdio_errors(card, buffer->buffer, + card->rx.qdio_err, "qinerr"))) + skbs_done = qeth_extract_skbs(card, budget, buffer, + &done); + else + done = true; + + work_done += skbs_done; + budget -= skbs_done; + + if (done) { + QETH_CARD_STAT_INC(card, rx_bufs); + qeth_put_buffer_pool_entry(card, buffer->pool_entry); + buffer->pool_entry = NULL; + card->rx.b_count--; + ctx->bufs_refill++; + ctx->bufs_refill -= qeth_rx_refill_queue(card, + ctx->bufs_refill); + + /* Step forward to next buffer: */ + card->rx.b_index = QDIO_BUFNR(card->rx.b_index + 1); + card->rx.buf_element = 0; + card->rx.e_offset = 0; + } + } + + return work_done; +} + +static void qeth_cq_poll(struct qeth_card *card) +{ + unsigned int work_done = 0; + + while (work_done < QDIO_MAX_BUFFERS_PER_Q) { + unsigned int start, error; + int completed; + + completed = qdio_inspect_queue(CARD_DDEV(card), 1, true, &start, + &error); + if (completed <= 0) + return; + + qeth_qdio_cq_handler(card, error, 1, start, completed); + work_done += completed; + } +} + +int qeth_poll(struct napi_struct *napi, int budget) +{ + struct qeth_card *card = container_of(napi, struct qeth_card, napi); + unsigned int work_done; + + work_done = qeth_rx_poll(card, budget); + + if (card->options.cq == QETH_CQ_ENABLED) + qeth_cq_poll(card); + + if (budget) { + struct qeth_rx *ctx = &card->rx; + + /* Process any substantial refill backlog: */ + ctx->bufs_refill -= qeth_rx_refill_queue(card, ctx->bufs_refill); + + /* Exhausted the RX budget. Keep IRQ disabled, we get called again. */ + if (work_done >= budget) + return work_done; + } + + if (napi_complete_done(napi, work_done) && + qdio_start_irq(CARD_DDEV(card))) + napi_schedule(napi); + + return work_done; +} +EXPORT_SYMBOL_GPL(qeth_poll); + +static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue, + unsigned int bidx, bool error, int budget) +{ + struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx]; + u8 sflags = buffer->buffer->element[15].sflags; + struct qeth_card *card = queue->card; + + if (queue->bufstates && (queue->bufstates[bidx].flags & + QDIO_OUTBUF_STATE_FLAG_PENDING)) { + WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED); + + QETH_CARD_TEXT_(card, 5, "pel%u", bidx); + + switch (atomic_cmpxchg(&buffer->state, + QETH_QDIO_BUF_PRIMED, + QETH_QDIO_BUF_PENDING)) { + case QETH_QDIO_BUF_PRIMED: + /* We have initial ownership, no QAOB (yet): */ + qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING); + + /* Handle race with qeth_qdio_handle_aob(): */ + switch (atomic_xchg(&buffer->state, + QETH_QDIO_BUF_NEED_QAOB)) { + case QETH_QDIO_BUF_PENDING: + /* No concurrent QAOB notification. 
*/ + + /* Prepare the queue slot for immediate re-use: */ + qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements); + if (qeth_init_qdio_out_buf(queue, bidx)) { + QETH_CARD_TEXT(card, 2, "outofbuf"); + qeth_schedule_recovery(card); + } + + list_add(&buffer->list_entry, + &queue->pending_bufs); + /* Skip clearing the buffer: */ + return; + case QETH_QDIO_BUF_QAOB_OK: + qeth_notify_skbs(queue, buffer, + TX_NOTIFY_DELAYED_OK); + error = false; + break; + case QETH_QDIO_BUF_QAOB_ERROR: + qeth_notify_skbs(queue, buffer, + TX_NOTIFY_DELAYED_GENERALERROR); + error = true; + break; + default: + WARN_ON_ONCE(1); + } + + break; + case QETH_QDIO_BUF_QAOB_OK: + /* qeth_qdio_handle_aob() already received a QAOB: */ + qeth_notify_skbs(queue, buffer, TX_NOTIFY_OK); + error = false; + break; + case QETH_QDIO_BUF_QAOB_ERROR: + /* qeth_qdio_handle_aob() already received a QAOB: */ + qeth_notify_skbs(queue, buffer, TX_NOTIFY_GENERALERROR); + error = true; + break; + default: + WARN_ON_ONCE(1); + } + } else if (card->options.cq == QETH_CQ_ENABLED) { + qeth_notify_skbs(queue, buffer, + qeth_compute_cq_notification(sflags, 0)); + } + + qeth_clear_output_buffer(queue, buffer, error, budget); +} + +static int qeth_tx_poll(struct napi_struct *napi, int budget) +{ + struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi); + unsigned int queue_no = queue->queue_no; + struct qeth_card *card = queue->card; + struct net_device *dev = card->dev; + unsigned int work_done = 0; + struct netdev_queue *txq; + + txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no)); + + while (1) { + unsigned int start, error, i; + unsigned int packets = 0; + unsigned int bytes = 0; + int completed; + + qeth_tx_complete_pending_bufs(card, queue, false); + + if (qeth_out_queue_is_empty(queue)) { + napi_complete(napi); + return 0; + } + + /* Give the CPU a breather: */ + if (work_done >= QDIO_MAX_BUFFERS_PER_Q) { + QETH_TXQ_STAT_INC(queue, completion_yield); + if (napi_complete_done(napi, 0)) + napi_schedule(napi); + return 0; + } + + completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false, + &start, &error); + if (completed <= 0) { + /* Ensure we see TX completion for pending work: */ + if (napi_complete_done(napi, 0)) + qeth_tx_arm_timer(queue, QETH_TX_TIMER_USECS); + return 0; + } + + for (i = start; i < start + completed; i++) { + struct qeth_qdio_out_buffer *buffer; + unsigned int bidx = QDIO_BUFNR(i); + + buffer = queue->bufs[bidx]; + packets += buffer->frames; + bytes += buffer->bytes; + + qeth_handle_send_error(card, buffer, error); + qeth_iqd_tx_complete(queue, bidx, error, budget); + } + + netdev_tx_completed_queue(txq, packets, bytes); + atomic_sub(completed, &queue->used_buffers); + work_done += completed; + + /* xmit may have observed the full-condition, but not yet + * stopped the txq. In which case the code below won't trigger. + * So before returning, xmit will re-check the txq's fill level + * and wake it up if needed. 
+ */ + if (netif_tx_queue_stopped(txq) && + !qeth_out_queue_is_full(queue)) + netif_tx_wake_queue(txq); + } +} + +static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd) +{ + if (!cmd->hdr.return_code) + cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; + return cmd->hdr.return_code; +} + +static int qeth_setassparms_get_caps_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_ipa_caps *caps = reply->param; + + if (qeth_setassparms_inspect_rc(cmd)) + return -EIO; + + caps->supported = cmd->data.setassparms.data.caps.supported; + caps->enabled = cmd->data.setassparms.data.caps.enabled; + return 0; +} + +int qeth_setassparms_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + QETH_CARD_TEXT(card, 4, "defadpcb"); + + if (cmd->hdr.return_code) + return -EIO; + + cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; + if (cmd->hdr.prot_version == QETH_PROT_IPV4) + card->options.ipa4.enabled = cmd->hdr.assists.enabled; + if (cmd->hdr.prot_version == QETH_PROT_IPV6) + card->options.ipa6.enabled = cmd->hdr.assists.enabled; + return 0; +} +EXPORT_SYMBOL_GPL(qeth_setassparms_cb); + +struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + u16 cmd_code, + unsigned int data_length, + enum qeth_prot_versions prot) +{ + struct qeth_ipacmd_setassparms *setassparms; + struct qeth_ipacmd_setassparms_hdr *hdr; + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 4, "getasscm"); + iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot, + data_length + + offsetof(struct qeth_ipacmd_setassparms, + data)); + if (!iob) + return NULL; + + setassparms = &__ipa_cmd(iob)->data.setassparms; + setassparms->assist_no = ipa_func; + + hdr = &setassparms->hdr; + hdr->length = sizeof(*hdr) + data_length; + hdr->command_code = cmd_code; + return iob; +} +EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd); + +int qeth_send_simple_setassparms_prot(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + u16 cmd_code, u32 *data, + enum qeth_prot_versions prot) +{ + unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0; + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT_(card, 4, "simassp%i", prot); + iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot); + if (!iob) + return -ENOMEM; + + if (data) + __ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data; + return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL); +} +EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot); + +static void qeth_unregister_dbf_views(void) +{ + int x; + + for (x = 0; x < QETH_DBF_INFOS; x++) { + debug_unregister(qeth_dbf[x].id); + qeth_dbf[x].id = NULL; + } +} + +void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...) 
+{ + char dbf_txt_buf[32]; + va_list args; + + if (!debug_level_enabled(id, level)) + return; + va_start(args, fmt); + vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); + va_end(args); + debug_text_event(id, level, dbf_txt_buf); +} +EXPORT_SYMBOL_GPL(qeth_dbf_longtext); + +static int qeth_register_dbf_views(void) +{ + int ret; + int x; + + for (x = 0; x < QETH_DBF_INFOS; x++) { + /* register the areas */ + qeth_dbf[x].id = debug_register(qeth_dbf[x].name, + qeth_dbf[x].pages, + qeth_dbf[x].areas, + qeth_dbf[x].len); + if (qeth_dbf[x].id == NULL) { + qeth_unregister_dbf_views(); + return -ENOMEM; + } + + /* register a view */ + ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view); + if (ret) { + qeth_unregister_dbf_views(); + return ret; + } + + /* set a passing level */ + debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level); + } + + return 0; +} + +static DEFINE_MUTEX(qeth_mod_mutex); /* for synchronized module loading */ + +int qeth_core_load_discipline(struct qeth_card *card, + enum qeth_discipline_id discipline) +{ + mutex_lock(&qeth_mod_mutex); + switch (discipline) { + case QETH_DISCIPLINE_LAYER3: + card->discipline = try_then_request_module( + symbol_get(qeth_l3_discipline), "qeth_l3"); + break; + case QETH_DISCIPLINE_LAYER2: + card->discipline = try_then_request_module( + symbol_get(qeth_l2_discipline), "qeth_l2"); + break; + default: + break; + } + mutex_unlock(&qeth_mod_mutex); + + if (!card->discipline) { + dev_err(&card->gdev->dev, "There is no kernel module to " + "support discipline %d\n", discipline); + return -EINVAL; + } + + card->options.layer = discipline; + return 0; +} + +void qeth_core_free_discipline(struct qeth_card *card) +{ + if (IS_LAYER2(card)) + symbol_put(qeth_l2_discipline); + else + symbol_put(qeth_l3_discipline); + card->options.layer = QETH_DISCIPLINE_UNDETERMINED; + card->discipline = NULL; +} + +const struct device_type qeth_generic_devtype = { + .name = "qeth_generic", + .groups = qeth_generic_attr_groups, +}; +EXPORT_SYMBOL_GPL(qeth_generic_devtype); + +static const struct device_type qeth_osn_devtype = { + .name = "qeth_osn", + .groups = qeth_osn_attr_groups, +}; + +#define DBF_NAME_LEN 20 + +struct qeth_dbf_entry { + char dbf_name[DBF_NAME_LEN]; + debug_info_t *dbf_info; + struct list_head dbf_list; +}; + +static LIST_HEAD(qeth_dbf_list); +static DEFINE_MUTEX(qeth_dbf_list_mutex); + +static debug_info_t *qeth_get_dbf_entry(char *name) +{ + struct qeth_dbf_entry *entry; + debug_info_t *rc = NULL; + + mutex_lock(&qeth_dbf_list_mutex); + list_for_each_entry(entry, &qeth_dbf_list, dbf_list) { + if (strcmp(entry->dbf_name, name) == 0) { + rc = entry->dbf_info; + break; + } + } + mutex_unlock(&qeth_dbf_list_mutex); + return rc; +} + +static int qeth_add_dbf_entry(struct qeth_card *card, char *name) +{ + struct qeth_dbf_entry *new_entry; + + card->debug = debug_register(name, 2, 1, 8); + if (!card->debug) { + QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf"); + goto err; + } + if (debug_register_view(card->debug, &debug_hex_ascii_view)) + goto err_dbg; + new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL); + if (!new_entry) + goto err_dbg; + strncpy(new_entry->dbf_name, name, DBF_NAME_LEN); + new_entry->dbf_info = card->debug; + mutex_lock(&qeth_dbf_list_mutex); + list_add(&new_entry->dbf_list, &qeth_dbf_list); + mutex_unlock(&qeth_dbf_list_mutex); + + return 0; + +err_dbg: + debug_unregister(card->debug); +err: + return -ENOMEM; +} + +static void qeth_clear_dbf_list(void) +{ + struct qeth_dbf_entry *entry, *tmp; + + 
mutex_lock(&qeth_dbf_list_mutex); + list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) { + list_del(&entry->dbf_list); + debug_unregister(entry->dbf_info); + kfree(entry); + } + mutex_unlock(&qeth_dbf_list_mutex); +} + +static struct net_device *qeth_alloc_netdev(struct qeth_card *card) +{ + struct net_device *dev; + struct qeth_priv *priv; + + switch (card->info.type) { + case QETH_CARD_TYPE_IQD: + dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN, + ether_setup, QETH_MAX_OUT_QUEUES, 1); + break; + case QETH_CARD_TYPE_OSM: + dev = alloc_etherdev(sizeof(*priv)); + break; + case QETH_CARD_TYPE_OSN: + dev = alloc_netdev(sizeof(*priv), "osn%d", NET_NAME_UNKNOWN, + ether_setup); + break; + default: + dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1); + } + + if (!dev) + return NULL; + + priv = netdev_priv(dev); + priv->rx_copybreak = QETH_RX_COPYBREAK; + priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1; + + dev->ml_priv = card; + dev->watchdog_timeo = QETH_TX_TIMEOUT; + dev->min_mtu = IS_OSN(card) ? 64 : 576; + /* initialized when device first goes online: */ + dev->max_mtu = 0; + dev->mtu = 0; + SET_NETDEV_DEV(dev, &card->gdev->dev); + netif_carrier_off(dev); + + if (IS_OSN(card)) { + dev->ethtool_ops = &qeth_osn_ethtool_ops; + } else { + dev->ethtool_ops = &qeth_ethtool_ops; + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + dev->hw_features |= NETIF_F_SG; + dev->vlan_features |= NETIF_F_SG; + if (IS_IQD(card)) + dev->features |= NETIF_F_SG; + } + + return dev; +} + +struct net_device *qeth_clone_netdev(struct net_device *orig) +{ + struct net_device *clone = qeth_alloc_netdev(orig->ml_priv); + + if (!clone) + return NULL; + + clone->dev_port = orig->dev_port; + return clone; +} + +static int qeth_core_probe_device(struct ccwgroup_device *gdev) +{ + struct qeth_card *card; + struct device *dev; + int rc; + enum qeth_discipline_id enforced_disc; + char dbf_name[DBF_NAME_LEN]; + + QETH_DBF_TEXT(SETUP, 2, "probedev"); + + dev = &gdev->dev; + if (!get_device(dev)) + return -ENODEV; + + QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev)); + + card = qeth_alloc_card(gdev); + if (!card) { + QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM); + rc = -ENOMEM; + goto err_dev; + } + + snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s", + dev_name(&gdev->dev)); + card->debug = qeth_get_dbf_entry(dbf_name); + if (!card->debug) { + rc = qeth_add_dbf_entry(card, dbf_name); + if (rc) + goto err_card; + } + + qeth_setup_card(card); + card->dev = qeth_alloc_netdev(card); + if (!card->dev) { + rc = -ENOMEM; + goto err_card; + } + + qeth_determine_capabilities(card); + qeth_set_blkt_defaults(card); + + card->qdio.no_out_queues = card->dev->num_tx_queues; + rc = qeth_update_from_chp_desc(card); + if (rc) + goto err_chp_desc; + + enforced_disc = qeth_enforce_discipline(card); + switch (enforced_disc) { + case QETH_DISCIPLINE_UNDETERMINED: + gdev->dev.type = &qeth_generic_devtype; + break; + default: + card->info.layer_enforced = true; + /* It's so early that we don't need the discipline_mutex yet. */ + rc = qeth_core_load_discipline(card, enforced_disc); + if (rc) + goto err_load; + + gdev->dev.type = IS_OSN(card) ? 
&qeth_osn_devtype : + card->discipline->devtype; + rc = card->discipline->setup(card->gdev); + if (rc) + goto err_disc; + break; + } + + return 0; + +err_disc: + qeth_core_free_discipline(card); +err_load: +err_chp_desc: + free_netdev(card->dev); +err_card: + qeth_core_free_card(card); +err_dev: + put_device(dev); + return rc; +} + +static void qeth_core_remove_device(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + + QETH_CARD_TEXT(card, 2, "removedv"); + + mutex_lock(&card->discipline_mutex); + if (card->discipline) { + card->discipline->remove(gdev); + qeth_core_free_discipline(card); + } + mutex_unlock(&card->discipline_mutex); + + qeth_free_qdio_queues(card); + + free_netdev(card->dev); + qeth_core_free_card(card); + put_device(&gdev->dev); +} + +static int qeth_core_set_online(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + int rc = 0; + enum qeth_discipline_id def_discipline; + + mutex_lock(&card->discipline_mutex); + if (!card->discipline) { + def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 : + QETH_DISCIPLINE_LAYER2; + rc = qeth_core_load_discipline(card, def_discipline); + if (rc) + goto err; + rc = card->discipline->setup(card->gdev); + if (rc) { + qeth_core_free_discipline(card); + goto err; + } + } + + rc = qeth_set_online(card, card->discipline); + +err: + mutex_unlock(&card->discipline_mutex); + return rc; +} + +static int qeth_core_set_offline(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + int rc; + + mutex_lock(&card->discipline_mutex); + rc = qeth_set_offline(card, card->discipline, false); + mutex_unlock(&card->discipline_mutex); + + return rc; +} + +static void qeth_core_shutdown(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + + qeth_set_allowed_threads(card, 0, 1); + if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) + qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); + qeth_qdio_clear_card(card, 0); + qeth_drain_output_queues(card); + qdio_free(CARD_DDEV(card)); +} + +static ssize_t group_store(struct device_driver *ddrv, const char *buf, + size_t count) +{ + int err; + + err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3, + buf); + + return err ? 
err : count; +} +static DRIVER_ATTR_WO(group); + +static struct attribute *qeth_drv_attrs[] = { + &driver_attr_group.attr, + NULL, +}; +static struct attribute_group qeth_drv_attr_group = { + .attrs = qeth_drv_attrs, +}; +static const struct attribute_group *qeth_drv_attr_groups[] = { + &qeth_drv_attr_group, + NULL, +}; + +static struct ccwgroup_driver qeth_core_ccwgroup_driver = { + .driver = { + .groups = qeth_drv_attr_groups, + .owner = THIS_MODULE, + .name = "qeth", + }, + .ccw_driver = &qeth_ccw_driver, + .setup = qeth_core_probe_device, + .remove = qeth_core_remove_device, + .set_online = qeth_core_set_online, + .set_offline = qeth_core_set_offline, + .shutdown = qeth_core_shutdown, +}; + +struct qeth_card *qeth_get_card_by_busid(char *bus_id) +{ + struct ccwgroup_device *gdev; + struct qeth_card *card; + + gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id); + if (!gdev) + return NULL; + + card = dev_get_drvdata(&gdev->dev); + put_device(&gdev->dev); + return card; +} +EXPORT_SYMBOL_GPL(qeth_get_card_by_busid); + +int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct qeth_card *card = dev->ml_priv; + struct mii_ioctl_data *mii_data; + int rc = 0; + + switch (cmd) { + case SIOC_QETH_ADP_SET_SNMP_CONTROL: + rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); + break; + case SIOC_QETH_GET_CARD_TYPE: + if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) && + !IS_VM_NIC(card)) + return 1; + return 0; + case SIOCGMIIPHY: + mii_data = if_mii(rq); + mii_data->phy_id = 0; + break; + case SIOCGMIIREG: + mii_data = if_mii(rq); + if (mii_data->phy_id != 0) + rc = -EINVAL; + else + mii_data->val_out = qeth_mdio_read(dev, + mii_data->phy_id, mii_data->reg_num); + break; + case SIOC_QETH_QUERY_OAT: + rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data); + break; + default: + if (card->discipline->do_ioctl) + rc = card->discipline->do_ioctl(dev, rq, cmd); + else + rc = -EOPNOTSUPP; + } + if (rc) + QETH_CARD_TEXT_(card, 2, "ioce%x", rc); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_do_ioctl); + +static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + u32 *features = reply->param; + + if (qeth_setassparms_inspect_rc(cmd)) + return -EIO; + + *features = cmd->data.setassparms.data.flags_32bit; + return 0; +} + +static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype, + enum qeth_prot_versions prot) +{ + return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP, + NULL, prot); +} + +static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype, + enum qeth_prot_versions prot, u8 *lp2lp) +{ + u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_caps caps; + u32 features; + int rc; + + /* some L3 HW requires combined L3+L4 csum offload: */ + if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 && + cstype == IPA_OUTBOUND_CHECKSUM) + required_features |= QETH_IPA_CHECKSUM_IP_HDR; + + iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0, + prot); + if (!iob) + return -ENOMEM; + + rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features); + if (rc) + return rc; + + if ((required_features & features) != required_features) { + qeth_set_csum_off(card, cstype, prot); + return -EOPNOTSUPP; + } + + iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE, + SETASS_DATA_SIZEOF(flags_32bit), + prot); + if (!iob) { + 
qeth_set_csum_off(card, cstype, prot); + return -ENOMEM; + } + + if (features & QETH_IPA_CHECKSUM_LP2LP) + required_features |= QETH_IPA_CHECKSUM_LP2LP; + __ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features; + rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps); + if (rc) { + qeth_set_csum_off(card, cstype, prot); + return rc; + } + + if (!qeth_ipa_caps_supported(&caps, required_features) || + !qeth_ipa_caps_enabled(&caps, required_features)) { + qeth_set_csum_off(card, cstype, prot); + return -EOPNOTSUPP; + } + + dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n", + cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot); + + if (lp2lp) + *lp2lp = qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP); + + return 0; +} + +static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype, + enum qeth_prot_versions prot, u8 *lp2lp) +{ + return on ? qeth_set_csum_on(card, cstype, prot, lp2lp) : + qeth_set_csum_off(card, cstype, prot); +} + +static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_tso_start_data *tso_data = reply->param; + + if (qeth_setassparms_inspect_rc(cmd)) + return -EIO; + + tso_data->mss = cmd->data.setassparms.data.tso.mss; + tso_data->supported = cmd->data.setassparms.data.tso.supported; + return 0; +} + +static int qeth_set_tso_off(struct qeth_card *card, + enum qeth_prot_versions prot) +{ + return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO, + IPA_CMD_ASS_STOP, NULL, prot); +} + +static int qeth_set_tso_on(struct qeth_card *card, + enum qeth_prot_versions prot) +{ + struct qeth_tso_start_data tso_data; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_caps caps; + int rc; + + iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO, + IPA_CMD_ASS_START, 0, prot); + if (!iob) + return -ENOMEM; + + rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data); + if (rc) + return rc; + + if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) { + qeth_set_tso_off(card, prot); + return -EOPNOTSUPP; + } + + iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO, + IPA_CMD_ASS_ENABLE, + SETASS_DATA_SIZEOF(caps), prot); + if (!iob) { + qeth_set_tso_off(card, prot); + return -ENOMEM; + } + + /* enable TSO capability */ + __ipa_cmd(iob)->data.setassparms.data.caps.enabled = + QETH_IPA_LARGE_SEND_TCP; + rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps); + if (rc) { + qeth_set_tso_off(card, prot); + return rc; + } + + if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) || + !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) { + qeth_set_tso_off(card, prot); + return -EOPNOTSUPP; + } + + dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot, + tso_data.mss); + return 0; +} + +static int qeth_set_ipa_tso(struct qeth_card *card, bool on, + enum qeth_prot_versions prot) +{ + return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot); +} + +static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) +{ + int rc_ipv4 = (on) ? 
-EOPNOTSUPP : 0; + int rc_ipv6; + + if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) + rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM, + QETH_PROT_IPV4, NULL); + if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) + /* no/one Offload Assist available, so the rc is trivial */ + return rc_ipv4; + + rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM, + QETH_PROT_IPV6, NULL); + + if (on) + /* enable: success if any Assist is active */ + return (rc_ipv6) ? rc_ipv4 : 0; + + /* disable: failure if any Assist is still active */ + return (rc_ipv6) ? rc_ipv6 : rc_ipv4; +} + +/** + * qeth_enable_hw_features() - (Re-)Enable HW functions for device features + * @dev: a net_device + */ +void qeth_enable_hw_features(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + netdev_features_t features; + + features = dev->features; + /* force-off any feature that might need an IPA sequence. + * netdev_update_features() will restart them. + */ + dev->features &= ~dev->hw_features; + /* toggle VLAN filter, so that VIDs are re-programmed: */ + if (IS_LAYER2(card) && IS_VM_NIC(card)) { + dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + } + netdev_update_features(dev); + if (features != dev->features) + dev_warn(&card->gdev->dev, + "Device recovery failed to restore all offload features\n"); +} +EXPORT_SYMBOL_GPL(qeth_enable_hw_features); + +static void qeth_check_restricted_features(struct qeth_card *card, + netdev_features_t changed, + netdev_features_t actual) +{ + netdev_features_t ipv6_features = NETIF_F_TSO6; + netdev_features_t ipv4_features = NETIF_F_TSO; + + if (!card->info.has_lp2lp_cso_v6) + ipv6_features |= NETIF_F_IPV6_CSUM; + if (!card->info.has_lp2lp_cso_v4) + ipv4_features |= NETIF_F_IP_CSUM; + + if ((changed & ipv6_features) && !(actual & ipv6_features)) + qeth_flush_local_addrs6(card); + if ((changed & ipv4_features) && !(actual & ipv4_features)) + qeth_flush_local_addrs4(card); +} + +int qeth_set_features(struct net_device *dev, netdev_features_t features) +{ + struct qeth_card *card = dev->ml_priv; + netdev_features_t changed = dev->features ^ features; + int rc = 0; + + QETH_CARD_TEXT(card, 2, "setfeat"); + QETH_CARD_HEX(card, 2, &features, sizeof(features)); + + if ((changed & NETIF_F_IP_CSUM)) { + rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM, + IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4, + &card->info.has_lp2lp_cso_v4); + if (rc) + changed ^= NETIF_F_IP_CSUM; + } + if (changed & NETIF_F_IPV6_CSUM) { + rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM, + IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6, + &card->info.has_lp2lp_cso_v6); + if (rc) + changed ^= NETIF_F_IPV6_CSUM; + } + if (changed & NETIF_F_RXCSUM) { + rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM); + if (rc) + changed ^= NETIF_F_RXCSUM; + } + if (changed & NETIF_F_TSO) { + rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO, + QETH_PROT_IPV4); + if (rc) + changed ^= NETIF_F_TSO; + } + if (changed & NETIF_F_TSO6) { + rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6, + QETH_PROT_IPV6); + if (rc) + changed ^= NETIF_F_TSO6; + } + + qeth_check_restricted_features(card, dev->features ^ features, + dev->features ^ changed); + + /* everything changed successfully? */ + if ((dev->features ^ features) == changed) + return 0; + /* something went wrong. 
save changed features and return error */ + dev->features ^= changed; + return -EIO; +} +EXPORT_SYMBOL_GPL(qeth_set_features); + +netdev_features_t qeth_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_CARD_TEXT(card, 2, "fixfeat"); + if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) + features &= ~NETIF_F_IP_CSUM; + if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) + features &= ~NETIF_F_IPV6_CSUM; + if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) && + !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) + features &= ~NETIF_F_RXCSUM; + if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) + features &= ~NETIF_F_TSO; + if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO)) + features &= ~NETIF_F_TSO6; + + QETH_CARD_HEX(card, 2, &features, sizeof(features)); + return features; +} +EXPORT_SYMBOL_GPL(qeth_fix_features); + +netdev_features_t qeth_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + struct qeth_card *card = dev->ml_priv; + + /* Traffic with local next-hop is not eligible for some offloads: */ + if (skb->ip_summed == CHECKSUM_PARTIAL && + READ_ONCE(card->options.isolation) != ISOLATION_MODE_FWD) { + netdev_features_t restricted = 0; + + if (skb_is_gso(skb) && !netif_needs_gso(skb, features)) + restricted |= NETIF_F_ALL_TSO; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + if (!card->info.has_lp2lp_cso_v4) + restricted |= NETIF_F_IP_CSUM; + + if (restricted && qeth_next_hop_is_local_v4(card, skb)) + features &= ~restricted; + break; + case htons(ETH_P_IPV6): + if (!card->info.has_lp2lp_cso_v6) + restricted |= NETIF_F_IPV6_CSUM; + + if (restricted && qeth_next_hop_is_local_v6(card, skb)) + features &= ~restricted; + break; + default: + break; + } + } + + /* GSO segmentation builds skbs with + * a (small) linear part for the headers, and + * page frags for the data. + * Compared to a linear skb, the header-only part consumes an + * additional buffer element. This reduces buffer utilization, and + * hurts throughput. So compress small segments into one element. 
+ */ + if (netif_needs_gso(skb, features)) { + /* match skb_segment(): */ + unsigned int doffset = skb->data - skb_mac_header(skb); + unsigned int hsize = skb_shinfo(skb)->gso_size; + unsigned int hroom = skb_headroom(skb); + + /* linearize only if resulting skb allocations are order-0: */ + if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0)) + features &= ~NETIF_F_SG; + } + + return vlan_features_check(skb, features); +} +EXPORT_SYMBOL_GPL(qeth_features_check); + +void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct qeth_card *card = dev->ml_priv; + struct qeth_qdio_out_q *queue; + unsigned int i; + + QETH_CARD_TEXT(card, 5, "getstat"); + + stats->rx_packets = card->stats.rx_packets; + stats->rx_bytes = card->stats.rx_bytes; + stats->rx_errors = card->stats.rx_length_errors + + card->stats.rx_frame_errors + + card->stats.rx_fifo_errors; + stats->rx_dropped = card->stats.rx_dropped_nomem + + card->stats.rx_dropped_notsupp + + card->stats.rx_dropped_runt; + stats->multicast = card->stats.rx_multicast; + stats->rx_length_errors = card->stats.rx_length_errors; + stats->rx_frame_errors = card->stats.rx_frame_errors; + stats->rx_fifo_errors = card->stats.rx_fifo_errors; + + for (i = 0; i < card->qdio.no_out_queues; i++) { + queue = card->qdio.out_qs[i]; + + stats->tx_packets += queue->stats.tx_packets; + stats->tx_bytes += queue->stats.tx_bytes; + stats->tx_errors += queue->stats.tx_errors; + stats->tx_dropped += queue->stats.tx_dropped; + } +} +EXPORT_SYMBOL_GPL(qeth_get_stats64); + +#define TC_IQD_UCAST 0 +static void qeth_iqd_set_prio_tc_map(struct net_device *dev, + unsigned int ucast_txqs) +{ + unsigned int prio; + + /* IQD requires mcast traffic to be placed on a dedicated queue, and + * qeth_iqd_select_queue() deals with this. + * For unicast traffic, we defer the queue selection to the stack. + * By installing a trivial prio map that spans over only the unicast + * queues, we can encourage the stack to spread the ucast traffic evenly + * without selecting the mcast queue. + */ + + /* One traffic class, spanning over all active ucast queues: */ + netdev_set_num_tc(dev, 1); + netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs, + QETH_IQD_MIN_UCAST_TXQ); + + /* Map all priorities to this traffic class: */ + for (prio = 0; prio <= TC_BITMASK; prio++) + netdev_set_prio_tc_map(dev, prio, TC_IQD_UCAST); +} + +int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count) +{ + struct net_device *dev = card->dev; + int rc; + + /* Per netif_setup_tc(), adjust the mapping first: */ + if (IS_IQD(card)) + qeth_iqd_set_prio_tc_map(dev, count - 1); + + rc = netif_set_real_num_tx_queues(dev, count); + + if (rc && IS_IQD(card)) + qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1); + + return rc; +} +EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues); + +u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, + u8 cast_type, struct net_device *sb_dev) +{ + u16 txq; + + if (cast_type != RTN_UNICAST) + return QETH_IQD_MCAST_TXQ; + if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ) + return QETH_IQD_MIN_UCAST_TXQ; + + txq = netdev_pick_tx(dev, skb, sb_dev); + return (txq == QETH_IQD_MCAST_TXQ) ? 
QETH_IQD_MIN_UCAST_TXQ : txq; +} +EXPORT_SYMBOL_GPL(qeth_iqd_select_queue); + +int qeth_open(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_CARD_TEXT(card, 4, "qethopen"); + + card->data.state = CH_STATE_UP; + netif_tx_start_all_queues(dev); + + local_bh_disable(); + if (IS_IQD(card)) { + struct qeth_qdio_out_q *queue; + unsigned int i; + + qeth_for_each_output_queue(card, queue, i) { + netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll, + QETH_NAPI_WEIGHT); + napi_enable(&queue->napi); + napi_schedule(&queue->napi); + } + } + + napi_enable(&card->napi); + napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); + + return 0; +} +EXPORT_SYMBOL_GPL(qeth_open); + +int qeth_stop(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_CARD_TEXT(card, 4, "qethstop"); + + napi_disable(&card->napi); + cancel_delayed_work_sync(&card->buffer_reclaim_work); + qdio_stop_irq(CARD_DDEV(card)); + + if (IS_IQD(card)) { + struct qeth_qdio_out_q *queue; + unsigned int i; + + /* Quiesce the NAPI instances: */ + qeth_for_each_output_queue(card, queue, i) + napi_disable(&queue->napi); + + /* Stop .ndo_start_xmit, might still access queue->napi. */ + netif_tx_disable(dev); + + qeth_for_each_output_queue(card, queue, i) { + del_timer_sync(&queue->timer); + /* Queues may get re-allocated, so remove the NAPIs. */ + netif_napi_del(&queue->napi); + } + } else { + netif_tx_disable(dev); + } + + return 0; +} +EXPORT_SYMBOL_GPL(qeth_stop); + +static int __init qeth_core_init(void) +{ + int rc; + + pr_info("loading core functions\n"); + + qeth_debugfs_root = debugfs_create_dir("qeth", NULL); + + rc = qeth_register_dbf_views(); + if (rc) + goto dbf_err; + qeth_core_root_dev = root_device_register("qeth"); + rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); + if (rc) + goto register_err; + qeth_core_header_cache = + kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE, + roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE), + 0, NULL); + if (!qeth_core_header_cache) { + rc = -ENOMEM; + goto slab_err; + } + qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf", + sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL); + if (!qeth_qdio_outbuf_cache) { + rc = -ENOMEM; + goto cqslab_err; + } + rc = ccw_driver_register(&qeth_ccw_driver); + if (rc) + goto ccw_err; + rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver); + if (rc) + goto ccwgroup_err; + + return 0; + +ccwgroup_err: + ccw_driver_unregister(&qeth_ccw_driver); +ccw_err: + kmem_cache_destroy(qeth_qdio_outbuf_cache); +cqslab_err: + kmem_cache_destroy(qeth_core_header_cache); +slab_err: + root_device_unregister(qeth_core_root_dev); +register_err: + qeth_unregister_dbf_views(); +dbf_err: + debugfs_remove_recursive(qeth_debugfs_root); + pr_err("Initializing the qeth device driver failed\n"); + return rc; +} + +static void __exit qeth_core_exit(void) +{ + qeth_clear_dbf_list(); + ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver); + ccw_driver_unregister(&qeth_ccw_driver); + kmem_cache_destroy(qeth_qdio_outbuf_cache); + kmem_cache_destroy(qeth_core_header_cache); + root_device_unregister(qeth_core_root_dev); + qeth_unregister_dbf_views(); + debugfs_remove_recursive(qeth_debugfs_root); + pr_info("core functions removed\n"); +} + +module_init(qeth_core_init); +module_exit(qeth_core_exit); +MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); +MODULE_DESCRIPTION("qeth core functions"); +MODULE_LICENSE("GPL"); diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c new file 
mode 100644 index 000000000..68c2588b9 --- /dev/null +++ b/drivers/s390/net/qeth_core_mpc.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2007 + * Author(s): Frank Pavlic <fpavlic@de.ibm.com>, + * Thomas Spatzier <tspat@de.ibm.com>, + * Frank Blaschka <frank.blaschka@de.ibm.com> + */ + +#include <linux/module.h> +#include <asm/cio.h> +#include "qeth_core_mpc.h" + +const unsigned char IDX_ACTIVATE_READ[] = { + 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x19, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1, + 0xd3, 0xd3, 0xd6, 0xd3, 0xc5, 0x40, 0x00, 0x00, + 0x00, 0x00 +}; + +const unsigned char IDX_ACTIVATE_WRITE[] = { + 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x15, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1, + 0xd3, 0xd3, 0xd6, 0xd3, 0xc5, 0x40, 0x00, 0x00, + 0x00, 0x00 +}; + +const unsigned char CM_ENABLE[] = { + 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x63, + 0x10, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, + 0x81, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x23, + 0x00, 0x00, 0x23, 0x05, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x40, + 0x00, 0x0c, 0x41, 0x02, 0x00, 0x17, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x0b, 0x04, 0x01, + 0x7e, 0x04, 0x05, 0x00, 0x01, 0x01, 0x0f, + 0x00, + 0x0c, 0x04, 0x02, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff +}; + +const unsigned char CM_SETUP[] = { + 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x64, + 0x10, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, + 0x81, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x24, + 0x00, 0x00, 0x24, 0x05, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x40, + 0x00, 0x0c, 0x41, 0x04, 0x00, 0x18, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x09, 0x04, 0x04, + 0x05, 0x00, 0x01, 0x01, 0x11, + 0x00, 0x09, 0x04, + 0x05, 0x05, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x06, + 0x04, 0x06, 0xc8, 0x00 +}; + +const unsigned char ULP_ENABLE[] = { + 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6b, + 0x10, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, + 0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x2b, + 0x00, 0x00, 0x2b, 0x05, 0x20, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x40, + 0x00, 0x0c, 0x41, 0x02, 0x00, 0x1f, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x0b, 0x04, 0x01, + 0x03, 0x04, 0x05, 0x00, 0x01, 0x01, 0x12, + 0x00, + 0x14, 0x04, 0x0a, 0x00, 0x20, 0x00, 0x00, 0xff, + 0xff, 0x00, 0x08, 0xc8, 0xe8, 0xc4, 0xf1, 0xc7, + 0xf1, 0x00, 0x00 +}; + +const unsigned char ULP_SETUP[] = { + 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6c, + 0x10, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, + 0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x24, 0x00, 0x2c, + 0x00, 0x00, 0x2c, 0x05, 0x20, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x40, + 0x00, 0x0c, 0x41, 0x04, 0x00, 0x20, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x09, 0x04, 0x04, + 0x05, 0x00, 0x01, 
0x01, 0x14, + 0x00, 0x09, 0x04, + 0x05, 0x05, 0x30, 0x01, 0x00, 0x00, + 0x00, 0x06, + 0x04, 0x06, 0x40, 0x00, + 0x00, 0x08, 0x04, 0x0b, + 0x00, 0x00, 0x00, 0x00 +}; + +const unsigned char DM_ACT[] = { + 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x55, + 0x10, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, + 0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x02, 0x00, 0x24, 0x00, 0x15, + 0x00, 0x00, 0x2c, 0x05, 0x20, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x40, + 0x00, 0x0c, 0x43, 0x60, 0x00, 0x09, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x09, 0x04, 0x04, + 0x05, 0x40, 0x01, 0x01, 0x00 +}; + +const unsigned char IPA_PDU_HEADER[] = { + 0x00, 0xe0, 0x00, 0x00, 0x77, 0x77, 0x77, 0x77, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, + 0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0xc1, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x05, 0x77, 0x77, 0x77, 0x77, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, +}; + +struct ipa_rc_msg { + enum qeth_ipa_return_codes rc; + const char *msg; +}; + +static const struct ipa_rc_msg qeth_ipa_rc_msg[] = { + {IPA_RC_SUCCESS, "success"}, + {IPA_RC_NOTSUPP, "Command not supported"}, + {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"}, + {IPA_RC_UNKNOWN_ERROR, "IPA command failed - reason unknown"}, + {IPA_RC_UNSUPPORTED_COMMAND, "Command not supported"}, + {IPA_RC_VNICC_OOSEQ, "Command issued out of sequence"}, + {IPA_RC_INVALID_FORMAT, "invalid format or length"}, + {IPA_RC_DUP_IPV6_REMOTE, "ipv6 address already registered remote"}, + {IPA_RC_SBP_IQD_NOT_CONFIGURED, "Not configured for bridgeport"}, + {IPA_RC_DUP_IPV6_HOME, "ipv6 address already registered"}, + {IPA_RC_UNREGISTERED_ADDR, "Address not registered"}, + {IPA_RC_NO_ID_AVAILABLE, "No identifiers available"}, + {IPA_RC_ID_NOT_FOUND, "Identifier not found"}, + {IPA_RC_SBP_IQD_ANO_DEV_PRIMARY, "Primary bridgeport exists already"}, + {IPA_RC_SBP_IQD_CURRENT_SECOND, "Bridgeport is currently secondary"}, + {IPA_RC_SBP_IQD_LIMIT_SECOND, "Limit of secondary bridgeports reached"}, + {IPA_RC_INVALID_IP_VERSION, "IP version incorrect"}, + {IPA_RC_SBP_IQD_CURRENT_PRIMARY, "Bridgeport is currently primary"}, + {IPA_RC_LAN_FRAME_MISMATCH, "LAN and frame mismatch"}, + {IPA_RC_SBP_IQD_NO_QDIO_QUEUES, "QDIO queues not established"}, + {IPA_RC_L2_UNSUPPORTED_CMD, "Unsupported layer 2 command"}, + {IPA_RC_L2_DUP_MAC, "Duplicate MAC address"}, + {IPA_RC_L2_ADDR_TABLE_FULL, "Layer2 address table full"}, + {IPA_RC_L2_DUP_LAYER3_MAC, "Duplicate with layer 3 MAC"}, + {IPA_RC_L2_GMAC_NOT_FOUND, "GMAC not found"}, + {IPA_RC_L2_MAC_NOT_AUTH_BY_HYP, "L2 mac not authorized by hypervisor"}, + {IPA_RC_L2_MAC_NOT_AUTH_BY_ADP, "L2 mac not authorized by adapter"}, + {IPA_RC_L2_MAC_NOT_FOUND, "L2 mac address not found"}, + {IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"}, + {IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"}, + {IPA_RC_L2_VLAN_ID_NOT_FOUND, "L2 vlan id not found"}, + {IPA_RC_VNICC_VNICBP, "VNIC is BridgePort"}, + {IPA_RC_SBP_OSA_NOT_CONFIGURED, "Not configured for bridgeport"}, + {IPA_RC_SBP_OSA_OS_MISMATCH, "OS mismatch"}, + {IPA_RC_SBP_OSA_ANO_DEV_PRIMARY, "Primary bridgeport exists already"}, + {IPA_RC_SBP_OSA_CURRENT_SECOND, "Bridgeport is currently secondary"}, + {IPA_RC_SBP_OSA_LIMIT_SECOND, "Limit of secondary bridgeports reached"}, + 
{IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN, "Not authorized by zManager"}, + {IPA_RC_SBP_OSA_CURRENT_PRIMARY, "Bridgeport is currently primary"}, + {IPA_RC_SBP_OSA_NO_QDIO_QUEUES, "QDIO queues not established"}, + {IPA_RC_DATA_MISMATCH, "Data field mismatch (v4/v6 mixed)"}, + {IPA_RC_INVALID_MTU_SIZE, "Invalid MTU size"}, + {IPA_RC_INVALID_LANTYPE, "Invalid LAN type"}, + {IPA_RC_INVALID_LANNUM, "Invalid LAN num"}, + {IPA_RC_DUPLICATE_IP_ADDRESS, "Address already registered"}, + {IPA_RC_IP_ADDR_TABLE_FULL, "IP address table full"}, + {IPA_RC_LAN_PORT_STATE_ERROR, "LAN port state error"}, + {IPA_RC_SETIP_NO_STARTLAN, "Setip no startlan received"}, + {IPA_RC_SETIP_ALREADY_RECEIVED, "Setip already received"}, + {IPA_RC_IP_ADDR_ALREADY_USED, "IP address already in use on LAN"}, + {IPA_RC_MC_ADDR_NOT_FOUND, "Multicast address not found"}, + {IPA_RC_SETIP_INVALID_VERSION, "SETIP invalid IP version"}, + {IPA_RC_UNSUPPORTED_SUBCMD, "Unsupported assist subcommand"}, + {IPA_RC_ARP_ASSIST_NO_ENABLE, "Only partial success, no enable"}, + {IPA_RC_PRIMARY_ALREADY_DEFINED, "Primary already defined"}, + {IPA_RC_SECOND_ALREADY_DEFINED, "Secondary already defined"}, + {IPA_RC_INVALID_SETRTG_INDICATOR, "Invalid SETRTG indicator"}, + {IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"}, + {IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"}, + {IPA_RC_VEPA_TO_VEB_TRANSITION, "Adj. switch disabled port mode RR"}, + {IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"}, + /* default for qeth_get_ipa_msg(): */ + {IPA_RC_FFFF, "Unknown Error"} +}; + +const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc) +{ + int x; + + for (x = 0; x < ARRAY_SIZE(qeth_ipa_rc_msg) - 1; x++) + if (qeth_ipa_rc_msg[x].rc == rc) + return qeth_ipa_rc_msg[x].msg; + return qeth_ipa_rc_msg[x].msg; +} + + +struct ipa_cmd_names { + enum qeth_ipa_cmds cmd; + const char *name; +}; + +static const struct ipa_cmd_names qeth_ipa_cmd_names[] = { + {IPA_CMD_STARTLAN, "startlan"}, + {IPA_CMD_STOPLAN, "stoplan"}, + {IPA_CMD_SETVMAC, "setvmac"}, + {IPA_CMD_DELVMAC, "delvmac"}, + {IPA_CMD_SETGMAC, "setgmac"}, + {IPA_CMD_DELGMAC, "delgmac"}, + {IPA_CMD_SETVLAN, "setvlan"}, + {IPA_CMD_DELVLAN, "delvlan"}, + {IPA_CMD_VNICC, "vnic_characteristics"}, + {IPA_CMD_SETBRIDGEPORT_OSA, "set_bridge_port(osa)"}, + {IPA_CMD_SETCCID, "setccid"}, + {IPA_CMD_DELCCID, "delccid"}, + {IPA_CMD_MODCCID, "modccid"}, + {IPA_CMD_SETIP, "setip"}, + {IPA_CMD_QIPASSIST, "qipassist"}, + {IPA_CMD_SETASSPARMS, "setassparms"}, + {IPA_CMD_SETIPM, "setipm"}, + {IPA_CMD_DELIPM, "delipm"}, + {IPA_CMD_SETRTG, "setrtg"}, + {IPA_CMD_DELIP, "delip"}, + {IPA_CMD_SETADAPTERPARMS, "setadapterparms"}, + {IPA_CMD_SET_DIAG_ASS, "set_diag_ass"}, + {IPA_CMD_SETBRIDGEPORT_IQD, "set_bridge_port(hs)"}, + {IPA_CMD_CREATE_ADDR, "create_addr"}, + {IPA_CMD_DESTROY_ADDR, "destroy_addr"}, + {IPA_CMD_REGISTER_LOCAL_ADDR, "register_local_addr"}, + {IPA_CMD_UNREGISTER_LOCAL_ADDR, "unregister_local_addr"}, + {IPA_CMD_ADDRESS_CHANGE_NOTIF, "address_change_notification"}, + {IPA_CMD_UNKNOWN, "unknown"}, +}; + +const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd) +{ + int x; + + for (x = 0; x < ARRAY_SIZE(qeth_ipa_cmd_names) - 1; x++) + if (qeth_ipa_cmd_names[x].cmd == cmd) + return qeth_ipa_cmd_names[x].name; + return qeth_ipa_cmd_names[x].name; +} diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h new file mode 100644 index 000000000..6541bab96 --- /dev/null +++ b/drivers/s390/net/qeth_core_mpc.h @@ -0,0 +1,947 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * 
Copyright IBM Corp. 2007 + * Author(s): Frank Pavlic <fpavlic@de.ibm.com>, + * Thomas Spatzier <tspat@de.ibm.com>, + * Frank Blaschka <frank.blaschka@de.ibm.com> + */ + +#ifndef __QETH_CORE_MPC_H__ +#define __QETH_CORE_MPC_H__ + +#include <asm/qeth.h> +#include <uapi/linux/if_ether.h> +#include <uapi/linux/in6.h> + +extern const unsigned char IPA_PDU_HEADER[]; +#define IPA_PDU_HEADER_SIZE 0x40 +#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e) +#define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer + 0x26) +#define QETH_IPA_PDU_LEN_PDU2(buffer) (buffer + 0x29) +#define QETH_IPA_PDU_LEN_PDU3(buffer) (buffer + 0x3a) + +#define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer + 0x2c) + +#define QETH_SEQ_NO_LENGTH 4 +#define QETH_MPC_TOKEN_LENGTH 4 +#define QETH_MCL_LENGTH 4 + +#define QETH_TIMEOUT (10 * HZ) +#define QETH_IPA_TIMEOUT (45 * HZ) + +/*****************************************************************************/ +/* IP Assist related definitions */ +/*****************************************************************************/ +#define IPA_CMD_INITIATOR_HOST 0x00 +#define IPA_CMD_INITIATOR_OSA 0x01 +#define IPA_CMD_INITIATOR_HOST_REPLY 0x80 +#define IPA_CMD_INITIATOR_OSA_REPLY 0x81 +#define IPA_CMD_PRIM_VERSION_NO 0x01 + +struct qeth_ipa_caps { + u32 supported; + u32 enabled; +}; + +static inline bool qeth_ipa_caps_supported(struct qeth_ipa_caps *caps, u32 mask) +{ + return (caps->supported & mask) == mask; +} + +static inline bool qeth_ipa_caps_enabled(struct qeth_ipa_caps *caps, u32 mask) +{ + return (caps->enabled & mask) == mask; +} + +#define qeth_adp_supported(c, f) \ + qeth_ipa_caps_supported(&c->options.adp, f) +#define qeth_is_supported(c, f) \ + qeth_ipa_caps_supported(&c->options.ipa4, f) +#define qeth_is_supported6(c, f) \ + qeth_ipa_caps_supported(&c->options.ipa6, f) +#define qeth_is_ipafunc_supported(c, prot, f) \ + ((prot == QETH_PROT_IPV6) ? 
qeth_is_supported6(c, f) : \ + qeth_is_supported(c, f)) + +enum qeth_card_types { + QETH_CARD_TYPE_OSD = 1, + QETH_CARD_TYPE_IQD = 5, + QETH_CARD_TYPE_OSN = 6, + QETH_CARD_TYPE_OSM = 3, + QETH_CARD_TYPE_OSX = 2, +}; + +#define IS_IQD(card) ((card)->info.type == QETH_CARD_TYPE_IQD) +#define IS_OSD(card) ((card)->info.type == QETH_CARD_TYPE_OSD) +#define IS_OSM(card) ((card)->info.type == QETH_CARD_TYPE_OSM) + +#ifdef CONFIG_QETH_OSN +#define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN) +#else +#define IS_OSN(card) false +#endif + +#ifdef CONFIG_QETH_OSX +#define IS_OSX(card) ((card)->info.type == QETH_CARD_TYPE_OSX) +#else +#define IS_OSX(card) false +#endif + +#define IS_VM_NIC(card) ((card)->info.is_vm_nic) + +#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18 +/* only the first two bytes are looked at in qeth_get_cardname_short */ +enum qeth_link_types { + QETH_LINK_TYPE_FAST_ETH = 0x01, + QETH_LINK_TYPE_HSTR = 0x02, + QETH_LINK_TYPE_GBIT_ETH = 0x03, + QETH_LINK_TYPE_OSN = 0x04, + QETH_LINK_TYPE_10GBIT_ETH = 0x10, + QETH_LINK_TYPE_25GBIT_ETH = 0x12, + QETH_LINK_TYPE_LANE_ETH100 = 0x81, + QETH_LINK_TYPE_LANE_TR = 0x82, + QETH_LINK_TYPE_LANE_ETH1000 = 0x83, + QETH_LINK_TYPE_LANE = 0x88, +}; + +enum qeth_routing_types { + /* TODO: set to bit flag used in IPA Command */ + NO_ROUTER = 0, + PRIMARY_ROUTER = 1, + SECONDARY_ROUTER = 2, + MULTICAST_ROUTER = 3, + PRIMARY_CONNECTOR = 4, + SECONDARY_CONNECTOR = 5, +}; + +/* IPA Commands */ +enum qeth_ipa_cmds { + IPA_CMD_STARTLAN = 0x01, + IPA_CMD_STOPLAN = 0x02, + IPA_CMD_SETVMAC = 0x21, + IPA_CMD_DELVMAC = 0x22, + IPA_CMD_SETGMAC = 0x23, + IPA_CMD_DELGMAC = 0x24, + IPA_CMD_SETVLAN = 0x25, + IPA_CMD_DELVLAN = 0x26, + IPA_CMD_VNICC = 0x2a, + IPA_CMD_SETBRIDGEPORT_OSA = 0x2b, + IPA_CMD_SETCCID = 0x41, + IPA_CMD_DELCCID = 0x42, + IPA_CMD_MODCCID = 0x43, + IPA_CMD_SETIP = 0xb1, + IPA_CMD_QIPASSIST = 0xb2, + IPA_CMD_SETASSPARMS = 0xb3, + IPA_CMD_SETIPM = 0xb4, + IPA_CMD_DELIPM = 0xb5, + IPA_CMD_SETRTG = 0xb6, + IPA_CMD_DELIP = 0xb7, + IPA_CMD_SETADAPTERPARMS = 0xb8, + IPA_CMD_SET_DIAG_ASS = 0xb9, + IPA_CMD_SETBRIDGEPORT_IQD = 0xbe, + IPA_CMD_CREATE_ADDR = 0xc3, + IPA_CMD_DESTROY_ADDR = 0xc4, + IPA_CMD_REGISTER_LOCAL_ADDR = 0xd1, + IPA_CMD_UNREGISTER_LOCAL_ADDR = 0xd2, + IPA_CMD_ADDRESS_CHANGE_NOTIF = 0xd3, + IPA_CMD_UNKNOWN = 0x00 +}; + +enum qeth_ip_ass_cmds { + IPA_CMD_ASS_START = 0x0001, + IPA_CMD_ASS_STOP = 0x0002, + IPA_CMD_ASS_CONFIGURE = 0x0003, + IPA_CMD_ASS_ENABLE = 0x0004, +}; + +enum qeth_arp_process_subcmds { + IPA_CMD_ASS_ARP_SET_NO_ENTRIES = 0x0003, + IPA_CMD_ASS_ARP_QUERY_CACHE = 0x0004, + IPA_CMD_ASS_ARP_ADD_ENTRY = 0x0005, + IPA_CMD_ASS_ARP_REMOVE_ENTRY = 0x0006, + IPA_CMD_ASS_ARP_FLUSH_CACHE = 0x0007, + IPA_CMD_ASS_ARP_QUERY_INFO = 0x0104, + IPA_CMD_ASS_ARP_QUERY_STATS = 0x0204, +}; + + +/* Return Codes for IPA Commands + * according to OSA card Specs */ + +enum qeth_ipa_return_codes { + IPA_RC_SUCCESS = 0x0000, + IPA_RC_NOTSUPP = 0x0001, + IPA_RC_IP_TABLE_FULL = 0x0002, + IPA_RC_UNKNOWN_ERROR = 0x0003, + IPA_RC_UNSUPPORTED_COMMAND = 0x0004, + IPA_RC_TRACE_ALREADY_ACTIVE = 0x0005, + IPA_RC_INVALID_FORMAT = 0x0006, + IPA_RC_DUP_IPV6_REMOTE = 0x0008, + IPA_RC_SBP_IQD_NOT_CONFIGURED = 0x000C, + IPA_RC_DUP_IPV6_HOME = 0x0010, + IPA_RC_UNREGISTERED_ADDR = 0x0011, + IPA_RC_NO_ID_AVAILABLE = 0x0012, + IPA_RC_ID_NOT_FOUND = 0x0013, + IPA_RC_SBP_IQD_ANO_DEV_PRIMARY = 0x0014, + IPA_RC_SBP_IQD_CURRENT_SECOND = 0x0018, + IPA_RC_SBP_IQD_LIMIT_SECOND = 0x001C, + IPA_RC_INVALID_IP_VERSION = 0x0020, + IPA_RC_SBP_IQD_CURRENT_PRIMARY = 
0x0024, + IPA_RC_LAN_FRAME_MISMATCH = 0x0040, + IPA_RC_SBP_IQD_NO_QDIO_QUEUES = 0x00EB, + IPA_RC_L2_UNSUPPORTED_CMD = 0x2003, + IPA_RC_L2_DUP_MAC = 0x2005, + IPA_RC_L2_ADDR_TABLE_FULL = 0x2006, + IPA_RC_L2_DUP_LAYER3_MAC = 0x200a, + IPA_RC_L2_GMAC_NOT_FOUND = 0x200b, + IPA_RC_L2_MAC_NOT_AUTH_BY_HYP = 0x200c, + IPA_RC_L2_MAC_NOT_AUTH_BY_ADP = 0x200d, + IPA_RC_L2_MAC_NOT_FOUND = 0x2010, + IPA_RC_L2_INVALID_VLAN_ID = 0x2015, + IPA_RC_L2_DUP_VLAN_ID = 0x2016, + IPA_RC_L2_VLAN_ID_NOT_FOUND = 0x2017, + IPA_RC_L2_VLAN_ID_NOT_ALLOWED = 0x2050, + IPA_RC_VNICC_VNICBP = 0x20B0, + IPA_RC_SBP_OSA_NOT_CONFIGURED = 0x2B0C, + IPA_RC_SBP_OSA_OS_MISMATCH = 0x2B10, + IPA_RC_SBP_OSA_ANO_DEV_PRIMARY = 0x2B14, + IPA_RC_SBP_OSA_CURRENT_SECOND = 0x2B18, + IPA_RC_SBP_OSA_LIMIT_SECOND = 0x2B1C, + IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN = 0x2B20, + IPA_RC_SBP_OSA_CURRENT_PRIMARY = 0x2B24, + IPA_RC_SBP_OSA_NO_QDIO_QUEUES = 0x2BEB, + IPA_RC_DATA_MISMATCH = 0xe001, + IPA_RC_INVALID_MTU_SIZE = 0xe002, + IPA_RC_INVALID_LANTYPE = 0xe003, + IPA_RC_INVALID_LANNUM = 0xe004, + IPA_RC_DUPLICATE_IP_ADDRESS = 0xe005, + IPA_RC_IP_ADDR_TABLE_FULL = 0xe006, + IPA_RC_LAN_PORT_STATE_ERROR = 0xe007, + IPA_RC_SETIP_NO_STARTLAN = 0xe008, + IPA_RC_SETIP_ALREADY_RECEIVED = 0xe009, + IPA_RC_IP_ADDR_ALREADY_USED = 0xe00a, + IPA_RC_MC_ADDR_NOT_FOUND = 0xe00b, + IPA_RC_SETIP_INVALID_VERSION = 0xe00d, + IPA_RC_UNSUPPORTED_SUBCMD = 0xe00e, + IPA_RC_ARP_ASSIST_NO_ENABLE = 0xe00f, + IPA_RC_PRIMARY_ALREADY_DEFINED = 0xe010, + IPA_RC_SECOND_ALREADY_DEFINED = 0xe011, + IPA_RC_INVALID_SETRTG_INDICATOR = 0xe012, + IPA_RC_MC_ADDR_ALREADY_DEFINED = 0xe013, + IPA_RC_LAN_OFFLINE = 0xe080, + IPA_RC_VEPA_TO_VEB_TRANSITION = 0xe090, + IPA_RC_INVALID_IP_VERSION2 = 0xf001, + IPA_RC_FFFF = 0xffff +}; +/* for VNIC Characteristics */ +#define IPA_RC_VNICC_OOSEQ 0x0005 + +/* for SET_DIAGNOSTIC_ASSIST */ +#define IPA_RC_INVALID_SUBCMD IPA_RC_IP_TABLE_FULL +#define IPA_RC_HARDWARE_AUTH_ERROR IPA_RC_UNKNOWN_ERROR + +/* for SETBRIDGEPORT (double occupancies) */ +#define IPA_RC_SBP_IQD_OS_MISMATCH IPA_RC_DUP_IPV6_HOME +#define IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN IPA_RC_INVALID_IP_VERSION + +/* IPA function flags; each flag marks availability of respective function */ +enum qeth_ipa_funcs { + IPA_ARP_PROCESSING = 0x00000001L, + IPA_INBOUND_CHECKSUM = 0x00000002L, + IPA_OUTBOUND_CHECKSUM = 0x00000004L, + /* RESERVED = 0x00000008L,*/ + IPA_FILTERING = 0x00000010L, + IPA_IPV6 = 0x00000020L, + IPA_MULTICASTING = 0x00000040L, + IPA_IP_REASSEMBLY = 0x00000080L, + IPA_QUERY_ARP_COUNTERS = 0x00000100L, + IPA_QUERY_ARP_ADDR_INFO = 0x00000200L, + IPA_SETADAPTERPARMS = 0x00000400L, + IPA_VLAN_PRIO = 0x00000800L, + IPA_PASSTHRU = 0x00001000L, + IPA_FLUSH_ARP_SUPPORT = 0x00002000L, + IPA_FULL_VLAN = 0x00004000L, + IPA_INBOUND_PASSTHRU = 0x00008000L, + IPA_SOURCE_MAC = 0x00010000L, + IPA_OSA_MC_ROUTER = 0x00020000L, + IPA_QUERY_ARP_ASSIST = 0x00040000L, + IPA_INBOUND_TSO = 0x00080000L, + IPA_OUTBOUND_TSO = 0x00100000L, + IPA_INBOUND_CHECKSUM_V6 = 0x00400000L, + IPA_OUTBOUND_CHECKSUM_V6 = 0x00800000L, +}; + +/* SETIP/DELIP IPA Command: ***************************************************/ +enum qeth_ipa_setdelip_flags { + QETH_IPA_SETDELIP_DEFAULT = 0x00L, /* default */ + QETH_IPA_SETIP_VIPA_FLAG = 0x01L, /* no grat. ARP */ + QETH_IPA_SETIP_TAKEOVER_FLAG = 0x02L, /* nofail on grat. 
ARP */ + QETH_IPA_DELIP_ADDR_2_B_TAKEN_OVER = 0x20L, + QETH_IPA_DELIP_VIPA_FLAG = 0x40L, + QETH_IPA_DELIP_ADDR_NEEDS_SETIP = 0x80L, +}; + +/* SETADAPTER IPA Command: ****************************************************/ +enum qeth_ipa_setadp_cmd { + IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x00000001L, + IPA_SETADP_ALTER_MAC_ADDRESS = 0x00000002L, + IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x00000004L, + IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x00000008L, + IPA_SETADP_SET_ADDRESSING_MODE = 0x00000010L, + IPA_SETADP_SET_CONFIG_PARMS = 0x00000020L, + IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x00000040L, + IPA_SETADP_SET_BROADCAST_MODE = 0x00000080L, + IPA_SETADP_SEND_OSA_MESSAGE = 0x00000100L, + IPA_SETADP_SET_SNMP_CONTROL = 0x00000200L, + IPA_SETADP_QUERY_CARD_INFO = 0x00000400L, + IPA_SETADP_SET_PROMISC_MODE = 0x00000800L, + IPA_SETADP_SET_DIAG_ASSIST = 0x00002000L, + IPA_SETADP_SET_ACCESS_CONTROL = 0x00010000L, + IPA_SETADP_QUERY_OAT = 0x00080000L, + IPA_SETADP_QUERY_SWITCH_ATTRIBUTES = 0x00100000L, +}; +enum qeth_ipa_mac_ops { + CHANGE_ADDR_READ_MAC = 0, + CHANGE_ADDR_REPLACE_MAC = 1, + CHANGE_ADDR_ADD_MAC = 2, + CHANGE_ADDR_DEL_MAC = 4, + CHANGE_ADDR_RESET_MAC = 8, +}; +enum qeth_ipa_addr_ops { + CHANGE_ADDR_READ_ADDR = 0, + CHANGE_ADDR_ADD_ADDR = 1, + CHANGE_ADDR_DEL_ADDR = 2, + CHANGE_ADDR_FLUSH_ADDR_TABLE = 4, +}; +enum qeth_ipa_promisc_modes { + SET_PROMISC_MODE_OFF = 0, + SET_PROMISC_MODE_ON = 1, +}; +enum qeth_ipa_isolation_modes { + ISOLATION_MODE_NONE = 0x00000000L, + ISOLATION_MODE_FWD = 0x00000001L, + ISOLATION_MODE_DROP = 0x00000002L, +}; +enum qeth_ipa_set_access_mode_rc { + SET_ACCESS_CTRL_RC_SUCCESS = 0x0000, + SET_ACCESS_CTRL_RC_NOT_SUPPORTED = 0x0004, + SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED = 0x0008, + SET_ACCESS_CTRL_RC_ALREADY_ISOLATED = 0x0010, + SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER = 0x0014, + SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF = 0x0018, + SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED = 0x0022, + SET_ACCESS_CTRL_RC_REFLREL_FAILED = 0x0024, + SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED = 0x0028, +}; +enum qeth_card_info_card_type { + CARD_INFO_TYPE_1G_COPPER_A = 0x61, + CARD_INFO_TYPE_1G_FIBRE_A = 0x71, + CARD_INFO_TYPE_10G_FIBRE_A = 0x91, + CARD_INFO_TYPE_1G_COPPER_B = 0xb1, + CARD_INFO_TYPE_1G_FIBRE_B = 0xa1, + CARD_INFO_TYPE_10G_FIBRE_B = 0xc1, +}; +enum qeth_card_info_port_mode { + CARD_INFO_PORTM_HALFDUPLEX = 0x0002, + CARD_INFO_PORTM_FULLDUPLEX = 0x0003, +}; +enum qeth_card_info_port_speed { + CARD_INFO_PORTS_10M = 0x00000005, + CARD_INFO_PORTS_100M = 0x00000006, + CARD_INFO_PORTS_1G = 0x00000007, + CARD_INFO_PORTS_10G = 0x00000008, + CARD_INFO_PORTS_25G = 0x0000000A, +}; + +/* (SET)DELIP(M) IPA stuff ***************************************************/ +struct qeth_ipacmd_setdelip4 { + __be32 addr; + __be32 mask; + __u32 flags; +} __attribute__ ((packed)); + +struct qeth_ipacmd_setdelip6 { + struct in6_addr addr; + struct in6_addr prefix; + __u32 flags; +} __attribute__ ((packed)); + +struct qeth_ipacmd_setdelipm { + __u8 mac[6]; + __u8 padding[2]; + struct in6_addr ip; +} __attribute__ ((packed)); + +struct qeth_ipacmd_layer2setdelmac { + __u32 mac_length; + __u8 mac[6]; +} __attribute__ ((packed)); + +struct qeth_ipacmd_layer2setdelvlan { + __u16 vlan_id; +} __attribute__ ((packed)); + +struct qeth_ipacmd_setassparms_hdr { + __u16 length; + __u16 command_code; + __u16 return_code; + __u8 number_of_replies; + __u8 seq_no; +} __attribute__((packed)); + +struct qeth_arp_query_data { + __u16 request_bits; + __u16 reply_bits; + __u32 no_entries; + char data; /* only for 
replies */ +} __attribute__((packed)); + +/* used as parameter for arp_query reply */ +struct qeth_arp_query_info { + __u32 udata_len; + __u16 mask_bits; + __u32 udata_offset; + __u32 no_entries; + char *udata; +}; + +/* IPA set assist segmentation bit definitions for receive and + * transmit checksum offloading. + */ +enum qeth_ipa_checksum_bits { + QETH_IPA_CHECKSUM_IP_HDR = 0x0002, + QETH_IPA_CHECKSUM_UDP = 0x0008, + QETH_IPA_CHECKSUM_TCP = 0x0010, + QETH_IPA_CHECKSUM_LP2LP = 0x0020 +}; + +enum qeth_ipa_large_send_caps { + QETH_IPA_LARGE_SEND_TCP = 0x00000001, +}; + +struct qeth_tso_start_data { + u32 mss; + u32 supported; +}; + +/* SETASSPARMS IPA Command: */ +struct qeth_ipacmd_setassparms { + u32 assist_no; + struct qeth_ipacmd_setassparms_hdr hdr; + union { + __u32 flags_32bit; + struct qeth_ipa_caps caps; + struct qeth_arp_cache_entry arp_entry; + struct qeth_arp_query_data query_arp; + struct qeth_tso_start_data tso; + } data; +} __attribute__ ((packed)); + +#define SETASS_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setassparms,\ + data.field) + +/* SETRTG IPA Command: ****************************************************/ +struct qeth_set_routing { + __u8 type; +}; + +/* SETADAPTERPARMS IPA Command: *******************************************/ +struct qeth_query_cmds_supp { + __u32 no_lantypes_supp; + __u8 lan_type; + __u8 reserved1[3]; + __u32 supported_cmds; + __u8 reserved2[8]; +} __attribute__ ((packed)); + +struct qeth_change_addr { + u32 cmd; + u32 addr_size; + u32 no_macs; + u8 addr[ETH_ALEN]; +}; + +struct qeth_snmp_cmd { + __u8 token[16]; + __u32 request; + __u32 interface; + __u32 returncode; + __u32 firmwarelevel; + __u32 seqno; + __u8 data; +} __attribute__ ((packed)); + +struct qeth_snmp_ureq_hdr { + __u32 data_len; + __u32 req_len; + __u32 reserved1; + __u32 reserved2; +} __attribute__ ((packed)); + +struct qeth_snmp_ureq { + struct qeth_snmp_ureq_hdr hdr; + struct qeth_snmp_cmd cmd; +} __attribute__((packed)); + +/* SET_ACCESS_CONTROL: same format for request and reply */ +struct qeth_set_access_ctrl { + __u32 subcmd_code; + __u8 reserved[8]; +} __attribute__((packed)); + +struct qeth_query_oat { + __u32 subcmd_code; + __u8 reserved[12]; +} __packed; + +struct qeth_qoat_priv { + __u32 buffer_len; + __u32 response_len; + char *buffer; +}; + +struct qeth_query_card_info { + __u8 card_type; + __u8 reserved1; + __u16 port_mode; + __u32 port_speed; + __u32 reserved2; +}; + +#define QETH_SWITCH_FORW_802_1 0x00000001 +#define QETH_SWITCH_FORW_REFL_RELAY 0x00000002 +#define QETH_SWITCH_CAP_RTE 0x00000004 +#define QETH_SWITCH_CAP_ECP 0x00000008 +#define QETH_SWITCH_CAP_VDP 0x00000010 + +struct qeth_query_switch_attributes { + __u8 version; + __u8 reserved1; + __u16 reserved2; + __u32 capabilities; + __u32 settings; + __u8 reserved3[8]; +}; + +#define QETH_SETADP_FLAGS_VIRTUAL_MAC 0x80 /* for CHANGE_ADDR_READ_MAC */ + +struct qeth_ipacmd_setadpparms_hdr { + u16 cmdlength; + u16 reserved2; + u32 command_code; + u16 return_code; + u8 used_total; + u8 seq_no; + u8 flags; + u8 reserved3[3]; +}; + +struct qeth_ipacmd_setadpparms { + struct qeth_ipa_caps hw_cmds; + struct qeth_ipacmd_setadpparms_hdr hdr; + union { + struct qeth_query_cmds_supp query_cmds_supp; + struct qeth_change_addr change_addr; + struct qeth_snmp_cmd snmp; + struct qeth_set_access_ctrl set_access_ctrl; + struct qeth_query_oat query_oat; + struct qeth_query_card_info card_info; + struct qeth_query_switch_attributes query_switch_attributes; + __u32 mode; + } data; +} __attribute__ ((packed)); + +#define 
SETADP_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setadpparms,\ + data.field) + +/* CREATE_ADDR IPA Command: ***********************************************/ +struct qeth_create_destroy_address { + u8 mac_addr[ETH_ALEN]; + u16 uid; +}; + +/* SET DIAGNOSTIC ASSIST IPA Command: *************************************/ + +enum qeth_diags_cmds { + QETH_DIAGS_CMD_QUERY = 0x0001, + QETH_DIAGS_CMD_TRAP = 0x0002, + QETH_DIAGS_CMD_TRACE = 0x0004, + QETH_DIAGS_CMD_NOLOG = 0x0008, + QETH_DIAGS_CMD_DUMP = 0x0010, +}; + +enum qeth_diags_trace_types { + QETH_DIAGS_TYPE_HIPERSOCKET = 0x02, +}; + +enum qeth_diags_trace_cmds { + QETH_DIAGS_CMD_TRACE_ENABLE = 0x0001, + QETH_DIAGS_CMD_TRACE_DISABLE = 0x0002, + QETH_DIAGS_CMD_TRACE_MODIFY = 0x0004, + QETH_DIAGS_CMD_TRACE_REPLACE = 0x0008, + QETH_DIAGS_CMD_TRACE_QUERY = 0x0010, +}; + +enum qeth_diags_trap_action { + QETH_DIAGS_TRAP_ARM = 0x01, + QETH_DIAGS_TRAP_DISARM = 0x02, + QETH_DIAGS_TRAP_CAPTURE = 0x04, +}; + +struct qeth_ipacmd_diagass { + __u32 host_tod2; + __u32:32; + __u16 subcmd_len; + __u16:16; + __u32 subcmd; + __u8 type; + __u8 action; + __u16 options; + __u32 ext; + __u8 cdata[64]; +} __attribute__ ((packed)); + +#define DIAG_HDR_LEN offsetofend(struct qeth_ipacmd_diagass, ext) +#define DIAG_SUB_HDR_LEN (offsetofend(struct qeth_ipacmd_diagass, ext) -\ + offsetof(struct qeth_ipacmd_diagass, \ + subcmd_len)) + +/* VNIC Characteristics IPA Command: *****************************************/ +/* IPA commands/sub commands for VNICC */ +#define IPA_VNICC_QUERY_CHARS 0x00000000L +#define IPA_VNICC_QUERY_CMDS 0x00000001L +#define IPA_VNICC_ENABLE 0x00000002L +#define IPA_VNICC_DISABLE 0x00000004L +#define IPA_VNICC_SET_TIMEOUT 0x00000008L +#define IPA_VNICC_GET_TIMEOUT 0x00000010L + +/* VNICC flags */ +#define QETH_VNICC_FLOODING 0x80000000 +#define QETH_VNICC_MCAST_FLOODING 0x40000000 +#define QETH_VNICC_LEARNING 0x20000000 +#define QETH_VNICC_TAKEOVER_SETVMAC 0x10000000 +#define QETH_VNICC_TAKEOVER_LEARNING 0x08000000 +#define QETH_VNICC_BRIDGE_INVISIBLE 0x04000000 +#define QETH_VNICC_RX_BCAST 0x02000000 + +/* VNICC default values */ +#define QETH_VNICC_ALL 0xff000000 +#define QETH_VNICC_DEFAULT QETH_VNICC_RX_BCAST +/* default VNICC timeout in seconds */ +#define QETH_VNICC_DEFAULT_TIMEOUT 600 + +/* VNICC header */ +struct qeth_ipacmd_vnicc_hdr { + u16 data_length; + u16 reserved; + u32 sub_command; +}; + +/* query supported commands for VNIC characteristic */ +struct qeth_vnicc_query_cmds { + u32 vnic_char; + u32 sup_cmds; +}; + +/* enable/disable VNIC characteristic */ +struct qeth_vnicc_set_char { + u32 vnic_char; +}; + +/* get/set timeout for VNIC characteristic */ +struct qeth_vnicc_getset_timeout { + u32 vnic_char; + u32 timeout; +}; + +/* complete VNICC IPA command message */ +struct qeth_ipacmd_vnicc { + struct qeth_ipa_caps vnicc_cmds; + struct qeth_ipacmd_vnicc_hdr hdr; + union { + struct qeth_vnicc_query_cmds query_cmds; + struct qeth_vnicc_set_char set_char; + struct qeth_vnicc_getset_timeout getset_timeout; + } data; +}; + +#define VNICC_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_vnicc,\ + data.field) + +/* SETBRIDGEPORT IPA Command: *********************************************/ +enum qeth_ipa_sbp_cmd { + IPA_SBP_QUERY_COMMANDS_SUPPORTED = 0x00000000L, + IPA_SBP_RESET_BRIDGE_PORT_ROLE = 0x00000001L, + IPA_SBP_SET_PRIMARY_BRIDGE_PORT = 0x00000002L, + IPA_SBP_SET_SECONDARY_BRIDGE_PORT = 0x00000004L, + IPA_SBP_QUERY_BRIDGE_PORTS = 0x00000008L, + IPA_SBP_BRIDGE_PORT_STATE_CHANGE = 0x00000010L, +}; + +struct net_if_token { + 
__u16 devnum; + __u8 cssid; + __u8 iid; + __u8 ssid; + __u8 chpid; + __u16 chid; +} __packed; + +struct mac_addr_lnid { + __u8 mac[6]; + __u16 lnid; +} __packed; + +struct qeth_ipacmd_sbp_hdr { + __u16 cmdlength; + __u16 reserved1; + __u32 command_code; + __u16 return_code; + __u8 used_total; + __u8 seq_no; + __u32 reserved2; +} __packed; + +struct qeth_sbp_query_cmds_supp { + __u32 supported_cmds; + __u32 reserved; +} __packed; + +struct qeth_sbp_set_primary { + struct net_if_token token; +} __packed; + +struct qeth_sbp_port_entry { + __u8 role; + __u8 state; + __u8 reserved1; + __u8 reserved2; + struct net_if_token token; +} __packed; + +/* For IPA_SBP_QUERY_BRIDGE_PORTS, IPA_SBP_BRIDGE_PORT_STATE_CHANGE */ +struct qeth_sbp_port_data { + __u8 primary_bp_supported; + __u8 secondary_bp_supported; + __u8 num_entries; + __u8 entry_length; + struct qeth_sbp_port_entry entry[]; +} __packed; + +struct qeth_ipacmd_setbridgeport { + struct qeth_ipa_caps sbp_cmds; + struct qeth_ipacmd_sbp_hdr hdr; + union { + struct qeth_sbp_query_cmds_supp query_cmds_supp; + struct qeth_sbp_set_primary set_primary; + struct qeth_sbp_port_data port_data; + } data; +} __packed; + +#define SBP_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setbridgeport,\ + data.field) + +/* ADDRESS_CHANGE_NOTIFICATION adapter-initiated "command" *******************/ +/* Bitmask for entry->change_code. Both bits may be raised. */ +enum qeth_ipa_addr_change_code { + IPA_ADDR_CHANGE_CODE_VLANID = 0x01, + IPA_ADDR_CHANGE_CODE_MACADDR = 0x02, + IPA_ADDR_CHANGE_CODE_REMOVAL = 0x80, /* else addition */ +}; + +struct qeth_ipacmd_addr_change_entry { + struct net_if_token token; + struct mac_addr_lnid addr_lnid; + __u8 change_code; + __u8 reserved1; + __u16 reserved2; +} __packed; + +struct qeth_ipacmd_addr_change { + __u8 lost_event_mask; + __u8 reserved; + __u16 num_entries; + struct qeth_ipacmd_addr_change_entry entry[]; +} __packed; + +/* [UN]REGISTER_LOCAL_ADDRESS notifications */ +struct qeth_ipacmd_local_addr4 { + __be32 addr; + u32 flags; +}; + +struct qeth_ipacmd_local_addrs4 { + u32 count; + u32 addr_length; + struct qeth_ipacmd_local_addr4 addrs[]; +}; + +struct qeth_ipacmd_local_addr6 { + struct in6_addr addr; + u32 flags; +}; + +struct qeth_ipacmd_local_addrs6 { + u32 count; + u32 addr_length; + struct qeth_ipacmd_local_addr6 addrs[]; +}; + +/* Header for each IPA command */ +struct qeth_ipacmd_hdr { + __u8 command; + __u8 initiator; + __u16 seqno; + __u16 return_code; + __u8 adapter_type; + __u8 rel_adapter_no; + __u8 prim_version_no; + __u8 param_count; + __u16 prot_version; + struct qeth_ipa_caps assists; +} __attribute__ ((packed)); + +/* The IPA command itself */ +struct qeth_ipa_cmd { + struct qeth_ipacmd_hdr hdr; + union { + struct qeth_ipacmd_setdelip4 setdelip4; + struct qeth_ipacmd_setdelip6 setdelip6; + struct qeth_ipacmd_setdelipm setdelipm; + struct qeth_ipacmd_setassparms setassparms; + struct qeth_ipacmd_layer2setdelmac setdelmac; + struct qeth_ipacmd_layer2setdelvlan setdelvlan; + struct qeth_create_destroy_address create_destroy_addr; + struct qeth_ipacmd_setadpparms setadapterparms; + struct qeth_set_routing setrtg; + struct qeth_ipacmd_diagass diagass; + struct qeth_ipacmd_setbridgeport sbp; + struct qeth_ipacmd_addr_change addrchange; + struct qeth_ipacmd_vnicc vnicc; + struct qeth_ipacmd_local_addrs4 local_addrs4; + struct qeth_ipacmd_local_addrs6 local_addrs6; + } data; +} __attribute__ ((packed)); + +#define IPA_DATA_SIZEOF(field) sizeof_field(struct qeth_ipa_cmd, data.field) + +/* + * special command 
for ARP processing. + * this is not included in setassparms command before, because we get + * problem with the size of struct qeth_ipacmd_setassparms otherwise + */ +enum qeth_ipa_arp_return_codes { + QETH_IPA_ARP_RC_SUCCESS = 0x0000, + QETH_IPA_ARP_RC_FAILED = 0x0001, + QETH_IPA_ARP_RC_NOTSUPP = 0x0002, + QETH_IPA_ARP_RC_OUT_OF_RANGE = 0x0003, + QETH_IPA_ARP_RC_Q_NOTSUPP = 0x0004, + QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008, +}; + +extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc); +extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd); + +/* Helper functions */ +#define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \ + (cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY)) + +/*****************************************************************************/ +/* END OF IP Assist related definitions */ +/*****************************************************************************/ + +extern const unsigned char CM_ENABLE[]; +#define CM_ENABLE_SIZE 0x63 +#define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer + 0x2c) +#define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53) +#define QETH_CM_ENABLE_USER_DATA(buffer) (buffer + 0x5b) + +#define QETH_CM_ENABLE_RESP_FILTER_TOKEN(buffer) \ + (PDU_ENCAPSULATION(buffer) + 0x13) + + +extern const unsigned char CM_SETUP[]; +#define CM_SETUP_SIZE 0x64 +#define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer + 0x2c) +#define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51) +#define QETH_CM_SETUP_FILTER_TOKEN(buffer) (buffer + 0x5a) + +#define QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \ + (PDU_ENCAPSULATION(buffer) + 0x1a) + +extern const unsigned char ULP_ENABLE[]; +#define ULP_ENABLE_SIZE 0x6b +#define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer + 0x61) +#define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer + 0x2c) +#define QETH_ULP_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53) +#define QETH_ULP_ENABLE_PORTNAME_AND_LL(buffer) (buffer + 0x62) +#define QETH_ULP_ENABLE_RESP_FILTER_TOKEN(buffer) \ + (PDU_ENCAPSULATION(buffer) + 0x13) +#define QETH_ULP_ENABLE_RESP_MAX_MTU(buffer) \ + (PDU_ENCAPSULATION(buffer) + 0x1f) +#define QETH_ULP_ENABLE_RESP_DIFINFO_LEN(buffer) \ + (PDU_ENCAPSULATION(buffer) + 0x17) +#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \ + (PDU_ENCAPSULATION(buffer) + 0x2b) +/* Layer 2 definitions */ +#define QETH_PROT_LAYER2 0x08 +#define QETH_PROT_TCPIP 0x03 +#define QETH_PROT_OSN2 0x0a +#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer + 0x50) +#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer + 0x19) + +extern const unsigned char ULP_SETUP[]; +#define ULP_SETUP_SIZE 0x6c +#define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer + 0x2c) +#define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51) +#define QETH_ULP_SETUP_FILTER_TOKEN(buffer) (buffer + 0x5a) +#define QETH_ULP_SETUP_CUA(buffer) (buffer + 0x68) +#define QETH_ULP_SETUP_REAL_DEVADDR(buffer) (buffer + 0x6a) + +#define QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(buffer) \ + (PDU_ENCAPSULATION(buffer) + 0x1a) + + +extern const unsigned char DM_ACT[]; +#define DM_ACT_SIZE 0x55 +#define QETH_DM_ACT_DEST_ADDR(buffer) (buffer + 0x2c) +#define QETH_DM_ACT_CONNECTION_TOKEN(buffer) (buffer + 0x51) + + + +#define QETH_TRANSPORT_HEADER_SEQ_NO(buffer) (buffer + 4) +#define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer + 0x1c) +#define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer + 0x20) + +extern const unsigned char IDX_ACTIVATE_READ[]; +extern const unsigned char IDX_ACTIVATE_WRITE[]; +#define IDX_ACTIVATE_SIZE 0x22 +#define QETH_IDX_ACT_PNO(buffer) (buffer+0x0b) +#define 
QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer + 0x0c) +#define QETH_IDX_ACT_INVAL_FRAME 0x40 +#define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b] & 0x80) +#define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer + 0x10) +#define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer + 0x16) +#define QETH_IDX_ACT_QDIO_DEV_CUA(buffer) (buffer + 0x1e) +#define QETH_IDX_ACT_QDIO_DEV_REALADDR(buffer) (buffer + 0x20) +#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2) +#define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12) +#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09] +#define QETH_IDX_ACT_ERR_EXCL 0x19 +#define QETH_IDX_ACT_ERR_AUTH 0x1E +#define QETH_IDX_ACT_ERR_AUTH_USER 0x20 + +#define QETH_IDX_TERMINATE 0xc0 +#define QETH_IDX_TERMINATE_MASK 0xc0 +#define QETH_IDX_TERM_BAD_TRANSPORT 0x41 +#define QETH_IDX_TERM_BAD_TRANSPORT_VM 0xf6 + +#define PDU_ENCAPSULATION(buffer) \ + (buffer + *(buffer + (*(buffer + 0x0b)) + \ + *(buffer + *(buffer + 0x0b) + 0x11) + 0x07)) + +#define IS_IPA(buffer) \ + ((buffer) && \ + (*(buffer + ((*(buffer + 0x0b)) + 4)) == 0xc1)) + +#endif diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c new file mode 100644 index 000000000..4441b3393 --- /dev/null +++ b/drivers/s390/net/qeth_core_sys.c @@ -0,0 +1,693 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2007 + * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, + * Frank Pavlic <fpavlic@de.ibm.com>, + * Thomas Spatzier <tspat@de.ibm.com>, + * Frank Blaschka <frank.blaschka@de.ibm.com> + */ + +#define KMSG_COMPONENT "qeth" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/list.h> +#include <linux/rwsem.h> +#include <asm/ebcdic.h> + +#include "qeth_core.h" + +static ssize_t qeth_dev_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + switch (card->state) { + case CARD_STATE_DOWN: + return sprintf(buf, "DOWN\n"); + case CARD_STATE_SOFTSETUP: + if (card->dev->flags & IFF_UP) + return sprintf(buf, "UP (LAN %s)\n", + netif_carrier_ok(card->dev) ? 
"ONLINE" : + "OFFLINE"); + return sprintf(buf, "SOFTSETUP\n"); + default: + return sprintf(buf, "UNKNOWN\n"); + } +} + +static DEVICE_ATTR(state, 0444, qeth_dev_state_show, NULL); + +static ssize_t qeth_dev_chpid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return sprintf(buf, "%02X\n", card->info.chpid); +} + +static DEVICE_ATTR(chpid, 0444, qeth_dev_chpid_show, NULL); + +static ssize_t qeth_dev_if_name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", netdev_name(card->dev)); +} + +static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL); + +static ssize_t qeth_dev_card_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", qeth_get_cardname_short(card)); +} + +static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL); + +static const char *qeth_get_bufsize_str(struct qeth_card *card) +{ + if (card->qdio.in_buf_size == 16384) + return "16k"; + else if (card->qdio.in_buf_size == 24576) + return "24k"; + else if (card->qdio.in_buf_size == 32768) + return "32k"; + else if (card->qdio.in_buf_size == 40960) + return "40k"; + else + return "64k"; +} + +static ssize_t qeth_dev_inbuf_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", qeth_get_bufsize_str(card)); +} + +static DEVICE_ATTR(inbuf_size, 0444, qeth_dev_inbuf_size_show, NULL); + +static ssize_t qeth_dev_portno_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return sprintf(buf, "%i\n", card->dev->dev_port); +} + +static ssize_t qeth_dev_portno_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + unsigned int portno, limit; + int rc = 0; + + rc = kstrtouint(buf, 16, &portno); + if (rc) + return rc; + if (portno > QETH_MAX_PORTNO) + return -EINVAL; + + mutex_lock(&card->conf_mutex); + if (card->state != CARD_STATE_DOWN) { + rc = -EPERM; + goto out; + } + + limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt); + if (portno > limit) { + rc = -EINVAL; + goto out; + } + card->dev->dev_port = portno; +out: + mutex_unlock(&card->conf_mutex); + return rc ? 
rc : count; +} + +static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store); + +static ssize_t qeth_dev_portname_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "no portname required\n"); +} + +static ssize_t qeth_dev_portname_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + dev_warn_once(&card->gdev->dev, + "portname is deprecated and is ignored\n"); + return count; +} + +static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show, + qeth_dev_portname_store); + +static ssize_t qeth_dev_prioqing_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + switch (card->qdio.do_prio_queueing) { + case QETH_PRIO_Q_ING_PREC: + return sprintf(buf, "%s\n", "by precedence"); + case QETH_PRIO_Q_ING_TOS: + return sprintf(buf, "%s\n", "by type of service"); + case QETH_PRIO_Q_ING_SKB: + return sprintf(buf, "%s\n", "by skb-priority"); + case QETH_PRIO_Q_ING_VLAN: + return sprintf(buf, "%s\n", "by VLAN headers"); + case QETH_PRIO_Q_ING_FIXED: + return sprintf(buf, "always queue %i\n", + card->qdio.default_out_queue); + default: + return sprintf(buf, "disabled\n"); + } +} + +static ssize_t qeth_dev_prioqing_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + int rc = 0; + + if (IS_IQD(card) || IS_VM_NIC(card)) + return -EOPNOTSUPP; + + mutex_lock(&card->conf_mutex); + if (card->state != CARD_STATE_DOWN) { + rc = -EPERM; + goto out; + } + + /* devices with only one outbound queue (e.g. 1920 devices) cannot do + * priority queueing, so reject any attempt to enable it + */ + if (card->qdio.no_out_queues == 1) { + card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; + rc = -EPERM; + goto out; + } + + if (sysfs_streq(buf, "prio_queueing_prec")) { + card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC; + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; + } else if (sysfs_streq(buf, "prio_queueing_skb")) { + card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_SKB; + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; + } else if (sysfs_streq(buf, "prio_queueing_tos")) { + card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS; + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; + } else if (sysfs_streq(buf, "prio_queueing_vlan")) { + if (IS_LAYER3(card)) { + rc = -EOPNOTSUPP; + goto out; + } + card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN; + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; + } else if (sysfs_streq(buf, "no_prio_queueing:0")) { + card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED; + card->qdio.default_out_queue = 0; + } else if (sysfs_streq(buf, "no_prio_queueing:1")) { + card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED; + card->qdio.default_out_queue = 1; + } else if (sysfs_streq(buf, "no_prio_queueing:2")) { + card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED; + card->qdio.default_out_queue = 2; + } else if (sysfs_streq(buf, "no_prio_queueing:3")) { + card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_FIXED; + card->qdio.default_out_queue = 3; + } else if (sysfs_streq(buf, "no_prio_queueing")) { + card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; + } else + rc = -EINVAL; +out: + mutex_unlock(&card->conf_mutex); + return rc ? 
rc : count; +} + +static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show, + qeth_dev_prioqing_store); + +static ssize_t qeth_dev_bufcnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count); +} + +static ssize_t qeth_dev_bufcnt_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + unsigned int cnt; + int rc = 0; + + rc = kstrtouint(buf, 10, &cnt); + if (rc) + return rc; + + mutex_lock(&card->conf_mutex); + if (card->state != CARD_STATE_DOWN) { + rc = -EPERM; + goto out; + } + + cnt = clamp(cnt, QETH_IN_BUF_COUNT_MIN, QETH_IN_BUF_COUNT_MAX); + rc = qeth_resize_buffer_pool(card, cnt); + +out: + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show, + qeth_dev_bufcnt_store); + +static ssize_t qeth_dev_recover_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + bool reset; + int rc; + + rc = kstrtobool(buf, &reset); + if (rc) + return rc; + + if (!qeth_card_hw_is_reachable(card)) + return -EPERM; + + if (reset) + rc = qeth_schedule_recovery(card); + + return rc ? rc : count; +} + +static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store); + +static ssize_t qeth_dev_performance_stats_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +static ssize_t qeth_dev_performance_stats_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + struct qeth_qdio_out_q *queue; + unsigned int i; + bool reset; + int rc; + + rc = kstrtobool(buf, &reset); + if (rc) + return rc; + + if (reset) { + memset(&card->stats, 0, sizeof(card->stats)); + for (i = 0; i < card->qdio.no_out_queues; i++) { + queue = card->qdio.out_qs[i]; + if (!queue) + break; + memset(&queue->stats, 0, sizeof(queue->stats)); + } + } + + return count; +} + +static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show, + qeth_dev_performance_stats_store); + +static ssize_t qeth_dev_layer2_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return sprintf(buf, "%i\n", card->options.layer); +} + +static ssize_t qeth_dev_layer2_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + struct net_device *ndev; + enum qeth_discipline_id newdis; + unsigned int input; + int rc; + + rc = kstrtouint(buf, 16, &input); + if (rc) + return rc; + + switch (input) { + case 0: + newdis = QETH_DISCIPLINE_LAYER3; + break; + case 1: + newdis = QETH_DISCIPLINE_LAYER2; + break; + default: + return -EINVAL; + } + + mutex_lock(&card->discipline_mutex); + if (card->state != CARD_STATE_DOWN) { + rc = -EPERM; + goto out; + } + + if (card->options.layer == newdis) + goto out; + if (card->info.layer_enforced) { + /* fixed layer, can't switch */ + rc = -EOPNOTSUPP; + goto out; + } + + if (card->discipline) { + /* start with a new, pristine netdevice: */ + ndev = qeth_clone_netdev(card->dev); + if (!ndev) { + rc = -ENOMEM; + goto out; + } + + card->discipline->remove(card->gdev); + qeth_core_free_discipline(card); + free_netdev(card->dev); + card->dev = ndev; + } + + 
rc = qeth_core_load_discipline(card, newdis); + if (rc) + goto out; + + rc = card->discipline->setup(card->gdev); + if (rc) + qeth_core_free_discipline(card); +out: + mutex_unlock(&card->discipline_mutex); + return rc ? rc : count; +} + +static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show, + qeth_dev_layer2_store); + +#define ATTR_QETH_ISOLATION_NONE ("none") +#define ATTR_QETH_ISOLATION_FWD ("forward") +#define ATTR_QETH_ISOLATION_DROP ("drop") + +static ssize_t qeth_dev_isolation_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + switch (card->options.isolation) { + case ISOLATION_MODE_NONE: + return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE); + case ISOLATION_MODE_FWD: + return snprintf(buf, 9, "%s\n", ATTR_QETH_ISOLATION_FWD); + case ISOLATION_MODE_DROP: + return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_DROP); + default: + return snprintf(buf, 5, "%s\n", "N/A"); + } +} + +static ssize_t qeth_dev_isolation_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + enum qeth_ipa_isolation_modes isolation; + int rc = 0; + + mutex_lock(&card->conf_mutex); + if (!IS_OSD(card) && !IS_OSX(card)) { + rc = -EOPNOTSUPP; + dev_err(&card->gdev->dev, "Adapter does not " + "support QDIO data connection isolation\n"); + goto out; + } + + /* parse input into isolation mode */ + if (sysfs_streq(buf, ATTR_QETH_ISOLATION_NONE)) { + isolation = ISOLATION_MODE_NONE; + } else if (sysfs_streq(buf, ATTR_QETH_ISOLATION_FWD)) { + isolation = ISOLATION_MODE_FWD; + } else if (sysfs_streq(buf, ATTR_QETH_ISOLATION_DROP)) { + isolation = ISOLATION_MODE_DROP; + } else { + rc = -EINVAL; + goto out; + } + + if (qeth_card_hw_is_reachable(card)) + rc = qeth_setadpparms_set_access_ctrl(card, isolation); + + if (!rc) + WRITE_ONCE(card->options.isolation, isolation); + +out: + mutex_unlock(&card->conf_mutex); + + return rc ? rc : count; +} + +static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show, + qeth_dev_isolation_store); + +static ssize_t qeth_dev_switch_attrs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + struct qeth_switch_info sw_info; + int rc = 0; + + if (!qeth_card_hw_is_reachable(card)) + return sprintf(buf, "n/a\n"); + + rc = qeth_query_switch_attributes(card, &sw_info); + if (rc) + return rc; + + if (!sw_info.capabilities) + rc = sprintf(buf, "unknown"); + + if (sw_info.capabilities & QETH_SWITCH_FORW_802_1) + rc = sprintf(buf, (sw_info.settings & QETH_SWITCH_FORW_802_1 ? + "[802.1]" : "802.1")); + if (sw_info.capabilities & QETH_SWITCH_FORW_REFL_RELAY) + rc += sprintf(buf + rc, + (sw_info.settings & QETH_SWITCH_FORW_REFL_RELAY ? 
+ " [rr]" : " rr")); + rc += sprintf(buf + rc, "\n"); + + return rc; +} + +static DEVICE_ATTR(switch_attrs, 0444, + qeth_dev_switch_attrs_show, NULL); + +static ssize_t qeth_hw_trap_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (card->info.hwtrap) + return snprintf(buf, 5, "arm\n"); + else + return snprintf(buf, 8, "disarm\n"); +} + +static ssize_t qeth_hw_trap_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + int rc = 0; + int state = 0; + + mutex_lock(&card->conf_mutex); + if (qeth_card_hw_is_reachable(card)) + state = 1; + + if (sysfs_streq(buf, "arm") && !card->info.hwtrap) { + if (state) { + if (qeth_is_diagass_supported(card, + QETH_DIAGS_CMD_TRAP)) { + rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM); + if (!rc) + card->info.hwtrap = 1; + } else + rc = -EINVAL; + } else + card->info.hwtrap = 1; + } else if (sysfs_streq(buf, "disarm") && card->info.hwtrap) { + if (state) { + rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); + if (!rc) + card->info.hwtrap = 0; + } else + card->info.hwtrap = 0; + } else if (sysfs_streq(buf, "trap") && state && card->info.hwtrap) + rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_CAPTURE); + else + rc = -EINVAL; + + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static DEVICE_ATTR(hw_trap, 0644, qeth_hw_trap_show, + qeth_hw_trap_store); + +static ssize_t qeth_dev_blkt_store(struct qeth_card *card, + const char *buf, size_t count, int *value, int max_value) +{ + unsigned int input; + int rc; + + rc = kstrtouint(buf, 10, &input); + if (rc) + return rc; + + if (input > max_value) + return -EINVAL; + + mutex_lock(&card->conf_mutex); + if (card->state != CARD_STATE_DOWN) + rc = -EPERM; + else + *value = input; + mutex_unlock(&card->conf_mutex); + return rc ? 
rc : count; +} + +static ssize_t qeth_dev_blkt_total_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return sprintf(buf, "%i\n", card->info.blkt.time_total); +} + +static ssize_t qeth_dev_blkt_total_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_dev_blkt_store(card, buf, count, + &card->info.blkt.time_total, 5000); +} + +static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show, + qeth_dev_blkt_total_store); + +static ssize_t qeth_dev_blkt_inter_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return sprintf(buf, "%i\n", card->info.blkt.inter_packet); +} + +static ssize_t qeth_dev_blkt_inter_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_dev_blkt_store(card, buf, count, + &card->info.blkt.inter_packet, 1000); +} + +static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show, + qeth_dev_blkt_inter_store); + +static ssize_t qeth_dev_blkt_inter_jumbo_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return sprintf(buf, "%i\n", card->info.blkt.inter_packet_jumbo); +} + +static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_dev_blkt_store(card, buf, count, + &card->info.blkt.inter_packet_jumbo, 1000); +} + +static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show, + qeth_dev_blkt_inter_jumbo_store); + +static struct attribute *qeth_blkt_device_attrs[] = { + &dev_attr_total.attr, + &dev_attr_inter.attr, + &dev_attr_inter_jumbo.attr, + NULL, +}; +const struct attribute_group qeth_device_blkt_group = { + .name = "blkt", + .attrs = qeth_blkt_device_attrs, +}; +EXPORT_SYMBOL_GPL(qeth_device_blkt_group); + +static struct attribute *qeth_device_attrs[] = { + &dev_attr_state.attr, + &dev_attr_chpid.attr, + &dev_attr_if_name.attr, + &dev_attr_card_type.attr, + &dev_attr_inbuf_size.attr, + &dev_attr_portno.attr, + &dev_attr_portname.attr, + &dev_attr_priority_queueing.attr, + &dev_attr_buffer_count.attr, + &dev_attr_recover.attr, + &dev_attr_performance_stats.attr, + &dev_attr_layer2.attr, + &dev_attr_isolation.attr, + &dev_attr_hw_trap.attr, + &dev_attr_switch_attrs.attr, + NULL, +}; +const struct attribute_group qeth_device_attr_group = { + .attrs = qeth_device_attrs, +}; +EXPORT_SYMBOL_GPL(qeth_device_attr_group); + +const struct attribute_group *qeth_generic_attr_groups[] = { + &qeth_device_attr_group, + &qeth_device_blkt_group, + NULL, +}; + +static struct attribute *qeth_osn_device_attrs[] = { + &dev_attr_state.attr, + &dev_attr_chpid.attr, + &dev_attr_if_name.attr, + &dev_attr_card_type.attr, + &dev_attr_buffer_count.attr, + &dev_attr_recover.attr, + NULL, +}; +static struct attribute_group qeth_osn_device_attr_group = { + .attrs = qeth_osn_device_attrs, +}; +const struct attribute_group *qeth_osn_attr_groups[] = { + &qeth_osn_device_attr_group, + NULL, +}; diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c new file mode 100644 index 000000000..b5caa7233 --- /dev/null +++ b/drivers/s390/net/qeth_ethtool.c @@ -0,0 +1,546 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 
IBM Corp. 2018 + */ + +#define KMSG_COMPONENT "qeth" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/ethtool.h> +#include "qeth_core.h" + + +#define QETH_TXQ_STAT(_name, _stat) { \ + .name = _name, \ + .offset = offsetof(struct qeth_out_q_stats, _stat) \ +} + +#define QETH_CARD_STAT(_name, _stat) { \ + .name = _name, \ + .offset = offsetof(struct qeth_card_stats, _stat) \ +} + +struct qeth_stats { + char name[ETH_GSTRING_LEN]; + unsigned int offset; +}; + +static const struct qeth_stats txq_stats[] = { + QETH_TXQ_STAT("IO buffers", bufs), + QETH_TXQ_STAT("IO buffer elements", buf_elements), + QETH_TXQ_STAT("packed IO buffers", bufs_pack), + QETH_TXQ_STAT("skbs", tx_packets), + QETH_TXQ_STAT("packed skbs", skbs_pack), + QETH_TXQ_STAT("SG skbs", skbs_sg), + QETH_TXQ_STAT("HW csum skbs", skbs_csum), + QETH_TXQ_STAT("TSO skbs", skbs_tso), + QETH_TXQ_STAT("linearized skbs", skbs_linearized), + QETH_TXQ_STAT("linearized+error skbs", skbs_linearized_fail), + QETH_TXQ_STAT("TSO bytes", tso_bytes), + QETH_TXQ_STAT("Packing mode switches", packing_mode_switch), + QETH_TXQ_STAT("Queue stopped", stopped), + QETH_TXQ_STAT("Doorbell", doorbell), + QETH_TXQ_STAT("IRQ for frames", coal_frames), + QETH_TXQ_STAT("Completion yield", completion_yield), + QETH_TXQ_STAT("Completion timer", completion_timer), +}; + +static const struct qeth_stats card_stats[] = { + QETH_CARD_STAT("rx0 IO buffers", rx_bufs), + QETH_CARD_STAT("rx0 HW csum skbs", rx_skb_csum), + QETH_CARD_STAT("rx0 SG skbs", rx_sg_skbs), + QETH_CARD_STAT("rx0 SG page frags", rx_sg_frags), + QETH_CARD_STAT("rx0 SG page allocs", rx_sg_alloc_page), + QETH_CARD_STAT("rx0 dropped, no memory", rx_dropped_nomem), + QETH_CARD_STAT("rx0 dropped, bad format", rx_dropped_notsupp), + QETH_CARD_STAT("rx0 dropped, runt", rx_dropped_runt), +}; + +#define TXQ_STATS_LEN ARRAY_SIZE(txq_stats) +#define CARD_STATS_LEN ARRAY_SIZE(card_stats) + +static void qeth_add_stat_data(u64 **dst, void *src, + const struct qeth_stats stats[], + unsigned int size) +{ + unsigned int i; + char *stat; + + for (i = 0; i < size; i++) { + stat = (char *)src + stats[i].offset; + **dst = *(u64 *)stat; + (*dst)++; + } +} + +static void qeth_add_stat_strings(u8 **data, const char *prefix, + const struct qeth_stats stats[], + unsigned int size) +{ + unsigned int i; + + for (i = 0; i < size; i++) { + snprintf(*data, ETH_GSTRING_LEN, "%s%s", prefix, stats[i].name); + *data += ETH_GSTRING_LEN; + } +} + +static int qeth_get_sset_count(struct net_device *dev, int stringset) +{ + struct qeth_card *card = dev->ml_priv; + + switch (stringset) { + case ETH_SS_STATS: + return CARD_STATS_LEN + + card->qdio.no_out_queues * TXQ_STATS_LEN; + default: + return -EINVAL; + } +} + +static void qeth_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct qeth_card *card = dev->ml_priv; + unsigned int i; + + qeth_add_stat_data(&data, &card->stats, card_stats, CARD_STATS_LEN); + for (i = 0; i < card->qdio.no_out_queues; i++) + qeth_add_stat_data(&data, &card->qdio.out_qs[i]->stats, + txq_stats, TXQ_STATS_LEN); +} + +static void __qeth_set_coalesce(struct net_device *dev, + struct qeth_qdio_out_q *queue, + struct ethtool_coalesce *coal) +{ + WRITE_ONCE(queue->coalesce_usecs, coal->tx_coalesce_usecs); + WRITE_ONCE(queue->max_coalesced_frames, coal->tx_max_coalesced_frames); + + if (coal->tx_coalesce_usecs && + netif_running(dev) && + !qeth_out_queue_is_empty(queue)) + qeth_tx_arm_timer(queue, coal->tx_coalesce_usecs); +} + +static int qeth_set_coalesce(struct 
net_device *dev, + struct ethtool_coalesce *coal) +{ + struct qeth_card *card = dev->ml_priv; + struct qeth_qdio_out_q *queue; + unsigned int i; + + if (!IS_IQD(card)) + return -EOPNOTSUPP; + + if (!coal->tx_coalesce_usecs && !coal->tx_max_coalesced_frames) + return -EINVAL; + + qeth_for_each_output_queue(card, queue, i) + __qeth_set_coalesce(dev, queue, coal); + + return 0; +} + +static void qeth_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *param) +{ + struct qeth_card *card = dev->ml_priv; + + param->rx_max_pending = QDIO_MAX_BUFFERS_PER_Q; + param->rx_mini_max_pending = 0; + param->rx_jumbo_max_pending = 0; + param->tx_max_pending = QDIO_MAX_BUFFERS_PER_Q; + + param->rx_pending = card->qdio.in_buf_pool.buf_count; + param->rx_mini_pending = 0; + param->rx_jumbo_pending = 0; + param->tx_pending = QDIO_MAX_BUFFERS_PER_Q; +} + +static void qeth_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + struct qeth_card *card = dev->ml_priv; + char prefix[ETH_GSTRING_LEN] = ""; + unsigned int i; + + switch (stringset) { + case ETH_SS_STATS: + qeth_add_stat_strings(&data, prefix, card_stats, + CARD_STATS_LEN); + for (i = 0; i < card->qdio.no_out_queues; i++) { + snprintf(prefix, ETH_GSTRING_LEN, "tx%u ", i); + qeth_add_stat_strings(&data, prefix, txq_stats, + TXQ_STATS_LEN); + } + break; + default: + WARN_ON(1); + break; + } +} + +static void qeth_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct qeth_card *card = dev->ml_priv; + + strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3", + sizeof(info->driver)); + strlcpy(info->fw_version, card->info.mcl_level, + sizeof(info->fw_version)); + snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s", + CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card)); +} + +static void qeth_get_channels(struct net_device *dev, + struct ethtool_channels *channels) +{ + struct qeth_card *card = dev->ml_priv; + + channels->max_rx = dev->num_rx_queues; + channels->max_tx = card->qdio.no_out_queues; + channels->max_other = 0; + channels->max_combined = 0; + channels->rx_count = dev->real_num_rx_queues; + channels->tx_count = dev->real_num_tx_queues; + channels->other_count = 0; + channels->combined_count = 0; +} + +static int qeth_set_channels(struct net_device *dev, + struct ethtool_channels *channels) +{ + struct qeth_priv *priv = netdev_priv(dev); + struct qeth_card *card = dev->ml_priv; + int rc; + + if (channels->rx_count == 0 || channels->tx_count == 0) + return -EINVAL; + if (channels->tx_count > card->qdio.no_out_queues) + return -EINVAL; + + /* Prio-queueing needs all TX queues: */ + if (qeth_uses_tx_prio_queueing(card)) + return -EPERM; + + if (IS_IQD(card)) { + if (channels->tx_count < QETH_IQD_MIN_TXQ) + return -EINVAL; + + /* Reject downgrade while running. It could push displaced + * ucast flows onto txq0, which is reserved for mcast. 
+ */ + if (netif_running(dev) && + channels->tx_count < dev->real_num_tx_queues) + return -EPERM; + } + + rc = qeth_set_real_num_tx_queues(card, channels->tx_count); + if (!rc) + priv->tx_wanted_queues = channels->tx_count; + + return rc; +} + +static int qeth_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) +{ + struct qeth_card *card = dev->ml_priv; + + if (!IS_IQD(card)) + return -EOPNOTSUPP; + + return ethtool_op_get_ts_info(dev, info); +} + +static int qeth_get_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, void *data) +{ + struct qeth_priv *priv = netdev_priv(dev); + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + *(u32 *)data = priv->rx_copybreak; + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int qeth_set_tunable(struct net_device *dev, + const struct ethtool_tunable *tuna, + const void *data) +{ + struct qeth_priv *priv = netdev_priv(dev); + + switch (tuna->id) { + case ETHTOOL_RX_COPYBREAK: + WRITE_ONCE(priv->rx_copybreak, *(u32 *)data); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int qeth_get_per_queue_coalesce(struct net_device *dev, u32 __queue, + struct ethtool_coalesce *coal) +{ + struct qeth_card *card = dev->ml_priv; + struct qeth_qdio_out_q *queue; + + if (!IS_IQD(card)) + return -EOPNOTSUPP; + + if (__queue >= card->qdio.no_out_queues) + return -EINVAL; + + queue = card->qdio.out_qs[__queue]; + + coal->tx_coalesce_usecs = queue->coalesce_usecs; + coal->tx_max_coalesced_frames = queue->max_coalesced_frames; + return 0; +} + +static int qeth_set_per_queue_coalesce(struct net_device *dev, u32 queue, + struct ethtool_coalesce *coal) +{ + struct qeth_card *card = dev->ml_priv; + + if (!IS_IQD(card)) + return -EOPNOTSUPP; + + if (queue >= card->qdio.no_out_queues) + return -EINVAL; + + if (!coal->tx_coalesce_usecs && !coal->tx_max_coalesced_frames) + return -EINVAL; + + __qeth_set_coalesce(dev, card->qdio.out_qs[queue], coal); + return 0; +} + +/* Helper function to fill 'advertising' and 'supported' which are the same. */ +/* Autoneg and full-duplex are supported and advertised unconditionally. */ +/* Always advertise and support all speeds up to specified, and only one */ +/* specified port type. 
*/ +static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd, + int maxspeed, int porttype) +{ + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising); + + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + + switch (porttype) { + case PORT_TP: + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + break; + case PORT_FIBRE: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + WARN_ON_ONCE(1); + } + + /* partially does fall through, to also select lower speeds */ + switch (maxspeed) { + case SPEED_25000: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 25000baseSR_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 25000baseSR_Full); + break; + case SPEED_10000: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseT_Full); + fallthrough; + case SPEED_1000: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Half); + fallthrough; + case SPEED_100: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Half); + fallthrough; + case SPEED_10: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Half); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Half); + WARN_ON_ONCE(1); + } +} + + +static int qeth_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct qeth_card *card = netdev->ml_priv; + enum qeth_link_types link_type; + struct carrier_info carrier_info; + int rc; + + if (IS_IQD(card) || IS_VM_NIC(card)) + link_type = QETH_LINK_TYPE_10GBIT_ETH; + else + link_type = card->info.link_type; + + cmd->base.duplex = DUPLEX_FULL; + cmd->base.autoneg = AUTONEG_ENABLE; + cmd->base.phy_address = 0; + cmd->base.mdio_support = 0; + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; + + switch (link_type) { + case QETH_LINK_TYPE_FAST_ETH: + case QETH_LINK_TYPE_LANE_ETH100: + cmd->base.speed = SPEED_100; + cmd->base.port = PORT_TP; + break; + case QETH_LINK_TYPE_GBIT_ETH: + case QETH_LINK_TYPE_LANE_ETH1000: + cmd->base.speed = SPEED_1000; + cmd->base.port = PORT_FIBRE; + 
break; + case QETH_LINK_TYPE_10GBIT_ETH: + cmd->base.speed = SPEED_10000; + cmd->base.port = PORT_FIBRE; + break; + case QETH_LINK_TYPE_25GBIT_ETH: + cmd->base.speed = SPEED_25000; + cmd->base.port = PORT_FIBRE; + break; + default: + cmd->base.speed = SPEED_10; + cmd->base.port = PORT_TP; + } + qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port); + + /* Check if we can obtain more accurate information. */ + /* If QUERY_CARD_INFO command is not supported or fails, */ + /* just return the heuristics that were filled in above. */ + rc = qeth_query_card_info(card, &carrier_info); + if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */ + return 0; + if (rc) /* report error from the hardware operation */ + return rc; + /* on success, fill in the information obtained from the hardware */ + + netdev_dbg(netdev, + "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n", + carrier_info.card_type, + carrier_info.port_mode, + carrier_info.port_speed); + + /* Update attributes for which we've obtained more authoritative */ + /* information, leave the rest the way they were filled in above. */ + switch (carrier_info.card_type) { + case CARD_INFO_TYPE_1G_COPPER_A: + case CARD_INFO_TYPE_1G_COPPER_B: + cmd->base.port = PORT_TP; + qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port); + break; + case CARD_INFO_TYPE_1G_FIBRE_A: + case CARD_INFO_TYPE_1G_FIBRE_B: + cmd->base.port = PORT_FIBRE; + qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port); + break; + case CARD_INFO_TYPE_10G_FIBRE_A: + case CARD_INFO_TYPE_10G_FIBRE_B: + cmd->base.port = PORT_FIBRE; + qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port); + break; + } + + switch (carrier_info.port_mode) { + case CARD_INFO_PORTM_FULLDUPLEX: + cmd->base.duplex = DUPLEX_FULL; + break; + case CARD_INFO_PORTM_HALFDUPLEX: + cmd->base.duplex = DUPLEX_HALF; + break; + } + + switch (carrier_info.port_speed) { + case CARD_INFO_PORTS_10M: + cmd->base.speed = SPEED_10; + break; + case CARD_INFO_PORTS_100M: + cmd->base.speed = SPEED_100; + break; + case CARD_INFO_PORTS_1G: + cmd->base.speed = SPEED_1000; + break; + case CARD_INFO_PORTS_10G: + cmd->base.speed = SPEED_10000; + break; + case CARD_INFO_PORTS_25G: + cmd->base.speed = SPEED_25000; + break; + } + + return 0; +} + +const struct ethtool_ops qeth_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS | + ETHTOOL_COALESCE_TX_MAX_FRAMES, + .get_link = ethtool_op_get_link, + .set_coalesce = qeth_set_coalesce, + .get_ringparam = qeth_get_ringparam, + .get_strings = qeth_get_strings, + .get_ethtool_stats = qeth_get_ethtool_stats, + .get_sset_count = qeth_get_sset_count, + .get_drvinfo = qeth_get_drvinfo, + .get_channels = qeth_get_channels, + .set_channels = qeth_set_channels, + .get_ts_info = qeth_get_ts_info, + .get_tunable = qeth_get_tunable, + .set_tunable = qeth_set_tunable, + .get_per_queue_coalesce = qeth_get_per_queue_coalesce, + .set_per_queue_coalesce = qeth_set_per_queue_coalesce, + .get_link_ksettings = qeth_get_link_ksettings, +}; + +const struct ethtool_ops qeth_osn_ethtool_ops = { + .get_strings = qeth_get_strings, + .get_ethtool_stats = qeth_get_ethtool_stats, + .get_sset_count = qeth_get_sset_count, + .get_drvinfo = qeth_get_drvinfo, +}; diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h new file mode 100644 index 000000000..296d73d84 --- /dev/null +++ b/drivers/s390/net/qeth_l2.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 
2013 + * Author(s): Eugene Crosser <eugene.crosser@ru.ibm.com> + */ + +#ifndef __QETH_L2_H__ +#define __QETH_L2_H__ + +#include "qeth_core.h" + +extern const struct attribute_group *qeth_l2_attr_groups[]; + +int qeth_l2_create_device_attributes(struct device *); +void qeth_l2_remove_device_attributes(struct device *); +int qeth_bridgeport_query_ports(struct qeth_card *card, + enum qeth_sbp_roles *role, + enum qeth_sbp_states *state); +int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); +int qeth_bridgeport_an_set(struct qeth_card *card, int enable); + +int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state); +int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state); +int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout); +int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout); +bool qeth_bridgeport_allowed(struct qeth_card *card); + +struct qeth_mac { + u8 mac_addr[ETH_ALEN]; + u8 disp_flag:2; + struct hlist_node hnode; +}; + +static inline bool qeth_bridgeport_is_in_use(struct qeth_card *card) +{ + return card->options.sbp.role || + card->options.sbp.reflect_promisc || + card->options.sbp.hostnotification; +} + +#endif /* __QETH_L2_H__ */ diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c new file mode 100644 index 000000000..1797addf6 --- /dev/null +++ b/drivers/s390/net/qeth_l2_main.c @@ -0,0 +1,2357 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2007, 2009 + * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, + * Frank Pavlic <fpavlic@de.ibm.com>, + * Thomas Spatzier <tspat@de.ibm.com>, + * Frank Blaschka <frank.blaschka@de.ibm.com> + */ + +#define KMSG_COMPONENT "qeth" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/etherdevice.h> +#include <linux/if_bridge.h> +#include <linux/list.h> +#include <linux/hash.h> +#include <linux/hashtable.h> +#include <net/switchdev.h> +#include <asm/chsc.h> +#include <asm/css_chars.h> +#include <asm/setup.h> +#include "qeth_core.h" +#include "qeth_l2.h" + +static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode) +{ + int rc; + + if (retcode) + QETH_CARD_TEXT_(card, 2, "err%04x", retcode); + switch (retcode) { + case IPA_RC_SUCCESS: + rc = 0; + break; + case IPA_RC_L2_UNSUPPORTED_CMD: + rc = -EOPNOTSUPP; + break; + case IPA_RC_L2_ADDR_TABLE_FULL: + rc = -ENOSPC; + break; + case IPA_RC_L2_DUP_MAC: + case IPA_RC_L2_DUP_LAYER3_MAC: + rc = -EADDRINUSE; + break; + case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: + case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: + rc = -EADDRNOTAVAIL; + break; + case IPA_RC_L2_MAC_NOT_FOUND: + rc = -ENOENT; + break; + default: + rc = -EIO; + break; + } + return rc; +} + +static int qeth_l2_send_setdelmac_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + return qeth_l2_setdelmac_makerc(card, cmd->hdr.return_code); +} + +static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, + enum qeth_ipa_cmds ipacmd) +{ + struct qeth_ipa_cmd *cmd; + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "L2sdmac"); + iob = qeth_ipa_alloc_cmd(card, ipacmd, QETH_PROT_IPV4, + IPA_DATA_SIZEOF(setdelmac)); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + cmd->data.setdelmac.mac_length = ETH_ALEN; + 
ether_addr_copy(cmd->data.setdelmac.mac, mac); + return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelmac_cb, NULL); +} + +static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) +{ + int rc; + + QETH_CARD_TEXT(card, 2, "L2Setmac"); + rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC); + if (rc == 0) { + dev_info(&card->gdev->dev, + "MAC address %pM successfully registered\n", mac); + } else { + switch (rc) { + case -EADDRINUSE: + dev_warn(&card->gdev->dev, + "MAC address %pM already exists\n", mac); + break; + case -EADDRNOTAVAIL: + dev_warn(&card->gdev->dev, + "MAC address %pM is not authorized\n", mac); + break; + } + } + return rc; +} + +static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) +{ + enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ? + IPA_CMD_SETGMAC : IPA_CMD_SETVMAC; + int rc; + + QETH_CARD_TEXT(card, 2, "L2Wmac"); + rc = qeth_l2_send_setdelmac(card, mac, cmd); + if (rc == -EADDRINUSE) + QETH_DBF_MESSAGE(2, "MAC already registered on device %x\n", + CARD_DEVID(card)); + else if (rc) + QETH_DBF_MESSAGE(2, "Failed to register MAC on device %x: %d\n", + CARD_DEVID(card), rc); + return rc; +} + +static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac) +{ + enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ? + IPA_CMD_DELGMAC : IPA_CMD_DELVMAC; + int rc; + + QETH_CARD_TEXT(card, 2, "L2Rmac"); + rc = qeth_l2_send_setdelmac(card, mac, cmd); + if (rc) + QETH_DBF_MESSAGE(2, "Failed to delete MAC on device %u: %d\n", + CARD_DEVID(card), rc); + return rc; +} + +static void qeth_l2_drain_rx_mode_cache(struct qeth_card *card) +{ + struct qeth_mac *mac; + struct hlist_node *tmp; + int i; + + hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) { + hash_del(&mac->hnode); + kfree(mac); + } +} + +static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue, + struct qeth_hdr *hdr, struct sk_buff *skb, + int ipv, unsigned int data_len) +{ + int cast_type = qeth_get_ether_cast_type(skb); + struct vlan_ethhdr *veth = vlan_eth_hdr(skb); + + hdr->hdr.l2.pkt_length = data_len; + + if (skb_is_gso(skb)) { + hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO; + } else { + hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2; + if (skb->ip_summed == CHECKSUM_PARTIAL) + qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv); + } + + /* set byte 3 to the casting flags */ + if (cast_type == RTN_MULTICAST) + hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST; + else if (cast_type == RTN_BROADCAST) + hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST; + else + hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST; + + /* VSWITCH relies on the VLAN + * information to be present in + * the QDIO header */ + if (veth->h_vlan_proto == htons(ETH_P_8021Q)) { + hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN; + hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI); + } +} + +static int qeth_l2_setdelvlan_makerc(struct qeth_card *card, u16 retcode) +{ + if (retcode) + QETH_CARD_TEXT_(card, 2, "err%04x", retcode); + + switch (retcode) { + case IPA_RC_SUCCESS: + return 0; + case IPA_RC_L2_INVALID_VLAN_ID: + return -EINVAL; + case IPA_RC_L2_DUP_VLAN_ID: + return -EEXIST; + case IPA_RC_L2_VLAN_ID_NOT_FOUND: + return -ENOENT; + case IPA_RC_L2_VLAN_ID_NOT_ALLOWED: + return -EPERM; + default: + return -EIO; + } +} + +static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + QETH_CARD_TEXT(card, 2, "L2sdvcb"); + if (cmd->hdr.return_code) { + QETH_DBF_MESSAGE(2, "Error in processing VLAN %u on 
device %x: %#x.\n", + cmd->data.setdelvlan.vlan_id, + CARD_DEVID(card), cmd->hdr.return_code); + QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command); + } + return qeth_l2_setdelvlan_makerc(card, cmd->hdr.return_code); +} + +static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, + enum qeth_ipa_cmds ipacmd) +{ + struct qeth_ipa_cmd *cmd; + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd); + iob = qeth_ipa_alloc_cmd(card, ipacmd, QETH_PROT_IPV4, + IPA_DATA_SIZEOF(setdelvlan)); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + cmd->data.setdelvlan.vlan_id = i; + return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelvlan_cb, NULL); +} + +static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_CARD_TEXT_(card, 4, "aid:%d", vid); + if (!vid) + return 0; + + return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); +} + +static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_CARD_TEXT_(card, 4, "kid:%d", vid); + if (!vid) + return 0; + + return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); +} + +static void qeth_l2_set_pnso_mode(struct qeth_card *card, + enum qeth_pnso_mode mode) +{ + spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card))); + WRITE_ONCE(card->info.pnso_mode, mode); + spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card))); + + if (mode == QETH_PNSO_NONE) + drain_workqueue(card->event_wq); +} + +static void qeth_l2_dev2br_fdb_flush(struct qeth_card *card) +{ + struct switchdev_notifier_fdb_info info; + + QETH_CARD_TEXT(card, 2, "fdbflush"); + + info.addr = NULL; + /* flush all VLANs: */ + info.vid = 0; + info.added_by_user = false; + info.offloaded = true; + + call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE, + card->dev, &info.info, NULL); +} + +static int qeth_l2_request_initial_mac(struct qeth_card *card) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 2, "l2reqmac"); + + if (MACHINE_IS_VM) { + rc = qeth_vm_request_mac(card); + if (!rc) + goto out; + QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %x: %#x\n", + CARD_DEVID(card), rc); + QETH_CARD_TEXT_(card, 2, "err%04x", rc); + /* fall back to alternative mechanism: */ + } + + if (!IS_OSN(card)) { + rc = qeth_setadpparms_change_macaddr(card); + if (!rc) + goto out; + QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n", + CARD_DEVID(card), rc); + QETH_CARD_TEXT_(card, 2, "1err%04x", rc); + /* fall back once more: */ + } + + /* some devices don't support a custom MAC address: */ + if (IS_OSM(card) || IS_OSX(card)) + return (rc) ? 
rc : -EADDRNOTAVAIL; + eth_hw_addr_random(card->dev); + +out: + QETH_CARD_HEX(card, 2, card->dev->dev_addr, card->dev->addr_len); + return 0; +} + +static void qeth_l2_register_dev_addr(struct qeth_card *card) +{ + if (!is_valid_ether_addr(card->dev->dev_addr)) + qeth_l2_request_initial_mac(card); + + if (!IS_OSN(card) && !qeth_l2_send_setmac(card, card->dev->dev_addr)) + card->info.dev_addr_is_registered = 1; + else + card->info.dev_addr_is_registered = 0; +} + +static int qeth_l2_validate_addr(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + if (card->info.dev_addr_is_registered) + return eth_validate_addr(dev); + + QETH_CARD_TEXT(card, 4, "nomacadr"); + return -EPERM; +} + +static int qeth_l2_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct qeth_card *card = dev->ml_priv; + u8 old_addr[ETH_ALEN]; + int rc = 0; + + QETH_CARD_TEXT(card, 3, "setmac"); + + if (IS_OSM(card) || IS_OSX(card)) { + QETH_CARD_TEXT(card, 3, "setmcTYP"); + return -EOPNOTSUPP; + } + QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN); + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + /* don't register the same address twice */ + if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) && + card->info.dev_addr_is_registered) + return 0; + + /* add the new address, switch over, drop the old */ + rc = qeth_l2_send_setmac(card, addr->sa_data); + if (rc) + return rc; + ether_addr_copy(old_addr, dev->dev_addr); + ether_addr_copy(dev->dev_addr, addr->sa_data); + + if (card->info.dev_addr_is_registered) + qeth_l2_remove_mac(card, old_addr); + card->info.dev_addr_is_registered = 1; + return 0; +} + +static void qeth_l2_promisc_to_bridge(struct qeth_card *card, bool enable) +{ + int role; + int rc; + + QETH_CARD_TEXT(card, 3, "pmisc2br"); + + if (enable) { + if (card->options.sbp.reflect_promisc_primary) + role = QETH_SBP_ROLE_PRIMARY; + else + role = QETH_SBP_ROLE_SECONDARY; + } else + role = QETH_SBP_ROLE_NONE; + + rc = qeth_bridgeport_setrole(card, role); + QETH_CARD_TEXT_(card, 2, "bpm%c%04x", enable ? 
'+' : '-', rc); + if (!rc) { + card->options.sbp.role = role; + card->info.promisc_mode = enable; + } +} + +static void qeth_l2_set_promisc_mode(struct qeth_card *card) +{ + bool enable = card->dev->flags & IFF_PROMISC; + + if (card->info.promisc_mode == enable) + return; + + if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) { + qeth_setadp_promisc_mode(card, enable); + } else { + mutex_lock(&card->sbp_lock); + if (card->options.sbp.reflect_promisc) + qeth_l2_promisc_to_bridge(card, enable); + mutex_unlock(&card->sbp_lock); + } +} + +/* A new MAC address is added to the hash table and marked to be written to + * the card only if it is not already present in the hash table. + * +*/ +static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha) +{ + u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2])); + struct qeth_mac *mac; + + hash_for_each_possible(card->rx_mode_addrs, mac, hnode, mac_hash) { + if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) { + mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING; + return; + } + } + + mac = kzalloc(sizeof(struct qeth_mac), GFP_ATOMIC); + if (!mac) + return; + + ether_addr_copy(mac->mac_addr, ha->addr); + mac->disp_flag = QETH_DISP_ADDR_ADD; + + hash_add(card->rx_mode_addrs, &mac->hnode, mac_hash); +} + +static void qeth_l2_rx_mode_work(struct work_struct *work) +{ + struct qeth_card *card = container_of(work, struct qeth_card, + rx_mode_work); + struct net_device *dev = card->dev; + struct netdev_hw_addr *ha; + struct qeth_mac *mac; + struct hlist_node *tmp; + int i; + int rc; + + QETH_CARD_TEXT(card, 3, "setmulti"); + + netif_addr_lock_bh(dev); + netdev_for_each_mc_addr(ha, dev) + qeth_l2_add_mac(card, ha); + netdev_for_each_uc_addr(ha, dev) + qeth_l2_add_mac(card, ha); + netif_addr_unlock_bh(dev); + + hash_for_each_safe(card->rx_mode_addrs, i, tmp, mac, hnode) { + switch (mac->disp_flag) { + case QETH_DISP_ADDR_DELETE: + qeth_l2_remove_mac(card, mac->mac_addr); + hash_del(&mac->hnode); + kfree(mac); + break; + case QETH_DISP_ADDR_ADD: + rc = qeth_l2_write_mac(card, mac->mac_addr); + if (rc) { + hash_del(&mac->hnode); + kfree(mac); + break; + } + fallthrough; + default: + /* for next call to set_rx_mode(): */ + mac->disp_flag = QETH_DISP_ADDR_DELETE; + } + } + + qeth_l2_set_promisc_mode(card); +} + +static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue) +{ + gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0); + struct qeth_hdr *hdr = (struct qeth_hdr *)skb->data; + addr_t end = (addr_t)(skb->data + sizeof(*hdr)); + addr_t start = (addr_t)skb->data; + unsigned int elements = 0; + unsigned int hd_len = 0; + int rc; + + if (skb->protocol == htons(ETH_P_IPV6)) + return -EPROTONOSUPPORT; + + if (qeth_get_elements_for_range(start, end) > 1) { + /* Misaligned HW header, move it to its own buffer element. 
*/ + hdr = kmem_cache_alloc(qeth_core_header_cache, gfp); + if (!hdr) + return -ENOMEM; + hd_len = sizeof(*hdr); + skb_copy_from_linear_data(skb, (char *)hdr, hd_len); + elements++; + } + + elements += qeth_count_elements(skb, hd_len); + if (elements > queue->max_elements) { + rc = -E2BIG; + goto out; + } + + rc = qeth_do_send_packet(card, queue, skb, hdr, hd_len, hd_len, + elements); +out: + if (rc && hd_len) + kmem_cache_free(qeth_core_header_cache, hdr); + return rc; +} + +static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + u16 txq = skb_get_queue_mapping(skb); + struct qeth_qdio_out_q *queue; + int rc; + + if (!skb_is_gso(skb)) + qdisc_skb_cb(skb)->pkt_len = skb->len; + if (IS_IQD(card)) + txq = qeth_iqd_translate_txq(dev, txq); + queue = card->qdio.out_qs[txq]; + + if (IS_OSN(card)) + rc = qeth_l2_xmit_osn(card, skb, queue); + else + rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb), + qeth_l2_fill_header); + + if (!rc) + return NETDEV_TX_OK; + + QETH_TXQ_STAT_INC(queue, tx_dropped); + kfree_skb(skb); + return NETDEV_TX_OK; +} + +static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct qeth_card *card = dev->ml_priv; + + if (IS_IQD(card)) + return qeth_iqd_select_queue(dev, skb, + qeth_get_ether_cast_type(skb), + sb_dev); + if (qeth_uses_tx_prio_queueing(card)) + return qeth_get_priority_queue(card, skb); + + return netdev_pick_tx(dev, skb, sb_dev); +} + +static void qeth_l2_set_rx_mode(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + schedule_work(&card->rx_mode_work); +} + +/** + * qeth_l2_pnso() - perform network subchannel operation + * @card: qeth_card structure pointer + * @oc: Operation Code + * @cnc: Boolean Change-Notification Control + * @cb: Callback function will be executed for each element + * of the address list + * @priv: Pointer to pass to the callback function. + * + * Collects network information in a network address list and calls the + * callback function for every entry in the list. If "change-notification- + * control" is set, further changes in the address list will be reported + * via the IPA command. 
+ */ +static int qeth_l2_pnso(struct qeth_card *card, u8 oc, int cnc, + void (*cb)(void *priv, struct chsc_pnso_naid_l2 *entry), + void *priv) +{ + struct ccw_device *ddev = CARD_DDEV(card); + struct chsc_pnso_area *rr; + u32 prev_instance = 0; + int isfirstblock = 1; + int i, size, elems; + int rc; + + rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL); + if (rr == NULL) + return -ENOMEM; + do { + QETH_CARD_TEXT(card, 2, "PNSO"); + /* on the first iteration, naihdr.resume_token will be zero */ + rc = ccw_device_pnso(ddev, rr, oc, rr->naihdr.resume_token, + cnc); + if (rc) + continue; + if (cb == NULL) + continue; + + size = rr->naihdr.naids; + if (size != sizeof(struct chsc_pnso_naid_l2)) { + WARN_ON_ONCE(1); + continue; + } + + elems = (rr->response.length - sizeof(struct chsc_header) - + sizeof(struct chsc_pnso_naihdr)) / size; + + if (!isfirstblock && (rr->naihdr.instance != prev_instance)) { + /* Inform the caller that they need to scrap */ + /* the data that was already reported via cb */ + rc = -EAGAIN; + break; + } + isfirstblock = 0; + prev_instance = rr->naihdr.instance; + for (i = 0; i < elems; i++) + (*cb)(priv, &rr->entries[i]); + } while ((rc == -EBUSY) || (!rc && /* list stored */ + /* resume token is non-zero => list incomplete */ + (rr->naihdr.resume_token.t1 || rr->naihdr.resume_token.t2))); + + if (rc) + QETH_CARD_TEXT_(card, 2, "PNrp%04x", rr->response.code); + + free_page((unsigned long)rr); + return rc; +} + +static bool qeth_is_my_net_if_token(struct qeth_card *card, + struct net_if_token *token) +{ + return ((card->info.ddev_devno == token->devnum) && + (card->info.cssid == token->cssid) && + (card->info.iid == token->iid) && + (card->info.ssid == token->ssid) && + (card->info.chpid == token->chpid) && + (card->info.chid == token->chid)); +} + +/** + * qeth_l2_dev2br_fdb_notify() - update fdb of master bridge + * @card: qeth_card structure pointer + * @code: event bitmask: high order bit 0x80 set to + * 1 - removal of an object + * 0 - addition of an object + * Object type(s): + * 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC + * @token: "network token" structure identifying 'physical' location + * of the target + * @addr_lnid: structure with MAC address and VLAN ID of the target + */ +static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code, + struct net_if_token *token, + struct mac_addr_lnid *addr_lnid) +{ + struct switchdev_notifier_fdb_info info; + u8 ntfy_mac[ETH_ALEN]; + + ether_addr_copy(ntfy_mac, addr_lnid->mac); + /* Ignore VLAN only changes */ + if (!(code & IPA_ADDR_CHANGE_CODE_MACADDR)) + return; + /* Ignore mcast entries */ + if (is_multicast_ether_addr(ntfy_mac)) + return; + /* Ignore my own addresses */ + if (qeth_is_my_net_if_token(card, token)) + return; + + info.addr = ntfy_mac; + /* don't report VLAN IDs */ + info.vid = 0; + info.added_by_user = false; + info.offloaded = true; + + if (code & IPA_ADDR_CHANGE_CODE_REMOVAL) { + call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, + card->dev, &info.info, NULL); + QETH_CARD_TEXT(card, 4, "andelmac"); + QETH_CARD_TEXT_(card, 4, + "mc%012lx", ether_addr_to_u64(ntfy_mac)); + } else { + call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, + card->dev, &info.info, NULL); + QETH_CARD_TEXT(card, 4, "anaddmac"); + QETH_CARD_TEXT_(card, 4, + "mc%012lx", ether_addr_to_u64(ntfy_mac)); + } +} + +static void qeth_l2_dev2br_an_set_cb(void *priv, + struct chsc_pnso_naid_l2 *entry) +{ + u8 code = IPA_ADDR_CHANGE_CODE_MACADDR; + struct qeth_card *card = priv; + + if (entry->addr_lnid.lnid < 
VLAN_N_VID) + code |= IPA_ADDR_CHANGE_CODE_VLANID; + qeth_l2_dev2br_fdb_notify(card, code, + (struct net_if_token *)&entry->nit, + (struct mac_addr_lnid *)&entry->addr_lnid); +} + +/** + * qeth_l2_dev2br_an_set() - + * Enable or disable 'dev to bridge network address notification' + * @card: qeth_card structure pointer + * @enable: Enable or disable 'dev to bridge network address notification' + * + * Returns negative errno-compatible error indication or 0 on success. + * + * On enable, emits a series of address notifications for all + * currently registered hosts. + * + * Must be called under rtnl_lock + */ +static int qeth_l2_dev2br_an_set(struct qeth_card *card, bool enable) +{ + int rc; + + if (enable) { + QETH_CARD_TEXT(card, 2, "anseton"); + rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 1, + qeth_l2_dev2br_an_set_cb, card); + if (rc == -EAGAIN) + /* address notification enabled, but inconsistent + * addresses reported -> disable address notification + */ + qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0, + NULL, NULL); + } else { + QETH_CARD_TEXT(card, 2, "ansetoff"); + rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0, NULL, NULL); + } + + return rc; +} + +static int qeth_l2_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, u32 filter_mask, + int nlflags) +{ + struct qeth_priv *priv = netdev_priv(dev); + struct qeth_card *card = dev->ml_priv; + u16 mode = BRIDGE_MODE_UNDEF; + + /* Do not even show qeth devs that cannot do bridge_setlink */ + if (!priv->brport_hw_features || !netif_device_present(dev) || + qeth_bridgeport_is_in_use(card)) + return -EOPNOTSUPP; + + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, + mode, priv->brport_features, + priv->brport_hw_features, + nlflags, filter_mask, NULL); +} + +static const struct nla_policy qeth_brport_policy[IFLA_BRPORT_MAX + 1] = { + [IFLA_BRPORT_LEARNING_SYNC] = { .type = NLA_U8 }, +}; + +/** + * qeth_l2_bridge_setlink() - set bridgeport attributes + * @dev: netdevice + * @nlh: netlink message header + * @flags: bridge flags (here: BRIDGE_FLAGS_SELF) + * @extack: extended ACK report struct + * + * Called under rtnl_lock + */ +static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, + u16 flags, struct netlink_ext_ack *extack) +{ + struct qeth_priv *priv = netdev_priv(dev); + struct nlattr *bp_tb[IFLA_BRPORT_MAX + 1]; + struct qeth_card *card = dev->ml_priv; + struct nlattr *attr, *nested_attr; + bool enable, has_protinfo = false; + int rem1, rem2; + int rc; + + if (!netif_device_present(dev)) + return -ENODEV; + if (!(priv->brport_hw_features)) + return -EOPNOTSUPP; + + nlmsg_for_each_attr(attr, nlh, sizeof(struct ifinfomsg), rem1) { + if (nla_type(attr) == IFLA_PROTINFO) { + rc = nla_parse_nested(bp_tb, IFLA_BRPORT_MAX, attr, + qeth_brport_policy, extack); + if (rc) + return rc; + has_protinfo = true; + } else if (nla_type(attr) == IFLA_AF_SPEC) { + nla_for_each_nested(nested_attr, attr, rem2) { + if (nla_type(nested_attr) == IFLA_BRIDGE_FLAGS) + continue; + NL_SET_ERR_MSG_ATTR(extack, nested_attr, + "Unsupported attribute"); + return -EINVAL; + } + } else { + NL_SET_ERR_MSG_ATTR(extack, attr, "Unsupported attribute"); + return -EINVAL; + } + } + if (!has_protinfo) + return 0; + if (!bp_tb[IFLA_BRPORT_LEARNING_SYNC]) + return -EINVAL; + enable = !!nla_get_u8(bp_tb[IFLA_BRPORT_LEARNING_SYNC]); + + if (enable == !!(priv->brport_features & BR_LEARNING_SYNC)) + return 0; + + mutex_lock(&card->sbp_lock); + /* do not change anything if BridgePort is enabled */ + if 
(qeth_bridgeport_is_in_use(card)) { + NL_SET_ERR_MSG(extack, "n/a (BridgePort)"); + rc = -EBUSY; + } else if (enable) { + qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO); + rc = qeth_l2_dev2br_an_set(card, true); + if (rc) + qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); + else + priv->brport_features |= BR_LEARNING_SYNC; + } else { + rc = qeth_l2_dev2br_an_set(card, false); + if (!rc) { + qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); + priv->brport_features ^= BR_LEARNING_SYNC; + qeth_l2_dev2br_fdb_flush(card); + } + } + mutex_unlock(&card->sbp_lock); + + return rc; +} + +static const struct net_device_ops qeth_l2_netdev_ops = { + .ndo_open = qeth_open, + .ndo_stop = qeth_stop, + .ndo_get_stats64 = qeth_get_stats64, + .ndo_start_xmit = qeth_l2_hard_start_xmit, + .ndo_features_check = qeth_features_check, + .ndo_select_queue = qeth_l2_select_queue, + .ndo_validate_addr = qeth_l2_validate_addr, + .ndo_set_rx_mode = qeth_l2_set_rx_mode, + .ndo_do_ioctl = qeth_do_ioctl, + .ndo_set_mac_address = qeth_l2_set_mac_address, + .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid, + .ndo_tx_timeout = qeth_tx_timeout, + .ndo_fix_features = qeth_fix_features, + .ndo_set_features = qeth_set_features, + .ndo_bridge_getlink = qeth_l2_bridge_getlink, + .ndo_bridge_setlink = qeth_l2_bridge_setlink, +}; + +static const struct net_device_ops qeth_osn_netdev_ops = { + .ndo_open = qeth_open, + .ndo_stop = qeth_stop, + .ndo_get_stats64 = qeth_get_stats64, + .ndo_start_xmit = qeth_l2_hard_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_tx_timeout = qeth_tx_timeout, +}; + +static int qeth_l2_setup_netdev(struct qeth_card *card) +{ + if (IS_OSN(card)) { + card->dev->netdev_ops = &qeth_osn_netdev_ops; + card->dev->flags |= IFF_NOARP; + goto add_napi; + } + + card->dev->needed_headroom = sizeof(struct qeth_hdr); + card->dev->netdev_ops = &qeth_l2_netdev_ops; + card->dev->priv_flags |= IFF_UNICAST_FLT; + + if (IS_OSM(card)) { + card->dev->features |= NETIF_F_VLAN_CHALLENGED; + } else { + if (!IS_VM_NIC(card)) + card->dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + } + + if (IS_OSD(card) && !IS_VM_NIC(card)) { + card->dev->features |= NETIF_F_SG; + /* OSA 3S and earlier has no RX/TX support */ + if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) { + card->dev->hw_features |= NETIF_F_IP_CSUM; + card->dev->vlan_features |= NETIF_F_IP_CSUM; + } + } + if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) { + card->dev->hw_features |= NETIF_F_IPV6_CSUM; + card->dev->vlan_features |= NETIF_F_IPV6_CSUM; + } + if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM) || + qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) { + card->dev->hw_features |= NETIF_F_RXCSUM; + card->dev->vlan_features |= NETIF_F_RXCSUM; + } + if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) { + card->dev->hw_features |= NETIF_F_TSO; + card->dev->vlan_features |= NETIF_F_TSO; + } + if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) { + card->dev->hw_features |= NETIF_F_TSO6; + card->dev->vlan_features |= NETIF_F_TSO6; + } + + if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) { + card->dev->needed_headroom = sizeof(struct qeth_hdr_tso); + netif_keep_dst(card->dev); + netif_set_gso_max_size(card->dev, + PAGE_SIZE * (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)); + } + +add_napi: + netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); + return register_netdev(card->dev); +} + +static void qeth_l2_trace_features(struct qeth_card *card) +{ + 
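/* Purely informational: dump the supported SBP functions and VNICC
+ * characteristics masks into the s390dbf trace area, so they can still
+ * be inspected from a debug trace after the fact.
+ */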
/* Set BridgePort features */ + QETH_CARD_TEXT(card, 2, "featuSBP"); + QETH_CARD_HEX(card, 2, &card->options.sbp.supported_funcs, + sizeof(card->options.sbp.supported_funcs)); + /* VNIC Characteristics features */ + QETH_CARD_TEXT(card, 2, "feaVNICC"); + QETH_CARD_HEX(card, 2, &card->options.vnicc.sup_chars, + sizeof(card->options.vnicc.sup_chars)); +} + +static void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card) +{ + if (!card->options.sbp.reflect_promisc && + card->options.sbp.role != QETH_SBP_ROLE_NONE) { + /* Conditional to avoid spurious error messages */ + qeth_bridgeport_setrole(card, card->options.sbp.role); + /* Let the callback function refresh the stored role value. */ + qeth_bridgeport_query_ports(card, &card->options.sbp.role, + NULL); + } + if (card->options.sbp.hostnotification) { + if (qeth_bridgeport_an_set(card, 1)) + card->options.sbp.hostnotification = 0; + } +} + +/** + * qeth_l2_detect_dev2br_support() - + * Detect whether this card supports 'dev to bridge fdb network address + * change notification' and thus can support the learning_sync bridgeport + * attribute + * @card: qeth_card structure pointer + */ +static void qeth_l2_detect_dev2br_support(struct qeth_card *card) +{ + struct qeth_priv *priv = netdev_priv(card->dev); + bool dev2br_supported; + + QETH_CARD_TEXT(card, 2, "d2brsup"); + if (!IS_IQD(card)) + return; + + /* dev2br requires valid cssid,iid,chid */ + dev2br_supported = card->info.ids_valid && + css_general_characteristics.enarf; + QETH_CARD_TEXT_(card, 2, "D2Bsup%02x", dev2br_supported); + + if (dev2br_supported) + priv->brport_hw_features |= BR_LEARNING_SYNC; + else + priv->brport_hw_features &= ~BR_LEARNING_SYNC; +} + +static void qeth_l2_enable_brport_features(struct qeth_card *card) +{ + struct qeth_priv *priv = netdev_priv(card->dev); + int rc; + + if (priv->brport_features & BR_LEARNING_SYNC) { + if (priv->brport_hw_features & BR_LEARNING_SYNC) { + qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO); + rc = qeth_l2_dev2br_an_set(card, true); + if (rc == -EAGAIN) { + /* Recoverable error, retry once */ + qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); + qeth_l2_dev2br_fdb_flush(card); + qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO); + rc = qeth_l2_dev2br_an_set(card, true); + } + if (rc) { + netdev_err(card->dev, + "failed to enable bridge learning_sync: %d\n", + rc); + qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); + qeth_l2_dev2br_fdb_flush(card); + priv->brport_features ^= BR_LEARNING_SYNC; + } + } else { + dev_warn(&card->gdev->dev, + "bridge learning_sync not supported\n"); + priv->brport_features ^= BR_LEARNING_SYNC; + } + } +} + +#ifdef CONFIG_QETH_OSN +static void qeth_osn_assist_cb(struct qeth_card *card, + struct qeth_cmd_buffer *iob, + unsigned int data_length) +{ + qeth_notify_cmd(iob, 0); + qeth_put_cmd(iob); +} + +int qeth_osn_assist(struct net_device *dev, void *data, int data_len) +{ + struct qeth_cmd_buffer *iob; + struct qeth_card *card; + + if (data_len < 0) + return -EINVAL; + if (!dev) + return -ENODEV; + card = dev->ml_priv; + if (!card) + return -ENODEV; + QETH_CARD_TEXT(card, 2, "osnsdmc"); + if (!qeth_card_hw_is_reachable(card)) + return -ENODEV; + + iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_len, 1, + QETH_IPA_TIMEOUT); + if (!iob) + return -ENOMEM; + + qeth_prepare_ipa_cmd(card, iob, (u16) data_len, NULL); + + memcpy(__ipa_cmd(iob), data, data_len); + iob->callback = qeth_osn_assist_cb; + return qeth_send_ipa_cmd(card, iob, NULL, NULL); +} +EXPORT_SYMBOL(qeth_osn_assist); + +int 
qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev, + int (*assist_cb)(struct net_device *, void *), + int (*data_cb)(struct sk_buff *)) +{ + struct qeth_card *card; + char bus_id[16]; + u16 devno; + + memcpy(&devno, read_dev_no, 2); + sprintf(bus_id, "0.0.%04x", devno); + card = qeth_get_card_by_busid(bus_id); + if (!card || !IS_OSN(card)) + return -ENODEV; + *dev = card->dev; + + QETH_CARD_TEXT(card, 2, "osnreg"); + if ((assist_cb == NULL) || (data_cb == NULL)) + return -EINVAL; + card->osn_info.assist_cb = assist_cb; + card->osn_info.data_cb = data_cb; + return 0; +} +EXPORT_SYMBOL(qeth_osn_register); + +void qeth_osn_deregister(struct net_device *dev) +{ + struct qeth_card *card; + + if (!dev) + return; + card = dev->ml_priv; + if (!card) + return; + QETH_CARD_TEXT(card, 2, "osndereg"); + card->osn_info.assist_cb = NULL; + card->osn_info.data_cb = NULL; +} +EXPORT_SYMBOL(qeth_osn_deregister); +#endif + +/* SETBRIDGEPORT support, async notifications */ + +enum qeth_an_event_type {anev_reg_unreg, anev_abort, anev_reset}; + +/** + * qeth_bridge_emit_host_event() - bridgeport address change notification + * @card: qeth_card structure pointer, for udev events. + * @evtype: "normal" register/unregister, or abort, or reset. For abort + * and reset token and addr_lnid are unused and may be NULL. + * @code: event bitmask: high order bit 0x80 value 1 means removal of an + * object, 0 - addition of an object. + * 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC. + * @token: "network token" structure identifying physical address of the port. + * @addr_lnid: pointer to structure with MAC address and VLAN ID. + * + * This function is called when registrations and deregistrations are + * reported by the hardware, and also when notifications are enabled - + * for all currently registered addresses. + */ +static void qeth_bridge_emit_host_event(struct qeth_card *card, + enum qeth_an_event_type evtype, + u8 code, + struct net_if_token *token, + struct mac_addr_lnid *addr_lnid) +{ + char str[7][32]; + char *env[8]; + int i = 0; + + switch (evtype) { + case anev_reg_unreg: + snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=%s", + (code & IPA_ADDR_CHANGE_CODE_REMOVAL) + ? 
"deregister" : "register"); + env[i] = str[i]; i++; + if (code & IPA_ADDR_CHANGE_CODE_VLANID) { + snprintf(str[i], sizeof(str[i]), "VLAN=%d", + addr_lnid->lnid); + env[i] = str[i]; i++; + } + if (code & IPA_ADDR_CHANGE_CODE_MACADDR) { + snprintf(str[i], sizeof(str[i]), "MAC=%pM", + addr_lnid->mac); + env[i] = str[i]; i++; + } + snprintf(str[i], sizeof(str[i]), "NTOK_BUSID=%x.%x.%04x", + token->cssid, token->ssid, token->devnum); + env[i] = str[i]; i++; + snprintf(str[i], sizeof(str[i]), "NTOK_IID=%02x", token->iid); + env[i] = str[i]; i++; + snprintf(str[i], sizeof(str[i]), "NTOK_CHPID=%02x", + token->chpid); + env[i] = str[i]; i++; + snprintf(str[i], sizeof(str[i]), "NTOK_CHID=%04x", token->chid); + env[i] = str[i]; i++; + break; + case anev_abort: + snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=abort"); + env[i] = str[i]; i++; + break; + case anev_reset: + snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=reset"); + env[i] = str[i]; i++; + break; + } + env[i] = NULL; + kobject_uevent_env(&card->gdev->dev.kobj, KOBJ_CHANGE, env); +} + +struct qeth_bridge_state_data { + struct work_struct worker; + struct qeth_card *card; + u8 role; + u8 state; +}; + +static void qeth_bridge_state_change_worker(struct work_struct *work) +{ + struct qeth_bridge_state_data *data = + container_of(work, struct qeth_bridge_state_data, worker); + char env_locrem[32]; + char env_role[32]; + char env_state[32]; + char *env[] = { + env_locrem, + env_role, + env_state, + NULL + }; + + snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange"); + snprintf(env_role, sizeof(env_role), "ROLE=%s", + (data->role == QETH_SBP_ROLE_NONE) ? "none" : + (data->role == QETH_SBP_ROLE_PRIMARY) ? "primary" : + (data->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" : + "<INVALID>"); + snprintf(env_state, sizeof(env_state), "STATE=%s", + (data->state == QETH_SBP_STATE_INACTIVE) ? "inactive" : + (data->state == QETH_SBP_STATE_STANDBY) ? "standby" : + (data->state == QETH_SBP_STATE_ACTIVE) ? 
"active" : + "<INVALID>"); + kobject_uevent_env(&data->card->gdev->dev.kobj, + KOBJ_CHANGE, env); + kfree(data); +} + +static void qeth_bridge_state_change(struct qeth_card *card, + struct qeth_ipa_cmd *cmd) +{ + struct qeth_sbp_port_data *qports = &cmd->data.sbp.data.port_data; + struct qeth_bridge_state_data *data; + + QETH_CARD_TEXT(card, 2, "brstchng"); + if (qports->num_entries == 0) { + QETH_CARD_TEXT(card, 2, "BPempty"); + return; + } + if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { + QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length); + return; + } + + data = kzalloc(sizeof(*data), GFP_ATOMIC); + if (!data) { + QETH_CARD_TEXT(card, 2, "BPSalloc"); + return; + } + INIT_WORK(&data->worker, qeth_bridge_state_change_worker); + data->card = card; + /* Information for the local port: */ + data->role = qports->entry[0].role; + data->state = qports->entry[0].state; + + queue_work(card->event_wq, &data->worker); +} + +struct qeth_addr_change_data { + struct delayed_work dwork; + struct qeth_card *card; + struct qeth_ipacmd_addr_change ac_event; +}; + +static void qeth_l2_dev2br_worker(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct qeth_addr_change_data *data; + struct qeth_card *card; + struct qeth_priv *priv; + unsigned int i; + int rc; + + data = container_of(dwork, struct qeth_addr_change_data, dwork); + card = data->card; + priv = netdev_priv(card->dev); + + QETH_CARD_TEXT(card, 4, "dev2brew"); + + if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE) + goto free; + + /* Potential re-config in progress, try again later: */ + if (!rtnl_trylock()) { + queue_delayed_work(card->event_wq, dwork, + msecs_to_jiffies(100)); + return; + } + if (!netif_device_present(card->dev)) + goto out_unlock; + + if (data->ac_event.lost_event_mask) { + QETH_DBF_MESSAGE(3, + "Address change notification overflow on device %x\n", + CARD_DEVID(card)); + /* Card fdb and bridge fdb are out of sync, card has stopped + * notifications (no need to drain_workqueue). Purge all + * 'extern_learn' entries from the parent bridge and restart + * the notifications. + */ + qeth_l2_dev2br_fdb_flush(card); + rc = qeth_l2_dev2br_an_set(card, true); + if (rc) { + /* TODO: if we want to retry after -EAGAIN, be + * aware there could be stale entries in the + * workqueue now, that need to be drained. 
+ * For now we give up: + */ + netdev_err(card->dev, + "bridge learning_sync failed to recover: %d\n", + rc); + WRITE_ONCE(card->info.pnso_mode, + QETH_PNSO_NONE); + /* To remove fdb entries reported by an_set: */ + qeth_l2_dev2br_fdb_flush(card); + priv->brport_features ^= BR_LEARNING_SYNC; + } else { + QETH_DBF_MESSAGE(3, + "Address Notification resynced on device %x\n", + CARD_DEVID(card)); + } + } else { + for (i = 0; i < data->ac_event.num_entries; i++) { + struct qeth_ipacmd_addr_change_entry *entry = + &data->ac_event.entry[i]; + qeth_l2_dev2br_fdb_notify(card, + entry->change_code, + &entry->token, + &entry->addr_lnid); + } + } + +out_unlock: + rtnl_unlock(); + +free: + kfree(data); +} + +static void qeth_addr_change_event_worker(struct work_struct *work) +{ + struct delayed_work *dwork = to_delayed_work(work); + struct qeth_addr_change_data *data; + struct qeth_card *card; + int i; + + data = container_of(dwork, struct qeth_addr_change_data, dwork); + card = data->card; + + QETH_CARD_TEXT(data->card, 4, "adrchgew"); + + if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE) + goto free; + + if (data->ac_event.lost_event_mask) { + /* Potential re-config in progress, try again later: */ + if (!mutex_trylock(&card->sbp_lock)) { + queue_delayed_work(card->event_wq, dwork, + msecs_to_jiffies(100)); + return; + } + + dev_info(&data->card->gdev->dev, + "Address change notification stopped on %s (%s)\n", + netdev_name(card->dev), + (data->ac_event.lost_event_mask == 0x01) + ? "Overflow" + : (data->ac_event.lost_event_mask == 0x02) + ? "Bridge port state change" + : "Unknown reason"); + + data->card->options.sbp.hostnotification = 0; + card->info.pnso_mode = QETH_PNSO_NONE; + mutex_unlock(&data->card->sbp_lock); + qeth_bridge_emit_host_event(data->card, anev_abort, + 0, NULL, NULL); + } else + for (i = 0; i < data->ac_event.num_entries; i++) { + struct qeth_ipacmd_addr_change_entry *entry = + &data->ac_event.entry[i]; + qeth_bridge_emit_host_event(data->card, + anev_reg_unreg, + entry->change_code, + &entry->token, + &entry->addr_lnid); + } + +free: + kfree(data); +} + +static void qeth_addr_change_event(struct qeth_card *card, + struct qeth_ipa_cmd *cmd) +{ + struct qeth_ipacmd_addr_change *hostevs = + &cmd->data.addrchange; + struct qeth_addr_change_data *data; + int extrasize; + + if (card->info.pnso_mode == QETH_PNSO_NONE) + return; + + QETH_CARD_TEXT(card, 4, "adrchgev"); + if (cmd->hdr.return_code != 0x0000) { + if (cmd->hdr.return_code == 0x0010) { + if (hostevs->lost_event_mask == 0x00) + hostevs->lost_event_mask = 0xff; + } else { + QETH_CARD_TEXT_(card, 2, "ACHN%04x", + cmd->hdr.return_code); + return; + } + } + extrasize = sizeof(struct qeth_ipacmd_addr_change_entry) * + hostevs->num_entries; + data = kzalloc(sizeof(struct qeth_addr_change_data) + extrasize, + GFP_ATOMIC); + if (!data) { + QETH_CARD_TEXT(card, 2, "ACNalloc"); + return; + } + if (card->info.pnso_mode == QETH_PNSO_BRIDGEPORT) + INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker); + else + INIT_DELAYED_WORK(&data->dwork, qeth_l2_dev2br_worker); + data->card = card; + memcpy(&data->ac_event, hostevs, + sizeof(struct qeth_ipacmd_addr_change) + extrasize); + queue_delayed_work(card->event_wq, &data->dwork, 0); +} + +/* SETBRIDGEPORT support; sending commands */ + +struct _qeth_sbp_cbctl { + union { + u32 supported; + struct { + enum qeth_sbp_roles *role; + enum qeth_sbp_states *state; + } qports; + } data; +}; + +static int qeth_bridgeport_makerc(struct qeth_card *card, + struct qeth_ipa_cmd *cmd) +{ + 
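/* Translate the IPA and SETBRIDGEPORT return codes of a reply into a
+ * negative errno value (0 on success) and, where helpful, report the
+ * failure reason to the user via dev_err().
+ */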
struct qeth_ipacmd_setbridgeport *sbp = &cmd->data.sbp; + enum qeth_ipa_sbp_cmd setcmd = sbp->hdr.command_code; + u16 ipa_rc = cmd->hdr.return_code; + u16 sbp_rc = sbp->hdr.return_code; + int rc; + + if (ipa_rc == IPA_RC_SUCCESS && sbp_rc == IPA_RC_SUCCESS) + return 0; + + if ((IS_IQD(card) && ipa_rc == IPA_RC_SUCCESS) || + (!IS_IQD(card) && ipa_rc == sbp_rc)) { + switch (sbp_rc) { + case IPA_RC_SUCCESS: + rc = 0; + break; + case IPA_RC_L2_UNSUPPORTED_CMD: + case IPA_RC_UNSUPPORTED_COMMAND: + rc = -EOPNOTSUPP; + break; + case IPA_RC_SBP_OSA_NOT_CONFIGURED: + case IPA_RC_SBP_IQD_NOT_CONFIGURED: + rc = -ENODEV; /* maybe not the best code here? */ + dev_err(&card->gdev->dev, + "The device is not configured as a Bridge Port\n"); + break; + case IPA_RC_SBP_OSA_OS_MISMATCH: + case IPA_RC_SBP_IQD_OS_MISMATCH: + rc = -EPERM; + dev_err(&card->gdev->dev, + "A Bridge Port is already configured by a different operating system\n"); + break; + case IPA_RC_SBP_OSA_ANO_DEV_PRIMARY: + case IPA_RC_SBP_IQD_ANO_DEV_PRIMARY: + switch (setcmd) { + case IPA_SBP_SET_PRIMARY_BRIDGE_PORT: + rc = -EEXIST; + dev_err(&card->gdev->dev, + "The LAN already has a primary Bridge Port\n"); + break; + case IPA_SBP_SET_SECONDARY_BRIDGE_PORT: + rc = -EBUSY; + dev_err(&card->gdev->dev, + "The device is already a primary Bridge Port\n"); + break; + default: + rc = -EIO; + } + break; + case IPA_RC_SBP_OSA_CURRENT_SECOND: + case IPA_RC_SBP_IQD_CURRENT_SECOND: + rc = -EBUSY; + dev_err(&card->gdev->dev, + "The device is already a secondary Bridge Port\n"); + break; + case IPA_RC_SBP_OSA_LIMIT_SECOND: + case IPA_RC_SBP_IQD_LIMIT_SECOND: + rc = -EEXIST; + dev_err(&card->gdev->dev, + "The LAN cannot have more secondary Bridge Ports\n"); + break; + case IPA_RC_SBP_OSA_CURRENT_PRIMARY: + case IPA_RC_SBP_IQD_CURRENT_PRIMARY: + rc = -EBUSY; + dev_err(&card->gdev->dev, + "The device is already a primary Bridge Port\n"); + break; + case IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN: + case IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN: + rc = -EACCES; + dev_err(&card->gdev->dev, + "The device is not authorized to be a Bridge Port\n"); + break; + default: + rc = -EIO; + } + } else { + switch (ipa_rc) { + case IPA_RC_NOTSUPP: + rc = -EOPNOTSUPP; + break; + case IPA_RC_UNSUPPORTED_COMMAND: + rc = -EOPNOTSUPP; + break; + default: + rc = -EIO; + } + } + + if (rc) { + QETH_CARD_TEXT_(card, 2, "SBPi%04x", ipa_rc); + QETH_CARD_TEXT_(card, 2, "SBPc%04x", sbp_rc); + } + return rc; +} + +static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card, + enum qeth_ipa_sbp_cmd sbp_cmd, + unsigned int data_length) +{ + enum qeth_ipa_cmds ipa_cmd = IS_IQD(card) ? 
IPA_CMD_SETBRIDGEPORT_IQD : + IPA_CMD_SETBRIDGEPORT_OSA; + struct qeth_ipacmd_sbp_hdr *hdr; + struct qeth_cmd_buffer *iob; + + iob = qeth_ipa_alloc_cmd(card, ipa_cmd, QETH_PROT_NONE, + data_length + + offsetof(struct qeth_ipacmd_setbridgeport, + data)); + if (!iob) + return iob; + + hdr = &__ipa_cmd(iob)->data.sbp.hdr; + hdr->cmdlength = sizeof(*hdr) + data_length; + hdr->command_code = sbp_cmd; + hdr->used_total = 1; + hdr->seq_no = 1; + return iob; +} + +static int qeth_bridgeport_query_support_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param; + int rc; + + QETH_CARD_TEXT(card, 2, "brqsupcb"); + rc = qeth_bridgeport_makerc(card, cmd); + if (rc) + return rc; + + cbctl->data.supported = + cmd->data.sbp.data.query_cmds_supp.supported_cmds; + return 0; +} + +/** + * qeth_bridgeport_query_support() - store bitmask of supported subfunctions. + * @card: qeth_card structure pointer. + * + * Sets bitmask of supported setbridgeport subfunctions in the qeth_card + * strucutre: card->options.sbp.supported_funcs. + */ +static void qeth_bridgeport_query_support(struct qeth_card *card) +{ + struct qeth_cmd_buffer *iob; + struct _qeth_sbp_cbctl cbctl; + + QETH_CARD_TEXT(card, 2, "brqsuppo"); + iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_COMMANDS_SUPPORTED, + SBP_DATA_SIZEOF(query_cmds_supp)); + if (!iob) + return; + + if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb, + &cbctl)) { + card->options.sbp.role = QETH_SBP_ROLE_NONE; + card->options.sbp.supported_funcs = 0; + return; + } + card->options.sbp.supported_funcs = cbctl.data.supported; +} + +static int qeth_bridgeport_query_ports_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param; + struct qeth_sbp_port_data *qports; + int rc; + + QETH_CARD_TEXT(card, 2, "brqprtcb"); + rc = qeth_bridgeport_makerc(card, cmd); + if (rc) + return rc; + + qports = &cmd->data.sbp.data.port_data; + if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { + QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length); + return -EINVAL; + } + /* first entry contains the state of the local port */ + if (qports->num_entries > 0) { + if (cbctl->data.qports.role) + *cbctl->data.qports.role = qports->entry[0].role; + if (cbctl->data.qports.state) + *cbctl->data.qports.state = qports->entry[0].state; + } + return 0; +} + +/** + * qeth_bridgeport_query_ports() - query local bridgeport status. + * @card: qeth_card structure pointer. + * @role: Role of the port: 0-none, 1-primary, 2-secondary. + * @state: State of the port: 0-inactive, 1-standby, 2-active. + * + * Returns negative errno-compatible error indication or 0 on success. + * + * 'role' and 'state' are not updated in case of hardware operation failure. 
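+ *
+ * Callers typically pass &card->options.sbp.role so that the cached role
+ * is refreshed from the reply, as done by the bridge_role/bridge_state
+ * sysfs attributes and by qeth_l2_setup_bridgeport_attrs().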
+ */ +int qeth_bridgeport_query_ports(struct qeth_card *card, + enum qeth_sbp_roles *role, enum qeth_sbp_states *state) +{ + struct qeth_cmd_buffer *iob; + struct _qeth_sbp_cbctl cbctl = { + .data = { + .qports = { + .role = role, + .state = state, + }, + }, + }; + + QETH_CARD_TEXT(card, 2, "brqports"); + if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS)) + return -EOPNOTSUPP; + iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_BRIDGE_PORTS, 0); + if (!iob) + return -ENOMEM; + + return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb, + &cbctl); +} + +static int qeth_bridgeport_set_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; + + QETH_CARD_TEXT(card, 2, "brsetrcb"); + return qeth_bridgeport_makerc(card, cmd); +} + +/** + * qeth_bridgeport_setrole() - Assign primary role to the port. + * @card: qeth_card structure pointer. + * @role: Role to assign. + * + * Returns negative errno-compatible error indication or 0 on success. + */ +int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) +{ + struct qeth_cmd_buffer *iob; + enum qeth_ipa_sbp_cmd setcmd; + unsigned int cmdlength = 0; + + QETH_CARD_TEXT(card, 2, "brsetrol"); + switch (role) { + case QETH_SBP_ROLE_NONE: + setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE; + break; + case QETH_SBP_ROLE_PRIMARY: + setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT; + cmdlength = SBP_DATA_SIZEOF(set_primary); + break; + case QETH_SBP_ROLE_SECONDARY: + setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT; + break; + default: + return -EINVAL; + } + if (!(card->options.sbp.supported_funcs & setcmd)) + return -EOPNOTSUPP; + iob = qeth_sbp_build_cmd(card, setcmd, cmdlength); + if (!iob) + return -ENOMEM; + + return qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, NULL); +} + +static void qeth_bridgeport_an_set_cb(void *priv, + struct chsc_pnso_naid_l2 *entry) +{ + struct qeth_card *card = (struct qeth_card *)priv; + u8 code; + + code = IPA_ADDR_CHANGE_CODE_MACADDR; + if (entry->addr_lnid.lnid < VLAN_N_VID) + code |= IPA_ADDR_CHANGE_CODE_VLANID; + qeth_bridge_emit_host_event(card, anev_reg_unreg, code, + (struct net_if_token *)&entry->nit, + (struct mac_addr_lnid *)&entry->addr_lnid); +} + +/** + * qeth_bridgeport_an_set() - Enable or disable bridgeport address notification + * @card: qeth_card structure pointer. + * @enable: 0 - disable, non-zero - enable notifications + * + * Returns negative errno-compatible error indication or 0 on success. + * + * On enable, emits a series of address notifications udev events for all + * currently registered hosts. 
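+ *
+ * Each notification is forwarded to user space by
+ * qeth_bridge_emit_host_event() as a KOBJ_CHANGE uevent carrying
+ * BRIDGEDHOST=register/deregister plus the MAC, VLAN and network-token
+ * details of the host.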
+ */ +int qeth_bridgeport_an_set(struct qeth_card *card, int enable) +{ + int rc; + + if (!card->options.sbp.supported_funcs) + return -EOPNOTSUPP; + + if (enable) { + qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL); + qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT); + rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 1, + qeth_bridgeport_an_set_cb, card); + if (rc) + qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); + } else { + rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 0, NULL, NULL); + qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); + } + return rc; +} + +/* VNIC Characteristics support */ + +/* handle VNICC IPA command return codes; convert to error codes */ +static int qeth_l2_vnicc_makerc(struct qeth_card *card, u16 ipa_rc) +{ + int rc; + + switch (ipa_rc) { + case IPA_RC_SUCCESS: + return ipa_rc; + case IPA_RC_L2_UNSUPPORTED_CMD: + case IPA_RC_NOTSUPP: + rc = -EOPNOTSUPP; + break; + case IPA_RC_VNICC_OOSEQ: + rc = -EALREADY; + break; + case IPA_RC_VNICC_VNICBP: + rc = -EBUSY; + break; + case IPA_RC_L2_ADDR_TABLE_FULL: + rc = -ENOSPC; + break; + case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: + rc = -EACCES; + break; + default: + rc = -EIO; + } + + QETH_CARD_TEXT_(card, 2, "err%04x", ipa_rc); + return rc; +} + +/* generic VNICC request call back */ +static int qeth_l2_vnicc_request_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc; + u32 sub_cmd = cmd->data.vnicc.hdr.sub_command; + + QETH_CARD_TEXT(card, 2, "vniccrcb"); + if (cmd->hdr.return_code) + return qeth_l2_vnicc_makerc(card, cmd->hdr.return_code); + /* return results to caller */ + card->options.vnicc.sup_chars = rep->vnicc_cmds.supported; + card->options.vnicc.cur_chars = rep->vnicc_cmds.enabled; + + if (sub_cmd == IPA_VNICC_QUERY_CMDS) + *(u32 *)reply->param = rep->data.query_cmds.sup_cmds; + else if (sub_cmd == IPA_VNICC_GET_TIMEOUT) + *(u32 *)reply->param = rep->data.getset_timeout.timeout; + + return 0; +} + +static struct qeth_cmd_buffer *qeth_l2_vnicc_build_cmd(struct qeth_card *card, + u32 vnicc_cmd, + unsigned int data_length) +{ + struct qeth_ipacmd_vnicc_hdr *hdr; + struct qeth_cmd_buffer *iob; + + iob = qeth_ipa_alloc_cmd(card, IPA_CMD_VNICC, QETH_PROT_NONE, + data_length + + offsetof(struct qeth_ipacmd_vnicc, data)); + if (!iob) + return NULL; + + hdr = &__ipa_cmd(iob)->data.vnicc.hdr; + hdr->data_length = sizeof(*hdr) + data_length; + hdr->sub_command = vnicc_cmd; + return iob; +} + +/* VNICC query VNIC characteristics request */ +static int qeth_l2_vnicc_query_chars(struct qeth_card *card) +{ + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "vniccqch"); + iob = qeth_l2_vnicc_build_cmd(card, IPA_VNICC_QUERY_CHARS, 0); + if (!iob) + return -ENOMEM; + + return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL); +} + +/* VNICC query sub commands request */ +static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char, + u32 *sup_cmds) +{ + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "vniccqcm"); + iob = qeth_l2_vnicc_build_cmd(card, IPA_VNICC_QUERY_CMDS, + VNICC_DATA_SIZEOF(query_cmds)); + if (!iob) + return -ENOMEM; + + __ipa_cmd(iob)->data.vnicc.data.query_cmds.vnic_char = vnic_char; + + return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, sup_cmds); +} + +/* VNICC enable/disable characteristic request */ +static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char, + u32 cmd) +{ + struct qeth_cmd_buffer 
*iob; + + QETH_CARD_TEXT(card, 2, "vniccedc"); + iob = qeth_l2_vnicc_build_cmd(card, cmd, VNICC_DATA_SIZEOF(set_char)); + if (!iob) + return -ENOMEM; + + __ipa_cmd(iob)->data.vnicc.data.set_char.vnic_char = vnic_char; + + return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, NULL); +} + +/* VNICC get/set timeout for characteristic request */ +static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc, + u32 cmd, u32 *timeout) +{ + struct qeth_vnicc_getset_timeout *getset_timeout; + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "vniccgst"); + iob = qeth_l2_vnicc_build_cmd(card, cmd, + VNICC_DATA_SIZEOF(getset_timeout)); + if (!iob) + return -ENOMEM; + + getset_timeout = &__ipa_cmd(iob)->data.vnicc.data.getset_timeout; + getset_timeout->vnic_char = vnicc; + + if (cmd == IPA_VNICC_SET_TIMEOUT) + getset_timeout->timeout = *timeout; + + return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, timeout); +} + +/* recover user timeout setting */ +static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc, + u32 *timeout) +{ + if (card->options.vnicc.sup_chars & vnicc && + card->options.vnicc.getset_timeout_sup & vnicc && + !qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT, + timeout)) + return false; + *timeout = QETH_VNICC_DEFAULT_TIMEOUT; + return true; +} + +/* set current VNICC flag state; called from sysfs store function */ +int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state) +{ + int rc = 0; + u32 cmd; + + QETH_CARD_TEXT(card, 2, "vniccsch"); + + /* check if characteristic and enable/disable are supported */ + if (!(card->options.vnicc.sup_chars & vnicc) || + !(card->options.vnicc.set_char_sup & vnicc)) + return -EOPNOTSUPP; + + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + + /* set enable/disable command and store wanted characteristic */ + if (state) { + cmd = IPA_VNICC_ENABLE; + card->options.vnicc.wanted_chars |= vnicc; + } else { + cmd = IPA_VNICC_DISABLE; + card->options.vnicc.wanted_chars &= ~vnicc; + } + + /* do we need to do anything? */ + if (card->options.vnicc.cur_chars == card->options.vnicc.wanted_chars) + return rc; + + /* if card is not ready, simply stop here */ + if (!qeth_card_hw_is_reachable(card)) { + if (state) + card->options.vnicc.cur_chars |= vnicc; + else + card->options.vnicc.cur_chars &= ~vnicc; + return rc; + } + + rc = qeth_l2_vnicc_set_char(card, vnicc, cmd); + if (rc) + card->options.vnicc.wanted_chars = + card->options.vnicc.cur_chars; + else { + /* successful online VNICC change; handle special cases */ + if (state && vnicc == QETH_VNICC_RX_BCAST) + card->options.vnicc.rx_bcast_enabled = true; + if (!state && vnicc == QETH_VNICC_LEARNING) + qeth_l2_vnicc_recover_timeout(card, vnicc, + &card->options.vnicc.learning_timeout); + } + + return rc; +} + +/* get current VNICC flag state; called from sysfs show function */ +int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 2, "vniccgch"); + + /* check if characteristic is supported */ + if (!(card->options.vnicc.sup_chars & vnicc)) + return -EOPNOTSUPP; + + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + + /* if card is ready, query current VNICC state */ + if (qeth_card_hw_is_reachable(card)) + rc = qeth_l2_vnicc_query_chars(card); + + *state = (card->options.vnicc.cur_chars & vnicc) ? true : false; + return rc; +} + +/* set VNICC timeout; called from sysfs store function. 
Currently, only learning + * supports timeout + */ +int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 2, "vniccsto"); + + /* check if characteristic and set_timeout are supported */ + if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) || + !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING)) + return -EOPNOTSUPP; + + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + + /* do we need to do anything? */ + if (card->options.vnicc.learning_timeout == timeout) + return rc; + + /* if card is not ready, simply store the value internally and return */ + if (!qeth_card_hw_is_reachable(card)) { + card->options.vnicc.learning_timeout = timeout; + return rc; + } + + /* send timeout value to card; if successful, store value internally */ + rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING, + IPA_VNICC_SET_TIMEOUT, &timeout); + if (!rc) + card->options.vnicc.learning_timeout = timeout; + + return rc; +} + +/* get current VNICC timeout; called from sysfs show function. Currently, only + * learning supports timeout + */ +int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 2, "vniccgto"); + + /* check if characteristic and get_timeout are supported */ + if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) || + !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING)) + return -EOPNOTSUPP; + + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + + /* if card is ready, get timeout. Otherwise, just return stored value */ + *timeout = card->options.vnicc.learning_timeout; + if (qeth_card_hw_is_reachable(card)) + rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING, + IPA_VNICC_GET_TIMEOUT, + timeout); + + return rc; +} + +/* check if VNICC is currently enabled */ +static bool _qeth_l2_vnicc_is_in_use(struct qeth_card *card) +{ + if (!card->options.vnicc.sup_chars) + return false; + /* default values are only OK if rx_bcast was not enabled by user + * or the card is offline. + */ + if (card->options.vnicc.cur_chars == QETH_VNICC_DEFAULT) { + if (!card->options.vnicc.rx_bcast_enabled || + !qeth_card_hw_is_reachable(card)) + return false; + } + return true; +} + +/** + * qeth_bridgeport_allowed - are any qeth_bridgeport functions allowed? + * @card: qeth_card structure pointer + * + * qeth_bridgeport functionality is mutually exclusive with usage of the + * VNIC Characteristics and dev2br address notifications + */ +bool qeth_bridgeport_allowed(struct qeth_card *card) +{ + struct qeth_priv *priv = netdev_priv(card->dev); + + return (!_qeth_l2_vnicc_is_in_use(card) && + !(priv->brport_features & BR_LEARNING_SYNC)); +} + +/* recover user characteristic setting */ +static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc, + bool enable) +{ + u32 cmd = enable ? 
IPA_VNICC_ENABLE : IPA_VNICC_DISABLE; + + if (card->options.vnicc.sup_chars & vnicc && + card->options.vnicc.set_char_sup & vnicc && + !qeth_l2_vnicc_set_char(card, vnicc, cmd)) + return false; + card->options.vnicc.wanted_chars &= ~vnicc; + card->options.vnicc.wanted_chars |= QETH_VNICC_DEFAULT & vnicc; + return true; +} + +/* (re-)initialize VNICC */ +static void qeth_l2_vnicc_init(struct qeth_card *card) +{ + u32 *timeout = &card->options.vnicc.learning_timeout; + bool enable, error = false; + unsigned int chars_len, i; + unsigned long chars_tmp; + u32 sup_cmds, vnicc; + + QETH_CARD_TEXT(card, 2, "vniccini"); + /* reset rx_bcast */ + card->options.vnicc.rx_bcast_enabled = 0; + /* initial query and storage of VNIC characteristics */ + if (qeth_l2_vnicc_query_chars(card)) { + if (card->options.vnicc.wanted_chars != QETH_VNICC_DEFAULT || + *timeout != QETH_VNICC_DEFAULT_TIMEOUT) + dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n"); + /* fail quietly if user didn't change the default config */ + card->options.vnicc.sup_chars = 0; + card->options.vnicc.cur_chars = 0; + card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT; + return; + } + /* get supported commands for each supported characteristic */ + chars_tmp = card->options.vnicc.sup_chars; + chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE; + for_each_set_bit(i, &chars_tmp, chars_len) { + vnicc = BIT(i); + if (qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds)) { + sup_cmds = 0; + error = true; + } + if ((sup_cmds & IPA_VNICC_SET_TIMEOUT) && + (sup_cmds & IPA_VNICC_GET_TIMEOUT)) + card->options.vnicc.getset_timeout_sup |= vnicc; + else + card->options.vnicc.getset_timeout_sup &= ~vnicc; + if ((sup_cmds & IPA_VNICC_ENABLE) && + (sup_cmds & IPA_VNICC_DISABLE)) + card->options.vnicc.set_char_sup |= vnicc; + else + card->options.vnicc.set_char_sup &= ~vnicc; + } + /* enforce assumed default values and recover settings, if changed */ + error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING, + timeout); + /* Change chars, if necessary */ + chars_tmp = card->options.vnicc.wanted_chars ^ + card->options.vnicc.cur_chars; + chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE; + for_each_set_bit(i, &chars_tmp, chars_len) { + vnicc = BIT(i); + enable = card->options.vnicc.wanted_chars & vnicc; + error |= qeth_l2_vnicc_recover_char(card, vnicc, enable); + } + if (error) + dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n"); +} + +/* configure default values of VNIC characteristics */ +static void qeth_l2_vnicc_set_defaults(struct qeth_card *card) +{ + /* characteristics values */ + card->options.vnicc.sup_chars = QETH_VNICC_ALL; + card->options.vnicc.cur_chars = QETH_VNICC_DEFAULT; + card->options.vnicc.learning_timeout = QETH_VNICC_DEFAULT_TIMEOUT; + /* supported commands */ + card->options.vnicc.set_char_sup = QETH_VNICC_ALL; + card->options.vnicc.getset_timeout_sup = QETH_VNICC_LEARNING; + /* settings wanted by users */ + card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT; +} + +static const struct device_type qeth_l2_devtype = { + .name = "qeth_layer2", + .groups = qeth_l2_attr_groups, +}; + +static int qeth_l2_probe_device(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + int rc; + + if (IS_OSN(card)) + dev_notice(&gdev->dev, "OSN support will be dropped in 2021\n"); + + qeth_l2_vnicc_set_defaults(card); + mutex_init(&card->sbp_lock); + + if (gdev->dev.type == &qeth_generic_devtype) { + rc = 
qeth_l2_create_device_attributes(&gdev->dev); + if (rc) + return rc; + } + + INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work); + return 0; +} + +static void qeth_l2_remove_device(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + + if (gdev->dev.type == &qeth_generic_devtype) + qeth_l2_remove_device_attributes(&gdev->dev); + qeth_set_allowed_threads(card, 0, 1); + wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); + + if (gdev->state == CCWGROUP_ONLINE) + qeth_set_offline(card, card->discipline, false); + + cancel_work_sync(&card->close_dev_work); + if (card->dev->reg_state == NETREG_REGISTERED) + unregister_netdev(card->dev); +} + +static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok) +{ + struct net_device *dev = card->dev; + int rc = 0; + + qeth_l2_detect_dev2br_support(card); + + mutex_lock(&card->sbp_lock); + qeth_bridgeport_query_support(card); + if (card->options.sbp.supported_funcs) { + qeth_l2_setup_bridgeport_attrs(card); + dev_info(&card->gdev->dev, + "The device represents a Bridge Capable Port\n"); + } + mutex_unlock(&card->sbp_lock); + + qeth_l2_register_dev_addr(card); + + /* for the rx_bcast characteristic, init VNICC after setmac */ + qeth_l2_vnicc_init(card); + + qeth_l2_trace_features(card); + + /* softsetup */ + QETH_CARD_TEXT(card, 2, "softsetp"); + + card->state = CARD_STATE_SOFTSETUP; + + qeth_set_allowed_threads(card, 0xffffffff, 0); + + if (dev->reg_state != NETREG_REGISTERED) { + rc = qeth_l2_setup_netdev(card); + if (rc) + goto err_setup; + + if (carrier_ok) + netif_carrier_on(dev); + } else { + rtnl_lock(); + rc = qeth_set_real_num_tx_queues(card, + qeth_tx_actual_queues(card)); + if (rc) { + rtnl_unlock(); + goto err_set_queues; + } + + if (carrier_ok) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + + netif_device_attach(dev); + qeth_enable_hw_features(dev); + qeth_l2_enable_brport_features(card); + + if (netif_running(dev)) { + local_bh_disable(); + napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); + qeth_l2_set_rx_mode(dev); + } + rtnl_unlock(); + } + return 0; + +err_set_queues: +err_setup: + qeth_set_allowed_threads(card, 0, 1); + card->state = CARD_STATE_DOWN; + return rc; +} + +static void qeth_l2_set_offline(struct qeth_card *card) +{ + struct qeth_priv *priv = netdev_priv(card->dev); + + qeth_set_allowed_threads(card, 0, 1); + qeth_l2_drain_rx_mode_cache(card); + + if (card->state == CARD_STATE_SOFTSETUP) + card->state = CARD_STATE_DOWN; + + qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE); + if (priv->brport_features & BR_LEARNING_SYNC) { + rtnl_lock(); + qeth_l2_dev2br_fdb_flush(card); + rtnl_unlock(); + } +} + +/* Returns zero if the command is successfully "consumed" */ +static int qeth_l2_control_event(struct qeth_card *card, + struct qeth_ipa_cmd *cmd) +{ + switch (cmd->hdr.command) { + case IPA_CMD_SETBRIDGEPORT_OSA: + case IPA_CMD_SETBRIDGEPORT_IQD: + if (cmd->data.sbp.hdr.command_code == + IPA_SBP_BRIDGE_PORT_STATE_CHANGE) { + qeth_bridge_state_change(card, cmd); + return 0; + } + + return 1; + case IPA_CMD_ADDRESS_CHANGE_NOTIF: + qeth_addr_change_event(card, cmd); + return 0; + default: + return 1; + } +} + +const struct qeth_discipline qeth_l2_discipline = { + .devtype = &qeth_l2_devtype, + .setup = qeth_l2_probe_device, + .remove = qeth_l2_remove_device, + .set_online = qeth_l2_set_online, + .set_offline = qeth_l2_set_offline, + .do_ioctl = NULL, + .control_event_handler = qeth_l2_control_event, +}; 
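+
+/* The qeth core selects this discipline for devices configured in layer 2
+ * mode and drives device setup, online/offline transitions and control
+ * event handling through the callbacks above.
+ */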
+EXPORT_SYMBOL_GPL(qeth_l2_discipline); + +static int __init qeth_l2_init(void) +{ + pr_info("register layer 2 discipline\n"); + return 0; +} + +static void __exit qeth_l2_exit(void) +{ + pr_info("unregister layer 2 discipline\n"); +} + +module_init(qeth_l2_init); +module_exit(qeth_l2_exit); +MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); +MODULE_DESCRIPTION("qeth layer 2 discipline"); +MODULE_LICENSE("GPL"); diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c new file mode 100644 index 000000000..4ba3bc572 --- /dev/null +++ b/drivers/s390/net/qeth_l2_sys.c @@ -0,0 +1,402 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2013 + * Author(s): Eugene Crosser <eugene.crosser@ru.ibm.com> + */ + +#include <linux/slab.h> +#include <asm/ebcdic.h> +#include "qeth_core.h" +#include "qeth_l2.h" + +static ssize_t qeth_bridge_port_role_state_show(struct device *dev, + struct device_attribute *attr, char *buf, + int show_state) +{ + struct qeth_card *card = dev_get_drvdata(dev); + enum qeth_sbp_states state = QETH_SBP_STATE_INACTIVE; + int rc = 0; + char *word; + + if (!qeth_bridgeport_allowed(card)) + return sprintf(buf, "n/a (VNIC characteristics)\n"); + + mutex_lock(&card->sbp_lock); + if (qeth_card_hw_is_reachable(card) && + card->options.sbp.supported_funcs) + rc = qeth_bridgeport_query_ports(card, + &card->options.sbp.role, &state); + if (!rc) { + if (show_state) + switch (state) { + case QETH_SBP_STATE_INACTIVE: + word = "inactive"; break; + case QETH_SBP_STATE_STANDBY: + word = "standby"; break; + case QETH_SBP_STATE_ACTIVE: + word = "active"; break; + default: + rc = -EIO; + } + else + switch (card->options.sbp.role) { + case QETH_SBP_ROLE_NONE: + word = "none"; break; + case QETH_SBP_ROLE_PRIMARY: + word = "primary"; break; + case QETH_SBP_ROLE_SECONDARY: + word = "secondary"; break; + default: + rc = -EIO; + } + if (rc) + QETH_CARD_TEXT_(card, 2, "SBP%02x:%02x", + card->options.sbp.role, state); + else + rc = sprintf(buf, "%s\n", word); + } + mutex_unlock(&card->sbp_lock); + + return rc; +} + +static ssize_t qeth_bridge_port_role_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!qeth_bridgeport_allowed(card)) + return sprintf(buf, "n/a (VNIC characteristics)\n"); + + return qeth_bridge_port_role_state_show(dev, attr, buf, 0); +} + +static ssize_t qeth_bridge_port_role_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + int rc = 0; + enum qeth_sbp_roles role; + + if (sysfs_streq(buf, "primary")) + role = QETH_SBP_ROLE_PRIMARY; + else if (sysfs_streq(buf, "secondary")) + role = QETH_SBP_ROLE_SECONDARY; + else if (sysfs_streq(buf, "none")) + role = QETH_SBP_ROLE_NONE; + else + return -EINVAL; + + mutex_lock(&card->conf_mutex); + mutex_lock(&card->sbp_lock); + + if (!qeth_bridgeport_allowed(card)) + rc = -EBUSY; + else if (card->options.sbp.reflect_promisc) + /* Forbid direct manipulation */ + rc = -EPERM; + else if (qeth_card_hw_is_reachable(card)) { + rc = qeth_bridgeport_setrole(card, role); + if (!rc) + card->options.sbp.role = role; + } else + card->options.sbp.role = role; + + mutex_unlock(&card->sbp_lock); + mutex_unlock(&card->conf_mutex); + + return rc ? 
rc : count; +} + +static DEVICE_ATTR(bridge_role, 0644, qeth_bridge_port_role_show, + qeth_bridge_port_role_store); + +static ssize_t qeth_bridge_port_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!qeth_bridgeport_allowed(card)) + return sprintf(buf, "n/a (VNIC characteristics)\n"); + + return qeth_bridge_port_role_state_show(dev, attr, buf, 1); +} + +static DEVICE_ATTR(bridge_state, 0444, qeth_bridge_port_state_show, + NULL); + +static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + int enabled; + + if (!qeth_bridgeport_allowed(card)) + return sprintf(buf, "n/a (VNIC characteristics)\n"); + + enabled = card->options.sbp.hostnotification; + + return sprintf(buf, "%d\n", enabled); +} + +static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + bool enable; + int rc; + + rc = kstrtobool(buf, &enable); + if (rc) + return rc; + + mutex_lock(&card->conf_mutex); + mutex_lock(&card->sbp_lock); + + if (!qeth_bridgeport_allowed(card)) + rc = -EBUSY; + else if (qeth_card_hw_is_reachable(card)) { + rc = qeth_bridgeport_an_set(card, enable); + /* sbp_lock ensures ordering vs notifications-stopped events */ + if (!rc) + card->options.sbp.hostnotification = enable; + } else + card->options.sbp.hostnotification = enable; + + mutex_unlock(&card->sbp_lock); + mutex_unlock(&card->conf_mutex); + + return rc ? rc : count; +} + +static DEVICE_ATTR(bridge_hostnotify, 0644, + qeth_bridgeport_hostnotification_show, + qeth_bridgeport_hostnotification_store); + +static ssize_t qeth_bridgeport_reflect_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + char *state; + + if (!qeth_bridgeport_allowed(card)) + return sprintf(buf, "n/a (VNIC characteristics)\n"); + + if (card->options.sbp.reflect_promisc) { + if (card->options.sbp.reflect_promisc_primary) + state = "primary"; + else + state = "secondary"; + } else + state = "none"; + + return sprintf(buf, "%s\n", state); +} + +static ssize_t qeth_bridgeport_reflect_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + int enable, primary; + int rc = 0; + + if (sysfs_streq(buf, "none")) { + enable = 0; + primary = 0; + } else if (sysfs_streq(buf, "primary")) { + enable = 1; + primary = 1; + } else if (sysfs_streq(buf, "secondary")) { + enable = 1; + primary = 0; + } else + return -EINVAL; + + mutex_lock(&card->conf_mutex); + mutex_lock(&card->sbp_lock); + + if (!qeth_bridgeport_allowed(card)) + rc = -EBUSY; + else if (card->options.sbp.role != QETH_SBP_ROLE_NONE) + rc = -EPERM; + else { + card->options.sbp.reflect_promisc = enable; + card->options.sbp.reflect_promisc_primary = primary; + rc = 0; + } + + mutex_unlock(&card->sbp_lock); + mutex_unlock(&card->conf_mutex); + + return rc ? 
rc : count; +} + +static DEVICE_ATTR(bridge_reflect_promisc, 0644, + qeth_bridgeport_reflect_show, + qeth_bridgeport_reflect_store); + +static struct attribute *qeth_l2_bridgeport_attrs[] = { + &dev_attr_bridge_role.attr, + &dev_attr_bridge_state.attr, + &dev_attr_bridge_hostnotify.attr, + &dev_attr_bridge_reflect_promisc.attr, + NULL, +}; + +static struct attribute_group qeth_l2_bridgeport_attr_group = { + .attrs = qeth_l2_bridgeport_attrs, +}; + +/* VNIC CHARS support */ + +/* convert sysfs attr name to VNIC characteristic */ +static u32 qeth_l2_vnicc_sysfs_attr_to_char(const char *attr_name) +{ + if (sysfs_streq(attr_name, "flooding")) + return QETH_VNICC_FLOODING; + else if (sysfs_streq(attr_name, "mcast_flooding")) + return QETH_VNICC_MCAST_FLOODING; + else if (sysfs_streq(attr_name, "learning")) + return QETH_VNICC_LEARNING; + else if (sysfs_streq(attr_name, "takeover_setvmac")) + return QETH_VNICC_TAKEOVER_SETVMAC; + else if (sysfs_streq(attr_name, "takeover_learning")) + return QETH_VNICC_TAKEOVER_LEARNING; + else if (sysfs_streq(attr_name, "bridge_invisible")) + return QETH_VNICC_BRIDGE_INVISIBLE; + else if (sysfs_streq(attr_name, "rx_bcast")) + return QETH_VNICC_RX_BCAST; + + return 0; +} + +/* get current timeout setting */ +static ssize_t qeth_vnicc_timeout_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + u32 timeout; + int rc; + + rc = qeth_l2_vnicc_get_timeout(card, &timeout); + if (rc == -EBUSY) + return sprintf(buf, "n/a (BridgePort)\n"); + if (rc == -EOPNOTSUPP) + return sprintf(buf, "n/a\n"); + return rc ? rc : sprintf(buf, "%d\n", timeout); +} + +/* change timeout setting */ +static ssize_t qeth_vnicc_timeout_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + u32 timeout; + int rc; + + rc = kstrtou32(buf, 10, &timeout); + if (rc) + return rc; + + mutex_lock(&card->conf_mutex); + rc = qeth_l2_vnicc_set_timeout(card, timeout); + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +/* get current setting of characteristic */ +static ssize_t qeth_vnicc_char_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + bool state; + u32 vnicc; + int rc; + + vnicc = qeth_l2_vnicc_sysfs_attr_to_char(attr->attr.name); + rc = qeth_l2_vnicc_get_state(card, vnicc, &state); + + if (rc == -EBUSY) + return sprintf(buf, "n/a (BridgePort)\n"); + if (rc == -EOPNOTSUPP) + return sprintf(buf, "n/a\n"); + return rc ? rc : sprintf(buf, "%d\n", state); +} + +/* change setting of characteristic */ +static ssize_t qeth_vnicc_char_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + bool state; + u32 vnicc; + int rc; + + if (kstrtobool(buf, &state)) + return -EINVAL; + + vnicc = qeth_l2_vnicc_sysfs_attr_to_char(attr->attr.name); + mutex_lock(&card->conf_mutex); + rc = qeth_l2_vnicc_set_state(card, vnicc, state); + mutex_unlock(&card->conf_mutex); + + return rc ? 
rc : count; +} + +static DEVICE_ATTR(flooding, 0644, qeth_vnicc_char_show, qeth_vnicc_char_store); +static DEVICE_ATTR(mcast_flooding, 0644, qeth_vnicc_char_show, + qeth_vnicc_char_store); +static DEVICE_ATTR(learning, 0644, qeth_vnicc_char_show, qeth_vnicc_char_store); +static DEVICE_ATTR(learning_timeout, 0644, qeth_vnicc_timeout_show, + qeth_vnicc_timeout_store); +static DEVICE_ATTR(takeover_setvmac, 0644, qeth_vnicc_char_show, + qeth_vnicc_char_store); +static DEVICE_ATTR(takeover_learning, 0644, qeth_vnicc_char_show, + qeth_vnicc_char_store); +static DEVICE_ATTR(bridge_invisible, 0644, qeth_vnicc_char_show, + qeth_vnicc_char_store); +static DEVICE_ATTR(rx_bcast, 0644, qeth_vnicc_char_show, qeth_vnicc_char_store); + +static struct attribute *qeth_l2_vnicc_attrs[] = { + &dev_attr_flooding.attr, + &dev_attr_mcast_flooding.attr, + &dev_attr_learning.attr, + &dev_attr_learning_timeout.attr, + &dev_attr_takeover_setvmac.attr, + &dev_attr_takeover_learning.attr, + &dev_attr_bridge_invisible.attr, + &dev_attr_rx_bcast.attr, + NULL, +}; + +static struct attribute_group qeth_l2_vnicc_attr_group = { + .attrs = qeth_l2_vnicc_attrs, + .name = "vnicc", +}; + +static const struct attribute_group *qeth_l2_only_attr_groups[] = { + &qeth_l2_bridgeport_attr_group, + &qeth_l2_vnicc_attr_group, + NULL, +}; + +int qeth_l2_create_device_attributes(struct device *dev) +{ + return sysfs_create_groups(&dev->kobj, qeth_l2_only_attr_groups); +} + +void qeth_l2_remove_device_attributes(struct device *dev) +{ + sysfs_remove_groups(&dev->kobj, qeth_l2_only_attr_groups); +} + +const struct attribute_group *qeth_l2_attr_groups[] = { + &qeth_device_attr_group, + &qeth_device_blkt_group, + /* l2 specific, see qeth_l2_only_attr_groups: */ + &qeth_l2_bridgeport_attr_group, + &qeth_l2_vnicc_attr_group, + NULL, +}; diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h new file mode 100644 index 000000000..acd130cfb --- /dev/null +++ b/drivers/s390/net/qeth_l3.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 
2007 + * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, + * Frank Pavlic <fpavlic@de.ibm.com>, + * Thomas Spatzier <tspat@de.ibm.com>, + * Frank Blaschka <frank.blaschka@de.ibm.com> + */ + +#ifndef __QETH_L3_H__ +#define __QETH_L3_H__ + +#include "qeth_core.h" +#include <linux/hashtable.h> + +enum qeth_ip_types { + QETH_IP_TYPE_NORMAL, + QETH_IP_TYPE_VIPA, + QETH_IP_TYPE_RXIP, +}; + +struct qeth_ipaddr { + struct hlist_node hnode; + enum qeth_ip_types type; + u8 is_multicast:1; + u8 disp_flag:2; + u8 ipato:1; /* ucast only */ + + /* is changed only for normal ip addresses + * for non-normal addresses it always is 1 + */ + int ref_counter; + enum qeth_prot_versions proto; + union { + struct { + __be32 addr; + __be32 mask; + } a4; + struct { + struct in6_addr addr; + unsigned int pfxlen; + } a6; + } u; +}; + +static inline void qeth_l3_init_ipaddr(struct qeth_ipaddr *addr, + enum qeth_ip_types type, + enum qeth_prot_versions proto) +{ + memset(addr, 0, sizeof(*addr)); + addr->type = type; + addr->proto = proto; + addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; + addr->ref_counter = 1; +} + +static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1, + struct qeth_ipaddr *a2) +{ + if (a1->proto != a2->proto) + return false; + if (a1->proto == QETH_PROT_IPV6) + return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr); + return a1->u.a4.addr == a2->u.a4.addr; +} + +static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1, + struct qeth_ipaddr *a2) +{ + /* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(), + * so 'proto' and 'addr' match for sure. + * + * For ucast: + * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching + * values are required to avoid mixups in takeover eligibility. + * + * For mcast, + * - 'mask'/'pfxlen' is always 0. + */ + if (a1->type != a2->type) + return false; + if (a1->proto == QETH_PROT_IPV6) + return a1->u.a6.pfxlen == a2->u.a6.pfxlen; + return a1->u.a4.mask == a2->u.a4.mask; +} + +static inline u32 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr) +{ + if (addr->proto == QETH_PROT_IPV6) + return ipv6_addr_hash(&addr->u.a6.addr); + else + return ipv4_addr_hash(addr->u.a4.addr); +} + +struct qeth_ipato_entry { + struct list_head entry; + enum qeth_prot_versions proto; + char addr[16]; + unsigned int mask_bits; +}; + +extern const struct attribute_group *qeth_l3_attr_groups[]; + +int qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const u8 *addr, + char *buf); +int qeth_l3_create_device_attributes(struct device *); +void qeth_l3_remove_device_attributes(struct device *); +int qeth_l3_setrouting_v4(struct qeth_card *); +int qeth_l3_setrouting_v6(struct qeth_card *); +int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *); +int qeth_l3_del_ipato_entry(struct qeth_card *card, + enum qeth_prot_versions proto, u8 *addr, + unsigned int mask_bits); +void qeth_l3_update_ipato(struct qeth_card *card); +int qeth_l3_modify_hsuid(struct qeth_card *card, bool add); +int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip, + enum qeth_ip_types type, + enum qeth_prot_versions proto); + +#endif /* __QETH_L3_H__ */ diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c new file mode 100644 index 000000000..d8cdf9024 --- /dev/null +++ b/drivers/s390/net/qeth_l3_main.c @@ -0,0 +1,2247 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 
2007, 2009 + * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, + * Frank Pavlic <fpavlic@de.ibm.com>, + * Thomas Spatzier <tspat@de.ibm.com>, + * Frank Blaschka <frank.blaschka@de.ibm.com> + */ + +#define KMSG_COMPONENT "qeth" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/bitops.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/etherdevice.h> +#include <linux/ip.h> +#include <linux/in.h> +#include <linux/ipv6.h> +#include <linux/inetdevice.h> +#include <linux/igmp.h> +#include <linux/slab.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/skbuff.h> + +#include <net/ip.h> +#include <net/arp.h> +#include <net/route.h> +#include <net/ipv6.h> +#include <net/ip6_route.h> +#include <net/iucv/af_iucv.h> +#include <linux/hashtable.h> + +#include "qeth_l3.h" + +static int qeth_l3_register_addr_entry(struct qeth_card *, + struct qeth_ipaddr *); +static int qeth_l3_deregister_addr_entry(struct qeth_card *, + struct qeth_ipaddr *); + +int qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const u8 *addr, + char *buf) +{ + if (proto == QETH_PROT_IPV4) + return sprintf(buf, "%pI4", addr); + else + return sprintf(buf, "%pI6", addr); +} + +static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card, + struct qeth_ipaddr *query) +{ + u32 key = qeth_l3_ipaddr_hash(query); + struct qeth_ipaddr *addr; + + if (query->is_multicast) { + hash_for_each_possible(card->rx_mode_addrs, addr, hnode, key) + if (qeth_l3_addr_match_ip(addr, query)) + return addr; + } else { + hash_for_each_possible(card->ip_htable, addr, hnode, key) + if (qeth_l3_addr_match_ip(addr, query)) + return addr; + } + return NULL; +} + +static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) +{ + int i, j; + u8 octet; + + for (i = 0; i < len; ++i) { + octet = addr[i]; + for (j = 7; j >= 0; --j) { + bits[i*8 + j] = octet & 1; + octet >>= 1; + } + } +} + +static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, + struct qeth_ipaddr *addr) +{ + struct qeth_ipato_entry *ipatoe; + u8 addr_bits[128] = {0, }; + u8 ipatoe_bits[128] = {0, }; + int rc = 0; + + if (!card->ipato.enabled) + return false; + if (addr->type != QETH_IP_TYPE_NORMAL) + return false; + + qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits, + (addr->proto == QETH_PROT_IPV4) ? 4 : 16); + list_for_each_entry(ipatoe, &card->ipato.entries, entry) { + if (addr->proto != ipatoe->proto) + continue; + qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits, + (ipatoe->proto == QETH_PROT_IPV4) ? + 4 : 16); + if (addr->proto == QETH_PROT_IPV4) + rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits); + else + rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits); + if (rc) + break; + } + /* invert? 
*/ + if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4) + rc = !rc; + else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6) + rc = !rc; + + return rc; +} + +static int qeth_l3_delete_ip(struct qeth_card *card, + struct qeth_ipaddr *tmp_addr) +{ + int rc = 0; + struct qeth_ipaddr *addr; + + if (tmp_addr->type == QETH_IP_TYPE_RXIP) + QETH_CARD_TEXT(card, 2, "delrxip"); + else if (tmp_addr->type == QETH_IP_TYPE_VIPA) + QETH_CARD_TEXT(card, 2, "delvipa"); + else + QETH_CARD_TEXT(card, 2, "delip"); + + if (tmp_addr->proto == QETH_PROT_IPV4) + QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4); + else { + QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8); + QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); + } + + addr = qeth_l3_find_addr_by_ip(card, tmp_addr); + if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr)) + return -ENOENT; + + addr->ref_counter--; + if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0) + return rc; + + if (qeth_card_hw_is_reachable(card)) + rc = qeth_l3_deregister_addr_entry(card, addr); + + hash_del(&addr->hnode); + kfree(addr); + + return rc; +} + +static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) +{ + int rc = 0; + struct qeth_ipaddr *addr; + char buf[40]; + + if (tmp_addr->type == QETH_IP_TYPE_RXIP) + QETH_CARD_TEXT(card, 2, "addrxip"); + else if (tmp_addr->type == QETH_IP_TYPE_VIPA) + QETH_CARD_TEXT(card, 2, "addvipa"); + else + QETH_CARD_TEXT(card, 2, "addip"); + + if (tmp_addr->proto == QETH_PROT_IPV4) + QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4); + else { + QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8); + QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); + } + + addr = qeth_l3_find_addr_by_ip(card, tmp_addr); + if (addr) { + if (tmp_addr->type != QETH_IP_TYPE_NORMAL) + return -EADDRINUSE; + if (qeth_l3_addr_match_all(addr, tmp_addr)) { + addr->ref_counter++; + return 0; + } + qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u, + buf); + dev_warn(&card->gdev->dev, + "Registering IP address %s failed\n", buf); + return -EADDRINUSE; + } else { + addr = kmemdup(tmp_addr, sizeof(*tmp_addr), GFP_KERNEL); + if (!addr) + return -ENOMEM; + + if (qeth_l3_is_addr_covered_by_ipato(card, addr)) { + QETH_CARD_TEXT(card, 2, "tkovaddr"); + addr->ipato = 1; + } + hash_add(card->ip_htable, &addr->hnode, + qeth_l3_ipaddr_hash(addr)); + + if (!qeth_card_hw_is_reachable(card)) { + addr->disp_flag = QETH_DISP_ADDR_ADD; + return 0; + } + + rc = qeth_l3_register_addr_entry(card, addr); + + if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) { + addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; + } else { + hash_del(&addr->hnode); + kfree(addr); + } + } + return rc; +} + +static int qeth_l3_modify_ip(struct qeth_card *card, struct qeth_ipaddr *addr, + bool add) +{ + int rc; + + mutex_lock(&card->ip_lock); + rc = add ? 
qeth_l3_add_ip(card, addr) : qeth_l3_delete_ip(card, addr); + mutex_unlock(&card->ip_lock); + + return rc; +} + +static void qeth_l3_drain_rx_mode_cache(struct qeth_card *card) +{ + struct qeth_ipaddr *addr; + struct hlist_node *tmp; + int i; + + hash_for_each_safe(card->rx_mode_addrs, i, tmp, addr, hnode) { + hash_del(&addr->hnode); + kfree(addr); + } +} + +static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover) +{ + struct qeth_ipaddr *addr; + struct hlist_node *tmp; + int i; + + QETH_CARD_TEXT(card, 4, "clearip"); + + mutex_lock(&card->ip_lock); + + hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { + if (!recover) { + hash_del(&addr->hnode); + kfree(addr); + continue; + } + addr->disp_flag = QETH_DISP_ADDR_ADD; + } + + mutex_unlock(&card->ip_lock); +} + +static void qeth_l3_recover_ip(struct qeth_card *card) +{ + struct qeth_ipaddr *addr; + struct hlist_node *tmp; + int i; + int rc; + + QETH_CARD_TEXT(card, 4, "recovrip"); + + mutex_lock(&card->ip_lock); + + hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { + if (addr->disp_flag == QETH_DISP_ADDR_ADD) { + rc = qeth_l3_register_addr_entry(card, addr); + + if (!rc) { + addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; + } else { + hash_del(&addr->hnode); + kfree(addr); + } + } + } + + mutex_unlock(&card->ip_lock); +} + +static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + switch (cmd->hdr.return_code) { + case IPA_RC_SUCCESS: + return 0; + case IPA_RC_DUPLICATE_IP_ADDRESS: + return -EADDRINUSE; + case IPA_RC_MC_ADDR_NOT_FOUND: + return -ENOENT; + case IPA_RC_LAN_OFFLINE: + return -ENETDOWN; + default: + return -EIO; + } +} + +static int qeth_l3_send_setdelmc(struct qeth_card *card, + struct qeth_ipaddr *addr, + enum qeth_ipa_cmds ipacmd) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_CARD_TEXT(card, 4, "setdelmc"); + + iob = qeth_ipa_alloc_cmd(card, ipacmd, addr->proto, + IPA_DATA_SIZEOF(setdelipm)); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + if (addr->proto == QETH_PROT_IPV6) { + cmd->data.setdelipm.ip = addr->u.a6.addr; + ipv6_eth_mc_map(&addr->u.a6.addr, cmd->data.setdelipm.mac); + } else { + cmd->data.setdelipm.ip.s6_addr32[3] = addr->u.a4.addr; + ip_eth_mc_map(addr->u.a4.addr, cmd->data.setdelipm.mac); + } + + return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL); +} + +static void qeth_l3_set_ipv6_prefix(struct in6_addr *prefix, unsigned int len) +{ + unsigned int i = 0; + + while (len && i < 4) { + int mask_len = min_t(int, len, 32); + + prefix->s6_addr32[i] = inet_make_mask(mask_len); + len -= mask_len; + i++; + } +} + +static u32 qeth_l3_get_setdelip_flags(struct qeth_ipaddr *addr, bool set) +{ + switch (addr->type) { + case QETH_IP_TYPE_RXIP: + return (set) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0; + case QETH_IP_TYPE_VIPA: + return (set) ? QETH_IPA_SETIP_VIPA_FLAG : + QETH_IPA_DELIP_VIPA_FLAG; + default: + return (set && addr->ipato) ? 
QETH_IPA_SETIP_TAKEOVER_FLAG : 0; + } +} + +static int qeth_l3_send_setdelip(struct qeth_card *card, + struct qeth_ipaddr *addr, + enum qeth_ipa_cmds ipacmd) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + u32 flags; + + QETH_CARD_TEXT(card, 4, "setdelip"); + + iob = qeth_ipa_alloc_cmd(card, ipacmd, addr->proto, + IPA_DATA_SIZEOF(setdelip6)); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + + flags = qeth_l3_get_setdelip_flags(addr, ipacmd == IPA_CMD_SETIP); + QETH_CARD_TEXT_(card, 4, "flags%02X", flags); + + if (addr->proto == QETH_PROT_IPV6) { + cmd->data.setdelip6.addr = addr->u.a6.addr; + qeth_l3_set_ipv6_prefix(&cmd->data.setdelip6.prefix, + addr->u.a6.pfxlen); + cmd->data.setdelip6.flags = flags; + } else { + cmd->data.setdelip4.addr = addr->u.a4.addr; + cmd->data.setdelip4.mask = addr->u.a4.mask; + cmd->data.setdelip4.flags = flags; + } + + return qeth_send_ipa_cmd(card, iob, qeth_l3_setdelip_cb, NULL); +} + +static int qeth_l3_send_setrouting(struct qeth_card *card, + enum qeth_routing_types type, enum qeth_prot_versions prot) +{ + int rc; + struct qeth_ipa_cmd *cmd; + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 4, "setroutg"); + iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETRTG, prot, + IPA_DATA_SIZEOF(setrtg)); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + cmd->data.setrtg.type = (type); + rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); + + return rc; +} + +static int qeth_l3_correct_routing_type(struct qeth_card *card, + enum qeth_routing_types *type, enum qeth_prot_versions prot) +{ + if (IS_IQD(card)) { + switch (*type) { + case NO_ROUTER: + case PRIMARY_CONNECTOR: + case SECONDARY_CONNECTOR: + case MULTICAST_ROUTER: + return 0; + default: + goto out_inval; + } + } else { + switch (*type) { + case NO_ROUTER: + case PRIMARY_ROUTER: + case SECONDARY_ROUTER: + return 0; + case MULTICAST_ROUTER: + if (qeth_is_ipafunc_supported(card, prot, + IPA_OSA_MC_ROUTER)) + return 0; + default: + goto out_inval; + } + } +out_inval: + *type = NO_ROUTER; + return -EINVAL; +} + +int qeth_l3_setrouting_v4(struct qeth_card *card) +{ + int rc; + + QETH_CARD_TEXT(card, 3, "setrtg4"); + + rc = qeth_l3_correct_routing_type(card, &card->options.route4.type, + QETH_PROT_IPV4); + if (rc) + return rc; + + rc = qeth_l3_send_setrouting(card, card->options.route4.type, + QETH_PROT_IPV4); + if (rc) { + card->options.route4.type = NO_ROUTER; + QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n", + rc, CARD_DEVID(card)); + } + return rc; +} + +int qeth_l3_setrouting_v6(struct qeth_card *card) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 3, "setrtg6"); + + if (!qeth_is_supported(card, IPA_IPV6)) + return 0; + rc = qeth_l3_correct_routing_type(card, &card->options.route6.type, + QETH_PROT_IPV6); + if (rc) + return rc; + + rc = qeth_l3_send_setrouting(card, card->options.route6.type, + QETH_PROT_IPV6); + if (rc) { + card->options.route6.type = NO_ROUTER; + QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n", + rc, CARD_DEVID(card)); + } + return rc; +} + +/* + * IP address takeover related functions + */ + +/** + * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs. + * + * Caller must hold ip_lock. 
+ */ +void qeth_l3_update_ipato(struct qeth_card *card) +{ + struct qeth_ipaddr *addr; + unsigned int i; + + hash_for_each(card->ip_htable, i, addr, hnode) { + if (addr->type != QETH_IP_TYPE_NORMAL) + continue; + addr->ipato = qeth_l3_is_addr_covered_by_ipato(card, addr); + } +} + +static void qeth_l3_clear_ipato_list(struct qeth_card *card) +{ + struct qeth_ipato_entry *ipatoe, *tmp; + + mutex_lock(&card->ip_lock); + + list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { + list_del(&ipatoe->entry); + kfree(ipatoe); + } + + qeth_l3_update_ipato(card); + mutex_unlock(&card->ip_lock); +} + +int qeth_l3_add_ipato_entry(struct qeth_card *card, + struct qeth_ipato_entry *new) +{ + struct qeth_ipato_entry *ipatoe; + int rc = 0; + + QETH_CARD_TEXT(card, 2, "addipato"); + + mutex_lock(&card->ip_lock); + + list_for_each_entry(ipatoe, &card->ipato.entries, entry) { + if (ipatoe->proto != new->proto) + continue; + if (!memcmp(ipatoe->addr, new->addr, + (ipatoe->proto == QETH_PROT_IPV4) ? 4 : 16) && + (ipatoe->mask_bits == new->mask_bits)) { + rc = -EEXIST; + break; + } + } + + if (!rc) { + list_add_tail(&new->entry, &card->ipato.entries); + qeth_l3_update_ipato(card); + } + + mutex_unlock(&card->ip_lock); + + return rc; +} + +int qeth_l3_del_ipato_entry(struct qeth_card *card, + enum qeth_prot_versions proto, u8 *addr, + unsigned int mask_bits) +{ + struct qeth_ipato_entry *ipatoe, *tmp; + int rc = -ENOENT; + + QETH_CARD_TEXT(card, 2, "delipato"); + + mutex_lock(&card->ip_lock); + + list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { + if (ipatoe->proto != proto) + continue; + if (!memcmp(ipatoe->addr, addr, + (proto == QETH_PROT_IPV4) ? 4 : 16) && + (ipatoe->mask_bits == mask_bits)) { + list_del(&ipatoe->entry); + qeth_l3_update_ipato(card); + kfree(ipatoe); + rc = 0; + } + } + + mutex_unlock(&card->ip_lock); + + return rc; +} + +int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip, + enum qeth_ip_types type, + enum qeth_prot_versions proto) +{ + struct qeth_ipaddr addr; + + qeth_l3_init_ipaddr(&addr, type, proto); + if (proto == QETH_PROT_IPV4) + memcpy(&addr.u.a4.addr, ip, 4); + else + memcpy(&addr.u.a6.addr, ip, 16); + + return qeth_l3_modify_ip(card, &addr, add); +} + +int qeth_l3_modify_hsuid(struct qeth_card *card, bool add) +{ + struct qeth_ipaddr addr; + unsigned int i; + + qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6); + addr.u.a6.addr.s6_addr[0] = 0xfe; + addr.u.a6.addr.s6_addr[1] = 0x80; + for (i = 0; i < 8; i++) + addr.u.a6.addr.s6_addr[8+i] = card->options.hsuid[i]; + + return qeth_l3_modify_ip(card, &addr, add); +} + +static int qeth_l3_register_addr_entry(struct qeth_card *card, + struct qeth_ipaddr *addr) +{ + char buf[50]; + int rc = 0; + int cnt = 3; + + if (card->options.sniffer) + return 0; + + if (addr->proto == QETH_PROT_IPV4) { + QETH_CARD_TEXT(card, 2, "setaddr4"); + QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); + } else if (addr->proto == QETH_PROT_IPV6) { + QETH_CARD_TEXT(card, 2, "setaddr6"); + QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8); + QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8); + } else { + QETH_CARD_TEXT(card, 2, "setaddr?"); + QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr)); + } + do { + if (addr->is_multicast) + rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM); + else + rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP); + if (rc) + QETH_CARD_TEXT(card, 2, "failed"); + } while ((--cnt > 0) && rc); + if (rc) { + QETH_CARD_TEXT(card, 2, "FAILED"); + 
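+ /* all three registration attempts failed: format the address and warn the admin */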
qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); + dev_warn(&card->gdev->dev, + "Registering IP address %s failed\n", buf); + } + return rc; +} + +static int qeth_l3_deregister_addr_entry(struct qeth_card *card, + struct qeth_ipaddr *addr) +{ + int rc = 0; + + if (card->options.sniffer) + return 0; + + if (addr->proto == QETH_PROT_IPV4) { + QETH_CARD_TEXT(card, 2, "deladdr4"); + QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); + } else if (addr->proto == QETH_PROT_IPV6) { + QETH_CARD_TEXT(card, 2, "deladdr6"); + QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8); + QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8); + } else { + QETH_CARD_TEXT(card, 2, "deladdr?"); + QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr)); + } + if (addr->is_multicast) + rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM); + else + rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP); + if (rc) + QETH_CARD_TEXT(card, 2, "failed"); + + return rc; +} + +static int qeth_l3_setadapter_parms(struct qeth_card *card) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 2, "setadprm"); + + if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) { + rc = qeth_setadpparms_change_macaddr(card); + if (rc) + dev_warn(&card->gdev->dev, "Reading the adapter MAC" + " address failed\n"); + } + + return rc; +} + +static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card) +{ + int rc; + + QETH_CARD_TEXT(card, 3, "ipaarp"); + + if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { + dev_info(&card->gdev->dev, + "ARP processing not supported on %s!\n", + netdev_name(card->dev)); + return 0; + } + rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, + IPA_CMD_ASS_START, NULL); + if (rc) { + dev_warn(&card->gdev->dev, + "Starting ARP processing support for %s failed\n", + netdev_name(card->dev)); + } + return rc; +} + +static int qeth_l3_start_ipa_source_mac(struct qeth_card *card) +{ + int rc; + + QETH_CARD_TEXT(card, 3, "stsrcmac"); + + if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { + dev_info(&card->gdev->dev, + "Inbound source MAC-address not supported on %s\n", + netdev_name(card->dev)); + return -EOPNOTSUPP; + } + + rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC, + IPA_CMD_ASS_START, NULL); + if (rc) + dev_warn(&card->gdev->dev, + "Starting source MAC-address support for %s failed\n", + netdev_name(card->dev)); + return rc; +} + +static int qeth_l3_start_ipa_vlan(struct qeth_card *card) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 3, "strtvlan"); + + if (!qeth_is_supported(card, IPA_FULL_VLAN)) { + dev_info(&card->gdev->dev, + "VLAN not supported on %s\n", netdev_name(card->dev)); + return -EOPNOTSUPP; + } + + rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO, + IPA_CMD_ASS_START, NULL); + if (rc) { + dev_warn(&card->gdev->dev, + "Starting VLAN support for %s failed\n", + netdev_name(card->dev)); + } else { + dev_info(&card->gdev->dev, "VLAN enabled\n"); + } + return rc; +} + +static int qeth_l3_start_ipa_multicast(struct qeth_card *card) +{ + int rc; + + QETH_CARD_TEXT(card, 3, "stmcast"); + + if (!qeth_is_supported(card, IPA_MULTICASTING)) { + dev_info(&card->gdev->dev, + "Multicast not supported on %s\n", + netdev_name(card->dev)); + return -EOPNOTSUPP; + } + + rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING, + IPA_CMD_ASS_START, NULL); + if (rc) { + dev_warn(&card->gdev->dev, + "Starting multicast support for %s failed\n", + netdev_name(card->dev)); + } else { + dev_info(&card->gdev->dev, "Multicast enabled\n"); + card->dev->flags |= IFF_MULTICAST; + } + return rc; +} + 
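Most of the start helpers above share one shape: probe the assist bit the adapter advertised, issue IPA_CMD_ASS_START, and warn rather than abort on failure so the remaining assists still get their chance. A minimal sketch of that shared shape follows; it is illustrative only and not part of this patch, the helper name is invented, the enum qeth_ipa_funcs parameter type is an assumption inferred from the IPA_* constants used above, and it glosses over per-assist details such as the VLAN helper probing IPA_FULL_VLAN but starting IPA_VLAN_PRIO.

static int qeth_l3_start_one_assist(struct qeth_card *card,
                                    enum qeth_ipa_funcs func,
                                    const char *name)
{
        int rc;

        /* assist not advertised by the adapter: report it, callers treat this as non-fatal */
        if (!qeth_is_supported(card, func)) {
                dev_info(&card->gdev->dev, "%s not supported on %s\n",
                         name, netdev_name(card->dev));
                return -EOPNOTSUPP;
        }

        /* IPA_CMD_ASS_START switches the assist on */
        rc = qeth_send_simple_setassparms(card, func, IPA_CMD_ASS_START, NULL);
        if (rc)
                dev_warn(&card->gdev->dev, "Starting %s for %s failed\n",
                         name, netdev_name(card->dev));
        return rc;
}

Under those assumptions, a caller such as qeth_l3_start_ipassists() below would invoke it once per assist, for example qeth_l3_start_one_assist(card, IPA_MULTICASTING, "Multicast"), and discard the return value, which is what the existing /* go on*/ comments express.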
+static int qeth_l3_softsetup_ipv6(struct qeth_card *card) +{ + u32 ipv6_data = 3; + int rc; + + QETH_CARD_TEXT(card, 3, "softipv6"); + + if (IS_IQD(card)) + goto out; + + rc = qeth_send_simple_setassparms(card, IPA_IPV6, IPA_CMD_ASS_START, + &ipv6_data); + if (rc) { + dev_err(&card->gdev->dev, + "Activating IPv6 support for %s failed\n", + netdev_name(card->dev)); + return rc; + } + rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6, IPA_CMD_ASS_START, + NULL); + if (rc) { + dev_err(&card->gdev->dev, + "Activating IPv6 support for %s failed\n", + netdev_name(card->dev)); + return rc; + } + rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU, + IPA_CMD_ASS_START, NULL); + if (rc) { + dev_warn(&card->gdev->dev, + "Enabling the passthrough mode for %s failed\n", + netdev_name(card->dev)); + return rc; + } +out: + dev_info(&card->gdev->dev, "IPV6 enabled\n"); + return 0; +} + +static int qeth_l3_start_ipa_ipv6(struct qeth_card *card) +{ + QETH_CARD_TEXT(card, 3, "strtipv6"); + + if (!qeth_is_supported(card, IPA_IPV6)) { + dev_info(&card->gdev->dev, + "IPv6 not supported on %s\n", netdev_name(card->dev)); + return 0; + } + return qeth_l3_softsetup_ipv6(card); +} + +static int qeth_l3_start_ipa_broadcast(struct qeth_card *card) +{ + u32 filter_data = 1; + int rc; + + QETH_CARD_TEXT(card, 3, "stbrdcst"); + card->info.broadcast_capable = 0; + if (!qeth_is_supported(card, IPA_FILTERING)) { + dev_info(&card->gdev->dev, + "Broadcast not supported on %s\n", + netdev_name(card->dev)); + rc = -EOPNOTSUPP; + goto out; + } + rc = qeth_send_simple_setassparms(card, IPA_FILTERING, + IPA_CMD_ASS_START, NULL); + if (rc) { + dev_warn(&card->gdev->dev, + "Enabling broadcast filtering for %s failed\n", + netdev_name(card->dev)); + goto out; + } + + rc = qeth_send_simple_setassparms(card, IPA_FILTERING, + IPA_CMD_ASS_CONFIGURE, &filter_data); + if (rc) { + dev_warn(&card->gdev->dev, + "Setting up broadcast filtering for %s failed\n", + netdev_name(card->dev)); + goto out; + } + card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO; + dev_info(&card->gdev->dev, "Broadcast enabled\n"); + rc = qeth_send_simple_setassparms(card, IPA_FILTERING, + IPA_CMD_ASS_ENABLE, &filter_data); + if (rc) { + dev_warn(&card->gdev->dev, + "Setting up broadcast echo filtering for %s failed\n", + netdev_name(card->dev)); + goto out; + } + card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO; +out: + if (card->info.broadcast_capable) + card->dev->flags |= IFF_BROADCAST; + else + card->dev->flags &= ~IFF_BROADCAST; + return rc; +} + +static void qeth_l3_start_ipassists(struct qeth_card *card) +{ + QETH_CARD_TEXT(card, 3, "strtipas"); + + qeth_l3_start_ipa_arp_processing(card); /* go on*/ + qeth_l3_start_ipa_source_mac(card); /* go on*/ + qeth_l3_start_ipa_vlan(card); /* go on*/ + qeth_l3_start_ipa_multicast(card); /* go on*/ + qeth_l3_start_ipa_ipv6(card); /* go on*/ + qeth_l3_start_ipa_broadcast(card); /* go on*/ +} + +static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + if (cmd->hdr.return_code) + return -EIO; + if (!is_valid_ether_addr(cmd->data.create_destroy_addr.mac_addr)) + return -EADDRNOTAVAIL; + + ether_addr_copy(card->dev->dev_addr, + cmd->data.create_destroy_addr.mac_addr); + return 0; +} + +static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card) +{ + int rc = 0; + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "hsrmac"); + + iob = qeth_ipa_alloc_cmd(card, 
IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6, + IPA_DATA_SIZEOF(create_destroy_addr)); + if (!iob) + return -ENOMEM; + + rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb, + NULL); + return rc; +} + +static int qeth_l3_get_unique_id_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + u16 *uid = reply->param; + + if (cmd->hdr.return_code == 0) { + *uid = cmd->data.create_destroy_addr.uid; + return 0; + } + + dev_warn(&card->gdev->dev, "The network adapter failed to generate a unique ID\n"); + return -EIO; +} + +static u16 qeth_l3_get_unique_id(struct qeth_card *card, u16 uid) +{ + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "guniqeid"); + + if (!qeth_is_supported(card, IPA_IPV6)) + goto out; + + iob = qeth_ipa_alloc_cmd(card, IPA_CMD_CREATE_ADDR, QETH_PROT_IPV6, + IPA_DATA_SIZEOF(create_destroy_addr)); + if (!iob) + goto out; + + __ipa_cmd(iob)->data.create_destroy_addr.uid = uid; + qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, &uid); + +out: + return uid; +} + +static int +qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + __u16 rc; + + QETH_CARD_TEXT(card, 2, "diastrcb"); + + cmd = (struct qeth_ipa_cmd *)data; + rc = cmd->hdr.return_code; + if (rc) + QETH_CARD_TEXT_(card, 2, "dxter%x", rc); + switch (cmd->data.diagass.action) { + case QETH_DIAGS_CMD_TRACE_QUERY: + break; + case QETH_DIAGS_CMD_TRACE_DISABLE: + switch (rc) { + case 0: + case IPA_RC_INVALID_SUBCMD: + card->info.promisc_mode = SET_PROMISC_MODE_OFF; + dev_info(&card->gdev->dev, "The HiperSockets network " + "traffic analyzer is deactivated\n"); + break; + default: + break; + } + break; + case QETH_DIAGS_CMD_TRACE_ENABLE: + switch (rc) { + case 0: + card->info.promisc_mode = SET_PROMISC_MODE_ON; + dev_info(&card->gdev->dev, "The HiperSockets network " + "traffic analyzer is activated\n"); + break; + case IPA_RC_HARDWARE_AUTH_ERROR: + dev_warn(&card->gdev->dev, "The device is not " + "authorized to run as a HiperSockets network " + "traffic analyzer\n"); + break; + case IPA_RC_TRACE_ALREADY_ACTIVE: + dev_warn(&card->gdev->dev, "A HiperSockets " + "network traffic analyzer is already " + "active in the HiperSockets LAN\n"); + break; + default: + break; + } + break; + default: + QETH_DBF_MESSAGE(2, "Unknown sniffer action (%#06x) on device %x\n", + cmd->data.diagass.action, CARD_DEVID(card)); + } + + return rc ? 
-EIO : 0; +} + +static int +qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_CARD_TEXT(card, 2, "diagtrac"); + + iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRACE, 0); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET; + cmd->data.diagass.action = diags_cmd; + return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL); +} + +static int qeth_l3_add_mcast_rtnl(struct net_device *dev, int vid, void *arg) +{ + struct qeth_card *card = arg; + struct inet6_dev *in6_dev; + struct in_device *in4_dev; + struct qeth_ipaddr *ipm; + struct qeth_ipaddr tmp; + struct ip_mc_list *im4; + struct ifmcaddr6 *im6; + + QETH_CARD_TEXT(card, 4, "addmc"); + + if (!dev || !(dev->flags & IFF_UP)) + goto out; + + in4_dev = __in_dev_get_rtnl(dev); + if (!in4_dev) + goto walk_ipv6; + + qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4); + tmp.disp_flag = QETH_DISP_ADDR_ADD; + tmp.is_multicast = 1; + + for (im4 = rtnl_dereference(in4_dev->mc_list); im4 != NULL; + im4 = rtnl_dereference(im4->next_rcu)) { + tmp.u.a4.addr = im4->multiaddr; + + ipm = qeth_l3_find_addr_by_ip(card, &tmp); + if (ipm) { + /* for mcast, by-IP match means full match */ + ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; + continue; + } + + ipm = kmemdup(&tmp, sizeof(tmp), GFP_KERNEL); + if (!ipm) + continue; + + hash_add(card->rx_mode_addrs, &ipm->hnode, + qeth_l3_ipaddr_hash(ipm)); + } + +walk_ipv6: + if (!qeth_is_supported(card, IPA_IPV6)) + goto out; + + in6_dev = __in6_dev_get(dev); + if (!in6_dev) + goto out; + + qeth_l3_init_ipaddr(&tmp, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6); + tmp.disp_flag = QETH_DISP_ADDR_ADD; + tmp.is_multicast = 1; + + read_lock_bh(&in6_dev->lock); + for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) { + tmp.u.a6.addr = im6->mca_addr; + + ipm = qeth_l3_find_addr_by_ip(card, &tmp); + if (ipm) { + /* for mcast, by-IP match means full match */ + ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; + continue; + } + + ipm = kmemdup(&tmp, sizeof(tmp), GFP_ATOMIC); + if (!ipm) + continue; + + hash_add(card->rx_mode_addrs, &ipm->hnode, + qeth_l3_ipaddr_hash(ipm)); + + } + read_unlock_bh(&in6_dev->lock); + +out: + return 0; +} + +static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_CARD_TEXT_(card, 4, "aid:%d", vid); + return 0; +} + +static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_CARD_TEXT_(card, 4, "kid:%d", vid); + return 0; +} + +static void qeth_l3_set_promisc_mode(struct qeth_card *card) +{ + bool enable = card->dev->flags & IFF_PROMISC; + + if (card->info.promisc_mode == enable) + return; + + if (IS_VM_NIC(card)) { /* Guestlan trace */ + if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) + qeth_setadp_promisc_mode(card, enable); + } else if (card->options.sniffer && /* HiperSockets trace */ + qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { + if (enable) { + QETH_CARD_TEXT(card, 3, "+promisc"); + qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE); + } else { + QETH_CARD_TEXT(card, 3, "-promisc"); + qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); + } + } +} + +static void qeth_l3_rx_mode_work(struct work_struct *work) +{ + struct qeth_card *card = container_of(work, struct qeth_card, + rx_mode_work); + struct qeth_ipaddr *addr; + struct hlist_node *tmp; + int 
i, rc; + + QETH_CARD_TEXT(card, 3, "setmulti"); + + if (!card->options.sniffer) { + rtnl_lock(); + qeth_l3_add_mcast_rtnl(card->dev, 0, card); + if (qeth_is_supported(card, IPA_FULL_VLAN)) + vlan_for_each(card->dev, qeth_l3_add_mcast_rtnl, card); + rtnl_unlock(); + + hash_for_each_safe(card->rx_mode_addrs, i, tmp, addr, hnode) { + switch (addr->disp_flag) { + case QETH_DISP_ADDR_DELETE: + rc = qeth_l3_deregister_addr_entry(card, addr); + if (!rc || rc == -ENOENT) { + hash_del(&addr->hnode); + kfree(addr); + } + break; + case QETH_DISP_ADDR_ADD: + rc = qeth_l3_register_addr_entry(card, addr); + if (rc && rc != -ENETDOWN) { + hash_del(&addr->hnode); + kfree(addr); + break; + } + fallthrough; + default: + /* for next call to set_rx_mode(): */ + addr->disp_flag = QETH_DISP_ADDR_DELETE; + } + } + } + + qeth_l3_set_promisc_mode(card); +} + +static int qeth_l3_arp_makerc(u16 rc) +{ + switch (rc) { + case IPA_RC_SUCCESS: + return 0; + case QETH_IPA_ARP_RC_NOTSUPP: + case QETH_IPA_ARP_RC_Q_NOTSUPP: + return -EOPNOTSUPP; + case QETH_IPA_ARP_RC_OUT_OF_RANGE: + return -EINVAL; + case QETH_IPA_ARP_RC_Q_NO_DATA: + return -ENOENT; + default: + return -EIO; + } +} + +static int qeth_l3_arp_cmd_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + qeth_setassparms_cb(card, reply, data); + return qeth_l3_arp_makerc(cmd->hdr.return_code); +} + +static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) +{ + struct qeth_cmd_buffer *iob; + int rc; + + QETH_CARD_TEXT(card, 3, "arpstnoe"); + + /* + * currently GuestLAN only supports the ARP assist function + * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES; + * thus we say EOPNOTSUPP for this ARP function + */ + if (IS_VM_NIC(card)) + return -EOPNOTSUPP; + if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { + return -EOPNOTSUPP; + } + + iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, + IPA_CMD_ASS_ARP_SET_NO_ENTRIES, + SETASS_DATA_SIZEOF(flags_32bit), + QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; + + __ipa_cmd(iob)->data.setassparms.data.flags_32bit = (u32) no_entries; + rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL); + if (rc) + QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n", + CARD_DEVID(card), rc); + return rc; +} + +static __u32 get_arp_entry_size(struct qeth_card *card, + struct qeth_arp_query_data *qdata, + struct qeth_arp_entrytype *type, __u8 strip_entries) +{ + __u32 rc; + __u8 is_hsi; + + is_hsi = qdata->reply_bits == 5; + if (type->ip == QETHARP_IP_ADDR_V4) { + QETH_CARD_TEXT(card, 4, "arpev4"); + if (strip_entries) { + rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5_short) : + sizeof(struct qeth_arp_qi_entry7_short); + } else { + rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5) : + sizeof(struct qeth_arp_qi_entry7); + } + } else if (type->ip == QETHARP_IP_ADDR_V6) { + QETH_CARD_TEXT(card, 4, "arpev6"); + if (strip_entries) { + rc = is_hsi ? + sizeof(struct qeth_arp_qi_entry5_short_ipv6) : + sizeof(struct qeth_arp_qi_entry7_short_ipv6); + } else { + rc = is_hsi ? 
+ sizeof(struct qeth_arp_qi_entry5_ipv6) : + sizeof(struct qeth_arp_qi_entry7_ipv6); + } + } else { + QETH_CARD_TEXT(card, 4, "arpinv"); + rc = 0; + } + + return rc; +} + +static int arpentry_matches_prot(struct qeth_arp_entrytype *type, __u16 prot) +{ + return (type->ip == QETHARP_IP_ADDR_V4 && prot == QETH_PROT_IPV4) || + (type->ip == QETHARP_IP_ADDR_V6 && prot == QETH_PROT_IPV6); +} + +static int qeth_l3_arp_query_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + struct qeth_arp_query_data *qdata; + struct qeth_arp_query_info *qinfo; + int e; + int entrybytes_done; + int stripped_bytes; + __u8 do_strip_entries; + + QETH_CARD_TEXT(card, 3, "arpquecb"); + + qinfo = (struct qeth_arp_query_info *) reply->param; + cmd = (struct qeth_ipa_cmd *) data; + QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.prot_version); + if (cmd->hdr.return_code) { + QETH_CARD_TEXT(card, 4, "arpcberr"); + QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code); + return qeth_l3_arp_makerc(cmd->hdr.return_code); + } + if (cmd->data.setassparms.hdr.return_code) { + cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; + QETH_CARD_TEXT(card, 4, "setaperr"); + QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code); + return qeth_l3_arp_makerc(cmd->hdr.return_code); + } + qdata = &cmd->data.setassparms.data.query_arp; + QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries); + + do_strip_entries = (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) > 0; + stripped_bytes = do_strip_entries ? QETH_QARP_MEDIASPECIFIC_BYTES : 0; + entrybytes_done = 0; + for (e = 0; e < qdata->no_entries; ++e) { + char *cur_entry; + __u32 esize; + struct qeth_arp_entrytype *etype; + + cur_entry = &qdata->data + entrybytes_done; + etype = &((struct qeth_arp_qi_entry5 *) cur_entry)->type; + if (!arpentry_matches_prot(etype, cmd->hdr.prot_version)) { + QETH_CARD_TEXT(card, 4, "pmis"); + QETH_CARD_TEXT_(card, 4, "%i", etype->ip); + break; + } + esize = get_arp_entry_size(card, qdata, etype, + do_strip_entries); + QETH_CARD_TEXT_(card, 5, "esz%i", esize); + if (!esize) + break; + + if ((qinfo->udata_len - qinfo->udata_offset) < esize) { + QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOSPC); + memset(qinfo->udata, 0, 4); + return -ENOSPC; + } + + memcpy(qinfo->udata + qinfo->udata_offset, + &qdata->data + entrybytes_done + stripped_bytes, + esize); + entrybytes_done += esize + stripped_bytes; + qinfo->udata_offset += esize; + ++qinfo->no_entries; + } + /* check if all replies received ... 
*/ + if (cmd->data.setassparms.hdr.seq_no < + cmd->data.setassparms.hdr.number_of_replies) + return 1; + QETH_CARD_TEXT_(card, 4, "nove%i", qinfo->no_entries); + memcpy(qinfo->udata, &qinfo->no_entries, 4); + /* keep STRIP_ENTRIES flag so the user program can distinguish + * stripped entries from normal ones */ + if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) + qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES; + memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2); + QETH_CARD_TEXT_(card, 4, "rc%i", 0); + return 0; +} + +static int qeth_l3_query_arp_cache_info(struct qeth_card *card, + enum qeth_prot_versions prot, + struct qeth_arp_query_info *qinfo) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + int rc; + + QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot); + + iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, + IPA_CMD_ASS_ARP_QUERY_INFO, + SETASS_DATA_SIZEOF(query_arp), prot); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + cmd->data.setassparms.data.query_arp.request_bits = 0x000F; + rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_query_cb, qinfo); + if (rc) + QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n", + CARD_DEVID(card), rc); + return rc; +} + +static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) +{ + struct qeth_arp_query_info qinfo = {0, }; + int rc; + + QETH_CARD_TEXT(card, 3, "arpquery"); + + if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ + IPA_ARP_PROCESSING)) { + QETH_CARD_TEXT(card, 3, "arpqnsup"); + rc = -EOPNOTSUPP; + goto out; + } + /* get size of userspace buffer and mask_bits -> 6 bytes */ + if (copy_from_user(&qinfo, udata, 6)) { + rc = -EFAULT; + goto out; + } + qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); + if (!qinfo.udata) { + rc = -ENOMEM; + goto out; + } + qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET; + rc = qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV4, &qinfo); + if (rc) { + if (copy_to_user(udata, qinfo.udata, 4)) + rc = -EFAULT; + goto free_and_out; + } + if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) { + /* fails in case of GuestLAN QDIO mode */ + qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo); + } + if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) { + QETH_CARD_TEXT(card, 4, "qactf"); + rc = -EFAULT; + goto free_and_out; + } + QETH_CARD_TEXT(card, 4, "qacts"); + +free_and_out: + kfree(qinfo.udata); +out: + return rc; +} + +static int qeth_l3_arp_modify_entry(struct qeth_card *card, + struct qeth_arp_cache_entry *entry, + enum qeth_arp_process_subcmds arp_cmd) +{ + struct qeth_arp_cache_entry *cmd_entry; + struct qeth_cmd_buffer *iob; + int rc; + + if (arp_cmd == IPA_CMD_ASS_ARP_ADD_ENTRY) + QETH_CARD_TEXT(card, 3, "arpadd"); + else + QETH_CARD_TEXT(card, 3, "arpdel"); + + /* + * currently GuestLAN only supports the ARP assist function + * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY; + * thus we say EOPNOTSUPP for this ARP function + */ + if (IS_VM_NIC(card)) + return -EOPNOTSUPP; + if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { + return -EOPNOTSUPP; + } + + iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, arp_cmd, + SETASS_DATA_SIZEOF(arp_entry), + QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; + + cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry; + ether_addr_copy(cmd_entry->macaddr, entry->macaddr); + memcpy(cmd_entry->ipaddr, entry->ipaddr, 4); + rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL); + if (rc) + QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: 
%#x\n", + arp_cmd, CARD_DEVID(card), rc); + return rc; +} + +static int qeth_l3_arp_flush_cache(struct qeth_card *card) +{ + struct qeth_cmd_buffer *iob; + int rc; + + QETH_CARD_TEXT(card, 3, "arpflush"); + + /* + * currently GuestLAN only supports the ARP assist function + * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE; + * thus we say EOPNOTSUPP for this ARP function + */ + if (IS_VM_NIC(card) || IS_IQD(card)) + return -EOPNOTSUPP; + if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { + return -EOPNOTSUPP; + } + + iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, + IPA_CMD_ASS_ARP_FLUSH_CACHE, 0, + QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; + + rc = qeth_send_ipa_cmd(card, iob, qeth_l3_arp_cmd_cb, NULL); + if (rc) + QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n", + CARD_DEVID(card), rc); + return rc; +} + +static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct qeth_card *card = dev->ml_priv; + struct qeth_arp_cache_entry arp_entry; + enum qeth_arp_process_subcmds arp_cmd; + int rc = 0; + + switch (cmd) { + case SIOC_QETH_ARP_SET_NO_ENTRIES: + if (!capable(CAP_NET_ADMIN)) { + rc = -EPERM; + break; + } + rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue); + break; + case SIOC_QETH_ARP_QUERY_INFO: + if (!capable(CAP_NET_ADMIN)) { + rc = -EPERM; + break; + } + rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data); + break; + case SIOC_QETH_ARP_ADD_ENTRY: + case SIOC_QETH_ARP_REMOVE_ENTRY: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + if (copy_from_user(&arp_entry, rq->ifr_data, sizeof(arp_entry))) + return -EFAULT; + + arp_cmd = (cmd == SIOC_QETH_ARP_ADD_ENTRY) ? + IPA_CMD_ASS_ARP_ADD_ENTRY : + IPA_CMD_ASS_ARP_REMOVE_ENTRY; + return qeth_l3_arp_modify_entry(card, &arp_entry, arp_cmd); + case SIOC_QETH_ARP_FLUSH_CACHE: + if (!capable(CAP_NET_ADMIN)) { + rc = -EPERM; + break; + } + rc = qeth_l3_arp_flush_cache(card); + break; + default: + rc = -EOPNOTSUPP; + } + return rc; +} + +static int qeth_l3_get_cast_type_rcu(struct sk_buff *skb, struct dst_entry *dst, + int ipv) +{ + struct neighbour *n = NULL; + + if (dst) + n = dst_neigh_lookup_skb(dst, skb); + + if (n) { + int cast_type = n->type; + + neigh_release(n); + if ((cast_type == RTN_BROADCAST) || + (cast_type == RTN_MULTICAST) || + (cast_type == RTN_ANYCAST)) + return cast_type; + return RTN_UNICAST; + } + + /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */ + switch (ipv) { + case 4: + if (ipv4_is_lbcast(ip_hdr(skb)->daddr)) + return RTN_BROADCAST; + return ipv4_is_multicast(ip_hdr(skb)->daddr) ? + RTN_MULTICAST : RTN_UNICAST; + case 6: + return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ? + RTN_MULTICAST : RTN_UNICAST; + default: + /* ... 
and MAC address */ + return qeth_get_ether_cast_type(skb); + } +} + +static int qeth_l3_get_cast_type(struct sk_buff *skb) +{ + int ipv = qeth_get_ip_version(skb); + struct dst_entry *dst; + int cast_type; + + rcu_read_lock(); + dst = qeth_dst_check_rcu(skb, ipv); + cast_type = qeth_l3_get_cast_type_rcu(skb, dst, ipv); + rcu_read_unlock(); + + return cast_type; +} + +static u8 qeth_l3_cast_type_to_flag(int cast_type) +{ + if (cast_type == RTN_MULTICAST) + return QETH_CAST_MULTICAST; + if (cast_type == RTN_ANYCAST) + return QETH_CAST_ANYCAST; + if (cast_type == RTN_BROADCAST) + return QETH_CAST_BROADCAST; + return QETH_CAST_UNICAST; +} + +static void qeth_l3_fill_header(struct qeth_qdio_out_q *queue, + struct qeth_hdr *hdr, struct sk_buff *skb, + int ipv, unsigned int data_len) +{ + struct qeth_hdr_layer3 *l3_hdr = &hdr->hdr.l3; + struct vlan_ethhdr *veth = vlan_eth_hdr(skb); + struct qeth_card *card = queue->card; + struct dst_entry *dst; + int cast_type; + + hdr->hdr.l3.length = data_len; + + if (skb_is_gso(skb)) { + hdr->hdr.l3.id = QETH_HEADER_TYPE_L3_TSO; + } else { + hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; + + if (skb->protocol == htons(ETH_P_AF_IUCV)) { + l3_hdr->flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; + l3_hdr->next_hop.addr.s6_addr16[0] = htons(0xfe80); + memcpy(&l3_hdr->next_hop.addr.s6_addr32[2], + iucv_trans_hdr(skb)->destUserID, 8); + return; + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv); + /* some HW requires combined L3+L4 csum offload: */ + if (ipv == 4) + hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ; + } + } + + if (ipv == 4 || IS_IQD(card)) { + /* NETIF_F_HW_VLAN_CTAG_TX */ + if (skb_vlan_tag_present(skb)) { + hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_VLAN_FRAME; + hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb); + } + } else if (veth->h_vlan_proto == htons(ETH_P_8021Q)) { + hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_INCLUDE_VLAN_TAG; + hdr->hdr.l3.vlan_id = ntohs(veth->h_vlan_TCI); + } + + rcu_read_lock(); + dst = qeth_dst_check_rcu(skb, ipv); + + if (IS_IQD(card) && skb_get_queue_mapping(skb) != QETH_IQD_MCAST_TXQ) + cast_type = RTN_UNICAST; + else + cast_type = qeth_l3_get_cast_type_rcu(skb, dst, ipv); + l3_hdr->flags |= qeth_l3_cast_type_to_flag(cast_type); + + if (ipv == 4) { + l3_hdr->next_hop.addr.s6_addr32[3] = + qeth_next_hop_v4_rcu(skb, dst); + } else if (ipv == 6) { + l3_hdr->next_hop.addr = *qeth_next_hop_v6_rcu(skb, dst); + + hdr->hdr.l3.flags |= QETH_HDR_IPV6; + if (!IS_IQD(card)) + hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU; + } else { + /* OSA only: */ + l3_hdr->flags |= QETH_HDR_PASSTHRU; + } + rcu_read_unlock(); +} + +static void qeth_l3_fixup_headers(struct sk_buff *skb) +{ + struct iphdr *iph = ip_hdr(skb); + + /* this is safe, IPv6 traffic takes a different path */ + if (skb->ip_summed == CHECKSUM_PARTIAL) + iph->check = 0; + if (skb_is_gso(skb)) { + iph->tot_len = 0; + tcp_hdr(skb)->check = ~tcp_v4_check(0, iph->saddr, + iph->daddr, 0); + } +} + +static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int ipv) +{ + unsigned int hw_hdr_len; + int rc; + + /* re-use the L2 header area for the HW header: */ + hw_hdr_len = skb_is_gso(skb) ? 
sizeof(struct qeth_hdr_tso) : + sizeof(struct qeth_hdr); + rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN); + if (rc) + return rc; + skb_pull(skb, ETH_HLEN); + + qeth_l3_fixup_headers(skb); + return qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header); +} + +static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + u16 txq = skb_get_queue_mapping(skb); + int ipv = qeth_get_ip_version(skb); + struct qeth_qdio_out_q *queue; + int rc; + + if (!skb_is_gso(skb)) + qdisc_skb_cb(skb)->pkt_len = skb->len; + if (IS_IQD(card)) { + queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)]; + + if (card->options.sniffer) + goto tx_drop; + if ((card->options.cq != QETH_CQ_ENABLED && !ipv) || + (card->options.cq == QETH_CQ_ENABLED && + skb->protocol != htons(ETH_P_AF_IUCV))) + goto tx_drop; + } else { + queue = card->qdio.out_qs[txq]; + } + + if (!(dev->flags & IFF_BROADCAST) && + qeth_l3_get_cast_type(skb) == RTN_BROADCAST) + goto tx_drop; + + if (ipv == 4 || IS_IQD(card)) + rc = qeth_l3_xmit(card, skb, queue, ipv); + else + rc = qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header); + + if (!rc) + return NETDEV_TX_OK; + +tx_drop: + QETH_TXQ_STAT_INC(queue, tx_dropped); + kfree_skb(skb); + return NETDEV_TX_OK; +} + +static void qeth_l3_set_rx_mode(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + schedule_work(&card->rx_mode_work); +} + +/* + * we need NOARP for IPv4 but we want neighbor solicitation for IPv6. Setting + * NOARP on the netdevice is no option because it also turns off neighbor + * solicitation. For IPv4 we install a neighbor_setup function. We don't want + * arp resolution but we want the hard header (packet socket will work + * e.g. tcpdump) + */ +static int qeth_l3_neigh_setup_noarp(struct neighbour *n) +{ + n->nud_state = NUD_NOARP; + memcpy(n->ha, "FAKELL", 6); + n->output = n->ops->connected_output; + return 0; +} + +static int +qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np) +{ + if (np->tbl->family == AF_INET) + np->neigh_setup = qeth_l3_neigh_setup_noarp; + + return 0; +} + +static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + if (vlan_get_protocol(skb) != htons(ETH_P_IP)) + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + return qeth_features_check(skb, dev, features); +} + +static u16 qeth_l3_iqd_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + return qeth_iqd_select_queue(dev, skb, qeth_l3_get_cast_type(skb), + sb_dev); +} + +static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb, + struct net_device *sb_dev) +{ + struct qeth_card *card = dev->ml_priv; + + if (qeth_uses_tx_prio_queueing(card)) + return qeth_get_priority_queue(card, skb); + + return netdev_pick_tx(dev, skb, sb_dev); +} + +static const struct net_device_ops qeth_l3_netdev_ops = { + .ndo_open = qeth_open, + .ndo_stop = qeth_stop, + .ndo_get_stats64 = qeth_get_stats64, + .ndo_start_xmit = qeth_l3_hard_start_xmit, + .ndo_select_queue = qeth_l3_iqd_select_queue, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = qeth_l3_set_rx_mode, + .ndo_do_ioctl = qeth_do_ioctl, + .ndo_fix_features = qeth_fix_features, + .ndo_set_features = qeth_set_features, + .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, + .ndo_tx_timeout = qeth_tx_timeout, +}; + +static const struct net_device_ops qeth_l3_osa_netdev_ops = 
{ + .ndo_open = qeth_open, + .ndo_stop = qeth_stop, + .ndo_get_stats64 = qeth_get_stats64, + .ndo_start_xmit = qeth_l3_hard_start_xmit, + .ndo_features_check = qeth_l3_osa_features_check, + .ndo_select_queue = qeth_l3_osa_select_queue, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = qeth_l3_set_rx_mode, + .ndo_do_ioctl = qeth_do_ioctl, + .ndo_fix_features = qeth_fix_features, + .ndo_set_features = qeth_set_features, + .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, + .ndo_tx_timeout = qeth_tx_timeout, + .ndo_neigh_setup = qeth_l3_neigh_setup, +}; + +static int qeth_l3_setup_netdev(struct qeth_card *card) +{ + struct net_device *dev = card->dev; + unsigned int headroom; + int rc; + + if (IS_OSD(card) || IS_OSX(card)) { + card->dev->netdev_ops = &qeth_l3_osa_netdev_ops; + + /*IPv6 address autoconfiguration stuff*/ + dev->dev_id = qeth_l3_get_unique_id(card, dev->dev_id); + + if (!IS_VM_NIC(card)) { + card->dev->features |= NETIF_F_SG; + card->dev->hw_features |= NETIF_F_TSO | + NETIF_F_RXCSUM | NETIF_F_IP_CSUM; + card->dev->vlan_features |= NETIF_F_TSO | + NETIF_F_RXCSUM | NETIF_F_IP_CSUM; + } + + if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) { + card->dev->hw_features |= NETIF_F_IPV6_CSUM; + card->dev->vlan_features |= NETIF_F_IPV6_CSUM; + } + if (qeth_is_supported6(card, IPA_OUTBOUND_TSO)) { + card->dev->hw_features |= NETIF_F_TSO6; + card->dev->vlan_features |= NETIF_F_TSO6; + } + + /* allow for de-acceleration of NETIF_F_HW_VLAN_CTAG_TX: */ + if (card->dev->hw_features & NETIF_F_TSO6) + headroom = sizeof(struct qeth_hdr_tso) + VLAN_HLEN; + else if (card->dev->hw_features & NETIF_F_TSO) + headroom = sizeof(struct qeth_hdr_tso); + else + headroom = sizeof(struct qeth_hdr) + VLAN_HLEN; + } else if (IS_IQD(card)) { + card->dev->flags |= IFF_NOARP; + card->dev->netdev_ops = &qeth_l3_netdev_ops; + headroom = sizeof(struct qeth_hdr) - ETH_HLEN; + + rc = qeth_l3_iqd_read_initial_mac(card); + if (rc) + return rc; + } else + return -ENODEV; + + card->dev->needed_headroom = headroom; + card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + + netif_keep_dst(card->dev); + if (card->dev->hw_features & (NETIF_F_TSO | NETIF_F_TSO6)) + netif_set_gso_max_size(card->dev, + PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1)); + + netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); + return register_netdev(card->dev); +} + +static const struct device_type qeth_l3_devtype = { + .name = "qeth_layer3", + .groups = qeth_l3_attr_groups, +}; + +static int qeth_l3_probe_device(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + int rc; + + hash_init(card->ip_htable); + mutex_init(&card->ip_lock); + card->cmd_wq = alloc_ordered_workqueue("%s_cmd", 0, + dev_name(&gdev->dev)); + if (!card->cmd_wq) + return -ENOMEM; + + if (gdev->dev.type == &qeth_generic_devtype) { + rc = qeth_l3_create_device_attributes(&gdev->dev); + if (rc) { + destroy_workqueue(card->cmd_wq); + return rc; + } + } + + INIT_WORK(&card->rx_mode_work, qeth_l3_rx_mode_work); + return 0; +} + +static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) +{ + struct qeth_card *card = dev_get_drvdata(&cgdev->dev); + + if (cgdev->dev.type == &qeth_generic_devtype) + qeth_l3_remove_device_attributes(&cgdev->dev); + + qeth_set_allowed_threads(card, 0, 1); + wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); + + if (cgdev->state == CCWGROUP_ONLINE) + 
qeth_set_offline(card, card->discipline, false); + + cancel_work_sync(&card->close_dev_work); + if (card->dev->reg_state == NETREG_REGISTERED) + unregister_netdev(card->dev); + + flush_workqueue(card->cmd_wq); + destroy_workqueue(card->cmd_wq); + qeth_l3_clear_ip_htable(card, 0); + qeth_l3_clear_ipato_list(card); +} + +static int qeth_l3_set_online(struct qeth_card *card, bool carrier_ok) +{ + struct net_device *dev = card->dev; + int rc = 0; + + /* softsetup */ + QETH_CARD_TEXT(card, 2, "softsetp"); + + rc = qeth_l3_setadapter_parms(card); + if (rc) + QETH_CARD_TEXT_(card, 2, "2err%04x", rc); + if (!card->options.sniffer) { + qeth_l3_start_ipassists(card); + + rc = qeth_l3_setrouting_v4(card); + if (rc) + QETH_CARD_TEXT_(card, 2, "4err%04x", rc); + rc = qeth_l3_setrouting_v6(card); + if (rc) + QETH_CARD_TEXT_(card, 2, "5err%04x", rc); + } + + card->state = CARD_STATE_SOFTSETUP; + + qeth_set_allowed_threads(card, 0xffffffff, 0); + qeth_l3_recover_ip(card); + + if (dev->reg_state != NETREG_REGISTERED) { + rc = qeth_l3_setup_netdev(card); + if (rc) + goto err_setup; + + if (carrier_ok) + netif_carrier_on(dev); + } else { + rtnl_lock(); + rc = qeth_set_real_num_tx_queues(card, + qeth_tx_actual_queues(card)); + if (rc) { + rtnl_unlock(); + goto err_set_queues; + } + + if (carrier_ok) + netif_carrier_on(dev); + else + netif_carrier_off(dev); + + netif_device_attach(dev); + qeth_enable_hw_features(dev); + + if (netif_running(dev)) { + local_bh_disable(); + napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); + } + rtnl_unlock(); + } + return 0; + +err_set_queues: +err_setup: + qeth_set_allowed_threads(card, 0, 1); + card->state = CARD_STATE_DOWN; + qeth_l3_clear_ip_htable(card, 1); + return rc; +} + +static void qeth_l3_set_offline(struct qeth_card *card) +{ + qeth_set_allowed_threads(card, 0, 1); + qeth_l3_drain_rx_mode_cache(card); + + if (card->options.sniffer && + (card->info.promisc_mode == SET_PROMISC_MODE_ON)) + qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); + + if (card->state == CARD_STATE_SOFTSETUP) { + card->state = CARD_STATE_DOWN; + qeth_l3_clear_ip_htable(card, 1); + } +} + +/* Returns zero if the command is successfully "consumed" */ +static int qeth_l3_control_event(struct qeth_card *card, + struct qeth_ipa_cmd *cmd) +{ + return 1; +} + +const struct qeth_discipline qeth_l3_discipline = { + .devtype = &qeth_l3_devtype, + .setup = qeth_l3_probe_device, + .remove = qeth_l3_remove_device, + .set_online = qeth_l3_set_online, + .set_offline = qeth_l3_set_offline, + .do_ioctl = qeth_l3_do_ioctl, + .control_event_handler = qeth_l3_control_event, +}; +EXPORT_SYMBOL_GPL(qeth_l3_discipline); + +static int qeth_l3_handle_ip_event(struct qeth_card *card, + struct qeth_ipaddr *addr, + unsigned long event) +{ + switch (event) { + case NETDEV_UP: + qeth_l3_modify_ip(card, addr, true); + return NOTIFY_OK; + case NETDEV_DOWN: + qeth_l3_modify_ip(card, addr, false); + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } +} + +struct qeth_l3_ip_event_work { + struct work_struct work; + struct qeth_card *card; + struct qeth_ipaddr addr; +}; + +#define to_ip_work(w) container_of((w), struct qeth_l3_ip_event_work, work) + +static void qeth_l3_add_ip_worker(struct work_struct *work) +{ + struct qeth_l3_ip_event_work *ip_work = to_ip_work(work); + + qeth_l3_modify_ip(ip_work->card, &ip_work->addr, true); + kfree(work); +} + +static void qeth_l3_delete_ip_worker(struct work_struct *work) +{ + struct qeth_l3_ip_event_work *ip_work = to_ip_work(work); + + 
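+ /* queued on card->cmd_wq by qeth_l3_ip6_event(); the worker owns ip_work and frees it below */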
qeth_l3_modify_ip(ip_work->card, &ip_work->addr, false); + kfree(work); +} + +static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev) +{ + if (is_vlan_dev(dev)) + dev = vlan_dev_real_dev(dev); + if (dev->netdev_ops == &qeth_l3_osa_netdev_ops || + dev->netdev_ops == &qeth_l3_netdev_ops) + return (struct qeth_card *) dev->ml_priv; + return NULL; +} + +static int qeth_l3_ip_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + struct net_device *dev = ifa->ifa_dev->dev; + struct qeth_ipaddr addr; + struct qeth_card *card; + + card = qeth_l3_get_card_from_dev(dev); + if (!card) + return NOTIFY_DONE; + QETH_CARD_TEXT(card, 3, "ipevent"); + + qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4); + addr.u.a4.addr = ifa->ifa_address; + addr.u.a4.mask = ifa->ifa_mask; + + return qeth_l3_handle_ip_event(card, &addr, event); +} + +static struct notifier_block qeth_l3_ip_notifier = { + qeth_l3_ip_event, + NULL, +}; + +static int qeth_l3_ip6_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; + struct net_device *dev = ifa->idev->dev; + struct qeth_l3_ip_event_work *ip_work; + struct qeth_card *card; + + if (event != NETDEV_UP && event != NETDEV_DOWN) + return NOTIFY_DONE; + + card = qeth_l3_get_card_from_dev(dev); + if (!card) + return NOTIFY_DONE; + QETH_CARD_TEXT(card, 3, "ip6event"); + if (!qeth_is_supported(card, IPA_IPV6)) + return NOTIFY_DONE; + + ip_work = kmalloc(sizeof(*ip_work), GFP_ATOMIC); + if (!ip_work) + return NOTIFY_DONE; + + if (event == NETDEV_UP) + INIT_WORK(&ip_work->work, qeth_l3_add_ip_worker); + else + INIT_WORK(&ip_work->work, qeth_l3_delete_ip_worker); + + ip_work->card = card; + qeth_l3_init_ipaddr(&ip_work->addr, QETH_IP_TYPE_NORMAL, + QETH_PROT_IPV6); + ip_work->addr.u.a6.addr = ifa->addr; + ip_work->addr.u.a6.pfxlen = ifa->prefix_len; + + queue_work(card->cmd_wq, &ip_work->work); + return NOTIFY_OK; +} + +static struct notifier_block qeth_l3_ip6_notifier = { + qeth_l3_ip6_event, + NULL, +}; + +static int qeth_l3_register_notifiers(void) +{ + int rc; + + QETH_DBF_TEXT(SETUP, 5, "regnotif"); + rc = register_inetaddr_notifier(&qeth_l3_ip_notifier); + if (rc) + return rc; + rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier); + if (rc) { + unregister_inetaddr_notifier(&qeth_l3_ip_notifier); + return rc; + } + return 0; +} + +static void qeth_l3_unregister_notifiers(void) +{ + QETH_DBF_TEXT(SETUP, 5, "unregnot"); + WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier)); + WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier)); +} + +static int __init qeth_l3_init(void) +{ + pr_info("register layer 3 discipline\n"); + return qeth_l3_register_notifiers(); +} + +static void __exit qeth_l3_exit(void) +{ + qeth_l3_unregister_notifiers(); + pr_info("unregister layer 3 discipline\n"); +} + +module_init(qeth_l3_init); +module_exit(qeth_l3_exit); +MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); +MODULE_DESCRIPTION("qeth layer 3 discipline"); +MODULE_LICENSE("GPL"); diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c new file mode 100644 index 000000000..316f8622f --- /dev/null +++ b/drivers/s390/net/qeth_l3_sys.c @@ -0,0 +1,835 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 
2007 + * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, + * Frank Pavlic <fpavlic@de.ibm.com>, + * Thomas Spatzier <tspat@de.ibm.com>, + * Frank Blaschka <frank.blaschka@de.ibm.com> + */ + +#include <linux/slab.h> +#include <asm/ebcdic.h> +#include <linux/hashtable.h> +#include <linux/inet.h> +#include "qeth_l3.h" + +#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \ +struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store) + +static int qeth_l3_string_to_ipaddr(const char *buf, + enum qeth_prot_versions proto, u8 *addr) +{ + const char *end; + + if ((proto == QETH_PROT_IPV4 && !in4_pton(buf, -1, addr, -1, &end)) || + (proto == QETH_PROT_IPV6 && !in6_pton(buf, -1, addr, -1, &end))) + return -EINVAL; + return 0; +} + +static ssize_t qeth_l3_dev_route_show(struct qeth_card *card, + struct qeth_routing_info *route, char *buf) +{ + switch (route->type) { + case PRIMARY_ROUTER: + return sprintf(buf, "%s\n", "primary router"); + case SECONDARY_ROUTER: + return sprintf(buf, "%s\n", "secondary router"); + case MULTICAST_ROUTER: + if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) + return sprintf(buf, "%s\n", "multicast router+"); + else + return sprintf(buf, "%s\n", "multicast router"); + case PRIMARY_CONNECTOR: + if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) + return sprintf(buf, "%s\n", "primary connector+"); + else + return sprintf(buf, "%s\n", "primary connector"); + case SECONDARY_CONNECTOR: + if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) + return sprintf(buf, "%s\n", "secondary connector+"); + else + return sprintf(buf, "%s\n", "secondary connector"); + default: + return sprintf(buf, "%s\n", "no"); + } +} + +static ssize_t qeth_l3_dev_route4_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_l3_dev_route_show(card, &card->options.route4, buf); +} + +static ssize_t qeth_l3_dev_route_store(struct qeth_card *card, + struct qeth_routing_info *route, enum qeth_prot_versions prot, + const char *buf, size_t count) +{ + enum qeth_routing_types old_route_type = route->type; + int rc = 0; + + mutex_lock(&card->conf_mutex); + if (sysfs_streq(buf, "no_router")) { + route->type = NO_ROUTER; + } else if (sysfs_streq(buf, "primary_connector")) { + route->type = PRIMARY_CONNECTOR; + } else if (sysfs_streq(buf, "secondary_connector")) { + route->type = SECONDARY_CONNECTOR; + } else if (sysfs_streq(buf, "primary_router")) { + route->type = PRIMARY_ROUTER; + } else if (sysfs_streq(buf, "secondary_router")) { + route->type = SECONDARY_ROUTER; + } else if (sysfs_streq(buf, "multicast_router")) { + route->type = MULTICAST_ROUTER; + } else { + rc = -EINVAL; + goto out; + } + if (qeth_card_hw_is_reachable(card) && + (old_route_type != route->type)) { + if (prot == QETH_PROT_IPV4) + rc = qeth_l3_setrouting_v4(card); + else if (prot == QETH_PROT_IPV6) + rc = qeth_l3_setrouting_v6(card); + } +out: + if (rc) + route->type = old_route_type; + mutex_unlock(&card->conf_mutex); + return rc ? 
rc : count; +} + +static ssize_t qeth_l3_dev_route4_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_l3_dev_route_store(card, &card->options.route4, + QETH_PROT_IPV4, buf, count); +} + +static DEVICE_ATTR(route4, 0644, qeth_l3_dev_route4_show, + qeth_l3_dev_route4_store); + +static ssize_t qeth_l3_dev_route6_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_l3_dev_route_show(card, &card->options.route6, buf); +} + +static ssize_t qeth_l3_dev_route6_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_l3_dev_route_store(card, &card->options.route6, + QETH_PROT_IPV6, buf, count); +} + +static DEVICE_ATTR(route6, 0644, qeth_l3_dev_route6_show, + qeth_l3_dev_route6_store); + +static ssize_t qeth_l3_dev_sniffer_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0); +} + +static ssize_t qeth_l3_dev_sniffer_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + int rc = 0; + unsigned long i; + + if (!IS_IQD(card)) + return -EPERM; + if (card->options.cq == QETH_CQ_ENABLED) + return -EPERM; + + mutex_lock(&card->conf_mutex); + if (card->state != CARD_STATE_DOWN) { + rc = -EPERM; + goto out; + } + + rc = kstrtoul(buf, 16, &i); + if (rc) { + rc = -EINVAL; + goto out; + } + switch (i) { + case 0: + card->options.sniffer = i; + break; + case 1: + qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd); + if (card->ssqd.qdioac2 & CHSC_AC2_SNIFFER_AVAILABLE) { + card->options.sniffer = i; + qeth_resize_buffer_pool(card, QETH_IN_BUF_COUNT_MAX); + } else { + rc = -EPERM; + } + + break; + default: + rc = -EINVAL; + } +out: + mutex_unlock(&card->conf_mutex); + return rc ? 
rc : count; +} + +static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show, + qeth_l3_dev_sniffer_store); + +static ssize_t qeth_l3_dev_hsuid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + char tmp_hsuid[9]; + + if (!IS_IQD(card)) + return -EPERM; + + memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid)); + EBCASC(tmp_hsuid, 8); + return sprintf(buf, "%s\n", tmp_hsuid); +} + +static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + int rc = 0; + char *tmp; + + if (!IS_IQD(card)) + return -EPERM; + + mutex_lock(&card->conf_mutex); + if (card->state != CARD_STATE_DOWN) { + rc = -EPERM; + goto out; + } + + if (card->options.sniffer) { + rc = -EPERM; + goto out; + } + + if (card->options.cq == QETH_CQ_NOTAVAILABLE) { + rc = -EPERM; + goto out; + } + + tmp = strsep((char **)&buf, "\n"); + if (strlen(tmp) > 8) { + rc = -EINVAL; + goto out; + } + + if (card->options.hsuid[0]) + /* delete old ip address */ + qeth_l3_modify_hsuid(card, false); + + if (strlen(tmp) == 0) { + /* delete ip address only */ + card->options.hsuid[0] = '\0'; + memcpy(card->dev->perm_addr, card->options.hsuid, 9); + qeth_configure_cq(card, QETH_CQ_DISABLED); + goto out; + } + + if (qeth_configure_cq(card, QETH_CQ_ENABLED)) { + rc = -EPERM; + goto out; + } + + snprintf(card->options.hsuid, sizeof(card->options.hsuid), + "%-8s", tmp); + ASCEBC(card->options.hsuid, 8); + memcpy(card->dev->perm_addr, card->options.hsuid, 9); + + rc = qeth_l3_modify_hsuid(card, true); + +out: + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show, + qeth_l3_dev_hsuid_store); + + +static struct attribute *qeth_l3_device_attrs[] = { + &dev_attr_route4.attr, + &dev_attr_route6.attr, + &dev_attr_sniffer.attr, + &dev_attr_hsuid.attr, + NULL, +}; + +static const struct attribute_group qeth_l3_device_attr_group = { + .attrs = qeth_l3_device_attrs, +}; + +static ssize_t qeth_l3_dev_ipato_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", card->ipato.enabled ? 1 : 0); +} + +static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + bool enable; + int rc = 0; + + mutex_lock(&card->conf_mutex); + if (card->state != CARD_STATE_DOWN) { + rc = -EPERM; + goto out; + } + + mutex_lock(&card->ip_lock); + if (sysfs_streq(buf, "toggle")) { + enable = !card->ipato.enabled; + } else if (kstrtobool(buf, &enable)) { + rc = -EINVAL; + goto unlock_ip; + } + + if (card->ipato.enabled != enable) { + card->ipato.enabled = enable; + qeth_l3_update_ipato(card); + } + +unlock_ip: + mutex_unlock(&card->ip_lock); +out: + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static QETH_DEVICE_ATTR(ipato_enable, enable, 0644, + qeth_l3_dev_ipato_enable_show, + qeth_l3_dev_ipato_enable_store); + +static ssize_t qeth_l3_dev_ipato_invert4_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", card->ipato.invert4 ? 
1 : 0); +} + +static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + bool invert; + int rc = 0; + + mutex_lock(&card->ip_lock); + if (sysfs_streq(buf, "toggle")) { + invert = !card->ipato.invert4; + } else if (kstrtobool(buf, &invert)) { + rc = -EINVAL; + goto out; + } + + if (card->ipato.invert4 != invert) { + card->ipato.invert4 = invert; + qeth_l3_update_ipato(card); + } + +out: + mutex_unlock(&card->ip_lock); + return rc ? rc : count; +} + +static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644, + qeth_l3_dev_ipato_invert4_show, + qeth_l3_dev_ipato_invert4_store); + +static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card, + enum qeth_prot_versions proto) +{ + struct qeth_ipato_entry *ipatoe; + int str_len = 0; + + mutex_lock(&card->ip_lock); + list_for_each_entry(ipatoe, &card->ipato.entries, entry) { + char addr_str[40]; + int entry_len; + + if (ipatoe->proto != proto) + continue; + + entry_len = qeth_l3_ipaddr_to_string(proto, ipatoe->addr, + addr_str); + if (entry_len < 0) + continue; + + /* Append /%mask to the entry: */ + entry_len += 1 + ((proto == QETH_PROT_IPV4) ? 2 : 3); + /* Enough room to format %entry\n into null terminated page? */ + if (entry_len + 1 > PAGE_SIZE - str_len - 1) + break; + + entry_len = scnprintf(buf, PAGE_SIZE - str_len, + "%s/%i\n", addr_str, ipatoe->mask_bits); + str_len += entry_len; + buf += entry_len; + } + mutex_unlock(&card->ip_lock); + + return str_len ? str_len : scnprintf(buf, PAGE_SIZE, "\n"); +} + +static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV4); +} + +static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto, + u8 *addr, unsigned int *mask_bits) +{ + char *sep; + int rc; + + /* Expected input pattern: %addr/%mask */ + sep = strnchr(buf, 40, '/'); + if (!sep) + return -EINVAL; + + /* Terminate the %addr sub-string, and parse it: */ + *sep = '\0'; + rc = qeth_l3_string_to_ipaddr(buf, proto, addr); + if (rc) + return rc; + + rc = kstrtouint(sep + 1, 10, mask_bits); + if (rc) + return rc; + + if (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128)) + return -EINVAL; + + return 0; +} + +static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count, + struct qeth_card *card, enum qeth_prot_versions proto) +{ + struct qeth_ipato_entry *ipatoe; + unsigned int mask_bits; + u8 addr[16]; + int rc = 0; + + rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits); + if (rc) + return rc; + + ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL); + if (!ipatoe) + return -ENOMEM; + + ipatoe->proto = proto; + memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4) ? 4 : 16); + ipatoe->mask_bits = mask_bits; + + rc = qeth_l3_add_ipato_entry(card, ipatoe); + if (rc) + kfree(ipatoe); + + return rc ? 
rc : count; +} + +static ssize_t qeth_l3_dev_ipato_add4_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4); +} + +static QETH_DEVICE_ATTR(ipato_add4, add4, 0644, + qeth_l3_dev_ipato_add4_show, + qeth_l3_dev_ipato_add4_store); + +static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count, + struct qeth_card *card, enum qeth_prot_versions proto) +{ + unsigned int mask_bits; + u8 addr[16]; + int rc = 0; + + rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits); + if (!rc) + rc = qeth_l3_del_ipato_entry(card, proto, addr, mask_bits); + return rc ? rc : count; +} + +static ssize_t qeth_l3_dev_ipato_del4_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4); +} + +static QETH_DEVICE_ATTR(ipato_del4, del4, 0200, NULL, + qeth_l3_dev_ipato_del4_store); + +static ssize_t qeth_l3_dev_ipato_invert6_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return sprintf(buf, "%u\n", card->ipato.invert6 ? 1 : 0); +} + +static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + bool invert; + int rc = 0; + + mutex_lock(&card->ip_lock); + if (sysfs_streq(buf, "toggle")) { + invert = !card->ipato.invert6; + } else if (kstrtobool(buf, &invert)) { + rc = -EINVAL; + goto out; + } + + if (card->ipato.invert6 != invert) { + card->ipato.invert6 = invert; + qeth_l3_update_ipato(card); + } + +out: + mutex_unlock(&card->ip_lock); + return rc ? 
rc : count; +} + +static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644, + qeth_l3_dev_ipato_invert6_show, + qeth_l3_dev_ipato_invert6_store); + + +static ssize_t qeth_l3_dev_ipato_add6_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV6); +} + +static ssize_t qeth_l3_dev_ipato_add6_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6); +} + +static QETH_DEVICE_ATTR(ipato_add6, add6, 0644, + qeth_l3_dev_ipato_add6_show, + qeth_l3_dev_ipato_add6_store); + +static ssize_t qeth_l3_dev_ipato_del6_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6); +} + +static QETH_DEVICE_ATTR(ipato_del6, del6, 0200, NULL, + qeth_l3_dev_ipato_del6_store); + +static struct attribute *qeth_ipato_device_attrs[] = { + &dev_attr_ipato_enable.attr, + &dev_attr_ipato_invert4.attr, + &dev_attr_ipato_add4.attr, + &dev_attr_ipato_del4.attr, + &dev_attr_ipato_invert6.attr, + &dev_attr_ipato_add6.attr, + &dev_attr_ipato_del6.attr, + NULL, +}; + +static const struct attribute_group qeth_device_ipato_group = { + .name = "ipa_takeover", + .attrs = qeth_ipato_device_attrs, +}; + +static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf, + enum qeth_prot_versions proto, + enum qeth_ip_types type) +{ + struct qeth_card *card = dev_get_drvdata(dev); + struct qeth_ipaddr *ipaddr; + int str_len = 0; + int i; + + mutex_lock(&card->ip_lock); + hash_for_each(card->ip_htable, i, ipaddr, hnode) { + char addr_str[40]; + int entry_len; + + if (ipaddr->proto != proto || ipaddr->type != type) + continue; + + entry_len = qeth_l3_ipaddr_to_string(proto, (u8 *)&ipaddr->u, + addr_str); + if (entry_len < 0) + continue; + + /* Enough room to format %addr\n into null terminated page? */ + if (entry_len + 1 > PAGE_SIZE - str_len - 1) + break; + + entry_len = scnprintf(buf, PAGE_SIZE - str_len, "%s\n", + addr_str); + str_len += entry_len; + buf += entry_len; + } + mutex_unlock(&card->ip_lock); + + return str_len ? str_len : scnprintf(buf, PAGE_SIZE, "\n"); +} + +static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4, + QETH_IP_TYPE_VIPA); +} + +static ssize_t qeth_l3_vipa_store(struct device *dev, const char *buf, bool add, + size_t count, enum qeth_prot_versions proto) +{ + struct qeth_card *card = dev_get_drvdata(dev); + u8 addr[16] = {0, }; + int rc; + + rc = qeth_l3_string_to_ipaddr(buf, proto, addr); + if (!rc) + rc = qeth_l3_modify_rxip_vipa(card, add, addr, + QETH_IP_TYPE_VIPA, proto); + return rc ? 
rc : count; +} + +static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return qeth_l3_vipa_store(dev, buf, true, count, QETH_PROT_IPV4); +} + +static QETH_DEVICE_ATTR(vipa_add4, add4, 0644, + qeth_l3_dev_vipa_add4_show, + qeth_l3_dev_vipa_add4_store); + +static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return qeth_l3_vipa_store(dev, buf, false, count, QETH_PROT_IPV4); +} + +static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL, + qeth_l3_dev_vipa_del4_store); + +static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6, + QETH_IP_TYPE_VIPA); +} + +static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return qeth_l3_vipa_store(dev, buf, true, count, QETH_PROT_IPV6); +} + +static QETH_DEVICE_ATTR(vipa_add6, add6, 0644, + qeth_l3_dev_vipa_add6_show, + qeth_l3_dev_vipa_add6_store); + +static ssize_t qeth_l3_dev_vipa_del6_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return qeth_l3_vipa_store(dev, buf, false, count, QETH_PROT_IPV6); +} + +static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL, + qeth_l3_dev_vipa_del6_store); + +static struct attribute *qeth_vipa_device_attrs[] = { + &dev_attr_vipa_add4.attr, + &dev_attr_vipa_del4.attr, + &dev_attr_vipa_add6.attr, + &dev_attr_vipa_del6.attr, + NULL, +}; + +static const struct attribute_group qeth_device_vipa_group = { + .name = "vipa", + .attrs = qeth_vipa_device_attrs, +}; + +static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4, + QETH_IP_TYPE_RXIP); +} + +static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto, + u8 *addr) +{ + __be32 ipv4_addr; + struct in6_addr ipv6_addr; + + if (qeth_l3_string_to_ipaddr(buf, proto, addr)) { + return -EINVAL; + } + if (proto == QETH_PROT_IPV4) { + memcpy(&ipv4_addr, addr, sizeof(ipv4_addr)); + if (ipv4_is_multicast(ipv4_addr)) { + QETH_DBF_MESSAGE(2, "multicast rxip not supported.\n"); + return -EINVAL; + } + } else if (proto == QETH_PROT_IPV6) { + memcpy(&ipv6_addr, addr, sizeof(ipv6_addr)); + if (ipv6_addr_is_multicast(&ipv6_addr)) { + QETH_DBF_MESSAGE(2, "multicast rxip not supported.\n"); + return -EINVAL; + } + } + + return 0; +} + +static ssize_t qeth_l3_rxip_store(struct device *dev, const char *buf, bool add, + size_t count, enum qeth_prot_versions proto) +{ + struct qeth_card *card = dev_get_drvdata(dev); + u8 addr[16] = {0, }; + int rc; + + rc = qeth_l3_parse_rxipe(buf, proto, addr); + if (!rc) + rc = qeth_l3_modify_rxip_vipa(card, add, addr, + QETH_IP_TYPE_RXIP, proto); + return rc ? 
rc : count; +} + +static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return qeth_l3_rxip_store(dev, buf, true, count, QETH_PROT_IPV4); +} + +static QETH_DEVICE_ATTR(rxip_add4, add4, 0644, + qeth_l3_dev_rxip_add4_show, + qeth_l3_dev_rxip_add4_store); + +static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return qeth_l3_rxip_store(dev, buf, false, count, QETH_PROT_IPV4); +} + +static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL, + qeth_l3_dev_rxip_del4_store); + +static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6, + QETH_IP_TYPE_RXIP); +} + +static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return qeth_l3_rxip_store(dev, buf, true, count, QETH_PROT_IPV6); +} + +static QETH_DEVICE_ATTR(rxip_add6, add6, 0644, + qeth_l3_dev_rxip_add6_show, + qeth_l3_dev_rxip_add6_store); + +static ssize_t qeth_l3_dev_rxip_del6_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + return qeth_l3_rxip_store(dev, buf, false, count, QETH_PROT_IPV6); +} + +static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL, + qeth_l3_dev_rxip_del6_store); + +static struct attribute *qeth_rxip_device_attrs[] = { + &dev_attr_rxip_add4.attr, + &dev_attr_rxip_del4.attr, + &dev_attr_rxip_add6.attr, + &dev_attr_rxip_del6.attr, + NULL, +}; + +static const struct attribute_group qeth_device_rxip_group = { + .name = "rxip", + .attrs = qeth_rxip_device_attrs, +}; + +static const struct attribute_group *qeth_l3_only_attr_groups[] = { + &qeth_l3_device_attr_group, + &qeth_device_ipato_group, + &qeth_device_vipa_group, + &qeth_device_rxip_group, + NULL, +}; + +int qeth_l3_create_device_attributes(struct device *dev) +{ + return sysfs_create_groups(&dev->kobj, qeth_l3_only_attr_groups); +} + +void qeth_l3_remove_device_attributes(struct device *dev) +{ + sysfs_remove_groups(&dev->kobj, qeth_l3_only_attr_groups); +} + +const struct attribute_group *qeth_l3_attr_groups[] = { + &qeth_device_attr_group, + &qeth_device_blkt_group, + /* l3 specific, see qeth_l3_only_attr_groups: */ + &qeth_l3_device_attr_group, + &qeth_device_ipato_group, + &qeth_device_vipa_group, + &qeth_device_rxip_group, + NULL, +}; diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c new file mode 100644 index 000000000..c84ec2fbf --- /dev/null +++ b/drivers/s390/net/smsgiucv.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * IUCV special message driver + * + * Copyright IBM Corp. 
2003, 2009 + * + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <net/iucv/iucv.h> +#include <asm/cpcmd.h> +#include <asm/ebcdic.h> +#include "smsgiucv.h" + +struct smsg_callback { + struct list_head list; + const char *prefix; + int len; + void (*callback)(const char *from, char *str); +}; + +MODULE_AUTHOR + ("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)"); +MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver"); + +static struct iucv_path *smsg_path; + +static DEFINE_SPINLOCK(smsg_list_lock); +static LIST_HEAD(smsg_list); + +static int smsg_path_pending(struct iucv_path *, u8 *, u8 *); +static void smsg_message_pending(struct iucv_path *, struct iucv_message *); + +static struct iucv_handler smsg_handler = { + .path_pending = smsg_path_pending, + .message_pending = smsg_message_pending, +}; + +static int smsg_path_pending(struct iucv_path *path, u8 *ipvmid, u8 *ipuser) +{ + if (strncmp(ipvmid, "*MSG ", 8) != 0) + return -EINVAL; + /* Path pending from *MSG. */ + return iucv_path_accept(path, &smsg_handler, "SMSGIUCV ", NULL); +} + +static void smsg_message_pending(struct iucv_path *path, + struct iucv_message *msg) +{ + struct smsg_callback *cb; + unsigned char *buffer; + unsigned char sender[9]; + int rc, i; + + buffer = kmalloc(msg->length + 1, GFP_ATOMIC | GFP_DMA); + if (!buffer) { + iucv_message_reject(path, msg); + return; + } + rc = iucv_message_receive(path, msg, 0, buffer, msg->length, NULL); + if (rc == 0) { + buffer[msg->length] = 0; + EBCASC(buffer, msg->length); + memcpy(sender, buffer, 8); + sender[8] = 0; + /* Remove trailing whitespace from the sender name. 
*/ + for (i = 7; i >= 0; i--) { + if (sender[i] != ' ' && sender[i] != '\t') + break; + sender[i] = 0; + } + spin_lock(&smsg_list_lock); + list_for_each_entry(cb, &smsg_list, list) + if (strncmp(buffer + 8, cb->prefix, cb->len) == 0) { + cb->callback(sender, buffer + 8); + break; + } + spin_unlock(&smsg_list_lock); + } + kfree(buffer); +} + +int smsg_register_callback(const char *prefix, + void (*callback)(const char *from, char *str)) +{ + struct smsg_callback *cb; + + cb = kmalloc(sizeof(struct smsg_callback), GFP_KERNEL); + if (!cb) + return -ENOMEM; + cb->prefix = prefix; + cb->len = strlen(prefix); + cb->callback = callback; + spin_lock_bh(&smsg_list_lock); + list_add_tail(&cb->list, &smsg_list); + spin_unlock_bh(&smsg_list_lock); + return 0; +} + +void smsg_unregister_callback(const char *prefix, + void (*callback)(const char *from, + char *str)) +{ + struct smsg_callback *cb, *tmp; + + spin_lock_bh(&smsg_list_lock); + cb = NULL; + list_for_each_entry(tmp, &smsg_list, list) + if (tmp->callback == callback && + strcmp(tmp->prefix, prefix) == 0) { + cb = tmp; + list_del(&cb->list); + break; + } + spin_unlock_bh(&smsg_list_lock); + kfree(cb); +} + +static struct device_driver smsg_driver = { + .owner = THIS_MODULE, + .name = SMSGIUCV_DRV_NAME, + .bus = &iucv_bus, +}; + +static void __exit smsg_exit(void) +{ + cpcmd("SET SMSG OFF", NULL, 0, NULL); + iucv_unregister(&smsg_handler, 1); + driver_unregister(&smsg_driver); +} + +static int __init smsg_init(void) +{ + int rc; + + if (!MACHINE_IS_VM) { + rc = -EPROTONOSUPPORT; + goto out; + } + rc = driver_register(&smsg_driver); + if (rc != 0) + goto out; + rc = iucv_register(&smsg_handler, 1); + if (rc) + goto out_driver; + smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL); + if (!smsg_path) { + rc = -ENOMEM; + goto out_register; + } + rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", + NULL, NULL, NULL); + if (rc) + goto out_free_path; + + cpcmd("SET SMSG IUCV", NULL, 0, NULL); + return 0; + +out_free_path: + iucv_path_free(smsg_path); + smsg_path = NULL; +out_register: + iucv_unregister(&smsg_handler, 1); +out_driver: + driver_unregister(&smsg_driver); +out: + return rc; +} + +module_init(smsg_init); +module_exit(smsg_exit); +MODULE_LICENSE("GPL"); + +EXPORT_SYMBOL(smsg_register_callback); +EXPORT_SYMBOL(smsg_unregister_callback); diff --git a/drivers/s390/net/smsgiucv.h b/drivers/s390/net/smsgiucv.h new file mode 100644 index 000000000..a0d6c6130 --- /dev/null +++ b/drivers/s390/net/smsgiucv.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * IUCV special message driver + * + * Copyright IBM Corp. 2003 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + */ + +#define SMSGIUCV_DRV_NAME "SMSGIUCV" + +int smsg_register_callback(const char *, + void (*)(const char *, char *)); +void smsg_unregister_callback(const char *, + void (*)(const char *, char *)); + diff --git a/drivers/s390/net/smsgiucv_app.c b/drivers/s390/net/smsgiucv_app.c new file mode 100644 index 000000000..0a263999f --- /dev/null +++ b/drivers/s390/net/smsgiucv_app.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Deliver z/VM CP special messages (SMSG) as uevents. + * + * The driver registers for z/VM CP special messages with the + * "APP" prefix. Incoming messages are delivered to user space + * as uevents. + * + * Copyright IBM Corp. 
2010 + * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> + * + */ +#define KMSG_COMPONENT "smsgiucv_app" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/ctype.h> +#include <linux/err.h> +#include <linux/device.h> +#include <linux/list.h> +#include <linux/kobject.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <net/iucv/iucv.h> +#include "smsgiucv.h" + +/* prefix used for SMSG registration */ +#define SMSG_PREFIX "APP" + +/* SMSG related uevent environment variables */ +#define ENV_SENDER_STR "SMSG_SENDER=" +#define ENV_SENDER_LEN (strlen(ENV_SENDER_STR) + 8 + 1) +#define ENV_PREFIX_STR "SMSG_ID=" +#define ENV_PREFIX_LEN (strlen(ENV_PREFIX_STR) + \ + strlen(SMSG_PREFIX) + 1) +#define ENV_TEXT_STR "SMSG_TEXT=" +#define ENV_TEXT_LEN(msg) (strlen(ENV_TEXT_STR) + strlen((msg)) + 1) + +/* z/VM user ID which is permitted to send SMSGs + * If the value is undefined or empty (""), special messages are + * accepted from any z/VM user ID. */ +static char *sender; +module_param(sender, charp, 0400); +MODULE_PARM_DESC(sender, "z/VM user ID from which CP SMSGs are accepted"); + +/* SMSG device representation */ +static struct device *smsg_app_dev; + +/* list element for queuing received messages for delivery */ +struct smsg_app_event { + struct list_head list; + char *buf; + char *envp[4]; +}; + +/* queue for outgoing uevents */ +static LIST_HEAD(smsg_event_queue); +static DEFINE_SPINLOCK(smsg_event_queue_lock); + +static void smsg_app_event_free(struct smsg_app_event *ev) +{ + kfree(ev->buf); + kfree(ev); +} + +static struct smsg_app_event *smsg_app_event_alloc(const char *from, + const char *msg) +{ + struct smsg_app_event *ev; + + ev = kzalloc(sizeof(*ev), GFP_ATOMIC); + if (!ev) + return NULL; + + ev->buf = kzalloc(ENV_SENDER_LEN + ENV_PREFIX_LEN + + ENV_TEXT_LEN(msg), GFP_ATOMIC); + if (!ev->buf) { + kfree(ev); + return NULL; + } + + /* setting up environment pointers into buf */ + ev->envp[0] = ev->buf; + ev->envp[1] = ev->envp[0] + ENV_SENDER_LEN; + ev->envp[2] = ev->envp[1] + ENV_PREFIX_LEN; + ev->envp[3] = NULL; + + /* setting up environment: sender, prefix name, and message text */ + snprintf(ev->envp[0], ENV_SENDER_LEN, ENV_SENDER_STR "%s", from); + snprintf(ev->envp[1], ENV_PREFIX_LEN, ENV_PREFIX_STR "%s", SMSG_PREFIX); + snprintf(ev->envp[2], ENV_TEXT_LEN(msg), ENV_TEXT_STR "%s", msg); + + return ev; +} + +static void smsg_event_work_fn(struct work_struct *work) +{ + LIST_HEAD(event_queue); + struct smsg_app_event *p, *n; + struct device *dev; + + dev = get_device(smsg_app_dev); + if (!dev) + return; + + spin_lock_bh(&smsg_event_queue_lock); + list_splice_init(&smsg_event_queue, &event_queue); + spin_unlock_bh(&smsg_event_queue_lock); + + list_for_each_entry_safe(p, n, &event_queue, list) { + list_del(&p->list); + kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, p->envp); + smsg_app_event_free(p); + } + + put_device(dev); +} +static DECLARE_WORK(smsg_event_work, smsg_event_work_fn); + +static void smsg_app_callback(const char *from, char *msg) +{ + struct smsg_app_event *se; + + /* check if the originating z/VM user ID matches + * the configured sender. 
*/ + if (sender && strlen(sender) > 0 && strcmp(from, sender) != 0) + return; + + /* get start of message text (skip prefix and leading blanks) */ + msg += strlen(SMSG_PREFIX); + while (*msg && isspace(*msg)) + msg++; + if (*msg == '\0') + return; + + /* allocate event list element and its environment */ + se = smsg_app_event_alloc(from, msg); + if (!se) + return; + + /* queue event and schedule work function */ + spin_lock(&smsg_event_queue_lock); + list_add_tail(&se->list, &smsg_event_queue); + spin_unlock(&smsg_event_queue_lock); + + schedule_work(&smsg_event_work); + return; +} + +static int __init smsgiucv_app_init(void) +{ + struct device_driver *smsgiucv_drv; + int rc; + + if (!MACHINE_IS_VM) + return -ENODEV; + + smsg_app_dev = kzalloc(sizeof(*smsg_app_dev), GFP_KERNEL); + if (!smsg_app_dev) + return -ENOMEM; + + smsgiucv_drv = driver_find(SMSGIUCV_DRV_NAME, &iucv_bus); + if (!smsgiucv_drv) { + kfree(smsg_app_dev); + return -ENODEV; + } + + rc = dev_set_name(smsg_app_dev, KMSG_COMPONENT); + if (rc) { + kfree(smsg_app_dev); + goto fail; + } + smsg_app_dev->bus = &iucv_bus; + smsg_app_dev->parent = iucv_root; + smsg_app_dev->release = (void (*)(struct device *)) kfree; + smsg_app_dev->driver = smsgiucv_drv; + rc = device_register(smsg_app_dev); + if (rc) { + put_device(smsg_app_dev); + goto fail; + } + + /* convert sender to uppercase characters */ + if (sender) { + int len = strlen(sender); + while (len--) + sender[len] = toupper(sender[len]); + } + + /* register with the smsgiucv device driver */ + rc = smsg_register_callback(SMSG_PREFIX, smsg_app_callback); + if (rc) { + device_unregister(smsg_app_dev); + goto fail; + } + + rc = 0; +fail: + return rc; +} +module_init(smsgiucv_app_init); + +static void __exit smsgiucv_app_exit(void) +{ + /* unregister callback */ + smsg_unregister_callback(SMSG_PREFIX, smsg_app_callback); + + /* cancel pending work and flush any queued event work */ + cancel_work_sync(&smsg_event_work); + smsg_event_work_fn(&smsg_event_work); + + device_unregister(smsg_app_dev); +} +module_exit(smsgiucv_app_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Deliver z/VM CP SMSG as uevents"); +MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>"); |