author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-06 01:02:30 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-06 01:02:30 +0000
commit     76cb841cb886eef6b3bee341a2266c76578724ad (patch)
tree       f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /drivers/s390/net
parent     Initial commit. (diff)
Adding upstream version 4.19.249.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/s390/net')
32 files changed, 31379 insertions, 0 deletions
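
The diff below adds the S/390 network driver Kconfig and Makefile together with the ctcm debug and channel state machine sources. As a minimal sketch of how the debug facility defined in ctcm_dbug.c and ctcm_dbug.h (further down in this diff) is meant to be used, and not as code that is part of this commit: the six dbf views are registered once, trace records are written through the CTCM_DBF_TEXT/CTCM_DBF_TEXT_ macros (which route to debug_text_event() and ctcm_dbf_longtext()), and the views are unregistered on teardown. The module wrapper and the names ctcm_example_init/ctcm_example_exit are hypothetical and serve only as illustration.

/* Illustrative sketch only; not part of this commit.
 * Registers the ctcm dbf views at init, emits one formatted trace
 * record, and unregisters the views on exit.
 */
#include <linux/module.h>
#include "ctcm_dbug.h"

static int __init ctcm_example_init(void)
{
	int rc;

	rc = ctcm_register_dbf_views();	/* allocates the six dbf areas */
	if (rc)
		return rc;		/* -ENOMEM if debug_register() failed */

	/* expands to ctcm_dbf_longtext(CTCM_DBF_SETUP, CTC_DBF_INFO, ...) */
	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO,
		       "%s: debug views ready", CTCM_FUNTAIL);
	return 0;
}

static void __exit ctcm_example_exit(void)
{
	CTCM_DBF_TEXT(SETUP, CTC_DBF_INFO, "exit");
	ctcm_unregister_dbf_views();
}

module_init(ctcm_example_init);
module_exit(ctcm_example_exit);
MODULE_LICENSE("GPL");

On a running system the resulting records are typically readable through the s390 debug feature entries under /sys/kernel/debug/s390dbf/ (ctc_setup, ctc_trace, and so on), filtered by the level set via debug_set_level().
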
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
new file mode 100644
index 000000000..7c5a25ddf
--- /dev/null
+++ b/drivers/s390/net/Kconfig
@@ -0,0 +1,108 @@
+# SPDX-License-Identifier: GPL-2.0
+menu "S/390 network device drivers"
+	depends on NETDEVICES && S390
+
+config LCS
+	def_tristate m
+	prompt "Lan Channel Station Interface"
+	depends on CCW && NETDEVICES && (ETHERNET || FDDI)
+	help
+	  Select this option if you want to use LCS networking on IBM System z.
+	  This device driver supports FDDI (IEEE 802.7) and Ethernet.
+	  To compile as a module, choose M. The module name is lcs.
+	  If you do not know what it is, it's safe to choose Y.
+
+config CTCM
+	def_tristate m
+	prompt "CTC and MPC SNA device support"
+	depends on CCW && NETDEVICES
+	help
+	  Select this option if you want to use channel-to-channel
+	  point-to-point networking on IBM System z.
+	  This device driver supports real CTC coupling using ESCON.
+	  It also supports virtual CTCs when running under VM.
+	  This driver also supports channel-to-channel MPC SNA devices.
+	  MPC is an SNA protocol device used by Communication Server for Linux.
+	  To compile as a module, choose M. The module name is ctcm.
+	  To compile into the kernel, choose Y.
+	  If you do not need any channel-to-channel connection, choose N.
+
+config NETIUCV
+	def_tristate m
+	prompt "IUCV network device support (VM only)"
+	depends on IUCV && NETDEVICES
+	help
+	  Select this option if you want to use inter-user communication
+	  vehicle networking under VM or VIF. It enables a fast communication
+	  link between VM guests. Using ifconfig a point-to-point connection
+	  can be established to the Linux on IBM System z
+	  running on the other VM guest. To compile as a module, choose M.
+	  The module name is netiucv. If unsure, choose Y.
+
+config SMSGIUCV
+	def_tristate m
+	prompt "IUCV special message support (VM only)"
+	depends on IUCV
+	help
+	  Select this option if you want to be able to receive SMSG messages
+	  from other VM guest systems.
+
+config SMSGIUCV_EVENT
+	def_tristate m
+	prompt "Deliver IUCV special messages as uevents (VM only)"
+	depends on SMSGIUCV
+	help
+	  Select this option to deliver CP special messages (SMSGs) as
+	  uevents. The driver handles only those special messages that
+	  start with "APP".
+
+	  To compile as a module, choose M. The module name is "smsgiucv_app".
+
+config QETH
+	def_tristate y
+	prompt "Gigabit Ethernet device support"
+	depends on CCW && NETDEVICES && IP_MULTICAST && QDIO && ETHERNET
+	help
+	  This driver supports the IBM System z OSA Express adapters
+	  in QDIO mode (all media types), HiperSockets interfaces and z/VM
+	  virtual NICs for Guest LAN and VSWITCH.
+
+	  For details please refer to the documentation provided by IBM at
+	  <http://www.ibm.com/developerworks/linux/linux390>
+
+	  To compile this driver as a module, choose M.
+	  The module name is qeth.
+
+config QETH_L2
+	def_tristate y
+	prompt "qeth layer 2 device support"
+	depends on QETH
+	help
+	  Select this option to be able to run qeth devices in layer 2 mode.
+	  To compile as a module, choose M. The module name is qeth_l2.
+	  If unsure, choose y.
+
+config QETH_L3
+	def_tristate y
+	prompt "qeth layer 3 device support"
+	depends on QETH
+	help
+	  Select this option to be able to run qeth devices in layer 3 mode.
+	  To compile as a module choose M. The module name is qeth_l3.
+	  If unsure, choose Y.
+
+config CCWGROUP
+	tristate
+	default (LCS || CTCM || QETH)
+
+config ISM
+	tristate "Support for ISM vPCI Adapter"
+	depends on PCI && SMC
+	default n
+	help
+	  Select this option if you want to use the Internal Shared Memory
+	  vPCI Adapter.
+
+	  To compile as a module choose M. The module name is ism.
+	  If unsure, choose N.
+endmenu
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
new file mode 100644
index 000000000..f2d6bbe57
--- /dev/null
+++ b/drivers/s390/net/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# S/390 network devices
+#
+
+ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o
+obj-$(CONFIG_CTCM) += ctcm.o fsm.o
+obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
+obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
+obj-$(CONFIG_SMSGIUCV_EVENT) += smsgiucv_app.o
+obj-$(CONFIG_LCS) += lcs.o
+qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
+obj-$(CONFIG_QETH) += qeth.o
+qeth_l2-y += qeth_l2_main.o qeth_l2_sys.o
+obj-$(CONFIG_QETH_L2) += qeth_l2.o
+qeth_l3-y += qeth_l3_main.o qeth_l3_sys.o
+obj-$(CONFIG_QETH_L3) += qeth_l3.o
+
+ism-y := ism_drv.o
+obj-$(CONFIG_ISM) += ism.o
diff --git a/drivers/s390/net/ctcm_dbug.c b/drivers/s390/net/ctcm_dbug.c
new file mode 100644
index 000000000..f7ec51db3
--- /dev/null
+++ b/drivers/s390/net/ctcm_dbug.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2001, 2007
+ * Authors:	Peter Tiedemann (ptiedem@de.ibm.com)
+ *
+ */
+
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/ctype.h>
+#include <linux/sysctl.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include "ctcm_dbug.h"
+
+/*
+ * Debug Facility Stuff
+ */
+
+struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS] = {
+	[CTCM_DBF_SETUP]	= {"ctc_setup", 8, 1, 64, CTC_DBF_INFO, NULL},
+	[CTCM_DBF_ERROR]	= {"ctc_error", 8, 1, 64, CTC_DBF_ERROR, NULL},
+	[CTCM_DBF_TRACE]	= {"ctc_trace", 8, 1, 64, CTC_DBF_ERROR, NULL},
+	[CTCM_DBF_MPC_SETUP]	= {"mpc_setup", 8, 1, 80, CTC_DBF_INFO, NULL},
+	[CTCM_DBF_MPC_ERROR]	= {"mpc_error", 8, 1, 80, CTC_DBF_ERROR, NULL},
+	[CTCM_DBF_MPC_TRACE]	= {"mpc_trace", 8, 1, 80, CTC_DBF_ERROR, NULL},
+};
+
+void ctcm_unregister_dbf_views(void)
+{
+	int x;
+	for (x = 0; x < CTCM_DBF_INFOS; x++) {
+		debug_unregister(ctcm_dbf[x].id);
+		ctcm_dbf[x].id = NULL;
+	}
+}
+
+int ctcm_register_dbf_views(void)
+{
+	int x;
+	for (x = 0; x < CTCM_DBF_INFOS; x++) {
+		/* register the areas */
+		ctcm_dbf[x].id = debug_register(ctcm_dbf[x].name,
+						ctcm_dbf[x].pages,
+						ctcm_dbf[x].areas,
+						ctcm_dbf[x].len);
+		if (ctcm_dbf[x].id == NULL) {
+			ctcm_unregister_dbf_views();
+			return -ENOMEM;
+		}
+
+		/* register a view */
+		debug_register_view(ctcm_dbf[x].id, &debug_hex_ascii_view);
+		/* set a passing level */
+		debug_set_level(ctcm_dbf[x].id, ctcm_dbf[x].level);
+	}
+
+	return 0;
+}
+
+void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *fmt, ...)
+{
+	char dbf_txt_buf[64];
+	va_list args;
+
+	if (!debug_level_enabled(ctcm_dbf[dbf_nix].id, level))
+		return;
+	va_start(args, fmt);
+	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
+	va_end(args);
+
+	debug_text_event(ctcm_dbf[dbf_nix].id, level, dbf_txt_buf);
+}
+
diff --git a/drivers/s390/net/ctcm_dbug.h b/drivers/s390/net/ctcm_dbug.h
new file mode 100644
index 000000000..675575ef1
--- /dev/null
+++ b/drivers/s390/net/ctcm_dbug.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp.
2001, 2007 + * Authors: Peter Tiedemann (ptiedem@de.ibm.com) + * + */ + +#ifndef _CTCM_DBUG_H_ +#define _CTCM_DBUG_H_ + +/* + * Debug Facility stuff + */ + +#include <asm/debug.h> + +#ifdef DEBUG + #define do_debug 1 +#else + #define do_debug 0 +#endif +#ifdef DEBUGCCW + #define do_debug_ccw 1 + #define DEBUGDATA 1 +#else + #define do_debug_ccw 0 +#endif +#ifdef DEBUGDATA + #define do_debug_data 1 +#else + #define do_debug_data 0 +#endif + +/* define dbf debug levels similar to kernel msg levels */ +#define CTC_DBF_ALWAYS 0 /* always print this */ +#define CTC_DBF_EMERG 0 /* system is unusable */ +#define CTC_DBF_ALERT 1 /* action must be taken immediately */ +#define CTC_DBF_CRIT 2 /* critical conditions */ +#define CTC_DBF_ERROR 3 /* error conditions */ +#define CTC_DBF_WARN 4 /* warning conditions */ +#define CTC_DBF_NOTICE 5 /* normal but significant condition */ +#define CTC_DBF_INFO 5 /* informational */ +#define CTC_DBF_DEBUG 6 /* debug-level messages */ + +enum ctcm_dbf_names { + CTCM_DBF_SETUP, + CTCM_DBF_ERROR, + CTCM_DBF_TRACE, + CTCM_DBF_MPC_SETUP, + CTCM_DBF_MPC_ERROR, + CTCM_DBF_MPC_TRACE, + CTCM_DBF_INFOS /* must be last element */ +}; + +struct ctcm_dbf_info { + char name[DEBUG_MAX_NAME_LEN]; + int pages; + int areas; + int len; + int level; + debug_info_t *id; +}; + +extern struct ctcm_dbf_info ctcm_dbf[CTCM_DBF_INFOS]; + +int ctcm_register_dbf_views(void); +void ctcm_unregister_dbf_views(void); +void ctcm_dbf_longtext(enum ctcm_dbf_names dbf_nix, int level, char *text, ...); + +static inline const char *strtail(const char *s, int n) +{ + int l = strlen(s); + return (l > n) ? s + (l - n) : s; +} + +#define CTCM_FUNTAIL strtail((char *)__func__, 16) + +#define CTCM_DBF_TEXT(name, level, text) \ + do { \ + debug_text_event(ctcm_dbf[CTCM_DBF_##name].id, level, text); \ + } while (0) + +#define CTCM_DBF_HEX(name, level, addr, len) \ + do { \ + debug_event(ctcm_dbf[CTCM_DBF_##name].id, \ + level, (void *)(addr), len); \ + } while (0) + +#define CTCM_DBF_TEXT_(name, level, text...) \ + ctcm_dbf_longtext(CTCM_DBF_##name, level, text) + +/* + * cat : one of {setup, mpc_setup, trace, mpc_trace, error, mpc_error}. + * dev : netdevice with valid name field. + * text: any text string. + */ +#define CTCM_DBF_DEV_NAME(cat, dev, text) \ + do { \ + CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%s) :- %s", \ + CTCM_FUNTAIL, dev->name, text); \ + } while (0) + +#define MPC_DBF_DEV_NAME(cat, dev, text) \ + do { \ + CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%s) := %s", \ + CTCM_FUNTAIL, dev->name, text); \ + } while (0) + +#define CTCMY_DBF_DEV_NAME(cat, dev, text) \ + do { \ + if (IS_MPCDEV(dev)) \ + MPC_DBF_DEV_NAME(cat, dev, text); \ + else \ + CTCM_DBF_DEV_NAME(cat, dev, text); \ + } while (0) + +/* + * cat : one of {setup, mpc_setup, trace, mpc_trace, error, mpc_error}. + * dev : netdevice. + * text: any text string. 
+ */ +#define CTCM_DBF_DEV(cat, dev, text) \ + do { \ + CTCM_DBF_TEXT_(cat, CTC_DBF_INFO, "%s(%p) :-: %s", \ + CTCM_FUNTAIL, dev, text); \ + } while (0) + +#define MPC_DBF_DEV(cat, dev, text) \ + do { \ + CTCM_DBF_TEXT_(MPC_##cat, CTC_DBF_INFO, "%s(%p) :=: %s", \ + CTCM_FUNTAIL, dev, text); \ + } while (0) + +#define CTCMY_DBF_DEV(cat, dev, text) \ + do { \ + if (IS_MPCDEV(dev)) \ + MPC_DBF_DEV(cat, dev, text); \ + else \ + CTCM_DBF_DEV(cat, dev, text); \ + } while (0) + +#endif diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c new file mode 100644 index 000000000..1b4ee570b --- /dev/null +++ b/drivers/s390/net/ctcm_fsms.c @@ -0,0 +1,2300 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2001, 2007 + * Authors: Fritz Elfert (felfert@millenux.com) + * Peter Tiedemann (ptiedem@de.ibm.com) + * MPC additions : + * Belinda Thompson (belindat@us.ibm.com) + * Andy Richter (richtera@us.ibm.com) + */ + +#undef DEBUG +#undef DEBUGDATA +#undef DEBUGCCW + +#define KMSG_COMPONENT "ctcm" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/bitops.h> + +#include <linux/signal.h> +#include <linux/string.h> + +#include <linux/ip.h> +#include <linux/if_arp.h> +#include <linux/tcp.h> +#include <linux/skbuff.h> +#include <linux/ctype.h> +#include <net/dst.h> + +#include <linux/io.h> +#include <asm/ccwdev.h> +#include <asm/ccwgroup.h> +#include <linux/uaccess.h> + +#include <asm/idals.h> + +#include "fsm.h" + +#include "ctcm_dbug.h" +#include "ctcm_main.h" +#include "ctcm_fsms.h" + +const char *dev_state_names[] = { + [DEV_STATE_STOPPED] = "Stopped", + [DEV_STATE_STARTWAIT_RXTX] = "StartWait RXTX", + [DEV_STATE_STARTWAIT_RX] = "StartWait RX", + [DEV_STATE_STARTWAIT_TX] = "StartWait TX", + [DEV_STATE_STOPWAIT_RXTX] = "StopWait RXTX", + [DEV_STATE_STOPWAIT_RX] = "StopWait RX", + [DEV_STATE_STOPWAIT_TX] = "StopWait TX", + [DEV_STATE_RUNNING] = "Running", +}; + +const char *dev_event_names[] = { + [DEV_EVENT_START] = "Start", + [DEV_EVENT_STOP] = "Stop", + [DEV_EVENT_RXUP] = "RX up", + [DEV_EVENT_TXUP] = "TX up", + [DEV_EVENT_RXDOWN] = "RX down", + [DEV_EVENT_TXDOWN] = "TX down", + [DEV_EVENT_RESTART] = "Restart", +}; + +const char *ctc_ch_event_names[] = { + [CTC_EVENT_IO_SUCCESS] = "ccw_device success", + [CTC_EVENT_IO_EBUSY] = "ccw_device busy", + [CTC_EVENT_IO_ENODEV] = "ccw_device enodev", + [CTC_EVENT_IO_UNKNOWN] = "ccw_device unknown", + [CTC_EVENT_ATTNBUSY] = "Status ATTN & BUSY", + [CTC_EVENT_ATTN] = "Status ATTN", + [CTC_EVENT_BUSY] = "Status BUSY", + [CTC_EVENT_UC_RCRESET] = "Unit check remote reset", + [CTC_EVENT_UC_RSRESET] = "Unit check remote system reset", + [CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout", + [CTC_EVENT_UC_TXPARITY] = "Unit check TX parity", + [CTC_EVENT_UC_HWFAIL] = "Unit check Hardware failure", + [CTC_EVENT_UC_RXPARITY] = "Unit check RX parity", + [CTC_EVENT_UC_ZERO] = "Unit check ZERO", + [CTC_EVENT_UC_UNKNOWN] = "Unit check Unknown", + [CTC_EVENT_SC_UNKNOWN] = "SubChannel check Unknown", + [CTC_EVENT_MC_FAIL] = "Machine check failure", + [CTC_EVENT_MC_GOOD] = "Machine check operational", + [CTC_EVENT_IRQ] = "IRQ normal", + [CTC_EVENT_FINSTAT] = "IRQ final", + [CTC_EVENT_TIMER] = "Timer", + [CTC_EVENT_START] = "Start", + [CTC_EVENT_STOP] = "Stop", + /* + * additional MPC events + */ + [CTC_EVENT_SEND_XID] = "XID 
Exchange", + [CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer", +}; + +const char *ctc_ch_state_names[] = { + [CTC_STATE_IDLE] = "Idle", + [CTC_STATE_STOPPED] = "Stopped", + [CTC_STATE_STARTWAIT] = "StartWait", + [CTC_STATE_STARTRETRY] = "StartRetry", + [CTC_STATE_SETUPWAIT] = "SetupWait", + [CTC_STATE_RXINIT] = "RX init", + [CTC_STATE_TXINIT] = "TX init", + [CTC_STATE_RX] = "RX", + [CTC_STATE_TX] = "TX", + [CTC_STATE_RXIDLE] = "RX idle", + [CTC_STATE_TXIDLE] = "TX idle", + [CTC_STATE_RXERR] = "RX error", + [CTC_STATE_TXERR] = "TX error", + [CTC_STATE_TERM] = "Terminating", + [CTC_STATE_DTERM] = "Restarting", + [CTC_STATE_NOTOP] = "Not operational", + /* + * additional MPC states + */ + [CH_XID0_PENDING] = "Pending XID0 Start", + [CH_XID0_INPROGRESS] = "In XID0 Negotiations ", + [CH_XID7_PENDING] = "Pending XID7 P1 Start", + [CH_XID7_PENDING1] = "Active XID7 P1 Exchange ", + [CH_XID7_PENDING2] = "Pending XID7 P2 Start ", + [CH_XID7_PENDING3] = "Active XID7 P2 Exchange ", + [CH_XID7_PENDING4] = "XID7 Complete - Pending READY ", +}; + +static void ctcm_action_nop(fsm_instance *fi, int event, void *arg); + +/* + * ----- static ctcm actions for channel statemachine ----- + * +*/ +static void chx_txdone(fsm_instance *fi, int event, void *arg); +static void chx_rx(fsm_instance *fi, int event, void *arg); +static void chx_rxidle(fsm_instance *fi, int event, void *arg); +static void chx_firstio(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_start(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg); + +/* + * ----- static ctcmpc actions for ctcmpc channel statemachine ----- + * +*/ +static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg); +static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg); +static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg); +/* shared : +static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_start(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg); +static void 
ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg); +static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg); +*/ +static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg); +static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *); +static void ctcmpc_chx_resend(fsm_instance *, int, void *); +static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg); + +/** + * Check return code of a preceding ccw_device call, halt_IO etc... + * + * ch : The channel, the error belongs to. + * Returns the error code (!= 0) to inspect. + */ +void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg) +{ + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): %s: %04x\n", + CTCM_FUNTAIL, ch->id, msg, rc); + switch (rc) { + case -EBUSY: + pr_info("%s: The communication peer is busy\n", + ch->id); + fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch); + break; + case -ENODEV: + pr_err("%s: The specified target device is not valid\n", + ch->id); + fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch); + break; + default: + pr_err("An I/O operation resulted in error %04x\n", + rc); + fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch); + } +} + +void ctcm_purge_skb_queue(struct sk_buff_head *q) +{ + struct sk_buff *skb; + + CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__); + + while ((skb = skb_dequeue(q))) { + refcount_dec(&skb->users); + dev_kfree_skb_any(skb); + } +} + +/** + * NOP action for statemachines + */ +static void ctcm_action_nop(fsm_instance *fi, int event, void *arg) +{ +} + +/* + * Actions for channel - statemachines. + */ + +/** + * Normal data has been send. Free the corresponding + * skb (it's in io_queue), reset dev->tbusy and + * revert to idle state. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. 
+ */ +static void chx_txdone(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct sk_buff *skb; + int first = 1; + int i; + unsigned long duration; + unsigned long done_stamp = jiffies; + + CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name); + + duration = done_stamp - ch->prof.send_stamp; + if (duration > ch->prof.tx_time) + ch->prof.tx_time = duration; + + if (ch->irb->scsw.cmd.count != 0) + CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, + "%s(%s): TX not complete, remaining %d bytes", + CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count); + fsm_deltimer(&ch->timer); + while ((skb = skb_dequeue(&ch->io_queue))) { + priv->stats.tx_packets++; + priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH; + if (first) { + priv->stats.tx_bytes += 2; + first = 0; + } + refcount_dec(&skb->users); + dev_kfree_skb_irq(skb); + } + spin_lock(&ch->collect_lock); + clear_normalized_cda(&ch->ccw[4]); + if (ch->collect_len > 0) { + int rc; + + if (ctcm_checkalloc_buffer(ch)) { + spin_unlock(&ch->collect_lock); + return; + } + ch->trans_skb->data = ch->trans_skb_data; + skb_reset_tail_pointer(ch->trans_skb); + ch->trans_skb->len = 0; + if (ch->prof.maxmulti < (ch->collect_len + 2)) + ch->prof.maxmulti = ch->collect_len + 2; + if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue)) + ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue); + *((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2; + i = 0; + while ((skb = skb_dequeue(&ch->collect_queue))) { + skb_copy_from_linear_data(skb, + skb_put(ch->trans_skb, skb->len), skb->len); + priv->stats.tx_packets++; + priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH; + refcount_dec(&skb->users); + dev_kfree_skb_irq(skb); + i++; + } + ch->collect_len = 0; + spin_unlock(&ch->collect_lock); + ch->ccw[1].count = ch->trans_skb->len; + fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); + ch->prof.send_stamp = jiffies; + rc = ccw_device_start(ch->cdev, &ch->ccw[0], + (unsigned long)ch, 0xff, 0); + ch->prof.doios_multi++; + if (rc != 0) { + priv->stats.tx_dropped += i; + priv->stats.tx_errors += i; + fsm_deltimer(&ch->timer); + ctcm_ccw_check_rc(ch, rc, "chained TX"); + } + } else { + spin_unlock(&ch->collect_lock); + fsm_newstate(fi, CTC_STATE_TXIDLE); + } + ctcm_clear_busy_do(dev); +} + +/** + * Initial data is sent. + * Notify device statemachine that we are up and + * running. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + + CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name); + + fsm_deltimer(&ch->timer); + fsm_newstate(fi, CTC_STATE_TXIDLE); + fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev); +} + +/** + * Got normal data, check for sanity, queue it up, allocate new buffer + * trigger bottom half, and initiate next read. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. 
+ */ +static void chx_rx(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + int len = ch->max_bufsize - ch->irb->scsw.cmd.count; + struct sk_buff *skb = ch->trans_skb; + __u16 block_len = *((__u16 *)skb->data); + int check_len; + int rc; + + fsm_deltimer(&ch->timer); + if (len < 8) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, + "%s(%s): got packet with length %d < 8\n", + CTCM_FUNTAIL, dev->name, len); + priv->stats.rx_dropped++; + priv->stats.rx_length_errors++; + goto again; + } + if (len > ch->max_bufsize) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, + "%s(%s): got packet with length %d > %d\n", + CTCM_FUNTAIL, dev->name, len, ch->max_bufsize); + priv->stats.rx_dropped++; + priv->stats.rx_length_errors++; + goto again; + } + + /* + * VM TCP seems to have a bug sending 2 trailing bytes of garbage. + */ + switch (ch->protocol) { + case CTCM_PROTO_S390: + case CTCM_PROTO_OS390: + check_len = block_len + 2; + break; + default: + check_len = block_len; + break; + } + if ((len < block_len) || (len > check_len)) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, + "%s(%s): got block length %d != rx length %d\n", + CTCM_FUNTAIL, dev->name, block_len, len); + if (do_debug) + ctcmpc_dump_skb(skb, 0); + + *((__u16 *)skb->data) = len; + priv->stats.rx_dropped++; + priv->stats.rx_length_errors++; + goto again; + } + if (block_len > 2) { + *((__u16 *)skb->data) = block_len - 2; + ctcm_unpack_skb(ch, skb); + } + again: + skb->data = ch->trans_skb_data; + skb_reset_tail_pointer(skb); + skb->len = 0; + if (ctcm_checkalloc_buffer(ch)) + return; + ch->ccw[1].count = ch->max_bufsize; + rc = ccw_device_start(ch->cdev, &ch->ccw[0], + (unsigned long)ch, 0xff, 0); + if (rc != 0) + ctcm_ccw_check_rc(ch, rc, "normal RX"); +} + +/** + * Initialize connection by sending a __u16 of value 0. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void chx_firstio(fsm_instance *fi, int event, void *arg) +{ + int rc; + struct channel *ch = arg; + int fsmstate = fsm_getstate(fi); + + CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, + "%s(%s) : %02x", + CTCM_FUNTAIL, ch->id, fsmstate); + + ch->sense_rc = 0; /* reset unit check report control */ + if (fsmstate == CTC_STATE_TXIDLE) + CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, + "%s(%s): remote side issued READ?, init.\n", + CTCM_FUNTAIL, ch->id); + fsm_deltimer(&ch->timer); + if (ctcm_checkalloc_buffer(ch)) + return; + if ((fsmstate == CTC_STATE_SETUPWAIT) && + (ch->protocol == CTCM_PROTO_OS390)) { + /* OS/390 resp. z/OS */ + if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { + *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN; + fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, + CTC_EVENT_TIMER, ch); + chx_rxidle(fi, event, arg); + } else { + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + fsm_newstate(fi, CTC_STATE_TXIDLE); + fsm_event(priv->fsm, DEV_EVENT_TXUP, dev); + } + return; + } + /* + * Don't setup a timer for receiving the initial RX frame + * if in compatibility mode, since VM TCP delays the initial + * frame until it has some data to send. 
+ */ + if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) || + (ch->protocol != CTCM_PROTO_S390)) + fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); + + *((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN; + ch->ccw[1].count = 2; /* Transfer only length */ + + fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) + ? CTC_STATE_RXINIT : CTC_STATE_TXINIT); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], + (unsigned long)ch, 0xff, 0); + if (rc != 0) { + fsm_deltimer(&ch->timer); + fsm_newstate(fi, CTC_STATE_SETUPWAIT); + ctcm_ccw_check_rc(ch, rc, "init IO"); + } + /* + * If in compatibility mode since we don't setup a timer, we + * also signal RX channel up immediately. This enables us + * to send packets early which in turn usually triggers some + * reply from VM TCP which brings up the RX channel to it's + * final state. + */ + if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) && + (ch->protocol == CTCM_PROTO_S390)) { + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + fsm_event(priv->fsm, DEV_EVENT_RXUP, dev); + } +} + +/** + * Got initial data, check it. If OK, + * notify device statemachine that we are up and + * running. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void chx_rxidle(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + __u16 buflen; + int rc; + + fsm_deltimer(&ch->timer); + buflen = *((__u16 *)ch->trans_skb->data); + CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n", + __func__, dev->name, buflen); + + if (buflen >= CTCM_INITIAL_BLOCKLEN) { + if (ctcm_checkalloc_buffer(ch)) + return; + ch->ccw[1].count = ch->max_bufsize; + fsm_newstate(fi, CTC_STATE_RXIDLE); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], + (unsigned long)ch, 0xff, 0); + if (rc != 0) { + fsm_newstate(fi, CTC_STATE_RXINIT); + ctcm_ccw_check_rc(ch, rc, "initial RX"); + } else + fsm_event(priv->fsm, DEV_EVENT_RXUP, dev); + } else { + CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n", + __func__, dev->name, + buflen, CTCM_INITIAL_BLOCKLEN); + chx_firstio(fi, event, arg); + } +} + +/** + * Set channel into extended mode. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + int rc; + unsigned long saveflags = 0; + int timeout = CTCM_TIME_5_SEC; + + fsm_deltimer(&ch->timer); + if (IS_MPC(ch)) { + timeout = 1500; + CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n", + __func__, smp_processor_id(), ch, ch->id); + } + fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch); + fsm_newstate(fi, CTC_STATE_SETUPWAIT); + CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2); + + if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */ + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + /* Such conditional locking is undeterministic in + * static view. => ignore sparse warnings here. 
*/ + + rc = ccw_device_start(ch->cdev, &ch->ccw[6], + (unsigned long)ch, 0xff, 0); + if (event == CTC_EVENT_TIMER) /* see above comments */ + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); + if (rc != 0) { + fsm_deltimer(&ch->timer); + fsm_newstate(fi, CTC_STATE_STARTWAIT); + ctcm_ccw_check_rc(ch, rc, "set Mode"); + } else + ch->retry = 0; +} + +/** + * Setup channel. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_start(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + unsigned long saveflags; + int rc; + + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s", + CTCM_FUNTAIL, ch->id, + (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX"); + + if (ch->trans_skb != NULL) { + clear_normalized_cda(&ch->ccw[1]); + dev_kfree_skb(ch->trans_skb); + ch->trans_skb = NULL; + } + if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { + ch->ccw[1].cmd_code = CCW_CMD_READ; + ch->ccw[1].flags = CCW_FLAG_SLI; + ch->ccw[1].count = 0; + } else { + ch->ccw[1].cmd_code = CCW_CMD_WRITE; + ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[1].count = 0; + } + if (ctcm_checkalloc_buffer(ch)) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, + "%s(%s): %s trans_skb alloc delayed " + "until first transfer", + CTCM_FUNTAIL, ch->id, + (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? + "RX" : "TX"); + } + ch->ccw[0].cmd_code = CCW_CMD_PREPARE; + ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[0].count = 0; + ch->ccw[0].cda = 0; + ch->ccw[2].cmd_code = CCW_CMD_NOOP; /* jointed CE + DE */ + ch->ccw[2].flags = CCW_FLAG_SLI; + ch->ccw[2].count = 0; + ch->ccw[2].cda = 0; + memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3); + ch->ccw[4].cda = 0; + ch->ccw[4].flags &= ~CCW_FLAG_IDA; + + fsm_newstate(fi, CTC_STATE_STARTWAIT); + fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch); + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + rc = ccw_device_halt(ch->cdev, (unsigned long)ch); + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); + if (rc != 0) { + if (rc != -EBUSY) + fsm_deltimer(&ch->timer); + ctcm_ccw_check_rc(ch, rc, "initial HaltIO"); + } +} + +/** + * Shutdown a channel. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + unsigned long saveflags = 0; + int rc; + int oldstate; + + fsm_deltimer(&ch->timer); + if (IS_MPC(ch)) + fsm_deltimer(&ch->sweep_timer); + + fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); + + if (event == CTC_EVENT_STOP) /* only for STOP not yet locked */ + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + /* Such conditional locking is undeterministic in + * static view. => ignore sparse warnings here. */ + oldstate = fsm_getstate(fi); + fsm_newstate(fi, CTC_STATE_TERM); + rc = ccw_device_halt(ch->cdev, (unsigned long)ch); + + if (event == CTC_EVENT_STOP) + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); + /* see remark above about conditional locking */ + + if (rc != 0 && rc != -EBUSY) { + fsm_deltimer(&ch->timer); + if (event != CTC_EVENT_STOP) { + fsm_newstate(fi, oldstate); + ctcm_ccw_check_rc(ch, rc, (char *)__func__); + } + } +} + +/** + * Cleanup helper for chx_fail and chx_stopped + * cleanup channels queue and notify interface statemachine. 
+ * + * fi An instance of a channel statemachine. + * state The next state (depending on caller). + * ch The channel to operate on. + */ +static void ctcm_chx_cleanup(fsm_instance *fi, int state, + struct channel *ch) +{ + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + + CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE, + "%s(%s): %s[%d]\n", + CTCM_FUNTAIL, dev->name, ch->id, state); + + fsm_deltimer(&ch->timer); + if (IS_MPC(ch)) + fsm_deltimer(&ch->sweep_timer); + + fsm_newstate(fi, state); + if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) { + clear_normalized_cda(&ch->ccw[1]); + dev_kfree_skb_any(ch->trans_skb); + ch->trans_skb = NULL; + } + + ch->th_seg = 0x00; + ch->th_seq_num = 0x00; + if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { + skb_queue_purge(&ch->io_queue); + fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); + } else { + ctcm_purge_skb_queue(&ch->io_queue); + if (IS_MPC(ch)) + ctcm_purge_skb_queue(&ch->sweep_queue); + spin_lock(&ch->collect_lock); + ctcm_purge_skb_queue(&ch->collect_queue); + ch->collect_len = 0; + spin_unlock(&ch->collect_lock); + fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); + } +} + +/** + * A channel has successfully been halted. + * Cleanup it's queue and notify interface statemachine. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg) +{ + ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg); +} + +/** + * A stop command from device statemachine arrived and we are in + * not operational mode. Set state to stopped. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg) +{ + fsm_newstate(fi, CTC_STATE_STOPPED); +} + +/** + * A machine check for no path, not operational status or gone device has + * happened. + * Cleanup queue and notify interface statemachine. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg) +{ + ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg); +} + +/** + * Handle error during setup of channel. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + + /* + * Special case: Got UC_RCRESET on setmode. + * This means that remote side isn't setup. In this case + * simply retry after some 10 secs... + */ + if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) && + ((event == CTC_EVENT_UC_RCRESET) || + (event == CTC_EVENT_UC_RSRESET))) { + fsm_newstate(fi, CTC_STATE_STARTRETRY); + fsm_deltimer(&ch->timer); + fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); + if (!IS_MPC(ch) && + (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) { + int rc = ccw_device_halt(ch->cdev, (unsigned long)ch); + if (rc != 0) + ctcm_ccw_check_rc(ch, rc, + "HaltIO in chx_setuperr"); + } + return; + } + + CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT, + "%s(%s) : %s error during %s channel setup state=%s\n", + CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event], + (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? 
"RX" : "TX", + fsm_getstate_str(fi)); + + if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { + fsm_newstate(fi, CTC_STATE_RXERR); + fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); + } else { + fsm_newstate(fi, CTC_STATE_TXERR); + fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); + } +} + +/** + * Restart a channel after an error. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + unsigned long saveflags = 0; + int oldstate; + int rc; + + CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, + "%s: %s[%d] of %s\n", + CTCM_FUNTAIL, ch->id, event, dev->name); + + fsm_deltimer(&ch->timer); + + fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); + oldstate = fsm_getstate(fi); + fsm_newstate(fi, CTC_STATE_STARTWAIT); + if (event == CTC_EVENT_TIMER) /* only for timer not yet locked */ + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + /* Such conditional locking is a known problem for + * sparse because its undeterministic in static view. + * Warnings should be ignored here. */ + rc = ccw_device_halt(ch->cdev, (unsigned long)ch); + if (event == CTC_EVENT_TIMER) + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); + if (rc != 0) { + if (rc != -EBUSY) { + fsm_deltimer(&ch->timer); + fsm_newstate(fi, oldstate); + } + ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart"); + } +} + +/** + * Handle error during RX initial handshake (exchange of + * 0-length block header) + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + + if (event == CTC_EVENT_TIMER) { + if (!IS_MPCDEV(dev)) + /* TODO : check if MPC deletes timer somewhere */ + fsm_deltimer(&ch->timer); + if (ch->retry++ < 3) + ctcm_chx_restart(fi, event, arg); + else { + fsm_newstate(fi, CTC_STATE_RXERR); + fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); + } + } else { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, + ctc_ch_event_names[event], fsm_getstate_str(fi)); + + dev_warn(&dev->dev, + "Initialization failed with RX/TX init handshake " + "error %s\n", ctc_ch_event_names[event]); + } +} + +/** + * Notify device statemachine if we gave up initialization + * of RX channel. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): RX %s busy, init. fail", + CTCM_FUNTAIL, dev->name, ch->id); + fsm_newstate(fi, CTC_STATE_RXERR); + fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); +} + +/** + * Handle RX Unit check remote reset (remote disconnected) + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. 
+ */ +static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct channel *ch2; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + + CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, + "%s: %s: remote disconnect - re-init ...", + CTCM_FUNTAIL, dev->name); + fsm_deltimer(&ch->timer); + /* + * Notify device statemachine + */ + fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); + fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); + + fsm_newstate(fi, CTC_STATE_DTERM); + ch2 = priv->channel[CTCM_WRITE]; + fsm_newstate(ch2->fsm, CTC_STATE_DTERM); + + ccw_device_halt(ch->cdev, (unsigned long)ch); + ccw_device_halt(ch2->cdev, (unsigned long)ch2); +} + +/** + * Handle error during TX channel initialization. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + + if (event == CTC_EVENT_TIMER) { + fsm_deltimer(&ch->timer); + if (ch->retry++ < 3) + ctcm_chx_restart(fi, event, arg); + else { + fsm_newstate(fi, CTC_STATE_TXERR); + fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); + } + } else { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, + ctc_ch_event_names[event], fsm_getstate_str(fi)); + + dev_warn(&dev->dev, + "Initialization failed with RX/TX init handshake " + "error %s\n", ctc_ch_event_names[event]); + } +} + +/** + * Handle TX timeout by retrying operation. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct sk_buff *skb; + + CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n", + __func__, smp_processor_id(), ch, ch->id); + + fsm_deltimer(&ch->timer); + if (ch->retry++ > 3) { + struct mpc_group *gptr = priv->mpcg; + CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO, + "%s: %s: retries exceeded", + CTCM_FUNTAIL, ch->id); + fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); + /* call restart if not MPC or if MPC and mpcg fsm is ready. + use gptr as mpc indicator */ + if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY))) + ctcm_chx_restart(fi, event, arg); + goto done; + } + + CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, + "%s : %s: retry %d", + CTCM_FUNTAIL, ch->id, ch->retry); + skb = skb_peek(&ch->io_queue); + if (skb) { + int rc = 0; + unsigned long saveflags = 0; + clear_normalized_cda(&ch->ccw[4]); + ch->ccw[4].count = skb->len; + if (set_normalized_cda(&ch->ccw[4], skb->data)) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO, + "%s: %s: IDAL alloc failed", + CTCM_FUNTAIL, ch->id); + fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); + ctcm_chx_restart(fi, event, arg); + goto done; + } + fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch); + if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */ + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + /* Such conditional locking is a known problem for + * sparse because its undeterministic in static view. + * Warnings should be ignored here. 
*/ + if (do_debug_ccw) + ctcmpc_dumpit((char *)&ch->ccw[3], + sizeof(struct ccw1) * 3); + + rc = ccw_device_start(ch->cdev, &ch->ccw[3], + (unsigned long)ch, 0xff, 0); + if (event == CTC_EVENT_TIMER) + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), + saveflags); + if (rc != 0) { + fsm_deltimer(&ch->timer); + ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry"); + ctcm_purge_skb_queue(&ch->io_queue); + } + } +done: + return; +} + +/** + * Handle fatal errors during an I/O command. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + int rd = CHANNEL_DIRECTION(ch->flags); + + fsm_deltimer(&ch->timer); + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s: %s: %s unrecoverable channel error", + CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX"); + + if (IS_MPC(ch)) { + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + } + if (rd == CTCM_READ) { + fsm_newstate(fi, CTC_STATE_RXERR); + fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); + } else { + fsm_newstate(fi, CTC_STATE_TXERR); + fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev); + } +} + +/* + * The ctcm statemachine for a channel. + */ +const fsm_node ch_fsm[] = { + { CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop }, + { CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start }, + { CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop }, + { CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop }, + + { CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop }, + { CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop }, + { CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop }, + { CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start }, + + { CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode }, + { CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr }, + { CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode }, + { CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_action_nop }, + { CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, chx_firstio }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, chx_rxidle }, + { CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr }, + { CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr }, + { CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr }, + { CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail }, + { CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { 
CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, chx_firstio }, + { CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, chx_rx }, + { CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc }, + { CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, chx_rx }, + + { CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle }, + { CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr }, + { CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr }, + { CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr }, + { CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, chx_firstio }, + { CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_action_nop }, + { CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_action_nop }, + { CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop }, + { CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart }, + { CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped }, + { CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop }, + { CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop }, + { CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart }, + { CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode }, + { CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop }, + { CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop }, + { CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_TX, CTC_EVENT_FINSTAT, chx_txdone }, + { CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_txretry }, + { CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_txretry }, + { CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry }, + { CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, +}; + +int ch_fsm_len = ARRAY_SIZE(ch_fsm); + +/* + * MPC actions for mpc channel statemachine + * handling of MPC protocol requires extra + * statemachine and actions which are prefixed ctcmpc_ . + * The ctc_ch_states and ctc_ch_state_names, + * ctc_ch_events and ctc_ch_event_names share the ctcm definitions + * which are expanded by some elements. + */ + +/* + * Actions for mpc channel statemachine. + */ + +/** + * Normal data has been send. Free the corresponding + * skb (it's in io_queue), reset dev->tbusy and + * revert to idle state. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. 
+ */ +static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct sk_buff *skb; + int first = 1; + int i; + __u32 data_space; + unsigned long duration; + struct sk_buff *peekskb; + int rc; + struct th_header *header; + struct pdu *p_header; + unsigned long done_stamp = jiffies; + + CTCM_PR_DEBUG("Enter %s: %s cp:%i\n", + __func__, dev->name, smp_processor_id()); + + duration = done_stamp - ch->prof.send_stamp; + if (duration > ch->prof.tx_time) + ch->prof.tx_time = duration; + + if (ch->irb->scsw.cmd.count != 0) + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, + "%s(%s): TX not complete, remaining %d bytes", + CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count); + fsm_deltimer(&ch->timer); + while ((skb = skb_dequeue(&ch->io_queue))) { + priv->stats.tx_packets++; + priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH; + if (first) { + priv->stats.tx_bytes += 2; + first = 0; + } + refcount_dec(&skb->users); + dev_kfree_skb_irq(skb); + } + spin_lock(&ch->collect_lock); + clear_normalized_cda(&ch->ccw[4]); + if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) { + spin_unlock(&ch->collect_lock); + fsm_newstate(fi, CTC_STATE_TXIDLE); + goto done; + } + + if (ctcm_checkalloc_buffer(ch)) { + spin_unlock(&ch->collect_lock); + goto done; + } + ch->trans_skb->data = ch->trans_skb_data; + skb_reset_tail_pointer(ch->trans_skb); + ch->trans_skb->len = 0; + if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH)) + ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH; + if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue)) + ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue); + i = 0; + p_header = NULL; + data_space = grp->group_max_buflen - TH_HEADER_LENGTH; + + CTCM_PR_DBGDATA("%s: building trans_skb from collect_q" + " data_space:%04x\n", + __func__, data_space); + + while ((skb = skb_dequeue(&ch->collect_queue))) { + skb_put_data(ch->trans_skb, skb->data, skb->len); + p_header = (struct pdu *) + (skb_tail_pointer(ch->trans_skb) - skb->len); + p_header->pdu_flag = 0x00; + if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) + p_header->pdu_flag |= 0x60; + else + p_header->pdu_flag |= 0x20; + + CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n", + __func__, ch->trans_skb->len); + CTCM_PR_DBGDATA("%s: pdu header and data for up" + " to 32 bytes sent to vtam\n", __func__); + CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32)); + + ch->collect_len -= skb->len; + data_space -= skb->len; + priv->stats.tx_packets++; + priv->stats.tx_bytes += skb->len; + refcount_dec(&skb->users); + dev_kfree_skb_any(skb); + peekskb = skb_peek(&ch->collect_queue); + if (peekskb->len > data_space) + break; + i++; + } + /* p_header points to the last one we handled */ + if (p_header) + p_header->pdu_flag |= PDU_LAST; /*Say it's the last one*/ + header = kzalloc(TH_HEADER_LENGTH, gfp_type()); + if (!header) { + spin_unlock(&ch->collect_lock); + fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); + goto done; + } + header->th_ch_flag = TH_HAS_PDU; /* Normal data */ + ch->th_seq_num++; + header->th_seq_num = ch->th_seq_num; + + CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" , + __func__, ch->th_seq_num); + + memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header, + TH_HEADER_LENGTH); /* put the TH on the packet */ + + kfree(header); + + CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n", + __func__, ch->trans_skb->len); + CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb " + 
"data to vtam from collect_q\n", __func__); + CTCM_D3_DUMP((char *)ch->trans_skb->data, + min_t(int, ch->trans_skb->len, 50)); + + spin_unlock(&ch->collect_lock); + clear_normalized_cda(&ch->ccw[1]); + + CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n", + (void *)(unsigned long)ch->ccw[1].cda, + ch->trans_skb->data); + ch->ccw[1].count = ch->max_bufsize; + + if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) { + dev_kfree_skb_any(ch->trans_skb); + ch->trans_skb = NULL; + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR, + "%s: %s: IDAL alloc failed", + CTCM_FUNTAIL, ch->id); + fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); + return; + } + + CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n", + (void *)(unsigned long)ch->ccw[1].cda, + ch->trans_skb->data); + + ch->ccw[1].count = ch->trans_skb->len; + fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); + ch->prof.send_stamp = jiffies; + if (do_debug_ccw) + ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], + (unsigned long)ch, 0xff, 0); + ch->prof.doios_multi++; + if (rc != 0) { + priv->stats.tx_dropped += i; + priv->stats.tx_errors += i; + fsm_deltimer(&ch->timer); + ctcm_ccw_check_rc(ch, rc, "chained TX"); + } +done: + ctcm_clear_busy(dev); + return; +} + +/** + * Got normal data, check for sanity, queue it up, allocate new buffer + * trigger bottom half, and initiate next read. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct sk_buff *skb = ch->trans_skb; + struct sk_buff *new_skb; + unsigned long saveflags = 0; /* avoids compiler warning */ + int len = ch->max_bufsize - ch->irb->scsw.cmd.count; + + CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n", + CTCM_FUNTAIL, dev->name, smp_processor_id(), + ch->id, ch->max_bufsize, len); + fsm_deltimer(&ch->timer); + + if (skb == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): TRANS_SKB = NULL", + CTCM_FUNTAIL, dev->name); + goto again; + } + + if (len < TH_HEADER_LENGTH) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): packet length %d to short", + CTCM_FUNTAIL, dev->name, len); + priv->stats.rx_dropped++; + priv->stats.rx_length_errors++; + } else { + /* must have valid th header or game over */ + __u32 block_len = len; + len = TH_HEADER_LENGTH + XID2_LENGTH + 4; + new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC); + + if (new_skb == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%d): skb allocation failed", + CTCM_FUNTAIL, dev->name); + fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); + goto again; + } + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_RESET: + case MPCG_STATE_INOP: + dev_kfree_skb_any(new_skb); + break; + case MPCG_STATE_FLOWC: + case MPCG_STATE_READY: + skb_put_data(new_skb, skb->data, block_len); + skb_queue_tail(&ch->io_queue, new_skb); + tasklet_schedule(&ch->ch_tasklet); + break; + default: + skb_put_data(new_skb, skb->data, len); + skb_queue_tail(&ch->io_queue, new_skb); + tasklet_hi_schedule(&ch->ch_tasklet); + break; + } + } + +again: + switch (fsm_getstate(grp->fsm)) { + int rc, dolock; + case MPCG_STATE_FLOWC: + case MPCG_STATE_READY: + if (ctcm_checkalloc_buffer(ch)) + break; + ch->trans_skb->data = ch->trans_skb_data; + 
skb_reset_tail_pointer(ch->trans_skb); + ch->trans_skb->len = 0; + ch->ccw[1].count = ch->max_bufsize; + if (do_debug_ccw) + ctcmpc_dumpit((char *)&ch->ccw[0], + sizeof(struct ccw1) * 3); + dolock = !in_irq(); + if (dolock) + spin_lock_irqsave( + get_ccwdev_lock(ch->cdev), saveflags); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], + (unsigned long)ch, 0xff, 0); + if (dolock) /* see remark about conditional locking */ + spin_unlock_irqrestore( + get_ccwdev_lock(ch->cdev), saveflags); + if (rc != 0) + ctcm_ccw_check_rc(ch, rc, "normal RX"); + default: + break; + } + + CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n", + __func__, dev->name, ch, ch->id); + +} + +/** + * Initialize connection by sending a __u16 of value 0. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. + */ +static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *gptr = priv->mpcg; + + CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n", + __func__, ch->id, ch); + + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO, + "%s: %s: chstate:%i, grpstate:%i, prot:%i\n", + CTCM_FUNTAIL, ch->id, fsm_getstate(fi), + fsm_getstate(gptr->fsm), ch->protocol); + + if (fsm_getstate(fi) == CTC_STATE_TXIDLE) + MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? "); + + fsm_deltimer(&ch->timer); + if (ctcm_checkalloc_buffer(ch)) + goto done; + + switch (fsm_getstate(fi)) { + case CTC_STATE_STARTRETRY: + case CTC_STATE_SETUPWAIT: + if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) { + ctcmpc_chx_rxidle(fi, event, arg); + } else { + fsm_newstate(fi, CTC_STATE_TXIDLE); + fsm_event(priv->fsm, DEV_EVENT_TXUP, dev); + } + goto done; + default: + break; + } + + fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) + ? CTC_STATE_RXINIT : CTC_STATE_TXINIT); + +done: + CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n", + __func__, ch->id, ch); + return; +} + +/** + * Got initial data, check it. If OK, + * notify device statemachine that we are up and + * running. + * + * fi An instance of a channel statemachine. + * event The event, just happened. + * arg Generic pointer, casted from channel * upon call. 
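+ * Note: on success the channel enters CTC_STATE_RXIDLE; if the MPC group is already in FLOWC or READY state the read CCW chain is restarted here before DEV_EVENT_RXUP is reported to the device statemachine.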
+ */ +void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + int rc; + unsigned long saveflags = 0; /* avoids compiler warning */ + + fsm_deltimer(&ch->timer); + CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n", + __func__, ch->id, dev->name, smp_processor_id(), + fsm_getstate(fi), fsm_getstate(grp->fsm)); + + fsm_newstate(fi, CTC_STATE_RXIDLE); + /* XID processing complete */ + + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_FLOWC: + case MPCG_STATE_READY: + if (ctcm_checkalloc_buffer(ch)) + goto done; + ch->trans_skb->data = ch->trans_skb_data; + skb_reset_tail_pointer(ch->trans_skb); + ch->trans_skb->len = 0; + ch->ccw[1].count = ch->max_bufsize; + CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3); + if (event == CTC_EVENT_START) + /* see remark about conditional locking */ + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + rc = ccw_device_start(ch->cdev, &ch->ccw[0], + (unsigned long)ch, 0xff, 0); + if (event == CTC_EVENT_START) + spin_unlock_irqrestore( + get_ccwdev_lock(ch->cdev), saveflags); + if (rc != 0) { + fsm_newstate(fi, CTC_STATE_RXINIT); + ctcm_ccw_check_rc(ch, rc, "initial RX"); + goto done; + } + break; + default: + break; + } + + fsm_event(priv->fsm, DEV_EVENT_RXUP, dev); +done: + return; +} + +/* + * ctcmpc channel FSM action + * called from several points in ctcmpc_ch_fsm + * ctcmpc only + */ +static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n", + __func__, dev->name, ch->id, ch, smp_processor_id(), + fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm)); + + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_XID2INITW: + /* ok..start yside xid exchanges */ + if (!ch->in_mpcgroup) + break; + if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) { + fsm_deltimer(&grp->timer); + fsm_addtimer(&grp->timer, + MPC_XID_TIMEOUT_VALUE, + MPCG_EVENT_TIMER, dev); + fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch); + + } else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1) + /* attn rcvd before xid0 processed via bh */ + fsm_newstate(ch->fsm, CH_XID7_PENDING1); + break; + case MPCG_STATE_XID2INITX: + case MPCG_STATE_XID0IOWAIT: + case MPCG_STATE_XID0IOWAIX: + /* attn rcvd before xid0 processed on ch + but mid-xid0 processing for group */ + if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1) + fsm_newstate(ch->fsm, CH_XID7_PENDING1); + break; + case MPCG_STATE_XID7INITW: + case MPCG_STATE_XID7INITX: + case MPCG_STATE_XID7INITI: + case MPCG_STATE_XID7INITZ: + switch (fsm_getstate(ch->fsm)) { + case CH_XID7_PENDING: + fsm_newstate(ch->fsm, CH_XID7_PENDING1); + break; + case CH_XID7_PENDING2: + fsm_newstate(ch->fsm, CH_XID7_PENDING3); + break; + } + fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev); + break; + } + + return; +} + +/* + * ctcmpc channel FSM action + * called from one point in ctcmpc_ch_fsm + * ctcmpc only + */ +static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n", + __func__, dev->name, ch->id, + fsm_getstate_str(ch->fsm), 
fsm_getstate_str(grp->fsm)); + + fsm_deltimer(&ch->timer); + + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_XID0IOWAIT: + /* vtam wants to be primary.start yside xid exchanges*/ + /* only receive one attn-busy at a time so must not */ + /* change state each time */ + grp->changed_side = 1; + fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW); + break; + case MPCG_STATE_XID2INITW: + if (grp->changed_side == 1) { + grp->changed_side = 2; + break; + } + /* process began via call to establish_conn */ + /* so must report failure instead of reverting */ + /* back to ready-for-xid passive state */ + if (grp->estconnfunc) + goto done; + /* this attnbusy is NOT the result of xside xid */ + /* collisions so yside must have been triggered */ + /* by an ATTN that was not intended to start XID */ + /* processing. Revert back to ready-for-xid and */ + /* wait for ATTN interrupt to signal xid start */ + if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) { + fsm_newstate(ch->fsm, CH_XID0_PENDING) ; + fsm_deltimer(&grp->timer); + goto done; + } + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + goto done; + case MPCG_STATE_XID2INITX: + /* XID2 was received before ATTN Busy for second + channel.Send yside xid for second channel. + */ + if (grp->changed_side == 1) { + grp->changed_side = 2; + break; + } + case MPCG_STATE_XID0IOWAIX: + case MPCG_STATE_XID7INITW: + case MPCG_STATE_XID7INITX: + case MPCG_STATE_XID7INITI: + case MPCG_STATE_XID7INITZ: + default: + /* multiple attn-busy indicates too out-of-sync */ + /* and they are certainly not being received as part */ + /* of valid mpc group negotiations.. */ + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + goto done; + } + + if (grp->changed_side == 1) { + fsm_deltimer(&grp->timer); + fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE, + MPCG_EVENT_TIMER, dev); + } + if (ch->in_mpcgroup) + fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch); + else + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): channel %s not added to group", + CTCM_FUNTAIL, dev->name, ch->id); + +done: + return; +} + +/* + * ctcmpc channel FSM action + * called from several points in ctcmpc_ch_fsm + * ctcmpc only + */ +static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch); + return; +} + +/* + * ctcmpc channel FSM action + * called from several points in ctcmpc_ch_fsm + * ctcmpc only + */ +static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg) +{ + struct channel *ach = arg; + struct net_device *dev = ach->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct channel *wch = priv->channel[CTCM_WRITE]; + struct channel *rch = priv->channel[CTCM_READ]; + struct sk_buff *skb; + struct th_sweep *header; + int rc = 0; + unsigned long saveflags = 0; + + CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n", + __func__, smp_processor_id(), ach, ach->id); + + if (grp->in_sweep == 0) + goto done; + + CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n" , + __func__, wch->th_seq_num); + CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n" , + __func__, rch->th_seq_num); + + if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) { + /* give the previous IO time to complete */ + fsm_addtimer(&wch->sweep_timer, + 200, CTC_EVENT_RSWEEP_TIMER, wch); + goto done; + } + + skb = skb_dequeue(&wch->sweep_queue); + if (!skb) + goto done; + + if 
(set_normalized_cda(&wch->ccw[4], skb->data)) { + grp->in_sweep = 0; + ctcm_clear_busy_do(dev); + dev_kfree_skb_any(skb); + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + goto done; + } else { + refcount_inc(&skb->users); + skb_queue_tail(&wch->io_queue, skb); + } + + /* send out the sweep */ + wch->ccw[4].count = skb->len; + + header = (struct th_sweep *)skb->data; + switch (header->th.th_ch_flag) { + case TH_SWEEP_REQ: + grp->sweep_req_pend_num--; + break; + case TH_SWEEP_RESP: + grp->sweep_rsp_pend_num--; + break; + } + + header->sw.th_last_seq = wch->th_seq_num; + + CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3); + CTCM_PR_DBGDATA("%s: sweep packet\n", __func__); + CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH); + + fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch); + fsm_newstate(wch->fsm, CTC_STATE_TX); + + spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags); + wch->prof.send_stamp = jiffies; + rc = ccw_device_start(wch->cdev, &wch->ccw[3], + (unsigned long) wch, 0xff, 0); + spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags); + + if ((grp->sweep_req_pend_num == 0) && + (grp->sweep_rsp_pend_num == 0)) { + grp->in_sweep = 0; + rch->th_seq_num = 0x00; + wch->th_seq_num = 0x00; + ctcm_clear_busy_do(dev); + } + + CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n" , + __func__, wch->th_seq_num, rch->th_seq_num); + + if (rc != 0) + ctcm_ccw_check_rc(wch, rc, "send sweep"); + +done: + return; +} + + +/* + * The ctcmpc statemachine for a channel. + */ + +const fsm_node ctcmpc_ch_fsm[] = { + { CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop }, + { CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start }, + { CTC_STATE_STOPPED, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop }, + { CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop }, + + { CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop }, + { CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop }, + { CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop }, + { CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start }, + { CTC_STATE_NOTOP, CTC_EVENT_UC_RCRESET, ctcm_chx_stop }, + { CTC_STATE_NOTOP, CTC_EVENT_UC_RSRESET, ctcm_chx_stop }, + { CTC_STATE_NOTOP, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + + { CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode }, + { CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr }, + { CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode }, + { CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_chx_setmode }, + { CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_STARTRETRY, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + + { CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { 
CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, ctcmpc_chx_rxidle }, + { CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr }, + { CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr }, + { CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr }, + { CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail }, + { CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, ctcmpc_chx_firstio }, + { CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + + { CH_XID0_PENDING, CTC_EVENT_FINSTAT, ctcm_action_nop }, + { CH_XID0_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn }, + { CH_XID0_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CH_XID0_PENDING, CTC_EVENT_START, ctcm_action_nop }, + { CH_XID0_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CH_XID0_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CH_XID0_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, + { CH_XID0_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CH_XID0_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CH_XID0_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal }, + + { CH_XID0_INPROGRESS, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, + { CH_XID0_INPROGRESS, CTC_EVENT_ATTN, ctcmpc_chx_attn }, + { CH_XID0_INPROGRESS, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CH_XID0_INPROGRESS, CTC_EVENT_START, ctcm_action_nop }, + { CH_XID0_INPROGRESS, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CH_XID0_INPROGRESS, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CH_XID0_INPROGRESS, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, + { CH_XID0_INPROGRESS, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, + { CH_XID0_INPROGRESS, CTC_EVENT_ATTNBUSY, ctcmpc_chx_attnbusy }, + { CH_XID0_INPROGRESS, CTC_EVENT_TIMER, ctcmpc_chx_resend }, + { CH_XID0_INPROGRESS, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, + + { CH_XID7_PENDING, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, + { CH_XID7_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn }, + { CH_XID7_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CH_XID7_PENDING, CTC_EVENT_START, ctcm_action_nop }, + { CH_XID7_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CH_XID7_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CH_XID7_PENDING, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, + { CH_XID7_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal }, + { CH_XID7_PENDING, CTC_EVENT_TIMER, ctcmpc_chx_resend }, + { CH_XID7_PENDING, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, + + { CH_XID7_PENDING1, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, + { CH_XID7_PENDING1, CTC_EVENT_ATTN, ctcmpc_chx_attn }, + { CH_XID7_PENDING1, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CH_XID7_PENDING1, CTC_EVENT_START, ctcm_action_nop }, + { CH_XID7_PENDING1, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CH_XID7_PENDING1, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CH_XID7_PENDING1, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, + { CH_XID7_PENDING1, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING1, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING1, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal }, + { CH_XID7_PENDING1, CTC_EVENT_TIMER, ctcmpc_chx_resend }, + { CH_XID7_PENDING1, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, + + { CH_XID7_PENDING2, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, + { CH_XID7_PENDING2, CTC_EVENT_ATTN, ctcmpc_chx_attn }, + { CH_XID7_PENDING2, CTC_EVENT_STOP, 
ctcm_chx_haltio }, + { CH_XID7_PENDING2, CTC_EVENT_START, ctcm_action_nop }, + { CH_XID7_PENDING2, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CH_XID7_PENDING2, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CH_XID7_PENDING2, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, + { CH_XID7_PENDING2, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING2, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING2, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal }, + { CH_XID7_PENDING2, CTC_EVENT_TIMER, ctcmpc_chx_resend }, + { CH_XID7_PENDING2, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, + + { CH_XID7_PENDING3, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, + { CH_XID7_PENDING3, CTC_EVENT_ATTN, ctcmpc_chx_attn }, + { CH_XID7_PENDING3, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CH_XID7_PENDING3, CTC_EVENT_START, ctcm_action_nop }, + { CH_XID7_PENDING3, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CH_XID7_PENDING3, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CH_XID7_PENDING3, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, + { CH_XID7_PENDING3, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING3, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING3, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal }, + { CH_XID7_PENDING3, CTC_EVENT_TIMER, ctcmpc_chx_resend }, + { CH_XID7_PENDING3, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, + + { CH_XID7_PENDING4, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, + { CH_XID7_PENDING4, CTC_EVENT_ATTN, ctcmpc_chx_attn }, + { CH_XID7_PENDING4, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CH_XID7_PENDING4, CTC_EVENT_START, ctcm_action_nop }, + { CH_XID7_PENDING4, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CH_XID7_PENDING4, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CH_XID7_PENDING4, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, + { CH_XID7_PENDING4, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING4, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, + { CH_XID7_PENDING4, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal }, + { CH_XID7_PENDING4, CTC_EVENT_TIMER, ctcmpc_chx_resend }, + { CH_XID7_PENDING4, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, + + { CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, + { CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc }, + { CTC_STATE_RXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail }, + { CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, + + { CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle }, + { CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr }, + { CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr }, + { CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr }, + { CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_TXINIT, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep }, + + { CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio }, + { CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_fail }, + { CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail }, + { CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_TXIDLE, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep }, + + { 
CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop }, + { CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart }, + { CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped }, + { CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop }, + { CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop }, + { CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_TERM, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, + { CTC_STATE_TERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + + { CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart }, + { CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode }, + { CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop }, + { CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop }, + { CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_DTERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + + { CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop }, + { CTC_STATE_TX, CTC_EVENT_FINSTAT, ctcmpc_chx_txdone }, + { CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_fail }, + { CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_fail }, + { CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry }, + { CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_TX, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep }, + { CTC_STATE_TX, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, + + { CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio }, + { CTC_STATE_TXERR, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, + { CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, + { CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, +}; + +int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm); + +/* + * Actions for interface - statemachine. + */ + +/** + * Startup channels by sending CTC_EVENT_START to each channel. + * + * fi An instance of an interface statemachine. + * event The event, just happened. + * arg Generic pointer, casted from struct net_device * upon call. + */ +static void dev_action_start(fsm_instance *fi, int event, void *arg) +{ + struct net_device *dev = arg; + struct ctcm_priv *priv = dev->ml_priv; + int direction; + + CTCMY_DBF_DEV_NAME(SETUP, dev, ""); + + fsm_deltimer(&priv->restart_timer); + fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX); + if (IS_MPC(priv)) + priv->mpcg->channels_terminating = 0; + for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) { + struct channel *ch = priv->channel[direction]; + fsm_event(ch->fsm, CTC_EVENT_START, ch); + } +} + +/** + * Shutdown channels by sending CTC_EVENT_STOP to each channel. + * + * fi An instance of an interface statemachine. + * event The event, just happened. + * arg Generic pointer, casted from struct net_device * upon call. 
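+ * The per-channel TH sequence numbers are cleared here and, for MPC devices, the group statemachine is forced back to MPCG_STATE_RESET.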
+ */ +static void dev_action_stop(fsm_instance *fi, int event, void *arg) +{ + int direction; + struct net_device *dev = arg; + struct ctcm_priv *priv = dev->ml_priv; + + CTCMY_DBF_DEV_NAME(SETUP, dev, ""); + + fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX); + for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) { + struct channel *ch = priv->channel[direction]; + fsm_event(ch->fsm, CTC_EVENT_STOP, ch); + ch->th_seq_num = 0x00; + CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n", + __func__, ch->th_seq_num); + } + if (IS_MPC(priv)) + fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET); +} + +static void dev_action_restart(fsm_instance *fi, int event, void *arg) +{ + int restart_timer; + struct net_device *dev = arg; + struct ctcm_priv *priv = dev->ml_priv; + + CTCMY_DBF_DEV_NAME(TRACE, dev, ""); + + if (IS_MPC(priv)) { + restart_timer = CTCM_TIME_1_SEC; + } else { + restart_timer = CTCM_TIME_5_SEC; + } + dev_info(&dev->dev, "Restarting device\n"); + + dev_action_stop(fi, event, arg); + fsm_event(priv->fsm, DEV_EVENT_STOP, dev); + if (IS_MPC(priv)) + fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET); + + /* going back into start sequence too quickly can */ + /* result in the other side becoming unreachable due */ + /* to sense reported when IO is aborted */ + fsm_addtimer(&priv->restart_timer, restart_timer, + DEV_EVENT_START, dev); +} + +/** + * Called from channel statemachine + * when a channel is up and running. + * + * fi An instance of an interface statemachine. + * event The event, just happened. + * arg Generic pointer, casted from struct net_device * upon call. + */ +static void dev_action_chup(fsm_instance *fi, int event, void *arg) +{ + struct net_device *dev = arg; + struct ctcm_priv *priv = dev->ml_priv; + int dev_stat = fsm_getstate(fi); + + CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE, + "%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL, + dev->name, dev->ml_priv, dev_stat, event); + + switch (fsm_getstate(fi)) { + case DEV_STATE_STARTWAIT_RXTX: + if (event == DEV_EVENT_RXUP) + fsm_newstate(fi, DEV_STATE_STARTWAIT_TX); + else + fsm_newstate(fi, DEV_STATE_STARTWAIT_RX); + break; + case DEV_STATE_STARTWAIT_RX: + if (event == DEV_EVENT_RXUP) { + fsm_newstate(fi, DEV_STATE_RUNNING); + dev_info(&dev->dev, + "Connected with remote side\n"); + ctcm_clear_busy(dev); + } + break; + case DEV_STATE_STARTWAIT_TX: + if (event == DEV_EVENT_TXUP) { + fsm_newstate(fi, DEV_STATE_RUNNING); + dev_info(&dev->dev, + "Connected with remote side\n"); + ctcm_clear_busy(dev); + } + break; + case DEV_STATE_STOPWAIT_TX: + if (event == DEV_EVENT_RXUP) + fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX); + break; + case DEV_STATE_STOPWAIT_RX: + if (event == DEV_EVENT_TXUP) + fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX); + break; + } + + if (IS_MPC(priv)) { + if (event == DEV_EVENT_RXUP) + mpc_channel_action(priv->channel[CTCM_READ], + CTCM_READ, MPC_CHANNEL_ADD); + else + mpc_channel_action(priv->channel[CTCM_WRITE], + CTCM_WRITE, MPC_CHANNEL_ADD); + } +} + +/** + * Called from device statemachine + * when a channel has been shutdown. + * + * fi An instance of an interface statemachine. + * event The event, just happened. + * arg Generic pointer, casted from struct net_device * upon call. 
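+ * Depending on the current device state this falls back to the matching STARTWAIT/STOPWAIT state, or to DEV_STATE_STOPPED once both directions are down; for MPC devices the affected channel is also removed from the group.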
+ */ +static void dev_action_chdown(fsm_instance *fi, int event, void *arg) +{ + + struct net_device *dev = arg; + struct ctcm_priv *priv = dev->ml_priv; + + CTCMY_DBF_DEV_NAME(SETUP, dev, ""); + + switch (fsm_getstate(fi)) { + case DEV_STATE_RUNNING: + if (event == DEV_EVENT_TXDOWN) + fsm_newstate(fi, DEV_STATE_STARTWAIT_TX); + else + fsm_newstate(fi, DEV_STATE_STARTWAIT_RX); + break; + case DEV_STATE_STARTWAIT_RX: + if (event == DEV_EVENT_TXDOWN) + fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX); + break; + case DEV_STATE_STARTWAIT_TX: + if (event == DEV_EVENT_RXDOWN) + fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX); + break; + case DEV_STATE_STOPWAIT_RXTX: + if (event == DEV_EVENT_TXDOWN) + fsm_newstate(fi, DEV_STATE_STOPWAIT_RX); + else + fsm_newstate(fi, DEV_STATE_STOPWAIT_TX); + break; + case DEV_STATE_STOPWAIT_RX: + if (event == DEV_EVENT_RXDOWN) + fsm_newstate(fi, DEV_STATE_STOPPED); + break; + case DEV_STATE_STOPWAIT_TX: + if (event == DEV_EVENT_TXDOWN) + fsm_newstate(fi, DEV_STATE_STOPPED); + break; + } + if (IS_MPC(priv)) { + if (event == DEV_EVENT_RXDOWN) + mpc_channel_action(priv->channel[CTCM_READ], + CTCM_READ, MPC_CHANNEL_REMOVE); + else + mpc_channel_action(priv->channel[CTCM_WRITE], + CTCM_WRITE, MPC_CHANNEL_REMOVE); + } +} + +const fsm_node dev_fsm[] = { + { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start }, + { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start }, + { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown }, + { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown }, + { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart }, + { DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start }, + { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup }, + { DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup }, + { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown }, + { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart }, + { DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start }, + { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup }, + { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup }, + { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown }, + { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart }, + { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop }, + { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup }, + { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup }, + { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown }, + { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown }, + { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart }, + { DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop }, + { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup }, + { DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup }, + { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown }, + { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart }, + { DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop }, + { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup }, + { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup }, + { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, dev_action_chdown }, + { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart }, + { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop }, + { DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown }, + { DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown }, + { DEV_STATE_RUNNING, DEV_EVENT_TXUP, ctcm_action_nop }, 
+ { DEV_STATE_RUNNING, DEV_EVENT_RXUP, ctcm_action_nop }, + { DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart }, +}; + +int dev_fsm_len = ARRAY_SIZE(dev_fsm); + +/* --- This is the END my friend --- */ + diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h new file mode 100644 index 000000000..225737295 --- /dev/null +++ b/drivers/s390/net/ctcm_fsms.h @@ -0,0 +1,357 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2001, 2007 + * Authors: Fritz Elfert (felfert@millenux.com) + * Peter Tiedemann (ptiedem@de.ibm.com) + * MPC additions : + * Belinda Thompson (belindat@us.ibm.com) + * Andy Richter (richtera@us.ibm.com) + */ +#ifndef _CTCM_FSMS_H_ +#define _CTCM_FSMS_H_ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/bitops.h> + +#include <linux/signal.h> +#include <linux/string.h> + +#include <linux/ip.h> +#include <linux/if_arp.h> +#include <linux/tcp.h> +#include <linux/skbuff.h> +#include <linux/ctype.h> +#include <net/dst.h> + +#include <linux/io.h> +#include <asm/ccwdev.h> +#include <asm/ccwgroup.h> +#include <linux/uaccess.h> + +#include <asm/idals.h> + +#include "fsm.h" +#include "ctcm_main.h" + +/* + * Definitions for the channel statemachine(s) for ctc and ctcmpc + * + * To allow better kerntyping, prefix-less definitions for channel states + * and channel events have been replaced : + * ch_event... -> ctc_ch_event... + * CH_EVENT... -> CTC_EVENT... + * ch_state... -> ctc_ch_state... + * CH_STATE... -> CTC_STATE... + */ +/* + * Events of the channel statemachine(s) for ctc and ctcmpc + */ +enum ctc_ch_events { + /* + * Events, representing return code of + * I/O operations (ccw_device_start, ccw_device_halt et al.) + */ + CTC_EVENT_IO_SUCCESS, + CTC_EVENT_IO_EBUSY, + CTC_EVENT_IO_ENODEV, + CTC_EVENT_IO_UNKNOWN, + + CTC_EVENT_ATTNBUSY, + CTC_EVENT_ATTN, + CTC_EVENT_BUSY, + /* + * Events, representing unit-check + */ + CTC_EVENT_UC_RCRESET, + CTC_EVENT_UC_RSRESET, + CTC_EVENT_UC_TXTIMEOUT, + CTC_EVENT_UC_TXPARITY, + CTC_EVENT_UC_HWFAIL, + CTC_EVENT_UC_RXPARITY, + CTC_EVENT_UC_ZERO, + CTC_EVENT_UC_UNKNOWN, + /* + * Events, representing subchannel-check + */ + CTC_EVENT_SC_UNKNOWN, + /* + * Events, representing machine checks + */ + CTC_EVENT_MC_FAIL, + CTC_EVENT_MC_GOOD, + /* + * Event, representing normal IRQ + */ + CTC_EVENT_IRQ, + CTC_EVENT_FINSTAT, + /* + * Event, representing timer expiry. + */ + CTC_EVENT_TIMER, + /* + * Events, representing commands from upper levels. + */ + CTC_EVENT_START, + CTC_EVENT_STOP, + CTC_NR_EVENTS, + /* + * additional MPC events + */ + CTC_EVENT_SEND_XID = CTC_NR_EVENTS, + CTC_EVENT_RSWEEP_TIMER, + /* + * MUST be always the last element!! + */ + CTC_MPC_NR_EVENTS, +}; + +/* + * States of the channel statemachine(s) for ctc and ctcmpc. 
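+ * As with the event enum above, the plain CTC states come first and end with CTC_NR_STATES; the MPC-only states are appended after that marker so both drivers share a single state numbering.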
+ */ +enum ctc_ch_states { + /* + * Channel not assigned to any device, + * initial state, direction invalid + */ + CTC_STATE_IDLE, + /* + * Channel assigned but not operating + */ + CTC_STATE_STOPPED, + CTC_STATE_STARTWAIT, + CTC_STATE_STARTRETRY, + CTC_STATE_SETUPWAIT, + CTC_STATE_RXINIT, + CTC_STATE_TXINIT, + CTC_STATE_RX, + CTC_STATE_TX, + CTC_STATE_RXIDLE, + CTC_STATE_TXIDLE, + CTC_STATE_RXERR, + CTC_STATE_TXERR, + CTC_STATE_TERM, + CTC_STATE_DTERM, + CTC_STATE_NOTOP, + CTC_NR_STATES, /* MUST be the last element of non-expanded states */ + /* + * additional MPC states + */ + CH_XID0_PENDING = CTC_NR_STATES, + CH_XID0_INPROGRESS, + CH_XID7_PENDING, + CH_XID7_PENDING1, + CH_XID7_PENDING2, + CH_XID7_PENDING3, + CH_XID7_PENDING4, + CTC_MPC_NR_STATES, /* MUST be the last element of expanded mpc states */ +}; + +extern const char *ctc_ch_event_names[]; + +extern const char *ctc_ch_state_names[]; + +void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg); +void ctcm_purge_skb_queue(struct sk_buff_head *q); +void fsm_action_nop(fsm_instance *fi, int event, void *arg); + +/* + * ----- non-static actions for ctcm channel statemachine ----- + * + */ +void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg); + +/* + * ----- FSM (state/event/action) of the ctcm channel statemachine ----- + */ +extern const fsm_node ch_fsm[]; +extern int ch_fsm_len; + + +/* + * ----- non-static actions for ctcmpc channel statemachine ---- + * + */ +/* shared : +void ctcm_chx_txidle(fsm_instance * fi, int event, void *arg); + */ +void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg); + +/* + * ----- FSM (state/event/action) of the ctcmpc channel statemachine ----- + */ +extern const fsm_node ctcmpc_ch_fsm[]; +extern int mpc_ch_fsm_len; + +/* + * Definitions for the device interface statemachine for ctc and mpc + */ + +/* + * States of the device interface statemachine. + */ +enum dev_states { + DEV_STATE_STOPPED, + DEV_STATE_STARTWAIT_RXTX, + DEV_STATE_STARTWAIT_RX, + DEV_STATE_STARTWAIT_TX, + DEV_STATE_STOPWAIT_RXTX, + DEV_STATE_STOPWAIT_RX, + DEV_STATE_STOPWAIT_TX, + DEV_STATE_RUNNING, + /* + * MUST be always the last element!! + */ + CTCM_NR_DEV_STATES +}; + +extern const char *dev_state_names[]; + +/* + * Events of the device interface statemachine. + * ctcm and ctcmpc + */ +enum dev_events { + DEV_EVENT_START, + DEV_EVENT_STOP, + DEV_EVENT_RXUP, + DEV_EVENT_TXUP, + DEV_EVENT_RXDOWN, + DEV_EVENT_TXDOWN, + DEV_EVENT_RESTART, + /* + * MUST be always the last element!! + */ + CTCM_NR_DEV_EVENTS +}; + +extern const char *dev_event_names[]; + +/* + * Actions for the device interface statemachine. + * ctc and ctcmpc + */ +/* +static void dev_action_start(fsm_instance * fi, int event, void *arg); +static void dev_action_stop(fsm_instance * fi, int event, void *arg); +static void dev_action_restart(fsm_instance *fi, int event, void *arg); +static void dev_action_chup(fsm_instance * fi, int event, void *arg); +static void dev_action_chdown(fsm_instance * fi, int event, void *arg); +*/ + +/* + * The (state/event/action) fsm table of the device interface statemachine. + * ctcm and ctcmpc + */ +extern const fsm_node dev_fsm[]; +extern int dev_fsm_len; + + +/* + * Definitions for the MPC Group statemachine + */ + +/* + * MPC Group Station FSM States + +State Name When In This State +====================== ======================================= +MPCG_STATE_RESET Initial State When Driver Loaded + We receive and send NOTHING + +MPCG_STATE_INOP INOP Received. 
+ Group level non-recoverable error + +MPCG_STATE_READY XID exchanges for at least 1 write and + 1 read channel have completed. + Group is ready for data transfer. + +States from ctc_mpc_alloc_channel +============================================================== +MPCG_STATE_XID2INITW Awaiting XID2(0) Initiation + ATTN from other side will start + XID negotiations. + Y-side protocol only. + +MPCG_STATE_XID2INITX XID2(0) negotiations are in progress. + At least 1, but not all, XID2(0)'s + have been received from partner. + +MPCG_STATE_XID7INITW XID2(0) complete + No XID2(7)'s have yet been received. + XID2(7) negotiations pending. + +MPCG_STATE_XID7INITX XID2(7) negotiations in progress. + At least 1, but not all, XID2(7)'s + have been received from partner. + +MPCG_STATE_XID7INITF XID2(7) negotiations complete. + Transitioning to READY. + +MPCG_STATE_READY Ready for Data Transfer. + + +States from ctc_mpc_establish_connectivity call +============================================================== +MPCG_STATE_XID0IOWAIT Initiating XID2(0) negotiations. + X-side protocol only. + ATTN-BUSY from other side will convert + this to Y-side protocol and the + ctc_mpc_alloc_channel flow will begin. + +MPCG_STATE_XID0IOWAIX XID2(0) negotiations are in progress. + At least 1, but not all, XID2(0)'s + have been received from partner. + +MPCG_STATE_XID7INITI XID2(0) complete + No XID2(7)'s have yet been received. + XID2(7) negotiations pending. + +MPCG_STATE_XID7INITZ XID2(7) negotiations in progress. + At least 1, but not all, XID2(7)'s + have been received from partner. + +MPCG_STATE_XID7INITF XID2(7) negotiations complete. + Transitioning to READY. + +MPCG_STATE_READY Ready for Data Transfer. + +*/ + +enum mpcg_events { + MPCG_EVENT_INOP, + MPCG_EVENT_DISCONC, + MPCG_EVENT_XID0DO, + MPCG_EVENT_XID2, + MPCG_EVENT_XID2DONE, + MPCG_EVENT_XID7DONE, + MPCG_EVENT_TIMER, + MPCG_EVENT_DOIO, + MPCG_NR_EVENTS, +}; + +enum mpcg_states { + MPCG_STATE_RESET, + MPCG_STATE_INOP, + MPCG_STATE_XID2INITW, + MPCG_STATE_XID2INITX, + MPCG_STATE_XID7INITW, + MPCG_STATE_XID7INITX, + MPCG_STATE_XID0IOWAIT, + MPCG_STATE_XID0IOWAIX, + MPCG_STATE_XID7INITI, + MPCG_STATE_XID7INITZ, + MPCG_STATE_XID7INITF, + MPCG_STATE_FLOWC, + MPCG_STATE_READY, + MPCG_NR_STATES, +}; + +#endif +/* --- This is the END my friend --- */ diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c new file mode 100644 index 000000000..f63c5c871 --- /dev/null +++ b/drivers/s390/net/ctcm_main.c @@ -0,0 +1,1872 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 
2001, 2009 + * Author(s): + * Original CTC driver(s): + * Fritz Elfert (felfert@millenux.com) + * Dieter Wellerdiek (wel@de.ibm.com) + * Martin Schwidefsky (schwidefsky@de.ibm.com) + * Denis Joseph Barrow (barrow_dj@yahoo.com) + * Jochen Roehrig (roehrig@de.ibm.com) + * Cornelia Huck <cornelia.huck@de.ibm.com> + * MPC additions: + * Belinda Thompson (belindat@us.ibm.com) + * Andy Richter (richtera@us.ibm.com) + * Revived by: + * Peter Tiedemann (ptiedem@de.ibm.com) + */ + +#undef DEBUG +#undef DEBUGDATA +#undef DEBUGCCW + +#define KMSG_COMPONENT "ctcm" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/bitops.h> + +#include <linux/signal.h> +#include <linux/string.h> + +#include <linux/ip.h> +#include <linux/if_arp.h> +#include <linux/tcp.h> +#include <linux/skbuff.h> +#include <linux/ctype.h> +#include <net/dst.h> + +#include <linux/io.h> +#include <asm/ccwdev.h> +#include <asm/ccwgroup.h> +#include <linux/uaccess.h> + +#include <asm/idals.h> + +#include "ctcm_fsms.h" +#include "ctcm_main.h" + +/* Some common global variables */ + +/** + * The root device for ctcm group devices + */ +static struct device *ctcm_root_dev; + +/* + * Linked list of all detected channels. + */ +struct channel *channels; + +/** + * Unpack a just received skb and hand it over to + * upper layers. + * + * ch The channel where this skb has been received. + * pskb The received skb. + */ +void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb) +{ + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + __u16 len = *((__u16 *) pskb->data); + + skb_put(pskb, 2 + LL_HEADER_LENGTH); + skb_pull(pskb, 2); + pskb->dev = dev; + pskb->ip_summed = CHECKSUM_UNNECESSARY; + while (len > 0) { + struct sk_buff *skb; + int skblen; + struct ll_header *header = (struct ll_header *)pskb->data; + + skb_pull(pskb, LL_HEADER_LENGTH); + if ((ch->protocol == CTCM_PROTO_S390) && + (header->type != ETH_P_IP)) { + if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) { + ch->logflags |= LOG_FLAG_ILLEGALPKT; + /* + * Check packet type only if we stick strictly + * to S/390's protocol of OS390. This only + * supports IP. Otherwise allow any packet + * type. 
+ */ + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): Illegal packet type 0x%04x" + " - dropping", + CTCM_FUNTAIL, dev->name, header->type); + } + priv->stats.rx_dropped++; + priv->stats.rx_frame_errors++; + return; + } + pskb->protocol = cpu_to_be16(header->type); + if ((header->length <= LL_HEADER_LENGTH) || + (len <= LL_HEADER_LENGTH)) { + if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): Illegal packet size %d(%d,%d)" + "- dropping", + CTCM_FUNTAIL, dev->name, + header->length, dev->mtu, len); + ch->logflags |= LOG_FLAG_ILLEGALSIZE; + } + + priv->stats.rx_dropped++; + priv->stats.rx_length_errors++; + return; + } + header->length -= LL_HEADER_LENGTH; + len -= LL_HEADER_LENGTH; + if ((header->length > skb_tailroom(pskb)) || + (header->length > len)) { + if (!(ch->logflags & LOG_FLAG_OVERRUN)) { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): Packet size %d (overrun)" + " - dropping", CTCM_FUNTAIL, + dev->name, header->length); + ch->logflags |= LOG_FLAG_OVERRUN; + } + + priv->stats.rx_dropped++; + priv->stats.rx_length_errors++; + return; + } + skb_put(pskb, header->length); + skb_reset_mac_header(pskb); + len -= header->length; + skb = dev_alloc_skb(pskb->len); + if (!skb) { + if (!(ch->logflags & LOG_FLAG_NOMEM)) { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): MEMORY allocation error", + CTCM_FUNTAIL, dev->name); + ch->logflags |= LOG_FLAG_NOMEM; + } + priv->stats.rx_dropped++; + return; + } + skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len), + pskb->len); + skb_reset_mac_header(skb); + skb->dev = pskb->dev; + skb->protocol = pskb->protocol; + pskb->ip_summed = CHECKSUM_UNNECESSARY; + skblen = skb->len; + /* + * reset logflags + */ + ch->logflags = 0; + priv->stats.rx_packets++; + priv->stats.rx_bytes += skblen; + netif_rx_ni(skb); + if (len > 0) { + skb_pull(pskb, header->length); + if (skb_tailroom(pskb) < LL_HEADER_LENGTH) { + CTCM_DBF_DEV_NAME(TRACE, dev, + "Overrun in ctcm_unpack_skb"); + ch->logflags |= LOG_FLAG_OVERRUN; + return; + } + skb_put(pskb, LL_HEADER_LENGTH); + } + } +} + +/** + * Release a specific channel in the channel list. + * + * ch Pointer to channel struct to be released. + */ +static void channel_free(struct channel *ch) +{ + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s)", CTCM_FUNTAIL, ch->id); + ch->flags &= ~CHANNEL_FLAGS_INUSE; + fsm_newstate(ch->fsm, CTC_STATE_IDLE); +} + +/** + * Remove a specific channel in the channel list. + * + * ch Pointer to channel struct to be released. + */ +static void channel_remove(struct channel *ch) +{ + struct channel **c = &channels; + char chid[CTCM_ID_SIZE+1]; + int ok = 0; + + if (ch == NULL) + return; + else + strncpy(chid, ch->id, CTCM_ID_SIZE); + + channel_free(ch); + while (*c) { + if (*c == ch) { + *c = ch->next; + fsm_deltimer(&ch->timer); + if (IS_MPC(ch)) + fsm_deltimer(&ch->sweep_timer); + + kfree_fsm(ch->fsm); + clear_normalized_cda(&ch->ccw[4]); + if (ch->trans_skb != NULL) { + clear_normalized_cda(&ch->ccw[1]); + dev_kfree_skb_any(ch->trans_skb); + } + if (IS_MPC(ch)) { + tasklet_kill(&ch->ch_tasklet); + tasklet_kill(&ch->ch_disc_tasklet); + kfree(ch->discontact_th); + } + kfree(ch->ccw); + kfree(ch->irb); + kfree(ch); + ok = 1; + break; + } + c = &((*c)->next); + } + + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s) %s", CTCM_FUNTAIL, + chid, ok ? "OK" : "failed"); +} + +/** + * Get a specific channel from the channel list. + * + * type Type of channel we are interested in. + * id Id of channel we are interested in. 
+ * direction Direction we want to use this channel for. + * + * returns Pointer to a channel or NULL if no matching channel available. + */ +static struct channel *channel_get(enum ctcm_channel_types type, + char *id, int direction) +{ + struct channel *ch = channels; + + while (ch && (strncmp(ch->id, id, CTCM_ID_SIZE) || (ch->type != type))) + ch = ch->next; + if (!ch) { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%d, %s, %d) not found in channel list\n", + CTCM_FUNTAIL, type, id, direction); + } else { + if (ch->flags & CHANNEL_FLAGS_INUSE) + ch = NULL; + else { + ch->flags |= CHANNEL_FLAGS_INUSE; + ch->flags &= ~CHANNEL_FLAGS_RWMASK; + ch->flags |= (direction == CTCM_WRITE) + ? CHANNEL_FLAGS_WRITE : CHANNEL_FLAGS_READ; + fsm_newstate(ch->fsm, CTC_STATE_STOPPED); + } + } + return ch; +} + +static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb) +{ + if (!IS_ERR(irb)) + return 0; + + CTCM_DBF_TEXT_(ERROR, CTC_DBF_WARN, + "irb error %ld on device %s\n", + PTR_ERR(irb), dev_name(&cdev->dev)); + + switch (PTR_ERR(irb)) { + case -EIO: + dev_err(&cdev->dev, + "An I/O-error occurred on the CTCM device\n"); + break; + case -ETIMEDOUT: + dev_err(&cdev->dev, + "An adapter hardware operation timed out\n"); + break; + default: + dev_err(&cdev->dev, + "An error occurred on the adapter hardware\n"); + } + return PTR_ERR(irb); +} + + +/** + * Check sense of a unit check. + * + * ch The channel, the sense code belongs to. + * sense The sense code to inspect. + */ +static void ccw_unit_check(struct channel *ch, __u8 sense) +{ + CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, + "%s(%s): %02x", + CTCM_FUNTAIL, ch->id, sense); + + if (sense & SNS0_INTERVENTION_REQ) { + if (sense & 0x01) { + if (ch->sense_rc != 0x01) { + pr_notice( + "%s: The communication peer has " + "disconnected\n", ch->id); + ch->sense_rc = 0x01; + } + fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch); + } else { + if (ch->sense_rc != SNS0_INTERVENTION_REQ) { + pr_notice( + "%s: The remote operating system is " + "not available\n", ch->id); + ch->sense_rc = SNS0_INTERVENTION_REQ; + } + fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch); + } + } else if (sense & SNS0_EQUIPMENT_CHECK) { + if (sense & SNS0_BUS_OUT_CHECK) { + if (ch->sense_rc != SNS0_BUS_OUT_CHECK) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, + "%s(%s): remote HW error %02x", + CTCM_FUNTAIL, ch->id, sense); + ch->sense_rc = SNS0_BUS_OUT_CHECK; + } + fsm_event(ch->fsm, CTC_EVENT_UC_HWFAIL, ch); + } else { + if (ch->sense_rc != SNS0_EQUIPMENT_CHECK) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, + "%s(%s): remote read parity error %02x", + CTCM_FUNTAIL, ch->id, sense); + ch->sense_rc = SNS0_EQUIPMENT_CHECK; + } + fsm_event(ch->fsm, CTC_EVENT_UC_RXPARITY, ch); + } + } else if (sense & SNS0_BUS_OUT_CHECK) { + if (ch->sense_rc != SNS0_BUS_OUT_CHECK) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, + "%s(%s): BUS OUT error %02x", + CTCM_FUNTAIL, ch->id, sense); + ch->sense_rc = SNS0_BUS_OUT_CHECK; + } + if (sense & 0x04) /* data-streaming timeout */ + fsm_event(ch->fsm, CTC_EVENT_UC_TXTIMEOUT, ch); + else /* Data-transfer parity error */ + fsm_event(ch->fsm, CTC_EVENT_UC_TXPARITY, ch); + } else if (sense & SNS0_CMD_REJECT) { + if (ch->sense_rc != SNS0_CMD_REJECT) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, + "%s(%s): Command rejected", + CTCM_FUNTAIL, ch->id); + ch->sense_rc = SNS0_CMD_REJECT; + } + } else if (sense == 0) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, + "%s(%s): Unit check ZERO", + CTCM_FUNTAIL, ch->id); + fsm_event(ch->fsm, CTC_EVENT_UC_ZERO, ch); + } else { + CTCM_DBF_TEXT_(TRACE, 
CTC_DBF_WARN, + "%s(%s): Unit check code %02x unknown", + CTCM_FUNTAIL, ch->id, sense); + fsm_event(ch->fsm, CTC_EVENT_UC_UNKNOWN, ch); + } +} + +int ctcm_ch_alloc_buffer(struct channel *ch) +{ + clear_normalized_cda(&ch->ccw[1]); + ch->trans_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC | GFP_DMA); + if (ch->trans_skb == NULL) { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): %s trans_skb allocation error", + CTCM_FUNTAIL, ch->id, + (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? + "RX" : "TX"); + return -ENOMEM; + } + + ch->ccw[1].count = ch->max_bufsize; + if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) { + dev_kfree_skb(ch->trans_skb); + ch->trans_skb = NULL; + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): %s set norm_cda failed", + CTCM_FUNTAIL, ch->id, + (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? + "RX" : "TX"); + return -ENOMEM; + } + + ch->ccw[1].count = 0; + ch->trans_skb_data = ch->trans_skb->data; + ch->flags &= ~CHANNEL_FLAGS_BUFSIZE_CHANGED; + return 0; +} + +/* + * Interface API for upper network layers + */ + +/** + * Open an interface. + * Called from generic network layer when ifconfig up is run. + * + * dev Pointer to interface struct. + * + * returns 0 on success, -ERRNO on failure. (Never fails.) + */ +int ctcm_open(struct net_device *dev) +{ + struct ctcm_priv *priv = dev->ml_priv; + + CTCMY_DBF_DEV_NAME(SETUP, dev, ""); + if (!IS_MPC(priv)) + fsm_event(priv->fsm, DEV_EVENT_START, dev); + return 0; +} + +/** + * Close an interface. + * Called from generic network layer when ifconfig down is run. + * + * dev Pointer to interface struct. + * + * returns 0 on success, -ERRNO on failure. (Never fails.) + */ +int ctcm_close(struct net_device *dev) +{ + struct ctcm_priv *priv = dev->ml_priv; + + CTCMY_DBF_DEV_NAME(SETUP, dev, ""); + if (!IS_MPC(priv)) + fsm_event(priv->fsm, DEV_EVENT_STOP, dev); + return 0; +} + + +/** + * Transmit a packet. + * This is a helper function for ctcm_tx(). + * + * ch Channel to be used for sending. + * skb Pointer to struct sk_buff of packet to send. + * The linklevel header has already been set up + * by ctcm_tx(). + * + * returns 0 on success, -ERRNO on failure. (Never fails.) + */ +static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb) +{ + unsigned long saveflags; + struct ll_header header; + int rc = 0; + __u16 block_len; + int ccw_idx; + struct sk_buff *nskb; + unsigned long hi; + + /* we need to acquire the lock for testing the state + * otherwise we can have an IRQ changing the state to + * TXIDLE after the test but before acquiring the lock. + */ + spin_lock_irqsave(&ch->collect_lock, saveflags); + if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) { + int l = skb->len + LL_HEADER_LENGTH; + + if (ch->collect_len + l > ch->max_bufsize - 2) { + spin_unlock_irqrestore(&ch->collect_lock, saveflags); + return -EBUSY; + } else { + refcount_inc(&skb->users); + header.length = l; + header.type = be16_to_cpu(skb->protocol); + header.unused = 0; + memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, + LL_HEADER_LENGTH); + skb_queue_tail(&ch->collect_queue, skb); + ch->collect_len += l; + } + spin_unlock_irqrestore(&ch->collect_lock, saveflags); + goto done; + } + spin_unlock_irqrestore(&ch->collect_lock, saveflags); + /* + * Protect skb against beeing free'd by upper + * layers. 
+ */ + refcount_inc(&skb->users); + ch->prof.txlen += skb->len; + header.length = skb->len + LL_HEADER_LENGTH; + header.type = be16_to_cpu(skb->protocol); + header.unused = 0; + memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH); + block_len = skb->len + 2; + *((__u16 *)skb_push(skb, 2)) = block_len; + + /* + * IDAL support in CTCM is broken, so we have to + * care about skb's above 2G ourselves. + */ + hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31; + if (hi) { + nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); + if (!nskb) { + refcount_dec(&skb->users); + skb_pull(skb, LL_HEADER_LENGTH + 2); + ctcm_clear_busy(ch->netdev); + return -ENOMEM; + } else { + skb_put_data(nskb, skb->data, skb->len); + refcount_inc(&nskb->users); + refcount_dec(&skb->users); + dev_kfree_skb_irq(skb); + skb = nskb; + } + } + + ch->ccw[4].count = block_len; + if (set_normalized_cda(&ch->ccw[4], skb->data)) { + /* + * idal allocation failed, try via copying to + * trans_skb. trans_skb usually has a pre-allocated + * idal. + */ + if (ctcm_checkalloc_buffer(ch)) { + /* + * Remove our header. It gets added + * again on retransmit. + */ + refcount_dec(&skb->users); + skb_pull(skb, LL_HEADER_LENGTH + 2); + ctcm_clear_busy(ch->netdev); + return -ENOMEM; + } + + skb_reset_tail_pointer(ch->trans_skb); + ch->trans_skb->len = 0; + ch->ccw[1].count = skb->len; + skb_copy_from_linear_data(skb, + skb_put(ch->trans_skb, skb->len), skb->len); + refcount_dec(&skb->users); + dev_kfree_skb_irq(skb); + ccw_idx = 0; + } else { + skb_queue_tail(&ch->io_queue, skb); + ccw_idx = 3; + } + if (do_debug_ccw) + ctcmpc_dumpit((char *)&ch->ccw[ccw_idx], + sizeof(struct ccw1) * 3); + ch->retry = 0; + fsm_newstate(ch->fsm, CTC_STATE_TX); + fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + ch->prof.send_stamp = jiffies; + rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], + (unsigned long)ch, 0xff, 0); + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); + if (ccw_idx == 3) + ch->prof.doios_single++; + if (rc != 0) { + fsm_deltimer(&ch->timer); + ctcm_ccw_check_rc(ch, rc, "single skb TX"); + if (ccw_idx == 3) + skb_dequeue_tail(&ch->io_queue); + /* + * Remove our header. It gets added + * again on retransmit. 
+ */ + skb_pull(skb, LL_HEADER_LENGTH + 2); + } else if (ccw_idx == 0) { + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + priv->stats.tx_packets++; + priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH; + } +done: + ctcm_clear_busy(ch->netdev); + return rc; +} + +static void ctcmpc_send_sweep_req(struct channel *rch) +{ + struct net_device *dev = rch->netdev; + struct ctcm_priv *priv; + struct mpc_group *grp; + struct th_sweep *header; + struct sk_buff *sweep_skb; + struct channel *ch; + /* int rc = 0; */ + + priv = dev->ml_priv; + grp = priv->mpcg; + ch = priv->channel[CTCM_WRITE]; + + /* sweep processing is not complete until response and request */ + /* has completed for all read channels in group */ + if (grp->in_sweep == 0) { + grp->in_sweep = 1; + grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ]; + grp->sweep_req_pend_num = grp->active_channels[CTCM_READ]; + } + + sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA); + + if (sweep_skb == NULL) { + /* rc = -ENOMEM; */ + goto nomem; + } + + header = kmalloc(TH_SWEEP_LENGTH, gfp_type()); + + if (!header) { + dev_kfree_skb_any(sweep_skb); + /* rc = -ENOMEM; */ + goto nomem; + } + + header->th.th_seg = 0x00 ; + header->th.th_ch_flag = TH_SWEEP_REQ; /* 0x0f */ + header->th.th_blk_flag = 0x00; + header->th.th_is_xid = 0x00; + header->th.th_seq_num = 0x00; + header->sw.th_last_seq = ch->th_seq_num; + + skb_put_data(sweep_skb, header, TH_SWEEP_LENGTH); + + kfree(header); + + netif_trans_update(dev); + skb_queue_tail(&ch->sweep_queue, sweep_skb); + + fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch); + + return; + +nomem: + grp->in_sweep = 0; + ctcm_clear_busy(dev); + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + + return; +} + +/* + * MPC mode version of transmit_skb + */ +static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) +{ + struct pdu *p_header; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct th_header *header; + struct sk_buff *nskb; + int rc = 0; + int ccw_idx; + unsigned long hi; + unsigned long saveflags = 0; /* avoids compiler warning */ + + CTCM_PR_DEBUG("Enter %s: %s, cp=%i ch=0x%p id=%s state=%s\n", + __func__, dev->name, smp_processor_id(), ch, + ch->id, fsm_getstate_str(ch->fsm)); + + if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) { + spin_lock_irqsave(&ch->collect_lock, saveflags); + refcount_inc(&skb->users); + p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type()); + + if (!p_header) { + spin_unlock_irqrestore(&ch->collect_lock, saveflags); + goto nomem_exit; + } + + p_header->pdu_offset = skb->len; + p_header->pdu_proto = 0x01; + p_header->pdu_flag = 0x00; + if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) { + p_header->pdu_flag |= PDU_FIRST | PDU_CNTL; + } else { + p_header->pdu_flag |= PDU_FIRST; + } + p_header->pdu_seq = 0; + memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header, + PDU_HEADER_LENGTH); + + CTCM_PR_DEBUG("%s(%s): Put on collect_q - skb len: %04x \n" + "pdu header and data for up to 32 bytes:\n", + __func__, dev->name, skb->len); + CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len)); + + skb_queue_tail(&ch->collect_queue, skb); + ch->collect_len += skb->len; + kfree(p_header); + + spin_unlock_irqrestore(&ch->collect_lock, saveflags); + goto done; + } + + /* + * Protect skb against beeing free'd by upper + * layers. 
+ */ + refcount_inc(&skb->users); + + /* + * IDAL support in CTCM is broken, so we have to + * care about skb's above 2G ourselves. + */ + hi = ((unsigned long)skb->tail + TH_HEADER_LENGTH) >> 31; + if (hi) { + nskb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); + if (!nskb) { + goto nomem_exit; + } else { + skb_put_data(nskb, skb->data, skb->len); + refcount_inc(&nskb->users); + refcount_dec(&skb->users); + dev_kfree_skb_irq(skb); + skb = nskb; + } + } + + p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type()); + + if (!p_header) + goto nomem_exit; + + p_header->pdu_offset = skb->len; + p_header->pdu_proto = 0x01; + p_header->pdu_flag = 0x00; + p_header->pdu_seq = 0; + if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) { + p_header->pdu_flag |= PDU_FIRST | PDU_CNTL; + } else { + p_header->pdu_flag |= PDU_FIRST; + } + memcpy(skb_push(skb, PDU_HEADER_LENGTH), p_header, PDU_HEADER_LENGTH); + + kfree(p_header); + + if (ch->collect_len > 0) { + spin_lock_irqsave(&ch->collect_lock, saveflags); + skb_queue_tail(&ch->collect_queue, skb); + ch->collect_len += skb->len; + skb = skb_dequeue(&ch->collect_queue); + ch->collect_len -= skb->len; + spin_unlock_irqrestore(&ch->collect_lock, saveflags); + } + + p_header = (struct pdu *)skb->data; + p_header->pdu_flag |= PDU_LAST; + + ch->prof.txlen += skb->len - PDU_HEADER_LENGTH; + + header = kmalloc(TH_HEADER_LENGTH, gfp_type()); + if (!header) + goto nomem_exit; + + header->th_seg = 0x00; + header->th_ch_flag = TH_HAS_PDU; /* Normal data */ + header->th_blk_flag = 0x00; + header->th_is_xid = 0x00; /* Just data here */ + ch->th_seq_num++; + header->th_seq_num = ch->th_seq_num; + + CTCM_PR_DBGDATA("%s(%s) ToVTAM_th_seq= %08x\n" , + __func__, dev->name, ch->th_seq_num); + + /* put the TH on the packet */ + memcpy(skb_push(skb, TH_HEADER_LENGTH), header, TH_HEADER_LENGTH); + + kfree(header); + + CTCM_PR_DBGDATA("%s(%s): skb len: %04x\n - pdu header and data for " + "up to 32 bytes sent to vtam:\n", + __func__, dev->name, skb->len); + CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len)); + + ch->ccw[4].count = skb->len; + if (set_normalized_cda(&ch->ccw[4], skb->data)) { + /* + * idal allocation failed, try via copying to trans_skb. + * trans_skb usually has a pre-allocated idal. + */ + if (ctcm_checkalloc_buffer(ch)) { + /* + * Remove our header. + * It gets added again on retransmit. 
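+			 * (In this MPC path the skb is not re-queued:
+			 * nomem_exit drops the extra reference, frees the
+			 * skb and raises MPCG_EVENT_INOP.)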
+ */ + goto nomem_exit; + } + + skb_reset_tail_pointer(ch->trans_skb); + ch->trans_skb->len = 0; + ch->ccw[1].count = skb->len; + skb_put_data(ch->trans_skb, skb->data, skb->len); + refcount_dec(&skb->users); + dev_kfree_skb_irq(skb); + ccw_idx = 0; + CTCM_PR_DBGDATA("%s(%s): trans_skb len: %04x\n" + "up to 32 bytes sent to vtam:\n", + __func__, dev->name, ch->trans_skb->len); + CTCM_D3_DUMP((char *)ch->trans_skb->data, + min_t(int, 32, ch->trans_skb->len)); + } else { + skb_queue_tail(&ch->io_queue, skb); + ccw_idx = 3; + } + ch->retry = 0; + fsm_newstate(ch->fsm, CTC_STATE_TX); + fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch); + + if (do_debug_ccw) + ctcmpc_dumpit((char *)&ch->ccw[ccw_idx], + sizeof(struct ccw1) * 3); + + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + ch->prof.send_stamp = jiffies; + rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx], + (unsigned long)ch, 0xff, 0); + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); + if (ccw_idx == 3) + ch->prof.doios_single++; + if (rc != 0) { + fsm_deltimer(&ch->timer); + ctcm_ccw_check_rc(ch, rc, "single skb TX"); + if (ccw_idx == 3) + skb_dequeue_tail(&ch->io_queue); + } else if (ccw_idx == 0) { + priv->stats.tx_packets++; + priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH; + } + if (ch->th_seq_num > 0xf0000000) /* Chose at random. */ + ctcmpc_send_sweep_req(ch); + + goto done; +nomem_exit: + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_CRIT, + "%s(%s): MEMORY allocation ERROR\n", + CTCM_FUNTAIL, ch->id); + rc = -ENOMEM; + refcount_dec(&skb->users); + dev_kfree_skb_any(skb); + fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev); +done: + CTCM_PR_DEBUG("Exit %s(%s)\n", __func__, dev->name); + return rc; +} + +/** + * Start transmission of a packet. + * Called from generic network device layer. + * + * skb Pointer to buffer containing the packet. + * dev Pointer to interface struct. + * + * returns 0 if packet consumed, !0 if packet rejected. + * Note: If we return !0, then the packet is free'd by + * the generic network layer. + */ +/* first merge version - leaving both functions separated */ +static int ctcm_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct ctcm_priv *priv = dev->ml_priv; + + if (skb == NULL) { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): NULL sk_buff passed", + CTCM_FUNTAIL, dev->name); + priv->stats.tx_dropped++; + return NETDEV_TX_OK; + } + if (skb_headroom(skb) < (LL_HEADER_LENGTH + 2)) { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s(%s): Got sk_buff with head room < %ld bytes", + CTCM_FUNTAIL, dev->name, LL_HEADER_LENGTH + 2); + dev_kfree_skb(skb); + priv->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + /* + * If channels are not running, try to restart them + * and throw away packet. + */ + if (fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) { + fsm_event(priv->fsm, DEV_EVENT_START, dev); + dev_kfree_skb(skb); + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + priv->stats.tx_carrier_errors++; + return NETDEV_TX_OK; + } + + if (ctcm_test_and_set_busy(dev)) + return NETDEV_TX_BUSY; + + netif_trans_update(dev); + if (ctcm_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) + return NETDEV_TX_BUSY; + return NETDEV_TX_OK; +} + +/* unmerged MPC variant of ctcm_tx */ +static int ctcmpc_tx(struct sk_buff *skb, struct net_device *dev) +{ + int len = 0; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct sk_buff *newskb = NULL; + + /* + * Some sanity checks ... 
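+	 * - a NULL skb is only counted as dropped,
+	 * - too little headroom for the TH and PDU headers forces a copy
+	 *   into a freshly allocated buffer,
+	 * - an inactive device or MPC group, or a busy device, drops the
+	 *   packet.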
+ */ + if (skb == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): NULL sk_buff passed", + CTCM_FUNTAIL, dev->name); + priv->stats.tx_dropped++; + goto done; + } + if (skb_headroom(skb) < (TH_HEADER_LENGTH + PDU_HEADER_LENGTH)) { + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR, + "%s(%s): Got sk_buff with head room < %ld bytes", + CTCM_FUNTAIL, dev->name, + TH_HEADER_LENGTH + PDU_HEADER_LENGTH); + + CTCM_D3_DUMP((char *)skb->data, min_t(int, 32, skb->len)); + + len = skb->len + TH_HEADER_LENGTH + PDU_HEADER_LENGTH; + newskb = __dev_alloc_skb(len, gfp_type() | GFP_DMA); + + if (!newskb) { + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR, + "%s: %s: __dev_alloc_skb failed", + __func__, dev->name); + + dev_kfree_skb_any(skb); + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + priv->stats.tx_carrier_errors++; + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + goto done; + } + newskb->protocol = skb->protocol; + skb_reserve(newskb, TH_HEADER_LENGTH + PDU_HEADER_LENGTH); + skb_put_data(newskb, skb->data, skb->len); + dev_kfree_skb_any(skb); + skb = newskb; + } + + /* + * If channels are not running, + * notify anybody about a link failure and throw + * away packet. + */ + if ((fsm_getstate(priv->fsm) != DEV_STATE_RUNNING) || + (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) { + dev_kfree_skb_any(skb); + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): inactive MPCGROUP - dropped", + CTCM_FUNTAIL, dev->name); + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + priv->stats.tx_carrier_errors++; + goto done; + } + + if (ctcm_test_and_set_busy(dev)) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): device busy - dropped", + CTCM_FUNTAIL, dev->name); + dev_kfree_skb_any(skb); + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + priv->stats.tx_carrier_errors++; + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + goto done; + } + + netif_trans_update(dev); + if (ctcmpc_transmit_skb(priv->channel[CTCM_WRITE], skb) != 0) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): device error - dropped", + CTCM_FUNTAIL, dev->name); + dev_kfree_skb_any(skb); + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + priv->stats.tx_carrier_errors++; + ctcm_clear_busy(dev); + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + goto done; + } + ctcm_clear_busy(dev); +done: + if (do_debug) + MPC_DBF_DEV_NAME(TRACE, dev, "exit"); + + return NETDEV_TX_OK; /* handle freeing of skb here */ +} + + +/** + * Sets MTU of an interface. + * + * dev Pointer to interface struct. + * new_mtu The new MTU to use for this interface. + * + * returns 0 on success, -EINVAL if MTU is out of valid range. + * (valid range is 576 .. 65527). If VM is on the + * remote side, maximum MTU is 32760, however this is + * not checked here. + */ +static int ctcm_change_mtu(struct net_device *dev, int new_mtu) +{ + struct ctcm_priv *priv; + int max_bufsize; + + priv = dev->ml_priv; + max_bufsize = priv->channel[CTCM_READ]->max_bufsize; + + if (IS_MPC(priv)) { + if (new_mtu > max_bufsize - TH_HEADER_LENGTH) + return -EINVAL; + dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH; + } else { + if (new_mtu > max_bufsize - LL_HEADER_LENGTH - 2) + return -EINVAL; + dev->hard_header_len = LL_HEADER_LENGTH + 2; + } + dev->mtu = new_mtu; + return 0; +} + +/** + * Returns interface statistics of a device. + * + * dev Pointer to interface struct. + * + * returns Pointer to stats struct of this interface. 
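+ * (The statistics live in the ctcm_priv structure referenced by
+ * dev->ml_priv.)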
+ */ +static struct net_device_stats *ctcm_stats(struct net_device *dev) +{ + return &((struct ctcm_priv *)dev->ml_priv)->stats; +} + +static void ctcm_free_netdevice(struct net_device *dev) +{ + struct ctcm_priv *priv; + struct mpc_group *grp; + + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, + "%s(%s)", CTCM_FUNTAIL, dev->name); + priv = dev->ml_priv; + if (priv) { + grp = priv->mpcg; + if (grp) { + if (grp->fsm) + kfree_fsm(grp->fsm); + if (grp->xid_skb) + dev_kfree_skb(grp->xid_skb); + if (grp->rcvd_xid_skb) + dev_kfree_skb(grp->rcvd_xid_skb); + tasklet_kill(&grp->mpc_tasklet2); + kfree(grp); + priv->mpcg = NULL; + } + if (priv->fsm) { + kfree_fsm(priv->fsm); + priv->fsm = NULL; + } + kfree(priv->xid); + priv->xid = NULL; + /* + * Note: kfree(priv); is done in "opposite" function of + * allocator function probe_device which is remove_device. + */ + } +#ifdef MODULE + free_netdev(dev); +#endif +} + +struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv); + +static const struct net_device_ops ctcm_netdev_ops = { + .ndo_open = ctcm_open, + .ndo_stop = ctcm_close, + .ndo_get_stats = ctcm_stats, + .ndo_change_mtu = ctcm_change_mtu, + .ndo_start_xmit = ctcm_tx, +}; + +static const struct net_device_ops ctcm_mpc_netdev_ops = { + .ndo_open = ctcm_open, + .ndo_stop = ctcm_close, + .ndo_get_stats = ctcm_stats, + .ndo_change_mtu = ctcm_change_mtu, + .ndo_start_xmit = ctcmpc_tx, +}; + +static void ctcm_dev_setup(struct net_device *dev) +{ + dev->type = ARPHRD_SLIP; + dev->tx_queue_len = 100; + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->min_mtu = 576; + dev->max_mtu = 65527; +} + +/* + * Initialize everything of the net device except the name and the + * channel structs. + */ +static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv) +{ + struct net_device *dev; + struct mpc_group *grp; + if (!priv) + return NULL; + + if (IS_MPC(priv)) + dev = alloc_netdev(0, MPC_DEVICE_GENE, NET_NAME_UNKNOWN, + ctcm_dev_setup); + else + dev = alloc_netdev(0, CTC_DEVICE_GENE, NET_NAME_UNKNOWN, + ctcm_dev_setup); + + if (!dev) { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT, + "%s: MEMORY allocation ERROR", + CTCM_FUNTAIL); + return NULL; + } + dev->ml_priv = priv; + priv->fsm = init_fsm("ctcmdev", dev_state_names, dev_event_names, + CTCM_NR_DEV_STATES, CTCM_NR_DEV_EVENTS, + dev_fsm, dev_fsm_len, GFP_KERNEL); + if (priv->fsm == NULL) { + CTCMY_DBF_DEV(SETUP, dev, "init_fsm error"); + free_netdev(dev); + return NULL; + } + fsm_newstate(priv->fsm, DEV_STATE_STOPPED); + fsm_settimer(priv->fsm, &priv->restart_timer); + + if (IS_MPC(priv)) { + /* MPC Group Initializations */ + grp = ctcmpc_init_mpc_group(priv); + if (grp == NULL) { + MPC_DBF_DEV(SETUP, dev, "init_mpc_group error"); + free_netdev(dev); + return NULL; + } + tasklet_init(&grp->mpc_tasklet2, + mpc_group_ready, (unsigned long)dev); + dev->mtu = MPC_BUFSIZE_DEFAULT - + TH_HEADER_LENGTH - PDU_HEADER_LENGTH; + + dev->netdev_ops = &ctcm_mpc_netdev_ops; + dev->hard_header_len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH; + priv->buffer_size = MPC_BUFSIZE_DEFAULT; + } else { + dev->mtu = CTCM_BUFSIZE_DEFAULT - LL_HEADER_LENGTH - 2; + dev->netdev_ops = &ctcm_netdev_ops; + dev->hard_header_len = LL_HEADER_LENGTH + 2; + } + + CTCMY_DBF_DEV(SETUP, dev, "finished"); + + return dev; +} + +/** + * Main IRQ handler. + * + * cdev The ccw_device the interrupt is for. + * intparm interruption parameter. + * irb interruption response block. 
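+ *
+ * Determines whether the interrupt belongs to the read or the write
+ * channel of the group and translates the subchannel/device status
+ * into the matching CTC_EVENT_* for that channel's state machine;
+ * unsolicited interrupts are only logged.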
+ */ +static void ctcm_irq_handler(struct ccw_device *cdev, + unsigned long intparm, struct irb *irb) +{ + struct channel *ch; + struct net_device *dev; + struct ctcm_priv *priv; + struct ccwgroup_device *cgdev; + int cstat; + int dstat; + + CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, + "Enter %s(%s)", CTCM_FUNTAIL, dev_name(&cdev->dev)); + + if (ctcm_check_irb_error(cdev, irb)) + return; + + cgdev = dev_get_drvdata(&cdev->dev); + + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; + + /* Check for unsolicited interrupts. */ + if (cgdev == NULL) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_ERROR, + "%s(%s) unsolicited irq: c-%02x d-%02x\n", + CTCM_FUNTAIL, dev_name(&cdev->dev), cstat, dstat); + dev_warn(&cdev->dev, + "The adapter received a non-specific IRQ\n"); + return; + } + + priv = dev_get_drvdata(&cgdev->dev); + + /* Try to extract channel from driver data. */ + if (priv->channel[CTCM_READ]->cdev == cdev) + ch = priv->channel[CTCM_READ]; + else if (priv->channel[CTCM_WRITE]->cdev == cdev) + ch = priv->channel[CTCM_WRITE]; + else { + dev_err(&cdev->dev, + "%s: Internal error: Can't determine channel for " + "interrupt device %s\n", + __func__, dev_name(&cdev->dev)); + /* Explain: inconsistent internal structures */ + return; + } + + dev = ch->netdev; + if (dev == NULL) { + dev_err(&cdev->dev, + "%s Internal error: net_device is NULL, ch = 0x%p\n", + __func__, ch); + /* Explain: inconsistent internal structures */ + return; + } + + /* Copy interruption response block. */ + memcpy(ch->irb, irb, sizeof(struct irb)); + + /* Issue error message and return on subchannel error code */ + if (irb->scsw.cmd.cstat) { + fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); + CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, + "%s(%s): sub-ch check %s: cs=%02x ds=%02x", + CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat); + dev_warn(&cdev->dev, + "A check occurred on the subchannel\n"); + return; + } + + /* Check the reason-code of a unit check */ + if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { + if ((irb->ecw[0] & ch->sense_rc) == 0) + /* print it only once */ + CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, + "%s(%s): sense=%02x, ds=%02x", + CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat); + ccw_unit_check(ch, irb->ecw[0]); + return; + } + if (irb->scsw.cmd.dstat & DEV_STAT_BUSY) { + if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) + fsm_event(ch->fsm, CTC_EVENT_ATTNBUSY, ch); + else + fsm_event(ch->fsm, CTC_EVENT_BUSY, ch); + return; + } + if (irb->scsw.cmd.dstat & DEV_STAT_ATTENTION) { + fsm_event(ch->fsm, CTC_EVENT_ATTN, ch); + return; + } + if ((irb->scsw.cmd.stctl & SCSW_STCTL_SEC_STATUS) || + (irb->scsw.cmd.stctl == SCSW_STCTL_STATUS_PEND) || + (irb->scsw.cmd.stctl == + (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) + fsm_event(ch->fsm, CTC_EVENT_FINSTAT, ch); + else + fsm_event(ch->fsm, CTC_EVENT_IRQ, ch); + +} + +static const struct device_type ctcm_devtype = { + .name = "ctcm", + .groups = ctcm_attr_groups, +}; + +/** + * Add ctcm specific attributes. + * Add ctcm private data. + * + * cgdev pointer to ccwgroup_device just added + * + * returns 0 on success, !0 on failure. 
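+ *
+ * Allocates the ctcm_priv structure, installs ctcm_irq_handler on
+ * both ccw devices of the group and stores the private data as
+ * driver data of the ccwgroup device.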
+ */ +static int ctcm_probe_device(struct ccwgroup_device *cgdev) +{ + struct ctcm_priv *priv; + + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, + "%s %p", + __func__, cgdev); + + if (!get_device(&cgdev->dev)) + return -ENODEV; + + priv = kzalloc(sizeof(struct ctcm_priv), GFP_KERNEL); + if (!priv) { + CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, + "%s: memory allocation failure", + CTCM_FUNTAIL); + put_device(&cgdev->dev); + return -ENOMEM; + } + priv->buffer_size = CTCM_BUFSIZE_DEFAULT; + cgdev->cdev[0]->handler = ctcm_irq_handler; + cgdev->cdev[1]->handler = ctcm_irq_handler; + dev_set_drvdata(&cgdev->dev, priv); + cgdev->dev.type = &ctcm_devtype; + + return 0; +} + +/** + * Add a new channel to the list of channels. + * Keeps the channel list sorted. + * + * cdev The ccw_device to be added. + * type The type class of the new channel. + * priv Points to the private data of the ccwgroup_device. + * + * returns 0 on success, !0 on error. + */ +static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type, + struct ctcm_priv *priv) +{ + struct channel **c = &channels; + struct channel *ch; + int ccw_num; + int rc = 0; + + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, + "%s(%s), type %d, proto %d", + __func__, dev_name(&cdev->dev), type, priv->protocol); + + ch = kzalloc(sizeof(struct channel), GFP_KERNEL); + if (ch == NULL) + return -ENOMEM; + + ch->protocol = priv->protocol; + if (IS_MPC(priv)) { + ch->discontact_th = kzalloc(TH_HEADER_LENGTH, gfp_type()); + if (ch->discontact_th == NULL) + goto nomem_return; + + ch->discontact_th->th_blk_flag = TH_DISCONTACT; + tasklet_init(&ch->ch_disc_tasklet, + mpc_action_send_discontact, (unsigned long)ch); + + tasklet_init(&ch->ch_tasklet, ctcmpc_bh, (unsigned long)ch); + ch->max_bufsize = (MPC_BUFSIZE_DEFAULT - 35); + ccw_num = 17; + } else + ccw_num = 8; + + ch->ccw = kcalloc(ccw_num, sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); + if (ch->ccw == NULL) + goto nomem_return; + + ch->cdev = cdev; + snprintf(ch->id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev->dev)); + ch->type = type; + + /** + * "static" ccws are used in the following way: + * + * ccw[0..2] (Channel program for generic I/O): + * 0: prepare + * 1: read or write (depending on direction) with fixed + * buffer (idal allocated once when buffer is allocated) + * 2: nop + * ccw[3..5] (Channel program for direct write of packets) + * 3: prepare + * 4: write (idal allocated on every write). + * 5: nop + * ccw[6..7] (Channel program for initial channel setup): + * 6: set extended mode + * 7: nop + * + * ch->ccw[0..5] are initialized in ch_action_start because + * the channel's direction is yet unknown here. 
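+	 *
+	 * Note: the XID and double-noop ccws described below (8..16)
+	 * exist only for MPC channels; non-MPC channels allocate just
+	 * ccw[0..7] (see ccw_num above).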
+ * + * ccws used for xid2 negotiations + * ch-ccw[8-14] need to be used for the XID exchange either + * X side XID2 Processing + * 8: write control + * 9: write th + * 10: write XID + * 11: read th from secondary + * 12: read XID from secondary + * 13: read 4 byte ID + * 14: nop + * Y side XID Processing + * 8: sense + * 9: read th + * 10: read XID + * 11: write th + * 12: write XID + * 13: write 4 byte ID + * 14: nop + * + * ccws used for double noop due to VM timing issues + * which result in unrecoverable Busy on channel + * 15: nop + * 16: nop + */ + ch->ccw[6].cmd_code = CCW_CMD_SET_EXTENDED; + ch->ccw[6].flags = CCW_FLAG_SLI; + + ch->ccw[7].cmd_code = CCW_CMD_NOOP; + ch->ccw[7].flags = CCW_FLAG_SLI; + + if (IS_MPC(priv)) { + ch->ccw[15].cmd_code = CCW_CMD_WRITE; + ch->ccw[15].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[15].count = TH_HEADER_LENGTH; + ch->ccw[15].cda = virt_to_phys(ch->discontact_th); + + ch->ccw[16].cmd_code = CCW_CMD_NOOP; + ch->ccw[16].flags = CCW_FLAG_SLI; + + ch->fsm = init_fsm(ch->id, ctc_ch_state_names, + ctc_ch_event_names, CTC_MPC_NR_STATES, + CTC_MPC_NR_EVENTS, ctcmpc_ch_fsm, + mpc_ch_fsm_len, GFP_KERNEL); + } else { + ch->fsm = init_fsm(ch->id, ctc_ch_state_names, + ctc_ch_event_names, CTC_NR_STATES, + CTC_NR_EVENTS, ch_fsm, + ch_fsm_len, GFP_KERNEL); + } + if (ch->fsm == NULL) + goto nomem_return; + + fsm_newstate(ch->fsm, CTC_STATE_IDLE); + + ch->irb = kzalloc(sizeof(struct irb), GFP_KERNEL); + if (ch->irb == NULL) + goto nomem_return; + + while (*c && ctcm_less_than((*c)->id, ch->id)) + c = &(*c)->next; + + if (*c && (!strncmp((*c)->id, ch->id, CTCM_ID_SIZE))) { + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, + "%s (%s) already in list, using old entry", + __func__, (*c)->id); + + goto free_return; + } + + spin_lock_init(&ch->collect_lock); + + fsm_settimer(ch->fsm, &ch->timer); + skb_queue_head_init(&ch->io_queue); + skb_queue_head_init(&ch->collect_queue); + + if (IS_MPC(priv)) { + fsm_settimer(ch->fsm, &ch->sweep_timer); + skb_queue_head_init(&ch->sweep_queue); + } + ch->next = *c; + *c = ch; + return 0; + +nomem_return: + rc = -ENOMEM; + +free_return: /* note that all channel pointers are 0 or valid */ + kfree(ch->ccw); + kfree(ch->discontact_th); + kfree_fsm(ch->fsm); + kfree(ch->irb); + kfree(ch); + return rc; +} + +/* + * Return type of a detected device. + */ +static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id) +{ + enum ctcm_channel_types type; + type = (enum ctcm_channel_types)id->driver_info; + + if (type == ctcm_channel_type_ficon) + type = ctcm_channel_type_escon; + + return type; +} + +/** + * + * Setup an interface. + * + * cgdev Device to be setup. + * + * returns 0 on success, !0 on failure. 
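+ *
+ * Adds and brings online both ccw channels, allocates the
+ * net_device, binds the read/write channels to it and registers it
+ * with the network stack; on failure the steps already taken are
+ * undone in reverse order via the out_* labels.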
+ */ +static int ctcm_new_device(struct ccwgroup_device *cgdev) +{ + char read_id[CTCM_ID_SIZE]; + char write_id[CTCM_ID_SIZE]; + int direction; + enum ctcm_channel_types type; + struct ctcm_priv *priv; + struct net_device *dev; + struct ccw_device *cdev0; + struct ccw_device *cdev1; + struct channel *readc; + struct channel *writec; + int ret; + int result; + + priv = dev_get_drvdata(&cgdev->dev); + if (!priv) { + result = -ENODEV; + goto out_err_result; + } + + cdev0 = cgdev->cdev[0]; + cdev1 = cgdev->cdev[1]; + + type = get_channel_type(&cdev0->id); + + snprintf(read_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev0->dev)); + snprintf(write_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev1->dev)); + + ret = add_channel(cdev0, type, priv); + if (ret) { + result = ret; + goto out_err_result; + } + ret = add_channel(cdev1, type, priv); + if (ret) { + result = ret; + goto out_remove_channel1; + } + + ret = ccw_device_set_online(cdev0); + if (ret != 0) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, + "%s(%s) set_online rc=%d", + CTCM_FUNTAIL, read_id, ret); + result = -EIO; + goto out_remove_channel2; + } + + ret = ccw_device_set_online(cdev1); + if (ret != 0) { + CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, + "%s(%s) set_online rc=%d", + CTCM_FUNTAIL, write_id, ret); + + result = -EIO; + goto out_ccw1; + } + + dev = ctcm_init_netdevice(priv); + if (dev == NULL) { + result = -ENODEV; + goto out_ccw2; + } + + for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) { + priv->channel[direction] = + channel_get(type, direction == CTCM_READ ? + read_id : write_id, direction); + if (priv->channel[direction] == NULL) { + if (direction == CTCM_WRITE) + channel_free(priv->channel[CTCM_READ]); + result = -ENODEV; + goto out_dev; + } + priv->channel[direction]->netdev = dev; + priv->channel[direction]->protocol = priv->protocol; + priv->channel[direction]->max_bufsize = priv->buffer_size; + } + /* sysfs magic */ + SET_NETDEV_DEV(dev, &cgdev->dev); + + if (register_netdev(dev)) { + result = -ENODEV; + goto out_dev; + } + + strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name)); + + dev_info(&dev->dev, + "setup OK : r/w = %s/%s, protocol : %d\n", + priv->channel[CTCM_READ]->id, + priv->channel[CTCM_WRITE]->id, priv->protocol); + + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, + "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name, + priv->channel[CTCM_READ]->id, + priv->channel[CTCM_WRITE]->id, priv->protocol); + + return 0; +out_dev: + ctcm_free_netdevice(dev); +out_ccw2: + ccw_device_set_offline(cgdev->cdev[1]); +out_ccw1: + ccw_device_set_offline(cgdev->cdev[0]); +out_remove_channel2: + readc = channel_get(type, read_id, CTCM_READ); + channel_remove(readc); +out_remove_channel1: + writec = channel_get(type, write_id, CTCM_WRITE); + channel_remove(writec); +out_err_result: + return result; +} + +/** + * Shutdown an interface. + * + * cgdev Device to be shut down. + * + * returns 0 on success, !0 on failure. 
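+ *
+ * Closes and unregisters the net_device, releases both channels
+ * and takes the two ccw devices offline again.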
+ */ +static int ctcm_shutdown_device(struct ccwgroup_device *cgdev) +{ + struct ctcm_priv *priv; + struct net_device *dev; + + priv = dev_get_drvdata(&cgdev->dev); + if (!priv) + return -ENODEV; + + if (priv->channel[CTCM_READ]) { + dev = priv->channel[CTCM_READ]->netdev; + CTCM_DBF_DEV(SETUP, dev, ""); + /* Close the device */ + ctcm_close(dev); + dev->flags &= ~IFF_RUNNING; + channel_free(priv->channel[CTCM_READ]); + } else + dev = NULL; + + if (priv->channel[CTCM_WRITE]) + channel_free(priv->channel[CTCM_WRITE]); + + if (dev) { + unregister_netdev(dev); + ctcm_free_netdevice(dev); + } + + if (priv->fsm) + kfree_fsm(priv->fsm); + + ccw_device_set_offline(cgdev->cdev[1]); + ccw_device_set_offline(cgdev->cdev[0]); + channel_remove(priv->channel[CTCM_READ]); + channel_remove(priv->channel[CTCM_WRITE]); + priv->channel[CTCM_READ] = priv->channel[CTCM_WRITE] = NULL; + + return 0; + +} + + +static void ctcm_remove_device(struct ccwgroup_device *cgdev) +{ + struct ctcm_priv *priv = dev_get_drvdata(&cgdev->dev); + + CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, + "removing device %p, proto : %d", + cgdev, priv->protocol); + + if (cgdev->state == CCWGROUP_ONLINE) + ctcm_shutdown_device(cgdev); + dev_set_drvdata(&cgdev->dev, NULL); + kfree(priv); + put_device(&cgdev->dev); +} + +static int ctcm_pm_suspend(struct ccwgroup_device *gdev) +{ + struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev); + + if (gdev->state == CCWGROUP_OFFLINE) + return 0; + netif_device_detach(priv->channel[CTCM_READ]->netdev); + ctcm_close(priv->channel[CTCM_READ]->netdev); + if (!wait_event_timeout(priv->fsm->wait_q, + fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) { + netif_device_attach(priv->channel[CTCM_READ]->netdev); + return -EBUSY; + } + ccw_device_set_offline(gdev->cdev[1]); + ccw_device_set_offline(gdev->cdev[0]); + return 0; +} + +static int ctcm_pm_resume(struct ccwgroup_device *gdev) +{ + struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev); + int rc; + + if (gdev->state == CCWGROUP_OFFLINE) + return 0; + rc = ccw_device_set_online(gdev->cdev[1]); + if (rc) + goto err_out; + rc = ccw_device_set_online(gdev->cdev[0]); + if (rc) + goto err_out; + ctcm_open(priv->channel[CTCM_READ]->netdev); +err_out: + netif_device_attach(priv->channel[CTCM_READ]->netdev); + return rc; +} + +static struct ccw_device_id ctcm_ids[] = { + {CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel}, + {CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon}, + {CCW_DEVICE(0x3088, 0x1f), .driver_info = ctcm_channel_type_escon}, + {}, +}; +MODULE_DEVICE_TABLE(ccw, ctcm_ids); + +static struct ccw_driver ctcm_ccw_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "ctcm", + }, + .ids = ctcm_ids, + .probe = ccwgroup_probe_ccwdev, + .remove = ccwgroup_remove_ccwdev, + .int_class = IRQIO_CTC, +}; + +static struct ccwgroup_driver ctcm_group_driver = { + .driver = { + .owner = THIS_MODULE, + .name = CTC_DRIVER_NAME, + }, + .ccw_driver = &ctcm_ccw_driver, + .setup = ctcm_probe_device, + .remove = ctcm_remove_device, + .set_online = ctcm_new_device, + .set_offline = ctcm_shutdown_device, + .freeze = ctcm_pm_suspend, + .thaw = ctcm_pm_resume, + .restore = ctcm_pm_resume, +}; + +static ssize_t group_store(struct device_driver *ddrv, const char *buf, + size_t count) +{ + int err; + + err = ccwgroup_create_dev(ctcm_root_dev, &ctcm_group_driver, 2, buf); + return err ? 
err : count; +} +static DRIVER_ATTR_WO(group); + +static struct attribute *ctcm_drv_attrs[] = { + &driver_attr_group.attr, + NULL, +}; +static struct attribute_group ctcm_drv_attr_group = { + .attrs = ctcm_drv_attrs, +}; +static const struct attribute_group *ctcm_drv_attr_groups[] = { + &ctcm_drv_attr_group, + NULL, +}; + +/* + * Module related routines + */ + +/* + * Prepare to be unloaded. Free IRQ's and release all resources. + * This is called just before this module is unloaded. It is + * not called, if the usage count is !0, so we don't need to check + * for that. + */ +static void __exit ctcm_exit(void) +{ + ccwgroup_driver_unregister(&ctcm_group_driver); + ccw_driver_unregister(&ctcm_ccw_driver); + root_device_unregister(ctcm_root_dev); + ctcm_unregister_dbf_views(); + pr_info("CTCM driver unloaded\n"); +} + +/* + * Print Banner. + */ +static void print_banner(void) +{ + pr_info("CTCM driver initialized\n"); +} + +/** + * Initialize module. + * This is called just after the module is loaded. + * + * returns 0 on success, !0 on error. + */ +static int __init ctcm_init(void) +{ + int ret; + + channels = NULL; + + ret = ctcm_register_dbf_views(); + if (ret) + goto out_err; + ctcm_root_dev = root_device_register("ctcm"); + ret = PTR_ERR_OR_ZERO(ctcm_root_dev); + if (ret) + goto register_err; + ret = ccw_driver_register(&ctcm_ccw_driver); + if (ret) + goto ccw_err; + ctcm_group_driver.driver.groups = ctcm_drv_attr_groups; + ret = ccwgroup_driver_register(&ctcm_group_driver); + if (ret) + goto ccwgroup_err; + print_banner(); + return 0; + +ccwgroup_err: + ccw_driver_unregister(&ctcm_ccw_driver); +ccw_err: + root_device_unregister(ctcm_root_dev); +register_err: + ctcm_unregister_dbf_views(); +out_err: + pr_err("%s / Initializing the ctcm device driver failed, ret = %d\n", + __func__, ret); + return ret; +} + +module_init(ctcm_init); +module_exit(ctcm_exit); + +MODULE_AUTHOR("Peter Tiedemann <ptiedem@de.ibm.com>"); +MODULE_DESCRIPTION("Network driver for S/390 CTC + CTCMPC (SNA)"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h new file mode 100644 index 000000000..16bdf23ee --- /dev/null +++ b/drivers/s390/net/ctcm_main.h @@ -0,0 +1,316 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2001, 2007 + * Authors: Fritz Elfert (felfert@millenux.com) + * Peter Tiedemann (ptiedem@de.ibm.com) + */ + +#ifndef _CTCM_MAIN_H_ +#define _CTCM_MAIN_H_ + +#include <asm/ccwdev.h> +#include <asm/ccwgroup.h> + +#include <linux/skbuff.h> +#include <linux/netdevice.h> + +#include "fsm.h" +#include "ctcm_dbug.h" +#include "ctcm_mpc.h" + +#define CTC_DRIVER_NAME "ctcm" +#define CTC_DEVICE_NAME "ctc" +#define MPC_DEVICE_NAME "mpc" +#define CTC_DEVICE_GENE CTC_DEVICE_NAME "%d" +#define MPC_DEVICE_GENE MPC_DEVICE_NAME "%d" + +#define CHANNEL_FLAGS_READ 0 +#define CHANNEL_FLAGS_WRITE 1 +#define CHANNEL_FLAGS_INUSE 2 +#define CHANNEL_FLAGS_BUFSIZE_CHANGED 4 +#define CHANNEL_FLAGS_FAILED 8 +#define CHANNEL_FLAGS_WAITIRQ 16 +#define CHANNEL_FLAGS_RWMASK 1 +#define CHANNEL_DIRECTION(f) (f & CHANNEL_FLAGS_RWMASK) + +#define LOG_FLAG_ILLEGALPKT 1 +#define LOG_FLAG_ILLEGALSIZE 2 +#define LOG_FLAG_OVERRUN 4 +#define LOG_FLAG_NOMEM 8 + +#define ctcm_pr_debug(fmt, arg...) printk(KERN_DEBUG fmt, ##arg) + +#define CTCM_PR_DEBUG(fmt, arg...) \ + do { \ + if (do_debug) \ + printk(KERN_DEBUG fmt, ##arg); \ + } while (0) + +#define CTCM_PR_DBGDATA(fmt, arg...) 
\ + do { \ + if (do_debug_data) \ + printk(KERN_DEBUG fmt, ##arg); \ + } while (0) + +#define CTCM_D3_DUMP(buf, len) \ + do { \ + if (do_debug_data) \ + ctcmpc_dumpit(buf, len); \ + } while (0) + +#define CTCM_CCW_DUMP(buf, len) \ + do { \ + if (do_debug_ccw) \ + ctcmpc_dumpit(buf, len); \ + } while (0) + +/** + * Enum for classifying detected devices + */ +enum ctcm_channel_types { + /* Device is not a channel */ + ctcm_channel_type_none, + + /* Device is a CTC/A */ + ctcm_channel_type_parallel, + + /* Device is a FICON channel */ + ctcm_channel_type_ficon, + + /* Device is a ESCON channel */ + ctcm_channel_type_escon +}; + +/* + * CCW commands, used in this driver. + */ +#define CCW_CMD_WRITE 0x01 +#define CCW_CMD_READ 0x02 +#define CCW_CMD_NOOP 0x03 +#define CCW_CMD_TIC 0x08 +#define CCW_CMD_SENSE_CMD 0x14 +#define CCW_CMD_WRITE_CTL 0x17 +#define CCW_CMD_SET_EXTENDED 0xc3 +#define CCW_CMD_PREPARE 0xe3 + +#define CTCM_PROTO_S390 0 +#define CTCM_PROTO_LINUX 1 +#define CTCM_PROTO_LINUX_TTY 2 +#define CTCM_PROTO_OS390 3 +#define CTCM_PROTO_MPC 4 +#define CTCM_PROTO_MAX 4 + +#define CTCM_BUFSIZE_LIMIT 65535 +#define CTCM_BUFSIZE_DEFAULT 32768 +#define MPC_BUFSIZE_DEFAULT CTCM_BUFSIZE_LIMIT + +#define CTCM_TIME_1_SEC 1000 +#define CTCM_TIME_5_SEC 5000 +#define CTCM_TIME_10_SEC 10000 + +#define CTCM_INITIAL_BLOCKLEN 2 + +#define CTCM_READ 0 +#define CTCM_WRITE 1 + +#define CTCM_ID_SIZE 20+3 + +struct ctcm_profile { + unsigned long maxmulti; + unsigned long maxcqueue; + unsigned long doios_single; + unsigned long doios_multi; + unsigned long txlen; + unsigned long tx_time; + unsigned long send_stamp; +}; + +/* + * Definition of one channel + */ +struct channel { + struct channel *next; + char id[CTCM_ID_SIZE]; + struct ccw_device *cdev; + /* + * Type of this channel. + * CTC/A or Escon for valid channels. + */ + enum ctcm_channel_types type; + /* + * Misc. flags. See CHANNEL_FLAGS_... below + */ + __u32 flags; + __u16 protocol; /* protocol of this channel (4 = MPC) */ + /* + * I/O and irq related stuff + */ + struct ccw1 *ccw; + struct irb *irb; + /* + * RX/TX buffer size + */ + int max_bufsize; + struct sk_buff *trans_skb; /* transmit/receive buffer */ + struct sk_buff_head io_queue; /* universal I/O queue */ + struct tasklet_struct ch_tasklet; /* MPC ONLY */ + /* + * TX queue for collecting skb's during busy. + */ + struct sk_buff_head collect_queue; + /* + * Amount of data in collect_queue. + */ + int collect_len; + /* + * spinlock for collect_queue and collect_len + */ + spinlock_t collect_lock; + /* + * Timer for detecting unresposive + * I/O operations. + */ + fsm_timer timer; + /* MPC ONLY section begin */ + __u32 th_seq_num; /* SNA TH seq number */ + __u8 th_seg; + __u32 pdu_seq; + struct sk_buff *xid_skb; + char *xid_skb_data; + struct th_header *xid_th; + struct xid2 *xid; + char *xid_id; + struct th_header *rcvd_xid_th; + struct xid2 *rcvd_xid; + char *rcvd_xid_id; + __u8 in_mpcgroup; + fsm_timer sweep_timer; + struct sk_buff_head sweep_queue; + struct th_header *discontact_th; + struct tasklet_struct ch_disc_tasklet; + /* MPC ONLY section end */ + + int retry; /* retry counter for misc. 
operations */ + fsm_instance *fsm; /* finite state machine of this channel */ + struct net_device *netdev; /* corresponding net_device */ + struct ctcm_profile prof; + __u8 *trans_skb_data; + __u16 logflags; + __u8 sense_rc; /* last unit check sense code report control */ +}; + +struct ctcm_priv { + struct net_device_stats stats; + unsigned long tbusy; + + /* The MPC group struct of this interface */ + struct mpc_group *mpcg; /* MPC only */ + struct xid2 *xid; /* MPC only */ + + /* The finite state machine of this interface */ + fsm_instance *fsm; + + /* The protocol of this device */ + __u16 protocol; + + /* Timer for restarting after I/O Errors */ + fsm_timer restart_timer; + + int buffer_size; /* ctc only */ + + struct channel *channel[2]; +}; + +int ctcm_open(struct net_device *dev); +int ctcm_close(struct net_device *dev); + +extern const struct attribute_group *ctcm_attr_groups[]; + +/* + * Compatibility macros for busy handling + * of network devices. + */ +static inline void ctcm_clear_busy_do(struct net_device *dev) +{ + clear_bit(0, &(((struct ctcm_priv *)dev->ml_priv)->tbusy)); + netif_wake_queue(dev); +} + +static inline void ctcm_clear_busy(struct net_device *dev) +{ + struct mpc_group *grp; + grp = ((struct ctcm_priv *)dev->ml_priv)->mpcg; + + if (!(grp && grp->in_sweep)) + ctcm_clear_busy_do(dev); +} + + +static inline int ctcm_test_and_set_busy(struct net_device *dev) +{ + netif_stop_queue(dev); + return test_and_set_bit(0, + &(((struct ctcm_priv *)dev->ml_priv)->tbusy)); +} + +extern int loglevel; +extern struct channel *channels; + +void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb); + +/* + * Functions related to setup and device detection. + */ + +static inline int ctcm_less_than(char *id1, char *id2) +{ + unsigned long dev1, dev2; + + id1 = id1 + 5; + id2 = id2 + 5; + + dev1 = simple_strtoul(id1, &id1, 16); + dev2 = simple_strtoul(id2, &id2, 16); + + return (dev1 < dev2); +} + +int ctcm_ch_alloc_buffer(struct channel *ch); + +static inline int ctcm_checkalloc_buffer(struct channel *ch) +{ + if (ch->trans_skb == NULL) + return ctcm_ch_alloc_buffer(ch); + if (ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED) { + dev_kfree_skb(ch->trans_skb); + return ctcm_ch_alloc_buffer(ch); + } + return 0; +} + +struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv); + +/* test if protocol attribute (of struct ctcm_priv or struct channel) + * has MPC protocol setting. Type is not checked + */ +#define IS_MPC(p) ((p)->protocol == CTCM_PROTO_MPC) + +/* test if struct ctcm_priv of struct net_device has MPC protocol setting */ +#define IS_MPCDEV(dev) IS_MPC((struct ctcm_priv *)dev->ml_priv) + +static inline gfp_t gfp_type(void) +{ + return in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; +} + +/* + * Definition of our link level header. + */ +struct ll_header { + __u16 length; + __u16 type; + __u16 unused; +}; +#define LL_HEADER_LENGTH (sizeof(struct ll_header)) + +#endif diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c new file mode 100644 index 000000000..07d966813 --- /dev/null +++ b/drivers/s390/net/ctcm_mpc.c @@ -0,0 +1,2151 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 
2004, 2007 + * Authors: Belinda Thompson (belindat@us.ibm.com) + * Andy Richter (richtera@us.ibm.com) + * Peter Tiedemann (ptiedem@de.ibm.com) + */ + +/* + This module exports functions to be used by CCS: + EXPORT_SYMBOL(ctc_mpc_alloc_channel); + EXPORT_SYMBOL(ctc_mpc_establish_connectivity); + EXPORT_SYMBOL(ctc_mpc_dealloc_ch); + EXPORT_SYMBOL(ctc_mpc_flow_control); +*/ + +#undef DEBUG +#undef DEBUGDATA +#undef DEBUGCCW + +#define KMSG_COMPONENT "ctcm" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/sched.h> + +#include <linux/signal.h> +#include <linux/string.h> +#include <linux/proc_fs.h> + +#include <linux/ip.h> +#include <linux/if_arp.h> +#include <linux/tcp.h> +#include <linux/skbuff.h> +#include <linux/ctype.h> +#include <linux/netdevice.h> +#include <net/dst.h> + +#include <linux/io.h> /* instead of <asm/io.h> ok ? */ +#include <asm/ccwdev.h> +#include <asm/ccwgroup.h> +#include <linux/bitops.h> /* instead of <asm/bitops.h> ok ? */ +#include <linux/uaccess.h> /* instead of <asm/uaccess.h> ok ? */ +#include <linux/wait.h> +#include <linux/moduleparam.h> +#include <asm/idals.h> + +#include "ctcm_main.h" +#include "ctcm_mpc.h" +#include "ctcm_fsms.h" + +static const struct xid2 init_xid = { + .xid2_type_id = XID_FM2, + .xid2_len = 0x45, + .xid2_adj_id = 0, + .xid2_rlen = 0x31, + .xid2_resv1 = 0, + .xid2_flag1 = 0, + .xid2_fmtt = 0, + .xid2_flag4 = 0x80, + .xid2_resv2 = 0, + .xid2_tgnum = 0, + .xid2_sender_id = 0, + .xid2_flag2 = 0, + .xid2_option = XID2_0, + .xid2_resv3 = "\x00", + .xid2_resv4 = 0, + .xid2_dlc_type = XID2_READ_SIDE, + .xid2_resv5 = 0, + .xid2_mpc_flag = 0, + .xid2_resv6 = 0, + .xid2_buf_len = (MPC_BUFSIZE_DEFAULT - 35), +}; + +static const struct th_header thnorm = { + .th_seg = 0x00, + .th_ch_flag = TH_IS_XID, + .th_blk_flag = TH_DATA_IS_XID, + .th_is_xid = 0x01, + .th_seq_num = 0x00000000, +}; + +static const struct th_header thdummy = { + .th_seg = 0x00, + .th_ch_flag = 0x00, + .th_blk_flag = TH_DATA_IS_XID, + .th_is_xid = 0x01, + .th_seq_num = 0x00000000, +}; + +/* + * Definition of one MPC group + */ + +/* + * Compatibility macros for busy handling + * of network devices. 
+ */ + +static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb); + +/* + * MPC Group state machine actions (static prototypes) + */ +static void mpc_action_nop(fsm_instance *fsm, int event, void *arg); +static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg); +static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg); +static void mpc_action_timeout(fsm_instance *fi, int event, void *arg); +static int mpc_validate_xid(struct mpcg_info *mpcginfo); +static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg); +static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg); +static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg); +static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg); +static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg); +static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg); + +#ifdef DEBUGDATA +/*-------------------------------------------------------------------* +* Dump buffer format * +* * +*--------------------------------------------------------------------*/ +void ctcmpc_dumpit(char *buf, int len) +{ + __u32 ct, sw, rm, dup; + char *ptr, *rptr; + char tbuf[82], tdup[82]; + char addr[22]; + char boff[12]; + char bhex[82], duphex[82]; + char basc[40]; + + sw = 0; + rptr = ptr = buf; + rm = 16; + duphex[0] = 0x00; + dup = 0; + + for (ct = 0; ct < len; ct++, ptr++, rptr++) { + if (sw == 0) { + sprintf(addr, "%16.16llx", (__u64)rptr); + + sprintf(boff, "%4.4X", (__u32)ct); + bhex[0] = '\0'; + basc[0] = '\0'; + } + if ((sw == 4) || (sw == 12)) + strcat(bhex, " "); + if (sw == 8) + strcat(bhex, " "); + + sprintf(tbuf, "%2.2llX", (__u64)*ptr); + + tbuf[2] = '\0'; + strcat(bhex, tbuf); + if ((0 != isprint(*ptr)) && (*ptr >= 0x20)) + basc[sw] = *ptr; + else + basc[sw] = '.'; + + basc[sw+1] = '\0'; + sw++; + rm--; + if (sw != 16) + continue; + if ((strcmp(duphex, bhex)) != 0) { + if (dup != 0) { + sprintf(tdup, + "Duplicate as above to %s", addr); + ctcm_pr_debug(" --- %s ---\n", + tdup); + } + ctcm_pr_debug(" %s (+%s) : %s [%s]\n", + addr, boff, bhex, basc); + dup = 0; + strcpy(duphex, bhex); + } else + dup++; + + sw = 0; + rm = 16; + } /* endfor */ + + if (sw != 0) { + for ( ; rm > 0; rm--, sw++) { + if ((sw == 4) || (sw == 12)) + strcat(bhex, " "); + if (sw == 8) + strcat(bhex, " "); + strcat(bhex, " "); + strcat(basc, " "); + } + if (dup != 0) { + sprintf(tdup, "Duplicate as above to %s", addr); + ctcm_pr_debug(" --- %s ---\n", tdup); + } + ctcm_pr_debug(" %s (+%s) : %s [%s]\n", + addr, boff, bhex, basc); + } else { + if (dup >= 1) { + sprintf(tdup, "Duplicate as above to %s", addr); + ctcm_pr_debug(" --- %s ---\n", tdup); + } + if (dup != 0) { + ctcm_pr_debug(" %s (+%s) : %s [%s]\n", + addr, boff, bhex, basc); + } + } + + return; + +} /* end of ctcmpc_dumpit */ +#endif + +#ifdef DEBUGDATA +/* + * Dump header and first 16 bytes of an sk_buff for debugging purposes. + * + * skb The sk_buff to dump. + * offset Offset relative to skb-data, where to start the dump. 
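+ *
+ * Frames whose TH flags them as XID or sweep are dumped as a bare
+ * transport header; anything else is interpreted as a PDU header,
+ * followed by up to 32 bytes of data.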
+ */ +void ctcmpc_dump_skb(struct sk_buff *skb, int offset) +{ + __u8 *p = skb->data; + struct th_header *header; + struct pdu *pheader; + int bl = skb->len; + int i; + + if (p == NULL) + return; + + p += offset; + header = (struct th_header *)p; + + ctcm_pr_debug("dump:\n"); + ctcm_pr_debug("skb len=%d \n", skb->len); + if (skb->len > 2) { + switch (header->th_ch_flag) { + case TH_HAS_PDU: + break; + case 0x00: + case TH_IS_XID: + if ((header->th_blk_flag == TH_DATA_IS_XID) && + (header->th_is_xid == 0x01)) + goto dumpth; + case TH_SWEEP_REQ: + goto dumpth; + case TH_SWEEP_RESP: + goto dumpth; + default: + break; + } + + pheader = (struct pdu *)p; + ctcm_pr_debug("pdu->offset: %d hex: %04x\n", + pheader->pdu_offset, pheader->pdu_offset); + ctcm_pr_debug("pdu->flag : %02x\n", pheader->pdu_flag); + ctcm_pr_debug("pdu->proto : %02x\n", pheader->pdu_proto); + ctcm_pr_debug("pdu->seq : %02x\n", pheader->pdu_seq); + goto dumpdata; + +dumpth: + ctcm_pr_debug("th->seg : %02x\n", header->th_seg); + ctcm_pr_debug("th->ch : %02x\n", header->th_ch_flag); + ctcm_pr_debug("th->blk_flag: %02x\n", header->th_blk_flag); + ctcm_pr_debug("th->type : %s\n", + (header->th_is_xid) ? "DATA" : "XID"); + ctcm_pr_debug("th->seqnum : %04x\n", header->th_seq_num); + + } +dumpdata: + if (bl > 32) + bl = 32; + ctcm_pr_debug("data: "); + for (i = 0; i < bl; i++) + ctcm_pr_debug("%02x%s", *p++, (i % 16) ? " " : "\n"); + ctcm_pr_debug("\n"); +} +#endif + +static struct net_device *ctcmpc_get_dev(int port_num) +{ + char device[20]; + struct net_device *dev; + struct ctcm_priv *priv; + + sprintf(device, "%s%i", MPC_DEVICE_NAME, port_num); + + dev = __dev_get_by_name(&init_net, device); + + if (dev == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s: Device not found by name: %s", + CTCM_FUNTAIL, device); + return NULL; + } + priv = dev->ml_priv; + if (priv == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): dev->ml_priv is NULL", + CTCM_FUNTAIL, device); + return NULL; + } + if (priv->mpcg == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): priv->mpcg is NULL", + CTCM_FUNTAIL, device); + return NULL; + } + return dev; +} + +/* + * ctc_mpc_alloc_channel + * (exported interface) + * + * Device Initialization : + * ACTPATH driven IO operations + */ +int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int)) +{ + struct net_device *dev; + struct mpc_group *grp; + struct ctcm_priv *priv; + + dev = ctcmpc_get_dev(port_num); + if (dev == NULL) + return 1; + priv = dev->ml_priv; + grp = priv->mpcg; + + grp->allochanfunc = callback; + grp->port_num = port_num; + grp->port_persist = 1; + + CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO, + "%s(%s): state=%s", + CTCM_FUNTAIL, dev->name, fsm_getstate_str(grp->fsm)); + + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_INOP: + /* Group is in the process of terminating */ + grp->alloc_called = 1; + break; + case MPCG_STATE_RESET: + /* MPC Group will transition to state */ + /* MPCG_STATE_XID2INITW iff the minimum number */ + /* of 1 read and 1 write channel have successfully*/ + /* activated */ + /*fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);*/ + if (callback) + grp->send_qllc_disc = 1; + case MPCG_STATE_XID0IOWAIT: + fsm_deltimer(&grp->timer); + grp->outstanding_xid2 = 0; + grp->outstanding_xid7 = 0; + grp->outstanding_xid7_p2 = 0; + grp->saved_xid2 = NULL; + if (callback) + ctcm_open(dev); + fsm_event(priv->fsm, DEV_EVENT_START, dev); + break; + case MPCG_STATE_READY: + /* XID exchanges completed after PORT was activated */ + /* Link station 
already active */ + /* Maybe timing issue...retry callback */ + grp->allocchan_callback_retries++; + if (grp->allocchan_callback_retries < 4) { + if (grp->allochanfunc) + grp->allochanfunc(grp->port_num, + grp->group_max_buflen); + } else { + /* there are problems...bail out */ + /* there may be a state mismatch so restart */ + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + grp->allocchan_callback_retries = 0; + } + break; + } + + return 0; +} +EXPORT_SYMBOL(ctc_mpc_alloc_channel); + +/* + * ctc_mpc_establish_connectivity + * (exported interface) + */ +void ctc_mpc_establish_connectivity(int port_num, + void (*callback)(int, int, int)) +{ + struct net_device *dev; + struct mpc_group *grp; + struct ctcm_priv *priv; + struct channel *rch, *wch; + + dev = ctcmpc_get_dev(port_num); + if (dev == NULL) + return; + priv = dev->ml_priv; + grp = priv->mpcg; + rch = priv->channel[CTCM_READ]; + wch = priv->channel[CTCM_WRITE]; + + CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO, + "%s(%s): state=%s", + CTCM_FUNTAIL, dev->name, fsm_getstate_str(grp->fsm)); + + grp->estconnfunc = callback; + grp->port_num = port_num; + + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_READY: + /* XID exchanges completed after PORT was activated */ + /* Link station already active */ + /* Maybe timing issue...retry callback */ + fsm_deltimer(&grp->timer); + grp->estconn_callback_retries++; + if (grp->estconn_callback_retries < 4) { + if (grp->estconnfunc) { + grp->estconnfunc(grp->port_num, 0, + grp->group_max_buflen); + grp->estconnfunc = NULL; + } + } else { + /* there are problems...bail out */ + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + grp->estconn_callback_retries = 0; + } + break; + case MPCG_STATE_INOP: + case MPCG_STATE_RESET: + /* MPC Group is not ready to start XID - min num of */ + /* 1 read and 1 write channel have not been acquired*/ + + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): REJECTED - inactive channels", + CTCM_FUNTAIL, dev->name); + if (grp->estconnfunc) { + grp->estconnfunc(grp->port_num, -1, 0); + grp->estconnfunc = NULL; + } + break; + case MPCG_STATE_XID2INITW: + /* alloc channel was called but no XID exchange */ + /* has occurred. 
initiate xside XID exchange */ + /* make sure yside XID0 processing has not started */ + + if ((fsm_getstate(rch->fsm) > CH_XID0_PENDING) || + (fsm_getstate(wch->fsm) > CH_XID0_PENDING)) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): ABORT - PASSIVE XID", + CTCM_FUNTAIL, dev->name); + break; + } + grp->send_qllc_disc = 1; + fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIT); + fsm_deltimer(&grp->timer); + fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE, + MPCG_EVENT_TIMER, dev); + grp->outstanding_xid7 = 0; + grp->outstanding_xid7_p2 = 0; + grp->saved_xid2 = NULL; + if ((rch->in_mpcgroup) && + (fsm_getstate(rch->fsm) == CH_XID0_PENDING)) + fsm_event(grp->fsm, MPCG_EVENT_XID0DO, rch); + else { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): RX-%s not ready for ACTIVE XID0", + CTCM_FUNTAIL, dev->name, rch->id); + if (grp->estconnfunc) { + grp->estconnfunc(grp->port_num, -1, 0); + grp->estconnfunc = NULL; + } + fsm_deltimer(&grp->timer); + goto done; + } + if ((wch->in_mpcgroup) && + (fsm_getstate(wch->fsm) == CH_XID0_PENDING)) + fsm_event(grp->fsm, MPCG_EVENT_XID0DO, wch); + else { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): WX-%s not ready for ACTIVE XID0", + CTCM_FUNTAIL, dev->name, wch->id); + if (grp->estconnfunc) { + grp->estconnfunc(grp->port_num, -1, 0); + grp->estconnfunc = NULL; + } + fsm_deltimer(&grp->timer); + goto done; + } + break; + case MPCG_STATE_XID0IOWAIT: + /* already in active XID negotiations */ + default: + break; + } + +done: + CTCM_PR_DEBUG("Exit %s()\n", __func__); + return; +} +EXPORT_SYMBOL(ctc_mpc_establish_connectivity); + +/* + * ctc_mpc_dealloc_ch + * (exported interface) + */ +void ctc_mpc_dealloc_ch(int port_num) +{ + struct net_device *dev; + struct ctcm_priv *priv; + struct mpc_group *grp; + + dev = ctcmpc_get_dev(port_num); + if (dev == NULL) + return; + priv = dev->ml_priv; + grp = priv->mpcg; + + CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_DEBUG, + "%s: %s: refcount = %d\n", + CTCM_FUNTAIL, dev->name, netdev_refcnt_read(dev)); + + fsm_deltimer(&priv->restart_timer); + grp->channels_terminating = 0; + fsm_deltimer(&grp->timer); + grp->allochanfunc = NULL; + grp->estconnfunc = NULL; + grp->port_persist = 0; + grp->send_qllc_disc = 0; + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + + ctcm_close(dev); + return; +} +EXPORT_SYMBOL(ctc_mpc_dealloc_ch); + +/* + * ctc_mpc_flow_control + * (exported interface) + */ +void ctc_mpc_flow_control(int port_num, int flowc) +{ + struct ctcm_priv *priv; + struct mpc_group *grp; + struct net_device *dev; + struct channel *rch; + int mpcg_state; + + dev = ctcmpc_get_dev(port_num); + if (dev == NULL) + return; + priv = dev->ml_priv; + grp = priv->mpcg; + + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, + "%s: %s: flowc = %d", + CTCM_FUNTAIL, dev->name, flowc); + + rch = priv->channel[CTCM_READ]; + + mpcg_state = fsm_getstate(grp->fsm); + switch (flowc) { + case 1: + if (mpcg_state == MPCG_STATE_FLOWC) + break; + if (mpcg_state == MPCG_STATE_READY) { + if (grp->flow_off_called == 1) + grp->flow_off_called = 0; + else + fsm_newstate(grp->fsm, MPCG_STATE_FLOWC); + break; + } + break; + case 0: + if (mpcg_state == MPCG_STATE_FLOWC) { + fsm_newstate(grp->fsm, MPCG_STATE_READY); + /* ensure any data that has accumulated */ + /* on the io_queue will now be sen t */ + tasklet_schedule(&rch->ch_tasklet); + } + /* possible race condition */ + if (mpcg_state == MPCG_STATE_READY) { + grp->flow_off_called = 1; + break; + } + break; + } + +} +EXPORT_SYMBOL(ctc_mpc_flow_control); + +static int mpc_send_qllc_discontact(struct net_device 
*); + +/* + * helper function of ctcmpc_unpack_skb +*/ +static void mpc_rcvd_sweep_resp(struct mpcg_info *mpcginfo) +{ + struct channel *rch = mpcginfo->ch; + struct net_device *dev = rch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct channel *ch = priv->channel[CTCM_WRITE]; + + CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, ch, ch->id); + CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH); + + grp->sweep_rsp_pend_num--; + + if ((grp->sweep_req_pend_num == 0) && + (grp->sweep_rsp_pend_num == 0)) { + fsm_deltimer(&ch->sweep_timer); + grp->in_sweep = 0; + rch->th_seq_num = 0x00; + ch->th_seq_num = 0x00; + ctcm_clear_busy_do(dev); + } + + return; + +} + +/* + * helper function of mpc_rcvd_sweep_req + * which is a helper of ctcmpc_unpack_skb + */ +static void ctcmpc_send_sweep_resp(struct channel *rch) +{ + struct net_device *dev = rch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct th_sweep *header; + struct sk_buff *sweep_skb; + struct channel *ch = priv->channel[CTCM_WRITE]; + + CTCM_PR_DEBUG("%s: ch=0x%p id=%s\n", __func__, rch, rch->id); + + sweep_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA); + if (sweep_skb == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): sweep_skb allocation ERROR\n", + CTCM_FUNTAIL, rch->id); + goto done; + } + + header = kmalloc(sizeof(struct th_sweep), gfp_type()); + + if (!header) { + dev_kfree_skb_any(sweep_skb); + goto done; + } + + header->th.th_seg = 0x00 ; + header->th.th_ch_flag = TH_SWEEP_RESP; + header->th.th_blk_flag = 0x00; + header->th.th_is_xid = 0x00; + header->th.th_seq_num = 0x00; + header->sw.th_last_seq = ch->th_seq_num; + + skb_put_data(sweep_skb, header, TH_SWEEP_LENGTH); + + kfree(header); + + netif_trans_update(dev); + skb_queue_tail(&ch->sweep_queue, sweep_skb); + + fsm_addtimer(&ch->sweep_timer, 100, CTC_EVENT_RSWEEP_TIMER, ch); + + return; + +done: + grp->in_sweep = 0; + ctcm_clear_busy_do(dev); + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + + return; +} + +/* + * helper function of ctcmpc_unpack_skb + */ +static void mpc_rcvd_sweep_req(struct mpcg_info *mpcginfo) +{ + struct channel *rch = mpcginfo->ch; + struct net_device *dev = rch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct channel *ch = priv->channel[CTCM_WRITE]; + + if (do_debug) + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, + " %s(): ch=0x%p id=%s\n", __func__, ch, ch->id); + + if (grp->in_sweep == 0) { + grp->in_sweep = 1; + ctcm_test_and_set_busy(dev); + grp->sweep_req_pend_num = grp->active_channels[CTCM_READ]; + grp->sweep_rsp_pend_num = grp->active_channels[CTCM_READ]; + } + + CTCM_D3_DUMP((char *)mpcginfo->sweep, TH_SWEEP_LENGTH); + + grp->sweep_req_pend_num--; + ctcmpc_send_sweep_resp(ch); + kfree(mpcginfo); + return; +} + +/* + * MPC Group Station FSM definitions + */ +static const char *mpcg_event_names[] = { + [MPCG_EVENT_INOP] = "INOP Condition", + [MPCG_EVENT_DISCONC] = "Discontact Received", + [MPCG_EVENT_XID0DO] = "Channel Active - Start XID", + [MPCG_EVENT_XID2] = "XID2 Received", + [MPCG_EVENT_XID2DONE] = "XID0 Complete", + [MPCG_EVENT_XID7DONE] = "XID7 Complete", + [MPCG_EVENT_TIMER] = "XID Setup Timer", + [MPCG_EVENT_DOIO] = "XID DoIO", +}; + +static const char *mpcg_state_names[] = { + [MPCG_STATE_RESET] = "Reset", + [MPCG_STATE_INOP] = "INOP", + [MPCG_STATE_XID2INITW] = "Passive XID- XID0 Pending Start", + [MPCG_STATE_XID2INITX] = "Passive XID- XID0 Pending Complete", + 
[MPCG_STATE_XID7INITW] = "Passive XID- XID7 Pending P1 Start", + [MPCG_STATE_XID7INITX] = "Passive XID- XID7 Pending P2 Complete", + [MPCG_STATE_XID0IOWAIT] = "Active XID- XID0 Pending Start", + [MPCG_STATE_XID0IOWAIX] = "Active XID- XID0 Pending Complete", + [MPCG_STATE_XID7INITI] = "Active XID- XID7 Pending Start", + [MPCG_STATE_XID7INITZ] = "Active XID- XID7 Pending Complete ", + [MPCG_STATE_XID7INITF] = "XID - XID7 Complete ", + [MPCG_STATE_FLOWC] = "FLOW CONTROL ON", + [MPCG_STATE_READY] = "READY", +}; + +/* + * The MPC Group Station FSM + * 22 events + */ +static const fsm_node mpcg_fsm[] = { + { MPCG_STATE_RESET, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_INOP, MPCG_EVENT_INOP, mpc_action_nop }, + { MPCG_STATE_FLOWC, MPCG_EVENT_INOP, mpc_action_go_inop }, + + { MPCG_STATE_READY, MPCG_EVENT_DISCONC, mpc_action_discontact }, + { MPCG_STATE_READY, MPCG_EVENT_INOP, mpc_action_go_inop }, + + { MPCG_STATE_XID2INITW, MPCG_EVENT_XID0DO, mpc_action_doxid0 }, + { MPCG_STATE_XID2INITW, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 }, + { MPCG_STATE_XID2INITW, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_XID2INITW, MPCG_EVENT_TIMER, mpc_action_timeout }, + { MPCG_STATE_XID2INITW, MPCG_EVENT_DOIO, mpc_action_yside_xid }, + + { MPCG_STATE_XID2INITX, MPCG_EVENT_XID0DO, mpc_action_doxid0 }, + { MPCG_STATE_XID2INITX, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 }, + { MPCG_STATE_XID2INITX, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_XID2INITX, MPCG_EVENT_TIMER, mpc_action_timeout }, + { MPCG_STATE_XID2INITX, MPCG_EVENT_DOIO, mpc_action_yside_xid }, + + { MPCG_STATE_XID7INITW, MPCG_EVENT_XID2DONE, mpc_action_doxid7 }, + { MPCG_STATE_XID7INITW, MPCG_EVENT_DISCONC, mpc_action_discontact }, + { MPCG_STATE_XID7INITW, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 }, + { MPCG_STATE_XID7INITW, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_XID7INITW, MPCG_EVENT_TIMER, mpc_action_timeout }, + { MPCG_STATE_XID7INITW, MPCG_EVENT_XID7DONE, mpc_action_doxid7 }, + { MPCG_STATE_XID7INITW, MPCG_EVENT_DOIO, mpc_action_yside_xid }, + + { MPCG_STATE_XID7INITX, MPCG_EVENT_DISCONC, mpc_action_discontact }, + { MPCG_STATE_XID7INITX, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 }, + { MPCG_STATE_XID7INITX, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_XID7INITX, MPCG_EVENT_XID7DONE, mpc_action_doxid7 }, + { MPCG_STATE_XID7INITX, MPCG_EVENT_TIMER, mpc_action_timeout }, + { MPCG_STATE_XID7INITX, MPCG_EVENT_DOIO, mpc_action_yside_xid }, + + { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID0DO, mpc_action_doxid0 }, + { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DISCONC, mpc_action_discontact }, + { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 }, + { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_TIMER, mpc_action_timeout }, + { MPCG_STATE_XID0IOWAIT, MPCG_EVENT_DOIO, mpc_action_xside_xid }, + + { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID0DO, mpc_action_doxid0 }, + { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DISCONC, mpc_action_discontact }, + { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_XID2, mpc_action_rcvd_xid0 }, + { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_TIMER, mpc_action_timeout }, + { MPCG_STATE_XID0IOWAIX, MPCG_EVENT_DOIO, mpc_action_xside_xid }, + + { MPCG_STATE_XID7INITI, MPCG_EVENT_XID2DONE, mpc_action_doxid7 }, + { MPCG_STATE_XID7INITI, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 }, + { MPCG_STATE_XID7INITI, MPCG_EVENT_DISCONC, mpc_action_discontact }, + { MPCG_STATE_XID7INITI, MPCG_EVENT_INOP, mpc_action_go_inop }, + 
{ MPCG_STATE_XID7INITI, MPCG_EVENT_TIMER, mpc_action_timeout }, + { MPCG_STATE_XID7INITI, MPCG_EVENT_XID7DONE, mpc_action_doxid7 }, + { MPCG_STATE_XID7INITI, MPCG_EVENT_DOIO, mpc_action_xside_xid }, + + { MPCG_STATE_XID7INITZ, MPCG_EVENT_XID2, mpc_action_rcvd_xid7 }, + { MPCG_STATE_XID7INITZ, MPCG_EVENT_XID7DONE, mpc_action_doxid7 }, + { MPCG_STATE_XID7INITZ, MPCG_EVENT_DISCONC, mpc_action_discontact }, + { MPCG_STATE_XID7INITZ, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_XID7INITZ, MPCG_EVENT_TIMER, mpc_action_timeout }, + { MPCG_STATE_XID7INITZ, MPCG_EVENT_DOIO, mpc_action_xside_xid }, + + { MPCG_STATE_XID7INITF, MPCG_EVENT_INOP, mpc_action_go_inop }, + { MPCG_STATE_XID7INITF, MPCG_EVENT_XID7DONE, mpc_action_go_ready }, +}; + +static int mpcg_fsm_len = ARRAY_SIZE(mpcg_fsm); + +/* + * MPC Group Station FSM action + * CTCM_PROTO_MPC only + */ +static void mpc_action_go_ready(fsm_instance *fsm, int event, void *arg) +{ + struct net_device *dev = arg; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + if (grp == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): No MPC group", + CTCM_FUNTAIL, dev->name); + return; + } + + fsm_deltimer(&grp->timer); + + if (grp->saved_xid2->xid2_flag2 == 0x40) { + priv->xid->xid2_flag2 = 0x00; + if (grp->estconnfunc) { + grp->estconnfunc(grp->port_num, 1, + grp->group_max_buflen); + grp->estconnfunc = NULL; + } else if (grp->allochanfunc) + grp->send_qllc_disc = 1; + + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): fails", + CTCM_FUNTAIL, dev->name); + return; + } + + grp->port_persist = 1; + grp->out_of_sequence = 0; + grp->estconn_called = 0; + + tasklet_hi_schedule(&grp->mpc_tasklet2); + + return; +} + +/* + * helper of ctcm_init_netdevice + * CTCM_PROTO_MPC only + */ +void mpc_group_ready(unsigned long adev) +{ + struct net_device *dev = (struct net_device *)adev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct channel *ch = NULL; + + if (grp == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): No MPC group", + CTCM_FUNTAIL, dev->name); + return; + } + + CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE, + "%s: %s: GROUP TRANSITIONED TO READY, maxbuf = %d\n", + CTCM_FUNTAIL, dev->name, grp->group_max_buflen); + + fsm_newstate(grp->fsm, MPCG_STATE_READY); + + /* Put up a read on the channel */ + ch = priv->channel[CTCM_READ]; + ch->pdu_seq = 0; + CTCM_PR_DBGDATA("ctcmpc: %s() ToDCM_pdu_seq= %08x\n" , + __func__, ch->pdu_seq); + + ctcmpc_chx_rxidle(ch->fsm, CTC_EVENT_START, ch); + /* Put the write channel in idle state */ + ch = priv->channel[CTCM_WRITE]; + if (ch->collect_len > 0) { + spin_lock(&ch->collect_lock); + ctcm_purge_skb_queue(&ch->collect_queue); + ch->collect_len = 0; + spin_unlock(&ch->collect_lock); + } + ctcm_chx_txidle(ch->fsm, CTC_EVENT_START, ch); + ctcm_clear_busy(dev); + + if (grp->estconnfunc) { + grp->estconnfunc(grp->port_num, 0, + grp->group_max_buflen); + grp->estconnfunc = NULL; + } else if (grp->allochanfunc) + grp->allochanfunc(grp->port_num, grp->group_max_buflen); + + grp->send_qllc_disc = 1; + grp->changed_side = 0; + + return; + +} + +/* + * Increment the MPC Group Active Channel Counts + * helper of dev_action (called from channel fsm) + */ +void mpc_channel_action(struct channel *ch, int direction, int action) +{ + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + if (grp == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, 
CTC_DBF_ERROR, + "%s(%s): No MPC group", + CTCM_FUNTAIL, dev->name); + return; + } + + CTCM_PR_DEBUG("enter %s: ch=0x%p id=%s\n", __func__, ch, ch->id); + + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, + "%s: %i / Grp:%s total_channels=%i, active_channels: " + "read=%i, write=%i\n", __func__, action, + fsm_getstate_str(grp->fsm), grp->num_channel_paths, + grp->active_channels[CTCM_READ], + grp->active_channels[CTCM_WRITE]); + + if ((action == MPC_CHANNEL_ADD) && (ch->in_mpcgroup == 0)) { + grp->num_channel_paths++; + grp->active_channels[direction]++; + grp->outstanding_xid2++; + ch->in_mpcgroup = 1; + + if (ch->xid_skb != NULL) + dev_kfree_skb_any(ch->xid_skb); + + ch->xid_skb = __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, + GFP_ATOMIC | GFP_DMA); + if (ch->xid_skb == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): Couldn't alloc ch xid_skb\n", + CTCM_FUNTAIL, dev->name); + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + return; + } + ch->xid_skb_data = ch->xid_skb->data; + ch->xid_th = (struct th_header *)ch->xid_skb->data; + skb_put(ch->xid_skb, TH_HEADER_LENGTH); + ch->xid = (struct xid2 *)skb_tail_pointer(ch->xid_skb); + skb_put(ch->xid_skb, XID2_LENGTH); + ch->xid_id = skb_tail_pointer(ch->xid_skb); + ch->xid_skb->data = ch->xid_skb_data; + skb_reset_tail_pointer(ch->xid_skb); + ch->xid_skb->len = 0; + + skb_put_data(ch->xid_skb, grp->xid_skb->data, + grp->xid_skb->len); + + ch->xid->xid2_dlc_type = + ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) + ? XID2_READ_SIDE : XID2_WRITE_SIDE); + + if (CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) + ch->xid->xid2_buf_len = 0x00; + + ch->xid_skb->data = ch->xid_skb_data; + skb_reset_tail_pointer(ch->xid_skb); + ch->xid_skb->len = 0; + + fsm_newstate(ch->fsm, CH_XID0_PENDING); + + if ((grp->active_channels[CTCM_READ] > 0) && + (grp->active_channels[CTCM_WRITE] > 0) && + (fsm_getstate(grp->fsm) < MPCG_STATE_XID2INITW)) { + fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW); + CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_NOTICE, + "%s: %s: MPC GROUP CHANNELS ACTIVE\n", + __func__, dev->name); + } + } else if ((action == MPC_CHANNEL_REMOVE) && + (ch->in_mpcgroup == 1)) { + ch->in_mpcgroup = 0; + grp->num_channel_paths--; + grp->active_channels[direction]--; + + if (ch->xid_skb != NULL) + dev_kfree_skb_any(ch->xid_skb); + ch->xid_skb = NULL; + + if (grp->channels_terminating) + goto done; + + if (((grp->active_channels[CTCM_READ] == 0) && + (grp->active_channels[CTCM_WRITE] > 0)) + || ((grp->active_channels[CTCM_WRITE] == 0) && + (grp->active_channels[CTCM_READ] > 0))) + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + } +done: + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, + "exit %s: %i / Grp:%s total_channels=%i, active_channels: " + "read=%i, write=%i\n", __func__, action, + fsm_getstate_str(grp->fsm), grp->num_channel_paths, + grp->active_channels[CTCM_READ], + grp->active_channels[CTCM_WRITE]); + + CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id); +} + +/** + * Unpack a just received skb and hand it over to + * upper layers. + * special MPC version of unpack_skb. + * + * ch The channel where this skb has been received. + * pskb The received skb. 
+ */ +static void ctcmpc_unpack_skb(struct channel *ch, struct sk_buff *pskb) +{ + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct pdu *curr_pdu; + struct mpcg_info *mpcginfo; + struct th_header *header = NULL; + struct th_sweep *sweep = NULL; + int pdu_last_seen = 0; + __u32 new_len; + struct sk_buff *skb; + int skblen; + int sendrc = 0; + + CTCM_PR_DEBUG("ctcmpc enter: %s() %s cp:%i ch:%s\n", + __func__, dev->name, smp_processor_id(), ch->id); + + header = (struct th_header *)pskb->data; + if ((header->th_seg == 0) && + (header->th_ch_flag == 0) && + (header->th_blk_flag == 0) && + (header->th_seq_num == 0)) + /* nothing for us */ goto done; + + CTCM_PR_DBGDATA("%s: th_header\n", __func__); + CTCM_D3_DUMP((char *)header, TH_HEADER_LENGTH); + CTCM_PR_DBGDATA("%s: pskb len: %04x \n", __func__, pskb->len); + + pskb->dev = dev; + pskb->ip_summed = CHECKSUM_UNNECESSARY; + skb_pull(pskb, TH_HEADER_LENGTH); + + if (likely(header->th_ch_flag == TH_HAS_PDU)) { + CTCM_PR_DBGDATA("%s: came into th_has_pdu\n", __func__); + if ((fsm_getstate(grp->fsm) == MPCG_STATE_FLOWC) || + ((fsm_getstate(grp->fsm) == MPCG_STATE_READY) && + (header->th_seq_num != ch->th_seq_num + 1) && + (ch->th_seq_num != 0))) { + /* This is NOT the next segment * + * we are not the correct race winner * + * go away and let someone else win * + * BUT..this only applies if xid negot * + * is done * + */ + grp->out_of_sequence += 1; + __skb_push(pskb, TH_HEADER_LENGTH); + skb_queue_tail(&ch->io_queue, pskb); + CTCM_PR_DBGDATA("%s: th_seq_num expect:%08x " + "got:%08x\n", __func__, + ch->th_seq_num + 1, header->th_seq_num); + + return; + } + grp->out_of_sequence = 0; + ch->th_seq_num = header->th_seq_num; + + CTCM_PR_DBGDATA("ctcmpc: %s() FromVTAM_th_seq=%08x\n", + __func__, ch->th_seq_num); + + if (unlikely(fsm_getstate(grp->fsm) != MPCG_STATE_READY)) + goto done; + while ((pskb->len > 0) && !pdu_last_seen) { + curr_pdu = (struct pdu *)pskb->data; + + CTCM_PR_DBGDATA("%s: pdu_header\n", __func__); + CTCM_D3_DUMP((char *)pskb->data, PDU_HEADER_LENGTH); + CTCM_PR_DBGDATA("%s: pskb len: %04x \n", + __func__, pskb->len); + + skb_pull(pskb, PDU_HEADER_LENGTH); + + if (curr_pdu->pdu_flag & PDU_LAST) + pdu_last_seen = 1; + if (curr_pdu->pdu_flag & PDU_CNTL) + pskb->protocol = htons(ETH_P_SNAP); + else + pskb->protocol = htons(ETH_P_SNA_DIX); + + if ((pskb->len <= 0) || (pskb->len > ch->max_bufsize)) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): Dropping packet with " + "illegal size %d", + CTCM_FUNTAIL, dev->name, pskb->len); + + priv->stats.rx_dropped++; + priv->stats.rx_length_errors++; + goto done; + } + skb_reset_mac_header(pskb); + new_len = curr_pdu->pdu_offset; + CTCM_PR_DBGDATA("%s: new_len: %04x \n", + __func__, new_len); + if ((new_len == 0) || (new_len > pskb->len)) { + /* should never happen */ + /* pskb len must be hosed...bail out */ + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): invalid pdu_offset: %04x", + /* "data may be lost", */ + CTCM_FUNTAIL, dev->name, new_len); + goto done; + } + skb = __dev_alloc_skb(new_len+4, GFP_ATOMIC); + + if (!skb) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): MEMORY allocation error", + CTCM_FUNTAIL, dev->name); + priv->stats.rx_dropped++; + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + goto done; + } + skb_put_data(skb, pskb->data, new_len); + + skb_reset_mac_header(skb); + skb->dev = pskb->dev; + skb->protocol = pskb->protocol; + skb->ip_summed = CHECKSUM_UNNECESSARY; + *((__u32 *)
skb_push(skb, 4)) = ch->pdu_seq; + ch->pdu_seq++; + + if (do_debug_data) { + ctcm_pr_debug("%s: ToDCM_pdu_seq= %08x\n", + __func__, ch->pdu_seq); + ctcm_pr_debug("%s: skb:%0lx " + "skb len: %d \n", __func__, + (unsigned long)skb, skb->len); + ctcm_pr_debug("%s: up to 32 bytes " + "of pdu_data sent\n", __func__); + ctcmpc_dump32((char *)skb->data, skb->len); + } + + skblen = skb->len; + sendrc = netif_rx(skb); + priv->stats.rx_packets++; + priv->stats.rx_bytes += skblen; + skb_pull(pskb, new_len); /* point to next PDU */ + } + } else { + mpcginfo = kmalloc(sizeof(struct mpcg_info), gfp_type()); + if (mpcginfo == NULL) + goto done; + + mpcginfo->ch = ch; + mpcginfo->th = header; + mpcginfo->skb = pskb; + CTCM_PR_DEBUG("%s: Not PDU - may be control pkt\n", + __func__); + /* it's a sweep? */ + sweep = (struct th_sweep *)pskb->data; + mpcginfo->sweep = sweep; + if (header->th_ch_flag == TH_SWEEP_REQ) + mpc_rcvd_sweep_req(mpcginfo); + else if (header->th_ch_flag == TH_SWEEP_RESP) + mpc_rcvd_sweep_resp(mpcginfo); + else if (header->th_blk_flag == TH_DATA_IS_XID) { + struct xid2 *thisxid = (struct xid2 *)pskb->data; + skb_pull(pskb, XID2_LENGTH); + mpcginfo->xid = thisxid; + fsm_event(grp->fsm, MPCG_EVENT_XID2, mpcginfo); + } else if (header->th_blk_flag == TH_DISCONTACT) + fsm_event(grp->fsm, MPCG_EVENT_DISCONC, mpcginfo); + else if (header->th_seq_num != 0) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): control pkt expected\n", + CTCM_FUNTAIL, dev->name); + priv->stats.rx_dropped++; + /* mpcginfo only used for non-data transfers */ + if (do_debug_data) + ctcmpc_dump_skb(pskb, -8); + } + kfree(mpcginfo); + } +done: + + dev_kfree_skb_any(pskb); + if (sendrc == NET_RX_DROP) { + dev_warn(&dev->dev, + "The network backlog for %s is exceeded, " + "package dropped\n", __func__); + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + } + + CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n", + __func__, dev->name, ch, ch->id); +} + +/** + * tasklet helper for mpc's skb unpacking. + * + * ch The channel to work on. + * Allow flow control back pressure to occur here. 
+ * Throttling back the channel can result in excessive + * channel inactivity and system deactivation of the channel + */ +void ctcmpc_bh(unsigned long thischan) +{ + struct channel *ch = (struct channel *)thischan; + struct sk_buff *skb; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + CTCM_PR_DEBUG("%s cp:%i enter: %s() %s\n", + dev->name, smp_processor_id(), __func__, ch->id); + /* caller has requested driver to throttle back */ + while ((fsm_getstate(grp->fsm) != MPCG_STATE_FLOWC) && + (skb = skb_dequeue(&ch->io_queue))) { + ctcmpc_unpack_skb(ch, skb); + if (grp->out_of_sequence > 20) { + /* assume data loss has occurred if */ + /* missing seq_num for extended */ + /* period of time */ + grp->out_of_sequence = 0; + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + break; + } + if (skb == skb_peek(&ch->io_queue)) + break; + } + CTCM_PR_DEBUG("exit %s: %s: ch=0x%p id=%s\n", + __func__, dev->name, ch, ch->id); + return; +} + +/* + * MPC Group Initializations + */ +struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv) +{ + struct mpc_group *grp; + + CTCM_DBF_TEXT_(MPC_SETUP, CTC_DBF_INFO, + "Enter %s(%p)", CTCM_FUNTAIL, priv); + + grp = kzalloc(sizeof(struct mpc_group), GFP_KERNEL); + if (grp == NULL) + return NULL; + + grp->fsm = init_fsm("mpcg", mpcg_state_names, mpcg_event_names, + MPCG_NR_STATES, MPCG_NR_EVENTS, mpcg_fsm, + mpcg_fsm_len, GFP_KERNEL); + if (grp->fsm == NULL) { + kfree(grp); + return NULL; + } + + fsm_newstate(grp->fsm, MPCG_STATE_RESET); + fsm_settimer(grp->fsm, &grp->timer); + + grp->xid_skb = + __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC | GFP_DMA); + if (grp->xid_skb == NULL) { + kfree_fsm(grp->fsm); + kfree(grp); + return NULL; + } + /* base xid for all channels in group */ + grp->xid_skb_data = grp->xid_skb->data; + grp->xid_th = (struct th_header *)grp->xid_skb->data; + skb_put_data(grp->xid_skb, &thnorm, TH_HEADER_LENGTH); + + grp->xid = (struct xid2 *)skb_tail_pointer(grp->xid_skb); + skb_put_data(grp->xid_skb, &init_xid, XID2_LENGTH); + grp->xid->xid2_adj_id = jiffies | 0xfff00000; + grp->xid->xid2_sender_id = jiffies; + + grp->xid_id = skb_tail_pointer(grp->xid_skb); + skb_put_data(grp->xid_skb, "VTAM", 4); + + grp->rcvd_xid_skb = + __dev_alloc_skb(MPC_BUFSIZE_DEFAULT, GFP_ATOMIC|GFP_DMA); + if (grp->rcvd_xid_skb == NULL) { + kfree_fsm(grp->fsm); + dev_kfree_skb(grp->xid_skb); + kfree(grp); + return NULL; + } + grp->rcvd_xid_data = grp->rcvd_xid_skb->data; + grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data; + skb_put_data(grp->rcvd_xid_skb, &thnorm, TH_HEADER_LENGTH); + grp->saved_xid2 = NULL; + priv->xid = grp->xid; + priv->mpcg = grp; + return grp; +} + +/* + * The MPC Group Station FSM + */ + +/* + * MPC Group Station FSM actions + * CTCM_PROTO_MPC only + */ + +/** + * NOP action for statemachines + */ +static void mpc_action_nop(fsm_instance *fi, int event, void *arg) +{ +} + +/* + * invoked when the device transitions to dev_stopped + * MPC will stop each individual channel if a single XID failure + * occurs, or will initiate stopping of all channels if a GROUP + * level failure occurs.
+ */ +static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg) +{ + struct net_device *dev = arg; + struct ctcm_priv *priv; + struct mpc_group *grp; + struct channel *wch; + + CTCM_PR_DEBUG("Enter %s: %s\n", __func__, dev->name); + + priv = dev->ml_priv; + grp = priv->mpcg; + grp->flow_off_called = 0; + fsm_deltimer(&grp->timer); + if (grp->channels_terminating) + return; + + grp->channels_terminating = 1; + grp->saved_state = fsm_getstate(grp->fsm); + fsm_newstate(grp->fsm, MPCG_STATE_INOP); + if (grp->saved_state > MPCG_STATE_XID7INITF) + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, + "%s(%s): MPC GROUP INOPERATIVE", + CTCM_FUNTAIL, dev->name); + if ((grp->saved_state != MPCG_STATE_RESET) || + /* dealloc_channel has been called */ + (grp->port_persist == 0)) + fsm_deltimer(&priv->restart_timer); + + wch = priv->channel[CTCM_WRITE]; + + switch (grp->saved_state) { + case MPCG_STATE_RESET: + case MPCG_STATE_INOP: + case MPCG_STATE_XID2INITW: + case MPCG_STATE_XID0IOWAIT: + case MPCG_STATE_XID2INITX: + case MPCG_STATE_XID7INITW: + case MPCG_STATE_XID7INITX: + case MPCG_STATE_XID0IOWAIX: + case MPCG_STATE_XID7INITI: + case MPCG_STATE_XID7INITZ: + case MPCG_STATE_XID7INITF: + break; + case MPCG_STATE_FLOWC: + case MPCG_STATE_READY: + default: + tasklet_hi_schedule(&wch->ch_disc_tasklet); + } + + grp->xid2_tgnum = 0; + grp->group_max_buflen = 0; /*min of all received */ + grp->outstanding_xid2 = 0; + grp->outstanding_xid7 = 0; + grp->outstanding_xid7_p2 = 0; + grp->saved_xid2 = NULL; + grp->xidnogood = 0; + grp->changed_side = 0; + + grp->rcvd_xid_skb->data = grp->rcvd_xid_data; + skb_reset_tail_pointer(grp->rcvd_xid_skb); + grp->rcvd_xid_skb->len = 0; + grp->rcvd_xid_th = (struct th_header *)grp->rcvd_xid_skb->data; + skb_put_data(grp->rcvd_xid_skb, &thnorm, TH_HEADER_LENGTH); + + if (grp->send_qllc_disc == 1) { + grp->send_qllc_disc = 0; + mpc_send_qllc_discontact(dev); + } + + /* DO NOT issue DEV_EVENT_STOP directly out of this code */ + /* This can result in INOP of VTAM PU due to halting of */ + /* outstanding IO which causes a sense to be returned */ + /* Only about 3 senses are allowed and then IOS/VTAM will*/ + /* become unreachable without manual intervention */ + if ((grp->port_persist == 1) || (grp->alloc_called)) { + grp->alloc_called = 0; + fsm_deltimer(&priv->restart_timer); + fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_RESTART, dev); + fsm_newstate(grp->fsm, MPCG_STATE_RESET); + if (grp->saved_state > MPCG_STATE_XID7INITF) + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS, + "%s(%s): MPC GROUP RECOVERY SCHEDULED", + CTCM_FUNTAIL, dev->name); + } else { + fsm_deltimer(&priv->restart_timer); + fsm_addtimer(&priv->restart_timer, 500, DEV_EVENT_STOP, dev); + fsm_newstate(grp->fsm, MPCG_STATE_RESET); + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ALWAYS, + "%s(%s): NO MPC GROUP RECOVERY ATTEMPTED", + CTCM_FUNTAIL, dev->name); + } +} + +/** + * Handle mpc group action timeout. + * MPC Group Station FSM action + * CTCM_PROTO_MPC only + * + * fi An instance of an mpc_group fsm. + * event The event, just happened. + * arg Generic pointer, casted from net_device * upon call. 
+ */ +static void mpc_action_timeout(fsm_instance *fi, int event, void *arg) +{ + struct net_device *dev = arg; + struct ctcm_priv *priv; + struct mpc_group *grp; + struct channel *wch; + struct channel *rch; + + priv = dev->ml_priv; + grp = priv->mpcg; + wch = priv->channel[CTCM_WRITE]; + rch = priv->channel[CTCM_READ]; + + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_XID2INITW: + /* Unless there is outstanding IO on the */ + /* channel just return and wait for ATTN */ + /* interrupt to begin XID negotiations */ + if ((fsm_getstate(rch->fsm) == CH_XID0_PENDING) && + (fsm_getstate(wch->fsm) == CH_XID0_PENDING)) + break; + default: + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + } + + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG, + "%s: dev=%s exit", + CTCM_FUNTAIL, dev->name); + return; +} + +/* + * MPC Group Station FSM action + * CTCM_PROTO_MPC only + */ +void mpc_action_discontact(fsm_instance *fi, int event, void *arg) +{ + struct mpcg_info *mpcginfo = arg; + struct channel *ch = mpcginfo->ch; + struct net_device *dev; + struct ctcm_priv *priv; + struct mpc_group *grp; + + if (ch) { + dev = ch->netdev; + if (dev) { + priv = dev->ml_priv; + if (priv) { + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, + "%s: %s: %s\n", + CTCM_FUNTAIL, dev->name, ch->id); + grp = priv->mpcg; + grp->send_qllc_disc = 1; + fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); + } + } + } + + return; +} + +/* + * MPC Group Station - not part of FSM + * CTCM_PROTO_MPC only + * called from add_channel in ctcm_main.c + */ +void mpc_action_send_discontact(unsigned long thischan) +{ + int rc; + struct channel *ch = (struct channel *)thischan; + unsigned long saveflags = 0; + + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + rc = ccw_device_start(ch->cdev, &ch->ccw[15], + (unsigned long)ch, 0xff, 0); + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); + + if (rc != 0) { + ctcm_ccw_check_rc(ch, rc, (char *)__func__); + } + + return; +} + + +/* + * helper function of mpc FSM + * CTCM_PROTO_MPC only + * mpc_action_rcvd_xid7 +*/ +static int mpc_validate_xid(struct mpcg_info *mpcginfo) +{ + struct channel *ch = mpcginfo->ch; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + struct xid2 *xid = mpcginfo->xid; + int rc = 0; + __u64 our_id = 0; + __u64 their_id = 0; + int len = TH_HEADER_LENGTH + PDU_HEADER_LENGTH; + + CTCM_PR_DEBUG("Enter %s: xid=%p\n", __func__, xid); + + if (xid == NULL) { + rc = 1; + /* XID REJECTED: xid == NULL */ + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): xid = NULL", + CTCM_FUNTAIL, ch->id); + goto done; + } + + CTCM_D3_DUMP((char *)xid, XID2_LENGTH); + + /*the received direction should be the opposite of ours */ + if (((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? 
XID2_WRITE_SIDE : + XID2_READ_SIDE) != xid->xid2_dlc_type) { + rc = 2; + /* XID REJECTED: r/w channel pairing mismatch */ + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): r/w channel pairing mismatch", + CTCM_FUNTAIL, ch->id); + goto done; + } + + if (xid->xid2_dlc_type == XID2_READ_SIDE) { + CTCM_PR_DEBUG("%s: grpmaxbuf:%d xid2buflen:%d\n", __func__, + grp->group_max_buflen, xid->xid2_buf_len); + + if (grp->group_max_buflen == 0 || grp->group_max_buflen > + xid->xid2_buf_len - len) + grp->group_max_buflen = xid->xid2_buf_len - len; + } + + if (grp->saved_xid2 == NULL) { + grp->saved_xid2 = + (struct xid2 *)skb_tail_pointer(grp->rcvd_xid_skb); + + skb_put_data(grp->rcvd_xid_skb, xid, XID2_LENGTH); + grp->rcvd_xid_skb->data = grp->rcvd_xid_data; + + skb_reset_tail_pointer(grp->rcvd_xid_skb); + grp->rcvd_xid_skb->len = 0; + + /* convert two 32 bit numbers into 1 64 bit for id compare */ + our_id = (__u64)priv->xid->xid2_adj_id; + our_id = our_id << 32; + our_id = our_id + priv->xid->xid2_sender_id; + their_id = (__u64)xid->xid2_adj_id; + their_id = their_id << 32; + their_id = their_id + xid->xid2_sender_id; + /* lower id assume the xside role */ + if (our_id < their_id) { + grp->roll = XSIDE; + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, + "%s(%s): WE HAVE LOW ID - TAKE XSIDE", + CTCM_FUNTAIL, ch->id); + } else { + grp->roll = YSIDE; + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_NOTICE, + "%s(%s): WE HAVE HIGH ID - TAKE YSIDE", + CTCM_FUNTAIL, ch->id); + } + + } else { + if (xid->xid2_flag4 != grp->saved_xid2->xid2_flag4) { + rc = 3; + /* XID REJECTED: xid flag byte4 mismatch */ + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): xid flag byte4 mismatch", + CTCM_FUNTAIL, ch->id); + } + if (xid->xid2_flag2 == 0x40) { + rc = 4; + /* XID REJECTED - xid NOGOOD */ + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): xid NOGOOD", + CTCM_FUNTAIL, ch->id); + } + if (xid->xid2_adj_id != grp->saved_xid2->xid2_adj_id) { + rc = 5; + /* XID REJECTED - Adjacent Station ID Mismatch */ + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): Adjacent Station ID Mismatch", + CTCM_FUNTAIL, ch->id); + } + if (xid->xid2_sender_id != grp->saved_xid2->xid2_sender_id) { + rc = 6; + /* XID REJECTED - Sender Address Mismatch */ + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): Sender Address Mismatch", + CTCM_FUNTAIL, ch->id); + } + } +done: + if (rc) { + dev_warn(&dev->dev, + "The XID used in the MPC protocol is not valid, " + "rc = %d\n", rc); + priv->xid->xid2_flag2 = 0x40; + grp->saved_xid2->xid2_flag2 = 0x40; + } + + return rc; +} + +/* + * MPC Group Station FSM action + * CTCM_PROTO_MPC only + */ +static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side) +{ + struct channel *ch = arg; + int rc = 0; + int gotlock = 0; + unsigned long saveflags = 0; /* avoids compiler warning with + spin_unlock_irqrestore */ + + CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", + __func__, smp_processor_id(), ch, ch->id); + + if (ctcm_checkalloc_buffer(ch)) + goto done; + + /* + * skb data-buffer referencing: + */ + ch->trans_skb->data = ch->trans_skb_data; + skb_reset_tail_pointer(ch->trans_skb); + ch->trans_skb->len = 0; + /* result of the previous 3 statements is NOT always + * already set after ctcm_checkalloc_buffer + * because of possible reuse of the trans_skb + */ + memset(ch->trans_skb->data, 0, 16); + ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data; + /* check is main purpose here: */ + skb_put(ch->trans_skb, TH_HEADER_LENGTH); + ch->rcvd_xid = (struct xid2 *)skb_tail_pointer(ch->trans_skb); + /* 
check is main purpose here: */ + skb_put(ch->trans_skb, XID2_LENGTH); + ch->rcvd_xid_id = skb_tail_pointer(ch->trans_skb); + /* cleanup back to startpoint */ + ch->trans_skb->data = ch->trans_skb_data; + skb_reset_tail_pointer(ch->trans_skb); + ch->trans_skb->len = 0; + + /* non-checking rewrite of above skb data-buffer referencing: */ + /* + memset(ch->trans_skb->data, 0, 16); + ch->rcvd_xid_th = (struct th_header *)ch->trans_skb_data; + ch->rcvd_xid = (struct xid2 *)(ch->trans_skb_data + TH_HEADER_LENGTH); + ch->rcvd_xid_id = ch->trans_skb_data + TH_HEADER_LENGTH + XID2_LENGTH; + */ + + ch->ccw[8].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[8].count = 0; + ch->ccw[8].cda = 0x00; + + if (!(ch->xid_th && ch->xid && ch->xid_id)) + CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO, + "%s(%s): xid_th=%p, xid=%p, xid_id=%p", + CTCM_FUNTAIL, ch->id, ch->xid_th, ch->xid, ch->xid_id); + + if (side == XSIDE) { + /* mpc_action_xside_xid */ + if (ch->xid_th == NULL) + goto done; + ch->ccw[9].cmd_code = CCW_CMD_WRITE; + ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[9].count = TH_HEADER_LENGTH; + ch->ccw[9].cda = virt_to_phys(ch->xid_th); + + if (ch->xid == NULL) + goto done; + ch->ccw[10].cmd_code = CCW_CMD_WRITE; + ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[10].count = XID2_LENGTH; + ch->ccw[10].cda = virt_to_phys(ch->xid); + + ch->ccw[11].cmd_code = CCW_CMD_READ; + ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[11].count = TH_HEADER_LENGTH; + ch->ccw[11].cda = virt_to_phys(ch->rcvd_xid_th); + + ch->ccw[12].cmd_code = CCW_CMD_READ; + ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[12].count = XID2_LENGTH; + ch->ccw[12].cda = virt_to_phys(ch->rcvd_xid); + + ch->ccw[13].cmd_code = CCW_CMD_READ; + ch->ccw[13].cda = virt_to_phys(ch->rcvd_xid_id); + + } else { /* side == YSIDE : mpc_action_yside_xid */ + ch->ccw[9].cmd_code = CCW_CMD_READ; + ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[9].count = TH_HEADER_LENGTH; + ch->ccw[9].cda = virt_to_phys(ch->rcvd_xid_th); + + ch->ccw[10].cmd_code = CCW_CMD_READ; + ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[10].count = XID2_LENGTH; + ch->ccw[10].cda = virt_to_phys(ch->rcvd_xid); + + if (ch->xid_th == NULL) + goto done; + ch->ccw[11].cmd_code = CCW_CMD_WRITE; + ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[11].count = TH_HEADER_LENGTH; + ch->ccw[11].cda = virt_to_phys(ch->xid_th); + + if (ch->xid == NULL) + goto done; + ch->ccw[12].cmd_code = CCW_CMD_WRITE; + ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[12].count = XID2_LENGTH; + ch->ccw[12].cda = virt_to_phys(ch->xid); + + if (ch->xid_id == NULL) + goto done; + ch->ccw[13].cmd_code = CCW_CMD_WRITE; + ch->ccw[13].cda = virt_to_phys(ch->xid_id); + + } + ch->ccw[13].flags = CCW_FLAG_SLI | CCW_FLAG_CC; + ch->ccw[13].count = 4; + + ch->ccw[14].cmd_code = CCW_CMD_NOOP; + ch->ccw[14].flags = CCW_FLAG_SLI; + ch->ccw[14].count = 0; + ch->ccw[14].cda = 0; + + CTCM_CCW_DUMP((char *)&ch->ccw[8], sizeof(struct ccw1) * 7); + CTCM_D3_DUMP((char *)ch->xid_th, TH_HEADER_LENGTH); + CTCM_D3_DUMP((char *)ch->xid, XID2_LENGTH); + CTCM_D3_DUMP((char *)ch->xid_id, 4); + + if (!in_irq()) { + /* Such conditional locking is a known problem for + * sparse because it is statically nondeterministic. + * Warnings should be ignored here.
*/ + spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags); + gotlock = 1; + } + + fsm_addtimer(&ch->timer, 5000 , CTC_EVENT_TIMER, ch); + rc = ccw_device_start(ch->cdev, &ch->ccw[8], + (unsigned long)ch, 0xff, 0); + + if (gotlock) /* see remark above about conditional locking */ + spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags); + + if (rc != 0) { + ctcm_ccw_check_rc(ch, rc, + (side == XSIDE) ? "x-side XID" : "y-side XID"); + } + +done: + CTCM_PR_DEBUG("Exit %s: ch=0x%p id=%s\n", + __func__, ch, ch->id); + return; + +} + +/* + * MPC Group Station FSM action + * CTCM_PROTO_MPC only + */ +static void mpc_action_xside_xid(fsm_instance *fsm, int event, void *arg) +{ + mpc_action_side_xid(fsm, arg, XSIDE); +} + +/* + * MPC Group Station FSM action + * CTCM_PROTO_MPC only + */ +static void mpc_action_yside_xid(fsm_instance *fsm, int event, void *arg) +{ + mpc_action_side_xid(fsm, arg, YSIDE); +} + +/* + * MPC Group Station FSM action + * CTCM_PROTO_MPC only + */ +static void mpc_action_doxid0(fsm_instance *fsm, int event, void *arg) +{ + struct channel *ch = arg; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", + __func__, smp_processor_id(), ch, ch->id); + + if (ch->xid == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): ch->xid == NULL", + CTCM_FUNTAIL, dev->name); + return; + } + + fsm_newstate(ch->fsm, CH_XID0_INPROGRESS); + + ch->xid->xid2_option = XID2_0; + + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_XID2INITW: + case MPCG_STATE_XID2INITX: + ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD; + break; + case MPCG_STATE_XID0IOWAIT: + case MPCG_STATE_XID0IOWAIX: + ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL; + break; + } + + fsm_event(grp->fsm, MPCG_EVENT_DOIO, ch); + + return; +} + +/* + * MPC Group Station FSM action + * CTCM_PROTO_MPC only +*/ +static void mpc_action_doxid7(fsm_instance *fsm, int event, void *arg) +{ + struct net_device *dev = arg; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = NULL; + int direction; + int send = 0; + + if (priv) + grp = priv->mpcg; + if (grp == NULL) + return; + + for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) { + struct channel *ch = priv->channel[direction]; + struct xid2 *thisxid = ch->xid; + ch->xid_skb->data = ch->xid_skb_data; + skb_reset_tail_pointer(ch->xid_skb); + ch->xid_skb->len = 0; + thisxid->xid2_option = XID2_7; + send = 0; + + /* xid7 phase 1 */ + if (grp->outstanding_xid7_p2 > 0) { + if (grp->roll == YSIDE) { + if (fsm_getstate(ch->fsm) == CH_XID7_PENDING1) { + fsm_newstate(ch->fsm, CH_XID7_PENDING2); + ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD; + skb_put_data(ch->xid_skb, &thdummy, + TH_HEADER_LENGTH); + send = 1; + } + } else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING2) { + fsm_newstate(ch->fsm, CH_XID7_PENDING2); + ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL; + skb_put_data(ch->xid_skb, &thnorm, + TH_HEADER_LENGTH); + send = 1; + } + } else { + /* xid7 phase 2 */ + if (grp->roll == YSIDE) { + if (fsm_getstate(ch->fsm) < CH_XID7_PENDING4) { + fsm_newstate(ch->fsm, CH_XID7_PENDING4); + skb_put_data(ch->xid_skb, &thnorm, + TH_HEADER_LENGTH); + ch->ccw[8].cmd_code = CCW_CMD_WRITE_CTL; + send = 1; + } + } else if (fsm_getstate(ch->fsm) == CH_XID7_PENDING3) { + fsm_newstate(ch->fsm, CH_XID7_PENDING4); + ch->ccw[8].cmd_code = CCW_CMD_SENSE_CMD; + skb_put_data(ch->xid_skb, &thdummy, + TH_HEADER_LENGTH); + send = 1; + } + } + + if (send) + fsm_event(grp->fsm, 
MPCG_EVENT_DOIO, ch); + } + + return; +} + +/* + * MPC Group Station FSM action + * CTCM_PROTO_MPC only + */ +static void mpc_action_rcvd_xid0(fsm_instance *fsm, int event, void *arg) +{ + + struct mpcg_info *mpcginfo = arg; + struct channel *ch = mpcginfo->ch; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + CTCM_PR_DEBUG("%s: ch-id:%s xid2:%i xid7:%i xidt_p2:%i \n", + __func__, ch->id, grp->outstanding_xid2, + grp->outstanding_xid7, grp->outstanding_xid7_p2); + + if (fsm_getstate(ch->fsm) < CH_XID7_PENDING) + fsm_newstate(ch->fsm, CH_XID7_PENDING); + + grp->outstanding_xid2--; + grp->outstanding_xid7++; + grp->outstanding_xid7_p2++; + + /* must change state before validating xid to */ + /* properly handle interim interrupts received*/ + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_XID2INITW: + fsm_newstate(grp->fsm, MPCG_STATE_XID2INITX); + mpc_validate_xid(mpcginfo); + break; + case MPCG_STATE_XID0IOWAIT: + fsm_newstate(grp->fsm, MPCG_STATE_XID0IOWAIX); + mpc_validate_xid(mpcginfo); + break; + case MPCG_STATE_XID2INITX: + if (grp->outstanding_xid2 == 0) { + fsm_newstate(grp->fsm, MPCG_STATE_XID7INITW); + mpc_validate_xid(mpcginfo); + fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev); + } + break; + case MPCG_STATE_XID0IOWAIX: + if (grp->outstanding_xid2 == 0) { + fsm_newstate(grp->fsm, MPCG_STATE_XID7INITI); + mpc_validate_xid(mpcginfo); + fsm_event(grp->fsm, MPCG_EVENT_XID2DONE, dev); + } + break; + } + + CTCM_PR_DEBUG("ctcmpc:%s() %s xid2:%i xid7:%i xidt_p2:%i \n", + __func__, ch->id, grp->outstanding_xid2, + grp->outstanding_xid7, grp->outstanding_xid7_p2); + CTCM_PR_DEBUG("ctcmpc:%s() %s grpstate: %s chanstate: %s \n", + __func__, ch->id, + fsm_getstate_str(grp->fsm), fsm_getstate_str(ch->fsm)); + return; + +} + + +/* + * MPC Group Station FSM action + * CTCM_PROTO_MPC only + */ +static void mpc_action_rcvd_xid7(fsm_instance *fsm, int event, void *arg) +{ + struct mpcg_info *mpcginfo = arg; + struct channel *ch = mpcginfo->ch; + struct net_device *dev = ch->netdev; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + CTCM_PR_DEBUG("Enter %s: cp=%i ch=0x%p id=%s\n", + __func__, smp_processor_id(), ch, ch->id); + CTCM_PR_DEBUG("%s: outstanding_xid7: %i, outstanding_xid7_p2: %i\n", + __func__, grp->outstanding_xid7, grp->outstanding_xid7_p2); + + grp->outstanding_xid7--; + ch->xid_skb->data = ch->xid_skb_data; + skb_reset_tail_pointer(ch->xid_skb); + ch->xid_skb->len = 0; + + switch (fsm_getstate(grp->fsm)) { + case MPCG_STATE_XID7INITI: + fsm_newstate(grp->fsm, MPCG_STATE_XID7INITZ); + mpc_validate_xid(mpcginfo); + break; + case MPCG_STATE_XID7INITW: + fsm_newstate(grp->fsm, MPCG_STATE_XID7INITX); + mpc_validate_xid(mpcginfo); + break; + case MPCG_STATE_XID7INITZ: + case MPCG_STATE_XID7INITX: + if (grp->outstanding_xid7 == 0) { + if (grp->outstanding_xid7_p2 > 0) { + grp->outstanding_xid7 = + grp->outstanding_xid7_p2; + grp->outstanding_xid7_p2 = 0; + } else + fsm_newstate(grp->fsm, MPCG_STATE_XID7INITF); + + mpc_validate_xid(mpcginfo); + fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev); + break; + } + mpc_validate_xid(mpcginfo); + break; + } + return; +} + +/* + * mpc_action helper of an MPC Group Station FSM action + * CTCM_PROTO_MPC only + */ +static int mpc_send_qllc_discontact(struct net_device *dev) +{ + __u32 new_len = 0; + struct sk_buff *skb; + struct qllc *qllcptr; + struct ctcm_priv *priv = dev->ml_priv; + struct mpc_group *grp = priv->mpcg; + + CTCM_PR_DEBUG("%s: GROUP 
STATE: %s\n", + __func__, mpcg_state_names[grp->saved_state]); + + switch (grp->saved_state) { + /* + * establish conn callback function is + * preferred method to report failure + */ + case MPCG_STATE_XID0IOWAIT: + case MPCG_STATE_XID0IOWAIX: + case MPCG_STATE_XID7INITI: + case MPCG_STATE_XID7INITZ: + case MPCG_STATE_XID2INITW: + case MPCG_STATE_XID2INITX: + case MPCG_STATE_XID7INITW: + case MPCG_STATE_XID7INITX: + if (grp->estconnfunc) { + grp->estconnfunc(grp->port_num, -1, 0); + grp->estconnfunc = NULL; + break; + } + case MPCG_STATE_FLOWC: + case MPCG_STATE_READY: + grp->send_qllc_disc = 2; + new_len = sizeof(struct qllc); + qllcptr = kzalloc(new_len, gfp_type() | GFP_DMA); + if (qllcptr == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): qllcptr allocation error", + CTCM_FUNTAIL, dev->name); + return -ENOMEM; + } + + qllcptr->qllc_address = 0xcc; + qllcptr->qllc_commands = 0x03; + + skb = __dev_alloc_skb(new_len, GFP_ATOMIC); + + if (skb == NULL) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): skb allocation error", + CTCM_FUNTAIL, dev->name); + priv->stats.rx_dropped++; + kfree(qllcptr); + return -ENOMEM; + } + + skb_put_data(skb, qllcptr, new_len); + kfree(qllcptr); + + if (skb_headroom(skb) < 4) { + CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR, + "%s(%s): skb_headroom error", + CTCM_FUNTAIL, dev->name); + dev_kfree_skb_any(skb); + return -ENOMEM; + } + + *((__u32 *)skb_push(skb, 4)) = + priv->channel[CTCM_READ]->pdu_seq; + priv->channel[CTCM_READ]->pdu_seq++; + CTCM_PR_DBGDATA("ctcmpc: %s ToDCM_pdu_seq= %08x\n", + __func__, priv->channel[CTCM_READ]->pdu_seq); + + /* receipt of CC03 resets anticipated sequence number on + receiving side */ + priv->channel[CTCM_READ]->pdu_seq = 0x00; + skb_reset_mac_header(skb); + skb->dev = dev; + skb->protocol = htons(ETH_P_SNAP); + skb->ip_summed = CHECKSUM_UNNECESSARY; + + CTCM_D3_DUMP(skb->data, (sizeof(struct qllc) + 4)); + + netif_rx(skb); + break; + default: + break; + + } + + return 0; +} +/* --- This is the END my friend --- */ + diff --git a/drivers/s390/net/ctcm_mpc.h b/drivers/s390/net/ctcm_mpc.h new file mode 100644 index 000000000..441d7b211 --- /dev/null +++ b/drivers/s390/net/ctcm_mpc.h @@ -0,0 +1,239 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2007 + * Authors: Peter Tiedemann (ptiedem@de.ibm.com) + * + * MPC additions: + * Belinda Thompson (belindat@us.ibm.com) + * Andy Richter (richtera@us.ibm.com) + */ + +#ifndef _CTC_MPC_H_ +#define _CTC_MPC_H_ + +#include <linux/interrupt.h> +#include <linux/skbuff.h> +#include "fsm.h" + +/* + * MPC external interface + * Note that ctc_mpc_xyz are called with a lock on ................ 
+ */ + +/* port_number is the mpc device 0, 1, 2 etc mpc2 is port_number 2 */ + +/* passive open Just wait for XID2 exchange */ +extern int ctc_mpc_alloc_channel(int port, + void (*callback)(int port_num, int max_write_size)); +/* active open Alloc then send XID2 */ +extern void ctc_mpc_establish_connectivity(int port, + void (*callback)(int port_num, int rc, int max_write_size)); + +extern void ctc_mpc_dealloc_ch(int port); +extern void ctc_mpc_flow_control(int port, int flowc); + +/* + * other MPC Group prototypes and structures + */ + +#define ETH_P_SNA_DIX 0x80D5 + +/* + * Declaration of an XID2 + * + */ +#define ALLZEROS 0x0000000000000000 + +#define XID_FM2 0x20 +#define XID2_0 0x00 +#define XID2_7 0x07 +#define XID2_WRITE_SIDE 0x04 +#define XID2_READ_SIDE 0x05 + +struct xid2 { + __u8 xid2_type_id; + __u8 xid2_len; + __u32 xid2_adj_id; + __u8 xid2_rlen; + __u8 xid2_resv1; + __u8 xid2_flag1; + __u8 xid2_fmtt; + __u8 xid2_flag4; + __u16 xid2_resv2; + __u8 xid2_tgnum; + __u32 xid2_sender_id; + __u8 xid2_flag2; + __u8 xid2_option; + char xid2_resv3[8]; + __u16 xid2_resv4; + __u8 xid2_dlc_type; + __u16 xid2_resv5; + __u8 xid2_mpc_flag; + __u8 xid2_resv6; + __u16 xid2_buf_len; + char xid2_buffer[255 - (13 * sizeof(__u8) + + 2 * sizeof(__u32) + + 4 * sizeof(__u16) + + 8 * sizeof(char))]; +} __attribute__ ((packed)); + +#define XID2_LENGTH (sizeof(struct xid2)) + +struct th_header { + __u8 th_seg; + __u8 th_ch_flag; +#define TH_HAS_PDU 0xf0 +#define TH_IS_XID 0x01 +#define TH_SWEEP_REQ 0xfe +#define TH_SWEEP_RESP 0xff + __u8 th_blk_flag; +#define TH_DATA_IS_XID 0x80 +#define TH_RETRY 0x40 +#define TH_DISCONTACT 0xc0 +#define TH_SEG_BLK 0x20 +#define TH_LAST_SEG 0x10 +#define TH_PDU_PART 0x08 + __u8 th_is_xid; /* is 0x01 if this is XID */ + __u32 th_seq_num; +} __attribute__ ((packed)); + +struct th_addon { + __u32 th_last_seq; + __u32 th_resvd; +} __attribute__ ((packed)); + +struct th_sweep { + struct th_header th; + struct th_addon sw; +} __attribute__ ((packed)); + +#define TH_HEADER_LENGTH (sizeof(struct th_header)) +#define TH_SWEEP_LENGTH (sizeof(struct th_sweep)) + +#define PDU_LAST 0x80 +#define PDU_CNTL 0x40 +#define PDU_FIRST 0x20 + +struct pdu { + __u32 pdu_offset; + __u8 pdu_flag; + __u8 pdu_proto; /* 0x01 is APPN SNA */ + __u16 pdu_seq; +} __attribute__ ((packed)); + +#define PDU_HEADER_LENGTH (sizeof(struct pdu)) + +struct qllc { + __u8 qllc_address; +#define QLLC_REQ 0xFF +#define QLLC_RESP 0x00 + __u8 qllc_commands; +#define QLLC_DISCONNECT 0x53 +#define QLLC_UNSEQACK 0x73 +#define QLLC_SETMODE 0x93 +#define QLLC_EXCHID 0xBF +} __attribute__ ((packed)); + + +/* + * Definition of one MPC group + */ + +#define MAX_MPCGCHAN 10 +#define MPC_XID_TIMEOUT_VALUE 10000 +#define MPC_CHANNEL_ADD 0 +#define MPC_CHANNEL_REMOVE 1 +#define MPC_CHANNEL_ATTN 2 +#define XSIDE 1 +#define YSIDE 0 + +struct mpcg_info { + struct sk_buff *skb; + struct channel *ch; + struct xid2 *xid; + struct th_sweep *sweep; + struct th_header *th; +}; + +struct mpc_group { + struct tasklet_struct mpc_tasklet; + struct tasklet_struct mpc_tasklet2; + int changed_side; + int saved_state; + int channels_terminating; + int out_of_sequence; + int flow_off_called; + int port_num; + int port_persist; + int alloc_called; + __u32 xid2_adj_id; + __u8 xid2_tgnum; + __u32 xid2_sender_id; + int num_channel_paths; + int active_channels[2]; + __u16 group_max_buflen; + int outstanding_xid2; + int outstanding_xid7; + int outstanding_xid7_p2; + int sweep_req_pend_num; + int sweep_rsp_pend_num; + struct sk_buff *xid_skb; + char 
*xid_skb_data; + struct th_header *xid_th; + struct xid2 *xid; + char *xid_id; + struct th_header *rcvd_xid_th; + struct sk_buff *rcvd_xid_skb; + char *rcvd_xid_data; + __u8 in_sweep; + __u8 roll; + struct xid2 *saved_xid2; + void (*allochanfunc)(int, int); + int allocchan_callback_retries; + void (*estconnfunc)(int, int, int); + int estconn_callback_retries; + int estconn_called; + int xidnogood; + int send_qllc_disc; + fsm_timer timer; + fsm_instance *fsm; /* group xid fsm */ +}; + +#ifdef DEBUGDATA +void ctcmpc_dumpit(char *buf, int len); +#else +static inline void ctcmpc_dumpit(char *buf, int len) +{ +} +#endif + +#ifdef DEBUGDATA +/* + * Dump header and first 16 bytes of an sk_buff for debugging purposes. + * + * skb The struct sk_buff to dump. + * offset Offset relative to skb-data, where to start the dump. + */ +void ctcmpc_dump_skb(struct sk_buff *skb, int offset); +#else +static inline void ctcmpc_dump_skb(struct sk_buff *skb, int offset) +{} +#endif + +static inline void ctcmpc_dump32(char *buf, int len) +{ + if (len < 32) + ctcmpc_dumpit(buf, len); + else + ctcmpc_dumpit(buf, 32); +} + +int ctcmpc_open(struct net_device *); +void ctcm_ccw_check_rc(struct channel *, int, char *); +void mpc_group_ready(unsigned long adev); +void mpc_channel_action(struct channel *ch, int direction, int action); +void mpc_action_send_discontact(unsigned long thischan); +void mpc_action_discontact(fsm_instance *fi, int event, void *arg); +void ctcmpc_bh(unsigned long thischan); +#endif +/* --- This is the END my friend --- */ diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c new file mode 100644 index 000000000..e3813a7aa --- /dev/null +++ b/drivers/s390/net/ctcm_sysfs.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 
2007, 2007 + * Authors: Peter Tiedemann (ptiedem@de.ibm.com) + * + */ + +#undef DEBUG +#undef DEBUGDATA +#undef DEBUGCCW + +#define KMSG_COMPONENT "ctcm" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/device.h> +#include <linux/sysfs.h> +#include <linux/slab.h> +#include "ctcm_main.h" + +/* + * sysfs attributes + */ + +static ssize_t ctcm_buffer_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctcm_priv *priv = dev_get_drvdata(dev); + + if (!priv) + return -ENODEV; + return sprintf(buf, "%d\n", priv->buffer_size); +} + +static ssize_t ctcm_buffer_write(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct net_device *ndev; + unsigned int bs1; + struct ctcm_priv *priv = dev_get_drvdata(dev); + int rc; + + if (!(priv && priv->channel[CTCM_READ] && + priv->channel[CTCM_READ]->netdev)) { + CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev"); + return -ENODEV; + } + ndev = priv->channel[CTCM_READ]->netdev; + + rc = kstrtouint(buf, 0, &bs1); + if (rc) + goto einval; + if (bs1 > CTCM_BUFSIZE_LIMIT) + goto einval; + if (bs1 < (576 + LL_HEADER_LENGTH + 2)) + goto einval; + priv->buffer_size = bs1; /* just to overwrite the default */ + + if ((ndev->flags & IFF_RUNNING) && + (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2))) + goto einval; + + priv->channel[CTCM_READ]->max_bufsize = bs1; + priv->channel[CTCM_WRITE]->max_bufsize = bs1; + if (!(ndev->flags & IFF_RUNNING)) + ndev->mtu = bs1 - LL_HEADER_LENGTH - 2; + priv->channel[CTCM_READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; + priv->channel[CTCM_WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; + + CTCM_DBF_DEV(SETUP, ndev, buf); + return count; + +einval: + CTCM_DBF_DEV(SETUP, ndev, "buff_err"); + return -EINVAL; +} + +static void ctcm_print_statistics(struct ctcm_priv *priv) +{ + char *sbuf; + char *p; + + if (!priv) + return; + sbuf = kmalloc(2048, GFP_KERNEL); + if (sbuf == NULL) + return; + p = sbuf; + + p += sprintf(p, " Device FSM state: %s\n", + fsm_getstate_str(priv->fsm)); + p += sprintf(p, " RX channel FSM state: %s\n", + fsm_getstate_str(priv->channel[CTCM_READ]->fsm)); + p += sprintf(p, " TX channel FSM state: %s\n", + fsm_getstate_str(priv->channel[CTCM_WRITE]->fsm)); + p += sprintf(p, " Max. TX buffer used: %ld\n", + priv->channel[WRITE]->prof.maxmulti); + p += sprintf(p, " Max. chained SKBs: %ld\n", + priv->channel[WRITE]->prof.maxcqueue); + p += sprintf(p, " TX single write ops: %ld\n", + priv->channel[WRITE]->prof.doios_single); + p += sprintf(p, " TX multi write ops: %ld\n", + priv->channel[WRITE]->prof.doios_multi); + p += sprintf(p, " Netto bytes written: %ld\n", + priv->channel[WRITE]->prof.txlen); + p += sprintf(p, " Max. 
TX IO-time: %u\n", + jiffies_to_usecs(priv->channel[WRITE]->prof.tx_time)); + + printk(KERN_INFO "Statistics for %s:\n%s", + priv->channel[CTCM_WRITE]->netdev->name, sbuf); + kfree(sbuf); + return; +} + +static ssize_t stats_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ccwgroup_device *gdev = to_ccwgroupdev(dev); + struct ctcm_priv *priv = dev_get_drvdata(dev); + + if (!priv || gdev->state != CCWGROUP_ONLINE) + return -ENODEV; + ctcm_print_statistics(priv); + return sprintf(buf, "0\n"); +} + +static ssize_t stats_write(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ctcm_priv *priv = dev_get_drvdata(dev); + if (!priv) + return -ENODEV; + /* Reset statistics */ + memset(&priv->channel[WRITE]->prof, 0, + sizeof(priv->channel[CTCM_WRITE]->prof)); + return count; +} + +static ssize_t ctcm_proto_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctcm_priv *priv = dev_get_drvdata(dev); + if (!priv) + return -ENODEV; + + return sprintf(buf, "%d\n", priv->protocol); +} + +static ssize_t ctcm_proto_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int value, rc; + struct ctcm_priv *priv = dev_get_drvdata(dev); + + if (!priv) + return -ENODEV; + rc = kstrtoint(buf, 0, &value); + if (rc || + !((value == CTCM_PROTO_S390) || + (value == CTCM_PROTO_LINUX) || + (value == CTCM_PROTO_MPC) || + (value == CTCM_PROTO_OS390))) + return -EINVAL; + priv->protocol = value; + CTCM_DBF_DEV(SETUP, dev, buf); + + return count; +} + +static const char *ctcm_type[] = { + "not a channel", + "CTC/A", + "FICON channel", + "ESCON channel", + "unknown channel type", + "unsupported channel type", +}; + +static ssize_t ctcm_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ccwgroup_device *cgdev; + + cgdev = to_ccwgroupdev(dev); + if (!cgdev) + return -ENODEV; + + return sprintf(buf, "%s\n", + ctcm_type[cgdev->cdev[0]->id.driver_info]); +} + +static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write); +static DEVICE_ATTR(protocol, 0644, ctcm_proto_show, ctcm_proto_store); +static DEVICE_ATTR(type, 0444, ctcm_type_show, NULL); +static DEVICE_ATTR(stats, 0644, stats_show, stats_write); + +static struct attribute *ctcm_attr[] = { + &dev_attr_protocol.attr, + &dev_attr_type.attr, + &dev_attr_buffer.attr, + &dev_attr_stats.attr, + NULL, +}; + +static struct attribute_group ctcm_attr_group = { + .attrs = ctcm_attr, +}; +const struct attribute_group *ctcm_attr_groups[] = { + &ctcm_attr_group, + NULL, +}; diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c new file mode 100644 index 000000000..eb07862bd --- /dev/null +++ b/drivers/s390/net/fsm.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * A generic FSM based on fsm used in isdn4linux + * + */ + +#include "fsm.h" +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/timer.h> + +MODULE_AUTHOR("(C) 2000 IBM Corp. 
by Fritz Elfert (felfert@millenux.com)"); +MODULE_DESCRIPTION("Finite state machine helper functions"); +MODULE_LICENSE("GPL"); + +fsm_instance * +init_fsm(char *name, const char **state_names, const char **event_names, int nr_states, + int nr_events, const fsm_node *tmpl, int tmpl_len, gfp_t order) +{ + int i; + fsm_instance *this; + fsm_function_t *m; + fsm *f; + + this = kzalloc(sizeof(fsm_instance), order); + if (this == NULL) { + printk(KERN_WARNING + "fsm(%s): init_fsm: Couldn't alloc instance\n", name); + return NULL; + } + strlcpy(this->name, name, sizeof(this->name)); + init_waitqueue_head(&this->wait_q); + + f = kzalloc(sizeof(fsm), order); + if (f == NULL) { + printk(KERN_WARNING + "fsm(%s): init_fsm: Couldn't alloc fsm\n", name); + kfree_fsm(this); + return NULL; + } + f->nr_events = nr_events; + f->nr_states = nr_states; + f->event_names = event_names; + f->state_names = state_names; + this->f = f; + + m = kcalloc(nr_states*nr_events, sizeof(fsm_function_t), order); + if (m == NULL) { + printk(KERN_WARNING + "fsm(%s): init_fsm: Couldn't alloc jumptable\n", name); + kfree_fsm(this); + return NULL; + } + f->jumpmatrix = m; + + for (i = 0; i < tmpl_len; i++) { + if ((tmpl[i].cond_state >= nr_states) || + (tmpl[i].cond_event >= nr_events) ) { + printk(KERN_ERR + "fsm(%s): init_fsm: Bad template l=%d st(%ld/%ld) ev(%ld/%ld)\n", + name, i, (long)tmpl[i].cond_state, (long)f->nr_states, + (long)tmpl[i].cond_event, (long)f->nr_events); + kfree_fsm(this); + return NULL; + } else + m[nr_states * tmpl[i].cond_event + tmpl[i].cond_state] = + tmpl[i].function; + } + return this; +} + +void +kfree_fsm(fsm_instance *this) +{ + if (this) { + if (this->f) { + kfree(this->f->jumpmatrix); + kfree(this->f); + } + kfree(this); + } else + printk(KERN_WARNING + "fsm: kfree_fsm called with NULL argument\n"); +} + +#if FSM_DEBUG_HISTORY +void +fsm_print_history(fsm_instance *fi) +{ + int idx = 0; + int i; + + if (fi->history_size >= FSM_HISTORY_SIZE) + idx = fi->history_index; + + printk(KERN_DEBUG "fsm(%s): History:\n", fi->name); + for (i = 0; i < fi->history_size; i++) { + int e = fi->history[idx].event; + int s = fi->history[idx++].state; + idx %= FSM_HISTORY_SIZE; + if (e == -1) + printk(KERN_DEBUG " S=%s\n", + fi->f->state_names[s]); + else + printk(KERN_DEBUG " S=%s E=%s\n", + fi->f->state_names[s], + fi->f->event_names[e]); + } + fi->history_size = fi->history_index = 0; +} + +void +fsm_record_history(fsm_instance *fi, int state, int event) +{ + fi->history[fi->history_index].state = state; + fi->history[fi->history_index++].event = event; + fi->history_index %= FSM_HISTORY_SIZE; + if (fi->history_size < FSM_HISTORY_SIZE) + fi->history_size++; +} +#endif + +const char * +fsm_getstate_str(fsm_instance *fi) +{ + int st = atomic_read(&fi->state); + if (st >= fi->f->nr_states) + return "Invalid"; + return fi->f->state_names[st]; +} + +static void +fsm_expire_timer(struct timer_list *t) +{ + fsm_timer *this = from_timer(this, t, tl); +#if FSM_TIMER_DEBUG + printk(KERN_DEBUG "fsm(%s): Timer %p expired\n", + this->fi->name, this); +#endif + fsm_event(this->fi, this->expire_event, this->event_arg); +} + +void +fsm_settimer(fsm_instance *fi, fsm_timer *this) +{ + this->fi = fi; +#if FSM_TIMER_DEBUG + printk(KERN_DEBUG "fsm(%s): Create timer %p\n", fi->name, + this); +#endif + timer_setup(&this->tl, fsm_expire_timer, 0); +} + +void +fsm_deltimer(fsm_timer *this) +{ +#if FSM_TIMER_DEBUG + printk(KERN_DEBUG "fsm(%s): Delete timer %p\n", this->fi->name, + this); +#endif + del_timer(&this->tl); +} + +int 
+fsm_addtimer(fsm_timer *this, int millisec, int event, void *arg) +{ + +#if FSM_TIMER_DEBUG + printk(KERN_DEBUG "fsm(%s): Add timer %p %dms\n", + this->fi->name, this, millisec); +#endif + + timer_setup(&this->tl, fsm_expire_timer, 0); + this->expire_event = event; + this->event_arg = arg; + this->tl.expires = jiffies + (millisec * HZ) / 1000; + add_timer(&this->tl); + return 0; +} + +/* FIXME: this function is never used, why */ +void +fsm_modtimer(fsm_timer *this, int millisec, int event, void *arg) +{ + +#if FSM_TIMER_DEBUG + printk(KERN_DEBUG "fsm(%s): Restart timer %p %dms\n", + this->fi->name, this, millisec); +#endif + + del_timer(&this->tl); + timer_setup(&this->tl, fsm_expire_timer, 0); + this->expire_event = event; + this->event_arg = arg; + this->tl.expires = jiffies + (millisec * HZ) / 1000; + add_timer(&this->tl); +} + +EXPORT_SYMBOL(init_fsm); +EXPORT_SYMBOL(kfree_fsm); +EXPORT_SYMBOL(fsm_settimer); +EXPORT_SYMBOL(fsm_deltimer); +EXPORT_SYMBOL(fsm_addtimer); +EXPORT_SYMBOL(fsm_modtimer); +EXPORT_SYMBOL(fsm_getstate_str); + +#if FSM_DEBUG_HISTORY +EXPORT_SYMBOL(fsm_print_history); +EXPORT_SYMBOL(fsm_record_history); +#endif diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h new file mode 100644 index 000000000..16dc071a2 --- /dev/null +++ b/drivers/s390/net/fsm.h @@ -0,0 +1,266 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FSM_H_ +#define _FSM_H_ + +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/timer.h> +#include <linux/time.h> +#include <linux/slab.h> +#include <linux/sched.h> +#include <linux/string.h> +#include <linux/atomic.h> + +/** + * Define this to get debugging messages. + */ +#define FSM_DEBUG 0 + +/** + * Define this to get debugging messages for + * timer handling. + */ +#define FSM_TIMER_DEBUG 0 + +/** + * Define these to record a history of + * Events/Statechanges and print it if an + * action_function is not found. + */ +#define FSM_DEBUG_HISTORY 0 +#define FSM_HISTORY_SIZE 40 + +struct fsm_instance_t; + +/** + * Definition of an action function, called by a FSM + */ +typedef void (*fsm_function_t)(struct fsm_instance_t *, int, void *); + +/** + * Internal jump table for a FSM + */ +typedef struct { + fsm_function_t *jumpmatrix; + int nr_events; + int nr_states; + const char **event_names; + const char **state_names; +} fsm; + +#if FSM_DEBUG_HISTORY +/** + * Element of State/Event history used for debugging. + */ +typedef struct { + int state; + int event; +} fsm_history; +#endif + +/** + * Representation of a FSM + */ +typedef struct fsm_instance_t { + fsm *f; + atomic_t state; + char name[16]; + void *userdata; + int userint; + wait_queue_head_t wait_q; +#if FSM_DEBUG_HISTORY + int history_index; + int history_size; + fsm_history history[FSM_HISTORY_SIZE]; +#endif +} fsm_instance; + +/** + * Description of a state-event combination + */ +typedef struct { + int cond_state; + int cond_event; + fsm_function_t function; +} fsm_node; + +/** + * Description of a FSM Timer. + */ +typedef struct { + fsm_instance *fi; + struct timer_list tl; + int expire_event; + void *event_arg; +} fsm_timer; + +/** + * Creates an FSM + * + * @param name Name of this instance for logging purposes. + * @param state_names An array of names for all states for logging purposes. + * @param event_names An array of names for all events for logging purposes. + * @param nr_states Number of states for this instance. + * @param nr_events Number of events for this instance. + * @param tmpl An array of fsm_nodes, describing this FSM.
+ * @param tmpl_len Length of the describing array. + * @param order Parameter for allocation of the FSM data structs. + */ +extern fsm_instance * +init_fsm(char *name, const char **state_names, + const char **event_names, + int nr_states, int nr_events, const fsm_node *tmpl, + int tmpl_len, gfp_t order); + +/** + * Releases an FSM + * + * @param fi Pointer to an FSM, previously created with init_fsm. + */ +extern void kfree_fsm(fsm_instance *fi); + +#if FSM_DEBUG_HISTORY +extern void +fsm_print_history(fsm_instance *fi); + +extern void +fsm_record_history(fsm_instance *fi, int state, int event); +#endif + +/** + * Emits an event to a FSM. + * If an action function is defined for the current state/event combination, + * this function is called. + * + * @param fi Pointer to FSM which should receive the event. + * @param event The event to be delivered. + * @param arg A generic argument, handed to the action function. + * + * @return 0 on success, + * 1 if current state or event is out of range + * !0 if state and event in range, but no action defined. + */ +static inline int +fsm_event(fsm_instance *fi, int event, void *arg) +{ + fsm_function_t r; + int state = atomic_read(&fi->state); + + if ((state >= fi->f->nr_states) || + (event >= fi->f->nr_events) ) { + printk(KERN_ERR "fsm(%s): Invalid state st(%ld/%ld) ev(%d/%ld)\n", + fi->name, (long)state,(long)fi->f->nr_states, event, + (long)fi->f->nr_events); +#if FSM_DEBUG_HISTORY + fsm_print_history(fi); +#endif + return 1; + } + r = fi->f->jumpmatrix[fi->f->nr_states * event + state]; + if (r) { +#if FSM_DEBUG + printk(KERN_DEBUG "fsm(%s): state %s event %s\n", + fi->name, fi->f->state_names[state], + fi->f->event_names[event]); +#endif +#if FSM_DEBUG_HISTORY + fsm_record_history(fi, state, event); +#endif + r(fi, event, arg); + return 0; + } else { +#if FSM_DEBUG || FSM_DEBUG_HISTORY + printk(KERN_DEBUG "fsm(%s): no function for event %s in state %s\n", + fi->name, fi->f->event_names[event], + fi->f->state_names[state]); +#endif +#if FSM_DEBUG_HISTORY + fsm_print_history(fi); +#endif + return !0; + } +} + +/** + * Modifies the state of an FSM. + * This does <em>not</em> trigger an event nor call an action function. + * + * @param fi Pointer to FSM + * @param state The new state for this FSM. + */ +static inline void +fsm_newstate(fsm_instance *fi, int newstate) +{ + atomic_set(&fi->state,newstate); +#if FSM_DEBUG_HISTORY + fsm_record_history(fi, newstate, -1); +#endif +#if FSM_DEBUG + printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name, + fi->f->state_names[newstate]); +#endif + wake_up(&fi->wait_q); +} + +/** + * Retrieves the state of an FSM + * + * @param fi Pointer to FSM + * + * @return The current state of the FSM. + */ +static inline int +fsm_getstate(fsm_instance *fi) +{ + return atomic_read(&fi->state); +} + +/** + * Retrieves the name of the state of an FSM + * + * @param fi Pointer to FSM + * + * @return The current state of the FSM in a human readable form. + */ +extern const char *fsm_getstate_str(fsm_instance *fi); + +/** + * Initializes a timer for an FSM. + * This prepares an fsm_timer for usage with fsm_addtimer. + * + * @param fi Pointer to FSM + * @param timer The timer to be initialized. + */ +extern void fsm_settimer(fsm_instance *fi, fsm_timer *); + +/** + * Clears a pending timer of an FSM instance. + * + * @param timer The timer to clear. + */ +extern void fsm_deltimer(fsm_timer *timer); + +/** + * Adds and starts a timer to an FSM instance. + * + * @param timer The timer to be added.
The field fi of that timer + * must have been set to point to the instance. + * @param millisec Duration, after which the timer should expire. + * @param event Event, to trigger if timer expires. + * @param arg Generic argument, provided to expiry function. + * + * @return 0 on success, -1 if timer is already active. + */ +extern int fsm_addtimer(fsm_timer *timer, int millisec, int event, void *arg); + +/** + * Modifies a timer of an FSM. + * + * @param timer The timer to modify. + * @param millisec Duration, after which the timer should expire. + * @param event Event, to trigger if timer expires. + * @param arg Generic argument, provided to expiry function. + */ +extern void fsm_modtimer(fsm_timer *timer, int millisec, int event, void *arg); + +#endif /* _FSM_H_ */ diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h new file mode 100644 index 000000000..0aab90817 --- /dev/null +++ b/drivers/s390/net/ism.h @@ -0,0 +1,221 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef S390_ISM_H +#define S390_ISM_H + +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <net/smc.h> + +#define UTIL_STR_LEN 16 + +/* + * Do not use the first word of the DMB bits to ensure 8 byte aligned access. + */ +#define ISM_DMB_WORD_OFFSET 1 +#define ISM_DMB_BIT_OFFSET (ISM_DMB_WORD_OFFSET * 32) +#define ISM_NR_DMBS 1920 + +#define ISM_REG_SBA 0x1 +#define ISM_REG_IEQ 0x2 +#define ISM_READ_GID 0x3 +#define ISM_ADD_VLAN_ID 0x4 +#define ISM_DEL_VLAN_ID 0x5 +#define ISM_SET_VLAN 0x6 +#define ISM_RESET_VLAN 0x7 +#define ISM_QUERY_INFO 0x8 +#define ISM_QUERY_RGID 0x9 +#define ISM_REG_DMB 0xA +#define ISM_UNREG_DMB 0xB +#define ISM_SIGNAL_IEQ 0xE +#define ISM_UNREG_SBA 0x11 +#define ISM_UNREG_IEQ 0x12 + +#define ISM_ERROR 0xFFFF + +struct ism_req_hdr { + u32 cmd; + u16 : 16; + u16 len; +}; + +struct ism_resp_hdr { + u32 cmd; + u16 ret; + u16 len; +}; + +union ism_reg_sba { + struct { + struct ism_req_hdr hdr; + u64 sba; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +union ism_reg_ieq { + struct { + struct ism_req_hdr hdr; + u64 ieq; + u64 len; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +union ism_read_gid { + struct { + struct ism_req_hdr hdr; + } request; + struct { + struct ism_resp_hdr hdr; + u64 gid; + } response; +} __aligned(16); + +union ism_qi { + struct { + struct ism_req_hdr hdr; + } request; + struct { + struct ism_resp_hdr hdr; + u32 version; + u32 max_len; + u64 ism_state; + u64 my_gid; + u64 sba; + u64 ieq; + u32 ieq_len; + u32 : 32; + u32 dmbs_owned; + u32 dmbs_used; + u32 vlan_required; + u32 vlan_nr_ids; + u16 vlan_id[64]; + } response; +} __aligned(64); + +union ism_query_rgid { + struct { + struct ism_req_hdr hdr; + u64 rgid; + u32 vlan_valid; + u32 vlan_id; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +union ism_reg_dmb { + struct { + struct ism_req_hdr hdr; + u64 dmb; + u32 dmb_len; + u32 sba_idx; + u32 vlan_valid; + u32 vlan_id; + u64 rgid; + } request; + struct { + struct ism_resp_hdr hdr; + u64 dmb_tok; + } response; +} __aligned(32); + +union ism_sig_ieq { + struct { + struct ism_req_hdr hdr; + u64 rgid; + u32 trigger_irq; + u32 event_code; + u64 info; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(32); + +union ism_unreg_dmb { + struct { + struct ism_req_hdr hdr; + u64 dmb_tok; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +union ism_cmd_simple { + struct { + struct 
ism_req_hdr hdr; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(8); + +union ism_set_vlan_id { + struct { + struct ism_req_hdr hdr; + u64 vlan_id; + } request; + struct { + struct ism_resp_hdr hdr; + } response; +} __aligned(16); + +struct ism_eq_header { + u64 idx; + u64 ieq_len; + u64 entry_len; + u64 : 64; +}; + +struct ism_eq { + struct ism_eq_header header; + struct smcd_event entry[15]; +}; + +struct ism_sba { + u32 s : 1; /* summary bit */ + u32 e : 1; /* event bit */ + u32 : 30; + u32 dmb_bits[ISM_NR_DMBS / 32]; + u32 reserved[3]; + u16 dmbe_mask[ISM_NR_DMBS]; +}; + +struct ism_dev { + spinlock_t lock; + struct pci_dev *pdev; + struct smcd_dev *smcd; + + void __iomem *ctl; + + struct ism_sba *sba; + dma_addr_t sba_dma_addr; + DECLARE_BITMAP(sba_bitmap, ISM_NR_DMBS); + + struct ism_eq *ieq; + dma_addr_t ieq_dma_addr; + + int ieq_idx; +}; + +#define ISM_CREATE_REQ(dmb, idx, sf, offset) \ + ((dmb) | (idx) << 24 | (sf) << 23 | (offset)) + +static inline int __ism_move(struct ism_dev *ism, u64 dmb_req, void *data, + unsigned int size) +{ + struct zpci_dev *zdev = to_zpci(ism->pdev); + u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, size); + + return zpci_write_block(req, data, dmb_req); +} + +#endif /* S390_ISM_H */ diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c new file mode 100644 index 000000000..00cc96341 --- /dev/null +++ b/drivers/s390/net/ism_drv.c @@ -0,0 +1,629 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ISM driver for s390. + * + * Copyright IBM Corp. 2018 + */ +#define KMSG_COMPONENT "ism" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/err.h> +#include <net/smc.h> + +#include <asm/debug.h> + +#include "ism.h" + +MODULE_DESCRIPTION("ISM driver for s390"); +MODULE_LICENSE("GPL"); + +#define PCI_DEVICE_ID_IBM_ISM 0x04ED +#define DRV_NAME "ism" + +static const struct pci_device_id ism_device_table[] = { + { PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, ism_device_table); + +static debug_info_t *ism_debug_info; + +static int ism_cmd(struct ism_dev *ism, void *cmd) +{ + struct ism_req_hdr *req = cmd; + struct ism_resp_hdr *resp = cmd; + + memcpy_toio(ism->ctl + sizeof(*req), req + 1, req->len - sizeof(*req)); + memcpy_toio(ism->ctl, req, sizeof(*req)); + + WRITE_ONCE(resp->ret, ISM_ERROR); + + memcpy_fromio(resp, ism->ctl, sizeof(*resp)); + if (resp->ret) { + debug_text_event(ism_debug_info, 0, "cmd failure"); + debug_event(ism_debug_info, 0, resp, sizeof(*resp)); + goto out; + } + memcpy_fromio(resp + 1, ism->ctl + sizeof(*resp), + resp->len - sizeof(*resp)); +out: + return resp->ret; +} + +static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code) +{ + union ism_cmd_simple cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = cmd_code; + cmd.request.hdr.len = sizeof(cmd.request); + + return ism_cmd(ism, &cmd); +} + +static int query_info(struct ism_dev *ism) +{ + union ism_qi cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_QUERY_INFO; + cmd.request.hdr.len = sizeof(cmd.request); + + if (ism_cmd(ism, &cmd)) + goto out; + + debug_text_event(ism_debug_info, 3, "query info"); + debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response)); +out: + return 0; +} + +static int register_sba(struct ism_dev *ism) +{ + union ism_reg_sba cmd; + dma_addr_t dma_handle; + struct ism_sba *sba; + + sba = 
dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE, + &dma_handle, GFP_KERNEL); + if (!sba) + return -ENOMEM; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_REG_SBA; + cmd.request.hdr.len = sizeof(cmd.request); + cmd.request.sba = dma_handle; + + if (ism_cmd(ism, &cmd)) { + dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle); + return -EIO; + } + + ism->sba = sba; + ism->sba_dma_addr = dma_handle; + + return 0; +} + +static int register_ieq(struct ism_dev *ism) +{ + union ism_reg_ieq cmd; + dma_addr_t dma_handle; + struct ism_eq *ieq; + + ieq = dma_zalloc_coherent(&ism->pdev->dev, PAGE_SIZE, + &dma_handle, GFP_KERNEL); + if (!ieq) + return -ENOMEM; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_REG_IEQ; + cmd.request.hdr.len = sizeof(cmd.request); + cmd.request.ieq = dma_handle; + cmd.request.len = sizeof(*ieq); + + if (ism_cmd(ism, &cmd)) { + dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle); + return -EIO; + } + + ism->ieq = ieq; + ism->ieq_idx = -1; + ism->ieq_dma_addr = dma_handle; + + return 0; +} + +static int unregister_sba(struct ism_dev *ism) +{ + int ret; + + if (!ism->sba) + return 0; + + ret = ism_cmd_simple(ism, ISM_UNREG_SBA); + if (ret && ret != ISM_ERROR) + return -EIO; + + dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, + ism->sba, ism->sba_dma_addr); + + ism->sba = NULL; + ism->sba_dma_addr = 0; + + return 0; +} + +static int unregister_ieq(struct ism_dev *ism) +{ + int ret; + + if (!ism->ieq) + return 0; + + ret = ism_cmd_simple(ism, ISM_UNREG_IEQ); + if (ret && ret != ISM_ERROR) + return -EIO; + + dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, + ism->ieq, ism->ieq_dma_addr); + + ism->ieq = NULL; + ism->ieq_dma_addr = 0; + + return 0; +} + +static int ism_read_local_gid(struct ism_dev *ism) +{ + union ism_read_gid cmd; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_READ_GID; + cmd.request.hdr.len = sizeof(cmd.request); + + ret = ism_cmd(ism, &cmd); + if (ret) + goto out; + + ism->smcd->local_gid = cmd.response.gid; +out: + return ret; +} + +static int ism_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid, + u32 vid) +{ + struct ism_dev *ism = smcd->priv; + union ism_query_rgid cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_QUERY_RGID; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.rgid = rgid; + cmd.request.vlan_valid = vid_valid; + cmd.request.vlan_id = vid; + + return ism_cmd(ism, &cmd); +} + +static void ism_free_dmb(struct ism_dev *ism, struct smcd_dmb *dmb) +{ + clear_bit(dmb->sba_idx, ism->sba_bitmap); + dma_free_coherent(&ism->pdev->dev, dmb->dmb_len, + dmb->cpu_addr, dmb->dma_addr); +} + +static int ism_alloc_dmb(struct ism_dev *ism, struct smcd_dmb *dmb) +{ + unsigned long bit; + + if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev)) + return -EINVAL; + + if (!dmb->sba_idx) { + bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS, + ISM_DMB_BIT_OFFSET); + if (bit == ISM_NR_DMBS) + return -ENOMEM; + + dmb->sba_idx = bit; + } + if (dmb->sba_idx < ISM_DMB_BIT_OFFSET || + test_and_set_bit(dmb->sba_idx, ism->sba_bitmap)) + return -EINVAL; + + dmb->cpu_addr = dma_zalloc_coherent(&ism->pdev->dev, dmb->dmb_len, + &dmb->dma_addr, GFP_KERNEL | + __GFP_NOWARN | __GFP_NOMEMALLOC | + __GFP_COMP | __GFP_NORETRY); + if (!dmb->cpu_addr) + clear_bit(dmb->sba_idx, ism->sba_bitmap); + + return dmb->cpu_addr ? 
0 : -ENOMEM; +} + +static int ism_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb) +{ + struct ism_dev *ism = smcd->priv; + union ism_reg_dmb cmd; + int ret; + + ret = ism_alloc_dmb(ism, dmb); + if (ret) + goto out; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_REG_DMB; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.dmb = dmb->dma_addr; + cmd.request.dmb_len = dmb->dmb_len; + cmd.request.sba_idx = dmb->sba_idx; + cmd.request.vlan_valid = dmb->vlan_valid; + cmd.request.vlan_id = dmb->vlan_id; + cmd.request.rgid = dmb->rgid; + + ret = ism_cmd(ism, &cmd); + if (ret) { + ism_free_dmb(ism, dmb); + goto out; + } + dmb->dmb_tok = cmd.response.dmb_tok; +out: + return ret; +} + +static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb) +{ + struct ism_dev *ism = smcd->priv; + union ism_unreg_dmb cmd; + int ret; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_UNREG_DMB; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.dmb_tok = dmb->dmb_tok; + + ret = ism_cmd(ism, &cmd); + if (ret && ret != ISM_ERROR) + goto out; + + ism_free_dmb(ism, dmb); +out: + return ret; +} + +static int ism_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id) +{ + struct ism_dev *ism = smcd->priv; + union ism_set_vlan_id cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_ADD_VLAN_ID; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.vlan_id = vlan_id; + + return ism_cmd(ism, &cmd); +} + +static int ism_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id) +{ + struct ism_dev *ism = smcd->priv; + union ism_set_vlan_id cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_DEL_VLAN_ID; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.vlan_id = vlan_id; + + return ism_cmd(ism, &cmd); +} + +static int ism_set_vlan_required(struct smcd_dev *smcd) +{ + return ism_cmd_simple(smcd->priv, ISM_SET_VLAN); +} + +static int ism_reset_vlan_required(struct smcd_dev *smcd) +{ + return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN); +} + +static int ism_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq, + u32 event_code, u64 info) +{ + struct ism_dev *ism = smcd->priv; + union ism_sig_ieq cmd; + + memset(&cmd, 0, sizeof(cmd)); + cmd.request.hdr.cmd = ISM_SIGNAL_IEQ; + cmd.request.hdr.len = sizeof(cmd.request); + + cmd.request.rgid = rgid; + cmd.request.trigger_irq = trigger_irq; + cmd.request.event_code = event_code; + cmd.request.info = info; + + return ism_cmd(ism, &cmd); +} + +static unsigned int max_bytes(unsigned int start, unsigned int len, + unsigned int boundary) +{ + return min(boundary - (start & (boundary - 1)), len); +} + +static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx, + bool sf, unsigned int offset, void *data, unsigned int size) +{ + struct ism_dev *ism = smcd->priv; + unsigned int bytes; + u64 dmb_req; + int ret; + + while (size) { + bytes = max_bytes(offset, size, PAGE_SIZE); + dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? 
sf : 0, + offset); + + ret = __ism_move(ism, dmb_req, data, bytes); + if (ret) + return ret; + + size -= bytes; + data += bytes; + offset += bytes; + } + + return 0; +} + +static void ism_handle_event(struct ism_dev *ism) +{ + struct smcd_event *entry; + + while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) { + if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry)) + ism->ieq_idx = 0; + + entry = &ism->ieq->entry[ism->ieq_idx]; + debug_event(ism_debug_info, 2, entry, sizeof(*entry)); + smcd_handle_event(ism->smcd, entry); + } +} + +static irqreturn_t ism_handle_irq(int irq, void *data) +{ + struct ism_dev *ism = data; + unsigned long bit, end; + unsigned long *bv; + + bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET]; + end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET; + + spin_lock(&ism->lock); + ism->sba->s = 0; + barrier(); + for (bit = 0;;) { + bit = find_next_bit_inv(bv, end, bit); + if (bit >= end) + break; + + clear_bit_inv(bit, bv); + ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0; + barrier(); + smcd_handle_irq(ism->smcd, bit + ISM_DMB_BIT_OFFSET); + } + + if (ism->sba->e) { + ism->sba->e = 0; + barrier(); + ism_handle_event(ism); + } + spin_unlock(&ism->lock); + return IRQ_HANDLED; +} + +static const struct smcd_ops ism_ops = { + .query_remote_gid = ism_query_rgid, + .register_dmb = ism_register_dmb, + .unregister_dmb = ism_unregister_dmb, + .add_vlan_id = ism_add_vlan_id, + .del_vlan_id = ism_del_vlan_id, + .set_vlan_required = ism_set_vlan_required, + .reset_vlan_required = ism_reset_vlan_required, + .signal_event = ism_signal_ieq, + .move_data = ism_move, +}; + +static int ism_dev_init(struct ism_dev *ism) +{ + struct pci_dev *pdev = ism->pdev; + int ret; + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); + if (ret <= 0) + goto out; + + ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0, + pci_name(pdev), ism); + if (ret) + goto free_vectors; + + ret = register_sba(ism); + if (ret) + goto free_irq; + + ret = register_ieq(ism); + if (ret) + goto unreg_sba; + + ret = ism_read_local_gid(ism); + if (ret) + goto unreg_ieq; + + ret = smcd_register_dev(ism->smcd); + if (ret) + goto unreg_ieq; + + query_info(ism); + return 0; + +unreg_ieq: + unregister_ieq(ism); +unreg_sba: + unregister_sba(ism); +free_irq: + free_irq(pci_irq_vector(pdev, 0), ism); +free_vectors: + pci_free_irq_vectors(pdev); +out: + return ret; +} + +static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct ism_dev *ism; + int ret; + + ism = kzalloc(sizeof(*ism), GFP_KERNEL); + if (!ism) + return -ENOMEM; + + spin_lock_init(&ism->lock); + dev_set_drvdata(&pdev->dev, ism); + ism->pdev = pdev; + + ret = pci_enable_device_mem(pdev); + if (ret) + goto err; + + ret = pci_request_mem_regions(pdev, DRV_NAME); + if (ret) + goto err_disable; + + ism->ctl = pci_iomap(pdev, 2, 0); + if (!ism->ctl) + goto err_resource; + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) + goto err_unmap; + + pci_set_dma_seg_boundary(pdev, SZ_1M - 1); + pci_set_dma_max_seg_size(pdev, SZ_1M); + pci_set_master(pdev); + + ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops, + ISM_NR_DMBS); + if (!ism->smcd) + goto err_unmap; + + ism->smcd->priv = ism; + ret = ism_dev_init(ism); + if (ret) + goto err_free; + + return 0; + +err_free: + smcd_free_dev(ism->smcd); +err_unmap: + pci_iounmap(pdev, ism->ctl); +err_resource: + pci_release_mem_regions(pdev); +err_disable: + pci_disable_device(pdev); +err: + kfree(ism); + dev_set_drvdata(&pdev->dev, 
NULL); + return ret; +} + +static void ism_dev_exit(struct ism_dev *ism) +{ + struct pci_dev *pdev = ism->pdev; + + smcd_unregister_dev(ism->smcd); + unregister_ieq(ism); + unregister_sba(ism); + free_irq(pci_irq_vector(pdev, 0), ism); + pci_free_irq_vectors(pdev); +} + +static void ism_remove(struct pci_dev *pdev) +{ + struct ism_dev *ism = dev_get_drvdata(&pdev->dev); + + ism_dev_exit(ism); + + smcd_free_dev(ism->smcd); + pci_iounmap(pdev, ism->ctl); + pci_release_mem_regions(pdev); + pci_disable_device(pdev); + dev_set_drvdata(&pdev->dev, NULL); + kfree(ism); +} + +static int ism_suspend(struct device *dev) +{ + struct ism_dev *ism = dev_get_drvdata(dev); + + ism_dev_exit(ism); + return 0; +} + +static int ism_resume(struct device *dev) +{ + struct ism_dev *ism = dev_get_drvdata(dev); + + return ism_dev_init(ism); +} + +static SIMPLE_DEV_PM_OPS(ism_pm_ops, ism_suspend, ism_resume); + +static struct pci_driver ism_driver = { + .name = DRV_NAME, + .id_table = ism_device_table, + .probe = ism_probe, + .remove = ism_remove, + .driver = { + .pm = &ism_pm_ops, + }, +}; + +static int __init ism_init(void) +{ + int ret; + + ism_debug_info = debug_register("ism", 2, 1, 16); + if (!ism_debug_info) + return -ENODEV; + + debug_register_view(ism_debug_info, &debug_hex_ascii_view); + ret = pci_register_driver(&ism_driver); + if (ret) + debug_unregister(ism_debug_info); + + return ret; +} + +static void __exit ism_exit(void) +{ + pci_unregister_driver(&ism_driver); + debug_unregister(ism_debug_info); +} + +module_init(ism_init); +module_exit(ism_exit); diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c new file mode 100644 index 000000000..d8f99ff53 --- /dev/null +++ b/drivers/s390/net/lcs.c @@ -0,0 +1,2471 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Linux for S/390 Lan Channel Station Network Driver + * + * Copyright IBM Corp. 1999, 2009 + * Author(s): Original Code written by + * DJ Barrow <djbarrow@de.ibm.com,barrow_dj@yahoo.com> + * Rewritten by + * Frank Pavlic <fpavlic@de.ibm.com> and + * Martin Schwidefsky <schwidefsky@de.ibm.com> + */ + +#define KMSG_COMPONENT "lcs" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/if.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/fddidevice.h> +#include <linux/inetdevice.h> +#include <linux/in.h> +#include <linux/igmp.h> +#include <linux/delay.h> +#include <linux/kthread.h> +#include <linux/slab.h> +#include <net/arp.h> +#include <net/ip.h> + +#include <asm/debug.h> +#include <asm/idals.h> +#include <asm/timex.h> +#include <linux/device.h> +#include <asm/ccwgroup.h> + +#include "lcs.h" + + +#if !defined(CONFIG_ETHERNET) && !defined(CONFIG_FDDI) +#error Cannot compile lcs.c without some net devices switched on. +#endif + +/** + * initialization string for output + */ + +static char version[] __initdata = "LCS driver"; + +/** + * the root device for lcs group devices + */ +static struct device *lcs_root_dev; + +/** + * Some prototypes. 
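Before the LCS driver continues below, one note on the ISM control path above: every firmware command in ism_drv.c is issued through ism_cmd(), which copies the request into the MMIO control area, reads the response header back, and treats a non-zero ret field as failure (logging it via the s390 debug facility). A hypothetical new command would follow the same shape as ism_add_vlan_id(); the union layout and command code 0x7F below are invented for illustration only:

/* Illustrative only: shape of a new ISM command built on ism_cmd(). */
union ism_demo_cmd {
	struct {
		struct ism_req_hdr hdr;
		u64 payload;
	} request;
	struct {
		struct ism_resp_hdr hdr;
	} response;
} __aligned(16);

static int ism_send_demo_cmd(struct ism_dev *ism, u64 payload)
{
	union ism_demo_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = 0x7F;		/* hypothetical command code */
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.payload = payload;

	return ism_cmd(ism, &cmd);	/* 0 on success, firmware ret otherwise */
}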
+ */ +static void lcs_tasklet(unsigned long); +static void lcs_start_kernel_thread(struct work_struct *); +static void lcs_get_frames_cb(struct lcs_channel *, struct lcs_buffer *); +#ifdef CONFIG_IP_MULTICAST +static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *); +#endif /* CONFIG_IP_MULTICAST */ +static int lcs_recovery(void *ptr); + +/** + * Debug Facility Stuff + */ +static char debug_buffer[255]; +static debug_info_t *lcs_dbf_setup; +static debug_info_t *lcs_dbf_trace; + +/** + * LCS Debug Facility functions + */ +static void +lcs_unregister_debug_facility(void) +{ + debug_unregister(lcs_dbf_setup); + debug_unregister(lcs_dbf_trace); +} + +static int +lcs_register_debug_facility(void) +{ + lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8); + lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8); + if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) { + pr_err("Not enough memory for debug facility.\n"); + lcs_unregister_debug_facility(); + return -ENOMEM; + } + debug_register_view(lcs_dbf_setup, &debug_hex_ascii_view); + debug_set_level(lcs_dbf_setup, 2); + debug_register_view(lcs_dbf_trace, &debug_hex_ascii_view); + debug_set_level(lcs_dbf_trace, 2); + return 0; +} + +/** + * Allocate io buffers. + */ +static int +lcs_alloc_channel(struct lcs_channel *channel) +{ + int cnt; + + LCS_DBF_TEXT(2, setup, "ichalloc"); + for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { + /* alloc memory fo iobuffer */ + channel->iob[cnt].data = + kzalloc(LCS_IOBUFFERSIZE, GFP_DMA | GFP_KERNEL); + if (channel->iob[cnt].data == NULL) + break; + channel->iob[cnt].state = LCS_BUF_STATE_EMPTY; + } + if (cnt < LCS_NUM_BUFFS) { + /* Not all io buffers could be allocated. */ + LCS_DBF_TEXT(2, setup, "echalloc"); + while (cnt-- > 0) + kfree(channel->iob[cnt].data); + return -ENOMEM; + } + return 0; +} + +/** + * Free io buffers. + */ +static void +lcs_free_channel(struct lcs_channel *channel) +{ + int cnt; + + LCS_DBF_TEXT(2, setup, "ichfree"); + for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { + kfree(channel->iob[cnt].data); + channel->iob[cnt].data = NULL; + } +} + +/* + * Cleanup channel. + */ +static void +lcs_cleanup_channel(struct lcs_channel *channel) +{ + LCS_DBF_TEXT(3, setup, "cleanch"); + /* Kill write channel tasklets. */ + tasklet_kill(&channel->irq_tasklet); + /* Free channel buffers. */ + lcs_free_channel(channel); +} + +/** + * LCS free memory for card and channels. + */ +static void +lcs_free_card(struct lcs_card *card) +{ + LCS_DBF_TEXT(2, setup, "remcard"); + LCS_DBF_HEX(2, setup, &card, sizeof(void*)); + kfree(card); +} + +/** + * LCS alloc memory for card and channels + */ +static struct lcs_card * +lcs_alloc_card(void) +{ + struct lcs_card *card; + int rc; + + LCS_DBF_TEXT(2, setup, "alloclcs"); + + card = kzalloc(sizeof(struct lcs_card), GFP_KERNEL | GFP_DMA); + if (card == NULL) + return NULL; + card->lan_type = LCS_FRAME_TYPE_AUTO; + card->pkt_seq = 0; + card->lancmd_timeout = LCS_LANCMD_TIMEOUT_DEFAULT; + /* Allocate io buffers for the read channel. */ + rc = lcs_alloc_channel(&card->read); + if (rc){ + LCS_DBF_TEXT(2, setup, "iccwerr"); + lcs_free_card(card); + return NULL; + } + /* Allocate io buffers for the write channel. */ + rc = lcs_alloc_channel(&card->write); + if (rc) { + LCS_DBF_TEXT(2, setup, "iccwerr"); + lcs_cleanup_channel(&card->read); + lcs_free_card(card); + return NULL; + } + +#ifdef CONFIG_IP_MULTICAST + INIT_LIST_HEAD(&card->ipm_list); +#endif + LCS_DBF_HEX(2, setup, &card, sizeof(void*)); + return card; +} + +/* + * Setup read channel. 
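lcs_alloc_channel() above allocates LCS_NUM_BUFFS DMA-capable I/O buffers and, if any allocation fails part-way, frees the buffers it already obtained before returning -ENOMEM. A standalone user-space sketch of that all-or-nothing rollback (buffer count and size here are placeholders, not the driver's values):

#include <stdio.h>
#include <stdlib.h>

#define NUM_BUFFS 8		/* stands in for LCS_NUM_BUFFS */
#define BUF_SIZE  4096		/* stands in for LCS_IOBUFFERSIZE */

static void *iob[NUM_BUFFS];

/* Allocate every buffer, or none: roll back on the first failure. */
static int alloc_channel(void)
{
	int cnt;

	for (cnt = 0; cnt < NUM_BUFFS; cnt++) {
		iob[cnt] = calloc(1, BUF_SIZE);
		if (!iob[cnt])
			break;
	}
	if (cnt < NUM_BUFFS) {
		while (cnt-- > 0)
			free(iob[cnt]);
		return -1;	/* the driver returns -ENOMEM here */
	}
	return 0;
}

int main(void)
{
	if (alloc_channel())
		fprintf(stderr, "allocation failed, nothing leaked\n");
	else
		printf("all %d buffers allocated\n", NUM_BUFFS);
	return 0;
}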
+ */ +static void +lcs_setup_read_ccws(struct lcs_card *card) +{ + int cnt; + + LCS_DBF_TEXT(2, setup, "ireadccw"); + /* Setup read ccws. */ + memset(card->read.ccws, 0, sizeof (struct ccw1) * (LCS_NUM_BUFFS + 1)); + for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { + card->read.ccws[cnt].cmd_code = LCS_CCW_READ; + card->read.ccws[cnt].count = LCS_IOBUFFERSIZE; + card->read.ccws[cnt].flags = + CCW_FLAG_CC | CCW_FLAG_SLI | CCW_FLAG_PCI; + /* + * Note: we have allocated the buffer with GFP_DMA, so + * we do not need to do set_normalized_cda. + */ + card->read.ccws[cnt].cda = + (__u32) __pa(card->read.iob[cnt].data); + ((struct lcs_header *) + card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET; + card->read.iob[cnt].callback = lcs_get_frames_cb; + card->read.iob[cnt].state = LCS_BUF_STATE_READY; + card->read.iob[cnt].count = LCS_IOBUFFERSIZE; + } + card->read.ccws[0].flags &= ~CCW_FLAG_PCI; + card->read.ccws[LCS_NUM_BUFFS - 1].flags &= ~CCW_FLAG_PCI; + card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND; + /* Last ccw is a tic (transfer in channel). */ + card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER; + card->read.ccws[LCS_NUM_BUFFS].cda = + (__u32) __pa(card->read.ccws); + /* Setg initial state of the read channel. */ + card->read.state = LCS_CH_STATE_INIT; + + card->read.io_idx = 0; + card->read.buf_idx = 0; +} + +static void +lcs_setup_read(struct lcs_card *card) +{ + LCS_DBF_TEXT(3, setup, "initread"); + + lcs_setup_read_ccws(card); + /* Initialize read channel tasklet. */ + card->read.irq_tasklet.data = (unsigned long) &card->read; + card->read.irq_tasklet.func = lcs_tasklet; + /* Initialize waitqueue. */ + init_waitqueue_head(&card->read.wait_q); +} + +/* + * Setup write channel. + */ +static void +lcs_setup_write_ccws(struct lcs_card *card) +{ + int cnt; + + LCS_DBF_TEXT(3, setup, "iwritccw"); + /* Setup write ccws. */ + memset(card->write.ccws, 0, sizeof(struct ccw1) * (LCS_NUM_BUFFS + 1)); + for (cnt = 0; cnt < LCS_NUM_BUFFS; cnt++) { + card->write.ccws[cnt].cmd_code = LCS_CCW_WRITE; + card->write.ccws[cnt].count = 0; + card->write.ccws[cnt].flags = + CCW_FLAG_SUSPEND | CCW_FLAG_CC | CCW_FLAG_SLI; + /* + * Note: we have allocated the buffer with GFP_DMA, so + * we do not need to do set_normalized_cda. + */ + card->write.ccws[cnt].cda = + (__u32) __pa(card->write.iob[cnt].data); + } + /* Last ccw is a tic (transfer in channel). */ + card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER; + card->write.ccws[LCS_NUM_BUFFS].cda = + (__u32) __pa(card->write.ccws); + /* Set initial state of the write channel. */ + card->read.state = LCS_CH_STATE_INIT; + + card->write.io_idx = 0; + card->write.buf_idx = 0; +} + +static void +lcs_setup_write(struct lcs_card *card) +{ + LCS_DBF_TEXT(3, setup, "initwrit"); + + lcs_setup_write_ccws(card); + /* Initialize write channel tasklet. */ + card->write.irq_tasklet.data = (unsigned long) &card->write; + card->write.irq_tasklet.func = lcs_tasklet; + /* Initialize waitqueue. 
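The channel programs set up above are rings: LCS_NUM_BUFFS data CCWs chained together, followed by one transfer-in-channel CCW that points back to the first, and the rest of the driver advances through the ring with (idx + 1) & (LCS_NUM_BUFFS - 1). That masking only wraps correctly when the buffer count is a power of two. A tiny standalone sketch of the masked ring index (NUM_BUFFS is an arbitrary power of two here, standing in for LCS_NUM_BUFFS):

#include <assert.h>
#include <stdio.h>

#define NUM_BUFFS 32	/* placeholder; must be a power of two for the mask */

static int ring_next(int idx)
{
	return (idx + 1) & (NUM_BUFFS - 1);
}

int main(void)
{
	/* the mask trick is only valid for power-of-two ring sizes */
	assert((NUM_BUFFS & (NUM_BUFFS - 1)) == 0);

	printf("after slot %d comes slot %d\n",
	       NUM_BUFFS - 1, ring_next(NUM_BUFFS - 1));	/* wraps to 0 */
	return 0;
}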
*/ + init_waitqueue_head(&card->write.wait_q); +} + +static void +lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads) +{ + unsigned long flags; + + spin_lock_irqsave(&card->mask_lock, flags); + card->thread_allowed_mask = threads; + spin_unlock_irqrestore(&card->mask_lock, flags); + wake_up(&card->wait_q); +} +static int lcs_threads_running(struct lcs_card *card, unsigned long threads) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->mask_lock, flags); + rc = (card->thread_running_mask & threads); + spin_unlock_irqrestore(&card->mask_lock, flags); + return rc; +} + +static int +lcs_wait_for_threads(struct lcs_card *card, unsigned long threads) +{ + return wait_event_interruptible(card->wait_q, + lcs_threads_running(card, threads) == 0); +} + +static int lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread) +{ + unsigned long flags; + + spin_lock_irqsave(&card->mask_lock, flags); + if ( !(card->thread_allowed_mask & thread) || + (card->thread_start_mask & thread) ) { + spin_unlock_irqrestore(&card->mask_lock, flags); + return -EPERM; + } + card->thread_start_mask |= thread; + spin_unlock_irqrestore(&card->mask_lock, flags); + return 0; +} + +static void +lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread) +{ + unsigned long flags; + + spin_lock_irqsave(&card->mask_lock, flags); + card->thread_running_mask &= ~thread; + spin_unlock_irqrestore(&card->mask_lock, flags); + wake_up(&card->wait_q); +} + +static int __lcs_do_run_thread(struct lcs_card *card, unsigned long thread) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->mask_lock, flags); + if (card->thread_start_mask & thread){ + if ((card->thread_allowed_mask & thread) && + !(card->thread_running_mask & thread)){ + rc = 1; + card->thread_start_mask &= ~thread; + card->thread_running_mask |= thread; + } else + rc = -EPERM; + } + spin_unlock_irqrestore(&card->mask_lock, flags); + return rc; +} + +static int +lcs_do_run_thread(struct lcs_card *card, unsigned long thread) +{ + int rc = 0; + wait_event(card->wait_q, + (rc = __lcs_do_run_thread(card, thread)) >= 0); + return rc; +} + +static int +lcs_do_start_thread(struct lcs_card *card, unsigned long thread) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->mask_lock, flags); + LCS_DBF_TEXT_(4, trace, " %02x%02x%02x", + (u8) card->thread_start_mask, + (u8) card->thread_allowed_mask, + (u8) card->thread_running_mask); + rc = (card->thread_start_mask & thread); + spin_unlock_irqrestore(&card->mask_lock, flags); + return rc; +} + +/** + * Initialize channels,card and state machines. + */ +static void +lcs_setup_card(struct lcs_card *card) +{ + LCS_DBF_TEXT(2, setup, "initcard"); + LCS_DBF_HEX(2, setup, &card, sizeof(void*)); + + lcs_setup_read(card); + lcs_setup_write(card); + /* Set cards initial state. */ + card->state = DEV_STATE_DOWN; + card->tx_buffer = NULL; + card->tx_emitted = 0; + + init_waitqueue_head(&card->wait_q); + spin_lock_init(&card->lock); + spin_lock_init(&card->ipm_lock); + spin_lock_init(&card->mask_lock); +#ifdef CONFIG_IP_MULTICAST + INIT_LIST_HEAD(&card->ipm_list); +#endif + INIT_LIST_HEAD(&card->lancmd_waiters); +} + +static void lcs_clear_multicast_list(struct lcs_card *card) +{ +#ifdef CONFIG_IP_MULTICAST + struct lcs_ipm_list *ipm; + unsigned long flags; + + /* Free multicast list. 
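The helpers above gate the driver's background kernel threads with three bitmasks protected by mask_lock: a thread may only be requested if it is allowed and not already requested, and only claimed if it is not already running. A standalone sketch of the same gate with the locking omitted (the driver does all of this under spin_lock_irqsave()):

#include <stdio.h>

#define RECOVERY_THREAD	0x01UL	/* stands in for LCS_RECOVERY_THREAD */
#define SET_MC_THREAD	0x02UL	/* stands in for LCS_SET_MC_THREAD */

struct card {
	unsigned long allowed;	/* thread_allowed_mask */
	unsigned long started;	/* thread_start_mask   */
	unsigned long running;	/* thread_running_mask */
};

/* Mirrors lcs_set_thread_start_bit(): request that a thread be started. */
static int set_start_bit(struct card *c, unsigned long thread)
{
	if (!(c->allowed & thread) || (c->started & thread))
		return -1;		/* the driver returns -EPERM */
	c->started |= thread;
	return 0;
}

/* Mirrors __lcs_do_run_thread(): claim a start request and mark it running. */
static int do_run_thread(struct card *c, unsigned long thread)
{
	if (!(c->started & thread))
		return 0;		/* nothing was requested */
	if (!(c->allowed & thread) || (c->running & thread))
		return -1;
	c->started &= ~thread;
	c->running |= thread;
	return 1;			/* caller may run the thread */
}

int main(void)
{
	struct card c = { .allowed = RECOVERY_THREAD | SET_MC_THREAD };

	printf("request: %d\n", set_start_bit(&c, RECOVERY_THREAD));	/* 0 */
	printf("claim:   %d\n", do_run_thread(&c, RECOVERY_THREAD));	/* 1 */
	printf("again:   %d\n", do_run_thread(&c, RECOVERY_THREAD));	/* 0 */
	return 0;
}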
*/ + LCS_DBF_TEXT(3, setup, "clmclist"); + spin_lock_irqsave(&card->ipm_lock, flags); + while (!list_empty(&card->ipm_list)){ + ipm = list_entry(card->ipm_list.next, + struct lcs_ipm_list, list); + list_del(&ipm->list); + if (ipm->ipm_state != LCS_IPM_STATE_SET_REQUIRED){ + spin_unlock_irqrestore(&card->ipm_lock, flags); + lcs_send_delipm(card, ipm); + spin_lock_irqsave(&card->ipm_lock, flags); + } + kfree(ipm); + } + spin_unlock_irqrestore(&card->ipm_lock, flags); +#endif +} +/** + * Cleanup channels,card and state machines. + */ +static void +lcs_cleanup_card(struct lcs_card *card) +{ + + LCS_DBF_TEXT(3, setup, "cleancrd"); + LCS_DBF_HEX(2,setup,&card,sizeof(void*)); + + if (card->dev != NULL) + free_netdev(card->dev); + /* Cleanup channels. */ + lcs_cleanup_channel(&card->write); + lcs_cleanup_channel(&card->read); +} + +/** + * Start channel. + */ +static int +lcs_start_channel(struct lcs_channel *channel) +{ + unsigned long flags; + int rc; + + LCS_DBF_TEXT_(4, trace,"ssch%s", dev_name(&channel->ccwdev->dev)); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_start(channel->ccwdev, + channel->ccws + channel->io_idx, 0, 0, + DOIO_DENY_PREFETCH | DOIO_ALLOW_SUSPEND); + if (rc == 0) + channel->state = LCS_CH_STATE_RUNNING; + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + if (rc) { + LCS_DBF_TEXT_(4,trace,"essh%s", + dev_name(&channel->ccwdev->dev)); + dev_err(&channel->ccwdev->dev, + "Starting an LCS device resulted in an error," + " rc=%d!\n", rc); + } + return rc; +} + +static int +lcs_clear_channel(struct lcs_channel *channel) +{ + unsigned long flags; + int rc; + + LCS_DBF_TEXT(4,trace,"clearch"); + LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev)); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_clear(channel->ccwdev, (addr_t) channel); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + if (rc) { + LCS_DBF_TEXT_(4, trace, "ecsc%s", + dev_name(&channel->ccwdev->dev)); + return rc; + } + wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_CLEARED)); + channel->state = LCS_CH_STATE_STOPPED; + return rc; +} + + +/** + * Stop channel. + */ +static int +lcs_stop_channel(struct lcs_channel *channel) +{ + unsigned long flags; + int rc; + + if (channel->state == LCS_CH_STATE_STOPPED) + return 0; + LCS_DBF_TEXT(4,trace,"haltsch"); + LCS_DBF_TEXT_(4, trace, "%s", dev_name(&channel->ccwdev->dev)); + channel->state = LCS_CH_STATE_INIT; + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_halt(channel->ccwdev, (addr_t) channel); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + if (rc) { + LCS_DBF_TEXT_(4, trace, "ehsc%s", + dev_name(&channel->ccwdev->dev)); + return rc; + } + /* Asynchronous halt initialted. Wait for its completion. 
*/ + wait_event(channel->wait_q, (channel->state == LCS_CH_STATE_HALTED)); + lcs_clear_channel(channel); + return 0; +} + +/** + * start read and write channel + */ +static int +lcs_start_channels(struct lcs_card *card) +{ + int rc; + + LCS_DBF_TEXT(2, trace, "chstart"); + /* start read channel */ + rc = lcs_start_channel(&card->read); + if (rc) + return rc; + /* start write channel */ + rc = lcs_start_channel(&card->write); + if (rc) + lcs_stop_channel(&card->read); + return rc; +} + +/** + * stop read and write channel + */ +static int +lcs_stop_channels(struct lcs_card *card) +{ + LCS_DBF_TEXT(2, trace, "chhalt"); + lcs_stop_channel(&card->read); + lcs_stop_channel(&card->write); + return 0; +} + +/** + * Get empty buffer. + */ +static struct lcs_buffer * +__lcs_get_buffer(struct lcs_channel *channel) +{ + int index; + + LCS_DBF_TEXT(5, trace, "_getbuff"); + index = channel->io_idx; + do { + if (channel->iob[index].state == LCS_BUF_STATE_EMPTY) { + channel->iob[index].state = LCS_BUF_STATE_LOCKED; + return channel->iob + index; + } + index = (index + 1) & (LCS_NUM_BUFFS - 1); + } while (index != channel->io_idx); + return NULL; +} + +static struct lcs_buffer * +lcs_get_buffer(struct lcs_channel *channel) +{ + struct lcs_buffer *buffer; + unsigned long flags; + + LCS_DBF_TEXT(5, trace, "getbuff"); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + buffer = __lcs_get_buffer(channel); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + return buffer; +} + +/** + * Resume channel program if the channel is suspended. + */ +static int +__lcs_resume_channel(struct lcs_channel *channel) +{ + int rc; + + if (channel->state != LCS_CH_STATE_SUSPENDED) + return 0; + if (channel->ccws[channel->io_idx].flags & CCW_FLAG_SUSPEND) + return 0; + LCS_DBF_TEXT_(5, trace, "rsch%s", dev_name(&channel->ccwdev->dev)); + rc = ccw_device_resume(channel->ccwdev); + if (rc) { + LCS_DBF_TEXT_(4, trace, "ersc%s", + dev_name(&channel->ccwdev->dev)); + dev_err(&channel->ccwdev->dev, + "Sending data from the LCS device to the LAN failed" + " with rc=%d\n",rc); + } else + channel->state = LCS_CH_STATE_RUNNING; + return rc; + +} + +/** + * Make a buffer ready for processing. + */ +static void __lcs_ready_buffer_bits(struct lcs_channel *channel, int index) +{ + int prev, next; + + LCS_DBF_TEXT(5, trace, "rdybits"); + prev = (index - 1) & (LCS_NUM_BUFFS - 1); + next = (index + 1) & (LCS_NUM_BUFFS - 1); + /* Check if we may clear the suspend bit of this buffer. */ + if (channel->ccws[next].flags & CCW_FLAG_SUSPEND) { + /* Check if we have to set the PCI bit. */ + if (!(channel->ccws[prev].flags & CCW_FLAG_SUSPEND)) + /* Suspend bit of the previous buffer is not set. */ + channel->ccws[index].flags |= CCW_FLAG_PCI; + /* Suspend bit of the next buffer is set. */ + channel->ccws[index].flags &= ~CCW_FLAG_SUSPEND; + } +} + +static int +lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) +{ + unsigned long flags; + int index, rc; + + LCS_DBF_TEXT(5, trace, "rdybuff"); + BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED && + buffer->state != LCS_BUF_STATE_PROCESSED); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + buffer->state = LCS_BUF_STATE_READY; + index = buffer - channel->iob; + /* Set length. */ + channel->ccws[index].count = buffer->count; + /* Check relevant PCI/suspend bits. 
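__lcs_get_buffer() above walks the buffer ring starting at io_idx and claims the first buffer found in state EMPTY by switching it to LOCKED, giving up only after a full lap. A standalone sketch of that claim loop (ring size and state names are placeholders):

#include <stdio.h>

#define NUM_BUFFS 8

enum buf_state { BUF_EMPTY, BUF_LOCKED, BUF_READY, BUF_PROCESSED };

struct buffer { enum buf_state state; };

static struct buffer iob[NUM_BUFFS];
static int io_idx;

/* Claim the first EMPTY buffer at or after io_idx, or NULL after one lap. */
static struct buffer *get_buffer(void)
{
	int index = io_idx;

	do {
		if (iob[index].state == BUF_EMPTY) {
			iob[index].state = BUF_LOCKED;
			return &iob[index];
		}
		index = (index + 1) & (NUM_BUFFS - 1);
	} while (index != io_idx);
	return NULL;		/* every slot is busy */
}

int main(void)
{
	struct buffer *buf;

	iob[0].state = BUF_READY;	/* first slot is already in flight */
	buf = get_buffer();
	if (buf)
		printf("claimed slot %d\n", (int)(buf - iob));	/* slot 1 */
	return 0;
}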
*/ + __lcs_ready_buffer_bits(channel, index); + rc = __lcs_resume_channel(channel); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + return rc; +} + +/** + * Mark the buffer as processed. Take care of the suspend bit + * of the previous buffer. This function is called from + * interrupt context, so the lock must not be taken. + */ +static int +__lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) +{ + int index, prev, next; + + LCS_DBF_TEXT(5, trace, "prcsbuff"); + BUG_ON(buffer->state != LCS_BUF_STATE_READY); + buffer->state = LCS_BUF_STATE_PROCESSED; + index = buffer - channel->iob; + prev = (index - 1) & (LCS_NUM_BUFFS - 1); + next = (index + 1) & (LCS_NUM_BUFFS - 1); + /* Set the suspend bit and clear the PCI bit of this buffer. */ + channel->ccws[index].flags |= CCW_FLAG_SUSPEND; + channel->ccws[index].flags &= ~CCW_FLAG_PCI; + /* Check the suspend bit of the previous buffer. */ + if (channel->iob[prev].state == LCS_BUF_STATE_READY) { + /* + * Previous buffer is in state ready. It might have + * happened in lcs_ready_buffer that the suspend bit + * has not been cleared to avoid an endless loop. + * Do it now. + */ + __lcs_ready_buffer_bits(channel, prev); + } + /* Clear PCI bit of next buffer. */ + channel->ccws[next].flags &= ~CCW_FLAG_PCI; + return __lcs_resume_channel(channel); +} + +/** + * Put a processed buffer back to state empty. + */ +static void +lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) +{ + unsigned long flags; + + LCS_DBF_TEXT(5, trace, "relbuff"); + BUG_ON(buffer->state != LCS_BUF_STATE_LOCKED && + buffer->state != LCS_BUF_STATE_PROCESSED); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + buffer->state = LCS_BUF_STATE_EMPTY; + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); +} + +/** + * Get buffer for a lan command. + */ +static struct lcs_buffer * +lcs_get_lancmd(struct lcs_card *card, int count) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(4, trace, "getlncmd"); + /* Get buffer and wait if none is available. */ + wait_event(card->write.wait_q, + ((buffer = lcs_get_buffer(&card->write)) != NULL)); + count += sizeof(struct lcs_header); + *(__u16 *)(buffer->data + count) = 0; + buffer->count = count + sizeof(__u16); + buffer->callback = lcs_release_buffer; + cmd = (struct lcs_cmd *) buffer->data; + cmd->offset = count; + cmd->type = LCS_FRAME_TYPE_CONTROL; + cmd->slot = 0; + return buffer; +} + + +static void +lcs_get_reply(struct lcs_reply *reply) +{ + refcount_inc(&reply->refcnt); +} + +static void +lcs_put_reply(struct lcs_reply *reply) +{ + if (refcount_dec_and_test(&reply->refcnt)) + kfree(reply); +} + +static struct lcs_reply * +lcs_alloc_reply(struct lcs_cmd *cmd) +{ + struct lcs_reply *reply; + + LCS_DBF_TEXT(4, trace, "getreply"); + + reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC); + if (!reply) + return NULL; + refcount_set(&reply->refcnt, 1); + reply->sequence_no = cmd->sequence_no; + reply->received = 0; + reply->rc = 0; + init_waitqueue_head(&reply->wait_q); + + return reply; +} + +/** + * Notifier function for lancmd replies. Called from read irq. 
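lcs_alloc_reply(), lcs_get_reply() and lcs_put_reply() above give every pending LAN command a reference-counted reply object: the sender holds the initial reference, the IRQ-side notifier takes a temporary one while it wakes the waiter, and whoever drops the last reference frees the object. A standalone sketch of that lifetime with a plain counter (the driver uses refcount_t, which additionally guards against overflow):

#include <stdio.h>
#include <stdlib.h>

struct reply {
	int refcnt;
	int sequence_no;
};

static struct reply *alloc_reply(int seq)
{
	struct reply *r = calloc(1, sizeof(*r));

	if (!r)
		return NULL;
	r->refcnt = 1;			/* the sender's reference */
	r->sequence_no = seq;
	return r;
}

static void get_reply(struct reply *r)
{
	r->refcnt++;
}

static void put_reply(struct reply *r)
{
	if (--r->refcnt == 0)
		free(r);		/* last reference gone */
}

int main(void)
{
	struct reply *r = alloc_reply(42);

	if (!r)
		return 1;
	get_reply(r);	/* e.g. taken by the IRQ-side notifier */
	put_reply(r);	/* notifier finished */
	put_reply(r);	/* sender finished: the object is freed here */
	printf("reply released\n");
	return 0;
}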
+ */ +static void +lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd) +{ + struct list_head *l, *n; + struct lcs_reply *reply; + + LCS_DBF_TEXT(4, trace, "notiwait"); + spin_lock(&card->lock); + list_for_each_safe(l, n, &card->lancmd_waiters) { + reply = list_entry(l, struct lcs_reply, list); + if (reply->sequence_no == cmd->sequence_no) { + lcs_get_reply(reply); + list_del_init(&reply->list); + if (reply->callback != NULL) + reply->callback(card, cmd); + reply->received = 1; + reply->rc = cmd->return_code; + wake_up(&reply->wait_q); + lcs_put_reply(reply); + break; + } + } + spin_unlock(&card->lock); +} + +/** + * Emit buffer of a lan command. + */ +static void +lcs_lancmd_timeout(struct timer_list *t) +{ + struct lcs_reply *reply = from_timer(reply, t, timer); + struct lcs_reply *list_reply, *r; + unsigned long flags; + + LCS_DBF_TEXT(4, trace, "timeout"); + spin_lock_irqsave(&reply->card->lock, flags); + list_for_each_entry_safe(list_reply, r, + &reply->card->lancmd_waiters,list) { + if (reply == list_reply) { + lcs_get_reply(reply); + list_del_init(&reply->list); + spin_unlock_irqrestore(&reply->card->lock, flags); + reply->received = 1; + reply->rc = -ETIME; + wake_up(&reply->wait_q); + lcs_put_reply(reply); + return; + } + } + spin_unlock_irqrestore(&reply->card->lock, flags); +} + +static int +lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer, + void (*reply_callback)(struct lcs_card *, struct lcs_cmd *)) +{ + struct lcs_reply *reply; + struct lcs_cmd *cmd; + unsigned long flags; + int rc; + + LCS_DBF_TEXT(4, trace, "sendcmd"); + cmd = (struct lcs_cmd *) buffer->data; + cmd->return_code = 0; + cmd->sequence_no = card->sequence_no++; + reply = lcs_alloc_reply(cmd); + if (!reply) + return -ENOMEM; + reply->callback = reply_callback; + reply->card = card; + spin_lock_irqsave(&card->lock, flags); + list_add_tail(&reply->list, &card->lancmd_waiters); + spin_unlock_irqrestore(&card->lock, flags); + + buffer->callback = lcs_release_buffer; + rc = lcs_ready_buffer(&card->write, buffer); + if (rc) + return rc; + timer_setup(&reply->timer, lcs_lancmd_timeout, 0); + mod_timer(&reply->timer, jiffies + HZ * card->lancmd_timeout); + wait_event(reply->wait_q, reply->received); + del_timer_sync(&reply->timer); + LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc); + rc = reply->rc; + lcs_put_reply(reply); + return rc ? 
-EIO : 0; +} + +/** + * LCS startup command + */ +static int +lcs_send_startup(struct lcs_card *card, __u8 initiator) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2, trace, "startup"); + buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_STARTUP; + cmd->initiator = initiator; + cmd->cmd.lcs_startup.buff_size = LCS_IOBUFFERSIZE; + return lcs_send_lancmd(card, buffer, NULL); +} + +/** + * LCS shutdown command + */ +static int +lcs_send_shutdown(struct lcs_card *card) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2, trace, "shutdown"); + buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_SHUTDOWN; + cmd->initiator = LCS_INITIATOR_TCPIP; + return lcs_send_lancmd(card, buffer, NULL); +} + +/** + * LCS lanstat command + */ +static void +__lcs_lanstat_cb(struct lcs_card *card, struct lcs_cmd *cmd) +{ + LCS_DBF_TEXT(2, trace, "statcb"); + memcpy(card->mac, cmd->cmd.lcs_lanstat_cmd.mac_addr, LCS_MAC_LENGTH); +} + +static int +lcs_send_lanstat(struct lcs_card *card) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2,trace, "cmdstat"); + buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + /* Setup lanstat command. */ + cmd->cmd_code = LCS_CMD_LANSTAT; + cmd->initiator = LCS_INITIATOR_TCPIP; + cmd->cmd.lcs_std_cmd.lan_type = card->lan_type; + cmd->cmd.lcs_std_cmd.portno = card->portno; + return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb); +} + +/** + * send stoplan command + */ +static int +lcs_send_stoplan(struct lcs_card *card, __u8 initiator) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2, trace, "cmdstpln"); + buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_STOPLAN; + cmd->initiator = initiator; + cmd->cmd.lcs_std_cmd.lan_type = card->lan_type; + cmd->cmd.lcs_std_cmd.portno = card->portno; + return lcs_send_lancmd(card, buffer, NULL); +} + +/** + * send startlan command + */ +static void +__lcs_send_startlan_cb(struct lcs_card *card, struct lcs_cmd *cmd) +{ + LCS_DBF_TEXT(2, trace, "srtlancb"); + card->lan_type = cmd->cmd.lcs_std_cmd.lan_type; + card->portno = cmd->cmd.lcs_std_cmd.portno; +} + +static int +lcs_send_startlan(struct lcs_card *card, __u8 initiator) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2, trace, "cmdstaln"); + buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_STARTLAN; + cmd->initiator = initiator; + cmd->cmd.lcs_std_cmd.lan_type = card->lan_type; + cmd->cmd.lcs_std_cmd.portno = card->portno; + return lcs_send_lancmd(card, buffer, __lcs_send_startlan_cb); +} + +#ifdef CONFIG_IP_MULTICAST +/** + * send setipm command (Multicast) + */ +static int +lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2, trace, "cmdsetim"); + buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_SETIPM; + cmd->initiator = LCS_INITIATOR_TCPIP; + cmd->cmd.lcs_qipassist.lan_type = card->lan_type; + cmd->cmd.lcs_qipassist.portno = card->portno; + cmd->cmd.lcs_qipassist.version = 4; + cmd->cmd.lcs_qipassist.num_ip_pairs = 1; + memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair, + &ipm_list->ipm, sizeof (struct 
lcs_ip_mac_pair)); + LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr); + return lcs_send_lancmd(card, buffer, NULL); +} + +/** + * send delipm command (Multicast) + */ +static int +lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + + LCS_DBF_TEXT(2, trace, "cmddelim"); + buffer = lcs_get_lancmd(card, LCS_MULTICAST_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_DELIPM; + cmd->initiator = LCS_INITIATOR_TCPIP; + cmd->cmd.lcs_qipassist.lan_type = card->lan_type; + cmd->cmd.lcs_qipassist.portno = card->portno; + cmd->cmd.lcs_qipassist.version = 4; + cmd->cmd.lcs_qipassist.num_ip_pairs = 1; + memcpy(cmd->cmd.lcs_qipassist.lcs_ipass_ctlmsg.ip_mac_pair, + &ipm_list->ipm, sizeof (struct lcs_ip_mac_pair)); + LCS_DBF_TEXT_(2, trace, "%x",ipm_list->ipm.ip_addr); + return lcs_send_lancmd(card, buffer, NULL); +} + +/** + * check if multicast is supported by LCS + */ +static void +__lcs_check_multicast_cb(struct lcs_card *card, struct lcs_cmd *cmd) +{ + LCS_DBF_TEXT(2, trace, "chkmccb"); + card->ip_assists_supported = + cmd->cmd.lcs_qipassist.ip_assists_supported; + card->ip_assists_enabled = + cmd->cmd.lcs_qipassist.ip_assists_enabled; +} + +static int +lcs_check_multicast_support(struct lcs_card *card) +{ + struct lcs_buffer *buffer; + struct lcs_cmd *cmd; + int rc; + + LCS_DBF_TEXT(2, trace, "cmdqipa"); + /* Send query ipassist. */ + buffer = lcs_get_lancmd(card, LCS_STD_CMD_SIZE); + cmd = (struct lcs_cmd *) buffer->data; + cmd->cmd_code = LCS_CMD_QIPASSIST; + cmd->initiator = LCS_INITIATOR_TCPIP; + cmd->cmd.lcs_qipassist.lan_type = card->lan_type; + cmd->cmd.lcs_qipassist.portno = card->portno; + cmd->cmd.lcs_qipassist.version = 4; + cmd->cmd.lcs_qipassist.num_ip_pairs = 1; + rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb); + if (rc != 0) { + pr_err("Query IPAssist failed. Assuming unsupported!\n"); + return -EOPNOTSUPP; + } + if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) + return 0; + return -EOPNOTSUPP; +} + +/** + * set or del multicast address on LCS card + */ +static void +lcs_fix_multicast_list(struct lcs_card *card) +{ + struct list_head failed_list; + struct lcs_ipm_list *ipm, *tmp; + unsigned long flags; + int rc; + + LCS_DBF_TEXT(4,trace, "fixipm"); + INIT_LIST_HEAD(&failed_list); + spin_lock_irqsave(&card->ipm_lock, flags); +list_modified: + list_for_each_entry_safe(ipm, tmp, &card->ipm_list, list){ + switch (ipm->ipm_state) { + case LCS_IPM_STATE_SET_REQUIRED: + /* del from ipm_list so no one else can tamper with + * this entry */ + list_del_init(&ipm->list); + spin_unlock_irqrestore(&card->ipm_lock, flags); + rc = lcs_send_setipm(card, ipm); + spin_lock_irqsave(&card->ipm_lock, flags); + if (rc) { + pr_info("Adding multicast address failed." 
+ " Table possibly full!\n"); + /* store ipm in failed list -> will be added + * to ipm_list again, so a retry will be done + * during the next call of this function */ + list_add_tail(&ipm->list, &failed_list); + } else { + ipm->ipm_state = LCS_IPM_STATE_ON_CARD; + /* re-insert into ipm_list */ + list_add_tail(&ipm->list, &card->ipm_list); + } + goto list_modified; + case LCS_IPM_STATE_DEL_REQUIRED: + list_del(&ipm->list); + spin_unlock_irqrestore(&card->ipm_lock, flags); + lcs_send_delipm(card, ipm); + spin_lock_irqsave(&card->ipm_lock, flags); + kfree(ipm); + goto list_modified; + case LCS_IPM_STATE_ON_CARD: + break; + } + } + /* re-insert all entries from the failed_list into ipm_list */ + list_for_each_entry_safe(ipm, tmp, &failed_list, list) + list_move_tail(&ipm->list, &card->ipm_list); + + spin_unlock_irqrestore(&card->ipm_lock, flags); +} + +/** + * get mac address for the relevant Multicast address + */ +static void +lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev) +{ + LCS_DBF_TEXT(4,trace, "getmac"); + ip_eth_mc_map(ipm, mac); +} + +/** + * function called by net device to handle multicast address relevant things + */ +static void lcs_remove_mc_addresses(struct lcs_card *card, + struct in_device *in4_dev) +{ + struct ip_mc_list *im4; + struct list_head *l; + struct lcs_ipm_list *ipm; + unsigned long flags; + char buf[MAX_ADDR_LEN]; + + LCS_DBF_TEXT(4, trace, "remmclst"); + spin_lock_irqsave(&card->ipm_lock, flags); + list_for_each(l, &card->ipm_list) { + ipm = list_entry(l, struct lcs_ipm_list, list); + for (im4 = rcu_dereference(in4_dev->mc_list); + im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) { + lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev); + if ( (ipm->ipm.ip_addr == im4->multiaddr) && + (memcmp(buf, &ipm->ipm.mac_addr, + LCS_MAC_LENGTH) == 0) ) + break; + } + if (im4 == NULL) + ipm->ipm_state = LCS_IPM_STATE_DEL_REQUIRED; + } + spin_unlock_irqrestore(&card->ipm_lock, flags); +} + +static struct lcs_ipm_list *lcs_check_addr_entry(struct lcs_card *card, + struct ip_mc_list *im4, + char *buf) +{ + struct lcs_ipm_list *tmp, *ipm = NULL; + struct list_head *l; + unsigned long flags; + + LCS_DBF_TEXT(4, trace, "chkmcent"); + spin_lock_irqsave(&card->ipm_lock, flags); + list_for_each(l, &card->ipm_list) { + tmp = list_entry(l, struct lcs_ipm_list, list); + if ( (tmp->ipm.ip_addr == im4->multiaddr) && + (memcmp(buf, &tmp->ipm.mac_addr, + LCS_MAC_LENGTH) == 0) ) { + ipm = tmp; + break; + } + } + spin_unlock_irqrestore(&card->ipm_lock, flags); + return ipm; +} + +static void lcs_set_mc_addresses(struct lcs_card *card, + struct in_device *in4_dev) +{ + + struct ip_mc_list *im4; + struct lcs_ipm_list *ipm; + char buf[MAX_ADDR_LEN]; + unsigned long flags; + + LCS_DBF_TEXT(4, trace, "setmclst"); + for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL; + im4 = rcu_dereference(im4->next_rcu)) { + lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev); + ipm = lcs_check_addr_entry(card, im4, buf); + if (ipm != NULL) + continue; /* Address already in list. 
*/ + ipm = kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC); + if (ipm == NULL) { + pr_info("Not enough memory to add" + " new multicast entry!\n"); + break; + } + memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH); + ipm->ipm.ip_addr = im4->multiaddr; + ipm->ipm_state = LCS_IPM_STATE_SET_REQUIRED; + spin_lock_irqsave(&card->ipm_lock, flags); + LCS_DBF_HEX(2,trace,&ipm->ipm.ip_addr,4); + list_add(&ipm->list, &card->ipm_list); + spin_unlock_irqrestore(&card->ipm_lock, flags); + } +} + +static int +lcs_register_mc_addresses(void *data) +{ + struct lcs_card *card; + struct in_device *in4_dev; + + card = (struct lcs_card *) data; + + if (!lcs_do_run_thread(card, LCS_SET_MC_THREAD)) + return 0; + LCS_DBF_TEXT(4, trace, "regmulti"); + + in4_dev = in_dev_get(card->dev); + if (in4_dev == NULL) + goto out; + rcu_read_lock(); + lcs_remove_mc_addresses(card,in4_dev); + lcs_set_mc_addresses(card, in4_dev); + rcu_read_unlock(); + in_dev_put(in4_dev); + + netif_carrier_off(card->dev); + netif_tx_disable(card->dev); + wait_event(card->write.wait_q, + (card->write.state != LCS_CH_STATE_RUNNING)); + lcs_fix_multicast_list(card); + if (card->state == DEV_STATE_UP) { + netif_carrier_on(card->dev); + netif_wake_queue(card->dev); + } +out: + lcs_clear_thread_running_bit(card, LCS_SET_MC_THREAD); + return 0; +} +#endif /* CONFIG_IP_MULTICAST */ + +/** + * function called by net device to + * handle multicast address relevant things + */ +static void +lcs_set_multicast_list(struct net_device *dev) +{ +#ifdef CONFIG_IP_MULTICAST + struct lcs_card *card; + + LCS_DBF_TEXT(4, trace, "setmulti"); + card = (struct lcs_card *) dev->ml_priv; + + if (!lcs_set_thread_start_bit(card, LCS_SET_MC_THREAD)) + schedule_work(&card->kernel_thread_starter); +#endif /* CONFIG_IP_MULTICAST */ +} + +static long +lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb) +{ + if (!IS_ERR(irb)) + return 0; + + switch (PTR_ERR(irb)) { + case -EIO: + dev_warn(&cdev->dev, + "An I/O-error occurred on the LCS device\n"); + LCS_DBF_TEXT(2, trace, "ckirberr"); + LCS_DBF_TEXT_(2, trace, " rc%d", -EIO); + break; + case -ETIMEDOUT: + dev_warn(&cdev->dev, + "A command timed out on the LCS device\n"); + LCS_DBF_TEXT(2, trace, "ckirberr"); + LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT); + break; + default: + dev_warn(&cdev->dev, + "An error occurred on the LCS device, rc=%ld\n", + PTR_ERR(irb)); + LCS_DBF_TEXT(2, trace, "ckirberr"); + LCS_DBF_TEXT(2, trace, " rc???"); + } + return PTR_ERR(irb); +} + +static int +lcs_get_problem(struct ccw_device *cdev, struct irb *irb) +{ + int dstat, cstat; + char *sense; + + sense = (char *) irb->ecw; + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; + + if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | + SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | + SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { + LCS_DBF_TEXT(2, trace, "CGENCHK"); + return 1; + } + if (dstat & DEV_STAT_UNIT_CHECK) { + if (sense[LCS_SENSE_BYTE_1] & + LCS_SENSE_RESETTING_EVENT) { + LCS_DBF_TEXT(2, trace, "REVIND"); + return 1; + } + if (sense[LCS_SENSE_BYTE_0] & + LCS_SENSE_CMD_REJECT) { + LCS_DBF_TEXT(2, trace, "CMDREJ"); + return 0; + } + if ((!sense[LCS_SENSE_BYTE_0]) && + (!sense[LCS_SENSE_BYTE_1]) && + (!sense[LCS_SENSE_BYTE_2]) && + (!sense[LCS_SENSE_BYTE_3])) { + LCS_DBF_TEXT(2, trace, "ZEROSEN"); + return 0; + } + LCS_DBF_TEXT(2, trace, "DGENCHK"); + return 1; + } + return 0; +} + +static void +lcs_schedule_recovery(struct lcs_card *card) +{ + LCS_DBF_TEXT(2, trace, "startrec"); + if 
(!lcs_set_thread_start_bit(card, LCS_RECOVERY_THREAD)) + schedule_work(&card->kernel_thread_starter); +} + +/** + * IRQ Handler for LCS channels + */ +static void +lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) +{ + struct lcs_card *card; + struct lcs_channel *channel; + int rc, index; + int cstat, dstat; + + if (lcs_check_irb_error(cdev, irb)) + return; + + card = CARD_FROM_DEV(cdev); + if (card->read.ccwdev == cdev) + channel = &card->read; + else + channel = &card->write; + + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; + LCS_DBF_TEXT_(5, trace, "Rint%s", dev_name(&cdev->dev)); + LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.cstat, + irb->scsw.cmd.dstat); + LCS_DBF_TEXT_(5, trace, "%4x%4x", irb->scsw.cmd.fctl, + irb->scsw.cmd.actl); + + /* Check for channel and device errors presented */ + rc = lcs_get_problem(cdev, irb); + if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) { + dev_warn(&cdev->dev, + "The LCS device stopped because of an error," + " dstat=0x%X, cstat=0x%X \n", + dstat, cstat); + if (rc) { + channel->state = LCS_CH_STATE_ERROR; + } + } + if (channel->state == LCS_CH_STATE_ERROR) { + lcs_schedule_recovery(card); + wake_up(&card->wait_q); + return; + } + /* How far in the ccw chain have we processed? */ + if ((channel->state != LCS_CH_STATE_INIT) && + (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) && + (irb->scsw.cmd.cpa != 0)) { + index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa) + - channel->ccws; + if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) || + (irb->scsw.cmd.cstat & SCHN_STAT_PCI)) + /* Bloody io subsystem tells us lies about cpa... */ + index = (index - 1) & (LCS_NUM_BUFFS - 1); + while (channel->io_idx != index) { + __lcs_processed_buffer(channel, + channel->iob + channel->io_idx); + channel->io_idx = + (channel->io_idx + 1) & (LCS_NUM_BUFFS - 1); + } + } + + if ((irb->scsw.cmd.dstat & DEV_STAT_DEV_END) || + (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) || + (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) + /* Mark channel as stopped. */ + channel->state = LCS_CH_STATE_STOPPED; + else if (irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) + /* CCW execution stopped on a suspend bit. */ + channel->state = LCS_CH_STATE_SUSPENDED; + if (irb->scsw.cmd.fctl & SCSW_FCTL_HALT_FUNC) { + if (irb->scsw.cmd.cc != 0) { + ccw_device_halt(channel->ccwdev, (addr_t) channel); + return; + } + /* The channel has been stopped by halt_IO. */ + channel->state = LCS_CH_STATE_HALTED; + } + if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) + channel->state = LCS_CH_STATE_CLEARED; + /* Do the rest in the tasklet. */ + tasklet_schedule(&channel->irq_tasklet); +} + +/** + * Tasklet for IRQ handler + */ +static void +lcs_tasklet(unsigned long data) +{ + unsigned long flags; + struct lcs_channel *channel; + struct lcs_buffer *iob; + int buf_idx; + + channel = (struct lcs_channel *) data; + LCS_DBF_TEXT_(5, trace, "tlet%s", dev_name(&channel->ccwdev->dev)); + + /* Check for processed buffers. */ + iob = channel->iob; + buf_idx = channel->buf_idx; + while (iob[buf_idx].state == LCS_BUF_STATE_PROCESSED) { + /* Do the callback thing. 
*/ + if (iob[buf_idx].callback != NULL) + iob[buf_idx].callback(channel, iob + buf_idx); + buf_idx = (buf_idx + 1) & (LCS_NUM_BUFFS - 1); + } + channel->buf_idx = buf_idx; + + if (channel->state == LCS_CH_STATE_STOPPED) + lcs_start_channel(channel); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + if (channel->state == LCS_CH_STATE_SUSPENDED && + channel->iob[channel->io_idx].state == LCS_BUF_STATE_READY) + __lcs_resume_channel(channel); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + + /* Something happened on the channel. Wake up waiters. */ + wake_up(&channel->wait_q); +} + +/** + * Finish current tx buffer and make it ready for transmit. + */ +static void +__lcs_emit_txbuffer(struct lcs_card *card) +{ + LCS_DBF_TEXT(5, trace, "emittx"); + *(__u16 *)(card->tx_buffer->data + card->tx_buffer->count) = 0; + card->tx_buffer->count += 2; + lcs_ready_buffer(&card->write, card->tx_buffer); + card->tx_buffer = NULL; + card->tx_emitted++; +} + +/** + * Callback for finished tx buffers. + */ +static void +lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer) +{ + struct lcs_card *card; + + LCS_DBF_TEXT(5, trace, "txbuffcb"); + /* Put buffer back to pool. */ + lcs_release_buffer(channel, buffer); + card = container_of(channel, struct lcs_card, write); + if (netif_queue_stopped(card->dev) && netif_carrier_ok(card->dev)) + netif_wake_queue(card->dev); + spin_lock(&card->lock); + card->tx_emitted--; + if (card->tx_emitted <= 0 && card->tx_buffer != NULL) + /* + * Last running tx buffer has finished. Submit partially + * filled current buffer. + */ + __lcs_emit_txbuffer(card); + spin_unlock(&card->lock); +} + +/** + * Packet transmit function called by network stack + */ +static int +__lcs_start_xmit(struct lcs_card *card, struct sk_buff *skb, + struct net_device *dev) +{ + struct lcs_header *header; + int rc = NETDEV_TX_OK; + + LCS_DBF_TEXT(5, trace, "hardxmit"); + if (skb == NULL) { + card->stats.tx_dropped++; + card->stats.tx_errors++; + return NETDEV_TX_OK; + } + if (card->state != DEV_STATE_UP) { + dev_kfree_skb(skb); + card->stats.tx_dropped++; + card->stats.tx_errors++; + card->stats.tx_carrier_errors++; + return NETDEV_TX_OK; + } + if (skb->protocol == htons(ETH_P_IPV6)) { + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + netif_stop_queue(card->dev); + spin_lock(&card->lock); + if (card->tx_buffer != NULL && + card->tx_buffer->count + sizeof(struct lcs_header) + + skb->len + sizeof(u16) > LCS_IOBUFFERSIZE) + /* skb too big for current tx buffer. */ + __lcs_emit_txbuffer(card); + if (card->tx_buffer == NULL) { + /* Get new tx buffer */ + card->tx_buffer = lcs_get_buffer(&card->write); + if (card->tx_buffer == NULL) { + card->stats.tx_dropped++; + rc = NETDEV_TX_BUSY; + goto out; + } + card->tx_buffer->callback = lcs_txbuffer_cb; + card->tx_buffer->count = 0; + } + header = (struct lcs_header *) + (card->tx_buffer->data + card->tx_buffer->count); + card->tx_buffer->count += skb->len + sizeof(struct lcs_header); + header->offset = card->tx_buffer->count; + header->type = card->lan_type; + header->slot = card->portno; + skb_copy_from_linear_data(skb, header + 1, skb->len); + spin_unlock(&card->lock); + card->stats.tx_bytes += skb->len; + card->stats.tx_packets++; + dev_kfree_skb(skb); + netif_wake_queue(card->dev); + spin_lock(&card->lock); + if (card->tx_emitted <= 0 && card->tx_buffer != NULL) + /* If this is the first tx buffer emit it immediately. 
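+ * With no transmission in flight there is no completion callback left
+ * that would flush the partially filled buffer later on.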
*/ + __lcs_emit_txbuffer(card); +out: + spin_unlock(&card->lock); + return rc; +} + +static int +lcs_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct lcs_card *card; + int rc; + + LCS_DBF_TEXT(5, trace, "pktxmit"); + card = (struct lcs_card *) dev->ml_priv; + rc = __lcs_start_xmit(card, skb, dev); + return rc; +} + +/** + * send startlan and lanstat command to make LCS device ready + */ +static int +lcs_startlan_auto(struct lcs_card *card) +{ + int rc; + + LCS_DBF_TEXT(2, trace, "strtauto"); +#ifdef CONFIG_ETHERNET + card->lan_type = LCS_FRAME_TYPE_ENET; + rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); + if (rc == 0) + return 0; + +#endif +#ifdef CONFIG_FDDI + card->lan_type = LCS_FRAME_TYPE_FDDI; + rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); + if (rc == 0) + return 0; +#endif + return -EIO; +} + +static int +lcs_startlan(struct lcs_card *card) +{ + int rc, i; + + LCS_DBF_TEXT(2, trace, "startlan"); + rc = 0; + if (card->portno != LCS_INVALID_PORT_NO) { + if (card->lan_type == LCS_FRAME_TYPE_AUTO) + rc = lcs_startlan_auto(card); + else + rc = lcs_send_startlan(card, LCS_INITIATOR_TCPIP); + } else { + for (i = 0; i <= 16; i++) { + card->portno = i; + if (card->lan_type != LCS_FRAME_TYPE_AUTO) + rc = lcs_send_startlan(card, + LCS_INITIATOR_TCPIP); + else + /* autodetecting lan type */ + rc = lcs_startlan_auto(card); + if (rc == 0) + break; + } + } + if (rc == 0) + return lcs_send_lanstat(card); + return rc; +} + +/** + * LCS detect function + * setup channels and make them I/O ready + */ +static int +lcs_detect(struct lcs_card *card) +{ + int rc = 0; + + LCS_DBF_TEXT(2, setup, "lcsdetct"); + /* start/reset card */ + if (card->dev) + netif_stop_queue(card->dev); + rc = lcs_stop_channels(card); + if (rc == 0) { + rc = lcs_start_channels(card); + if (rc == 0) { + rc = lcs_send_startup(card, LCS_INITIATOR_TCPIP); + if (rc == 0) + rc = lcs_startlan(card); + } + } + if (rc == 0) { + card->state = DEV_STATE_UP; + } else { + card->state = DEV_STATE_DOWN; + card->write.state = LCS_CH_STATE_INIT; + card->read.state = LCS_CH_STATE_INIT; + } + return rc; +} + +/** + * LCS Stop card + */ +static int +lcs_stopcard(struct lcs_card *card) +{ + int rc; + + LCS_DBF_TEXT(3, setup, "stopcard"); + + if (card->read.state != LCS_CH_STATE_STOPPED && + card->write.state != LCS_CH_STATE_STOPPED && + card->read.state != LCS_CH_STATE_ERROR && + card->write.state != LCS_CH_STATE_ERROR && + card->state == DEV_STATE_UP) { + lcs_clear_multicast_list(card); + rc = lcs_send_stoplan(card,LCS_INITIATOR_TCPIP); + rc = lcs_send_shutdown(card); + } + rc = lcs_stop_channels(card); + card->state = DEV_STATE_DOWN; + + return rc; +} + +/** + * Kernel Thread helper functions for LGW initiated commands + */ +static void +lcs_start_kernel_thread(struct work_struct *work) +{ + struct lcs_card *card = container_of(work, struct lcs_card, kernel_thread_starter); + LCS_DBF_TEXT(5, trace, "krnthrd"); + if (lcs_do_start_thread(card, LCS_RECOVERY_THREAD)) + kthread_run(lcs_recovery, card, "lcs_recover"); +#ifdef CONFIG_IP_MULTICAST + if (lcs_do_start_thread(card, LCS_SET_MC_THREAD)) + kthread_run(lcs_register_mc_addresses, card, "regipm"); +#endif +} + +/** + * Process control frames. 
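+ * Frames initiated by the LAN gateway (LGW) either trigger a recovery
+ * (STARTUP, STARTLAN) or take the carrier down (STOPLAN); everything
+ * else is a command reply that is handed to the waiting lancmd issuers.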
+ */ +static void +lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd) +{ + LCS_DBF_TEXT(5, trace, "getctrl"); + if (cmd->initiator == LCS_INITIATOR_LGW) { + switch(cmd->cmd_code) { + case LCS_CMD_STARTUP: + case LCS_CMD_STARTLAN: + lcs_schedule_recovery(card); + break; + case LCS_CMD_STOPLAN: + if (card->dev) { + pr_warn("Stoplan for %s initiated by LGW\n", + card->dev->name); + netif_carrier_off(card->dev); + } + break; + default: + LCS_DBF_TEXT(5, trace, "noLGWcmd"); + break; + } + } else + lcs_notify_lancmd_waiters(card, cmd); +} + +/** + * Unpack network packet. + */ +static void +lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len) +{ + struct sk_buff *skb; + + LCS_DBF_TEXT(5, trace, "getskb"); + if (card->dev == NULL || + card->state != DEV_STATE_UP) + /* The card isn't up. Ignore the packet. */ + return; + + skb = dev_alloc_skb(skb_len); + if (skb == NULL) { + dev_err(&card->dev->dev, + " Allocating a socket buffer to interface %s failed\n", + card->dev->name); + card->stats.rx_dropped++; + return; + } + skb_put_data(skb, skb_data, skb_len); + skb->protocol = card->lan_type_trans(skb, card->dev); + card->stats.rx_bytes += skb_len; + card->stats.rx_packets++; + if (skb->protocol == htons(ETH_P_802_2)) + *((__u32 *)skb->cb) = ++card->pkt_seq; + netif_rx(skb); +} + +/** + * LCS main routine to get packets and lancmd replies from the buffers + */ +static void +lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer) +{ + struct lcs_card *card; + struct lcs_header *lcs_hdr; + __u16 offset; + + LCS_DBF_TEXT(5, trace, "lcsgtpkt"); + lcs_hdr = (struct lcs_header *) buffer->data; + if (lcs_hdr->offset == LCS_ILLEGAL_OFFSET) { + LCS_DBF_TEXT(4, trace, "-eiogpkt"); + return; + } + card = container_of(channel, struct lcs_card, read); + offset = 0; + while (lcs_hdr->offset != 0) { + if (lcs_hdr->offset <= 0 || + lcs_hdr->offset > LCS_IOBUFFERSIZE || + lcs_hdr->offset < offset) { + /* Offset invalid. */ + card->stats.rx_length_errors++; + card->stats.rx_errors++; + return; + } + /* What kind of frame is it? */ + if (lcs_hdr->type == LCS_FRAME_TYPE_CONTROL) + /* Control frame. */ + lcs_get_control(card, (struct lcs_cmd *) lcs_hdr); + else if (lcs_hdr->type == LCS_FRAME_TYPE_ENET || + lcs_hdr->type == LCS_FRAME_TYPE_TR || + lcs_hdr->type == LCS_FRAME_TYPE_FDDI) + /* Normal network packet. */ + lcs_get_skb(card, (char *)(lcs_hdr + 1), + lcs_hdr->offset - offset - + sizeof(struct lcs_header)); + else + /* Unknown frame type. */ + ; // FIXME: error message ? + /* Proceed to next frame. */ + offset = lcs_hdr->offset; + lcs_hdr->offset = LCS_ILLEGAL_OFFSET; + lcs_hdr = (struct lcs_header *) (buffer->data + offset); + } + /* The buffer is now empty. Make it ready again. 
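+ * lcs_ready_buffer hands it back to the read channel so it can receive
+ * further frames.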
*/ + lcs_ready_buffer(&card->read, buffer); +} + +/** + * get network statistics for ifconfig and other user programs + */ +static struct net_device_stats * +lcs_getstats(struct net_device *dev) +{ + struct lcs_card *card; + + LCS_DBF_TEXT(4, trace, "netstats"); + card = (struct lcs_card *) dev->ml_priv; + return &card->stats; +} + +/** + * stop lcs device + * This function will be called by user doing ifconfig xxx down + */ +static int +lcs_stop_device(struct net_device *dev) +{ + struct lcs_card *card; + int rc; + + LCS_DBF_TEXT(2, trace, "stopdev"); + card = (struct lcs_card *) dev->ml_priv; + netif_carrier_off(dev); + netif_tx_disable(dev); + dev->flags &= ~IFF_UP; + wait_event(card->write.wait_q, + (card->write.state != LCS_CH_STATE_RUNNING)); + rc = lcs_stopcard(card); + if (rc) + dev_err(&card->dev->dev, + " Shutting down the LCS device failed\n"); + return rc; +} + +/** + * start lcs device and make it runnable + * This function will be called by user doing ifconfig xxx up + */ +static int +lcs_open_device(struct net_device *dev) +{ + struct lcs_card *card; + int rc; + + LCS_DBF_TEXT(2, trace, "opendev"); + card = (struct lcs_card *) dev->ml_priv; + /* initialize statistics */ + rc = lcs_detect(card); + if (rc) { + pr_err("Error in opening device!\n"); + + } else { + dev->flags |= IFF_UP; + netif_carrier_on(dev); + netif_wake_queue(dev); + card->state = DEV_STATE_UP; + } + return rc; +} + +/** + * show function for portno called by cat or similar things + */ +static ssize_t +lcs_portno_show (struct device *dev, struct device_attribute *attr, char *buf) +{ + struct lcs_card *card; + + card = dev_get_drvdata(dev); + + if (!card) + return 0; + + return sprintf(buf, "%d\n", card->portno); +} + +/** + * store the value which is piped to file portno + */ +static ssize_t +lcs_portno_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct lcs_card *card; + int rc; + s16 value; + + card = dev_get_drvdata(dev); + + if (!card) + return 0; + + rc = kstrtos16(buf, 0, &value); + if (rc) + return -EINVAL; + /* TODO: sanity checks */ + card->portno = value; + if (card->dev) + card->dev->dev_port = card->portno; + + return count; + +} + +static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store); + +static const char *lcs_type[] = { + "not a channel", + "2216 parallel", + "2216 channel", + "OSA LCS card", + "unknown channel type", + "unsupported channel type", +}; + +static ssize_t +lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct ccwgroup_device *cgdev; + + cgdev = to_ccwgroupdev(dev); + if (!cgdev) + return -ENODEV; + + return sprintf(buf, "%s\n", lcs_type[cgdev->cdev[0]->id.driver_info]); +} + +static DEVICE_ATTR(type, 0444, lcs_type_show, NULL); + +static ssize_t +lcs_timeout_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct lcs_card *card; + + card = dev_get_drvdata(dev); + + return card ? 
sprintf(buf, "%u\n", card->lancmd_timeout) : 0; +} + +static ssize_t +lcs_timeout_store (struct device *dev, struct device_attribute *attr, const char *buf, size_t count) +{ + struct lcs_card *card; + unsigned int value; + int rc; + + card = dev_get_drvdata(dev); + + if (!card) + return 0; + + rc = kstrtouint(buf, 0, &value); + if (rc) + return -EINVAL; + /* TODO: sanity checks */ + card->lancmd_timeout = value; + + return count; + +} + +static DEVICE_ATTR(lancmd_timeout, 0644, lcs_timeout_show, lcs_timeout_store); + +static ssize_t +lcs_dev_recover_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct lcs_card *card = dev_get_drvdata(dev); + char *tmp; + int i; + + if (!card) + return -EINVAL; + if (card->state != DEV_STATE_UP) + return -EPERM; + i = simple_strtoul(buf, &tmp, 16); + if (i == 1) + lcs_schedule_recovery(card); + return count; +} + +static DEVICE_ATTR(recover, 0200, NULL, lcs_dev_recover_store); + +static struct attribute * lcs_attrs[] = { + &dev_attr_portno.attr, + &dev_attr_type.attr, + &dev_attr_lancmd_timeout.attr, + &dev_attr_recover.attr, + NULL, +}; +static struct attribute_group lcs_attr_group = { + .attrs = lcs_attrs, +}; +static const struct attribute_group *lcs_attr_groups[] = { + &lcs_attr_group, + NULL, +}; +static const struct device_type lcs_devtype = { + .name = "lcs", + .groups = lcs_attr_groups, +}; + +/** + * lcs_probe_device is called on establishing a new ccwgroup_device. + */ +static int +lcs_probe_device(struct ccwgroup_device *ccwgdev) +{ + struct lcs_card *card; + + if (!get_device(&ccwgdev->dev)) + return -ENODEV; + + LCS_DBF_TEXT(2, setup, "add_dev"); + card = lcs_alloc_card(); + if (!card) { + LCS_DBF_TEXT_(2, setup, " rc%d", -ENOMEM); + put_device(&ccwgdev->dev); + return -ENOMEM; + } + dev_set_drvdata(&ccwgdev->dev, card); + ccwgdev->cdev[0]->handler = lcs_irq; + ccwgdev->cdev[1]->handler = lcs_irq; + card->gdev = ccwgdev; + INIT_WORK(&card->kernel_thread_starter, lcs_start_kernel_thread); + card->thread_start_mask = 0; + card->thread_allowed_mask = 0; + card->thread_running_mask = 0; + ccwgdev->dev.type = &lcs_devtype; + + return 0; +} + +static int +lcs_register_netdev(struct ccwgroup_device *ccwgdev) +{ + struct lcs_card *card; + + LCS_DBF_TEXT(2, setup, "regnetdv"); + card = dev_get_drvdata(&ccwgdev->dev); + if (card->dev->reg_state != NETREG_UNINITIALIZED) + return 0; + SET_NETDEV_DEV(card->dev, &ccwgdev->dev); + return register_netdev(card->dev); +} + +/** + * lcs_new_device will be called by setting the group device online. 
+ */ +static const struct net_device_ops lcs_netdev_ops = { + .ndo_open = lcs_open_device, + .ndo_stop = lcs_stop_device, + .ndo_get_stats = lcs_getstats, + .ndo_start_xmit = lcs_start_xmit, +}; + +static const struct net_device_ops lcs_mc_netdev_ops = { + .ndo_open = lcs_open_device, + .ndo_stop = lcs_stop_device, + .ndo_get_stats = lcs_getstats, + .ndo_start_xmit = lcs_start_xmit, + .ndo_set_rx_mode = lcs_set_multicast_list, +}; + +static int +lcs_new_device(struct ccwgroup_device *ccwgdev) +{ + struct lcs_card *card; + struct net_device *dev=NULL; + enum lcs_dev_states recover_state; + int rc; + + card = dev_get_drvdata(&ccwgdev->dev); + if (!card) + return -ENODEV; + + LCS_DBF_TEXT(2, setup, "newdev"); + LCS_DBF_HEX(3, setup, &card, sizeof(void*)); + card->read.ccwdev = ccwgdev->cdev[0]; + card->write.ccwdev = ccwgdev->cdev[1]; + + recover_state = card->state; + rc = ccw_device_set_online(card->read.ccwdev); + if (rc) + goto out_err; + rc = ccw_device_set_online(card->write.ccwdev); + if (rc) + goto out_werr; + + LCS_DBF_TEXT(3, setup, "lcsnewdv"); + + lcs_setup_card(card); + rc = lcs_detect(card); + if (rc) { + LCS_DBF_TEXT(2, setup, "dtctfail"); + dev_err(&ccwgdev->dev, + "Detecting a network adapter for LCS devices" + " failed with rc=%d (0x%x)\n", rc, rc); + lcs_stopcard(card); + goto out; + } + if (card->dev) { + LCS_DBF_TEXT(2, setup, "samedev"); + LCS_DBF_HEX(3, setup, &card, sizeof(void*)); + goto netdev_out; + } + switch (card->lan_type) { +#ifdef CONFIG_ETHERNET + case LCS_FRAME_TYPE_ENET: + card->lan_type_trans = eth_type_trans; + dev = alloc_etherdev(0); + break; +#endif +#ifdef CONFIG_FDDI + case LCS_FRAME_TYPE_FDDI: + card->lan_type_trans = fddi_type_trans; + dev = alloc_fddidev(0); + break; +#endif + default: + LCS_DBF_TEXT(3, setup, "errinit"); + pr_err(" Initialization failed\n"); + goto out; + } + if (!dev) + goto out; + card->dev = dev; + card->dev->ml_priv = card; + card->dev->netdev_ops = &lcs_netdev_ops; + card->dev->dev_port = card->portno; + memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH); +#ifdef CONFIG_IP_MULTICAST + if (!lcs_check_multicast_support(card)) + card->dev->netdev_ops = &lcs_mc_netdev_ops; +#endif +netdev_out: + lcs_set_allowed_threads(card,0xffffffff); + if (recover_state == DEV_STATE_RECOVER) { + lcs_set_multicast_list(card->dev); + card->dev->flags |= IFF_UP; + netif_carrier_on(card->dev); + netif_wake_queue(card->dev); + card->state = DEV_STATE_UP; + } else { + lcs_stopcard(card); + } + + if (lcs_register_netdev(ccwgdev) != 0) + goto out; + + /* Print out supported assists: IPv6 */ + pr_info("LCS device %s %s IPv6 support\n", card->dev->name, + (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ? + "with" : "without"); + /* Print out supported assist: Multicast */ + pr_info("LCS device %s %s Multicast support\n", card->dev->name, + (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ? + "with" : "without"); + return 0; +out: + + ccw_device_set_offline(card->write.ccwdev); +out_werr: + ccw_device_set_offline(card->read.ccwdev); +out_err: + return -ENODEV; +} + +/** + * lcs_shutdown_device, called when setting the group device offline. 
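+ * It stops the network interface, sets both CCW devices offline and,
+ * if the card was up, remembers it in DEV_STATE_RECOVER so that a later
+ * recovery or resume can bring it back.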
+ */ +static int +__lcs_shutdown_device(struct ccwgroup_device *ccwgdev, int recovery_mode) +{ + struct lcs_card *card; + enum lcs_dev_states recover_state; + int ret = 0, ret2 = 0, ret3 = 0; + + LCS_DBF_TEXT(3, setup, "shtdndev"); + card = dev_get_drvdata(&ccwgdev->dev); + if (!card) + return -ENODEV; + if (recovery_mode == 0) { + lcs_set_allowed_threads(card, 0); + if (lcs_wait_for_threads(card, LCS_SET_MC_THREAD)) + return -ERESTARTSYS; + } + LCS_DBF_HEX(3, setup, &card, sizeof(void*)); + recover_state = card->state; + + ret = lcs_stop_device(card->dev); + ret2 = ccw_device_set_offline(card->read.ccwdev); + ret3 = ccw_device_set_offline(card->write.ccwdev); + if (!ret) + ret = (ret2) ? ret2 : ret3; + if (ret) + LCS_DBF_TEXT_(3, setup, "1err:%d", ret); + if (recover_state == DEV_STATE_UP) { + card->state = DEV_STATE_RECOVER; + } + return 0; +} + +static int +lcs_shutdown_device(struct ccwgroup_device *ccwgdev) +{ + return __lcs_shutdown_device(ccwgdev, 0); +} + +/** + * drive lcs recovery after startup and startlan initiated by Lan Gateway + */ +static int +lcs_recovery(void *ptr) +{ + struct lcs_card *card; + struct ccwgroup_device *gdev; + int rc; + + card = (struct lcs_card *) ptr; + + LCS_DBF_TEXT(4, trace, "recover1"); + if (!lcs_do_run_thread(card, LCS_RECOVERY_THREAD)) + return 0; + LCS_DBF_TEXT(4, trace, "recover2"); + gdev = card->gdev; + dev_warn(&gdev->dev, + "A recovery process has been started for the LCS device\n"); + rc = __lcs_shutdown_device(gdev, 1); + rc = lcs_new_device(gdev); + if (!rc) + pr_info("Device %s successfully recovered!\n", + card->dev->name); + else + pr_info("Device %s could not be recovered!\n", + card->dev->name); + lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD); + return 0; +} + +/** + * lcs_remove_device, free buffers and card + */ +static void +lcs_remove_device(struct ccwgroup_device *ccwgdev) +{ + struct lcs_card *card; + + card = dev_get_drvdata(&ccwgdev->dev); + if (!card) + return; + + LCS_DBF_TEXT(3, setup, "remdev"); + LCS_DBF_HEX(3, setup, &card, sizeof(void*)); + if (ccwgdev->state == CCWGROUP_ONLINE) { + lcs_shutdown_device(ccwgdev); + } + if (card->dev) + unregister_netdev(card->dev); + lcs_cleanup_card(card); + lcs_free_card(card); + dev_set_drvdata(&ccwgdev->dev, NULL); + put_device(&ccwgdev->dev); +} + +static int lcs_pm_suspend(struct lcs_card *card) +{ + if (card->dev) + netif_device_detach(card->dev); + lcs_set_allowed_threads(card, 0); + lcs_wait_for_threads(card, 0xffffffff); + if (card->state != DEV_STATE_DOWN) + __lcs_shutdown_device(card->gdev, 1); + return 0; +} + +static int lcs_pm_resume(struct lcs_card *card) +{ + int rc = 0; + + if (card->state == DEV_STATE_RECOVER) + rc = lcs_new_device(card->gdev); + if (card->dev) + netif_device_attach(card->dev); + if (rc) { + dev_warn(&card->gdev->dev, "The lcs device driver " + "failed to recover the device\n"); + } + return rc; +} + +static int lcs_prepare(struct ccwgroup_device *gdev) +{ + return 0; +} + +static void lcs_complete(struct ccwgroup_device *gdev) +{ + return; +} + +static int lcs_freeze(struct ccwgroup_device *gdev) +{ + struct lcs_card *card = dev_get_drvdata(&gdev->dev); + return lcs_pm_suspend(card); +} + +static int lcs_thaw(struct ccwgroup_device *gdev) +{ + struct lcs_card *card = dev_get_drvdata(&gdev->dev); + return lcs_pm_resume(card); +} + +static int lcs_restore(struct ccwgroup_device *gdev) +{ + struct lcs_card *card = dev_get_drvdata(&gdev->dev); + return lcs_pm_resume(card); +} + +static struct ccw_device_id lcs_ids[] = { + {CCW_DEVICE(0x3088, 
0x08), .driver_info = lcs_channel_type_parallel}, + {CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216}, + {CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2}, + {}, +}; +MODULE_DEVICE_TABLE(ccw, lcs_ids); + +static struct ccw_driver lcs_ccw_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "lcs", + }, + .ids = lcs_ids, + .probe = ccwgroup_probe_ccwdev, + .remove = ccwgroup_remove_ccwdev, + .int_class = IRQIO_LCS, +}; + +/** + * LCS ccwgroup driver registration + */ +static struct ccwgroup_driver lcs_group_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "lcs", + }, + .ccw_driver = &lcs_ccw_driver, + .setup = lcs_probe_device, + .remove = lcs_remove_device, + .set_online = lcs_new_device, + .set_offline = lcs_shutdown_device, + .prepare = lcs_prepare, + .complete = lcs_complete, + .freeze = lcs_freeze, + .thaw = lcs_thaw, + .restore = lcs_restore, +}; + +static ssize_t group_store(struct device_driver *ddrv, const char *buf, + size_t count) +{ + int err; + err = ccwgroup_create_dev(lcs_root_dev, &lcs_group_driver, 2, buf); + return err ? err : count; +} +static DRIVER_ATTR_WO(group); + +static struct attribute *lcs_drv_attrs[] = { + &driver_attr_group.attr, + NULL, +}; +static struct attribute_group lcs_drv_attr_group = { + .attrs = lcs_drv_attrs, +}; +static const struct attribute_group *lcs_drv_attr_groups[] = { + &lcs_drv_attr_group, + NULL, +}; + +/** + * LCS Module/Kernel initialization function + */ +static int +__init lcs_init_module(void) +{ + int rc; + + pr_info("Loading %s\n", version); + rc = lcs_register_debug_facility(); + LCS_DBF_TEXT(0, setup, "lcsinit"); + if (rc) + goto out_err; + lcs_root_dev = root_device_register("lcs"); + rc = PTR_ERR_OR_ZERO(lcs_root_dev); + if (rc) + goto register_err; + rc = ccw_driver_register(&lcs_ccw_driver); + if (rc) + goto ccw_err; + lcs_group_driver.driver.groups = lcs_drv_attr_groups; + rc = ccwgroup_driver_register(&lcs_group_driver); + if (rc) + goto ccwgroup_err; + return 0; + +ccwgroup_err: + ccw_driver_unregister(&lcs_ccw_driver); +ccw_err: + root_device_unregister(lcs_root_dev); +register_err: + lcs_unregister_debug_facility(); +out_err: + pr_err("Initializing the lcs device driver failed\n"); + return rc; +} + + +/** + * LCS module cleanup function + */ +static void +__exit lcs_cleanup_module(void) +{ + pr_info("Terminating lcs module.\n"); + LCS_DBF_TEXT(0, trace, "cleanup"); + ccwgroup_driver_unregister(&lcs_group_driver); + ccw_driver_unregister(&lcs_ccw_driver); + root_device_unregister(lcs_root_dev); + lcs_unregister_debug_facility(); +} + +module_init(lcs_init_module); +module_exit(lcs_cleanup_module); + +MODULE_AUTHOR("Frank Pavlic <fpavlic@de.ibm.com>"); +MODULE_LICENSE("GPL"); + diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h new file mode 100644 index 000000000..bd52caa3b --- /dev/null +++ b/drivers/s390/net/lcs.h @@ -0,0 +1,342 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/*lcs.h*/ + +#include <linux/interrupt.h> +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/workqueue.h> +#include <linux/refcount.h> +#include <asm/ccwdev.h> + +#define LCS_DBF_TEXT(level, name, text) \ + do { \ + debug_text_event(lcs_dbf_##name, level, text); \ + } while (0) + +#define LCS_DBF_HEX(level,name,addr,len) \ +do { \ + debug_event(lcs_dbf_##name,level,(void*)(addr),len); \ +} while (0) + +#define LCS_DBF_TEXT_(level,name,text...) 
\ + do { \ + if (debug_level_enabled(lcs_dbf_##name, level)) { \ + sprintf(debug_buffer, text); \ + debug_text_event(lcs_dbf_##name, level, debug_buffer); \ + } \ + } while (0) + +/** + * sysfs related stuff + */ +#define CARD_FROM_DEV(cdev) \ + (struct lcs_card *) dev_get_drvdata( \ + &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev); + +/** + * Enum for classifying detected devices. + */ +enum lcs_channel_types { + /* Device is not a channel */ + lcs_channel_type_none, + + /* Device is a 2216 channel */ + lcs_channel_type_parallel, + + /* Device is a 2216 channel */ + lcs_channel_type_2216, + + /* Device is a OSA2 card */ + lcs_channel_type_osa2 +}; + +/** + * CCW commands used in this driver + */ +#define LCS_CCW_WRITE 0x01 +#define LCS_CCW_READ 0x02 +#define LCS_CCW_TRANSFER 0x08 + +/** + * LCS device status primitives + */ +#define LCS_CMD_STARTLAN 0x01 +#define LCS_CMD_STOPLAN 0x02 +#define LCS_CMD_LANSTAT 0x04 +#define LCS_CMD_STARTUP 0x07 +#define LCS_CMD_SHUTDOWN 0x08 +#define LCS_CMD_QIPASSIST 0xb2 +#define LCS_CMD_SETIPM 0xb4 +#define LCS_CMD_DELIPM 0xb5 + +#define LCS_INITIATOR_TCPIP 0x00 +#define LCS_INITIATOR_LGW 0x01 +#define LCS_STD_CMD_SIZE 16 +#define LCS_MULTICAST_CMD_SIZE 404 + +/** + * LCS IPASSIST MASKS,only used when multicast is switched on + */ +/* Not supported by LCS */ +#define LCS_IPASS_ARP_PROCESSING 0x0001 +#define LCS_IPASS_IN_CHECKSUM_SUPPORT 0x0002 +#define LCS_IPASS_OUT_CHECKSUM_SUPPORT 0x0004 +#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008 +#define LCS_IPASS_IP_FILTERING 0x0010 +/* Supported by lcs 3172 */ +#define LCS_IPASS_IPV6_SUPPORT 0x0020 +#define LCS_IPASS_MULTICAST_SUPPORT 0x0040 + +/** + * LCS sense byte definitions + */ +#define LCS_SENSE_BYTE_0 0 +#define LCS_SENSE_BYTE_1 1 +#define LCS_SENSE_BYTE_2 2 +#define LCS_SENSE_BYTE_3 3 +#define LCS_SENSE_INTERFACE_DISCONNECT 0x01 +#define LCS_SENSE_EQUIPMENT_CHECK 0x10 +#define LCS_SENSE_BUS_OUT_CHECK 0x20 +#define LCS_SENSE_INTERVENTION_REQUIRED 0x40 +#define LCS_SENSE_CMD_REJECT 0x80 +#define LCS_SENSE_RESETTING_EVENT 0x80 +#define LCS_SENSE_DEVICE_ONLINE 0x20 + +/** + * LCS packet type definitions + */ +#define LCS_FRAME_TYPE_CONTROL 0 +#define LCS_FRAME_TYPE_ENET 1 +#define LCS_FRAME_TYPE_TR 2 +#define LCS_FRAME_TYPE_FDDI 7 +#define LCS_FRAME_TYPE_AUTO -1 + +/** + * some more definitions,we will sort them later + */ +#define LCS_ILLEGAL_OFFSET 0xffff +#define LCS_IOBUFFERSIZE 0x5000 +#define LCS_NUM_BUFFS 32 /* needs to be power of 2 */ +#define LCS_MAC_LENGTH 6 +#define LCS_INVALID_PORT_NO -1 +#define LCS_LANCMD_TIMEOUT_DEFAULT 5 + +/** + * Multicast state + */ +#define LCS_IPM_STATE_SET_REQUIRED 0 +#define LCS_IPM_STATE_DEL_REQUIRED 1 +#define LCS_IPM_STATE_ON_CARD 2 + +/** + * LCS IP Assist declarations + * seems to be only used for multicast + */ +#define LCS_IPASS_ARP_PROCESSING 0x0001 +#define LCS_IPASS_INBOUND_CSUM_SUPP 0x0002 +#define LCS_IPASS_OUTBOUND_CSUM_SUPP 0x0004 +#define LCS_IPASS_IP_FRAG_REASSEMBLY 0x0008 +#define LCS_IPASS_IP_FILTERING 0x0010 +#define LCS_IPASS_IPV6_SUPPORT 0x0020 +#define LCS_IPASS_MULTICAST_SUPPORT 0x0040 + +/** + * LCS Buffer states + */ +enum lcs_buffer_states { + LCS_BUF_STATE_EMPTY, /* buffer is empty */ + LCS_BUF_STATE_LOCKED, /* buffer is locked, don't touch */ + LCS_BUF_STATE_READY, /* buffer is ready for read/write */ + LCS_BUF_STATE_PROCESSED, +}; + +/** + * LCS Channel State Machine declarations + */ +enum lcs_channel_states { + LCS_CH_STATE_INIT, + LCS_CH_STATE_HALTED, + LCS_CH_STATE_STOPPED, + LCS_CH_STATE_RUNNING, + 
LCS_CH_STATE_SUSPENDED, + LCS_CH_STATE_CLEARED, + LCS_CH_STATE_ERROR, +}; + +/** + * LCS device state machine + */ +enum lcs_dev_states { + DEV_STATE_DOWN, + DEV_STATE_UP, + DEV_STATE_RECOVER, +}; + +enum lcs_threads { + LCS_SET_MC_THREAD = 1, + LCS_RECOVERY_THREAD = 2, +}; + +/** + * LCS struct declarations + */ +struct lcs_header { + __u16 offset; + __u8 type; + __u8 slot; +} __attribute__ ((packed)); + +struct lcs_ip_mac_pair { + __be32 ip_addr; + __u8 mac_addr[LCS_MAC_LENGTH]; + __u8 reserved[2]; +} __attribute__ ((packed)); + +struct lcs_ipm_list { + struct list_head list; + struct lcs_ip_mac_pair ipm; + __u8 ipm_state; +}; + +struct lcs_cmd { + __u16 offset; + __u8 type; + __u8 slot; + __u8 cmd_code; + __u8 initiator; + __u16 sequence_no; + __u16 return_code; + union { + struct { + __u8 lan_type; + __u8 portno; + __u16 parameter_count; + __u8 operator_flags[3]; + __u8 reserved[3]; + } lcs_std_cmd; + struct { + __u16 unused1; + __u16 buff_size; + __u8 unused2[6]; + } lcs_startup; + struct { + __u8 lan_type; + __u8 portno; + __u8 unused[10]; + __u8 mac_addr[LCS_MAC_LENGTH]; + __u32 num_packets_deblocked; + __u32 num_packets_blocked; + __u32 num_packets_tx_on_lan; + __u32 num_tx_errors_detected; + __u32 num_tx_packets_disgarded; + __u32 num_packets_rx_from_lan; + __u32 num_rx_errors_detected; + __u32 num_rx_discarded_nobuffs_avail; + __u32 num_rx_packets_too_large; + } lcs_lanstat_cmd; +#ifdef CONFIG_IP_MULTICAST + struct { + __u8 lan_type; + __u8 portno; + __u16 num_ip_pairs; + __u16 ip_assists_supported; + __u16 ip_assists_enabled; + __u16 version; + struct { + struct lcs_ip_mac_pair + ip_mac_pair[32]; + __u32 response_data; + } lcs_ipass_ctlmsg __attribute ((packed)); + } lcs_qipassist __attribute__ ((packed)); +#endif /*CONFIG_IP_MULTICAST */ + } cmd __attribute__ ((packed)); +} __attribute__ ((packed)); + +/** + * Forward declarations. + */ +struct lcs_card; +struct lcs_channel; + +/** + * Definition of an lcs buffer. + */ +struct lcs_buffer { + enum lcs_buffer_states state; + void *data; + int count; + /* Callback for completion notification. */ + void (*callback)(struct lcs_channel *, struct lcs_buffer *); +}; + +struct lcs_reply { + struct list_head list; + __u16 sequence_no; + refcount_t refcnt; + /* Callback for completion notification. 
*/ + void (*callback)(struct lcs_card *, struct lcs_cmd *); + wait_queue_head_t wait_q; + struct lcs_card *card; + struct timer_list timer; + int received; + int rc; +}; + +/** + * Definition of an lcs channel + */ +struct lcs_channel { + enum lcs_channel_states state; + struct ccw_device *ccwdev; + struct ccw1 ccws[LCS_NUM_BUFFS + 1]; + wait_queue_head_t wait_q; + struct tasklet_struct irq_tasklet; + struct lcs_buffer iob[LCS_NUM_BUFFS]; + int io_idx; + int buf_idx; +}; + + +/** + * definition of the lcs card + */ +struct lcs_card { + spinlock_t lock; + spinlock_t ipm_lock; + enum lcs_dev_states state; + struct net_device *dev; + struct net_device_stats stats; + __be16 (*lan_type_trans)(struct sk_buff *skb, + struct net_device *dev); + struct ccwgroup_device *gdev; + struct lcs_channel read; + struct lcs_channel write; + struct lcs_buffer *tx_buffer; + int tx_emitted; + struct list_head lancmd_waiters; + int lancmd_timeout; + + struct work_struct kernel_thread_starter; + spinlock_t mask_lock; + unsigned long thread_start_mask; + unsigned long thread_running_mask; + unsigned long thread_allowed_mask; + wait_queue_head_t wait_q; + +#ifdef CONFIG_IP_MULTICAST + struct list_head ipm_list; +#endif + __u8 mac[LCS_MAC_LENGTH]; + __u16 ip_assists_supported; + __u16 ip_assists_enabled; + __s8 lan_type; + __u32 pkt_seq; + __u16 sequence_no; + __s16 portno; + /* Some info copied from probeinfo */ + u8 device_forced; + u8 max_port_no; + u8 hint_port_no; + s16 port_protocol_no; +} __attribute__ ((aligned(8))); + diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c new file mode 100644 index 000000000..5ce2424ca --- /dev/null +++ b/drivers/s390/net/netiucv.c @@ -0,0 +1,2216 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * IUCV network driver + * + * Copyright IBM Corp. 2001, 2009 + * + * Author(s): + * Original netiucv driver: + * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) + * Sysfs integration and all bugs therein: + * Cornelia Huck (cornelia.huck@de.ibm.com) + * PM functions: + * Ursula Braun (ursula.braun@de.ibm.com) + * + * Documentation used: + * the source of the original IUCV driver by: + * Stefan Hegewald <hegewald@de.ibm.com> + * Hartmut Penner <hpenner@de.ibm.com> + * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) + * Martin Schwidefsky (schwidefsky@de.ibm.com) + * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 
2000 + */ + +#define KMSG_COMPONENT "netiucv" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#undef DEBUG + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/interrupt.h> +#include <linux/timer.h> +#include <linux/bitops.h> + +#include <linux/signal.h> +#include <linux/string.h> +#include <linux/device.h> + +#include <linux/ip.h> +#include <linux/if_arp.h> +#include <linux/tcp.h> +#include <linux/skbuff.h> +#include <linux/ctype.h> +#include <net/dst.h> + +#include <asm/io.h> +#include <linux/uaccess.h> +#include <asm/ebcdic.h> + +#include <net/iucv/iucv.h> +#include "fsm.h" + +MODULE_AUTHOR + ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)"); +MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver"); + +/** + * Debug Facility stuff + */ +#define IUCV_DBF_SETUP_NAME "iucv_setup" +#define IUCV_DBF_SETUP_LEN 64 +#define IUCV_DBF_SETUP_PAGES 2 +#define IUCV_DBF_SETUP_NR_AREAS 1 +#define IUCV_DBF_SETUP_LEVEL 3 + +#define IUCV_DBF_DATA_NAME "iucv_data" +#define IUCV_DBF_DATA_LEN 128 +#define IUCV_DBF_DATA_PAGES 2 +#define IUCV_DBF_DATA_NR_AREAS 1 +#define IUCV_DBF_DATA_LEVEL 2 + +#define IUCV_DBF_TRACE_NAME "iucv_trace" +#define IUCV_DBF_TRACE_LEN 16 +#define IUCV_DBF_TRACE_PAGES 4 +#define IUCV_DBF_TRACE_NR_AREAS 1 +#define IUCV_DBF_TRACE_LEVEL 3 + +#define IUCV_DBF_TEXT(name,level,text) \ + do { \ + debug_text_event(iucv_dbf_##name,level,text); \ + } while (0) + +#define IUCV_DBF_HEX(name,level,addr,len) \ + do { \ + debug_event(iucv_dbf_##name,level,(void*)(addr),len); \ + } while (0) + +DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf); + +#define IUCV_DBF_TEXT_(name, level, text...) \ + do { \ + if (debug_level_enabled(iucv_dbf_##name, level)) { \ + char* __buf = get_cpu_var(iucv_dbf_txt_buf); \ + sprintf(__buf, text); \ + debug_text_event(iucv_dbf_##name, level, __buf); \ + put_cpu_var(iucv_dbf_txt_buf); \ + } \ + } while (0) + +#define IUCV_DBF_SPRINTF(name,level,text...) 
\ + do { \ + debug_sprintf_event(iucv_dbf_trace, level, ##text ); \ + debug_sprintf_event(iucv_dbf_trace, level, text ); \ + } while (0) + +/** + * some more debug stuff + */ +#define PRINTK_HEADER " iucv: " /* for debugging */ + +/* dummy device to make sure netiucv_pm functions are called */ +static struct device *netiucv_dev; + +static int netiucv_pm_prepare(struct device *); +static void netiucv_pm_complete(struct device *); +static int netiucv_pm_freeze(struct device *); +static int netiucv_pm_restore_thaw(struct device *); + +static const struct dev_pm_ops netiucv_pm_ops = { + .prepare = netiucv_pm_prepare, + .complete = netiucv_pm_complete, + .freeze = netiucv_pm_freeze, + .thaw = netiucv_pm_restore_thaw, + .restore = netiucv_pm_restore_thaw, +}; + +static struct device_driver netiucv_driver = { + .owner = THIS_MODULE, + .name = "netiucv", + .bus = &iucv_bus, + .pm = &netiucv_pm_ops, +}; + +static int netiucv_callback_connreq(struct iucv_path *, u8 *, u8 *); +static void netiucv_callback_connack(struct iucv_path *, u8 *); +static void netiucv_callback_connrej(struct iucv_path *, u8 *); +static void netiucv_callback_connsusp(struct iucv_path *, u8 *); +static void netiucv_callback_connres(struct iucv_path *, u8 *); +static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *); +static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *); + +static struct iucv_handler netiucv_handler = { + .path_pending = netiucv_callback_connreq, + .path_complete = netiucv_callback_connack, + .path_severed = netiucv_callback_connrej, + .path_quiesced = netiucv_callback_connsusp, + .path_resumed = netiucv_callback_connres, + .message_pending = netiucv_callback_rx, + .message_complete = netiucv_callback_txdone +}; + +/** + * Per connection profiling data + */ +struct connection_profile { + unsigned long maxmulti; + unsigned long maxcqueue; + unsigned long doios_single; + unsigned long doios_multi; + unsigned long txlen; + unsigned long tx_time; + unsigned long send_stamp; + unsigned long tx_pending; + unsigned long tx_max_pending; +}; + +/** + * Representation of one iucv connection + */ +struct iucv_connection { + struct list_head list; + struct iucv_path *path; + struct sk_buff *rx_buff; + struct sk_buff *tx_buff; + struct sk_buff_head collect_queue; + struct sk_buff_head commit_queue; + spinlock_t collect_lock; + int collect_len; + int max_buffsize; + fsm_timer timer; + fsm_instance *fsm; + struct net_device *netdev; + struct connection_profile prof; + char userid[9]; + char userdata[17]; +}; + +/** + * Linked list of all connection structs. + */ +static LIST_HEAD(iucv_connection_list); +static DEFINE_RWLOCK(iucv_connection_rwlock); + +/** + * Representation of event-data for the + * connection state machine. + */ +struct iucv_event { + struct iucv_connection *conn; + void *data; +}; + +/** + * Private part of the network device structure + */ +struct netiucv_priv { + struct net_device_stats stats; + unsigned long tbusy; + fsm_instance *fsm; + struct iucv_connection *conn; + struct device *dev; + int pm_state; +}; + +/** + * Link level header for a packet. 
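+ * The 16-bit next field holds the offset of the following ll_header
+ * within the IUCV transfer buffer; a value of 0 terminates the chain
+ * (see netiucv_unpack_skb).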
+ */ +struct ll_header { + u16 next; +}; + +#define NETIUCV_HDRLEN (sizeof(struct ll_header)) +#define NETIUCV_BUFSIZE_MAX 65537 +#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX +#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN) +#define NETIUCV_MTU_DEFAULT 9216 +#define NETIUCV_QUEUELEN_DEFAULT 50 +#define NETIUCV_TIMEOUT_5SEC 5000 + +/** + * Compatibility macros for busy handling + * of network devices. + */ +static void netiucv_clear_busy(struct net_device *dev) +{ + struct netiucv_priv *priv = netdev_priv(dev); + clear_bit(0, &priv->tbusy); + netif_wake_queue(dev); +} + +static int netiucv_test_and_set_busy(struct net_device *dev) +{ + struct netiucv_priv *priv = netdev_priv(dev); + netif_stop_queue(dev); + return test_and_set_bit(0, &priv->tbusy); +} + +static u8 iucvMagic_ascii[16] = { + 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20 +}; + +static u8 iucvMagic_ebcdic[16] = { + 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, + 0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40 +}; + +/** + * Convert an iucv userId to its printable + * form (strip whitespace at end). + * + * @param An iucv userId + * + * @returns The printable string (static data!!) + */ +static char *netiucv_printname(char *name, int len) +{ + static char tmp[17]; + char *p = tmp; + memcpy(tmp, name, len); + tmp[len] = '\0'; + while (*p && ((p - tmp) < len) && (!isspace(*p))) + p++; + *p = '\0'; + return tmp; +} + +static char *netiucv_printuser(struct iucv_connection *conn) +{ + static char tmp_uid[9]; + static char tmp_udat[17]; + static char buf[100]; + + if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) { + tmp_uid[8] = '\0'; + tmp_udat[16] = '\0'; + memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8); + memcpy(tmp_udat, conn->userdata, 16); + EBCASC(tmp_udat, 16); + memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16); + sprintf(buf, "%s.%s", tmp_uid, tmp_udat); + return buf; + } else + return netiucv_printname(conn->userid, 8); +} + +/** + * States of the interface statemachine. + */ +enum dev_states { + DEV_STATE_STOPPED, + DEV_STATE_STARTWAIT, + DEV_STATE_STOPWAIT, + DEV_STATE_RUNNING, + /** + * MUST be always the last element!! + */ + NR_DEV_STATES +}; + +static const char *dev_state_names[] = { + "Stopped", + "StartWait", + "StopWait", + "Running", +}; + +/** + * Events of the interface statemachine. + */ +enum dev_events { + DEV_EVENT_START, + DEV_EVENT_STOP, + DEV_EVENT_CONUP, + DEV_EVENT_CONDOWN, + /** + * MUST be always the last element!! + */ + NR_DEV_EVENTS +}; + +static const char *dev_event_names[] = { + "Start", + "Stop", + "Connection up", + "Connection down", +}; + +/** + * Events of the connection statemachine + */ +enum conn_events { + /** + * Events, representing callbacks from + * lowlevel iucv layer) + */ + CONN_EVENT_CONN_REQ, + CONN_EVENT_CONN_ACK, + CONN_EVENT_CONN_REJ, + CONN_EVENT_CONN_SUS, + CONN_EVENT_CONN_RES, + CONN_EVENT_RX, + CONN_EVENT_TXDONE, + + /** + * Events, representing errors return codes from + * calls to lowlevel iucv layer + */ + + /** + * Event, representing timer expiry. + */ + CONN_EVENT_TIMER, + + /** + * Events, representing commands from upper levels. + */ + CONN_EVENT_START, + CONN_EVENT_STOP, + + /** + * MUST be always the last element!! 
+ */ + NR_CONN_EVENTS, +}; + +static const char *conn_event_names[] = { + "Remote connection request", + "Remote connection acknowledge", + "Remote connection reject", + "Connection suspended", + "Connection resumed", + "Data received", + "Data sent", + + "Timer", + + "Start", + "Stop", +}; + +/** + * States of the connection statemachine. + */ +enum conn_states { + /** + * Connection not assigned to any device, + * initial state, invalid + */ + CONN_STATE_INVALID, + + /** + * Userid assigned but not operating + */ + CONN_STATE_STOPPED, + + /** + * Connection registered, + * no connection request sent yet, + * no connection request received + */ + CONN_STATE_STARTWAIT, + + /** + * Connection registered and connection request sent, + * no acknowledge and no connection request received yet. + */ + CONN_STATE_SETUPWAIT, + + /** + * Connection up and running idle + */ + CONN_STATE_IDLE, + + /** + * Data sent, awaiting CONN_EVENT_TXDONE + */ + CONN_STATE_TX, + + /** + * Error during registration. + */ + CONN_STATE_REGERR, + + /** + * Error during registration. + */ + CONN_STATE_CONNERR, + + /** + * MUST be always the last element!! + */ + NR_CONN_STATES, +}; + +static const char *conn_state_names[] = { + "Invalid", + "Stopped", + "StartWait", + "SetupWait", + "Idle", + "TX", + "Terminating", + "Registration error", + "Connect error", +}; + + +/** + * Debug Facility Stuff + */ +static debug_info_t *iucv_dbf_setup = NULL; +static debug_info_t *iucv_dbf_data = NULL; +static debug_info_t *iucv_dbf_trace = NULL; + +DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf); + +static void iucv_unregister_dbf_views(void) +{ + debug_unregister(iucv_dbf_setup); + debug_unregister(iucv_dbf_data); + debug_unregister(iucv_dbf_trace); +} +static int iucv_register_dbf_views(void) +{ + iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME, + IUCV_DBF_SETUP_PAGES, + IUCV_DBF_SETUP_NR_AREAS, + IUCV_DBF_SETUP_LEN); + iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME, + IUCV_DBF_DATA_PAGES, + IUCV_DBF_DATA_NR_AREAS, + IUCV_DBF_DATA_LEN); + iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME, + IUCV_DBF_TRACE_PAGES, + IUCV_DBF_TRACE_NR_AREAS, + IUCV_DBF_TRACE_LEN); + + if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) || + (iucv_dbf_trace == NULL)) { + iucv_unregister_dbf_views(); + return -ENOMEM; + } + debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view); + debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL); + + debug_register_view(iucv_dbf_data, &debug_hex_ascii_view); + debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL); + + debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view); + debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL); + + return 0; +} + +/* + * Callback-wrappers, called from lowlevel iucv layer. 
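+ * They translate IUCV path and message events into events of the
+ * per-connection state machine by calling fsm_event().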
+ */ + +static void netiucv_callback_rx(struct iucv_path *path, + struct iucv_message *msg) +{ + struct iucv_connection *conn = path->private; + struct iucv_event ev; + + ev.conn = conn; + ev.data = msg; + fsm_event(conn->fsm, CONN_EVENT_RX, &ev); +} + +static void netiucv_callback_txdone(struct iucv_path *path, + struct iucv_message *msg) +{ + struct iucv_connection *conn = path->private; + struct iucv_event ev; + + ev.conn = conn; + ev.data = msg; + fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev); +} + +static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16]) +{ + struct iucv_connection *conn = path->private; + + fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn); +} + +static int netiucv_callback_connreq(struct iucv_path *path, u8 *ipvmid, + u8 *ipuser) +{ + struct iucv_connection *conn = path->private; + struct iucv_event ev; + static char tmp_user[9]; + static char tmp_udat[17]; + int rc; + + rc = -EINVAL; + memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8); + memcpy(tmp_udat, ipuser, 16); + EBCASC(tmp_udat, 16); + read_lock_bh(&iucv_connection_rwlock); + list_for_each_entry(conn, &iucv_connection_list, list) { + if (strncmp(ipvmid, conn->userid, 8) || + strncmp(ipuser, conn->userdata, 16)) + continue; + /* Found a matching connection for this path. */ + conn->path = path; + ev.conn = conn; + ev.data = path; + fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev); + rc = 0; + } + IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n", + tmp_user, netiucv_printname(tmp_udat, 16)); + read_unlock_bh(&iucv_connection_rwlock); + return rc; +} + +static void netiucv_callback_connrej(struct iucv_path *path, u8 *ipuser) +{ + struct iucv_connection *conn = path->private; + + fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn); +} + +static void netiucv_callback_connsusp(struct iucv_path *path, u8 *ipuser) +{ + struct iucv_connection *conn = path->private; + + fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn); +} + +static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser) +{ + struct iucv_connection *conn = path->private; + + fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn); +} + +/** + * NOP action for statemachines + */ +static void netiucv_action_nop(fsm_instance *fi, int event, void *arg) +{ +} + +/* + * Actions of the connection statemachine + */ + +/** + * netiucv_unpack_skb + * @conn: The connection where this skb has been received. + * @pskb: The received skb. + * + * Unpack a just received skb and hand it over to upper layers. + * Helper function for conn_action_rx. 
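+ * The receive buffer may carry several packets back to back, each
+ * preceded by an ll_header; every packet is copied into its own skb
+ * and passed up via netif_rx_ni().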
+ */ +static void netiucv_unpack_skb(struct iucv_connection *conn, + struct sk_buff *pskb) +{ + struct net_device *dev = conn->netdev; + struct netiucv_priv *privptr = netdev_priv(dev); + u16 offset = 0; + + skb_put(pskb, NETIUCV_HDRLEN); + pskb->dev = dev; + pskb->ip_summed = CHECKSUM_NONE; + pskb->protocol = cpu_to_be16(ETH_P_IP); + + while (1) { + struct sk_buff *skb; + struct ll_header *header = (struct ll_header *) pskb->data; + + if (!header->next) + break; + + skb_pull(pskb, NETIUCV_HDRLEN); + header->next -= offset; + offset += header->next; + header->next -= NETIUCV_HDRLEN; + if (skb_tailroom(pskb) < header->next) { + IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n", + header->next, skb_tailroom(pskb)); + return; + } + skb_put(pskb, header->next); + skb_reset_mac_header(pskb); + skb = dev_alloc_skb(pskb->len); + if (!skb) { + IUCV_DBF_TEXT(data, 2, + "Out of memory in netiucv_unpack_skb\n"); + privptr->stats.rx_dropped++; + return; + } + skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len), + pskb->len); + skb_reset_mac_header(skb); + skb->dev = pskb->dev; + skb->protocol = pskb->protocol; + pskb->ip_summed = CHECKSUM_UNNECESSARY; + privptr->stats.rx_packets++; + privptr->stats.rx_bytes += skb->len; + /* + * Since receiving is always initiated from a tasklet (in iucv.c), + * we must use netif_rx_ni() instead of netif_rx() + */ + netif_rx_ni(skb); + skb_pull(pskb, header->next); + skb_put(pskb, NETIUCV_HDRLEN); + } +} + +static void conn_action_rx(fsm_instance *fi, int event, void *arg) +{ + struct iucv_event *ev = arg; + struct iucv_connection *conn = ev->conn; + struct iucv_message *msg = ev->data; + struct netiucv_priv *privptr = netdev_priv(conn->netdev); + int rc; + + IUCV_DBF_TEXT(trace, 4, __func__); + + if (!conn->netdev) { + iucv_message_reject(conn->path, msg); + IUCV_DBF_TEXT(data, 2, + "Received data for unlinked connection\n"); + return; + } + if (msg->length > conn->max_buffsize) { + iucv_message_reject(conn->path, msg); + privptr->stats.rx_dropped++; + IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n", + msg->length, conn->max_buffsize); + return; + } + conn->rx_buff->data = conn->rx_buff->head; + skb_reset_tail_pointer(conn->rx_buff); + conn->rx_buff->len = 0; + rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data, + msg->length, NULL); + if (rc || msg->length < 5) { + privptr->stats.rx_errors++; + IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc); + return; + } + netiucv_unpack_skb(conn, conn->rx_buff); +} + +static void conn_action_txdone(fsm_instance *fi, int event, void *arg) +{ + struct iucv_event *ev = arg; + struct iucv_connection *conn = ev->conn; + struct iucv_message *msg = ev->data; + struct iucv_message txmsg; + struct netiucv_priv *privptr = NULL; + u32 single_flag = msg->tag; + u32 txbytes = 0; + u32 txpackets = 0; + u32 stat_maxcq = 0; + struct sk_buff *skb; + unsigned long saveflags; + struct ll_header header; + int rc; + + IUCV_DBF_TEXT(trace, 4, __func__); + + if (!conn || !conn->netdev) { + IUCV_DBF_TEXT(data, 2, + "Send confirmation for unlinked connection\n"); + return; + } + privptr = netdev_priv(conn->netdev); + conn->prof.tx_pending--; + if (single_flag) { + if ((skb = skb_dequeue(&conn->commit_queue))) { + refcount_dec(&skb->users); + if (privptr) { + privptr->stats.tx_packets++; + privptr->stats.tx_bytes += + (skb->len - NETIUCV_HDRLEN + - NETIUCV_HDRLEN); + } + dev_kfree_skb_any(skb); + } + } + conn->tx_buff->data = conn->tx_buff->head; + skb_reset_tail_pointer(conn->tx_buff); + conn->tx_buff->len = 0; + 
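+ /*
+ * Move everything that piled up on the collect queue while the last
+ * message was in flight into the now empty tx_buff and send it as one
+ * multi-packet IUCV message.
+ */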
spin_lock_irqsave(&conn->collect_lock, saveflags); + while ((skb = skb_dequeue(&conn->collect_queue))) { + header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN; + skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN); + skb_copy_from_linear_data(skb, + skb_put(conn->tx_buff, skb->len), + skb->len); + txbytes += skb->len; + txpackets++; + stat_maxcq++; + refcount_dec(&skb->users); + dev_kfree_skb_any(skb); + } + if (conn->collect_len > conn->prof.maxmulti) + conn->prof.maxmulti = conn->collect_len; + conn->collect_len = 0; + spin_unlock_irqrestore(&conn->collect_lock, saveflags); + if (conn->tx_buff->len == 0) { + fsm_newstate(fi, CONN_STATE_IDLE); + return; + } + + header.next = 0; + skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN); + conn->prof.send_stamp = jiffies; + txmsg.class = 0; + txmsg.tag = 0; + rc = iucv_message_send(conn->path, &txmsg, 0, 0, + conn->tx_buff->data, conn->tx_buff->len); + conn->prof.doios_multi++; + conn->prof.txlen += conn->tx_buff->len; + conn->prof.tx_pending++; + if (conn->prof.tx_pending > conn->prof.tx_max_pending) + conn->prof.tx_max_pending = conn->prof.tx_pending; + if (rc) { + conn->prof.tx_pending--; + fsm_newstate(fi, CONN_STATE_IDLE); + if (privptr) + privptr->stats.tx_errors += txpackets; + IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); + } else { + if (privptr) { + privptr->stats.tx_packets += txpackets; + privptr->stats.tx_bytes += txbytes; + } + if (stat_maxcq > conn->prof.maxcqueue) + conn->prof.maxcqueue = stat_maxcq; + } +} + +static void conn_action_connaccept(fsm_instance *fi, int event, void *arg) +{ + struct iucv_event *ev = arg; + struct iucv_connection *conn = ev->conn; + struct iucv_path *path = ev->data; + struct net_device *netdev = conn->netdev; + struct netiucv_priv *privptr = netdev_priv(netdev); + int rc; + + IUCV_DBF_TEXT(trace, 3, __func__); + + conn->path = path; + path->msglim = NETIUCV_QUEUELEN_DEFAULT; + path->flags = 0; + rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn); + if (rc) { + IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc); + return; + } + fsm_newstate(fi, CONN_STATE_IDLE); + netdev->tx_queue_len = conn->path->msglim; + fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); +} + +static void conn_action_connreject(fsm_instance *fi, int event, void *arg) +{ + struct iucv_event *ev = arg; + struct iucv_path *path = ev->data; + + IUCV_DBF_TEXT(trace, 3, __func__); + iucv_path_sever(path, NULL); +} + +static void conn_action_connack(fsm_instance *fi, int event, void *arg) +{ + struct iucv_connection *conn = arg; + struct net_device *netdev = conn->netdev; + struct netiucv_priv *privptr = netdev_priv(netdev); + + IUCV_DBF_TEXT(trace, 3, __func__); + fsm_deltimer(&conn->timer); + fsm_newstate(fi, CONN_STATE_IDLE); + netdev->tx_queue_len = conn->path->msglim; + fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev); +} + +static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg) +{ + struct iucv_connection *conn = arg; + + IUCV_DBF_TEXT(trace, 3, __func__); + fsm_deltimer(&conn->timer); + iucv_path_sever(conn->path, conn->userdata); + fsm_newstate(fi, CONN_STATE_STARTWAIT); +} + +static void conn_action_connsever(fsm_instance *fi, int event, void *arg) +{ + struct iucv_connection *conn = arg; + struct net_device *netdev = conn->netdev; + struct netiucv_priv *privptr = netdev_priv(netdev); + + IUCV_DBF_TEXT(trace, 3, __func__); + + fsm_deltimer(&conn->timer); + iucv_path_sever(conn->path, conn->userdata); + dev_info(privptr->dev, "The peer z/VM guest %s has closed the " + 
"connection\n", netiucv_printuser(conn)); + IUCV_DBF_TEXT(data, 2, + "conn_action_connsever: Remote dropped connection\n"); + fsm_newstate(fi, CONN_STATE_STARTWAIT); + fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); +} + +static void conn_action_start(fsm_instance *fi, int event, void *arg) +{ + struct iucv_connection *conn = arg; + struct net_device *netdev = conn->netdev; + struct netiucv_priv *privptr = netdev_priv(netdev); + int rc; + + IUCV_DBF_TEXT(trace, 3, __func__); + + fsm_newstate(fi, CONN_STATE_STARTWAIT); + + /* + * We must set the state before calling iucv_connect because the + * callback handler could be called at any point after the connection + * request is sent + */ + + fsm_newstate(fi, CONN_STATE_SETUPWAIT); + conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL); + IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n", + netdev->name, netiucv_printuser(conn)); + + rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid, + NULL, conn->userdata, conn); + switch (rc) { + case 0: + netdev->tx_queue_len = conn->path->msglim; + fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, + CONN_EVENT_TIMER, conn); + return; + case 11: + dev_warn(privptr->dev, + "The IUCV device failed to connect to z/VM guest %s\n", + netiucv_printname(conn->userid, 8)); + fsm_newstate(fi, CONN_STATE_STARTWAIT); + break; + case 12: + dev_warn(privptr->dev, + "The IUCV device failed to connect to the peer on z/VM" + " guest %s\n", netiucv_printname(conn->userid, 8)); + fsm_newstate(fi, CONN_STATE_STARTWAIT); + break; + case 13: + dev_err(privptr->dev, + "Connecting the IUCV device would exceed the maximum" + " number of IUCV connections\n"); + fsm_newstate(fi, CONN_STATE_CONNERR); + break; + case 14: + dev_err(privptr->dev, + "z/VM guest %s has too many IUCV connections" + " to connect with the IUCV device\n", + netiucv_printname(conn->userid, 8)); + fsm_newstate(fi, CONN_STATE_CONNERR); + break; + case 15: + dev_err(privptr->dev, + "The IUCV device cannot connect to a z/VM guest with no" + " IUCV authorization\n"); + fsm_newstate(fi, CONN_STATE_CONNERR); + break; + default: + dev_err(privptr->dev, + "Connecting the IUCV device failed with error %d\n", + rc); + fsm_newstate(fi, CONN_STATE_CONNERR); + break; + } + IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc); + kfree(conn->path); + conn->path = NULL; +} + +static void netiucv_purge_skb_queue(struct sk_buff_head *q) +{ + struct sk_buff *skb; + + while ((skb = skb_dequeue(q))) { + refcount_dec(&skb->users); + dev_kfree_skb_any(skb); + } +} + +static void conn_action_stop(fsm_instance *fi, int event, void *arg) +{ + struct iucv_event *ev = arg; + struct iucv_connection *conn = ev->conn; + struct net_device *netdev = conn->netdev; + struct netiucv_priv *privptr = netdev_priv(netdev); + + IUCV_DBF_TEXT(trace, 3, __func__); + + fsm_deltimer(&conn->timer); + fsm_newstate(fi, CONN_STATE_STOPPED); + netiucv_purge_skb_queue(&conn->collect_queue); + if (conn->path) { + IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n"); + iucv_path_sever(conn->path, conn->userdata); + kfree(conn->path); + conn->path = NULL; + } + netiucv_purge_skb_queue(&conn->commit_queue); + fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev); +} + +static void conn_action_inval(fsm_instance *fi, int event, void *arg) +{ + struct iucv_connection *conn = arg; + struct net_device *netdev = conn->netdev; + + IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n", + netdev->name, conn->userid); +} + +static const fsm_node conn_fsm[] = { + { 
CONN_STATE_INVALID, CONN_EVENT_START, conn_action_inval }, + { CONN_STATE_STOPPED, CONN_EVENT_START, conn_action_start }, + + { CONN_STATE_STOPPED, CONN_EVENT_STOP, conn_action_stop }, + { CONN_STATE_STARTWAIT, CONN_EVENT_STOP, conn_action_stop }, + { CONN_STATE_SETUPWAIT, CONN_EVENT_STOP, conn_action_stop }, + { CONN_STATE_IDLE, CONN_EVENT_STOP, conn_action_stop }, + { CONN_STATE_TX, CONN_EVENT_STOP, conn_action_stop }, + { CONN_STATE_REGERR, CONN_EVENT_STOP, conn_action_stop }, + { CONN_STATE_CONNERR, CONN_EVENT_STOP, conn_action_stop }, + + { CONN_STATE_STOPPED, CONN_EVENT_CONN_REQ, conn_action_connreject }, + { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept }, + { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept }, + { CONN_STATE_IDLE, CONN_EVENT_CONN_REQ, conn_action_connreject }, + { CONN_STATE_TX, CONN_EVENT_CONN_REQ, conn_action_connreject }, + + { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack }, + { CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER, conn_action_conntimsev }, + + { CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever }, + { CONN_STATE_IDLE, CONN_EVENT_CONN_REJ, conn_action_connsever }, + { CONN_STATE_TX, CONN_EVENT_CONN_REJ, conn_action_connsever }, + + { CONN_STATE_IDLE, CONN_EVENT_RX, conn_action_rx }, + { CONN_STATE_TX, CONN_EVENT_RX, conn_action_rx }, + + { CONN_STATE_TX, CONN_EVENT_TXDONE, conn_action_txdone }, + { CONN_STATE_IDLE, CONN_EVENT_TXDONE, conn_action_txdone }, +}; + +static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node); + + +/* + * Actions for interface - statemachine. + */ + +/** + * dev_action_start + * @fi: An instance of an interface statemachine. + * @event: The event, just happened. + * @arg: Generic pointer, casted from struct net_device * upon call. + * + * Startup connection by sending CONN_EVENT_START to it. + */ +static void dev_action_start(fsm_instance *fi, int event, void *arg) +{ + struct net_device *dev = arg; + struct netiucv_priv *privptr = netdev_priv(dev); + + IUCV_DBF_TEXT(trace, 3, __func__); + + fsm_newstate(fi, DEV_STATE_STARTWAIT); + fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn); +} + +/** + * Shutdown connection by sending CONN_EVENT_STOP to it. + * + * @param fi An instance of an interface statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from struct net_device * upon call. + */ +static void +dev_action_stop(fsm_instance *fi, int event, void *arg) +{ + struct net_device *dev = arg; + struct netiucv_priv *privptr = netdev_priv(dev); + struct iucv_event ev; + + IUCV_DBF_TEXT(trace, 3, __func__); + + ev.conn = privptr->conn; + + fsm_newstate(fi, DEV_STATE_STOPWAIT); + fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev); +} + +/** + * Called from connection statemachine + * when a connection is up and running. + * + * @param fi An instance of an interface statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from struct net_device * upon call. 
+ */ +static void +dev_action_connup(fsm_instance *fi, int event, void *arg) +{ + struct net_device *dev = arg; + struct netiucv_priv *privptr = netdev_priv(dev); + + IUCV_DBF_TEXT(trace, 3, __func__); + + switch (fsm_getstate(fi)) { + case DEV_STATE_STARTWAIT: + fsm_newstate(fi, DEV_STATE_RUNNING); + dev_info(privptr->dev, + "The IUCV device has been connected" + " successfully to %s\n", + netiucv_printuser(privptr->conn)); + IUCV_DBF_TEXT(setup, 3, + "connection is up and running\n"); + break; + case DEV_STATE_STOPWAIT: + IUCV_DBF_TEXT(data, 2, + "dev_action_connup: in DEV_STATE_STOPWAIT\n"); + break; + } +} + +/** + * Called from connection statemachine + * when a connection has been shutdown. + * + * @param fi An instance of an interface statemachine. + * @param event The event, just happened. + * @param arg Generic pointer, casted from struct net_device * upon call. + */ +static void +dev_action_conndown(fsm_instance *fi, int event, void *arg) +{ + IUCV_DBF_TEXT(trace, 3, __func__); + + switch (fsm_getstate(fi)) { + case DEV_STATE_RUNNING: + fsm_newstate(fi, DEV_STATE_STARTWAIT); + break; + case DEV_STATE_STOPWAIT: + fsm_newstate(fi, DEV_STATE_STOPPED); + IUCV_DBF_TEXT(setup, 3, "connection is down\n"); + break; + } +} + +static const fsm_node dev_fsm[] = { + { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start }, + + { DEV_STATE_STOPWAIT, DEV_EVENT_START, dev_action_start }, + { DEV_STATE_STOPWAIT, DEV_EVENT_CONDOWN, dev_action_conndown }, + + { DEV_STATE_STARTWAIT, DEV_EVENT_STOP, dev_action_stop }, + { DEV_STATE_STARTWAIT, DEV_EVENT_CONUP, dev_action_connup }, + + { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop }, + { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown }, + { DEV_STATE_RUNNING, DEV_EVENT_CONUP, netiucv_action_nop }, +}; + +static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node); + +/** + * Transmit a packet. + * This is a helper function for netiucv_tx(). + * + * @param conn Connection to be used for sending. + * @param skb Pointer to struct sk_buff of packet to send. + * The linklevel header has already been set up + * by netiucv_tx(). + * + * @return 0 on success, -ERRNO on failure. (Never fails.) + */ +static int netiucv_transmit_skb(struct iucv_connection *conn, + struct sk_buff *skb) +{ + struct iucv_message msg; + unsigned long saveflags; + struct ll_header header; + int rc; + + if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) { + int l = skb->len + NETIUCV_HDRLEN; + + spin_lock_irqsave(&conn->collect_lock, saveflags); + if (conn->collect_len + l > + (conn->max_buffsize - NETIUCV_HDRLEN)) { + rc = -EBUSY; + IUCV_DBF_TEXT(data, 2, + "EBUSY from netiucv_transmit_skb\n"); + } else { + refcount_inc(&skb->users); + skb_queue_tail(&conn->collect_queue, skb); + conn->collect_len += l; + rc = 0; + } + spin_unlock_irqrestore(&conn->collect_lock, saveflags); + } else { + struct sk_buff *nskb = skb; + /** + * Copy the skb to a new allocated skb in lowmem only if the + * data is located above 2G in memory or tailroom is < 2. + */ + unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) + + NETIUCV_HDRLEN)) >> 31; + int copied = 0; + if (hi || (skb_tailroom(skb) < 2)) { + nskb = alloc_skb(skb->len + NETIUCV_HDRLEN + + NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA); + if (!nskb) { + IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n"); + rc = -ENOMEM; + return rc; + } else { + skb_reserve(nskb, NETIUCV_HDRLEN); + skb_put_data(nskb, skb->data, skb->len); + } + copied = 1; + } + /** + * skb now is below 2G and has enough room. Add headers. 
+ */ + header.next = nskb->len + NETIUCV_HDRLEN; + memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN); + header.next = 0; + skb_put_data(nskb, &header, NETIUCV_HDRLEN); + + fsm_newstate(conn->fsm, CONN_STATE_TX); + conn->prof.send_stamp = jiffies; + + msg.tag = 1; + msg.class = 0; + rc = iucv_message_send(conn->path, &msg, 0, 0, + nskb->data, nskb->len); + conn->prof.doios_single++; + conn->prof.txlen += skb->len; + conn->prof.tx_pending++; + if (conn->prof.tx_pending > conn->prof.tx_max_pending) + conn->prof.tx_max_pending = conn->prof.tx_pending; + if (rc) { + struct netiucv_priv *privptr; + fsm_newstate(conn->fsm, CONN_STATE_IDLE); + conn->prof.tx_pending--; + privptr = netdev_priv(conn->netdev); + if (privptr) + privptr->stats.tx_errors++; + if (copied) + dev_kfree_skb(nskb); + else { + /** + * Remove our headers. They get added + * again on retransmit. + */ + skb_pull(skb, NETIUCV_HDRLEN); + skb_trim(skb, skb->len - NETIUCV_HDRLEN); + } + IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc); + } else { + if (copied) + dev_kfree_skb(skb); + refcount_inc(&nskb->users); + skb_queue_tail(&conn->commit_queue, nskb); + } + } + + return rc; +} + +/* + * Interface API for upper network layers + */ + +/** + * Open an interface. + * Called from generic network layer when ifconfig up is run. + * + * @param dev Pointer to interface struct. + * + * @return 0 on success, -ERRNO on failure. (Never fails.) + */ +static int netiucv_open(struct net_device *dev) +{ + struct netiucv_priv *priv = netdev_priv(dev); + + fsm_event(priv->fsm, DEV_EVENT_START, dev); + return 0; +} + +/** + * Close an interface. + * Called from generic network layer when ifconfig down is run. + * + * @param dev Pointer to interface struct. + * + * @return 0 on success, -ERRNO on failure. (Never fails.) + */ +static int netiucv_close(struct net_device *dev) +{ + struct netiucv_priv *priv = netdev_priv(dev); + + fsm_event(priv->fsm, DEV_EVENT_STOP, dev); + return 0; +} + +static int netiucv_pm_prepare(struct device *dev) +{ + IUCV_DBF_TEXT(trace, 3, __func__); + return 0; +} + +static void netiucv_pm_complete(struct device *dev) +{ + IUCV_DBF_TEXT(trace, 3, __func__); + return; +} + +/** + * netiucv_pm_freeze() - Freeze PM callback + * @dev: netiucv device + * + * close open netiucv interfaces + */ +static int netiucv_pm_freeze(struct device *dev) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + struct net_device *ndev = NULL; + int rc = 0; + + IUCV_DBF_TEXT(trace, 3, __func__); + if (priv && priv->conn) + ndev = priv->conn->netdev; + if (!ndev) + goto out; + netif_device_detach(ndev); + priv->pm_state = fsm_getstate(priv->fsm); + rc = netiucv_close(ndev); +out: + return rc; +} + +/** + * netiucv_pm_restore_thaw() - Thaw and restore PM callback + * @dev: netiucv device + * + * re-open netiucv interfaces closed during freeze + */ +static int netiucv_pm_restore_thaw(struct device *dev) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + struct net_device *ndev = NULL; + int rc = 0; + + IUCV_DBF_TEXT(trace, 3, __func__); + if (priv && priv->conn) + ndev = priv->conn->netdev; + if (!ndev) + goto out; + switch (priv->pm_state) { + case DEV_STATE_RUNNING: + case DEV_STATE_STARTWAIT: + rc = netiucv_open(ndev); + break; + default: + break; + } + netif_device_attach(ndev); +out: + return rc; +} + +/** + * Start transmission of a packet. + * Called from generic network device layer. + * + * @param skb Pointer to buffer containing the packet. + * @param dev Pointer to interface struct. 
+ * + * @return 0 if packet consumed, !0 if packet rejected. + * Note: If we return !0, then the packet is free'd by + * the generic network layer. + */ +static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) +{ + struct netiucv_priv *privptr = netdev_priv(dev); + int rc; + + IUCV_DBF_TEXT(trace, 4, __func__); + /** + * Some sanity checks ... + */ + if (skb == NULL) { + IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n"); + privptr->stats.tx_dropped++; + return NETDEV_TX_OK; + } + if (skb_headroom(skb) < NETIUCV_HDRLEN) { + IUCV_DBF_TEXT(data, 2, + "netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n"); + dev_kfree_skb(skb); + privptr->stats.tx_dropped++; + return NETDEV_TX_OK; + } + + /** + * If connection is not running, try to restart it + * and throw away packet. + */ + if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) { + dev_kfree_skb(skb); + privptr->stats.tx_dropped++; + privptr->stats.tx_errors++; + privptr->stats.tx_carrier_errors++; + return NETDEV_TX_OK; + } + + if (netiucv_test_and_set_busy(dev)) { + IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n"); + return NETDEV_TX_BUSY; + } + netif_trans_update(dev); + rc = netiucv_transmit_skb(privptr->conn, skb); + netiucv_clear_busy(dev); + return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK; +} + +/** + * netiucv_stats + * @dev: Pointer to interface struct. + * + * Returns interface statistics of a device. + * + * Returns pointer to stats struct of this interface. + */ +static struct net_device_stats *netiucv_stats (struct net_device * dev) +{ + struct netiucv_priv *priv = netdev_priv(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return &priv->stats; +} + +/* + * attributes in sysfs + */ + +static ssize_t user_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%s\n", netiucv_printuser(priv->conn)); +} + +static int netiucv_check_user(const char *buf, size_t count, char *username, + char *userdata) +{ + const char *p; + int i; + + p = strchr(buf, '.'); + if ((p && ((count > 26) || + ((p - buf) > 8) || + (buf + count - p > 18))) || + (!p && (count > 9))) { + IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n"); + return -EINVAL; + } + + for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) { + if (isalnum(*p) || *p == '$') { + username[i] = toupper(*p); + continue; + } + if (*p == '\n') + /* trailing lf, grr */ + break; + IUCV_DBF_TEXT_(setup, 2, + "conn_write: invalid character %02x\n", *p); + return -EINVAL; + } + while (i < 8) + username[i++] = ' '; + username[8] = '\0'; + + if (*p == '.') { + p++; + for (i = 0; i < 16 && *p; i++, p++) { + if (*p == '\n') + break; + userdata[i] = toupper(*p); + } + while (i > 0 && i < 16) + userdata[i++] = ' '; + } else + memcpy(userdata, iucvMagic_ascii, 16); + userdata[16] = '\0'; + ASCEBC(userdata, 16); + + return 0; +} + +static ssize_t user_write(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->conn->netdev; + char username[9]; + char userdata[17]; + int rc; + struct iucv_connection *cp; + + IUCV_DBF_TEXT(trace, 3, __func__); + rc = netiucv_check_user(buf, count, username, userdata); + if (rc) + return rc; + + if (memcmp(username, priv->conn->userid, 9) && + (ndev->flags & (IFF_UP | IFF_RUNNING))) { + /* username changed while the interface is active. 
*/ + IUCV_DBF_TEXT(setup, 2, "user_write: device active\n"); + return -EPERM; + } + read_lock_bh(&iucv_connection_rwlock); + list_for_each_entry(cp, &iucv_connection_list, list) { + if (!strncmp(username, cp->userid, 9) && + !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) { + read_unlock_bh(&iucv_connection_rwlock); + IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s " + "already exists\n", netiucv_printuser(cp)); + return -EEXIST; + } + } + read_unlock_bh(&iucv_connection_rwlock); + memcpy(priv->conn->userid, username, 9); + memcpy(priv->conn->userdata, userdata, 17); + return count; +} + +static DEVICE_ATTR(user, 0644, user_show, user_write); + +static ssize_t buffer_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%d\n", priv->conn->max_buffsize); +} + +static ssize_t buffer_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + struct net_device *ndev = priv->conn->netdev; + unsigned int bs1; + int rc; + + IUCV_DBF_TEXT(trace, 3, __func__); + if (count >= 39) + return -EINVAL; + + rc = kstrtouint(buf, 0, &bs1); + + if (rc == -EINVAL) { + IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %s\n", + buf); + return -EINVAL; + } + if ((rc == -ERANGE) || (bs1 > NETIUCV_BUFSIZE_MAX)) { + IUCV_DBF_TEXT_(setup, 2, + "buffer_write: buffer size %d too large\n", + bs1); + return -EINVAL; + } + if ((ndev->flags & IFF_RUNNING) && + (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) { + IUCV_DBF_TEXT_(setup, 2, + "buffer_write: buffer size %d too small\n", + bs1); + return -EINVAL; + } + if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) { + IUCV_DBF_TEXT_(setup, 2, + "buffer_write: buffer size %d too small\n", + bs1); + return -EINVAL; + } + + priv->conn->max_buffsize = bs1; + if (!(ndev->flags & IFF_RUNNING)) + ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN; + + return count; + +} + +static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write); + +static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm)); +} + +static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL); + +static ssize_t conn_fsm_show (struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm)); +} + +static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL); + +static ssize_t maxmulti_show (struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti); +} + +static ssize_t maxmulti_write (struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 4, __func__); + priv->conn->prof.maxmulti = 0; + return count; +} + +static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write); + +static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + 
return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue); +} + +static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 4, __func__); + priv->conn->prof.maxcqueue = 0; + return count; +} + +static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write); + +static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%ld\n", priv->conn->prof.doios_single); +} + +static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 4, __func__); + priv->conn->prof.doios_single = 0; + return count; +} + +static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write); + +static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi); +} + +static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + priv->conn->prof.doios_multi = 0; + return count; +} + +static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write); + +static ssize_t txlen_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%ld\n", priv->conn->prof.txlen); +} + +static ssize_t txlen_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 4, __func__); + priv->conn->prof.txlen = 0; + return count; +} + +static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write); + +static ssize_t txtime_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%ld\n", priv->conn->prof.tx_time); +} + +static ssize_t txtime_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 4, __func__); + priv->conn->prof.tx_time = 0; + return count; +} + +static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write); + +static ssize_t txpend_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending); +} + +static ssize_t txpend_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 4, __func__); + priv->conn->prof.tx_pending = 0; + return count; +} + +static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write); + +static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 5, __func__); + return sprintf(buf, "%ld\n", 
priv->conn->prof.tx_max_pending); +} + +static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct netiucv_priv *priv = dev_get_drvdata(dev); + + IUCV_DBF_TEXT(trace, 4, __func__); + priv->conn->prof.tx_max_pending = 0; + return count; +} + +static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write); + +static struct attribute *netiucv_attrs[] = { + &dev_attr_buffer.attr, + &dev_attr_user.attr, + NULL, +}; + +static struct attribute_group netiucv_attr_group = { + .attrs = netiucv_attrs, +}; + +static struct attribute *netiucv_stat_attrs[] = { + &dev_attr_device_fsm_state.attr, + &dev_attr_connection_fsm_state.attr, + &dev_attr_max_tx_buffer_used.attr, + &dev_attr_max_chained_skbs.attr, + &dev_attr_tx_single_write_ops.attr, + &dev_attr_tx_multi_write_ops.attr, + &dev_attr_netto_bytes.attr, + &dev_attr_max_tx_io_time.attr, + &dev_attr_tx_pending.attr, + &dev_attr_tx_max_pending.attr, + NULL, +}; + +static struct attribute_group netiucv_stat_attr_group = { + .name = "stats", + .attrs = netiucv_stat_attrs, +}; + +static const struct attribute_group *netiucv_attr_groups[] = { + &netiucv_stat_attr_group, + &netiucv_attr_group, + NULL, +}; + +static int netiucv_register_device(struct net_device *ndev) +{ + struct netiucv_priv *priv = netdev_priv(ndev); + struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL); + int ret; + + IUCV_DBF_TEXT(trace, 3, __func__); + + if (dev) { + dev_set_name(dev, "net%s", ndev->name); + dev->bus = &iucv_bus; + dev->parent = iucv_root; + dev->groups = netiucv_attr_groups; + /* + * The release function could be called after the + * module has been unloaded. It's _only_ task is to + * free the struct. Therefore, we specify kfree() + * directly here. (Probably a little bit obfuscating + * but legitime ...). + */ + dev->release = (void (*)(struct device *))kfree; + dev->driver = &netiucv_driver; + } else + return -ENOMEM; + + ret = device_register(dev); + if (ret) { + put_device(dev); + return ret; + } + priv->dev = dev; + dev_set_drvdata(dev, priv); + return 0; +} + +static void netiucv_unregister_device(struct device *dev) +{ + IUCV_DBF_TEXT(trace, 3, __func__); + device_unregister(dev); +} + +/** + * Allocate and initialize a new connection structure. 
+ * Add it to the list of netiucv connections; + */ +static struct iucv_connection *netiucv_new_connection(struct net_device *dev, + char *username, + char *userdata) +{ + struct iucv_connection *conn; + + conn = kzalloc(sizeof(*conn), GFP_KERNEL); + if (!conn) + goto out; + skb_queue_head_init(&conn->collect_queue); + skb_queue_head_init(&conn->commit_queue); + spin_lock_init(&conn->collect_lock); + conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT; + conn->netdev = dev; + + conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA); + if (!conn->rx_buff) + goto out_conn; + conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA); + if (!conn->tx_buff) + goto out_rx; + conn->fsm = init_fsm("netiucvconn", conn_state_names, + conn_event_names, NR_CONN_STATES, + NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN, + GFP_KERNEL); + if (!conn->fsm) + goto out_tx; + + fsm_settimer(conn->fsm, &conn->timer); + fsm_newstate(conn->fsm, CONN_STATE_INVALID); + + if (userdata) + memcpy(conn->userdata, userdata, 17); + if (username) { + memcpy(conn->userid, username, 9); + fsm_newstate(conn->fsm, CONN_STATE_STOPPED); + } + + write_lock_bh(&iucv_connection_rwlock); + list_add_tail(&conn->list, &iucv_connection_list); + write_unlock_bh(&iucv_connection_rwlock); + return conn; + +out_tx: + kfree_skb(conn->tx_buff); +out_rx: + kfree_skb(conn->rx_buff); +out_conn: + kfree(conn); +out: + return NULL; +} + +/** + * Release a connection structure and remove it from the + * list of netiucv connections. + */ +static void netiucv_remove_connection(struct iucv_connection *conn) +{ + + IUCV_DBF_TEXT(trace, 3, __func__); + write_lock_bh(&iucv_connection_rwlock); + list_del_init(&conn->list); + write_unlock_bh(&iucv_connection_rwlock); + fsm_deltimer(&conn->timer); + netiucv_purge_skb_queue(&conn->collect_queue); + if (conn->path) { + iucv_path_sever(conn->path, conn->userdata); + kfree(conn->path); + conn->path = NULL; + } + netiucv_purge_skb_queue(&conn->commit_queue); + kfree_fsm(conn->fsm); + kfree_skb(conn->rx_buff); + kfree_skb(conn->tx_buff); +} + +/** + * Release everything of a net device. + */ +static void netiucv_free_netdevice(struct net_device *dev) +{ + struct netiucv_priv *privptr = netdev_priv(dev); + + IUCV_DBF_TEXT(trace, 3, __func__); + + if (!dev) + return; + + if (privptr) { + if (privptr->conn) + netiucv_remove_connection(privptr->conn); + if (privptr->fsm) + kfree_fsm(privptr->fsm); + privptr->conn = NULL; privptr->fsm = NULL; + /* privptr gets freed by free_netdev() */ + } +} + +/** + * Initialize a net device. (Called from kernel in alloc_netdev()) + */ +static const struct net_device_ops netiucv_netdev_ops = { + .ndo_open = netiucv_open, + .ndo_stop = netiucv_close, + .ndo_get_stats = netiucv_stats, + .ndo_start_xmit = netiucv_tx, +}; + +static void netiucv_setup_netdevice(struct net_device *dev) +{ + dev->mtu = NETIUCV_MTU_DEFAULT; + dev->min_mtu = 576; + dev->max_mtu = NETIUCV_MTU_MAX; + dev->needs_free_netdev = true; + dev->priv_destructor = netiucv_free_netdevice; + dev->hard_header_len = NETIUCV_HDRLEN; + dev->addr_len = 0; + dev->type = ARPHRD_SLIP; + dev->tx_queue_len = NETIUCV_QUEUELEN_DEFAULT; + dev->flags = IFF_POINTOPOINT | IFF_NOARP; + dev->netdev_ops = &netiucv_netdev_ops; +} + +/** + * Allocate and initialize everything of a net device. 
+ */ +static struct net_device *netiucv_init_netdevice(char *username, char *userdata) +{ + struct netiucv_priv *privptr; + struct net_device *dev; + + dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d", + NET_NAME_UNKNOWN, netiucv_setup_netdevice); + if (!dev) + return NULL; + rtnl_lock(); + if (dev_alloc_name(dev, dev->name) < 0) + goto out_netdev; + + privptr = netdev_priv(dev); + privptr->fsm = init_fsm("netiucvdev", dev_state_names, + dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS, + dev_fsm, DEV_FSM_LEN, GFP_KERNEL); + if (!privptr->fsm) + goto out_netdev; + + privptr->conn = netiucv_new_connection(dev, username, userdata); + if (!privptr->conn) { + IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n"); + goto out_fsm; + } + fsm_newstate(privptr->fsm, DEV_STATE_STOPPED); + return dev; + +out_fsm: + kfree_fsm(privptr->fsm); +out_netdev: + rtnl_unlock(); + free_netdev(dev); + return NULL; +} + +static ssize_t connection_store(struct device_driver *drv, const char *buf, + size_t count) +{ + char username[9]; + char userdata[17]; + int rc; + struct net_device *dev; + struct netiucv_priv *priv; + struct iucv_connection *cp; + + IUCV_DBF_TEXT(trace, 3, __func__); + rc = netiucv_check_user(buf, count, username, userdata); + if (rc) + return rc; + + read_lock_bh(&iucv_connection_rwlock); + list_for_each_entry(cp, &iucv_connection_list, list) { + if (!strncmp(username, cp->userid, 9) && + !strncmp(userdata, cp->userdata, 17)) { + read_unlock_bh(&iucv_connection_rwlock); + IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s " + "already exists\n", netiucv_printuser(cp)); + return -EEXIST; + } + } + read_unlock_bh(&iucv_connection_rwlock); + + dev = netiucv_init_netdevice(username, userdata); + if (!dev) { + IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n"); + return -ENODEV; + } + + rc = netiucv_register_device(dev); + if (rc) { + rtnl_unlock(); + IUCV_DBF_TEXT_(setup, 2, + "ret %d from netiucv_register_device\n", rc); + goto out_free_ndev; + } + + /* sysfs magic */ + priv = netdev_priv(dev); + SET_NETDEV_DEV(dev, priv->dev); + + rc = register_netdevice(dev); + rtnl_unlock(); + if (rc) + goto out_unreg; + + dev_info(priv->dev, "The IUCV interface to %s has been established " + "successfully\n", + netiucv_printuser(priv->conn)); + + return count; + +out_unreg: + netiucv_unregister_device(priv->dev); +out_free_ndev: + netiucv_free_netdevice(dev); + return rc; +} +static DRIVER_ATTR_WO(connection); + +static ssize_t remove_store(struct device_driver *drv, const char *buf, + size_t count) +{ + struct iucv_connection *cp; + struct net_device *ndev; + struct netiucv_priv *priv; + struct device *dev; + char name[IFNAMSIZ]; + const char *p; + int i; + + IUCV_DBF_TEXT(trace, 3, __func__); + + if (count >= IFNAMSIZ) + count = IFNAMSIZ - 1; + + for (i = 0, p = buf; i < count && *p; i++, p++) { + if (*p == '\n' || *p == ' ') + /* trailing lf, grr */ + break; + name[i] = *p; + } + name[i] = '\0'; + + read_lock_bh(&iucv_connection_rwlock); + list_for_each_entry(cp, &iucv_connection_list, list) { + ndev = cp->netdev; + priv = netdev_priv(ndev); + dev = priv->dev; + if (strncmp(name, ndev->name, count)) + continue; + read_unlock_bh(&iucv_connection_rwlock); + if (ndev->flags & (IFF_UP | IFF_RUNNING)) { + dev_warn(dev, "The IUCV device is connected" + " to %s and cannot be removed\n", + priv->conn->userid); + IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); + return -EPERM; + } + unregister_netdev(ndev); + netiucv_unregister_device(dev); + return count; + } + 
read_unlock_bh(&iucv_connection_rwlock); + IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); + return -EINVAL; +} +static DRIVER_ATTR_WO(remove); + +static struct attribute * netiucv_drv_attrs[] = { + &driver_attr_connection.attr, + &driver_attr_remove.attr, + NULL, +}; + +static struct attribute_group netiucv_drv_attr_group = { + .attrs = netiucv_drv_attrs, +}; + +static const struct attribute_group *netiucv_drv_attr_groups[] = { + &netiucv_drv_attr_group, + NULL, +}; + +static void netiucv_banner(void) +{ + pr_info("driver initialized\n"); +} + +static void __exit netiucv_exit(void) +{ + struct iucv_connection *cp; + struct net_device *ndev; + struct netiucv_priv *priv; + struct device *dev; + + IUCV_DBF_TEXT(trace, 3, __func__); + while (!list_empty(&iucv_connection_list)) { + cp = list_entry(iucv_connection_list.next, + struct iucv_connection, list); + ndev = cp->netdev; + priv = netdev_priv(ndev); + dev = priv->dev; + + unregister_netdev(ndev); + netiucv_unregister_device(dev); + } + + device_unregister(netiucv_dev); + driver_unregister(&netiucv_driver); + iucv_unregister(&netiucv_handler, 1); + iucv_unregister_dbf_views(); + + pr_info("driver unloaded\n"); + return; +} + +static int __init netiucv_init(void) +{ + int rc; + + rc = iucv_register_dbf_views(); + if (rc) + goto out; + rc = iucv_register(&netiucv_handler, 1); + if (rc) + goto out_dbf; + IUCV_DBF_TEXT(trace, 3, __func__); + netiucv_driver.groups = netiucv_drv_attr_groups; + rc = driver_register(&netiucv_driver); + if (rc) { + IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); + goto out_iucv; + } + /* establish dummy device */ + netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL); + if (!netiucv_dev) { + rc = -ENOMEM; + goto out_driver; + } + dev_set_name(netiucv_dev, "netiucv"); + netiucv_dev->bus = &iucv_bus; + netiucv_dev->parent = iucv_root; + netiucv_dev->release = (void (*)(struct device *))kfree; + netiucv_dev->driver = &netiucv_driver; + rc = device_register(netiucv_dev); + if (rc) { + put_device(netiucv_dev); + goto out_driver; + } + netiucv_banner(); + return rc; + +out_driver: + driver_unregister(&netiucv_driver); +out_iucv: + iucv_unregister(&netiucv_handler, 1); +out_dbf: + iucv_unregister_dbf_views(); +out: + return rc; +} + +module_init(netiucv_init); +module_exit(netiucv_exit); +MODULE_LICENSE("GPL"); diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h new file mode 100644 index 000000000..b2657582c --- /dev/null +++ b/drivers/s390/net/qeth_core.h @@ -0,0 +1,1073 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 
2007 + * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, + * Frank Pavlic <fpavlic@de.ibm.com>, + * Thomas Spatzier <tspat@de.ibm.com>, + * Frank Blaschka <frank.blaschka@de.ibm.com> + */ + +#ifndef __QETH_CORE_H__ +#define __QETH_CORE_H__ + +#include <linux/if.h> +#include <linux/if_arp.h> +#include <linux/etherdevice.h> +#include <linux/if_vlan.h> +#include <linux/ctype.h> +#include <linux/in6.h> +#include <linux/bitops.h> +#include <linux/seq_file.h> +#include <linux/ethtool.h> +#include <linux/hashtable.h> +#include <linux/ip.h> +#include <linux/refcount.h> +#include <linux/workqueue.h> + +#include <net/ipv6.h> +#include <net/if_inet6.h> +#include <net/addrconf.h> + +#include <asm/debug.h> +#include <asm/qdio.h> +#include <asm/ccwdev.h> +#include <asm/ccwgroup.h> +#include <asm/sysinfo.h> + +#include "qeth_core_mpc.h" + +/** + * Debug Facility stuff + */ +enum qeth_dbf_names { + QETH_DBF_SETUP, + QETH_DBF_MSG, + QETH_DBF_CTRL, + QETH_DBF_INFOS /* must be last element */ +}; + +struct qeth_dbf_info { + char name[DEBUG_MAX_NAME_LEN]; + int pages; + int areas; + int len; + int level; + struct debug_view *view; + debug_info_t *id; +}; + +#define QETH_DBF_CTRL_LEN 256 + +#define QETH_DBF_TEXT(name, level, text) \ + debug_text_event(qeth_dbf[QETH_DBF_##name].id, level, text) + +#define QETH_DBF_HEX(name, level, addr, len) \ + debug_event(qeth_dbf[QETH_DBF_##name].id, level, (void *)(addr), len) + +#define QETH_DBF_MESSAGE(level, text...) \ + debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text) + +#define QETH_DBF_TEXT_(name, level, text...) \ + qeth_dbf_longtext(qeth_dbf[QETH_DBF_##name].id, level, text) + +#define QETH_CARD_TEXT(card, level, text) \ + debug_text_event(card->debug, level, text) + +#define QETH_CARD_HEX(card, level, addr, len) \ + debug_event(card->debug, level, (void *)(addr), len) + +#define QETH_CARD_MESSAGE(card, text...) \ + debug_sprintf_event(card->debug, level, text) + +#define QETH_CARD_TEXT_(card, level, text...) 
\ + qeth_dbf_longtext(card->debug, level, text) + +#define SENSE_COMMAND_REJECT_BYTE 0 +#define SENSE_COMMAND_REJECT_FLAG 0x80 +#define SENSE_RESETTING_EVENT_BYTE 1 +#define SENSE_RESETTING_EVENT_FLAG 0x80 + +/* + * Common IO related definitions + */ +#define CARD_RDEV(card) card->read.ccwdev +#define CARD_WDEV(card) card->write.ccwdev +#define CARD_DDEV(card) card->data.ccwdev +#define CARD_BUS_ID(card) dev_name(&card->gdev->dev) +#define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev) +#define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev) +#define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev) +#define CHANNEL_ID(channel) dev_name(&channel->ccwdev->dev) + +/** + * card stuff + */ +struct qeth_perf_stats { + unsigned int bufs_rec; + unsigned int bufs_sent; + unsigned int buf_elements_sent; + + unsigned int skbs_sent_pack; + unsigned int bufs_sent_pack; + + unsigned int sc_dp_p; + unsigned int sc_p_dp; + /* qdio_cq_handler: number of times called, time spent in */ + __u64 cq_start_time; + unsigned int cq_cnt; + unsigned int cq_time; + /* qdio_input_handler: number of times called, time spent in */ + __u64 inbound_start_time; + unsigned int inbound_cnt; + unsigned int inbound_time; + /* qeth_send_packet: number of times called, time spent in */ + __u64 outbound_start_time; + unsigned int outbound_cnt; + unsigned int outbound_time; + /* qdio_output_handler: number of times called, time spent in */ + __u64 outbound_handler_start_time; + unsigned int outbound_handler_cnt; + unsigned int outbound_handler_time; + /* number of calls to and time spent in do_QDIO for inbound queue */ + __u64 inbound_do_qdio_start_time; + unsigned int inbound_do_qdio_cnt; + unsigned int inbound_do_qdio_time; + /* number of calls to and time spent in do_QDIO for outbound queues */ + __u64 outbound_do_qdio_start_time; + unsigned int outbound_do_qdio_cnt; + unsigned int outbound_do_qdio_time; + unsigned int large_send_bytes; + unsigned int large_send_cnt; + unsigned int sg_skbs_sent; + /* initial values when measuring starts */ + unsigned long initial_rx_packets; + unsigned long initial_tx_packets; + /* inbound scatter gather data */ + unsigned int sg_skbs_rx; + unsigned int sg_frags_rx; + unsigned int sg_alloc_page_rx; + unsigned int tx_csum; + unsigned int tx_lin; + unsigned int tx_linfail; + unsigned int rx_csum; +}; + +/* Routing stuff */ +struct qeth_routing_info { + enum qeth_routing_types type; +}; + +/* IPA stuff */ +struct qeth_ipa_info { + __u32 supported_funcs; + __u32 enabled_funcs; +}; + +/* SETBRIDGEPORT stuff */ +enum qeth_sbp_roles { + QETH_SBP_ROLE_NONE = 0, + QETH_SBP_ROLE_PRIMARY = 1, + QETH_SBP_ROLE_SECONDARY = 2, +}; + +enum qeth_sbp_states { + QETH_SBP_STATE_INACTIVE = 0, + QETH_SBP_STATE_STANDBY = 1, + QETH_SBP_STATE_ACTIVE = 2, +}; + +#define QETH_SBP_HOST_NOTIFICATION 1 + +struct qeth_sbp_info { + __u32 supported_funcs; + enum qeth_sbp_roles role; + __u32 hostnotification:1; + __u32 reflect_promisc:1; + __u32 reflect_promisc_primary:1; +}; + +struct qeth_vnicc_info { + /* supported/currently configured VNICCs; updated in IPA exchanges */ + u32 sup_chars; + u32 cur_chars; + /* supported commands: bitmasks which VNICCs support respective cmd */ + u32 set_char_sup; + u32 getset_timeout_sup; + /* timeout value for the learning characteristic */ + u32 learning_timeout; + /* characteristics wanted/configured by user */ + u32 wanted_chars; + /* has user explicitly enabled rx_bcast while online? 
*/ + bool rx_bcast_enabled; +}; + +static inline int qeth_is_adp_supported(struct qeth_ipa_info *ipa, + enum qeth_ipa_setadp_cmd func) +{ + return (ipa->supported_funcs & func); +} + +static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa, + enum qeth_ipa_funcs func) +{ + return (ipa->supported_funcs & func); +} + +static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, + enum qeth_ipa_funcs func) +{ + return (ipa->supported_funcs & ipa->enabled_funcs & func); +} + +#define qeth_adp_supported(c, f) \ + qeth_is_adp_supported(&c->options.adp, f) +#define qeth_is_supported(c, f) \ + qeth_is_ipa_supported(&c->options.ipa4, f) +#define qeth_is_enabled(c, f) \ + qeth_is_ipa_enabled(&c->options.ipa4, f) +#define qeth_is_supported6(c, f) \ + qeth_is_ipa_supported(&c->options.ipa6, f) +#define qeth_is_enabled6(c, f) \ + qeth_is_ipa_enabled(&c->options.ipa6, f) +#define qeth_is_ipafunc_supported(c, prot, f) \ + ((prot == QETH_PROT_IPV6) ? \ + qeth_is_supported6(c, f) : qeth_is_supported(c, f)) +#define qeth_is_ipafunc_enabled(c, prot, f) \ + ((prot == QETH_PROT_IPV6) ? \ + qeth_is_enabled6(c, f) : qeth_is_enabled(c, f)) + +#define QETH_IDX_FUNC_LEVEL_OSD 0x0101 +#define QETH_IDX_FUNC_LEVEL_IQD 0x4108 + +#define QETH_BUFSIZE 4096 +#define CCW_CMD_WRITE 0x01 +#define CCW_CMD_READ 0x02 + +/** + * some more defs + */ +#define QETH_TX_TIMEOUT 100 * HZ +#define QETH_RCD_TIMEOUT 60 * HZ +#define QETH_RECLAIM_WORK_TIME HZ +#define QETH_MAX_PORTNO 15 + +/*IPv6 address autoconfiguration stuff*/ +#define UNIQUE_ID_IF_CREATE_ADDR_FAILED 0xfffe +#define UNIQUE_ID_NOT_BY_CARD 0x10000 + +/*****************************************************************************/ +/* QDIO queue and buffer handling */ +/*****************************************************************************/ +#define QETH_MAX_QUEUES 4 +#define QETH_IN_BUF_SIZE_DEFAULT 65536 +#define QETH_IN_BUF_COUNT_DEFAULT 64 +#define QETH_IN_BUF_COUNT_HSDEFAULT 128 +#define QETH_IN_BUF_COUNT_MIN 8 +#define QETH_IN_BUF_COUNT_MAX 128 +#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12) +#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \ + ((card)->qdio.in_buf_pool.buf_count / 2) + +/* buffers we have to be behind before we get a PCI */ +#define QETH_PCI_THRESHOLD_A(card) ((card)->qdio.in_buf_pool.buf_count+1) +/*enqueued free buffers left before we get a PCI*/ +#define QETH_PCI_THRESHOLD_B(card) 0 +/*not used unless the microcode gets patched*/ +#define QETH_PCI_TIMER_VALUE(card) 3 + +/* priority queing */ +#define QETH_PRIOQ_DEFAULT QETH_NO_PRIO_QUEUEING +#define QETH_DEFAULT_QUEUE 2 +#define QETH_NO_PRIO_QUEUEING 0 +#define QETH_PRIO_Q_ING_PREC 1 +#define QETH_PRIO_Q_ING_TOS 2 +#define QETH_PRIO_Q_ING_SKB 3 +#define QETH_PRIO_Q_ING_VLAN 4 + +/* Packing */ +#define QETH_LOW_WATERMARK_PACK 2 +#define QETH_HIGH_WATERMARK_PACK 5 +#define QETH_WATERMARK_PACK_FUZZ 1 + +/* large receive scatter gather copy break */ +#define QETH_RX_SG_CB (PAGE_SIZE >> 1) +#define QETH_RX_PULL_LEN 256 + +struct qeth_hdr_layer3 { + __u8 id; + __u8 flags; + __u16 inbound_checksum; /*TSO:__u16 seqno */ + __u32 token; /*TSO: __u32 reserved */ + __u16 length; + __u8 vlan_prio; + __u8 ext_flags; + __u16 vlan_id; + __u16 frame_offset; + union { + /* TX: */ + u8 ipv6_addr[16]; + struct ipv4 { + u8 res[12]; + u32 addr; + } ipv4; + /* RX: */ + struct rx { + u8 res1[2]; + u8 src_mac[6]; + u8 res2[4]; + u16 vlan_id; + u8 res3[2]; + } rx; + } next_hop; +}; + +struct qeth_hdr_layer2 { + __u8 id; + __u8 flags[3]; + __u8 port_no; + __u8 hdr_length; + __u16 
pkt_length; + __u16 seq_no; + __u16 vlan_id; + __u32 reserved; + __u8 reserved2[16]; +} __attribute__ ((packed)); + +struct qeth_hdr_osn { + __u8 id; + __u8 reserved; + __u16 seq_no; + __u16 reserved2; + __u16 control_flags; + __u16 pdu_length; + __u8 reserved3[18]; + __u32 ccid; +} __attribute__ ((packed)); + +struct qeth_hdr { + union { + struct qeth_hdr_layer2 l2; + struct qeth_hdr_layer3 l3; + struct qeth_hdr_osn osn; + } hdr; +} __attribute__ ((packed)); + +/*TCP Segmentation Offload header*/ +struct qeth_hdr_ext_tso { + __u16 hdr_tot_len; + __u8 imb_hdr_no; + __u8 reserved; + __u8 hdr_type; + __u8 hdr_version; + __u16 hdr_len; + __u32 payload_len; + __u16 mss; + __u16 dg_hdr_len; + __u8 padding[16]; +} __attribute__ ((packed)); + +struct qeth_hdr_tso { + struct qeth_hdr hdr; /*hdr->hdr.l3.xxx*/ + struct qeth_hdr_ext_tso ext; +} __attribute__ ((packed)); + + +/* flags for qeth_hdr.flags */ +#define QETH_HDR_PASSTHRU 0x10 +#define QETH_HDR_IPV6 0x80 +#define QETH_HDR_CAST_MASK 0x07 +enum qeth_cast_flags { + QETH_CAST_UNICAST = 0x06, + QETH_CAST_MULTICAST = 0x04, + QETH_CAST_BROADCAST = 0x05, + QETH_CAST_ANYCAST = 0x07, + QETH_CAST_NOCAST = 0x00, +}; + +enum qeth_layer2_frame_flags { + QETH_LAYER2_FLAG_MULTICAST = 0x01, + QETH_LAYER2_FLAG_BROADCAST = 0x02, + QETH_LAYER2_FLAG_UNICAST = 0x04, + QETH_LAYER2_FLAG_VLAN = 0x10, +}; + +enum qeth_header_ids { + QETH_HEADER_TYPE_LAYER3 = 0x01, + QETH_HEADER_TYPE_LAYER2 = 0x02, + QETH_HEADER_TYPE_TSO = 0x03, + QETH_HEADER_TYPE_OSN = 0x04, +}; +/* flags for qeth_hdr.ext_flags */ +#define QETH_HDR_EXT_VLAN_FRAME 0x01 +#define QETH_HDR_EXT_TOKEN_ID 0x02 +#define QETH_HDR_EXT_INCLUDE_VLAN_TAG 0x04 +#define QETH_HDR_EXT_SRC_MAC_ADDR 0x08 +#define QETH_HDR_EXT_CSUM_HDR_REQ 0x10 +#define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20 +#define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/ + +enum qeth_qdio_buffer_states { + /* + * inbound: read out by driver; owned by hardware in order to be filled + * outbound: owned by driver in order to be filled + */ + QETH_QDIO_BUF_EMPTY, + /* + * inbound: filled by hardware; owned by driver in order to be read out + * outbound: filled by driver; owned by hardware in order to be sent + */ + QETH_QDIO_BUF_PRIMED, + /* + * inbound: not applicable + * outbound: identified to be pending in TPQ + */ + QETH_QDIO_BUF_PENDING, + /* + * inbound: not applicable + * outbound: found in completion queue + */ + QETH_QDIO_BUF_IN_CQ, + /* + * inbound: not applicable + * outbound: handled via transfer pending / completion queue + */ + QETH_QDIO_BUF_HANDLED_DELAYED, +}; + +enum qeth_qdio_info_states { + QETH_QDIO_UNINITIALIZED, + QETH_QDIO_ALLOCATED, + QETH_QDIO_ESTABLISHED, + QETH_QDIO_CLEANING +}; + +struct qeth_buffer_pool_entry { + struct list_head list; + struct list_head init_list; + void *elements[QDIO_MAX_ELEMENTS_PER_BUFFER]; +}; + +struct qeth_qdio_buffer_pool { + struct list_head entry_list; + int buf_count; +}; + +struct qeth_qdio_buffer { + struct qdio_buffer *buffer; + /* the buffer pool entry currently associated to this buffer */ + struct qeth_buffer_pool_entry *pool_entry; + struct sk_buff *rx_skb; +}; + +struct qeth_qdio_q { + struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; + struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q]; + int next_buf_to_init; +}; + +struct qeth_qdio_out_buffer { + struct qdio_buffer *buffer; + atomic_t state; + int next_element_to_fill; + struct sk_buff_head skb_list; + int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER]; + + struct qeth_qdio_out_q *q; + struct qeth_qdio_out_buffer *next_pending; +}; + 
+struct qeth_card; + +enum qeth_out_q_states { + QETH_OUT_Q_UNLOCKED, + QETH_OUT_Q_LOCKED, + QETH_OUT_Q_LOCKED_FLUSH, +}; + +struct qeth_qdio_out_q { + struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; + struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q]; + struct qdio_outbuf_state *bufstates; /* convenience pointer */ + int queue_no; + struct qeth_card *card; + atomic_t state; + int do_pack; + /* + * index of buffer to be filled by driver; state EMPTY or PACKING + */ + int next_buf_to_fill; + /* + * number of buffers that are currently filled (PRIMED) + * -> these buffers are hardware-owned + */ + atomic_t used_buffers; + /* indicates whether PCI flag must be set (or if one is outstanding) */ + atomic_t set_pci_flags_count; +}; + +struct qeth_qdio_info { + atomic_t state; + /* input */ + int no_in_queues; + struct qeth_qdio_q *in_q; + struct qeth_qdio_q *c_q; + struct qeth_qdio_buffer_pool in_buf_pool; + struct qeth_qdio_buffer_pool init_pool; + int in_buf_size; + + /* output */ + int no_out_queues; + struct qeth_qdio_out_q **out_qs; + struct qdio_outbuf_state *out_bufstates; + + /* priority queueing */ + int do_prio_queueing; + int default_out_queue; +}; + +/** + * buffer stuff for read channel + */ +#define QETH_CMD_BUFFER_NO 8 + +/** + * channel state machine + */ +enum qeth_channel_states { + CH_STATE_UP, + CH_STATE_DOWN, + CH_STATE_ACTIVATING, + CH_STATE_HALTED, + CH_STATE_STOPPED, + CH_STATE_RCD, + CH_STATE_RCD_DONE, +}; +/** + * card state machine + */ +enum qeth_card_states { + CARD_STATE_DOWN, + CARD_STATE_HARDSETUP, + CARD_STATE_SOFTSETUP, + CARD_STATE_UP, + CARD_STATE_RECOVER, +}; + +/** + * Protocol versions + */ +enum qeth_prot_versions { + QETH_PROT_IPV4 = 0x0004, + QETH_PROT_IPV6 = 0x0006, +}; + +enum qeth_cmd_buffer_state { + BUF_STATE_FREE, + BUF_STATE_LOCKED, +}; + +enum qeth_cq { + QETH_CQ_DISABLED = 0, + QETH_CQ_ENABLED = 1, + QETH_CQ_NOTAVAILABLE = 2, +}; + +struct qeth_ipato { + bool enabled; + bool invert4; + bool invert6; + struct list_head entries; +}; + +struct qeth_channel; + +struct qeth_cmd_buffer { + enum qeth_cmd_buffer_state state; + struct qeth_channel *channel; + unsigned char *data; + int rc; + void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *); +}; + +static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob) +{ + return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); +} + +/** + * definition of a qeth channel, used for read and write + */ +struct qeth_channel { + enum qeth_channel_states state; + struct ccw1 *ccw; + spinlock_t iob_lock; + wait_queue_head_t wait_q; + struct ccw_device *ccwdev; +/*command buffer for control data*/ + struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO]; + atomic_t irq_pending; + int io_buf_no; +}; + +/** + * OSA card related definitions + */ +struct qeth_token { + __u32 issuer_rm_w; + __u32 issuer_rm_r; + __u32 cm_filter_w; + __u32 cm_filter_r; + __u32 cm_connection_w; + __u32 cm_connection_r; + __u32 ulp_filter_w; + __u32 ulp_filter_r; + __u32 ulp_connection_w; + __u32 ulp_connection_r; +}; + +struct qeth_seqno { + __u32 trans_hdr; + __u32 pdu_hdr; + __u32 pdu_hdr_ack; + __u16 ipa; + __u32 pkt_seqno; +}; + +struct qeth_reply { + struct list_head list; + wait_queue_head_t wait_q; + int (*callback)(struct qeth_card *, struct qeth_reply *, + unsigned long); + u32 seqno; + unsigned long offset; + atomic_t received; + int rc; + void *param; + struct qeth_card *card; + refcount_t refcnt; +}; + +struct qeth_card_blkt { + int time_total; + int inter_packet; + int inter_packet_jumbo; 
+}; + +#define QETH_BROADCAST_WITH_ECHO 0x01 +#define QETH_BROADCAST_WITHOUT_ECHO 0x02 +#define QETH_LAYER2_MAC_READ 0x01 +#define QETH_LAYER2_MAC_REGISTERED 0x02 +struct qeth_card_info { + unsigned short unit_addr2; + unsigned short cula; + unsigned short chpid; + __u16 func_level; + char mcl_level[QETH_MCL_LENGTH + 1]; + int guestlan; + int mac_bits; + enum qeth_card_types type; + enum qeth_link_types link_type; + int broadcast_capable; + int unique_id; + bool layer_enforced; + struct qeth_card_blkt blkt; + enum qeth_ipa_promisc_modes promisc_mode; + __u32 diagass_support; + __u32 hwtrap; +}; + +struct qeth_card_options { + struct qeth_routing_info route4; + struct qeth_ipa_info ipa4; + struct qeth_ipa_info adp; /*Adapter parameters*/ + struct qeth_routing_info route6; + struct qeth_ipa_info ipa6; + struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */ + struct qeth_vnicc_info vnicc; /* VNICC options */ + int fake_broadcast; + int layer2; + int performance_stats; + int rx_sg_cb; + enum qeth_ipa_isolation_modes isolation; + enum qeth_ipa_isolation_modes prev_isolation; + int sniffer; + enum qeth_cq cq; + char hsuid[9]; +}; + +/* + * thread bits for qeth_card thread masks + */ +enum qeth_threads { + QETH_RECOVER_THREAD = 1, +}; + +struct qeth_osn_info { + int (*assist_cb)(struct net_device *dev, void *data); + int (*data_cb)(struct sk_buff *skb); +}; + +enum qeth_discipline_id { + QETH_DISCIPLINE_UNDETERMINED = -1, + QETH_DISCIPLINE_LAYER3 = 0, + QETH_DISCIPLINE_LAYER2 = 1, +}; + +struct qeth_discipline { + const struct device_type *devtype; + int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done); + int (*recover)(void *ptr); + int (*setup) (struct ccwgroup_device *); + void (*remove) (struct ccwgroup_device *); + int (*set_online) (struct ccwgroup_device *); + int (*set_offline) (struct ccwgroup_device *); + int (*freeze)(struct ccwgroup_device *); + int (*thaw) (struct ccwgroup_device *); + int (*restore)(struct ccwgroup_device *); + int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd); + int (*control_event_handler)(struct qeth_card *card, + struct qeth_ipa_cmd *cmd); +}; + +struct qeth_vlan_vid { + struct list_head list; + unsigned short vid; +}; + +enum qeth_addr_disposition { + QETH_DISP_ADDR_DELETE = 0, + QETH_DISP_ADDR_DO_NOTHING = 1, + QETH_DISP_ADDR_ADD = 2, +}; + +struct qeth_rx { + int b_count; + int b_index; + struct qdio_buffer_element *b_element; + int e_offset; + int qdio_err; +}; + +struct carrier_info { + __u8 card_type; + __u16 port_mode; + __u32 port_speed; +}; + +struct qeth_switch_info { + __u32 capabilities; + __u32 settings; +}; + +#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT + +struct qeth_card { + struct list_head list; + enum qeth_card_states state; + int lan_online; + spinlock_t lock; + struct ccwgroup_device *gdev; + struct qeth_channel read; + struct qeth_channel write; + struct qeth_channel data; + + struct net_device *dev; + struct net_device_stats stats; + + struct qeth_card_info info; + struct qeth_token token; + struct qeth_seqno seqno; + struct qeth_card_options options; + + wait_queue_head_t wait_q; + spinlock_t mclock; + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + struct mutex vid_list_mutex; /* vid_list */ + struct list_head vid_list; + DECLARE_HASHTABLE(mac_htable, 4); + DECLARE_HASHTABLE(ip_htable, 4); + DECLARE_HASHTABLE(ip_mc_htable, 4); + struct work_struct kernel_thread_starter; + spinlock_t thread_mask_lock; + unsigned long thread_start_mask; + unsigned long thread_allowed_mask; + unsigned long 
thread_running_mask; + struct task_struct *recovery_task; + spinlock_t ip_lock; + struct qeth_ipato ipato; + struct list_head cmd_waiter_list; + /* QDIO buffer handling */ + struct qeth_qdio_info qdio; + struct qeth_perf_stats perf_stats; + int read_or_write_problem; + struct qeth_osn_info osn_info; + struct qeth_discipline *discipline; + atomic_t force_alloc_skb; + struct service_level qeth_service_level; + struct qdio_ssqd_desc ssqd; + debug_info_t *debug; + struct mutex conf_mutex; + struct mutex discipline_mutex; + struct napi_struct napi; + struct qeth_rx rx; + struct delayed_work buffer_reclaim_work; + int reclaim_index; + struct work_struct close_dev_work; +}; + +struct qeth_card_list_struct { + struct list_head list; + rwlock_t rwlock; +}; + +struct qeth_trap_id { + __u16 lparnr; + char vmname[8]; + __u8 chpid; + __u8 ssid; + __u16 devno; +} __packed; + +/*some helper functions*/ +#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "") + +static inline bool qeth_netdev_is_registered(struct net_device *dev) +{ + return dev->netdev_ops != NULL; +} + +static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf, + unsigned int elements) +{ + unsigned int i; + + for (i = 0; i < elements; i++) + memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element)); + buf->element[14].sflags = 0; + buf->element[15].sflags = 0; +} + +/** + * qeth_get_elements_for_range() - find number of SBALEs to cover range. + * @start: Start of the address range. + * @end: Address after the end of the range. + * + * Returns the number of pages, and thus QDIO buffer elements, needed to cover + * the specified address range. + */ +static inline int qeth_get_elements_for_range(addr_t start, addr_t end) +{ + return PFN_UP(end) - PFN_DOWN(start); +} + +static inline int qeth_get_micros(void) +{ + return (int) (get_tod_clock() >> 12); +} + +static inline int qeth_get_ip_version(struct sk_buff *skb) +{ + struct vlan_ethhdr *veth = vlan_eth_hdr(skb); + __be16 prot = veth->h_vlan_proto; + + if (prot == htons(ETH_P_8021Q)) + prot = veth->h_vlan_encapsulated_proto; + + switch (prot) { + case htons(ETH_P_IPV6): + return 6; + case htons(ETH_P_IP): + return 4; + default: + return 0; + } +} + +static inline void qeth_rx_csum(struct qeth_card *card, struct sk_buff *skb, + u8 flags) +{ + if ((card->dev->features & NETIF_F_RXCSUM) && + (flags & QETH_HDR_EXT_CSUM_TRANSP_REQ)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + if (card->options.performance_stats) + card->perf_stats.rx_csum++; + } else { + skb->ip_summed = CHECKSUM_NONE; + } +} + +static inline void qeth_tx_csum(struct sk_buff *skb, u8 *flags, int ipv) +{ + *flags |= QETH_HDR_EXT_CSUM_TRANSP_REQ; + if ((ipv == 4 && ip_hdr(skb)->protocol == IPPROTO_UDP) || + (ipv == 6 && ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)) + *flags |= QETH_HDR_EXT_UDP; + if (ipv == 4) { + /* some HW requires combined L3+L4 csum offload: */ + *flags |= QETH_HDR_EXT_CSUM_HDR_REQ; + ip_hdr(skb)->check = 0; + } +} + +static inline void qeth_put_buffer_pool_entry(struct qeth_card *card, + struct qeth_buffer_pool_entry *entry) +{ + list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list); +} + +static inline int qeth_is_diagass_supported(struct qeth_card *card, + enum qeth_diags_cmds cmd) +{ + return card->info.diagass_support & (__u32)cmd; +} + +int qeth_send_simple_setassparms_prot(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + u16 cmd_code, long data, + enum qeth_prot_versions prot); +/* IPv4 variant */ +static inline int qeth_send_simple_setassparms(struct 
qeth_card *card, + enum qeth_ipa_funcs ipa_func, + u16 cmd_code, long data) +{ + return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code, + data, QETH_PROT_IPV4); +} + +static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + u16 cmd_code, long data) +{ + return qeth_send_simple_setassparms_prot(card, ipa_func, cmd_code, + data, QETH_PROT_IPV6); +} + +int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, + int ipv); +static inline struct qeth_qdio_out_q *qeth_get_tx_queue(struct qeth_card *card, + struct sk_buff *skb, + int ipv, int cast_type) +{ + if (IS_IQD(card) && cast_type != RTN_UNICAST) + return card->qdio.out_qs[card->qdio.no_out_queues - 1]; + if (!card->qdio.do_prio_queueing) + return card->qdio.out_qs[card->qdio.default_out_queue]; + return card->qdio.out_qs[qeth_get_priority_queue(card, skb, ipv)]; +} + +extern struct qeth_discipline qeth_l2_discipline; +extern struct qeth_discipline qeth_l3_discipline; +extern const struct attribute_group *qeth_generic_attr_groups[]; +extern const struct attribute_group *qeth_osn_attr_groups[]; +extern const struct attribute_group qeth_device_attr_group; +extern const struct attribute_group qeth_device_blkt_group; +extern const struct device_type qeth_generic_devtype; +extern struct workqueue_struct *qeth_wq; + +int qeth_card_hw_is_reachable(struct qeth_card *); +const char *qeth_get_cardname_short(struct qeth_card *); +int qeth_realloc_buffer_pool(struct qeth_card *, int); +int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id); +void qeth_core_free_discipline(struct qeth_card *); + +/* exports for qeth discipline device drivers */ +extern struct qeth_card_list_struct qeth_core_card_list; +extern struct kmem_cache *qeth_core_header_cache; +extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; + +struct net_device *qeth_clone_netdev(struct net_device *orig); +void qeth_set_recovery_task(struct qeth_card *); +void qeth_clear_recovery_task(struct qeth_card *); +void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); +int qeth_threads_running(struct qeth_card *, unsigned long); +int qeth_wait_for_threads(struct qeth_card *, unsigned long); +int qeth_do_run_thread(struct qeth_card *, unsigned long); +void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long); +void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long); +int qeth_core_hardsetup_card(struct qeth_card *); +void qeth_print_status_message(struct qeth_card *); +int qeth_init_qdio_queues(struct qeth_card *); +int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, + int (*reply_cb) + (struct qeth_card *, struct qeth_reply *, unsigned long), + void *); +struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *, + enum qeth_ipa_cmds, enum qeth_prot_versions); +struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, + struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *, + struct qeth_hdr **); +void qeth_schedule_recovery(struct qeth_card *); +int qeth_poll(struct napi_struct *napi, int budget); +void qeth_clear_ipacmd_list(struct qeth_card *); +int qeth_qdio_clear_card(struct qeth_card *, int); +void qeth_clear_working_pool_list(struct qeth_card *); +void qeth_clear_cmd_buffers(struct qeth_channel *); +void qeth_clear_qdio_buffers(struct qeth_card *); +void qeth_setadp_promisc_mode(struct qeth_card *); +struct net_device_stats *qeth_get_stats(struct net_device *); +int qeth_setadpparms_change_macaddr(struct qeth_card *); 
+void qeth_tx_timeout(struct net_device *); +void qeth_prepare_control_data(struct qeth_card *, int, + struct qeth_cmd_buffer *); +void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *); +void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob); +struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *); +int qeth_query_switch_attributes(struct qeth_card *card, + struct qeth_switch_info *sw_info); +int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, + int (*reply_cb)(struct qeth_card *, struct qeth_reply*, unsigned long), + void *reply_param); +int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, + int extra_elems, int data_offset); +int qeth_get_elements_for_frags(struct sk_buff *); +int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb, + struct qeth_hdr *hdr, unsigned int offset, + unsigned int hd_len); +int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned int offset, unsigned int hd_len, + int elements_needed); +int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +int qeth_core_get_sset_count(struct net_device *, int); +void qeth_core_get_ethtool_stats(struct net_device *, + struct ethtool_stats *, u64 *); +void qeth_core_get_strings(struct net_device *, u32, u8 *); +void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); +void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...); +int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd); +int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback); +int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int); +int qeth_configure_cq(struct qeth_card *, enum qeth_cq); +int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action); +void qeth_trace_features(struct qeth_card *); +void qeth_close_dev(struct qeth_card *); +int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16, + long, + int (*reply_cb)(struct qeth_card *, + struct qeth_reply *, unsigned long), + void *); +int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long); +struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *, + enum qeth_ipa_funcs, + __u16, __u16, + enum qeth_prot_versions); +int qeth_set_features(struct net_device *, netdev_features_t); +void qeth_enable_hw_features(struct net_device *dev); +netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); +netdev_features_t qeth_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features); +int qeth_vm_request_mac(struct qeth_card *card); +int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb, + struct qeth_hdr **hdr, unsigned int hdr_len, + unsigned int proto_len, unsigned int *elements); + +/* exports for OSN */ +int qeth_osn_assist(struct net_device *, void *, int); +int qeth_osn_register(unsigned char *read_dev_no, struct net_device **, + int (*assist_cb)(struct net_device *, void *), + int (*data_cb)(struct sk_buff *)); +void qeth_osn_deregister(struct net_device *); + +#endif /* __QETH_CORE_H__ */ diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c new file mode 100644 index 000000000..d0aaef937 --- /dev/null +++ b/drivers/s390/net/qeth_core_main.c @@ -0,0 +1,6687 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 
2007, 2009 + * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, + * Frank Pavlic <fpavlic@de.ibm.com>, + * Thomas Spatzier <tspat@de.ibm.com>, + * Frank Blaschka <frank.blaschka@de.ibm.com> + */ + +#define KMSG_COMPONENT "qeth" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/compat.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/mii.h> +#include <linux/kthread.h> +#include <linux/slab.h> +#include <linux/if_vlan.h> +#include <linux/netdevice.h> +#include <linux/netdev_features.h> +#include <linux/skbuff.h> +#include <linux/vmalloc.h> + +#include <net/iucv/af_iucv.h> +#include <net/dsfield.h> + +#include <asm/ebcdic.h> +#include <asm/chpid.h> +#include <asm/io.h> +#include <asm/sysinfo.h> +#include <asm/diag.h> +#include <asm/cio.h> +#include <asm/ccwdev.h> +#include <asm/cpcmd.h> + +#include "qeth_core.h" + +struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { + /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */ + /* N P A M L V H */ + [QETH_DBF_SETUP] = {"qeth_setup", + 8, 1, 8, 5, &debug_hex_ascii_view, NULL}, + [QETH_DBF_MSG] = {"qeth_msg", 8, 1, 11 * sizeof(long), 3, + &debug_sprintf_view, NULL}, + [QETH_DBF_CTRL] = {"qeth_control", + 8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL}, +}; +EXPORT_SYMBOL_GPL(qeth_dbf); + +struct qeth_card_list_struct qeth_core_card_list; +EXPORT_SYMBOL_GPL(qeth_core_card_list); +struct kmem_cache *qeth_core_header_cache; +EXPORT_SYMBOL_GPL(qeth_core_header_cache); +static struct kmem_cache *qeth_qdio_outbuf_cache; + +static struct device *qeth_core_root_dev; +static struct lock_class_key qdio_out_skb_queue_key; +static struct mutex qeth_mod_mutex; + +static void qeth_send_control_data_cb(struct qeth_channel *, + struct qeth_cmd_buffer *); +static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *); +static void qeth_free_buffer_pool(struct qeth_card *); +static int qeth_qdio_establish(struct qeth_card *); +static void qeth_free_qdio_buffers(struct qeth_card *); +static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, + struct qeth_qdio_out_buffer *buf, + enum iucv_tx_notify notification); +static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); +static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); + +struct workqueue_struct *qeth_wq; +EXPORT_SYMBOL_GPL(qeth_wq); + +int qeth_card_hw_is_reachable(struct qeth_card *card) +{ + return (card->state == CARD_STATE_SOFTSETUP) || + (card->state == CARD_STATE_UP); +} +EXPORT_SYMBOL_GPL(qeth_card_hw_is_reachable); + +static void qeth_close_dev_handler(struct work_struct *work) +{ + struct qeth_card *card; + + card = container_of(work, struct qeth_card, close_dev_work); + QETH_CARD_TEXT(card, 2, "cldevhdl"); + rtnl_lock(); + dev_close(card->dev); + rtnl_unlock(); + ccwgroup_set_offline(card->gdev); +} + +void qeth_close_dev(struct qeth_card *card) +{ + QETH_CARD_TEXT(card, 2, "cldevsubm"); + queue_work(qeth_wq, &card->close_dev_work); +} +EXPORT_SYMBOL_GPL(qeth_close_dev); + +static const char *qeth_get_cardname(struct qeth_card *card) +{ + if (card->info.guestlan) { + switch (card->info.type) { + case QETH_CARD_TYPE_OSD: + return " Virtual NIC QDIO"; + case QETH_CARD_TYPE_IQD: + return " Virtual NIC Hiper"; + case QETH_CARD_TYPE_OSM: + return " Virtual NIC QDIO - OSM"; + case QETH_CARD_TYPE_OSX: + return " Virtual NIC QDIO - OSX"; + default: + return " unknown"; + } + } else { + switch 
(card->info.type) { + case QETH_CARD_TYPE_OSD: + return " OSD Express"; + case QETH_CARD_TYPE_IQD: + return " HiperSockets"; + case QETH_CARD_TYPE_OSN: + return " OSN QDIO"; + case QETH_CARD_TYPE_OSM: + return " OSM QDIO"; + case QETH_CARD_TYPE_OSX: + return " OSX QDIO"; + default: + return " unknown"; + } + } + return " n/a"; +} + +/* max length to be returned: 14 */ +const char *qeth_get_cardname_short(struct qeth_card *card) +{ + if (card->info.guestlan) { + switch (card->info.type) { + case QETH_CARD_TYPE_OSD: + return "Virt.NIC QDIO"; + case QETH_CARD_TYPE_IQD: + return "Virt.NIC Hiper"; + case QETH_CARD_TYPE_OSM: + return "Virt.NIC OSM"; + case QETH_CARD_TYPE_OSX: + return "Virt.NIC OSX"; + default: + return "unknown"; + } + } else { + switch (card->info.type) { + case QETH_CARD_TYPE_OSD: + switch (card->info.link_type) { + case QETH_LINK_TYPE_FAST_ETH: + return "OSD_100"; + case QETH_LINK_TYPE_HSTR: + return "HSTR"; + case QETH_LINK_TYPE_GBIT_ETH: + return "OSD_1000"; + case QETH_LINK_TYPE_10GBIT_ETH: + return "OSD_10GIG"; + case QETH_LINK_TYPE_LANE_ETH100: + return "OSD_FE_LANE"; + case QETH_LINK_TYPE_LANE_TR: + return "OSD_TR_LANE"; + case QETH_LINK_TYPE_LANE_ETH1000: + return "OSD_GbE_LANE"; + case QETH_LINK_TYPE_LANE: + return "OSD_ATM_LANE"; + default: + return "OSD_Express"; + } + case QETH_CARD_TYPE_IQD: + return "HiperSockets"; + case QETH_CARD_TYPE_OSN: + return "OSN"; + case QETH_CARD_TYPE_OSM: + return "OSM_1000"; + case QETH_CARD_TYPE_OSX: + return "OSX_10GIG"; + default: + return "unknown"; + } + } + return "n/a"; +} + +void qeth_set_recovery_task(struct qeth_card *card) +{ + card->recovery_task = current; +} +EXPORT_SYMBOL_GPL(qeth_set_recovery_task); + +void qeth_clear_recovery_task(struct qeth_card *card) +{ + card->recovery_task = NULL; +} +EXPORT_SYMBOL_GPL(qeth_clear_recovery_task); + +static bool qeth_is_recovery_task(const struct qeth_card *card) +{ + return card->recovery_task == current; +} + +void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, + int clear_start_mask) +{ + unsigned long flags; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + card->thread_allowed_mask = threads; + if (clear_start_mask) + card->thread_start_mask &= threads; + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + wake_up(&card->wait_q); +} +EXPORT_SYMBOL_GPL(qeth_set_allowed_threads); + +int qeth_threads_running(struct qeth_card *card, unsigned long threads) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + rc = (card->thread_running_mask & threads); + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_threads_running); + +int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) +{ + if (qeth_is_recovery_task(card)) + return 0; + return wait_event_interruptible(card->wait_q, + qeth_threads_running(card, threads) == 0); +} +EXPORT_SYMBOL_GPL(qeth_wait_for_threads); + +void qeth_clear_working_pool_list(struct qeth_card *card) +{ + struct qeth_buffer_pool_entry *pool_entry, *tmp; + + QETH_CARD_TEXT(card, 5, "clwrklst"); + list_for_each_entry_safe(pool_entry, tmp, + &card->qdio.in_buf_pool.entry_list, list){ + list_del(&pool_entry->list); + } +} +EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list); + +static int qeth_alloc_buffer_pool(struct qeth_card *card) +{ + struct qeth_buffer_pool_entry *pool_entry; + void *ptr; + int i, j; + + QETH_CARD_TEXT(card, 5, "alocpool"); + for (i = 0; i < card->qdio.init_pool.buf_count; ++i) { + pool_entry = 
kzalloc(sizeof(*pool_entry), GFP_KERNEL); + if (!pool_entry) { + qeth_free_buffer_pool(card); + return -ENOMEM; + } + for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) { + ptr = (void *) __get_free_page(GFP_KERNEL); + if (!ptr) { + while (j > 0) + free_page((unsigned long) + pool_entry->elements[--j]); + kfree(pool_entry); + qeth_free_buffer_pool(card); + return -ENOMEM; + } + pool_entry->elements[j] = ptr; + } + list_add(&pool_entry->init_list, + &card->qdio.init_pool.entry_list); + } + return 0; +} + +int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt) +{ + QETH_CARD_TEXT(card, 2, "realcbp"); + + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) + return -EPERM; + + /* TODO: steel/add buffers from/to a running card's buffer pool (?) */ + qeth_clear_working_pool_list(card); + qeth_free_buffer_pool(card); + card->qdio.in_buf_pool.buf_count = bufcnt; + card->qdio.init_pool.buf_count = bufcnt; + return qeth_alloc_buffer_pool(card); +} +EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool); + +static void qeth_free_qdio_queue(struct qeth_qdio_q *q) +{ + if (!q) + return; + + qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); + kfree(q); +} + +static struct qeth_qdio_q *qeth_alloc_qdio_queue(void) +{ + struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL); + int i; + + if (!q) + return NULL; + + if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) { + kfree(q); + return NULL; + } + + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) + q->bufs[i].buffer = q->qdio_bufs[i]; + + QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *)); + return q; +} + +static int qeth_cq_init(struct qeth_card *card) +{ + int rc; + + if (card->options.cq == QETH_CQ_ENABLED) { + QETH_DBF_TEXT(SETUP, 2, "cqinit"); + qdio_reset_buffers(card->qdio.c_q->qdio_bufs, + QDIO_MAX_BUFFERS_PER_Q); + card->qdio.c_q->next_buf_to_init = 127; + rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, + card->qdio.no_in_queues - 1, 0, + 127); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + goto out; + } + } + rc = 0; +out: + return rc; +} + +static int qeth_alloc_cq(struct qeth_card *card) +{ + int rc; + + if (card->options.cq == QETH_CQ_ENABLED) { + int i; + struct qdio_outbuf_state *outbuf_states; + + QETH_DBF_TEXT(SETUP, 2, "cqon"); + card->qdio.c_q = qeth_alloc_qdio_queue(); + if (!card->qdio.c_q) { + rc = -1; + goto kmsg_out; + } + card->qdio.no_in_queues = 2; + card->qdio.out_bufstates = + kcalloc(card->qdio.no_out_queues * + QDIO_MAX_BUFFERS_PER_Q, + sizeof(struct qdio_outbuf_state), + GFP_KERNEL); + outbuf_states = card->qdio.out_bufstates; + if (outbuf_states == NULL) { + rc = -1; + goto free_cq_out; + } + for (i = 0; i < card->qdio.no_out_queues; ++i) { + card->qdio.out_qs[i]->bufstates = outbuf_states; + outbuf_states += QDIO_MAX_BUFFERS_PER_Q; + } + } else { + QETH_DBF_TEXT(SETUP, 2, "nocq"); + card->qdio.c_q = NULL; + card->qdio.no_in_queues = 1; + } + QETH_DBF_TEXT_(SETUP, 2, "iqc%d", card->qdio.no_in_queues); + rc = 0; +out: + return rc; +free_cq_out: + qeth_free_qdio_queue(card->qdio.c_q); + card->qdio.c_q = NULL; +kmsg_out: + dev_err(&card->gdev->dev, "Failed to create completion queue\n"); + goto out; +} + +static void qeth_free_cq(struct qeth_card *card) +{ + if (card->qdio.c_q) { + --card->qdio.no_in_queues; + qeth_free_qdio_queue(card->qdio.c_q); + card->qdio.c_q = NULL; + } + kfree(card->qdio.out_bufstates); + card->qdio.out_bufstates = NULL; +} + +static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, + int delayed) +{ + enum iucv_tx_notify n; + + switch 
(sbalf15) { + case 0: + n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK; + break; + case 4: + case 16: + case 17: + case 18: + n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE : + TX_NOTIFY_UNREACHABLE; + break; + default: + n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR : + TX_NOTIFY_GENERALERROR; + break; + } + + return n; +} + +static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, + int forced_cleanup) +{ + if (q->card->options.cq != QETH_CQ_ENABLED) + return; + + if (q->bufs[bidx]->next_pending != NULL) { + struct qeth_qdio_out_buffer *head = q->bufs[bidx]; + struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending; + + while (c) { + if (forced_cleanup || + atomic_read(&c->state) == + QETH_QDIO_BUF_HANDLED_DELAYED) { + struct qeth_qdio_out_buffer *f = c; + QETH_CARD_TEXT(f->q->card, 5, "fp"); + QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f); + /* release here to avoid interleaving between + outbound tasklet and inbound tasklet + regarding notifications and lifecycle */ + qeth_release_skbs(c); + + c = f->next_pending; + WARN_ON_ONCE(head->next_pending != f); + head->next_pending = c; + kmem_cache_free(qeth_qdio_outbuf_cache, f); + } else { + head = c; + c = c->next_pending; + } + + } + } +} + + +static void qeth_qdio_handle_aob(struct qeth_card *card, + unsigned long phys_aob_addr) +{ + struct qaob *aob; + struct qeth_qdio_out_buffer *buffer; + enum iucv_tx_notify notification; + unsigned int i; + + aob = (struct qaob *) phys_to_virt(phys_aob_addr); + QETH_CARD_TEXT(card, 5, "haob"); + QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr); + buffer = (struct qeth_qdio_out_buffer *) aob->user1; + QETH_CARD_TEXT_(card, 5, "%lx", aob->user1); + + if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED, + QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) { + notification = TX_NOTIFY_OK; + } else { + WARN_ON_ONCE(atomic_read(&buffer->state) != + QETH_QDIO_BUF_PENDING); + atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ); + notification = TX_NOTIFY_DELAYED_OK; + } + + if (aob->aorc != 0) { + QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc); + notification = qeth_compute_cq_notification(aob->aorc, 1); + } + qeth_notify_skbs(buffer->q, buffer, notification); + + /* Free dangling allocations. The attached skbs are handled by + * qeth_cleanup_handled_pending(). 
+ */ + for (i = 0; + i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card); + i++) { + if (aob->sba[i] && buffer->is_header[i]) + kmem_cache_free(qeth_core_header_cache, + (void *) aob->sba[i]); + } + atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED); + + qdio_release_aob(aob); +} + +static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue) +{ + return card->options.cq == QETH_CQ_ENABLED && + card->qdio.c_q != NULL && + queue != 0 && + queue == card->qdio.no_in_queues - 1; +} + +static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u32 len, void *data) +{ + ccw->cmd_code = cmd_code; + ccw->flags = CCW_FLAG_SLI; + ccw->count = len; + ccw->cda = (__u32) __pa(data); +} + +static int __qeth_issue_next_read(struct qeth_card *card) +{ + struct qeth_channel *channel = &card->read; + struct qeth_cmd_buffer *iob; + int rc; + + QETH_CARD_TEXT(card, 5, "issnxrd"); + if (channel->state != CH_STATE_UP) + return -EIO; + iob = qeth_get_buffer(channel); + if (!iob) { + dev_warn(&card->gdev->dev, "The qeth device driver " + "failed to recover an error on the device\n"); + QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob " + "available\n", dev_name(&card->gdev->dev)); + return -ENOMEM; + } + qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data); + QETH_CARD_TEXT(card, 6, "noirqpnd"); + rc = ccw_device_start(channel->ccwdev, channel->ccw, + (addr_t) iob, 0, 0); + if (rc) { + QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! " + "rc=%i\n", dev_name(&card->gdev->dev), rc); + atomic_set(&channel->irq_pending, 0); + qeth_release_buffer(channel, iob); + card->read_or_write_problem = 1; + qeth_schedule_recovery(card); + wake_up(&card->wait_q); + } + return rc; +} + +static int qeth_issue_next_read(struct qeth_card *card) +{ + int ret; + + spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card))); + ret = __qeth_issue_next_read(card); + spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card))); + + return ret; +} + +static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) +{ + struct qeth_reply *reply; + + reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC); + if (reply) { + refcount_set(&reply->refcnt, 1); + atomic_set(&reply->received, 0); + reply->card = card; + } + return reply; +} + +static void qeth_get_reply(struct qeth_reply *reply) +{ + refcount_inc(&reply->refcnt); +} + +static void qeth_put_reply(struct qeth_reply *reply) +{ + if (refcount_dec_and_test(&reply->refcnt)) + kfree(reply); +} + +static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc, + struct qeth_card *card) +{ + const char *ipa_name; + int com = cmd->hdr.command; + ipa_name = qeth_get_ipa_cmd_name(com); + if (rc) + QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned " + "x%X \"%s\"\n", + ipa_name, com, dev_name(&card->gdev->dev), + QETH_CARD_IFNAME(card), rc, + qeth_get_ipa_msg(rc)); + else + QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n", + ipa_name, com, dev_name(&card->gdev->dev), + QETH_CARD_IFNAME(card)); +} + +static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, + struct qeth_cmd_buffer *iob) +{ + struct qeth_ipa_cmd *cmd = NULL; + + QETH_CARD_TEXT(card, 5, "chkipad"); + if (IS_IPA(iob->data)) { + cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data); + if (IS_IPA_REPLY(cmd)) { + if (cmd->hdr.command != IPA_CMD_SETCCID && + cmd->hdr.command != IPA_CMD_DELCCID && + cmd->hdr.command != IPA_CMD_MODCCID && + cmd->hdr.command != IPA_CMD_SET_DIAG_ASS) + qeth_issue_ipa_msg(cmd, + cmd->hdr.return_code, card); + return cmd; + } else { + switch 
(cmd->hdr.command) { + case IPA_CMD_STOPLAN: + if (cmd->hdr.return_code == + IPA_RC_VEPA_TO_VEB_TRANSITION) { + dev_err(&card->gdev->dev, + "Interface %s is down because the " + "adjacent port is no longer in " + "reflective relay mode\n", + QETH_CARD_IFNAME(card)); + qeth_close_dev(card); + } else { + dev_warn(&card->gdev->dev, + "The link for interface %s on CHPID" + " 0x%X failed\n", + QETH_CARD_IFNAME(card), + card->info.chpid); + qeth_issue_ipa_msg(cmd, + cmd->hdr.return_code, card); + } + card->lan_online = 0; + netif_carrier_off(card->dev); + return NULL; + case IPA_CMD_STARTLAN: + dev_info(&card->gdev->dev, + "The link for %s on CHPID 0x%X has" + " been restored\n", + QETH_CARD_IFNAME(card), + card->info.chpid); + netif_carrier_on(card->dev); + card->lan_online = 1; + if (card->info.hwtrap) + card->info.hwtrap = 2; + qeth_schedule_recovery(card); + return NULL; + case IPA_CMD_SETBRIDGEPORT_IQD: + case IPA_CMD_SETBRIDGEPORT_OSA: + case IPA_CMD_ADDRESS_CHANGE_NOTIF: + if (card->discipline->control_event_handler + (card, cmd)) + return cmd; + else + return NULL; + case IPA_CMD_MODCCID: + return cmd; + case IPA_CMD_REGISTER_LOCAL_ADDR: + QETH_CARD_TEXT(card, 3, "irla"); + break; + case IPA_CMD_UNREGISTER_LOCAL_ADDR: + QETH_CARD_TEXT(card, 3, "urla"); + break; + default: + QETH_DBF_MESSAGE(2, "Received data is IPA " + "but not a reply!\n"); + break; + } + } + } + return cmd; +} + +void qeth_clear_ipacmd_list(struct qeth_card *card) +{ + struct qeth_reply *reply, *r; + unsigned long flags; + + QETH_CARD_TEXT(card, 4, "clipalst"); + + spin_lock_irqsave(&card->lock, flags); + list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { + qeth_get_reply(reply); + reply->rc = -EIO; + atomic_inc(&reply->received); + list_del_init(&reply->list); + wake_up(&reply->wait_q); + qeth_put_reply(reply); + } + spin_unlock_irqrestore(&card->lock, flags); +} +EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); + +static int qeth_check_idx_response(struct qeth_card *card, + unsigned char *buffer) +{ + if (!buffer) + return 0; + + QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); + if ((buffer[2] & 0xc0) == 0xc0) { + QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#02x\n", + buffer[4]); + QETH_CARD_TEXT(card, 2, "ckidxres"); + QETH_CARD_TEXT(card, 2, " idxterm"); + QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); + if (buffer[4] == 0xf6) { + dev_err(&card->gdev->dev, + "The qeth device is not configured " + "for the OSI layer required by z/VM\n"); + return -EPERM; + } + return -EIO; + } + return 0; +} + +static struct qeth_card *CARD_FROM_CDEV(struct ccw_device *cdev) +{ + struct qeth_card *card = dev_get_drvdata(&((struct ccwgroup_device *) + dev_get_drvdata(&cdev->dev))->dev); + return card; +} + +static struct qeth_cmd_buffer *__qeth_get_buffer(struct qeth_channel *channel) +{ + __u8 index; + + QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "getbuff"); + index = channel->io_buf_no; + do { + if (channel->iob[index].state == BUF_STATE_FREE) { + channel->iob[index].state = BUF_STATE_LOCKED; + channel->io_buf_no = (channel->io_buf_no + 1) % + QETH_CMD_BUFFER_NO; + memset(channel->iob[index].data, 0, QETH_BUFSIZE); + return channel->iob + index; + } + index = (index + 1) % QETH_CMD_BUFFER_NO; + } while (index != channel->io_buf_no); + + return NULL; +} + +void qeth_release_buffer(struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) +{ + unsigned long flags; + + QETH_CARD_TEXT(CARD_FROM_CDEV(channel->ccwdev), 6, "relbuff"); + spin_lock_irqsave(&channel->iob_lock, flags); + memset(iob->data, 
0, QETH_BUFSIZE); + iob->state = BUF_STATE_FREE; + iob->callback = qeth_send_control_data_cb; + iob->rc = 0; + spin_unlock_irqrestore(&channel->iob_lock, flags); + wake_up(&channel->wait_q); +} +EXPORT_SYMBOL_GPL(qeth_release_buffer); + +static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *channel) +{ + struct qeth_cmd_buffer *buffer = NULL; + unsigned long flags; + + spin_lock_irqsave(&channel->iob_lock, flags); + buffer = __qeth_get_buffer(channel); + spin_unlock_irqrestore(&channel->iob_lock, flags); + return buffer; +} + +struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *channel) +{ + struct qeth_cmd_buffer *buffer; + wait_event(channel->wait_q, + ((buffer = qeth_get_buffer(channel)) != NULL)); + return buffer; +} +EXPORT_SYMBOL_GPL(qeth_wait_for_buffer); + +void qeth_clear_cmd_buffers(struct qeth_channel *channel) +{ + int cnt; + + for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) + qeth_release_buffer(channel, &channel->iob[cnt]); + channel->io_buf_no = 0; +} +EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers); + +static void qeth_send_control_data_cb(struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) +{ + struct qeth_card *card; + struct qeth_reply *reply, *r; + struct qeth_ipa_cmd *cmd; + unsigned long flags; + int keep_reply; + int rc = 0; + + card = CARD_FROM_CDEV(channel->ccwdev); + QETH_CARD_TEXT(card, 4, "sndctlcb"); + rc = qeth_check_idx_response(card, iob->data); + switch (rc) { + case 0: + break; + case -EIO: + qeth_clear_ipacmd_list(card); + qeth_schedule_recovery(card); + /* fall through */ + default: + goto out; + } + + cmd = qeth_check_ipa_data(card, iob); + if ((cmd == NULL) && (card->state != CARD_STATE_DOWN)) + goto out; + /*in case of OSN : check if cmd is set */ + if (card->info.type == QETH_CARD_TYPE_OSN && + cmd && + cmd->hdr.command != IPA_CMD_STARTLAN && + card->osn_info.assist_cb != NULL) { + card->osn_info.assist_cb(card->dev, cmd); + goto out; + } + + spin_lock_irqsave(&card->lock, flags); + list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { + if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) || + ((cmd) && (reply->seqno == cmd->hdr.seqno))) { + qeth_get_reply(reply); + list_del_init(&reply->list); + spin_unlock_irqrestore(&card->lock, flags); + keep_reply = 0; + if (reply->callback != NULL) { + if (cmd) { + reply->offset = (__u16)((char *)cmd - + (char *)iob->data); + keep_reply = reply->callback(card, + reply, + (unsigned long)cmd); + } else + keep_reply = reply->callback(card, + reply, + (unsigned long)iob); + } + if (cmd) + reply->rc = (u16) cmd->hdr.return_code; + else if (iob->rc) + reply->rc = iob->rc; + if (keep_reply) { + spin_lock_irqsave(&card->lock, flags); + list_add_tail(&reply->list, + &card->cmd_waiter_list); + spin_unlock_irqrestore(&card->lock, flags); + } else { + atomic_inc(&reply->received); + wake_up(&reply->wait_q); + } + qeth_put_reply(reply); + goto out; + } + } + spin_unlock_irqrestore(&card->lock, flags); +out: + memcpy(&card->seqno.pdu_hdr_ack, + QETH_PDU_HEADER_SEQ_NO(iob->data), + QETH_SEQ_NO_LENGTH); + qeth_release_buffer(channel, iob); +} + +static int qeth_set_thread_start_bit(struct qeth_card *card, + unsigned long thread) +{ + unsigned long flags; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + if (!(card->thread_allowed_mask & thread) || + (card->thread_start_mask & thread)) { + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + return -EPERM; + } + card->thread_start_mask |= thread; + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + return 0; +} + +void 
qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread) +{ + unsigned long flags; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + card->thread_start_mask &= ~thread; + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + wake_up(&card->wait_q); +} +EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit); + +void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) +{ + unsigned long flags; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + card->thread_running_mask &= ~thread; + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + wake_up_all(&card->wait_q); +} +EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit); + +static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + if (card->thread_start_mask & thread) { + if ((card->thread_allowed_mask & thread) && + !(card->thread_running_mask & thread)) { + rc = 1; + card->thread_start_mask &= ~thread; + card->thread_running_mask |= thread; + } else + rc = -EPERM; + } + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + return rc; +} + +int qeth_do_run_thread(struct qeth_card *card, unsigned long thread) +{ + int rc = 0; + + wait_event(card->wait_q, + (rc = __qeth_do_run_thread(card, thread)) >= 0); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_do_run_thread); + +void qeth_schedule_recovery(struct qeth_card *card) +{ + QETH_CARD_TEXT(card, 2, "startrec"); + if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0) + schedule_work(&card->kernel_thread_starter); +} +EXPORT_SYMBOL_GPL(qeth_schedule_recovery); + +static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) +{ + int dstat, cstat; + char *sense; + struct qeth_card *card; + + sense = (char *) irb->ecw; + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; + card = CARD_FROM_CDEV(cdev); + + if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK | + SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | + SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { + QETH_CARD_TEXT(card, 2, "CGENCHK"); + dev_warn(&cdev->dev, "The qeth device driver " + "failed to recover an error on the device\n"); + QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n", + dev_name(&cdev->dev), dstat, cstat); + print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, + 16, 1, irb, 64, 1); + return 1; + } + + if (dstat & DEV_STAT_UNIT_CHECK) { + if (sense[SENSE_RESETTING_EVENT_BYTE] & + SENSE_RESETTING_EVENT_FLAG) { + QETH_CARD_TEXT(card, 2, "REVIND"); + return 1; + } + if (sense[SENSE_COMMAND_REJECT_BYTE] & + SENSE_COMMAND_REJECT_FLAG) { + QETH_CARD_TEXT(card, 2, "CMDREJi"); + return 1; + } + if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) { + QETH_CARD_TEXT(card, 2, "AFFE"); + return 1; + } + if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) { + QETH_CARD_TEXT(card, 2, "ZEROSEN"); + return 0; + } + QETH_CARD_TEXT(card, 2, "DGENCHK"); + return 1; + } + return 0; +} + +static long __qeth_check_irb_error(struct ccw_device *cdev, + unsigned long intparm, struct irb *irb) +{ + struct qeth_card *card; + + card = CARD_FROM_CDEV(cdev); + + if (!card || !IS_ERR(irb)) + return 0; + + switch (PTR_ERR(irb)) { + case -EIO: + QETH_DBF_MESSAGE(2, "%s i/o-error on device\n", + dev_name(&cdev->dev)); + QETH_CARD_TEXT(card, 2, "ckirberr"); + QETH_CARD_TEXT_(card, 2, " rc%d", -EIO); + break; + case -ETIMEDOUT: + dev_warn(&cdev->dev, "A hardware operation timed out" + " on the device\n"); + QETH_CARD_TEXT(card, 2, 
"ckirberr"); + QETH_CARD_TEXT_(card, 2, " rc%d", -ETIMEDOUT); + if (intparm == QETH_RCD_PARM) { + if (card->data.ccwdev == cdev) { + card->data.state = CH_STATE_DOWN; + wake_up(&card->wait_q); + } + } + break; + default: + QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n", + dev_name(&cdev->dev), PTR_ERR(irb)); + QETH_CARD_TEXT(card, 2, "ckirberr"); + QETH_CARD_TEXT(card, 2, " rc???"); + } + return PTR_ERR(irb); +} + +static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, + struct irb *irb) +{ + int rc; + int cstat, dstat; + struct qeth_cmd_buffer *iob = NULL; + struct qeth_channel *channel; + struct qeth_card *card; + + card = CARD_FROM_CDEV(cdev); + if (!card) + return; + + QETH_CARD_TEXT(card, 5, "irq"); + + if (card->read.ccwdev == cdev) { + channel = &card->read; + QETH_CARD_TEXT(card, 5, "read"); + } else if (card->write.ccwdev == cdev) { + channel = &card->write; + QETH_CARD_TEXT(card, 5, "write"); + } else { + channel = &card->data; + QETH_CARD_TEXT(card, 5, "data"); + } + + if (qeth_intparm_is_iob(intparm)) + iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm); + + if (__qeth_check_irb_error(cdev, intparm, irb)) { + /* IO was terminated, free its resources. */ + if (iob) + qeth_release_buffer(iob->channel, iob); + atomic_set(&channel->irq_pending, 0); + wake_up(&card->wait_q); + return; + } + + atomic_set(&channel->irq_pending, 0); + + if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC)) + channel->state = CH_STATE_STOPPED; + + if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC)) + channel->state = CH_STATE_HALTED; + + /*let's wake up immediately on data channel*/ + if ((channel == &card->data) && (intparm != 0) && + (intparm != QETH_RCD_PARM)) + goto out; + + if (intparm == QETH_CLEAR_CHANNEL_PARM) { + QETH_CARD_TEXT(card, 6, "clrchpar"); + /* we don't have to handle this further */ + intparm = 0; + } + if (intparm == QETH_HALT_CHANNEL_PARM) { + QETH_CARD_TEXT(card, 6, "hltchpar"); + /* we don't have to handle this further */ + intparm = 0; + } + + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; + + if ((dstat & DEV_STAT_UNIT_EXCEP) || + (dstat & DEV_STAT_UNIT_CHECK) || + (cstat)) { + if (irb->esw.esw0.erw.cons) { + dev_warn(&channel->ccwdev->dev, + "The qeth device driver failed to recover " + "an error on the device\n"); + QETH_DBF_MESSAGE(2, "%s sense data available. 
cstat " + "0x%X dstat 0x%X\n", + dev_name(&channel->ccwdev->dev), cstat, dstat); + print_hex_dump(KERN_WARNING, "qeth: irb ", + DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1); + print_hex_dump(KERN_WARNING, "qeth: sense data ", + DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1); + } + if (intparm == QETH_RCD_PARM) { + channel->state = CH_STATE_DOWN; + goto out; + } + rc = qeth_get_problem(cdev, irb); + if (rc) { + card->read_or_write_problem = 1; + if (iob) + qeth_release_buffer(iob->channel, iob); + qeth_clear_ipacmd_list(card); + qeth_schedule_recovery(card); + goto out; + } + } + + if (intparm == QETH_RCD_PARM) { + channel->state = CH_STATE_RCD_DONE; + goto out; + } + if (channel == &card->data) + return; + if (channel == &card->read && + channel->state == CH_STATE_UP) + __qeth_issue_next_read(card); + + if (iob && iob->callback) + iob->callback(iob->channel, iob); + +out: + wake_up(&card->wait_q); + return; +} + +static void qeth_notify_skbs(struct qeth_qdio_out_q *q, + struct qeth_qdio_out_buffer *buf, + enum iucv_tx_notify notification) +{ + struct sk_buff *skb; + + if (skb_queue_empty(&buf->skb_list)) + goto out; + skb = skb_peek(&buf->skb_list); + while (skb) { + QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification); + QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb); + if (be16_to_cpu(skb->protocol) == ETH_P_AF_IUCV) { + if (skb->sk) { + struct iucv_sock *iucv = iucv_sk(skb->sk); + iucv->sk_txnotify(skb, notification); + } + } + if (skb_queue_is_last(&buf->skb_list, skb)) + skb = NULL; + else + skb = skb_queue_next(&buf->skb_list, skb); + } +out: + return; +} + +static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) +{ + struct sk_buff *skb; + struct iucv_sock *iucv; + int notify_general_error = 0; + + if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) + notify_general_error = 1; + + /* release may never happen from within CQ tasklet scope */ + WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); + + skb = skb_dequeue(&buf->skb_list); + while (skb) { + QETH_CARD_TEXT(buf->q->card, 5, "skbr"); + QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb); + if (notify_general_error && + be16_to_cpu(skb->protocol) == ETH_P_AF_IUCV) { + if (skb->sk) { + iucv = iucv_sk(skb->sk); + iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR); + } + } + refcount_dec(&skb->users); + dev_kfree_skb_any(skb); + skb = skb_dequeue(&buf->skb_list); + } +} + +static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, + struct qeth_qdio_out_buffer *buf) +{ + int i; + + /* is PCI flag set on buffer? 
*/ + if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ) + atomic_dec(&queue->set_pci_flags_count); + + qeth_release_skbs(buf); + + for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { + if (buf->buffer->element[i].addr && buf->is_header[i]) + kmem_cache_free(qeth_core_header_cache, + buf->buffer->element[i].addr); + buf->is_header[i] = 0; + } + + qeth_scrub_qdio_buffer(buf->buffer, + QETH_MAX_BUFFER_ELEMENTS(queue->card)); + buf->next_element_to_fill = 0; + atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); +} + +static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free) +{ + int j; + + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { + if (!q->bufs[j]) + continue; + qeth_cleanup_handled_pending(q, j, 1); + qeth_clear_output_buffer(q, q->bufs[j]); + if (free) { + kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]); + q->bufs[j] = NULL; + } + } +} + +void qeth_clear_qdio_buffers(struct qeth_card *card) +{ + int i; + + QETH_CARD_TEXT(card, 2, "clearqdbf"); + /* clear outbound buffers to free skbs */ + for (i = 0; i < card->qdio.no_out_queues; ++i) { + if (card->qdio.out_qs[i]) { + qeth_clear_outq_buffers(card->qdio.out_qs[i], 0); + } + } +} +EXPORT_SYMBOL_GPL(qeth_clear_qdio_buffers); + +static void qeth_free_buffer_pool(struct qeth_card *card) +{ + struct qeth_buffer_pool_entry *pool_entry, *tmp; + int i = 0; + list_for_each_entry_safe(pool_entry, tmp, + &card->qdio.init_pool.entry_list, init_list){ + for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) + free_page((unsigned long)pool_entry->elements[i]); + list_del(&pool_entry->init_list); + kfree(pool_entry); + } +} + +static void qeth_clean_channel(struct qeth_channel *channel) +{ + struct ccw_device *cdev = channel->ccwdev; + int cnt; + + QETH_DBF_TEXT(SETUP, 2, "freech"); + + spin_lock_irq(get_ccwdev_lock(cdev)); + cdev->handler = NULL; + spin_unlock_irq(get_ccwdev_lock(cdev)); + + for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) + kfree(channel->iob[cnt].data); + kfree(channel->ccw); +} + +static int qeth_setup_channel(struct qeth_channel *channel, bool alloc_buffers) +{ + struct ccw_device *cdev = channel->ccwdev; + int cnt; + + QETH_DBF_TEXT(SETUP, 2, "setupch"); + + channel->ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); + if (!channel->ccw) + return -ENOMEM; + channel->state = CH_STATE_DOWN; + atomic_set(&channel->irq_pending, 0); + init_waitqueue_head(&channel->wait_q); + + spin_lock_irq(get_ccwdev_lock(cdev)); + cdev->handler = qeth_irq; + spin_unlock_irq(get_ccwdev_lock(cdev)); + + if (!alloc_buffers) + return 0; + + for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) { + channel->iob[cnt].data = + kzalloc(QETH_BUFSIZE, GFP_DMA|GFP_KERNEL); + if (channel->iob[cnt].data == NULL) + break; + channel->iob[cnt].state = BUF_STATE_FREE; + channel->iob[cnt].channel = channel; + channel->iob[cnt].callback = qeth_send_control_data_cb; + channel->iob[cnt].rc = 0; + } + if (cnt < QETH_CMD_BUFFER_NO) { + qeth_clean_channel(channel); + return -ENOMEM; + } + channel->io_buf_no = 0; + spin_lock_init(&channel->iob_lock); + + return 0; +} + +static void qeth_set_single_write_queues(struct qeth_card *card) +{ + if ((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) && + (card->qdio.no_out_queues == 4)) + qeth_free_qdio_buffers(card); + + card->qdio.no_out_queues = 1; + if (card->qdio.default_out_queue != 0) + dev_info(&card->gdev->dev, "Priority Queueing not supported\n"); + + card->qdio.default_out_queue = 0; +} + +static void qeth_set_multiple_write_queues(struct qeth_card *card) +{ + if 
((atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED) && + (card->qdio.no_out_queues == 1)) { + qeth_free_qdio_buffers(card); + card->qdio.default_out_queue = 2; + } + card->qdio.no_out_queues = 4; +} + +static int qeth_update_from_chp_desc(struct qeth_card *card) +{ + struct ccw_device *ccwdev; + struct channel_path_desc_fmt0 *chp_dsc; + + QETH_DBF_TEXT(SETUP, 2, "chp_desc"); + + ccwdev = card->data.ccwdev; + chp_dsc = ccw_device_get_chp_desc(ccwdev, 0); + if (!chp_dsc) + return -ENOMEM; + + card->info.func_level = 0x4100 + chp_dsc->desc; + if (card->info.type == QETH_CARD_TYPE_IQD) + goto out; + + /* CHPP field bit 6 == 1 -> single queue */ + if ((chp_dsc->chpp & 0x02) == 0x02) + qeth_set_single_write_queues(card); + else + qeth_set_multiple_write_queues(card); +out: + kfree(chp_dsc); + QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues); + QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level); + return 0; +} + +static void qeth_init_qdio_info(struct qeth_card *card) +{ + QETH_DBF_TEXT(SETUP, 4, "intqdinf"); + atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); + card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; + card->qdio.no_out_queues = QETH_MAX_QUEUES; + + /* inbound */ + card->qdio.no_in_queues = 1; + card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; + if (card->info.type == QETH_CARD_TYPE_IQD) + card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT; + else + card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; + card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count; + INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list); + INIT_LIST_HEAD(&card->qdio.init_pool.entry_list); +} + +static void qeth_set_initial_options(struct qeth_card *card) +{ + card->options.route4.type = NO_ROUTER; + card->options.route6.type = NO_ROUTER; + card->options.rx_sg_cb = QETH_RX_SG_CB; + card->options.isolation = ISOLATION_MODE_NONE; + card->options.cq = QETH_CQ_DISABLED; +} + +static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) +{ + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(&card->thread_mask_lock, flags); + QETH_CARD_TEXT_(card, 4, " %02x%02x%02x", + (u8) card->thread_start_mask, + (u8) card->thread_allowed_mask, + (u8) card->thread_running_mask); + rc = (card->thread_start_mask & thread); + spin_unlock_irqrestore(&card->thread_mask_lock, flags); + return rc; +} + +static void qeth_start_kernel_thread(struct work_struct *work) +{ + struct task_struct *ts; + struct qeth_card *card = container_of(work, struct qeth_card, + kernel_thread_starter); + QETH_CARD_TEXT(card , 2, "strthrd"); + + if (card->read.state != CH_STATE_UP && + card->write.state != CH_STATE_UP) + return; + if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) { + ts = kthread_run(card->discipline->recover, (void *)card, + "qeth_recover"); + if (IS_ERR(ts)) { + qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); + qeth_clear_thread_running_bit(card, + QETH_RECOVER_THREAD); + } + } +} + +static void qeth_buffer_reclaim_work(struct work_struct *); +static void qeth_setup_card(struct qeth_card *card) +{ + QETH_DBF_TEXT(SETUP, 2, "setupcrd"); + QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); + + card->info.type = CARD_RDEV(card)->id.driver_info; + card->state = CARD_STATE_DOWN; + spin_lock_init(&card->mclock); + spin_lock_init(&card->lock); + spin_lock_init(&card->ip_lock); + spin_lock_init(&card->thread_mask_lock); + mutex_init(&card->conf_mutex); + mutex_init(&card->discipline_mutex); + 
mutex_init(&card->vid_list_mutex); + INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread); + INIT_LIST_HEAD(&card->cmd_waiter_list); + init_waitqueue_head(&card->wait_q); + qeth_set_initial_options(card); + /* IP address takeover */ + INIT_LIST_HEAD(&card->ipato.entries); + qeth_init_qdio_info(card); + INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); + INIT_WORK(&card->close_dev_work, qeth_close_dev_handler); +} + +static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr) +{ + struct qeth_card *card = container_of(slr, struct qeth_card, + qeth_service_level); + if (card->info.mcl_level[0]) + seq_printf(m, "qeth: %s firmware level %s\n", + CARD_BUS_ID(card), card->info.mcl_level); +} + +static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev) +{ + struct qeth_card *card; + + QETH_DBF_TEXT(SETUP, 2, "alloccrd"); + card = kzalloc(sizeof(*card), GFP_KERNEL); + if (!card) + goto out; + QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); + + card->gdev = gdev; + CARD_RDEV(card) = gdev->cdev[0]; + CARD_WDEV(card) = gdev->cdev[1]; + CARD_DDEV(card) = gdev->cdev[2]; + if (qeth_setup_channel(&card->read, true)) + goto out_ip; + if (qeth_setup_channel(&card->write, true)) + goto out_channel; + if (qeth_setup_channel(&card->data, false)) + goto out_data; + card->options.layer2 = -1; + card->qeth_service_level.seq_print = qeth_core_sl_print; + register_service_level(&card->qeth_service_level); + return card; + +out_data: + qeth_clean_channel(&card->write); +out_channel: + qeth_clean_channel(&card->read); +out_ip: + kfree(card); +out: + return NULL; +} + +static int qeth_clear_channel(struct qeth_channel *channel) +{ + unsigned long flags; + struct qeth_card *card; + int rc; + + card = CARD_FROM_CDEV(channel->ccwdev); + QETH_CARD_TEXT(card, 3, "clearch"); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_clear(channel->ccwdev, QETH_CLEAR_CHANNEL_PARM); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + + if (rc) + return rc; + rc = wait_event_interruptible_timeout(card->wait_q, + channel->state == CH_STATE_STOPPED, QETH_TIMEOUT); + if (rc == -ERESTARTSYS) + return rc; + if (channel->state != CH_STATE_STOPPED) + return -ETIME; + channel->state = CH_STATE_DOWN; + return 0; +} + +static int qeth_halt_channel(struct qeth_channel *channel) +{ + unsigned long flags; + struct qeth_card *card; + int rc; + + card = CARD_FROM_CDEV(channel->ccwdev); + QETH_CARD_TEXT(card, 3, "haltch"); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_halt(channel->ccwdev, QETH_HALT_CHANNEL_PARM); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + + if (rc) + return rc; + rc = wait_event_interruptible_timeout(card->wait_q, + channel->state == CH_STATE_HALTED, QETH_TIMEOUT); + if (rc == -ERESTARTSYS) + return rc; + if (channel->state != CH_STATE_HALTED) + return -ETIME; + return 0; +} + +static int qeth_halt_channels(struct qeth_card *card) +{ + int rc1 = 0, rc2 = 0, rc3 = 0; + + QETH_CARD_TEXT(card, 3, "haltchs"); + rc1 = qeth_halt_channel(&card->read); + rc2 = qeth_halt_channel(&card->write); + rc3 = qeth_halt_channel(&card->data); + if (rc1) + return rc1; + if (rc2) + return rc2; + return rc3; +} + +static int qeth_clear_channels(struct qeth_card *card) +{ + int rc1 = 0, rc2 = 0, rc3 = 0; + + QETH_CARD_TEXT(card, 3, "clearchs"); + rc1 = qeth_clear_channel(&card->read); + rc2 = qeth_clear_channel(&card->write); + rc3 = qeth_clear_channel(&card->data); + if (rc1) + return 
rc1; + if (rc2) + return rc2; + return rc3; +} + +static int qeth_clear_halt_card(struct qeth_card *card, int halt) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 3, "clhacrd"); + + if (halt) + rc = qeth_halt_channels(card); + if (rc) + return rc; + return qeth_clear_channels(card); +} + +int qeth_qdio_clear_card(struct qeth_card *card, int use_halt) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 3, "qdioclr"); + switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED, + QETH_QDIO_CLEANING)) { + case QETH_QDIO_ESTABLISHED: + if (card->info.type == QETH_CARD_TYPE_IQD) + rc = qdio_shutdown(CARD_DDEV(card), + QDIO_FLAG_CLEANUP_USING_HALT); + else + rc = qdio_shutdown(CARD_DDEV(card), + QDIO_FLAG_CLEANUP_USING_CLEAR); + if (rc) + QETH_CARD_TEXT_(card, 3, "1err%d", rc); + atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); + break; + case QETH_QDIO_CLEANING: + return rc; + default: + break; + } + rc = qeth_clear_halt_card(card, use_halt); + if (rc) + QETH_CARD_TEXT_(card, 3, "2err%d", rc); + card->state = CARD_STATE_DOWN; + return rc; +} +EXPORT_SYMBOL_GPL(qeth_qdio_clear_card); + +static int qeth_read_conf_data(struct qeth_card *card, void **buffer, + int *length) +{ + struct ciw *ciw; + char *rcd_buf; + int ret; + struct qeth_channel *channel = &card->data; + unsigned long flags; + + /* + * scan for RCD command in extended SenseID data + */ + ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD); + if (!ciw || ciw->cmd == 0) + return -EOPNOTSUPP; + rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA); + if (!rcd_buf) + return -ENOMEM; + + qeth_setup_ccw(channel->ccw, ciw->cmd, ciw->count, rcd_buf); + channel->state = CH_STATE_RCD; + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + ret = ccw_device_start_timeout(channel->ccwdev, channel->ccw, + QETH_RCD_PARM, LPM_ANYPATH, 0, + QETH_RCD_TIMEOUT); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + if (!ret) + wait_event(card->wait_q, + (channel->state == CH_STATE_RCD_DONE || + channel->state == CH_STATE_DOWN)); + if (channel->state == CH_STATE_DOWN) + ret = -EIO; + else + channel->state = CH_STATE_DOWN; + if (ret) { + kfree(rcd_buf); + *buffer = NULL; + *length = 0; + } else { + *length = ciw->count; + *buffer = rcd_buf; + } + return ret; +} + +static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd) +{ + QETH_DBF_TEXT(SETUP, 2, "cfgunit"); + card->info.chpid = prcd[30]; + card->info.unit_addr2 = prcd[31]; + card->info.cula = prcd[63]; + card->info.guestlan = ((prcd[0x10] == _ascebc['V']) && + (prcd[0x11] == _ascebc['M'])); +} + +static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card) +{ + enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED; + struct diag26c_vnic_resp *response = NULL; + struct diag26c_vnic_req *request = NULL; + struct ccw_dev_id id; + char userid[80]; + int rc = 0; + + QETH_DBF_TEXT(SETUP, 2, "vmlayer"); + + cpcmd("QUERY USERID", userid, sizeof(userid), &rc); + if (rc) + goto out; + + request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); + response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); + if (!request || !response) { + rc = -ENOMEM; + goto out; + } + + ccw_device_get_id(CARD_RDEV(card), &id); + request->resp_buf_len = sizeof(*response); + request->resp_version = DIAG26C_VERSION6_VM65918; + request->req_format = DIAG26C_VNIC_INFO; + ASCEBC(userid, 8); + memcpy(&request->sys_name, userid, 8); + request->devno = id.devno; + + QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); + rc = diag26c(request, response, DIAG26C_PORT_VNIC); + 
QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); + if (rc) + goto out; + QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); + + if (request->resp_buf_len < sizeof(*response) || + response->version != request->resp_version) { + rc = -EIO; + goto out; + } + + if (response->protocol == VNIC_INFO_PROT_L2) + disc = QETH_DISCIPLINE_LAYER2; + else if (response->protocol == VNIC_INFO_PROT_L3) + disc = QETH_DISCIPLINE_LAYER3; + +out: + kfree(response); + kfree(request); + if (rc) + QETH_DBF_TEXT_(SETUP, 2, "err%x", rc); + return disc; +} + +/* Determine whether the device requires a specific layer discipline */ +static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card) +{ + enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED; + + if (card->info.type == QETH_CARD_TYPE_OSM || + card->info.type == QETH_CARD_TYPE_OSN) + disc = QETH_DISCIPLINE_LAYER2; + else if (card->info.guestlan) + disc = (card->info.type == QETH_CARD_TYPE_IQD) ? + QETH_DISCIPLINE_LAYER3 : + qeth_vm_detect_layer(card); + + switch (disc) { + case QETH_DISCIPLINE_LAYER2: + QETH_DBF_TEXT(SETUP, 3, "force l2"); + break; + case QETH_DISCIPLINE_LAYER3: + QETH_DBF_TEXT(SETUP, 3, "force l3"); + break; + default: + QETH_DBF_TEXT(SETUP, 3, "force no"); + } + + return disc; +} + +static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd) +{ + QETH_DBF_TEXT(SETUP, 2, "cfgblkt"); + + if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && + prcd[76] >= 0xF1 && prcd[76] <= 0xF4) { + card->info.blkt.time_total = 0; + card->info.blkt.inter_packet = 0; + card->info.blkt.inter_packet_jumbo = 0; + } else { + card->info.blkt.time_total = 250; + card->info.blkt.inter_packet = 5; + card->info.blkt.inter_packet_jumbo = 15; + } +} + +static void qeth_init_tokens(struct qeth_card *card) +{ + card->token.issuer_rm_w = 0x00010103UL; + card->token.cm_filter_w = 0x00010108UL; + card->token.cm_connection_w = 0x0001010aUL; + card->token.ulp_filter_w = 0x0001010bUL; + card->token.ulp_connection_w = 0x0001010dUL; +} + +static void qeth_init_func_level(struct qeth_card *card) +{ + switch (card->info.type) { + case QETH_CARD_TYPE_IQD: + card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD; + break; + case QETH_CARD_TYPE_OSD: + case QETH_CARD_TYPE_OSN: + card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD; + break; + default: + break; + } +} + +static int qeth_idx_activate_get_answer(struct qeth_channel *channel, + void (*idx_reply_cb)(struct qeth_channel *, + struct qeth_cmd_buffer *)) +{ + struct qeth_cmd_buffer *iob; + unsigned long flags; + int rc; + struct qeth_card *card; + + QETH_DBF_TEXT(SETUP, 2, "idxanswr"); + card = CARD_FROM_CDEV(channel->ccwdev); + iob = qeth_get_buffer(channel); + if (!iob) + return -ENOMEM; + iob->callback = idx_reply_cb; + qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data); + + wait_event(card->wait_q, + atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); + QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, + (addr_t) iob, 0, 0, QETH_TIMEOUT); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + + if (rc) { + QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); + QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); + atomic_set(&channel->irq_pending, 0); + qeth_release_buffer(channel, iob); + wake_up(&card->wait_q); + return rc; + } + rc = wait_event_interruptible_timeout(card->wait_q, + channel->state == CH_STATE_UP, QETH_TIMEOUT); + if (rc == -ERESTARTSYS) + return rc; 
+ if (channel->state != CH_STATE_UP) { + rc = -ETIME; + QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); + } else + rc = 0; + return rc; +} + +static int qeth_idx_activate_channel(struct qeth_channel *channel, + void (*idx_reply_cb)(struct qeth_channel *, + struct qeth_cmd_buffer *)) +{ + struct qeth_card *card; + struct qeth_cmd_buffer *iob; + unsigned long flags; + __u16 temp; + __u8 tmp; + int rc; + struct ccw_dev_id temp_devid; + + card = CARD_FROM_CDEV(channel->ccwdev); + + QETH_DBF_TEXT(SETUP, 2, "idxactch"); + + iob = qeth_get_buffer(channel); + if (!iob) + return -ENOMEM; + iob->callback = idx_reply_cb; + qeth_setup_ccw(channel->ccw, CCW_CMD_WRITE, IDX_ACTIVATE_SIZE, + iob->data); + if (channel == &card->write) { + memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE); + memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), + &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); + card->seqno.trans_hdr++; + } else { + memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE); + memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), + &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); + } + tmp = ((u8)card->dev->dev_port) | 0x80; + memcpy(QETH_IDX_ACT_PNO(iob->data), &tmp, 1); + memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), + &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), + &card->info.func_level, sizeof(__u16)); + ccw_device_get_id(CARD_DDEV(card), &temp_devid); + memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2); + temp = (card->info.cula << 8) + card->info.unit_addr2; + memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2); + + wait_event(card->wait_q, + atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); + QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, + (addr_t) iob, 0, 0, QETH_TIMEOUT); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + + if (rc) { + QETH_DBF_MESSAGE(2, "Error1 in activating channel. 
rc=%d\n", + rc); + QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + atomic_set(&channel->irq_pending, 0); + qeth_release_buffer(channel, iob); + wake_up(&card->wait_q); + return rc; + } + rc = wait_event_interruptible_timeout(card->wait_q, + channel->state == CH_STATE_ACTIVATING, QETH_TIMEOUT); + if (rc == -ERESTARTSYS) + return rc; + if (channel->state != CH_STATE_ACTIVATING) { + dev_warn(&channel->ccwdev->dev, "The qeth device driver" + " failed to recover an error on the device\n"); + QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n", + dev_name(&channel->ccwdev->dev)); + QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); + return -ETIME; + } + return qeth_idx_activate_get_answer(channel, idx_reply_cb); +} + +static int qeth_peer_func_level(int level) +{ + if ((level & 0xff) == 8) + return (level & 0xff) + 0x400; + if (((level >> 8) & 3) == 1) + return (level & 0xff) + 0x200; + return level; +} + +static void qeth_idx_write_cb(struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) +{ + struct qeth_card *card; + __u16 temp; + + QETH_DBF_TEXT(SETUP , 2, "idxwrcb"); + + if (channel->state == CH_STATE_DOWN) { + channel->state = CH_STATE_ACTIVATING; + goto out; + } + card = CARD_FROM_CDEV(channel->ccwdev); + + if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { + if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == QETH_IDX_ACT_ERR_EXCL) + dev_err(&channel->ccwdev->dev, + "The adapter is used exclusively by another " + "host\n"); + else + QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:" + " negative reply\n", + dev_name(&channel->ccwdev->dev)); + goto out; + } + memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); + if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) { + QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: " + "function level mismatch (sent: 0x%x, received: " + "0x%x)\n", dev_name(&channel->ccwdev->dev), + card->info.func_level, temp); + goto out; + } + channel->state = CH_STATE_UP; +out: + qeth_release_buffer(channel, iob); +} + +static void qeth_idx_read_cb(struct qeth_channel *channel, + struct qeth_cmd_buffer *iob) +{ + struct qeth_card *card; + __u16 temp; + + QETH_DBF_TEXT(SETUP , 2, "idxrdcb"); + if (channel->state == CH_STATE_DOWN) { + channel->state = CH_STATE_ACTIVATING; + goto out; + } + + card = CARD_FROM_CDEV(channel->ccwdev); + if (qeth_check_idx_response(card, iob->data)) + goto out; + + if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { + switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) { + case QETH_IDX_ACT_ERR_EXCL: + dev_err(&channel->ccwdev->dev, + "The adapter is used exclusively by another " + "host\n"); + break; + case QETH_IDX_ACT_ERR_AUTH: + case QETH_IDX_ACT_ERR_AUTH_USER: + dev_err(&channel->ccwdev->dev, + "Setting the device online failed because of " + "insufficient authorization\n"); + break; + default: + QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:" + " negative reply\n", + dev_name(&channel->ccwdev->dev)); + } + QETH_CARD_TEXT_(card, 2, "idxread%c", + QETH_IDX_ACT_CAUSE_CODE(iob->data)); + goto out; + } + + memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); + if (temp != qeth_peer_func_level(card->info.func_level)) { + QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function " + "level mismatch (sent: 0x%x, received: 0x%x)\n", + dev_name(&channel->ccwdev->dev), + card->info.func_level, temp); + goto out; + } + memcpy(&card->token.issuer_rm_r, + QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data), + QETH_MPC_TOKEN_LENGTH); + memcpy(&card->info.mcl_level[0], + QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH); + channel->state = CH_STATE_UP; 
+out: + qeth_release_buffer(channel, iob); +} + +void qeth_prepare_control_data(struct qeth_card *card, int len, + struct qeth_cmd_buffer *iob) +{ + qeth_setup_ccw(iob->channel->ccw, CCW_CMD_WRITE, len, iob->data); + iob->callback = qeth_release_buffer; + + memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), + &card->seqno.trans_hdr, QETH_SEQ_NO_LENGTH); + card->seqno.trans_hdr++; + memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data), + &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH); + card->seqno.pdu_hdr++; + memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data), + &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH); + QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN); +} +EXPORT_SYMBOL_GPL(qeth_prepare_control_data); + +/** + * qeth_send_control_data() - send control command to the card + * @card: qeth_card structure pointer + * @len: size of the command buffer + * @iob: qeth_cmd_buffer pointer + * @reply_cb: callback function pointer + * @cb_card: pointer to the qeth_card structure + * @cb_reply: pointer to the qeth_reply structure + * @cb_cmd: pointer to the original iob for non-IPA + * commands, or to the qeth_ipa_cmd structure + * for the IPA commands. + * @reply_param: private pointer passed to the callback + * + * Returns the value of the `return_code' field of the response + * block returned from the hardware, or other error indication. + * Value of zero indicates successful execution of the command. + * + * Callback function gets called one or more times, with cb_cmd + * pointing to the response returned by the hardware. Callback + * function must return non-zero if more reply blocks are expected, + * and zero if the last or only reply block is received. Callback + * function can get the value of the reply_param pointer from the + * field 'param' of the structure qeth_reply. 
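+ *
+ * A minimal reply callback, shown only as an illustrative sketch (it is
+ * not part of the driver) and assuming the caller passed a pointer to a
+ * u16 as reply_param, could look like this:
+ *
+ *   static int my_reply_cb(struct qeth_card *cb_card,
+ *                          struct qeth_reply *cb_reply,
+ *                          unsigned long cb_cmd)
+ *   {
+ *           struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)cb_cmd;
+ *
+ *           *(u16 *)cb_reply->param = cmd->hdr.return_code;
+ *           return 0;
+ *   }
+ *
+ * Returning zero marks the reply as complete; qeth_send_control_data()
+ * then stops waiting and hands reply->rc back to its caller.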
+ */ + +int qeth_send_control_data(struct qeth_card *card, int len, + struct qeth_cmd_buffer *iob, + int (*reply_cb)(struct qeth_card *cb_card, + struct qeth_reply *cb_reply, + unsigned long cb_cmd), + void *reply_param) +{ + struct qeth_channel *channel = iob->channel; + int rc; + unsigned long flags; + struct qeth_reply *reply = NULL; + unsigned long timeout, event_timeout; + struct qeth_ipa_cmd *cmd = NULL; + + QETH_CARD_TEXT(card, 2, "sendctl"); + + if (card->read_or_write_problem) { + qeth_release_buffer(channel, iob); + return -EIO; + } + reply = qeth_alloc_reply(card); + if (!reply) { + qeth_release_buffer(channel, iob); + return -ENOMEM; + } + reply->callback = reply_cb; + reply->param = reply_param; + + init_waitqueue_head(&reply->wait_q); + + while (atomic_cmpxchg(&channel->irq_pending, 0, 1)) ; + + if (IS_IPA(iob->data)) { + cmd = __ipa_cmd(iob); + cmd->hdr.seqno = card->seqno.ipa++; + reply->seqno = cmd->hdr.seqno; + event_timeout = QETH_IPA_TIMEOUT; + } else { + reply->seqno = QETH_IDX_COMMAND_SEQNO; + event_timeout = QETH_TIMEOUT; + } + qeth_prepare_control_data(card, len, iob); + + spin_lock_irqsave(&card->lock, flags); + list_add_tail(&reply->list, &card->cmd_waiter_list); + spin_unlock_irqrestore(&card->lock, flags); + + timeout = jiffies + event_timeout; + + QETH_CARD_TEXT(card, 6, "noirqpnd"); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, + (addr_t) iob, 0, 0, event_timeout); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + if (rc) { + QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " + "ccw_device_start rc = %i\n", + dev_name(&channel->ccwdev->dev), rc); + QETH_CARD_TEXT_(card, 2, " err%d", rc); + spin_lock_irqsave(&card->lock, flags); + list_del_init(&reply->list); + qeth_put_reply(reply); + spin_unlock_irqrestore(&card->lock, flags); + qeth_release_buffer(channel, iob); + atomic_set(&channel->irq_pending, 0); + wake_up(&card->wait_q); + return rc; + } + + /* we have only one long running ipassist, since we can ensure + process context of this command we can sleep */ + if (cmd && cmd->hdr.command == IPA_CMD_SETIP && + cmd->hdr.prot_version == QETH_PROT_IPV4) { + if (!wait_event_timeout(reply->wait_q, + atomic_read(&reply->received), event_timeout)) + goto time_err; + } else { + while (!atomic_read(&reply->received)) { + if (time_after(jiffies, timeout)) + goto time_err; + cpu_relax(); + } + } + + rc = reply->rc; + qeth_put_reply(reply); + return rc; + +time_err: + reply->rc = -ETIME; + spin_lock_irqsave(&reply->card->lock, flags); + list_del_init(&reply->list); + spin_unlock_irqrestore(&reply->card->lock, flags); + atomic_inc(&reply->received); + rc = reply->rc; + qeth_put_reply(reply); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_send_control_data); + +static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(SETUP, 2, "cmenblcb"); + + iob = (struct qeth_cmd_buffer *) data; + memcpy(&card->token.cm_filter_r, + QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data), + QETH_MPC_TOKEN_LENGTH); + QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); + return 0; +} + +static int qeth_cm_enable(struct qeth_card *card) +{ + int rc; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(SETUP, 2, "cmenable"); + + iob = qeth_wait_for_buffer(&card->write); + memcpy(iob->data, CM_ENABLE, CM_ENABLE_SIZE); + memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data), + &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); + 
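+	/* offer the locally generated filter token (cm_filter_w); the
+	 * adapter's own filter token from the reply is stored in
+	 * card->token.cm_filter_r by qeth_cm_enable_cb()
+	 */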
memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data), + &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH); + + rc = qeth_send_control_data(card, CM_ENABLE_SIZE, iob, + qeth_cm_enable_cb, NULL); + return rc; +} + +static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(SETUP, 2, "cmsetpcb"); + + iob = (struct qeth_cmd_buffer *) data; + memcpy(&card->token.cm_connection_r, + QETH_CM_SETUP_RESP_DEST_ADDR(iob->data), + QETH_MPC_TOKEN_LENGTH); + QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); + return 0; +} + +static int qeth_cm_setup(struct qeth_card *card) +{ + int rc; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(SETUP, 2, "cmsetup"); + + iob = qeth_wait_for_buffer(&card->write); + memcpy(iob->data, CM_SETUP, CM_SETUP_SIZE); + memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data), + &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data), + &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data), + &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH); + rc = qeth_send_control_data(card, CM_SETUP_SIZE, iob, + qeth_cm_setup_cb, NULL); + return rc; + +} + +static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu) +{ + struct net_device *dev = card->dev; + unsigned int new_mtu; + + if (!max_mtu) { + /* IQD needs accurate max MTU to set up its RX buffers: */ + if (IS_IQD(card)) + return -EINVAL; + /* tolerate quirky HW: */ + max_mtu = ETH_MAX_MTU; + } + + rtnl_lock(); + if (IS_IQD(card)) { + /* move any device with default MTU to new max MTU: */ + new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu; + + /* adjust RX buffer size to new max MTU: */ + card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE; + if (dev->max_mtu && dev->max_mtu != max_mtu) + qeth_free_qdio_buffers(card); + } else { + if (dev->mtu) + new_mtu = dev->mtu; + /* default MTUs for first setup: */ + else if (card->options.layer2) + new_mtu = ETH_DATA_LEN; + else + new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */ + } + + dev->max_mtu = max_mtu; + dev->mtu = min(new_mtu, max_mtu); + rtnl_unlock(); + return 0; +} + +static int qeth_get_mtu_outof_framesize(int framesize) +{ + switch (framesize) { + case 0x4000: + return 8192; + case 0x6000: + return 16384; + case 0xa000: + return 32768; + case 0xffff: + return 57344; + default: + return 0; + } +} + +static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + + __u16 mtu, framesize; + __u16 len; + __u8 link_type; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(SETUP, 2, "ulpenacb"); + + iob = (struct qeth_cmd_buffer *) data; + memcpy(&card->token.ulp_filter_r, + QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data), + QETH_MPC_TOKEN_LENGTH); + if (card->info.type == QETH_CARD_TYPE_IQD) { + memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2); + mtu = qeth_get_mtu_outof_framesize(framesize); + } else { + mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data); + } + *(u16 *)reply->param = mtu; + + memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2); + if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) { + memcpy(&link_type, + QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1); + card->info.link_type = link_type; + } else + card->info.link_type = 0; + QETH_DBF_TEXT_(SETUP, 2, "link%d", card->info.link_type); + QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); + return 0; +} + +static u8 qeth_mpc_select_prot_type(struct qeth_card *card) +{ + if 
(IS_OSN(card)) + return QETH_PROT_OSN2; + return (card->options.layer2 == 1) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP; +} + +static int qeth_ulp_enable(struct qeth_card *card) +{ + u8 prot_type = qeth_mpc_select_prot_type(card); + struct qeth_cmd_buffer *iob; + u16 max_mtu; + int rc; + + /*FIXME: trace view callbacks*/ + QETH_DBF_TEXT(SETUP, 2, "ulpenabl"); + + iob = qeth_wait_for_buffer(&card->write); + memcpy(iob->data, ULP_ENABLE, ULP_ENABLE_SIZE); + + *(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port; + memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1); + memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data), + &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data), + &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH); + rc = qeth_send_control_data(card, ULP_ENABLE_SIZE, iob, + qeth_ulp_enable_cb, &max_mtu); + if (rc) + return rc; + return qeth_update_max_mtu(card, max_mtu); +} + +static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(SETUP, 2, "ulpstpcb"); + + iob = (struct qeth_cmd_buffer *) data; + memcpy(&card->token.ulp_connection_r, + QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), + QETH_MPC_TOKEN_LENGTH); + if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data), + 3)) { + QETH_DBF_TEXT(SETUP, 2, "olmlimit"); + dev_err(&card->gdev->dev, "A connection could not be " + "established because of an OLM limit\n"); + iob->rc = -EMLINK; + } + QETH_DBF_TEXT_(SETUP, 2, " rc%d", iob->rc); + return 0; +} + +static int qeth_ulp_setup(struct qeth_card *card) +{ + int rc; + __u16 temp; + struct qeth_cmd_buffer *iob; + struct ccw_dev_id dev_id; + + QETH_DBF_TEXT(SETUP, 2, "ulpsetup"); + + iob = qeth_wait_for_buffer(&card->write); + memcpy(iob->data, ULP_SETUP, ULP_SETUP_SIZE); + + memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data), + &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data), + &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data), + &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH); + + ccw_device_get_id(CARD_DDEV(card), &dev_id); + memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2); + temp = (card->info.cula << 8) + card->info.unit_addr2; + memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2); + rc = qeth_send_control_data(card, ULP_SETUP_SIZE, iob, + qeth_ulp_setup_cb, NULL); + return rc; +} + +static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) +{ + struct qeth_qdio_out_buffer *newbuf; + + newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC); + if (!newbuf) + return -ENOMEM; + + newbuf->buffer = q->qdio_bufs[bidx]; + skb_queue_head_init(&newbuf->skb_list); + lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); + newbuf->q = q; + newbuf->next_pending = q->bufs[bidx]; + atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY); + q->bufs[bidx] = newbuf; + return 0; +} + +static void qeth_free_output_queue(struct qeth_qdio_out_q *q) +{ + if (!q) + return; + + qeth_clear_outq_buffers(q, 1); + qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); + kfree(q); +} + +static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void) +{ + struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL); + + if (!q) + return NULL; + + if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) { + kfree(q); + return NULL; + } + return q; +} + +static int qeth_alloc_qdio_buffers(struct qeth_card 
*card) +{ + int i, j; + + QETH_DBF_TEXT(SETUP, 2, "allcqdbf"); + + if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, + QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) + return 0; + + QETH_DBF_TEXT(SETUP, 2, "inq"); + card->qdio.in_q = qeth_alloc_qdio_queue(); + if (!card->qdio.in_q) + goto out_nomem; + + /* inbound buffer pool */ + if (qeth_alloc_buffer_pool(card)) + goto out_freeinq; + + /* outbound */ + card->qdio.out_qs = + kcalloc(card->qdio.no_out_queues, + sizeof(struct qeth_qdio_out_q *), + GFP_KERNEL); + if (!card->qdio.out_qs) + goto out_freepool; + for (i = 0; i < card->qdio.no_out_queues; ++i) { + card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf(); + if (!card->qdio.out_qs[i]) + goto out_freeoutq; + QETH_DBF_TEXT_(SETUP, 2, "outq %i", i); + QETH_DBF_HEX(SETUP, 2, &card->qdio.out_qs[i], sizeof(void *)); + card->qdio.out_qs[i]->queue_no = i; + /* give outbound qeth_qdio_buffers their qdio_buffers */ + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { + WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL); + if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j)) + goto out_freeoutqbufs; + } + } + + /* completion */ + if (qeth_alloc_cq(card)) + goto out_freeoutq; + + return 0; + +out_freeoutqbufs: + while (j > 0) { + --j; + kmem_cache_free(qeth_qdio_outbuf_cache, + card->qdio.out_qs[i]->bufs[j]); + card->qdio.out_qs[i]->bufs[j] = NULL; + } +out_freeoutq: + while (i > 0) + qeth_free_output_queue(card->qdio.out_qs[--i]); + kfree(card->qdio.out_qs); + card->qdio.out_qs = NULL; +out_freepool: + qeth_free_buffer_pool(card); +out_freeinq: + qeth_free_qdio_queue(card->qdio.in_q); + card->qdio.in_q = NULL; +out_nomem: + atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); + return -ENOMEM; +} + +static void qeth_free_qdio_buffers(struct qeth_card *card) +{ + int i, j; + + if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == + QETH_QDIO_UNINITIALIZED) + return; + + qeth_free_cq(card); + cancel_delayed_work_sync(&card->buffer_reclaim_work); + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { + if (card->qdio.in_q->bufs[j].rx_skb) + dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb); + } + qeth_free_qdio_queue(card->qdio.in_q); + card->qdio.in_q = NULL; + /* inbound buffer pool */ + qeth_free_buffer_pool(card); + /* free outbound qdio_qs */ + if (card->qdio.out_qs) { + for (i = 0; i < card->qdio.no_out_queues; i++) + qeth_free_output_queue(card->qdio.out_qs[i]); + kfree(card->qdio.out_qs); + card->qdio.out_qs = NULL; + } +} + +static void qeth_create_qib_param_field(struct qeth_card *card, + char *param_field) +{ + + param_field[0] = _ascebc['P']; + param_field[1] = _ascebc['C']; + param_field[2] = _ascebc['I']; + param_field[3] = _ascebc['T']; + *((unsigned int *) (¶m_field[4])) = QETH_PCI_THRESHOLD_A(card); + *((unsigned int *) (¶m_field[8])) = QETH_PCI_THRESHOLD_B(card); + *((unsigned int *) (¶m_field[12])) = QETH_PCI_TIMER_VALUE(card); +} + +static void qeth_create_qib_param_field_blkt(struct qeth_card *card, + char *param_field) +{ + param_field[16] = _ascebc['B']; + param_field[17] = _ascebc['L']; + param_field[18] = _ascebc['K']; + param_field[19] = _ascebc['T']; + *((unsigned int *) (¶m_field[20])) = card->info.blkt.time_total; + *((unsigned int *) (¶m_field[24])) = card->info.blkt.inter_packet; + *((unsigned int *) (¶m_field[28])) = + card->info.blkt.inter_packet_jumbo; +} + +static int qeth_qdio_activate(struct qeth_card *card) +{ + QETH_DBF_TEXT(SETUP, 3, "qdioact"); + return qdio_activate(CARD_DDEV(card)); +} + +static int qeth_dm_act(struct qeth_card *card) +{ + int rc; 
+ struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(SETUP, 2, "dmact"); + + iob = qeth_wait_for_buffer(&card->write); + memcpy(iob->data, DM_ACT, DM_ACT_SIZE); + + memcpy(QETH_DM_ACT_DEST_ADDR(iob->data), + &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH); + memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data), + &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); + rc = qeth_send_control_data(card, DM_ACT_SIZE, iob, NULL, NULL); + return rc; +} + +static int qeth_mpc_initialize(struct qeth_card *card) +{ + int rc; + + QETH_DBF_TEXT(SETUP, 2, "mpcinit"); + + rc = qeth_issue_next_read(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + return rc; + } + rc = qeth_cm_enable(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); + goto out_qdio; + } + rc = qeth_cm_setup(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); + goto out_qdio; + } + rc = qeth_ulp_enable(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); + goto out_qdio; + } + rc = qeth_ulp_setup(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); + goto out_qdio; + } + rc = qeth_alloc_qdio_buffers(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); + goto out_qdio; + } + rc = qeth_qdio_establish(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + qeth_free_qdio_buffers(card); + goto out_qdio; + } + rc = qeth_qdio_activate(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); + goto out_qdio; + } + rc = qeth_dm_act(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc); + goto out_qdio; + } + + return 0; +out_qdio: + qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); + qdio_free(CARD_DDEV(card)); + return rc; +} + +void qeth_print_status_message(struct qeth_card *card) +{ + switch (card->info.type) { + case QETH_CARD_TYPE_OSD: + case QETH_CARD_TYPE_OSM: + case QETH_CARD_TYPE_OSX: + /* VM will use a non-zero first character + * to indicate a HiperSockets like reporting + * of the level OSA sets the first character to zero + * */ + if (!card->info.mcl_level[0]) { + sprintf(card->info.mcl_level, "%02x%02x", + card->info.mcl_level[2], + card->info.mcl_level[3]); + break; + } + /* fallthrough */ + case QETH_CARD_TYPE_IQD: + if ((card->info.guestlan) || + (card->info.mcl_level[0] & 0x80)) { + card->info.mcl_level[0] = (char) _ebcasc[(__u8) + card->info.mcl_level[0]]; + card->info.mcl_level[1] = (char) _ebcasc[(__u8) + card->info.mcl_level[1]]; + card->info.mcl_level[2] = (char) _ebcasc[(__u8) + card->info.mcl_level[2]]; + card->info.mcl_level[3] = (char) _ebcasc[(__u8) + card->info.mcl_level[3]]; + card->info.mcl_level[QETH_MCL_LENGTH] = 0; + } + break; + default: + memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1); + } + dev_info(&card->gdev->dev, + "Device is a%s card%s%s%s\nwith link type %s.\n", + qeth_get_cardname(card), + (card->info.mcl_level[0]) ? " (level: " : "", + (card->info.mcl_level[0]) ? card->info.mcl_level : "", + (card->info.mcl_level[0]) ? 
")" : "", + qeth_get_cardname_short(card)); +} +EXPORT_SYMBOL_GPL(qeth_print_status_message); + +static void qeth_initialize_working_pool_list(struct qeth_card *card) +{ + struct qeth_buffer_pool_entry *entry; + + QETH_CARD_TEXT(card, 5, "inwrklst"); + + list_for_each_entry(entry, + &card->qdio.init_pool.entry_list, init_list) { + qeth_put_buffer_pool_entry(card, entry); + } +} + +static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( + struct qeth_card *card) +{ + struct list_head *plh; + struct qeth_buffer_pool_entry *entry; + int i, free; + struct page *page; + + if (list_empty(&card->qdio.in_buf_pool.entry_list)) + return NULL; + + list_for_each(plh, &card->qdio.in_buf_pool.entry_list) { + entry = list_entry(plh, struct qeth_buffer_pool_entry, list); + free = 1; + for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { + if (page_count(virt_to_page(entry->elements[i])) > 1) { + free = 0; + break; + } + } + if (free) { + list_del_init(&entry->list); + return entry; + } + } + + /* no free buffer in pool so take first one and swap pages */ + entry = list_entry(card->qdio.in_buf_pool.entry_list.next, + struct qeth_buffer_pool_entry, list); + for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { + if (page_count(virt_to_page(entry->elements[i])) > 1) { + page = alloc_page(GFP_ATOMIC); + if (!page) { + return NULL; + } else { + free_page((unsigned long)entry->elements[i]); + entry->elements[i] = page_address(page); + if (card->options.performance_stats) + card->perf_stats.sg_alloc_page_rx++; + } + } + } + list_del_init(&entry->list); + return entry; +} + +static int qeth_init_input_buffer(struct qeth_card *card, + struct qeth_qdio_buffer *buf) +{ + struct qeth_buffer_pool_entry *pool_entry; + int i; + + if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) { + buf->rx_skb = netdev_alloc_skb(card->dev, + QETH_RX_PULL_LEN + ETH_HLEN); + if (!buf->rx_skb) + return -ENOMEM; + } + + pool_entry = qeth_find_free_buffer_pool_entry(card); + if (!pool_entry) + return -ENOBUFS; + + /* + * since the buffer is accessed only from the input_tasklet + * there shouldn't be a need to synchronize; also, since we use + * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off + * buffers + */ + + buf->pool_entry = pool_entry; + for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { + buf->buffer->element[i].length = PAGE_SIZE; + buf->buffer->element[i].addr = pool_entry->elements[i]; + if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) + buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY; + else + buf->buffer->element[i].eflags = 0; + buf->buffer->element[i].sflags = 0; + } + return 0; +} + +int qeth_init_qdio_queues(struct qeth_card *card) +{ + int i, j; + int rc; + + QETH_DBF_TEXT(SETUP, 2, "initqdqs"); + + /* inbound queue */ + qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); + memset(&card->rx, 0, sizeof(struct qeth_rx)); + + qeth_initialize_working_pool_list(card); + /*give only as many buffers to hardware as we have buffer pool entries*/ + for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) { + rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]); + if (rc) + return rc; + } + + card->qdio.in_q->next_buf_to_init = + card->qdio.in_buf_pool.buf_count - 1; + rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0, + card->qdio.in_buf_pool.buf_count - 1); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + return rc; + } + + /* completion */ + rc = qeth_cq_init(card); + if (rc) { + return rc; + } + + /* outbound queue */ + for (i = 0; i < 
card->qdio.no_out_queues; ++i) { + qdio_reset_buffers(card->qdio.out_qs[i]->qdio_bufs, + QDIO_MAX_BUFFERS_PER_Q); + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { + qeth_clear_output_buffer(card->qdio.out_qs[i], + card->qdio.out_qs[i]->bufs[j]); + } + card->qdio.out_qs[i]->card = card; + card->qdio.out_qs[i]->next_buf_to_fill = 0; + card->qdio.out_qs[i]->do_pack = 0; + atomic_set(&card->qdio.out_qs[i]->used_buffers, 0); + atomic_set(&card->qdio.out_qs[i]->set_pci_flags_count, 0); + atomic_set(&card->qdio.out_qs[i]->state, + QETH_OUT_Q_UNLOCKED); + } + return 0; +} +EXPORT_SYMBOL_GPL(qeth_init_qdio_queues); + +static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type) +{ + switch (link_type) { + case QETH_LINK_TYPE_HSTR: + return 2; + default: + return 1; + } +} + +static void qeth_fill_ipacmd_header(struct qeth_card *card, + struct qeth_ipa_cmd *cmd, __u8 command, + enum qeth_prot_versions prot) +{ + memset(cmd, 0, sizeof(struct qeth_ipa_cmd)); + cmd->hdr.command = command; + cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; + /* cmd->hdr.seqno is set by qeth_send_control_data() */ + cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type); + cmd->hdr.rel_adapter_no = (u8) card->dev->dev_port; + if (card->options.layer2) + cmd->hdr.prim_version_no = 2; + else + cmd->hdr.prim_version_no = 1; + cmd->hdr.param_count = 1; + cmd->hdr.prot_version = prot; + cmd->hdr.ipa_supported = 0; + cmd->hdr.ipa_enabled = 0; +} + +struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card, + enum qeth_ipa_cmds ipacmd, enum qeth_prot_versions prot) +{ + struct qeth_cmd_buffer *iob; + + iob = qeth_get_buffer(&card->write); + if (iob) { + qeth_fill_ipacmd_header(card, __ipa_cmd(iob), ipacmd, prot); + } else { + dev_warn(&card->gdev->dev, + "The qeth driver ran out of channel command buffers\n"); + QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers", + dev_name(&card->gdev->dev)); + } + + return iob; +} +EXPORT_SYMBOL_GPL(qeth_get_ipacmd_buffer); + +void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob) +{ + u8 prot_type = qeth_mpc_select_prot_type(card); + + memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); + memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1); + memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), + &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); +} +EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd); + +/** + * qeth_send_ipa_cmd() - send an IPA command + * + * See qeth_send_control_data() for explanation of the arguments. 
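+ *
+ * On top of the plain control-data path this wrapper fills in the IPA
+ * PDU header first and, if the command times out with -ETIME, clears
+ * the list of pending IPA commands and schedules a recovery of the
+ * card.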
+ */ + +int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, + int (*reply_cb)(struct qeth_card *, struct qeth_reply*, + unsigned long), + void *reply_param) +{ + int rc; + + QETH_CARD_TEXT(card, 4, "sendipa"); + qeth_prepare_ipa_cmd(card, iob); + rc = qeth_send_control_data(card, IPA_CMD_LENGTH, + iob, reply_cb, reply_param); + if (rc == -ETIME) { + qeth_clear_ipacmd_list(card); + qeth_schedule_recovery(card); + } + return rc; +} +EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd); + +static int qeth_send_startlan(struct qeth_card *card) +{ + int rc; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT(SETUP, 2, "strtlan"); + + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0); + if (!iob) + return -ENOMEM; + rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); + return rc; +} + +static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) +{ + if (!cmd->hdr.return_code) + cmd->hdr.return_code = + cmd->data.setadapterparms.hdr.return_code; + return cmd->hdr.return_code; +} + +static int qeth_query_setadapterparms_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + QETH_CARD_TEXT(card, 3, "quyadpcb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; + + if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { + card->info.link_type = + cmd->data.setadapterparms.data.query_cmds_supp.lan_type; + QETH_DBF_TEXT_(SETUP, 2, "lnk %d", card->info.link_type); + } + card->options.adp.supported_funcs = + cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds; + return 0; +} + +static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, + __u32 command, __u32 cmdlen) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS, + QETH_PROT_IPV4); + if (iob) { + cmd = __ipa_cmd(iob); + cmd->data.setadapterparms.hdr.cmdlength = cmdlen; + cmd->data.setadapterparms.hdr.command_code = command; + cmd->data.setadapterparms.hdr.used_total = 1; + cmd->data.setadapterparms.hdr.seq_no = 1; + } + + return iob; +} + +static int qeth_query_setadapterparms(struct qeth_card *card) +{ + int rc; + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 3, "queryadp"); + iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED, + sizeof(struct qeth_ipacmd_setadpparms)); + if (!iob) + return -ENOMEM; + rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL); + return rc; +} + +static int qeth_query_ipassists_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(SETUP, 2, "qipasscb"); + + cmd = (struct qeth_ipa_cmd *) data; + + switch (cmd->hdr.return_code) { + case IPA_RC_NOTSUPP: + case IPA_RC_L2_UNSUPPORTED_CMD: + QETH_DBF_TEXT(SETUP, 2, "ipaunsup"); + card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS; + card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS; + return -0; + default: + if (cmd->hdr.return_code) { + QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled " + "rc=%d\n", + dev_name(&card->gdev->dev), + cmd->hdr.return_code); + return 0; + } + } + + if (cmd->hdr.prot_version == QETH_PROT_IPV4) { + card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported; + card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; + } else if (cmd->hdr.prot_version == QETH_PROT_IPV6) { + card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported; + card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; + } else + 
QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected" + "\n", dev_name(&card->gdev->dev)); + return 0; +} + +static int qeth_query_ipassists(struct qeth_card *card, + enum qeth_prot_versions prot) +{ + int rc; + struct qeth_cmd_buffer *iob; + + QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot); + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot); + if (!iob) + return -ENOMEM; + rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL); + return rc; +} + +static int qeth_query_switch_attributes_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_query_switch_attributes *attrs; + struct qeth_switch_info *sw_info; + + QETH_CARD_TEXT(card, 2, "qswiatcb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; + + sw_info = (struct qeth_switch_info *)reply->param; + attrs = &cmd->data.setadapterparms.data.query_switch_attributes; + sw_info->capabilities = attrs->capabilities; + sw_info->settings = attrs->settings; + QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, + sw_info->settings); + return 0; +} + +int qeth_query_switch_attributes(struct qeth_card *card, + struct qeth_switch_info *sw_info) +{ + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "qswiattr"); + if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES)) + return -EOPNOTSUPP; + if (!netif_carrier_ok(card->dev)) + return -ENOMEDIUM; + iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, + sizeof(struct qeth_ipacmd_setadpparms_hdr)); + if (!iob) + return -ENOMEM; + return qeth_send_ipa_cmd(card, iob, + qeth_query_switch_attributes_cb, sw_info); +} + +static int qeth_query_setdiagass_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + __u16 rc; + + cmd = (struct qeth_ipa_cmd *)data; + rc = cmd->hdr.return_code; + if (rc) + QETH_CARD_TEXT_(card, 2, "diagq:%x", rc); + else + card->info.diagass_support = cmd->data.diagass.ext; + return 0; +} + +static int qeth_query_setdiagass(struct qeth_card *card) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(SETUP, 2, "qdiagass"); + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + cmd->data.diagass.subcmd_len = 16; + cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY; + return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL); +} + +static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid) +{ + unsigned long info = get_zeroed_page(GFP_KERNEL); + struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info; + struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info; + struct ccw_dev_id ccwid; + int level; + + tid->chpid = card->info.chpid; + ccw_device_get_id(CARD_RDEV(card), &ccwid); + tid->ssid = ccwid.ssid; + tid->devno = ccwid.devno; + if (!info) + return; + level = stsi(NULL, 0, 0, 0); + if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0)) + tid->lparnr = info222->lpar_number; + if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) { + EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name)); + memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname)); + } + free_page(info); + return; +} + +static int qeth_hw_trap_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + __u16 rc; + + cmd = (struct qeth_ipa_cmd *)data; + rc = cmd->hdr.return_code; + if (rc) + QETH_CARD_TEXT_(card, 2, "trapc:%x", rc); 
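+	/* the trap command is best effort: a bad return code is only
+	 * traced and never propagated to the caller
+	 */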
+ return 0; +} + +int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(SETUP, 2, "diagtrap"); + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + cmd->data.diagass.subcmd_len = 80; + cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP; + cmd->data.diagass.type = 1; + cmd->data.diagass.action = action; + switch (action) { + case QETH_DIAGS_TRAP_ARM: + cmd->data.diagass.options = 0x0003; + cmd->data.diagass.ext = 0x00010000 + + sizeof(struct qeth_trap_id); + qeth_get_trap_id(card, + (struct qeth_trap_id *)cmd->data.diagass.cdata); + break; + case QETH_DIAGS_TRAP_DISARM: + cmd->data.diagass.options = 0x0001; + break; + case QETH_DIAGS_TRAP_CAPTURE: + break; + } + return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL); +} +EXPORT_SYMBOL_GPL(qeth_hw_trap); + +static int qeth_check_qdio_errors(struct qeth_card *card, + struct qdio_buffer *buf, + unsigned int qdio_error, + const char *dbftext) +{ + if (qdio_error) { + QETH_CARD_TEXT(card, 2, dbftext); + QETH_CARD_TEXT_(card, 2, " F15=%02X", + buf->element[15].sflags); + QETH_CARD_TEXT_(card, 2, " F14=%02X", + buf->element[14].sflags); + QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); + if ((buf->element[15].sflags) == 0x12) { + card->stats.rx_dropped++; + return 0; + } else + return 1; + } + return 0; +} + +static void qeth_queue_input_buffer(struct qeth_card *card, int index) +{ + struct qeth_qdio_q *queue = card->qdio.in_q; + struct list_head *lh; + int count; + int i; + int rc; + int newcount = 0; + + count = (index < queue->next_buf_to_init)? + card->qdio.in_buf_pool.buf_count - + (queue->next_buf_to_init - index) : + card->qdio.in_buf_pool.buf_count - + (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index); + /* only requeue at a certain threshold to avoid SIGAs */ + if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) { + for (i = queue->next_buf_to_init; + i < queue->next_buf_to_init + count; ++i) { + if (qeth_init_input_buffer(card, + &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) { + break; + } else { + newcount++; + } + } + + if (newcount < count) { + /* we are in memory shortage so we switch back to + traditional skb allocation and drop packages */ + atomic_set(&card->force_alloc_skb, 3); + count = newcount; + } else { + atomic_add_unless(&card->force_alloc_skb, -1, 0); + } + + if (!count) { + i = 0; + list_for_each(lh, &card->qdio.in_buf_pool.entry_list) + i++; + if (i == card->qdio.in_buf_pool.buf_count) { + QETH_CARD_TEXT(card, 2, "qsarbw"); + card->reclaim_index = index; + schedule_delayed_work( + &card->buffer_reclaim_work, + QETH_RECLAIM_WORK_TIME); + } + return; + } + + /* + * according to old code it should be avoided to requeue all + * 128 buffers in order to benefit from PCI avoidance. 
+ * this function keeps at least one buffer (the buffer at + * 'index') un-requeued -> this buffer is the first buffer that + * will be requeued the next time + */ + if (card->options.performance_stats) { + card->perf_stats.inbound_do_qdio_cnt++; + card->perf_stats.inbound_do_qdio_start_time = + qeth_get_micros(); + } + rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, + queue->next_buf_to_init, count); + if (card->options.performance_stats) + card->perf_stats.inbound_do_qdio_time += + qeth_get_micros() - + card->perf_stats.inbound_do_qdio_start_time; + if (rc) { + QETH_CARD_TEXT(card, 2, "qinberr"); + } + queue->next_buf_to_init = (queue->next_buf_to_init + count) % + QDIO_MAX_BUFFERS_PER_Q; + } +} + +static void qeth_buffer_reclaim_work(struct work_struct *work) +{ + struct qeth_card *card = container_of(work, struct qeth_card, + buffer_reclaim_work.work); + + QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index); + qeth_queue_input_buffer(card, card->reclaim_index); +} + +static void qeth_handle_send_error(struct qeth_card *card, + struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) +{ + int sbalf15 = buffer->buffer->element[15].sflags; + + QETH_CARD_TEXT(card, 6, "hdsnderr"); + if (card->info.type == QETH_CARD_TYPE_IQD) { + if (sbalf15 == 0) { + qdio_err = 0; + } else { + qdio_err = 1; + } + } + qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr"); + + if (!qdio_err) + return; + + if ((sbalf15 >= 15) && (sbalf15 <= 31)) + return; + + QETH_CARD_TEXT(card, 1, "lnkfail"); + QETH_CARD_TEXT_(card, 1, "%04x %02x", + (u16)qdio_err, (u8)sbalf15); +} + +/** + * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer. + * @queue: queue to check for packing buffer + * + * Returns number of buffers that were prepared for flush. + */ +static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue) +{ + struct qeth_qdio_out_buffer *buffer; + + buffer = queue->bufs[queue->next_buf_to_fill]; + if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) && + (buffer->next_element_to_fill > 0)) { + /* it's a packing buffer */ + atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); + queue->next_buf_to_fill = + (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q; + return 1; + } + return 0; +} + +/* + * Switched to packing state if the number of used buffers on a queue + * reaches a certain limit. + */ +static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) +{ + if (!queue->do_pack) { + if (atomic_read(&queue->used_buffers) + >= QETH_HIGH_WATERMARK_PACK){ + /* switch non-PACKING -> PACKING */ + QETH_CARD_TEXT(queue->card, 6, "np->pack"); + if (queue->card->options.performance_stats) + queue->card->perf_stats.sc_dp_p++; + queue->do_pack = 1; + } + } +} + +/* + * Switches from packing to non-packing mode. If there is a packing + * buffer on the queue this buffer will be prepared to be flushed. + * In that case 1 is returned to inform the caller. If no buffer + * has to be flushed, zero is returned. 
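+ *
+ * Together with qeth_switch_to_packing_if_needed() this forms a simple
+ * hysteresis: packing starts once QETH_HIGH_WATERMARK_PACK buffers are
+ * in use and stops again when the count drops to
+ * QETH_LOW_WATERMARK_PACK or below.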
+ */ +static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) +{ + if (queue->do_pack) { + if (atomic_read(&queue->used_buffers) + <= QETH_LOW_WATERMARK_PACK) { + /* switch PACKING -> non-PACKING */ + QETH_CARD_TEXT(queue->card, 6, "pack->np"); + if (queue->card->options.performance_stats) + queue->card->perf_stats.sc_p_dp++; + queue->do_pack = 0; + return qeth_prep_flush_pack_buffer(queue); + } + } + return 0; +} + +static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, + int count) +{ + struct qeth_qdio_out_buffer *buf; + int rc; + int i; + unsigned int qdio_flags; + + for (i = index; i < index + count; ++i) { + int bidx = i % QDIO_MAX_BUFFERS_PER_Q; + buf = queue->bufs[bidx]; + buf->buffer->element[buf->next_element_to_fill - 1].eflags |= + SBAL_EFLAGS_LAST_ENTRY; + + if (queue->bufstates) + queue->bufstates[bidx].user = buf; + + if (queue->card->info.type == QETH_CARD_TYPE_IQD) + continue; + + if (!queue->do_pack) { + if ((atomic_read(&queue->used_buffers) >= + (QETH_HIGH_WATERMARK_PACK - + QETH_WATERMARK_PACK_FUZZ)) && + !atomic_read(&queue->set_pci_flags_count)) { + /* it's likely that we'll go to packing + * mode soon */ + atomic_inc(&queue->set_pci_flags_count); + buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; + } + } else { + if (!atomic_read(&queue->set_pci_flags_count)) { + /* + * there's no outstanding PCI any more, so we + * have to request a PCI to be sure the the PCI + * will wake at some time in the future then we + * can flush packed buffers that might still be + * hanging around, which can happen if no + * further send was requested by the stack + */ + atomic_inc(&queue->set_pci_flags_count); + buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ; + } + } + } + + netif_trans_update(queue->card->dev); + if (queue->card->options.performance_stats) { + queue->card->perf_stats.outbound_do_qdio_cnt++; + queue->card->perf_stats.outbound_do_qdio_start_time = + qeth_get_micros(); + } + qdio_flags = QDIO_FLAG_SYNC_OUTPUT; + if (atomic_read(&queue->set_pci_flags_count)) + qdio_flags |= QDIO_FLAG_PCI_OUT; + atomic_add(count, &queue->used_buffers); + + rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, + queue->queue_no, index, count); + if (queue->card->options.performance_stats) + queue->card->perf_stats.outbound_do_qdio_time += + qeth_get_micros() - + queue->card->perf_stats.outbound_do_qdio_start_time; + if (rc) { + queue->card->stats.tx_errors += count; + /* ignore temporary SIGA errors without busy condition */ + if (rc == -ENOBUFS) + return; + QETH_CARD_TEXT(queue->card, 2, "flushbuf"); + QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no); + QETH_CARD_TEXT_(queue->card, 2, " idx%d", index); + QETH_CARD_TEXT_(queue->card, 2, " c%d", count); + QETH_CARD_TEXT_(queue->card, 2, " err%d", rc); + + /* this must not happen under normal circumstances. 
if it + * happens something is really wrong -> recover */ + qeth_schedule_recovery(queue->card); + return; + } + if (queue->card->options.performance_stats) + queue->card->perf_stats.bufs_sent += count; +} + +static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) +{ + int index; + int flush_cnt = 0; + int q_was_packing = 0; + + /* + * check if weed have to switch to non-packing mode or if + * we have to get a pci flag out on the queue + */ + if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) || + !atomic_read(&queue->set_pci_flags_count)) { + if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) == + QETH_OUT_Q_UNLOCKED) { + /* + * If we get in here, there was no action in + * do_send_packet. So, we check if there is a + * packing buffer to be flushed here. + */ + netif_stop_queue(queue->card->dev); + index = queue->next_buf_to_fill; + q_was_packing = queue->do_pack; + /* queue->do_pack may change */ + barrier(); + flush_cnt += qeth_switch_to_nonpacking_if_needed(queue); + if (!flush_cnt && + !atomic_read(&queue->set_pci_flags_count)) + flush_cnt += qeth_prep_flush_pack_buffer(queue); + if (queue->card->options.performance_stats && + q_was_packing) + queue->card->perf_stats.bufs_sent_pack += + flush_cnt; + if (flush_cnt) + qeth_flush_buffers(queue, index, flush_cnt); + atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); + } + } +} + +static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue, + unsigned long card_ptr) +{ + struct qeth_card *card = (struct qeth_card *)card_ptr; + + if (card->dev->flags & IFF_UP) + napi_schedule(&card->napi); +} + +int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq) +{ + int rc; + + if (card->options.cq == QETH_CQ_NOTAVAILABLE) { + rc = -1; + goto out; + } else { + if (card->options.cq == cq) { + rc = 0; + goto out; + } + + if (card->state != CARD_STATE_DOWN && + card->state != CARD_STATE_RECOVER) { + rc = -1; + goto out; + } + + qeth_free_qdio_buffers(card); + card->options.cq = cq; + rc = 0; + } +out: + return rc; + +} +EXPORT_SYMBOL_GPL(qeth_configure_cq); + +static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, + unsigned int queue, int first_element, + int count) +{ + struct qeth_qdio_q *cq = card->qdio.c_q; + int i; + int rc; + + if (!qeth_is_cq(card, queue)) + goto out; + + QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element); + QETH_CARD_TEXT_(card, 5, "qcqhc%d", count); + QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err); + + if (qdio_err) { + netif_stop_queue(card->dev); + qeth_schedule_recovery(card); + goto out; + } + + if (card->options.performance_stats) { + card->perf_stats.cq_cnt++; + card->perf_stats.cq_start_time = qeth_get_micros(); + } + + for (i = first_element; i < first_element + count; ++i) { + int bidx = i % QDIO_MAX_BUFFERS_PER_Q; + struct qdio_buffer *buffer = cq->qdio_bufs[bidx]; + int e = 0; + + while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) && + buffer->element[e].addr) { + unsigned long phys_aob_addr; + + phys_aob_addr = (unsigned long) buffer->element[e].addr; + qeth_qdio_handle_aob(card, phys_aob_addr); + ++e; + } + qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER); + } + rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue, + card->qdio.c_q->next_buf_to_init, + count); + if (rc) { + dev_warn(&card->gdev->dev, + "QDIO reported an error, rc=%i\n", rc); + QETH_CARD_TEXT(card, 2, "qcqherr"); + } + card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init + + count) % QDIO_MAX_BUFFERS_PER_Q; + + netif_wake_queue(card->dev); + + if 
(card->options.performance_stats) { + int delta_t = qeth_get_micros(); + delta_t -= card->perf_stats.cq_start_time; + card->perf_stats.cq_time += delta_t; + } +out: + return; +} + +static void qeth_qdio_input_handler(struct ccw_device *ccwdev, + unsigned int qdio_err, int queue, + int first_elem, int count, + unsigned long card_ptr) +{ + struct qeth_card *card = (struct qeth_card *)card_ptr; + + QETH_CARD_TEXT_(card, 2, "qihq%d", queue); + QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err); + + if (qeth_is_cq(card, queue)) + qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count); + else if (qdio_err) + qeth_schedule_recovery(card); +} + +static void qeth_qdio_output_handler(struct ccw_device *ccwdev, + unsigned int qdio_error, int __queue, + int first_element, int count, + unsigned long card_ptr) +{ + struct qeth_card *card = (struct qeth_card *) card_ptr; + struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; + struct qeth_qdio_out_buffer *buffer; + int i; + + QETH_CARD_TEXT(card, 6, "qdouhdl"); + if (qdio_error & QDIO_ERROR_FATAL) { + QETH_CARD_TEXT(card, 2, "achkcond"); + netif_stop_queue(card->dev); + qeth_schedule_recovery(card); + return; + } + if (card->options.performance_stats) { + card->perf_stats.outbound_handler_cnt++; + card->perf_stats.outbound_handler_start_time = + qeth_get_micros(); + } + for (i = first_element; i < (first_element + count); ++i) { + int bidx = i % QDIO_MAX_BUFFERS_PER_Q; + buffer = queue->bufs[bidx]; + qeth_handle_send_error(card, buffer, qdio_error); + + if (queue->bufstates && + (queue->bufstates[bidx].flags & + QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) { + WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED); + + if (atomic_cmpxchg(&buffer->state, + QETH_QDIO_BUF_PRIMED, + QETH_QDIO_BUF_PENDING) == + QETH_QDIO_BUF_PRIMED) { + qeth_notify_skbs(queue, buffer, + TX_NOTIFY_PENDING); + } + QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx); + + /* prepare the queue slot for re-use: */ + qeth_scrub_qdio_buffer(buffer->buffer, + QETH_MAX_BUFFER_ELEMENTS(card)); + if (qeth_init_qdio_out_buf(queue, bidx)) { + QETH_CARD_TEXT(card, 2, "outofbuf"); + qeth_schedule_recovery(card); + } + } else { + if (card->options.cq == QETH_CQ_ENABLED) { + enum iucv_tx_notify n; + + n = qeth_compute_cq_notification( + buffer->buffer->element[15].sflags, 0); + qeth_notify_skbs(queue, buffer, n); + } + + qeth_clear_output_buffer(queue, buffer); + } + qeth_cleanup_handled_pending(queue, bidx, 0); + } + atomic_sub(count, &queue->used_buffers); + /* check if we need to do something on this outbound queue */ + if (card->info.type != QETH_CARD_TYPE_IQD) + qeth_check_outbound_queue(queue); + + netif_wake_queue(queue->card->dev); + if (card->options.performance_stats) + card->perf_stats.outbound_handler_time += qeth_get_micros() - + card->perf_stats.outbound_handler_start_time; +} + +/* We cannot use outbound queue 3 for unicast packets on HiperSockets */ +static inline int qeth_cut_iqd_prio(struct qeth_card *card, int queue_num) +{ + if ((card->info.type == QETH_CARD_TYPE_IQD) && (queue_num == 3)) + return 2; + return queue_num; +} + +/** + * Note: Function assumes that we have 4 outbound queues. 
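+ *
+ * For QETH_PRIO_Q_ING_PREC the two most significant TOS bits (the upper
+ * half of the IP precedence field) are inverted, so a higher precedence
+ * selects a lower queue number: precedence 6-7 -> queue 0, 4-5 ->
+ * queue 1, 2-3 -> queue 2, 0-1 -> queue 3 (capped to queue 2 on
+ * HiperSockets by qeth_cut_iqd_prio()).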
+ */ +int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, + int ipv) +{ + __be16 *tci; + u8 tos; + + switch (card->qdio.do_prio_queueing) { + case QETH_PRIO_Q_ING_TOS: + case QETH_PRIO_Q_ING_PREC: + switch (ipv) { + case 4: + tos = ipv4_get_dsfield(ip_hdr(skb)); + break; + case 6: + tos = ipv6_get_dsfield(ipv6_hdr(skb)); + break; + default: + return card->qdio.default_out_queue; + } + if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC) + return qeth_cut_iqd_prio(card, ~tos >> 6 & 3); + if (tos & IPTOS_MINCOST) + return qeth_cut_iqd_prio(card, 3); + if (tos & IPTOS_RELIABILITY) + return 2; + if (tos & IPTOS_THROUGHPUT) + return 1; + if (tos & IPTOS_LOWDELAY) + return 0; + break; + case QETH_PRIO_Q_ING_SKB: + if (skb->priority > 5) + return 0; + return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3); + case QETH_PRIO_Q_ING_VLAN: + tci = &((struct ethhdr *)skb->data)->h_proto; + if (be16_to_cpu(*tci) == ETH_P_8021Q) + return qeth_cut_iqd_prio(card, + ~be16_to_cpu(*(tci + 1)) >> (VLAN_PRIO_SHIFT + 1) & 3); + break; + default: + break; + } + return card->qdio.default_out_queue; +} +EXPORT_SYMBOL_GPL(qeth_get_priority_queue); + +/** + * qeth_get_elements_for_frags() - find number of SBALEs for skb frags. + * @skb: SKB address + * + * Returns the number of pages, and thus QDIO buffer elements, needed to cover + * fragmented part of the SKB. Returns zero for linear SKB. + */ +int qeth_get_elements_for_frags(struct sk_buff *skb) +{ + int cnt, elements = 0; + + for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[cnt]; + + elements += qeth_get_elements_for_range( + (addr_t)skb_frag_address(frag), + (addr_t)skb_frag_address(frag) + skb_frag_size(frag)); + } + return elements; +} +EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); + +static unsigned int qeth_count_elements(struct sk_buff *skb, int data_offset) +{ + unsigned int elements = qeth_get_elements_for_frags(skb); + addr_t end = (addr_t)skb->data + skb_headlen(skb); + addr_t start = (addr_t)skb->data + data_offset; + + if (start != end) + elements += qeth_get_elements_for_range(start, end); + return elements; +} + +/** + * qeth_get_elements_no() - find number of SBALEs for skb data, inc. frags. + * @card: qeth card structure, to check max. elems. + * @skb: SKB address + * @extra_elems: extra elems needed, to check against max. + * @data_offset: range starts at skb->data + data_offset + * + * Returns the number of pages, and thus QDIO buffer elements, needed to cover + * skb data, including linear part and fragments. Checks if the result plus + * extra_elems fits under the limit for the card. Returns 0 if it does not. + * Note: extra_elems is not included in the returned result. + */ +int qeth_get_elements_no(struct qeth_card *card, + struct sk_buff *skb, int extra_elems, int data_offset) +{ + int elements = qeth_count_elements(skb, data_offset); + + if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { + QETH_DBF_MESSAGE(2, "Invalid size of IP packet " + "(Number=%d / Length=%d). 
Discarded.\n", + elements + extra_elems, skb->len); + return 0; + } + return elements; +} +EXPORT_SYMBOL_GPL(qeth_get_elements_no); + +int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len) +{ + int hroom, inpage, rest; + + if (((unsigned long)skb->data & PAGE_MASK) != + (((unsigned long)skb->data + len - 1) & PAGE_MASK)) { + hroom = skb_headroom(skb); + inpage = PAGE_SIZE - ((unsigned long) skb->data % PAGE_SIZE); + rest = len - inpage; + if (rest > hroom) + return 1; + memmove(skb->data - rest, skb->data, skb_headlen(skb)); + skb->data -= rest; + skb->tail -= rest; + *hdr = (struct qeth_hdr *)skb->data; + QETH_DBF_MESSAGE(2, "skb bounce len: %d rest: %d\n", len, rest); + } + return 0; +} +EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce); + +/** + * qeth_add_hw_header() - add a HW header to an skb. + * @skb: skb that the HW header should be added to. + * @hdr: double pointer to a qeth_hdr. When returning with >= 0, + * it contains a valid pointer to a qeth_hdr. + * @hdr_len: length of the HW header. + * @proto_len: length of protocol headers that need to be in same page as the + * HW header. + * + * Returns the pushed length. If the header can't be pushed on + * (eg. because it would cross a page boundary), it is allocated from + * the cache instead and 0 is returned. + * The number of needed buffer elements is returned in @elements. + * Error to create the hdr is indicated by returning with < 0. + */ +int qeth_add_hw_header(struct qeth_card *card, struct sk_buff *skb, + struct qeth_hdr **hdr, unsigned int hdr_len, + unsigned int proto_len, unsigned int *elements) +{ + const unsigned int max_elements = QETH_MAX_BUFFER_ELEMENTS(card); + const unsigned int contiguous = proto_len ? proto_len : 1; + unsigned int __elements; + addr_t start, end; + bool push_ok; + int rc; + +check_layout: + start = (addr_t)skb->data - hdr_len; + end = (addr_t)skb->data; + + if (qeth_get_elements_for_range(start, end + contiguous) == 1) { + /* Push HW header into same page as first protocol header. */ + push_ok = true; + __elements = qeth_count_elements(skb, 0); + } else if (!proto_len && qeth_get_elements_for_range(start, end) == 1) { + /* Push HW header into a new page. */ + push_ok = true; + __elements = 1 + qeth_count_elements(skb, 0); + } else { + /* Use header cache, copy protocol headers up. */ + push_ok = false; + __elements = 1 + qeth_count_elements(skb, proto_len); + } + + /* Compress skb to fit into one IO buffer: */ + if (__elements > max_elements) { + if (!skb_is_nonlinear(skb)) { + /* Drop it, no easy way of shrinking it further. 
*/ + QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n", + max_elements, __elements, skb->len); + return -E2BIG; + } + + rc = skb_linearize(skb); + if (card->options.performance_stats) { + if (rc) + card->perf_stats.tx_linfail++; + else + card->perf_stats.tx_lin++; + } + if (rc) + return rc; + + /* Linearization changed the layout, re-evaluate: */ + goto check_layout; + } + + *elements = __elements; + /* Add the header: */ + if (push_ok) { + *hdr = skb_push(skb, hdr_len); + return hdr_len; + } + /* fall back */ + *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); + if (!*hdr) + return -ENOMEM; + /* Copy protocol headers behind HW header: */ + skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len); + return 0; +} +EXPORT_SYMBOL_GPL(qeth_add_hw_header); + +static void __qeth_fill_buffer(struct sk_buff *skb, + struct qeth_qdio_out_buffer *buf, + bool is_first_elem, unsigned int offset) +{ + struct qdio_buffer *buffer = buf->buffer; + int element = buf->next_element_to_fill; + int length = skb_headlen(skb) - offset; + char *data = skb->data + offset; + int length_here, cnt; + + /* map linear part into buffer element(s) */ + while (length > 0) { + /* length_here is the remaining amount of data in this page */ + length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE); + if (length < length_here) + length_here = length; + + buffer->element[element].addr = data; + buffer->element[element].length = length_here; + length -= length_here; + if (is_first_elem) { + is_first_elem = false; + if (length || skb_is_nonlinear(skb)) + /* skb needs additional elements */ + buffer->element[element].eflags = + SBAL_EFLAGS_FIRST_FRAG; + else + buffer->element[element].eflags = 0; + } else { + buffer->element[element].eflags = + SBAL_EFLAGS_MIDDLE_FRAG; + } + data += length_here; + element++; + } + + /* map page frags into buffer element(s) */ + for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; + + data = skb_frag_address(frag); + length = skb_frag_size(frag); + while (length > 0) { + length_here = PAGE_SIZE - + ((unsigned long) data % PAGE_SIZE); + if (length < length_here) + length_here = length; + + buffer->element[element].addr = data; + buffer->element[element].length = length_here; + buffer->element[element].eflags = + SBAL_EFLAGS_MIDDLE_FRAG; + length -= length_here; + data += length_here; + element++; + } + } + + if (buffer->element[element - 1].eflags) + buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; + buf->next_element_to_fill = element; +} + +/** + * qeth_fill_buffer() - map skb into an output buffer + * @queue: QDIO queue to submit the buffer on + * @buf: buffer to transport the skb + * @skb: skb to map into the buffer + * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated + * from qeth_core_header_cache. 
+ * @offset: when mapping the skb, start at skb->data + offset + * @hd_len: if > 0, build a dedicated header element of this size + */ +static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, + struct qeth_qdio_out_buffer *buf, + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned int offset, unsigned int hd_len) +{ + struct qdio_buffer *buffer = buf->buffer; + bool is_first_elem = true; + int flush_cnt = 0; + + refcount_inc(&skb->users); + skb_queue_tail(&buf->skb_list, skb); + + /* build dedicated header element */ + if (hd_len) { + int element = buf->next_element_to_fill; + is_first_elem = false; + + buffer->element[element].addr = hdr; + buffer->element[element].length = hd_len; + buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; + /* remember to free cache-allocated qeth_hdr: */ + buf->is_header[element] = ((void *)hdr != skb->data); + buf->next_element_to_fill++; + } + + __qeth_fill_buffer(skb, buf, is_first_elem, offset); + + if (!queue->do_pack) { + QETH_CARD_TEXT(queue->card, 6, "fillbfnp"); + /* set state to PRIMED -> will be flushed */ + atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); + flush_cnt = 1; + } else { + QETH_CARD_TEXT(queue->card, 6, "fillbfpa"); + if (queue->card->options.performance_stats) + queue->card->perf_stats.skbs_sent_pack++; + if (buf->next_element_to_fill >= + QETH_MAX_BUFFER_ELEMENTS(queue->card)) { + /* + * packed buffer if full -> set state PRIMED + * -> will be flushed + */ + atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED); + flush_cnt = 1; + } + } + return flush_cnt; +} + +int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue, struct sk_buff *skb, + struct qeth_hdr *hdr, unsigned int offset, + unsigned int hd_len) +{ + int index = queue->next_buf_to_fill; + struct qeth_qdio_out_buffer *buffer = queue->bufs[index]; + + /* + * check if buffer is empty to make sure that we do not 'overtake' + * ourselves and try to fill a buffer that is already primed + */ + if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) + return -EBUSY; + queue->next_buf_to_fill = (index + 1) % QDIO_MAX_BUFFERS_PER_Q; + qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); + qeth_flush_buffers(queue, index, 1); + return 0; +} +EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast); + +int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned int offset, unsigned int hd_len, + int elements_needed) +{ + struct qeth_qdio_out_buffer *buffer; + int start_index; + int flush_count = 0; + int do_pack = 0; + int tmp; + int rc = 0; + + /* spin until we get the queue ... */ + while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, + QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); + start_index = queue->next_buf_to_fill; + buffer = queue->bufs[queue->next_buf_to_fill]; + /* + * check if buffer is empty to make sure that we do not 'overtake' + * ourselves and try to fill a buffer that is already primed + */ + if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) { + atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); + return -EBUSY; + } + /* check if we need to switch packing state of this queue */ + qeth_switch_to_packing_if_needed(queue); + if (queue->do_pack) { + do_pack = 1; + /* does packet fit in current buffer? */ + if ((QETH_MAX_BUFFER_ELEMENTS(card) - + buffer->next_element_to_fill) < elements_needed) { + /* ... 
no -> set state PRIMED */ + atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED); + flush_count++; + queue->next_buf_to_fill = + (queue->next_buf_to_fill + 1) % + QDIO_MAX_BUFFERS_PER_Q; + buffer = queue->bufs[queue->next_buf_to_fill]; + /* we did a step forward, so check buffer state + * again */ + if (atomic_read(&buffer->state) != + QETH_QDIO_BUF_EMPTY) { + qeth_flush_buffers(queue, start_index, + flush_count); + atomic_set(&queue->state, + QETH_OUT_Q_UNLOCKED); + rc = -EBUSY; + goto out; + } + } + } + tmp = qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); + queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) % + QDIO_MAX_BUFFERS_PER_Q; + flush_count += tmp; + if (flush_count) + qeth_flush_buffers(queue, start_index, flush_count); + else if (!atomic_read(&queue->set_pci_flags_count)) + atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH); + /* + * queue->state will go from LOCKED -> UNLOCKED or from + * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us + * (switch packing state or flush buffer to get another pci flag out). + * In that case we will enter this loop + */ + while (atomic_dec_return(&queue->state)) { + start_index = queue->next_buf_to_fill; + /* check if we can go back to non-packing state */ + tmp = qeth_switch_to_nonpacking_if_needed(queue); + /* + * check if we need to flush a packing buffer to get a pci + * flag out on the queue + */ + if (!tmp && !atomic_read(&queue->set_pci_flags_count)) + tmp = qeth_prep_flush_pack_buffer(queue); + if (tmp) { + qeth_flush_buffers(queue, start_index, tmp); + flush_count += tmp; + } + } +out: + /* at this point the queue is UNLOCKED again */ + if (queue->card->options.performance_stats && do_pack) + queue->card->perf_stats.bufs_sent_pack += flush_count; + + return rc; +} +EXPORT_SYMBOL_GPL(qeth_do_send_packet); + +static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_ipacmd_setadpparms *setparms; + + QETH_CARD_TEXT(card, 4, "prmadpcb"); + + setparms = &(cmd->data.setadapterparms); + if (qeth_setadpparms_inspect_rc(cmd)) { + QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); + setparms->data.mode = SET_PROMISC_MODE_OFF; + } + card->info.promisc_mode = setparms->data.mode; + return 0; +} + +void qeth_setadp_promisc_mode(struct qeth_card *card) +{ + enum qeth_ipa_promisc_modes mode; + struct net_device *dev = card->dev; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_CARD_TEXT(card, 4, "setprom"); + + if (((dev->flags & IFF_PROMISC) && + (card->info.promisc_mode == SET_PROMISC_MODE_ON)) || + (!(dev->flags & IFF_PROMISC) && + (card->info.promisc_mode == SET_PROMISC_MODE_OFF))) + return; + mode = SET_PROMISC_MODE_OFF; + if (dev->flags & IFF_PROMISC) + mode = SET_PROMISC_MODE_ON; + QETH_CARD_TEXT_(card, 4, "mode:%x", mode); + + iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE, + sizeof(struct qeth_ipacmd_setadpparms_hdr) + 8); + if (!iob) + return; + cmd = __ipa_cmd(iob); + cmd->data.setadapterparms.data.mode = mode; + qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL); +} +EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode); + +struct net_device_stats *qeth_get_stats(struct net_device *dev) +{ + struct qeth_card *card; + + card = dev->ml_priv; + + QETH_CARD_TEXT(card, 5, "getstat"); + + return &card->stats; +} +EXPORT_SYMBOL_GPL(qeth_get_stats); + +static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, + struct qeth_reply 
*reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + QETH_CARD_TEXT(card, 4, "chgmaccb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; + + if (!card->options.layer2 || + !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) { + ether_addr_copy(card->dev->dev_addr, + cmd->data.setadapterparms.data.change_addr.addr); + card->info.mac_bits |= QETH_LAYER2_MAC_READ; + } + return 0; +} + +int qeth_setadpparms_change_macaddr(struct qeth_card *card) +{ + int rc; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_CARD_TEXT(card, 4, "chgmac"); + + iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS, + sizeof(struct qeth_ipacmd_setadpparms_hdr) + + sizeof(struct qeth_change_addr)); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC; + cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN; + ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr, + card->dev->dev_addr); + rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb, + NULL); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); + +static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_set_access_ctrl *access_ctrl_req; + int fallback = *(int *)reply->param; + + QETH_CARD_TEXT(card, 4, "setaccb"); + if (cmd->hdr.return_code) + return 0; + qeth_setadpparms_inspect_rc(cmd); + + access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; + QETH_DBF_TEXT_(SETUP, 2, "setaccb"); + QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); + QETH_DBF_TEXT_(SETUP, 2, "rc=%d", + cmd->data.setadapterparms.hdr.return_code); + if (cmd->data.setadapterparms.hdr.return_code != + SET_ACCESS_CTRL_RC_SUCCESS) + QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n", + card->gdev->dev.kobj.name, + access_ctrl_req->subcmd_code, + cmd->data.setadapterparms.hdr.return_code); + switch (cmd->data.setadapterparms.hdr.return_code) { + case SET_ACCESS_CTRL_RC_SUCCESS: + if (card->options.isolation == ISOLATION_MODE_NONE) { + dev_info(&card->gdev->dev, + "QDIO data connection isolation is deactivated\n"); + } else { + dev_info(&card->gdev->dev, + "QDIO data connection isolation is activated\n"); + } + break; + case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED: + QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already " + "deactivated\n", dev_name(&card->gdev->dev)); + if (fallback) + card->options.isolation = card->options.prev_isolation; + break; + case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED: + QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already" + " activated\n", dev_name(&card->gdev->dev)); + if (fallback) + card->options.isolation = card->options.prev_isolation; + break; + case SET_ACCESS_CTRL_RC_NOT_SUPPORTED: + dev_err(&card->gdev->dev, "Adapter does not " + "support QDIO data connection isolation\n"); + break; + case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER: + dev_err(&card->gdev->dev, + "Adapter is dedicated. 
" + "QDIO data connection isolation not supported\n"); + if (fallback) + card->options.isolation = card->options.prev_isolation; + break; + case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF: + dev_err(&card->gdev->dev, + "TSO does not permit QDIO data connection isolation\n"); + if (fallback) + card->options.isolation = card->options.prev_isolation; + break; + case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED: + dev_err(&card->gdev->dev, "The adjacent switch port does not " + "support reflective relay mode\n"); + if (fallback) + card->options.isolation = card->options.prev_isolation; + break; + case SET_ACCESS_CTRL_RC_REFLREL_FAILED: + dev_err(&card->gdev->dev, "The reflective relay mode cannot be " + "enabled at the adjacent switch port"); + if (fallback) + card->options.isolation = card->options.prev_isolation; + break; + case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED: + dev_warn(&card->gdev->dev, "Turning off reflective relay mode " + "at the adjacent switch failed\n"); + break; + default: + /* this should never happen */ + if (fallback) + card->options.isolation = card->options.prev_isolation; + break; + } + return 0; +} + +static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card, + enum qeth_ipa_isolation_modes isolation, int fallback) +{ + int rc; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + struct qeth_set_access_ctrl *access_ctrl_req; + + QETH_CARD_TEXT(card, 4, "setacctl"); + + QETH_DBF_TEXT_(SETUP, 2, "setacctl"); + QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); + + iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL, + sizeof(struct qeth_ipacmd_setadpparms_hdr) + + sizeof(struct qeth_set_access_ctrl)); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; + access_ctrl_req->subcmd_code = isolation; + + rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb, + &fallback); + QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc); + return rc; +} + +int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 4, "setactlo"); + + if ((card->info.type == QETH_CARD_TYPE_OSD || + card->info.type == QETH_CARD_TYPE_OSX) && + qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) { + rc = qeth_setadpparms_set_access_ctrl(card, + card->options.isolation, fallback); + if (rc) { + QETH_DBF_MESSAGE(3, + "IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n", + card->gdev->dev.kobj.name, + rc); + rc = -EOPNOTSUPP; + } + } else if (card->options.isolation != ISOLATION_MODE_NONE) { + card->options.isolation = ISOLATION_MODE_NONE; + + dev_err(&card->gdev->dev, "Adapter does not " + "support QDIO data connection isolation\n"); + rc = -EOPNOTSUPP; + } + return rc; +} +EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online); + +void qeth_tx_timeout(struct net_device *dev) +{ + struct qeth_card *card; + + card = dev->ml_priv; + QETH_CARD_TEXT(card, 4, "txtimeo"); + card->stats.tx_errors++; + qeth_schedule_recovery(card); +} +EXPORT_SYMBOL_GPL(qeth_tx_timeout); + +static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) +{ + struct qeth_card *card = dev->ml_priv; + int rc = 0; + + switch (regnum) { + case MII_BMCR: /* Basic mode control register */ + rc = BMCR_FULLDPLX; + if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) && + (card->info.link_type != QETH_LINK_TYPE_OSN) && + (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH)) + rc |= BMCR_SPEED100; + break; + case MII_BMSR: /* Basic mode status register */ + rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | 
BMSR_LSTATUS | + BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL | + BMSR_100BASE4; + break; + case MII_PHYSID1: /* PHYS ID 1 */ + rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) | + dev->dev_addr[2]; + rc = (rc >> 5) & 0xFFFF; + break; + case MII_PHYSID2: /* PHYS ID 2 */ + rc = (dev->dev_addr[2] << 10) & 0xFFFF; + break; + case MII_ADVERTISE: /* Advertisement control reg */ + rc = ADVERTISE_ALL; + break; + case MII_LPA: /* Link partner ability reg */ + rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL | + LPA_100BASE4 | LPA_LPACK; + break; + case MII_EXPANSION: /* Expansion register */ + break; + case MII_DCOUNTER: /* disconnect counter */ + break; + case MII_FCSCOUNTER: /* false carrier counter */ + break; + case MII_NWAYTEST: /* N-way auto-neg test register */ + break; + case MII_RERRCOUNTER: /* rx error counter */ + rc = card->stats.rx_errors; + break; + case MII_SREVISION: /* silicon revision */ + break; + case MII_RESV1: /* reserved 1 */ + break; + case MII_LBRERROR: /* loopback, rx, bypass error */ + break; + case MII_PHYADDR: /* physical address */ + break; + case MII_RESV2: /* reserved 2 */ + break; + case MII_TPISTATUS: /* TPI status for 10mbps */ + break; + case MII_NCONFIG: /* network interface config */ + break; + default: + break; + } + return rc; +} + +static int qeth_send_ipa_snmp_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob, int len, + int (*reply_cb)(struct qeth_card *, struct qeth_reply *, + unsigned long), + void *reply_param) +{ + u16 s1, s2; + + QETH_CARD_TEXT(card, 4, "sendsnmp"); + + memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); + memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), + &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); + /* adjust PDU length fields in IPA_PDU_HEADER */ + s1 = (u32) IPA_PDU_HEADER_SIZE + len; + s2 = (u32) len; + memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2); + memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2); + memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2); + memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2); + return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob, + reply_cb, reply_param); +} + +static int qeth_snmp_command_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long sdata) +{ + struct qeth_ipa_cmd *cmd; + struct qeth_arp_query_info *qinfo; + unsigned char *data; + void *snmp_data; + __u16 data_len; + + QETH_CARD_TEXT(card, 3, "snpcmdcb"); + + cmd = (struct qeth_ipa_cmd *) sdata; + data = (unsigned char *)((char *)cmd - reply->offset); + qinfo = (struct qeth_arp_query_info *) reply->param; + + if (cmd->hdr.return_code) { + QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code); + return 0; + } + if (cmd->data.setadapterparms.hdr.return_code) { + cmd->hdr.return_code = + cmd->data.setadapterparms.hdr.return_code; + QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code); + return 0; + } + data_len = *((__u16 *)QETH_IPA_PDU_LEN_PDU1(data)); + if (cmd->data.setadapterparms.hdr.seq_no == 1) { + snmp_data = &cmd->data.setadapterparms.data.snmp; + data_len -= offsetof(struct qeth_ipa_cmd, + data.setadapterparms.data.snmp); + } else { + snmp_data = &cmd->data.setadapterparms.data.snmp.request; + data_len -= offsetof(struct qeth_ipa_cmd, + data.setadapterparms.data.snmp.request); + } + + /* check if there is enough room in userspace */ + if ((qinfo->udata_len - qinfo->udata_offset) < data_len) { + QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOMEM); + cmd->hdr.return_code = IPA_RC_ENOMEM; + return 0; + } + QETH_CARD_TEXT_(card, 4, "snore%i", + 
cmd->data.setadapterparms.hdr.used_total); + QETH_CARD_TEXT_(card, 4, "sseqn%i", + cmd->data.setadapterparms.hdr.seq_no); + /*copy entries to user buffer*/ + memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len); + qinfo->udata_offset += data_len; + + /* check if all replies received ... */ + QETH_CARD_TEXT_(card, 4, "srtot%i", + cmd->data.setadapterparms.hdr.used_total); + QETH_CARD_TEXT_(card, 4, "srseq%i", + cmd->data.setadapterparms.hdr.seq_no); + if (cmd->data.setadapterparms.hdr.seq_no < + cmd->data.setadapterparms.hdr.used_total) + return 1; + return 0; +} + +static int qeth_snmp_command(struct qeth_card *card, char __user *udata) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + struct qeth_snmp_ureq *ureq; + unsigned int req_len; + struct qeth_arp_query_info qinfo = {0, }; + int rc = 0; + + QETH_CARD_TEXT(card, 3, "snmpcmd"); + + if (card->info.guestlan) + return -EOPNOTSUPP; + + if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && + (!card->options.layer2)) { + return -EOPNOTSUPP; + } + /* skip 4 bytes (data_len struct member) to get req_len */ + if (copy_from_user(&req_len, udata + sizeof(int), sizeof(int))) + return -EFAULT; + if (req_len > (QETH_BUFSIZE - IPA_PDU_HEADER_SIZE - + sizeof(struct qeth_ipacmd_hdr) - + sizeof(struct qeth_ipacmd_setadpparms_hdr))) + return -EINVAL; + ureq = memdup_user(udata, req_len + sizeof(struct qeth_snmp_ureq_hdr)); + if (IS_ERR(ureq)) { + QETH_CARD_TEXT(card, 2, "snmpnome"); + return PTR_ERR(ureq); + } + qinfo.udata_len = ureq->hdr.data_len; + qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); + if (!qinfo.udata) { + kfree(ureq); + return -ENOMEM; + } + qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr); + + iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, + QETH_SNMP_SETADP_CMDLENGTH + req_len); + if (!iob) { + rc = -ENOMEM; + goto out; + } + cmd = __ipa_cmd(iob); + memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len); + rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, + qeth_snmp_command_cb, (void *)&qinfo); + if (rc) + QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n", + QETH_CARD_IFNAME(card), rc); + else { + if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) + rc = -EFAULT; + } +out: + kfree(ureq); + kfree(qinfo.udata); + return rc; +} + +static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; + struct qeth_qoat_priv *priv; + char *resdata; + int resdatalen; + + QETH_CARD_TEXT(card, 3, "qoatcb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; + + priv = (struct qeth_qoat_priv *)reply->param; + resdatalen = cmd->data.setadapterparms.hdr.cmdlength; + resdata = (char *)data + 28; + + if (resdatalen > (priv->buffer_len - priv->response_len)) { + cmd->hdr.return_code = IPA_RC_FFFF; + return 0; + } + + memcpy((priv->buffer + priv->response_len), resdata, + resdatalen); + priv->response_len += resdatalen; + + if (cmd->data.setadapterparms.hdr.seq_no < + cmd->data.setadapterparms.hdr.used_total) + return 1; + return 0; +} + +static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) +{ + int rc = 0; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + struct qeth_query_oat *oat_req; + struct qeth_query_oat_data oat_data; + struct qeth_qoat_priv priv; + void __user *tmp; + + QETH_CARD_TEXT(card, 3, "qoatcmd"); + + if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) { + rc = -EOPNOTSUPP; + goto out; + } 
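+	/*
+	 * Editor's note (illustrative sketch, not part of the driver): this
+	 * path is reached from the SIOC_QETH_QUERY_OAT case in qeth_do_ioctl()
+	 * below. Userspace is expected to pass a struct qeth_query_oat_data
+	 * through the ifreq private-ioctl mechanism, roughly like this (field
+	 * names are taken from the code below; the interface name, subcommand
+	 * value and buffer size are made-up examples):
+	 *
+	 *	struct qeth_query_oat_data oat = {
+	 *		.command    = 0,
+	 *		.buffer_len = 64 * 1024,
+	 *		.ptr        = (unsigned long)result_buf,
+	 *	};
+	 *	struct ifreq ifr = { 0 };
+	 *	strncpy(ifr.ifr_name, "encf500", IFNAMSIZ - 1);
+	 *	ifr.ifr_data = (void *)&oat;
+	 *	if (!ioctl(sockfd, SIOC_QETH_QUERY_OAT, &ifr))
+	 *		use(result_buf, oat.response_len);
+	 */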
+ + if (copy_from_user(&oat_data, udata, + sizeof(struct qeth_query_oat_data))) { + rc = -EFAULT; + goto out; + } + + priv.buffer_len = oat_data.buffer_len; + priv.response_len = 0; + priv.buffer = vzalloc(oat_data.buffer_len); + if (!priv.buffer) { + rc = -ENOMEM; + goto out; + } + + iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT, + sizeof(struct qeth_ipacmd_setadpparms_hdr) + + sizeof(struct qeth_query_oat)); + if (!iob) { + rc = -ENOMEM; + goto out_free; + } + cmd = __ipa_cmd(iob); + oat_req = &cmd->data.setadapterparms.data.query_oat; + oat_req->subcmd_code = oat_data.command; + + rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb, + &priv); + if (!rc) { + if (is_compat_task()) + tmp = compat_ptr(oat_data.ptr); + else + tmp = (void __user *)(unsigned long)oat_data.ptr; + + if (copy_to_user(tmp, priv.buffer, + priv.response_len)) { + rc = -EFAULT; + goto out_free; + } + + oat_data.response_len = priv.response_len; + + if (copy_to_user(udata, &oat_data, + sizeof(struct qeth_query_oat_data))) + rc = -EFAULT; + } else + if (rc == IPA_RC_FFFF) + rc = -EFAULT; + +out_free: + vfree(priv.buffer); +out: + return rc; +} + +static int qeth_query_card_info_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct carrier_info *carrier_info = (struct carrier_info *)reply->param; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; + struct qeth_query_card_info *card_info; + + QETH_CARD_TEXT(card, 2, "qcrdincb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; + + card_info = &cmd->data.setadapterparms.data.card_info; + carrier_info->card_type = card_info->card_type; + carrier_info->port_mode = card_info->port_mode; + carrier_info->port_speed = card_info->port_speed; + return 0; +} + +static int qeth_query_card_info(struct qeth_card *card, + struct carrier_info *carrier_info) +{ + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "qcrdinfo"); + if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO)) + return -EOPNOTSUPP; + iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, + sizeof(struct qeth_ipacmd_setadpparms_hdr)); + if (!iob) + return -ENOMEM; + return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, + (void *)carrier_info); +} + +/** + * qeth_vm_request_mac() - Request a hypervisor-managed MAC address + * @card: pointer to a qeth_card + * + * Returns + * 0, if a MAC address has been set for the card's netdevice + * a return code, for various error conditions + */ +int qeth_vm_request_mac(struct qeth_card *card) +{ + struct diag26c_mac_resp *response; + struct diag26c_mac_req *request; + struct ccw_dev_id id; + int rc; + + QETH_DBF_TEXT(SETUP, 2, "vmreqmac"); + + request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA); + response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA); + if (!request || !response) { + rc = -ENOMEM; + goto out; + } + + ccw_device_get_id(CARD_DDEV(card), &id); + request->resp_buf_len = sizeof(*response); + request->resp_version = DIAG26C_VERSION2; + request->op_code = DIAG26C_GET_MAC; + request->devno = id.devno; + + QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); + rc = diag26c(request, response, DIAG26C_MAC_SERVICES); + QETH_DBF_HEX(CTRL, 2, request, sizeof(*request)); + if (rc) + goto out; + QETH_DBF_HEX(CTRL, 2, response, sizeof(*response)); + + if (request->resp_buf_len < sizeof(*response) || + response->version != request->resp_version) { + rc = -EIO; + QETH_DBF_TEXT(SETUP, 2, "badresp"); + QETH_DBF_HEX(SETUP, 2, &request->resp_buf_len, + sizeof(request->resp_buf_len)); + } 
else if (!is_valid_ether_addr(response->mac)) { + rc = -EINVAL; + QETH_DBF_TEXT(SETUP, 2, "badmac"); + QETH_DBF_HEX(SETUP, 2, response->mac, ETH_ALEN); + } else { + ether_addr_copy(card->dev->dev_addr, response->mac); + } + +out: + kfree(response); + kfree(request); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_vm_request_mac); + +static int qeth_get_qdio_q_format(struct qeth_card *card) +{ + if (card->info.type == QETH_CARD_TYPE_IQD) + return QDIO_IQDIO_QFMT; + else + return QDIO_QETH_QFMT; +} + +static void qeth_determine_capabilities(struct qeth_card *card) +{ + int rc; + int length; + char *prcd; + struct ccw_device *ddev; + int ddev_offline = 0; + + QETH_DBF_TEXT(SETUP, 2, "detcapab"); + ddev = CARD_DDEV(card); + if (!ddev->online) { + ddev_offline = 1; + rc = ccw_device_set_online(ddev); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); + goto out; + } + } + + rc = qeth_read_conf_data(card, (void **) &prcd, &length); + if (rc) { + QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n", + dev_name(&card->gdev->dev), rc); + QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); + goto out_offline; + } + qeth_configure_unitaddr(card, prcd); + if (ddev_offline) + qeth_configure_blkt_default(card, prcd); + kfree(prcd); + + rc = qdio_get_ssqd_desc(ddev, &card->ssqd); + if (rc) + QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + + QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt); + QETH_DBF_TEXT_(SETUP, 2, "ac1:%02x", card->ssqd.qdioac1); + QETH_DBF_TEXT_(SETUP, 2, "ac2:%04x", card->ssqd.qdioac2); + QETH_DBF_TEXT_(SETUP, 2, "ac3:%04x", card->ssqd.qdioac3); + QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt); + if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) || + ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) || + ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) { + dev_info(&card->gdev->dev, + "Completion Queueing supported\n"); + } else { + card->options.cq = QETH_CQ_NOTAVAILABLE; + } + + +out_offline: + if (ddev_offline == 1) + ccw_device_set_offline(ddev); +out: + return; +} + +static void qeth_qdio_establish_cq(struct qeth_card *card, + struct qdio_buffer **in_sbal_ptrs, + void (**queue_start_poll) + (struct ccw_device *, int, + unsigned long)) +{ + int i; + + if (card->options.cq == QETH_CQ_ENABLED) { + int offset = QDIO_MAX_BUFFERS_PER_Q * + (card->qdio.no_in_queues - 1); + + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) + in_sbal_ptrs[offset + i] = + card->qdio.c_q->bufs[i].buffer; + + queue_start_poll[card->qdio.no_in_queues - 1] = NULL; + } +} + +static int qeth_qdio_establish(struct qeth_card *card) +{ + struct qdio_initialize init_data; + char *qib_param_field; + struct qdio_buffer **in_sbal_ptrs; + void (**queue_start_poll) (struct ccw_device *, int, unsigned long); + struct qdio_buffer **out_sbal_ptrs; + int i, j, k; + int rc = 0; + + QETH_DBF_TEXT(SETUP, 2, "qdioest"); + + qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q, + GFP_KERNEL); + if (!qib_param_field) { + rc = -ENOMEM; + goto out_free_nothing; + } + + qeth_create_qib_param_field(card, qib_param_field); + qeth_create_qib_param_field_blkt(card, qib_param_field); + + in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q, + sizeof(void *), + GFP_KERNEL); + if (!in_sbal_ptrs) { + rc = -ENOMEM; + goto out_free_qib_param; + } + + for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) + in_sbal_ptrs[i] = card->qdio.in_q->bufs[i].buffer; + + queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *), + GFP_KERNEL); + if (!queue_start_poll) { + rc = -ENOMEM; + goto out_free_in_sbals; + } + for (i = 0; i < 
card->qdio.no_in_queues; ++i) + queue_start_poll[i] = qeth_qdio_start_poll; + + qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll); + + out_sbal_ptrs = + kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q, + sizeof(void *), + GFP_KERNEL); + if (!out_sbal_ptrs) { + rc = -ENOMEM; + goto out_free_queue_start_poll; + } + + for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i) + for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++, k++) + out_sbal_ptrs[k] = + card->qdio.out_qs[i]->bufs[j]->buffer; + + memset(&init_data, 0, sizeof(struct qdio_initialize)); + init_data.cdev = CARD_DDEV(card); + init_data.q_format = qeth_get_qdio_q_format(card); + init_data.qib_param_field_format = 0; + init_data.qib_param_field = qib_param_field; + init_data.no_input_qs = card->qdio.no_in_queues; + init_data.no_output_qs = card->qdio.no_out_queues; + init_data.input_handler = qeth_qdio_input_handler; + init_data.output_handler = qeth_qdio_output_handler; + init_data.queue_start_poll_array = queue_start_poll; + init_data.int_parm = (unsigned long) card; + init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; + init_data.output_sbal_addr_array = (void **) out_sbal_ptrs; + init_data.output_sbal_state_array = card->qdio.out_bufstates; + init_data.scan_threshold = + (card->info.type == QETH_CARD_TYPE_IQD) ? 1 : 32; + + if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, + QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) { + rc = qdio_allocate(&init_data); + if (rc) { + atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); + goto out; + } + rc = qdio_establish(&init_data); + if (rc) { + atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); + qdio_free(CARD_DDEV(card)); + } + } + + switch (card->options.cq) { + case QETH_CQ_ENABLED: + dev_info(&card->gdev->dev, "Completion Queue support enabled"); + break; + case QETH_CQ_DISABLED: + dev_info(&card->gdev->dev, "Completion Queue support disabled"); + break; + default: + break; + } +out: + kfree(out_sbal_ptrs); +out_free_queue_start_poll: + kfree(queue_start_poll); +out_free_in_sbals: + kfree(in_sbal_ptrs); +out_free_qib_param: + kfree(qib_param_field); +out_free_nothing: + return rc; +} + +static void qeth_core_free_card(struct qeth_card *card) +{ + QETH_DBF_TEXT(SETUP, 2, "freecrd"); + QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); + qeth_clean_channel(&card->read); + qeth_clean_channel(&card->write); + qeth_clean_channel(&card->data); + qeth_free_qdio_buffers(card); + unregister_service_level(&card->qeth_service_level); + kfree(card); +} + +void qeth_trace_features(struct qeth_card *card) +{ + QETH_CARD_TEXT(card, 2, "features"); + QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4)); + QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6)); + QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp)); + QETH_CARD_HEX(card, 2, &card->info.diagass_support, + sizeof(card->info.diagass_support)); +} +EXPORT_SYMBOL_GPL(qeth_trace_features); + +static struct ccw_device_id qeth_ids[] = { + {CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01), + .driver_info = QETH_CARD_TYPE_OSD}, + {CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05), + .driver_info = QETH_CARD_TYPE_IQD}, + {CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06), + .driver_info = QETH_CARD_TYPE_OSN}, + {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03), + .driver_info = QETH_CARD_TYPE_OSM}, + {CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02), + .driver_info = QETH_CARD_TYPE_OSX}, + {}, +}; +MODULE_DEVICE_TABLE(ccw, qeth_ids); + +static struct ccw_driver qeth_ccw_driver = { 
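+	/*
+	 * Editor's note: the qeth_ids[] table above matches on control-unit
+	 * type/model 0x1731/xx and device type/model 0x1732/xx, and stashes
+	 * the qeth card type (OSD, IQD, OSN, OSM, OSX) in driver_info. The
+	 * probe/remove hooks below are the generic ccwgroup helpers; the
+	 * actual card lifecycle is driven by qeth_core_ccwgroup_driver
+	 * further down, which groups three CCW devices (the read, write and
+	 * data channels) into one qeth device.
+	 */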
+ .driver = { + .owner = THIS_MODULE, + .name = "qeth", + }, + .ids = qeth_ids, + .probe = ccwgroup_probe_ccwdev, + .remove = ccwgroup_remove_ccwdev, +}; + +int qeth_core_hardsetup_card(struct qeth_card *card) +{ + int retries = 3; + int rc; + + QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); + atomic_set(&card->force_alloc_skb, 0); + rc = qeth_update_from_chp_desc(card); + if (rc) + return rc; +retry: + if (retries < 3) + QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", + dev_name(&card->gdev->dev)); + rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); + ccw_device_set_offline(CARD_DDEV(card)); + ccw_device_set_offline(CARD_WDEV(card)); + ccw_device_set_offline(CARD_RDEV(card)); + qdio_free(CARD_DDEV(card)); + rc = ccw_device_set_online(CARD_RDEV(card)); + if (rc) + goto retriable; + rc = ccw_device_set_online(CARD_WDEV(card)); + if (rc) + goto retriable; + rc = ccw_device_set_online(CARD_DDEV(card)); + if (rc) + goto retriable; +retriable: + if (rc == -ERESTARTSYS) { + QETH_DBF_TEXT(SETUP, 2, "break1"); + return rc; + } else if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + if (--retries < 0) + goto out; + else + goto retry; + } + qeth_determine_capabilities(card); + qeth_init_tokens(card); + qeth_init_func_level(card); + rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb); + if (rc == -ERESTARTSYS) { + QETH_DBF_TEXT(SETUP, 2, "break2"); + return rc; + } else if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); + if (--retries < 0) + goto out; + else + goto retry; + } + rc = qeth_idx_activate_channel(&card->write, qeth_idx_write_cb); + if (rc == -ERESTARTSYS) { + QETH_DBF_TEXT(SETUP, 2, "break3"); + return rc; + } else if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc); + if (--retries < 0) + goto out; + else + goto retry; + } + card->read_or_write_problem = 0; + rc = qeth_mpc_initialize(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc); + goto out; + } + + rc = qeth_send_startlan(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + if (rc == IPA_RC_LAN_OFFLINE) { + dev_warn(&card->gdev->dev, + "The LAN is offline\n"); + card->lan_online = 0; + } else { + rc = -ENODEV; + goto out; + } + } else + card->lan_online = 1; + + card->options.ipa4.supported_funcs = 0; + card->options.ipa6.supported_funcs = 0; + card->options.adp.supported_funcs = 0; + card->options.sbp.supported_funcs = 0; + card->info.diagass_support = 0; + rc = qeth_query_ipassists(card, QETH_PROT_IPV4); + if (rc == -ENOMEM) + goto out; + if (qeth_is_supported(card, IPA_IPV6)) { + rc = qeth_query_ipassists(card, QETH_PROT_IPV6); + if (rc == -ENOMEM) + goto out; + } + if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { + rc = qeth_query_setadapterparms(card); + if (rc < 0) { + QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc); + goto out; + } + } + if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { + rc = qeth_query_setdiagass(card); + if (rc < 0) { + QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc); + goto out; + } + } + return 0; +out: + dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " + "an error on the device\n"); + QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! 
rc=%d\n", + dev_name(&card->gdev->dev), rc); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); + +static void qeth_create_skb_frag(struct qdio_buffer_element *element, + struct sk_buff *skb, int offset, int data_len) +{ + struct page *page = virt_to_page(element->addr); + unsigned int next_frag; + + /* first fill the linear space */ + if (!skb->len) { + unsigned int linear = min(data_len, skb_tailroom(skb)); + + skb_put_data(skb, element->addr + offset, linear); + data_len -= linear; + if (!data_len) + return; + offset += linear; + /* fall through to add page frag for remaining data */ + } + + next_frag = skb_shinfo(skb)->nr_frags; + get_page(page); + skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len); +} + +static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) +{ + return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY); +} + +struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, + struct qeth_qdio_buffer *qethbuffer, + struct qdio_buffer_element **__element, int *__offset, + struct qeth_hdr **hdr) +{ + struct qdio_buffer_element *element = *__element; + struct qdio_buffer *buffer = qethbuffer->buffer; + int offset = *__offset; + struct sk_buff *skb; + int skb_len = 0; + void *data_ptr; + int data_len; + int headroom = 0; + int use_rx_sg = 0; + + /* qeth_hdr must not cross element boundaries */ + while (element->length < offset + sizeof(struct qeth_hdr)) { + if (qeth_is_last_sbale(element)) + return NULL; + element++; + offset = 0; + } + *hdr = element->addr + offset; + + offset += sizeof(struct qeth_hdr); + switch ((*hdr)->hdr.l2.id) { + case QETH_HEADER_TYPE_LAYER2: + skb_len = (*hdr)->hdr.l2.pkt_length; + break; + case QETH_HEADER_TYPE_LAYER3: + skb_len = (*hdr)->hdr.l3.length; + headroom = ETH_HLEN; + break; + case QETH_HEADER_TYPE_OSN: + skb_len = (*hdr)->hdr.osn.pdu_length; + headroom = sizeof(struct qeth_hdr); + break; + default: + break; + } + + if (!skb_len) + return NULL; + + if (((skb_len >= card->options.rx_sg_cb) && + (!(card->info.type == QETH_CARD_TYPE_OSN)) && + (!atomic_read(&card->force_alloc_skb))) || + (card->options.cq == QETH_CQ_ENABLED)) + use_rx_sg = 1; + + if (use_rx_sg && qethbuffer->rx_skb) { + /* QETH_CQ_ENABLED only: */ + skb = qethbuffer->rx_skb; + qethbuffer->rx_skb = NULL; + } else { + unsigned int linear = (use_rx_sg) ? 
QETH_RX_PULL_LEN : skb_len; + + skb = napi_alloc_skb(&card->napi, linear + headroom); + } + if (!skb) + goto no_mem; + if (headroom) + skb_reserve(skb, headroom); + + data_ptr = element->addr + offset; + while (skb_len) { + data_len = min(skb_len, (int)(element->length - offset)); + if (data_len) { + if (use_rx_sg) + qeth_create_skb_frag(element, skb, offset, + data_len); + else + skb_put_data(skb, data_ptr, data_len); + } + skb_len -= data_len; + if (skb_len) { + if (qeth_is_last_sbale(element)) { + QETH_CARD_TEXT(card, 4, "unexeob"); + QETH_CARD_HEX(card, 2, buffer, sizeof(void *)); + dev_kfree_skb_any(skb); + card->stats.rx_errors++; + return NULL; + } + element++; + offset = 0; + data_ptr = element->addr; + } else { + offset += data_len; + } + } + *__element = element; + *__offset = offset; + if (use_rx_sg && card->options.performance_stats) { + card->perf_stats.sg_skbs_rx++; + card->perf_stats.sg_frags_rx += skb_shinfo(skb)->nr_frags; + } + return skb; +no_mem: + if (net_ratelimit()) { + QETH_CARD_TEXT(card, 2, "noskbmem"); + } + card->stats.rx_dropped++; + return NULL; +} +EXPORT_SYMBOL_GPL(qeth_core_get_next_skb); + +int qeth_poll(struct napi_struct *napi, int budget) +{ + struct qeth_card *card = container_of(napi, struct qeth_card, napi); + int work_done = 0; + struct qeth_qdio_buffer *buffer; + int done; + int new_budget = budget; + + if (card->options.performance_stats) { + card->perf_stats.inbound_cnt++; + card->perf_stats.inbound_start_time = qeth_get_micros(); + } + + while (1) { + if (!card->rx.b_count) { + card->rx.qdio_err = 0; + card->rx.b_count = qdio_get_next_buffers( + card->data.ccwdev, 0, &card->rx.b_index, + &card->rx.qdio_err); + if (card->rx.b_count <= 0) { + card->rx.b_count = 0; + break; + } + card->rx.b_element = + &card->qdio.in_q->bufs[card->rx.b_index] + .buffer->element[0]; + card->rx.e_offset = 0; + } + + while (card->rx.b_count) { + buffer = &card->qdio.in_q->bufs[card->rx.b_index]; + if (!(card->rx.qdio_err && + qeth_check_qdio_errors(card, buffer->buffer, + card->rx.qdio_err, "qinerr"))) + work_done += + card->discipline->process_rx_buffer( + card, new_budget, &done); + else + done = 1; + + if (done) { + if (card->options.performance_stats) + card->perf_stats.bufs_rec++; + qeth_put_buffer_pool_entry(card, + buffer->pool_entry); + qeth_queue_input_buffer(card, card->rx.b_index); + card->rx.b_count--; + if (card->rx.b_count) { + card->rx.b_index = + (card->rx.b_index + 1) % + QDIO_MAX_BUFFERS_PER_Q; + card->rx.b_element = + &card->qdio.in_q + ->bufs[card->rx.b_index] + .buffer->element[0]; + card->rx.e_offset = 0; + } + } + + if (work_done >= budget) + goto out; + else + new_budget = budget - work_done; + } + } + + napi_complete_done(napi, work_done); + if (qdio_start_irq(card->data.ccwdev, 0)) + napi_schedule(&card->napi); +out: + if (card->options.performance_stats) + card->perf_stats.inbound_time += qeth_get_micros() - + card->perf_stats.inbound_start_time; + return work_done; +} +EXPORT_SYMBOL_GPL(qeth_poll); + +static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd) +{ + if (!cmd->hdr.return_code) + cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; + return cmd->hdr.return_code; +} + +int qeth_setassparms_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + + QETH_CARD_TEXT(card, 4, "defadpcb"); + + cmd = (struct qeth_ipa_cmd *) data; + if (cmd->hdr.return_code == 0) { + cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; + if (cmd->hdr.prot_version == 
QETH_PROT_IPV4) + card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled; + if (cmd->hdr.prot_version == QETH_PROT_IPV6) + card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled; + } + return 0; +} +EXPORT_SYMBOL_GPL(qeth_setassparms_cb); + +struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + __u16 cmd_code, __u16 len, + enum qeth_prot_versions prot) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_CARD_TEXT(card, 4, "getasscm"); + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot); + + if (iob) { + cmd = __ipa_cmd(iob); + cmd->data.setassparms.hdr.assist_no = ipa_func; + cmd->data.setassparms.hdr.length = 8 + len; + cmd->data.setassparms.hdr.command_code = cmd_code; + cmd->data.setassparms.hdr.return_code = 0; + cmd->data.setassparms.hdr.seq_no = 0; + } + + return iob; +} +EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd); + +int qeth_send_setassparms(struct qeth_card *card, + struct qeth_cmd_buffer *iob, __u16 len, long data, + int (*reply_cb)(struct qeth_card *, + struct qeth_reply *, unsigned long), + void *reply_param) +{ + int rc; + struct qeth_ipa_cmd *cmd; + + QETH_CARD_TEXT(card, 4, "sendassp"); + + cmd = __ipa_cmd(iob); + if (len <= sizeof(__u32)) + cmd->data.setassparms.data.flags_32bit = (__u32) data; + else /* (len > sizeof(__u32)) */ + memcpy(&cmd->data.setassparms.data, (void *) data, len); + + rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_send_setassparms); + +int qeth_send_simple_setassparms_prot(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + u16 cmd_code, long data, + enum qeth_prot_versions prot) +{ + int rc; + int length = 0; + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT_(card, 4, "simassp%i", prot); + if (data) + length = sizeof(__u32); + iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot); + if (!iob) + return -ENOMEM; + rc = qeth_send_setassparms(card, iob, length, data, + qeth_setassparms_cb, NULL); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot); + +static void qeth_unregister_dbf_views(void) +{ + int x; + for (x = 0; x < QETH_DBF_INFOS; x++) { + debug_unregister(qeth_dbf[x].id); + qeth_dbf[x].id = NULL; + } +} + +void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...) 
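+/* Editor's note: dbf_txt_buf below is only 32 bytes, so vsnprintf() truncates
+ * longer formatted strings before they are handed to debug_text_event(). */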
+{ + char dbf_txt_buf[32]; + va_list args; + + if (!debug_level_enabled(id, level)) + return; + va_start(args, fmt); + vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); + va_end(args); + debug_text_event(id, level, dbf_txt_buf); +} +EXPORT_SYMBOL_GPL(qeth_dbf_longtext); + +static int qeth_register_dbf_views(void) +{ + int ret; + int x; + + for (x = 0; x < QETH_DBF_INFOS; x++) { + /* register the areas */ + qeth_dbf[x].id = debug_register(qeth_dbf[x].name, + qeth_dbf[x].pages, + qeth_dbf[x].areas, + qeth_dbf[x].len); + if (qeth_dbf[x].id == NULL) { + qeth_unregister_dbf_views(); + return -ENOMEM; + } + + /* register a view */ + ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view); + if (ret) { + qeth_unregister_dbf_views(); + return ret; + } + + /* set a passing level */ + debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level); + } + + return 0; +} + +int qeth_core_load_discipline(struct qeth_card *card, + enum qeth_discipline_id discipline) +{ + int rc = 0; + + mutex_lock(&qeth_mod_mutex); + switch (discipline) { + case QETH_DISCIPLINE_LAYER3: + card->discipline = try_then_request_module( + symbol_get(qeth_l3_discipline), "qeth_l3"); + break; + case QETH_DISCIPLINE_LAYER2: + card->discipline = try_then_request_module( + symbol_get(qeth_l2_discipline), "qeth_l2"); + break; + default: + break; + } + + if (!card->discipline) { + dev_err(&card->gdev->dev, "There is no kernel module to " + "support discipline %d\n", discipline); + rc = -EINVAL; + } + mutex_unlock(&qeth_mod_mutex); + return rc; +} + +void qeth_core_free_discipline(struct qeth_card *card) +{ + if (card->options.layer2) + symbol_put(qeth_l2_discipline); + else + symbol_put(qeth_l3_discipline); + card->discipline = NULL; +} + +const struct device_type qeth_generic_devtype = { + .name = "qeth_generic", + .groups = qeth_generic_attr_groups, +}; +EXPORT_SYMBOL_GPL(qeth_generic_devtype); + +static const struct device_type qeth_osn_devtype = { + .name = "qeth_osn", + .groups = qeth_osn_attr_groups, +}; + +#define DBF_NAME_LEN 20 + +struct qeth_dbf_entry { + char dbf_name[DBF_NAME_LEN]; + debug_info_t *dbf_info; + struct list_head dbf_list; +}; + +static LIST_HEAD(qeth_dbf_list); +static DEFINE_MUTEX(qeth_dbf_list_mutex); + +static debug_info_t *qeth_get_dbf_entry(char *name) +{ + struct qeth_dbf_entry *entry; + debug_info_t *rc = NULL; + + mutex_lock(&qeth_dbf_list_mutex); + list_for_each_entry(entry, &qeth_dbf_list, dbf_list) { + if (strcmp(entry->dbf_name, name) == 0) { + rc = entry->dbf_info; + break; + } + } + mutex_unlock(&qeth_dbf_list_mutex); + return rc; +} + +static int qeth_add_dbf_entry(struct qeth_card *card, char *name) +{ + struct qeth_dbf_entry *new_entry; + + card->debug = debug_register(name, 2, 1, 8); + if (!card->debug) { + QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf"); + goto err; + } + if (debug_register_view(card->debug, &debug_hex_ascii_view)) + goto err_dbg; + new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL); + if (!new_entry) + goto err_dbg; + strncpy(new_entry->dbf_name, name, DBF_NAME_LEN); + new_entry->dbf_info = card->debug; + mutex_lock(&qeth_dbf_list_mutex); + list_add(&new_entry->dbf_list, &qeth_dbf_list); + mutex_unlock(&qeth_dbf_list_mutex); + + return 0; + +err_dbg: + debug_unregister(card->debug); +err: + return -ENOMEM; +} + +static void qeth_clear_dbf_list(void) +{ + struct qeth_dbf_entry *entry, *tmp; + + mutex_lock(&qeth_dbf_list_mutex); + list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) { + list_del(&entry->dbf_list); + debug_unregister(entry->dbf_info); + 
kfree(entry); + } + mutex_unlock(&qeth_dbf_list_mutex); +} + +static struct net_device *qeth_alloc_netdev(struct qeth_card *card) +{ + struct net_device *dev; + + switch (card->info.type) { + case QETH_CARD_TYPE_IQD: + dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN, ether_setup); + break; + case QETH_CARD_TYPE_OSN: + dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup); + break; + default: + dev = alloc_etherdev(0); + } + + if (!dev) + return NULL; + + dev->ml_priv = card; + dev->watchdog_timeo = QETH_TX_TIMEOUT; + dev->min_mtu = IS_OSN(card) ? 64 : 576; + /* initialized when device first goes online: */ + dev->max_mtu = 0; + dev->mtu = 0; + SET_NETDEV_DEV(dev, &card->gdev->dev); + netif_carrier_off(dev); + + if (!IS_OSN(card)) { + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + dev->hw_features |= NETIF_F_SG; + dev->vlan_features |= NETIF_F_SG; + if (IS_IQD(card)) + dev->features |= NETIF_F_SG; + } + + return dev; +} + +struct net_device *qeth_clone_netdev(struct net_device *orig) +{ + struct net_device *clone = qeth_alloc_netdev(orig->ml_priv); + + if (!clone) + return NULL; + + clone->dev_port = orig->dev_port; + return clone; +} + +static int qeth_core_probe_device(struct ccwgroup_device *gdev) +{ + struct qeth_card *card; + struct device *dev; + int rc; + enum qeth_discipline_id enforced_disc; + unsigned long flags; + char dbf_name[DBF_NAME_LEN]; + + QETH_DBF_TEXT(SETUP, 2, "probedev"); + + dev = &gdev->dev; + if (!get_device(dev)) + return -ENODEV; + + QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev)); + + card = qeth_alloc_card(gdev); + if (!card) { + QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM); + rc = -ENOMEM; + goto err_dev; + } + + snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s", + dev_name(&gdev->dev)); + card->debug = qeth_get_dbf_entry(dbf_name); + if (!card->debug) { + rc = qeth_add_dbf_entry(card, dbf_name); + if (rc) + goto err_card; + } + + dev_set_drvdata(&gdev->dev, card); + qeth_setup_card(card); + rc = qeth_update_from_chp_desc(card); + if (rc) + goto err_chp_desc; + + card->dev = qeth_alloc_netdev(card); + if (!card->dev) { + rc = -ENOMEM; + goto err_card; + } + + qeth_determine_capabilities(card); + enforced_disc = qeth_enforce_discipline(card); + switch (enforced_disc) { + case QETH_DISCIPLINE_UNDETERMINED: + gdev->dev.type = &qeth_generic_devtype; + break; + default: + card->info.layer_enforced = true; + rc = qeth_core_load_discipline(card, enforced_disc); + if (rc) + goto err_load; + + gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN) + ? 
card->discipline->devtype + : &qeth_osn_devtype; + rc = card->discipline->setup(card->gdev); + if (rc) + goto err_disc; + break; + } + + write_lock_irqsave(&qeth_core_card_list.rwlock, flags); + list_add_tail(&card->list, &qeth_core_card_list.list); + write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); + return 0; + +err_disc: + qeth_core_free_discipline(card); +err_load: + free_netdev(card->dev); +err_chp_desc: +err_card: + qeth_core_free_card(card); +err_dev: + put_device(dev); + return rc; +} + +static void qeth_core_remove_device(struct ccwgroup_device *gdev) +{ + unsigned long flags; + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + + QETH_DBF_TEXT(SETUP, 2, "removedv"); + + if (card->discipline) { + card->discipline->remove(gdev); + qeth_core_free_discipline(card); + } + + write_lock_irqsave(&qeth_core_card_list.rwlock, flags); + list_del(&card->list); + write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); + free_netdev(card->dev); + qeth_core_free_card(card); + dev_set_drvdata(&gdev->dev, NULL); + put_device(&gdev->dev); +} + +static int qeth_core_set_online(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + int rc = 0; + enum qeth_discipline_id def_discipline; + + if (!card->discipline) { + if (card->info.type == QETH_CARD_TYPE_IQD) + def_discipline = QETH_DISCIPLINE_LAYER3; + else + def_discipline = QETH_DISCIPLINE_LAYER2; + rc = qeth_core_load_discipline(card, def_discipline); + if (rc) + goto err; + rc = card->discipline->setup(card->gdev); + if (rc) { + qeth_core_free_discipline(card); + goto err; + } + } + rc = card->discipline->set_online(gdev); +err: + return rc; +} + +static int qeth_core_set_offline(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + return card->discipline->set_offline(gdev); +} + +static void qeth_core_shutdown(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + qeth_set_allowed_threads(card, 0, 1); + if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) + qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); + qeth_qdio_clear_card(card, 0); + qeth_clear_qdio_buffers(card); + qdio_free(CARD_DDEV(card)); +} + +static int qeth_core_freeze(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + if (card->discipline && card->discipline->freeze) + return card->discipline->freeze(gdev); + return 0; +} + +static int qeth_core_thaw(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + if (card->discipline && card->discipline->thaw) + return card->discipline->thaw(gdev); + return 0; +} + +static int qeth_core_restore(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + if (card->discipline && card->discipline->restore) + return card->discipline->restore(gdev); + return 0; +} + +static ssize_t group_store(struct device_driver *ddrv, const char *buf, + size_t count) +{ + int err; + + err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3, + buf); + + return err ? 
err : count; +} +static DRIVER_ATTR_WO(group); + +static struct attribute *qeth_drv_attrs[] = { + &driver_attr_group.attr, + NULL, +}; +static struct attribute_group qeth_drv_attr_group = { + .attrs = qeth_drv_attrs, +}; +static const struct attribute_group *qeth_drv_attr_groups[] = { + &qeth_drv_attr_group, + NULL, +}; + +static struct ccwgroup_driver qeth_core_ccwgroup_driver = { + .driver = { + .groups = qeth_drv_attr_groups, + .owner = THIS_MODULE, + .name = "qeth", + }, + .ccw_driver = &qeth_ccw_driver, + .setup = qeth_core_probe_device, + .remove = qeth_core_remove_device, + .set_online = qeth_core_set_online, + .set_offline = qeth_core_set_offline, + .shutdown = qeth_core_shutdown, + .prepare = NULL, + .complete = NULL, + .freeze = qeth_core_freeze, + .thaw = qeth_core_thaw, + .restore = qeth_core_restore, +}; + +int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct qeth_card *card = dev->ml_priv; + struct mii_ioctl_data *mii_data; + int rc = 0; + + if (!card) + return -ENODEV; + + if (!qeth_card_hw_is_reachable(card)) + return -ENODEV; + + if (card->info.type == QETH_CARD_TYPE_OSN) + return -EPERM; + + switch (cmd) { + case SIOC_QETH_ADP_SET_SNMP_CONTROL: + rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); + break; + case SIOC_QETH_GET_CARD_TYPE: + if ((card->info.type == QETH_CARD_TYPE_OSD || + card->info.type == QETH_CARD_TYPE_OSM || + card->info.type == QETH_CARD_TYPE_OSX) && + !card->info.guestlan) + return 1; + else + return 0; + case SIOCGMIIPHY: + mii_data = if_mii(rq); + mii_data->phy_id = 0; + break; + case SIOCGMIIREG: + mii_data = if_mii(rq); + if (mii_data->phy_id != 0) + rc = -EINVAL; + else + mii_data->val_out = qeth_mdio_read(dev, + mii_data->phy_id, mii_data->reg_num); + break; + case SIOC_QETH_QUERY_OAT: + rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data); + break; + default: + if (card->discipline->do_ioctl) + rc = card->discipline->do_ioctl(dev, rq, cmd); + else + rc = -EOPNOTSUPP; + } + if (rc) + QETH_CARD_TEXT_(card, 2, "ioce%x", rc); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_do_ioctl); + +static struct { + const char str[ETH_GSTRING_LEN]; +} qeth_ethtool_stats_keys[] = { +/* 0 */{"rx skbs"}, + {"rx buffers"}, + {"tx skbs"}, + {"tx buffers"}, + {"tx skbs no packing"}, + {"tx buffers no packing"}, + {"tx skbs packing"}, + {"tx buffers packing"}, + {"tx sg skbs"}, + {"tx buffer elements"}, +/* 10 */{"rx sg skbs"}, + {"rx sg frags"}, + {"rx sg page allocs"}, + {"tx large kbytes"}, + {"tx large count"}, + {"tx pk state ch n->p"}, + {"tx pk state ch p->n"}, + {"tx pk watermark low"}, + {"tx pk watermark high"}, + {"queue 0 buffer usage"}, +/* 20 */{"queue 1 buffer usage"}, + {"queue 2 buffer usage"}, + {"queue 3 buffer usage"}, + {"rx poll time"}, + {"rx poll count"}, + {"rx do_QDIO time"}, + {"rx do_QDIO count"}, + {"tx handler time"}, + {"tx handler count"}, + {"tx time"}, +/* 30 */{"tx count"}, + {"tx do_QDIO time"}, + {"tx do_QDIO count"}, + {"tx csum"}, + {"tx lin"}, + {"tx linfail"}, + {"cq handler count"}, + {"cq handler time"}, + {"rx csum"} +}; + +int qeth_core_get_sset_count(struct net_device *dev, int stringset) +{ + switch (stringset) { + case ETH_SS_STATS: + return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN); + default: + return -EINVAL; + } +} +EXPORT_SYMBOL_GPL(qeth_core_get_sset_count); + +void qeth_core_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct qeth_card *card = dev->ml_priv; + data[0] = card->stats.rx_packets - + card->perf_stats.initial_rx_packets; + 
data[1] = card->perf_stats.bufs_rec; + data[2] = card->stats.tx_packets - + card->perf_stats.initial_tx_packets; + data[3] = card->perf_stats.bufs_sent; + data[4] = card->stats.tx_packets - card->perf_stats.initial_tx_packets + - card->perf_stats.skbs_sent_pack; + data[5] = card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack; + data[6] = card->perf_stats.skbs_sent_pack; + data[7] = card->perf_stats.bufs_sent_pack; + data[8] = card->perf_stats.sg_skbs_sent; + data[9] = card->perf_stats.buf_elements_sent; + data[10] = card->perf_stats.sg_skbs_rx; + data[11] = card->perf_stats.sg_frags_rx; + data[12] = card->perf_stats.sg_alloc_page_rx; + data[13] = (card->perf_stats.large_send_bytes >> 10); + data[14] = card->perf_stats.large_send_cnt; + data[15] = card->perf_stats.sc_dp_p; + data[16] = card->perf_stats.sc_p_dp; + data[17] = QETH_LOW_WATERMARK_PACK; + data[18] = QETH_HIGH_WATERMARK_PACK; + data[19] = atomic_read(&card->qdio.out_qs[0]->used_buffers); + data[20] = (card->qdio.no_out_queues > 1) ? + atomic_read(&card->qdio.out_qs[1]->used_buffers) : 0; + data[21] = (card->qdio.no_out_queues > 2) ? + atomic_read(&card->qdio.out_qs[2]->used_buffers) : 0; + data[22] = (card->qdio.no_out_queues > 3) ? + atomic_read(&card->qdio.out_qs[3]->used_buffers) : 0; + data[23] = card->perf_stats.inbound_time; + data[24] = card->perf_stats.inbound_cnt; + data[25] = card->perf_stats.inbound_do_qdio_time; + data[26] = card->perf_stats.inbound_do_qdio_cnt; + data[27] = card->perf_stats.outbound_handler_time; + data[28] = card->perf_stats.outbound_handler_cnt; + data[29] = card->perf_stats.outbound_time; + data[30] = card->perf_stats.outbound_cnt; + data[31] = card->perf_stats.outbound_do_qdio_time; + data[32] = card->perf_stats.outbound_do_qdio_cnt; + data[33] = card->perf_stats.tx_csum; + data[34] = card->perf_stats.tx_lin; + data[35] = card->perf_stats.tx_linfail; + data[36] = card->perf_stats.cq_cnt; + data[37] = card->perf_stats.cq_time; + data[38] = card->perf_stats.rx_csum; +} +EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats); + +void qeth_core_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(data, &qeth_ethtool_stats_keys, + sizeof(qeth_ethtool_stats_keys)); + break; + default: + WARN_ON(1); + break; + } +} +EXPORT_SYMBOL_GPL(qeth_core_get_strings); + +void qeth_core_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct qeth_card *card = dev->ml_priv; + + strlcpy(info->driver, card->options.layer2 ? "qeth_l2" : "qeth_l3", + sizeof(info->driver)); + strlcpy(info->version, "1.0", sizeof(info->version)); + strlcpy(info->fw_version, card->info.mcl_level, + sizeof(info->fw_version)); + snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s", + CARD_RDEV_ID(card), CARD_WDEV_ID(card), CARD_DDEV_ID(card)); +} +EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo); + +/* Helper function to fill 'advertising' and 'supported' which are the same. */ +/* Autoneg and full-duplex are supported and advertised unconditionally. */ +/* Always advertise and support all speeds up to specified, and only one */ +/* specified port type. 
*/ +static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd, + int maxspeed, int porttype) +{ + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising); + + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); + + switch (porttype) { + case PORT_TP: + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + break; + case PORT_FIBRE: + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); + WARN_ON_ONCE(1); + } + + /* fallthrough from high to low, to select all legal speeds: */ + switch (maxspeed) { + case SPEED_10000: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseT_Full); + case SPEED_1000: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Half); + case SPEED_100: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Half); + case SPEED_10: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Half); + /* end fallthrough */ + break; + default: + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Half); + WARN_ON_ONCE(1); + } +} + +int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct qeth_card *card = netdev->ml_priv; + enum qeth_link_types link_type; + struct carrier_info carrier_info; + int rc; + + if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan)) + link_type = QETH_LINK_TYPE_10GBIT_ETH; + else + link_type = card->info.link_type; + + cmd->base.duplex = DUPLEX_FULL; + cmd->base.autoneg = AUTONEG_ENABLE; + cmd->base.phy_address = 0; + cmd->base.mdio_support = 0; + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; + + switch (link_type) { + case QETH_LINK_TYPE_FAST_ETH: + case QETH_LINK_TYPE_LANE_ETH100: + cmd->base.speed = SPEED_100; + cmd->base.port = PORT_TP; + break; + case QETH_LINK_TYPE_GBIT_ETH: + case QETH_LINK_TYPE_LANE_ETH1000: + cmd->base.speed = SPEED_1000; + cmd->base.port = PORT_FIBRE; + break; + case QETH_LINK_TYPE_10GBIT_ETH: + cmd->base.speed = SPEED_10000; + cmd->base.port = PORT_FIBRE; + break; + default: + cmd->base.speed = SPEED_10; + 
cmd->base.port = PORT_TP; + } + qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port); + + /* Check if we can obtain more accurate information. */ + /* If QUERY_CARD_INFO command is not supported or fails, */ + /* just return the heuristics that were filled above. */ + if (!qeth_card_hw_is_reachable(card)) + return -ENODEV; + rc = qeth_query_card_info(card, &carrier_info); + if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */ + return 0; + if (rc) /* report error from the hardware operation */ + return rc; + /* on success, fill in the information obtained from the hardware */ + + netdev_dbg(netdev, + "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n", + carrier_info.card_type, + carrier_info.port_mode, + carrier_info.port_speed); + + /* Update attributes for which we've obtained more authoritative */ + /* information, leave the rest the way they were filled above. */ + switch (carrier_info.card_type) { + case CARD_INFO_TYPE_1G_COPPER_A: + case CARD_INFO_TYPE_1G_COPPER_B: + cmd->base.port = PORT_TP; + qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port); + break; + case CARD_INFO_TYPE_1G_FIBRE_A: + case CARD_INFO_TYPE_1G_FIBRE_B: + cmd->base.port = PORT_FIBRE; + qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port); + break; + case CARD_INFO_TYPE_10G_FIBRE_A: + case CARD_INFO_TYPE_10G_FIBRE_B: + cmd->base.port = PORT_FIBRE; + qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port); + break; + } + + switch (carrier_info.port_mode) { + case CARD_INFO_PORTM_FULLDUPLEX: + cmd->base.duplex = DUPLEX_FULL; + break; + case CARD_INFO_PORTM_HALFDUPLEX: + cmd->base.duplex = DUPLEX_HALF; + break; + } + + switch (carrier_info.port_speed) { + case CARD_INFO_PORTS_10M: + cmd->base.speed = SPEED_10; + break; + case CARD_INFO_PORTS_100M: + cmd->base.speed = SPEED_100; + break; + case CARD_INFO_PORTS_1G: + cmd->base.speed = SPEED_1000; + break; + case CARD_INFO_PORTS_10G: + cmd->base.speed = SPEED_10000; + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_link_ksettings); + +/* Callback to handle checksum offload command reply from OSA card. + * Verify that required features have been enabled on the card. + * Return error in hdr->return_code as this value is checked by caller. + * + * Always returns zero to indicate no further messages from the OSA card. + */ +static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_checksum_cmd *chksum_cb = + (struct qeth_checksum_cmd *)reply->param; + + QETH_CARD_TEXT(card, 4, "chkdoccb"); + if (qeth_setassparms_inspect_rc(cmd)) + return 0; + + memset(chksum_cb, 0, sizeof(*chksum_cb)); + if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) { + chksum_cb->supported = + cmd->data.setassparms.data.chksum.supported; + QETH_CARD_TEXT_(card, 3, "strt:%x", chksum_cb->supported); + } + if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_ENABLE) { + chksum_cb->supported = + cmd->data.setassparms.data.chksum.supported; + chksum_cb->enabled = + cmd->data.setassparms.data.chksum.enabled; + QETH_CARD_TEXT_(card, 3, "supp:%x", chksum_cb->supported); + QETH_CARD_TEXT_(card, 3, "enab:%x", chksum_cb->enabled); + } + return 0; +} + +/* Send command to OSA card and check results. 
*/ +static int qeth_ipa_checksum_run_cmd(struct qeth_card *card, + enum qeth_ipa_funcs ipa_func, + __u16 cmd_code, long data, + struct qeth_checksum_cmd *chksum_cb, + enum qeth_prot_versions prot) +{ + struct qeth_cmd_buffer *iob; + int rc = -ENOMEM; + + QETH_CARD_TEXT(card, 4, "chkdocmd"); + iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, + sizeof(__u32), prot); + if (iob) + rc = qeth_send_setassparms(card, iob, sizeof(__u32), data, + qeth_ipa_checksum_run_cmd_cb, + chksum_cb); + return rc; +} + +static int qeth_send_checksum_on(struct qeth_card *card, int cstype, + enum qeth_prot_versions prot) +{ + u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP; + struct qeth_checksum_cmd chksum_cb; + int rc; + + if (prot == QETH_PROT_IPV4) + required_features |= QETH_IPA_CHECKSUM_IP_HDR; + rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0, + &chksum_cb, prot); + if (!rc) { + if ((required_features & chksum_cb.supported) != + required_features) + rc = -EIO; + else if (!(QETH_IPA_CHECKSUM_LP2LP & chksum_cb.supported) && + cstype == IPA_INBOUND_CHECKSUM) + dev_warn(&card->gdev->dev, + "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n", + QETH_CARD_IFNAME(card)); + } + if (rc) { + qeth_send_simple_setassparms_prot(card, cstype, + IPA_CMD_ASS_STOP, 0, prot); + dev_warn(&card->gdev->dev, + "Starting HW IPv%d checksumming for %s failed, using SW checksumming\n", + prot, QETH_CARD_IFNAME(card)); + return rc; + } + rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE, + chksum_cb.supported, &chksum_cb, + prot); + if (!rc) { + if ((required_features & chksum_cb.enabled) != + required_features) + rc = -EIO; + } + if (rc) { + qeth_send_simple_setassparms_prot(card, cstype, + IPA_CMD_ASS_STOP, 0, prot); + dev_warn(&card->gdev->dev, + "Enabling HW IPv%d checksumming for %s failed, using SW checksumming\n", + prot, QETH_CARD_IFNAME(card)); + return rc; + } + + dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n", + cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot); + return 0; +} + +static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype, + enum qeth_prot_versions prot) +{ + int rc = (on) ? qeth_send_checksum_on(card, cstype, prot) + : qeth_send_simple_setassparms_prot(card, cstype, + IPA_CMD_ASS_STOP, 0, + prot); + return rc ? -EIO : 0; +} + +static int qeth_set_ipa_tso(struct qeth_card *card, int on) +{ + int rc; + + QETH_CARD_TEXT(card, 3, "sttso"); + + if (on) { + rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO, + IPA_CMD_ASS_START, 0); + if (rc) { + dev_warn(&card->gdev->dev, + "Starting outbound TCP segmentation offload for %s failed\n", + QETH_CARD_IFNAME(card)); + return -EIO; + } + dev_info(&card->gdev->dev, "Outbound TSO enabled\n"); + } else { + rc = qeth_send_simple_setassparms(card, IPA_OUTBOUND_TSO, + IPA_CMD_ASS_STOP, 0); + } + return rc; +} + +static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on) +{ + int rc_ipv4 = (on) ? -EOPNOTSUPP : 0; + int rc_ipv6; + + if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) + rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM, + QETH_PROT_IPV4); + if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) + /* no/one Offload Assist available, so the rc is trivial */ + return rc_ipv4; + + rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM, + QETH_PROT_IPV6); + + if (on) + /* enable: success if any Assist is active */ + return (rc_ipv6) ? 
rc_ipv4 : 0; + + /* disable: failure if any Assist is still active */ + return (rc_ipv6) ? rc_ipv6 : rc_ipv4; +} + +#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \ + NETIF_F_IPV6_CSUM) +/** + * qeth_enable_hw_features() - (Re-)Enable HW functions for device features + * @dev: a net_device + */ +void qeth_enable_hw_features(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + netdev_features_t features; + + rtnl_lock(); + features = dev->features; + /* force-off any feature that needs an IPA sequence. + * netdev_update_features() will restart them. + */ + dev->features &= ~QETH_HW_FEATURES; + netdev_update_features(dev); + if (features != dev->features) + dev_warn(&card->gdev->dev, + "Device recovery failed to restore all offload features\n"); + rtnl_unlock(); +} +EXPORT_SYMBOL_GPL(qeth_enable_hw_features); + +int qeth_set_features(struct net_device *dev, netdev_features_t features) +{ + struct qeth_card *card = dev->ml_priv; + netdev_features_t changed = dev->features ^ features; + int rc = 0; + + QETH_DBF_TEXT(SETUP, 2, "setfeat"); + QETH_DBF_HEX(SETUP, 2, &features, sizeof(features)); + + if ((changed & NETIF_F_IP_CSUM)) { + rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM, + IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4); + if (rc) + changed ^= NETIF_F_IP_CSUM; + } + if (changed & NETIF_F_IPV6_CSUM) { + rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM, + IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6); + if (rc) + changed ^= NETIF_F_IPV6_CSUM; + } + if (changed & NETIF_F_RXCSUM) { + rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM); + if (rc) + changed ^= NETIF_F_RXCSUM; + } + if ((changed & NETIF_F_TSO)) { + rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO ? 1 : 0); + if (rc) + changed ^= NETIF_F_TSO; + } + + /* everything changed successfully? */ + if ((dev->features ^ features) == changed) + return 0; + /* something went wrong. save changed features and return error */ + dev->features ^= changed; + return -EIO; +} +EXPORT_SYMBOL_GPL(qeth_set_features); + +netdev_features_t qeth_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_DBF_TEXT(SETUP, 2, "fixfeat"); + if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) + features &= ~NETIF_F_IP_CSUM; + if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) + features &= ~NETIF_F_IPV6_CSUM; + if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) && + !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) + features &= ~NETIF_F_RXCSUM; + if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) + features &= ~NETIF_F_TSO; + /* if the card isn't up, remove features that require hw changes */ + if (card->state == CARD_STATE_DOWN || + card->state == CARD_STATE_RECOVER) + features &= ~QETH_HW_FEATURES; + QETH_DBF_HEX(SETUP, 2, &features, sizeof(features)); + return features; +} +EXPORT_SYMBOL_GPL(qeth_fix_features); + +netdev_features_t qeth_features_check(struct sk_buff *skb, + struct net_device *dev, + netdev_features_t features) +{ + /* GSO segmentation builds skbs with + * a (small) linear part for the headers, and + * page frags for the data. + * Compared to a linear skb, the header-only part consumes an + * additional buffer element. This reduces buffer utilization, and + * hurts throughput. So compress small segments into one element. 
+ */ + if (netif_needs_gso(skb, features)) { + /* match skb_segment(): */ + unsigned int doffset = skb->data - skb_mac_header(skb); + unsigned int hsize = skb_shinfo(skb)->gso_size; + unsigned int hroom = skb_headroom(skb); + + /* linearize only if resulting skb allocations are order-0: */ + if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0)) + features &= ~NETIF_F_SG; + } + + return vlan_features_check(skb, features); +} +EXPORT_SYMBOL_GPL(qeth_features_check); + +static int __init qeth_core_init(void) +{ + int rc; + + pr_info("loading core functions\n"); + INIT_LIST_HEAD(&qeth_core_card_list.list); + INIT_LIST_HEAD(&qeth_dbf_list); + rwlock_init(&qeth_core_card_list.rwlock); + mutex_init(&qeth_mod_mutex); + + qeth_wq = create_singlethread_workqueue("qeth_wq"); + if (!qeth_wq) { + rc = -ENOMEM; + goto out_err; + } + + rc = qeth_register_dbf_views(); + if (rc) + goto dbf_err; + qeth_core_root_dev = root_device_register("qeth"); + rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); + if (rc) + goto register_err; + qeth_core_header_cache = kmem_cache_create("qeth_hdr", + sizeof(struct qeth_hdr) + ETH_HLEN, 64, 0, NULL); + if (!qeth_core_header_cache) { + rc = -ENOMEM; + goto slab_err; + } + qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf", + sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL); + if (!qeth_qdio_outbuf_cache) { + rc = -ENOMEM; + goto cqslab_err; + } + rc = ccw_driver_register(&qeth_ccw_driver); + if (rc) + goto ccw_err; + rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver); + if (rc) + goto ccwgroup_err; + + return 0; + +ccwgroup_err: + ccw_driver_unregister(&qeth_ccw_driver); +ccw_err: + kmem_cache_destroy(qeth_qdio_outbuf_cache); +cqslab_err: + kmem_cache_destroy(qeth_core_header_cache); +slab_err: + root_device_unregister(qeth_core_root_dev); +register_err: + qeth_unregister_dbf_views(); +dbf_err: + destroy_workqueue(qeth_wq); +out_err: + pr_err("Initializing the qeth device driver failed\n"); + return rc; +} + +static void __exit qeth_core_exit(void) +{ + qeth_clear_dbf_list(); + destroy_workqueue(qeth_wq); + ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver); + ccw_driver_unregister(&qeth_ccw_driver); + kmem_cache_destroy(qeth_qdio_outbuf_cache); + kmem_cache_destroy(qeth_core_header_cache); + root_device_unregister(qeth_core_root_dev); + qeth_unregister_dbf_views(); + pr_info("core functions removed\n"); +} + +module_init(qeth_core_init); +module_exit(qeth_core_exit); +MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); +MODULE_DESCRIPTION("qeth core functions"); +MODULE_LICENSE("GPL"); diff --git a/drivers/s390/net/qeth_core_mpc.c b/drivers/s390/net/qeth_core_mpc.c new file mode 100644 index 000000000..e891c0b52 --- /dev/null +++ b/drivers/s390/net/qeth_core_mpc.c @@ -0,0 +1,278 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 
2007 + * Author(s): Frank Pavlic <fpavlic@de.ibm.com>, + * Thomas Spatzier <tspat@de.ibm.com>, + * Frank Blaschka <frank.blaschka@de.ibm.com> + */ + +#include <linux/module.h> +#include <asm/cio.h> +#include "qeth_core_mpc.h" + +unsigned char IDX_ACTIVATE_READ[] = { + 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x19, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1, + 0xd3, 0xd3, 0xd6, 0xd3, 0xc5, 0x40, 0x00, 0x00, + 0x00, 0x00 +}; + +unsigned char IDX_ACTIVATE_WRITE[] = { + 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x15, 0x01, 0x01, 0x80, 0x00, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xc1, + 0xd3, 0xd3, 0xd6, 0xd3, 0xc5, 0x40, 0x00, 0x00, + 0x00, 0x00 +}; + +unsigned char CM_ENABLE[] = { + 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x63, + 0x10, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, + 0x81, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x23, + 0x00, 0x00, 0x23, 0x05, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x23, 0x00, 0x00, 0x00, 0x40, + 0x00, 0x0c, 0x41, 0x02, 0x00, 0x17, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x0b, 0x04, 0x01, + 0x7e, 0x04, 0x05, 0x00, 0x01, 0x01, 0x0f, + 0x00, + 0x0c, 0x04, 0x02, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff +}; + +unsigned char CM_SETUP[] = { + 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x64, + 0x10, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, + 0x81, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x24, + 0x00, 0x00, 0x24, 0x05, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x40, + 0x00, 0x0c, 0x41, 0x04, 0x00, 0x18, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x09, 0x04, 0x04, + 0x05, 0x00, 0x01, 0x01, 0x11, + 0x00, 0x09, 0x04, + 0x05, 0x05, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x06, + 0x04, 0x06, 0xc8, 0x00 +}; + +unsigned char ULP_ENABLE[] = { + 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6b, + 0x10, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, + 0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x00, 0x2b, + 0x00, 0x00, 0x2b, 0x05, 0x20, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x2b, 0x00, 0x00, 0x00, 0x40, + 0x00, 0x0c, 0x41, 0x02, 0x00, 0x1f, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x0b, 0x04, 0x01, + 0x03, 0x04, 0x05, 0x00, 0x01, 0x01, 0x12, + 0x00, + 0x14, 0x04, 0x0a, 0x00, 0x20, 0x00, 0x00, 0xff, + 0xff, 0x00, 0x08, 0xc8, 0xe8, 0xc4, 0xf1, 0xc7, + 0xf1, 0x00, 0x00 +}; + +unsigned char ULP_SETUP[] = { + 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x6c, + 0x10, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, + 0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x24, 0x00, 0x2c, + 0x00, 0x00, 0x2c, 0x05, 0x20, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x2c, 0x00, 0x00, 0x00, 0x40, + 0x00, 0x0c, 0x41, 0x04, 0x00, 0x20, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x09, 0x04, 0x04, + 0x05, 0x00, 0x01, 0x01, 0x14, + 0x00, 0x09, 0x04, + 0x05, 0x05, 0x30, 0x01, 0x00, 0x00, + 0x00, 0x06, + 0x04, 0x06, 0x40, 0x00, + 0x00, 0x08, 0x04, 0x0b, + 0x00, 0x00, 0x00, 0x00 +}; + +unsigned char DM_ACT[] = { + 0x00, 0xe0, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, 0x00, 0x55, + 0x10, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, + 0x41, 0x7e, 0x00, 0x01, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x02, 0x00, 0x24, 0x00, 0x15, + 0x00, 0x00, 0x2c, 0x05, 0x20, 0x01, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, 0x00, 0x15, 0x00, 0x00, 0x00, 0x40, + 0x00, 0x0c, 0x43, 0x60, 0x00, 0x09, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x09, 0x04, 0x04, + 0x05, 0x40, 0x01, 0x01, 0x00 +}; + +unsigned char IPA_PDU_HEADER[] = { + 0x00, 0xe0, 0x00, 0x00, 0x77, 0x77, 0x77, 0x77, + 0x00, 0x00, 0x00, 0x14, 0x00, 0x00, + (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) / 256, + (IPA_PDU_HEADER_SIZE+sizeof(struct qeth_ipa_cmd)) % 256, + 0x10, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0xc1, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, + sizeof(struct qeth_ipa_cmd) / 256, + sizeof(struct qeth_ipa_cmd) % 256, + 0x00, + sizeof(struct qeth_ipa_cmd) / 256, + sizeof(struct qeth_ipa_cmd) % 256, + 0x05, + 0x77, 0x77, 0x77, 0x77, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x00, + sizeof(struct qeth_ipa_cmd) / 256, + sizeof(struct qeth_ipa_cmd) % 256, + 0x00, 0x00, 0x00, 0x40, +}; +EXPORT_SYMBOL_GPL(IPA_PDU_HEADER); + +struct ipa_rc_msg { + enum qeth_ipa_return_codes rc; + const char *msg; +}; + +static const struct ipa_rc_msg qeth_ipa_rc_msg[] = { + {IPA_RC_SUCCESS, "success"}, + {IPA_RC_NOTSUPP, "Command not supported"}, + {IPA_RC_IP_TABLE_FULL, "Add Addr IP Table Full - ipv6"}, + {IPA_RC_UNKNOWN_ERROR, "IPA command failed - reason unknown"}, + {IPA_RC_UNSUPPORTED_COMMAND, "Command not supported"}, + {IPA_RC_VNICC_OOSEQ, "Command issued out of sequence"}, + {IPA_RC_INVALID_FORMAT, "invalid format or length"}, + {IPA_RC_DUP_IPV6_REMOTE, "ipv6 address already registered remote"}, + {IPA_RC_SBP_IQD_NOT_CONFIGURED, "Not configured for bridgeport"}, + {IPA_RC_DUP_IPV6_HOME, "ipv6 address already registered"}, + {IPA_RC_UNREGISTERED_ADDR, "Address not registered"}, + {IPA_RC_NO_ID_AVAILABLE, "No identifiers available"}, + {IPA_RC_ID_NOT_FOUND, "Identifier not found"}, + {IPA_RC_SBP_IQD_ANO_DEV_PRIMARY, "Primary bridgeport exists already"}, + {IPA_RC_SBP_IQD_CURRENT_SECOND, "Bridgeport is currently secondary"}, + {IPA_RC_SBP_IQD_LIMIT_SECOND, "Limit of secondary bridgeports reached"}, + {IPA_RC_INVALID_IP_VERSION, "IP version incorrect"}, + {IPA_RC_SBP_IQD_CURRENT_PRIMARY, "Bridgeport is currently primary"}, + {IPA_RC_LAN_FRAME_MISMATCH, "LAN and frame mismatch"}, + {IPA_RC_SBP_IQD_NO_QDIO_QUEUES, "QDIO queues not established"}, + {IPA_RC_L2_UNSUPPORTED_CMD, "Unsupported layer 2 command"}, + {IPA_RC_L2_DUP_MAC, "Duplicate MAC address"}, + {IPA_RC_L2_ADDR_TABLE_FULL, "Layer2 address table full"}, + {IPA_RC_L2_DUP_LAYER3_MAC, "Duplicate with layer 3 MAC"}, + {IPA_RC_L2_GMAC_NOT_FOUND, "GMAC not found"}, + {IPA_RC_L2_MAC_NOT_AUTH_BY_HYP, "L2 mac not authorized by hypervisor"}, + {IPA_RC_L2_MAC_NOT_AUTH_BY_ADP, "L2 mac not authorized by adapter"}, + {IPA_RC_L2_MAC_NOT_FOUND, "L2 mac address not found"}, + {IPA_RC_L2_INVALID_VLAN_ID, "L2 invalid vlan id"}, + {IPA_RC_L2_DUP_VLAN_ID, "L2 duplicate vlan id"}, + {IPA_RC_L2_VLAN_ID_NOT_FOUND, "L2 vlan id not found"}, + {IPA_RC_VNICC_VNICBP, "VNIC is BridgePort"}, + {IPA_RC_SBP_OSA_NOT_CONFIGURED, "Not configured for bridgeport"}, + {IPA_RC_SBP_OSA_OS_MISMATCH, "OS mismatch"}, + {IPA_RC_SBP_OSA_ANO_DEV_PRIMARY, "Primary bridgeport exists already"}, + {IPA_RC_SBP_OSA_CURRENT_SECOND, 
"Bridgeport is currently secondary"}, + {IPA_RC_SBP_OSA_LIMIT_SECOND, "Limit of secondary bridgeports reached"}, + {IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN, "Not authorized by zManager"}, + {IPA_RC_SBP_OSA_CURRENT_PRIMARY, "Bridgeport is currently primary"}, + {IPA_RC_SBP_OSA_NO_QDIO_QUEUES, "QDIO queues not established"}, + {IPA_RC_DATA_MISMATCH, "Data field mismatch (v4/v6 mixed)"}, + {IPA_RC_INVALID_MTU_SIZE, "Invalid MTU size"}, + {IPA_RC_INVALID_LANTYPE, "Invalid LAN type"}, + {IPA_RC_INVALID_LANNUM, "Invalid LAN num"}, + {IPA_RC_DUPLICATE_IP_ADDRESS, "Address already registered"}, + {IPA_RC_IP_ADDR_TABLE_FULL, "IP address table full"}, + {IPA_RC_LAN_PORT_STATE_ERROR, "LAN port state error"}, + {IPA_RC_SETIP_NO_STARTLAN, "Setip no startlan received"}, + {IPA_RC_SETIP_ALREADY_RECEIVED, "Setip already received"}, + {IPA_RC_IP_ADDR_ALREADY_USED, "IP address already in use on LAN"}, + {IPA_RC_MC_ADDR_NOT_FOUND, "Multicast address not found"}, + {IPA_RC_SETIP_INVALID_VERSION, "SETIP invalid IP version"}, + {IPA_RC_UNSUPPORTED_SUBCMD, "Unsupported assist subcommand"}, + {IPA_RC_ARP_ASSIST_NO_ENABLE, "Only partial success, no enable"}, + {IPA_RC_PRIMARY_ALREADY_DEFINED, "Primary already defined"}, + {IPA_RC_SECOND_ALREADY_DEFINED, "Secondary already defined"}, + {IPA_RC_INVALID_SETRTG_INDICATOR, "Invalid SETRTG indicator"}, + {IPA_RC_MC_ADDR_ALREADY_DEFINED, "Multicast address already defined"}, + {IPA_RC_LAN_OFFLINE, "STRTLAN_LAN_DISABLED - LAN offline"}, + {IPA_RC_VEPA_TO_VEB_TRANSITION, "Adj. switch disabled port mode RR"}, + {IPA_RC_INVALID_IP_VERSION2, "Invalid IP version"}, + {IPA_RC_ENOMEM, "Memory problem"}, + {IPA_RC_FFFF, "Unknown Error"} +}; + + + +const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc) +{ + int x; + + for (x = 0; x < ARRAY_SIZE(qeth_ipa_rc_msg) - 1; x++) + if (qeth_ipa_rc_msg[x].rc == rc) + return qeth_ipa_rc_msg[x].msg; + return qeth_ipa_rc_msg[x].msg; +} + + +struct ipa_cmd_names { + enum qeth_ipa_cmds cmd; + const char *name; +}; + +static const struct ipa_cmd_names qeth_ipa_cmd_names[] = { + {IPA_CMD_STARTLAN, "startlan"}, + {IPA_CMD_STOPLAN, "stoplan"}, + {IPA_CMD_SETVMAC, "setvmac"}, + {IPA_CMD_DELVMAC, "delvmac"}, + {IPA_CMD_SETGMAC, "setgmac"}, + {IPA_CMD_DELGMAC, "delgmac"}, + {IPA_CMD_SETVLAN, "setvlan"}, + {IPA_CMD_DELVLAN, "delvlan"}, + {IPA_CMD_VNICC, "vnic_characteristics"}, + {IPA_CMD_SETBRIDGEPORT_OSA, "set_bridge_port(osa)"}, + {IPA_CMD_SETCCID, "setccid"}, + {IPA_CMD_DELCCID, "delccid"}, + {IPA_CMD_MODCCID, "modccid"}, + {IPA_CMD_SETIP, "setip"}, + {IPA_CMD_QIPASSIST, "qipassist"}, + {IPA_CMD_SETASSPARMS, "setassparms"}, + {IPA_CMD_SETIPM, "setipm"}, + {IPA_CMD_DELIPM, "delipm"}, + {IPA_CMD_SETRTG, "setrtg"}, + {IPA_CMD_DELIP, "delip"}, + {IPA_CMD_SETADAPTERPARMS, "setadapterparms"}, + {IPA_CMD_SET_DIAG_ASS, "set_diag_ass"}, + {IPA_CMD_SETBRIDGEPORT_IQD, "set_bridge_port(hs)"}, + {IPA_CMD_CREATE_ADDR, "create_addr"}, + {IPA_CMD_DESTROY_ADDR, "destroy_addr"}, + {IPA_CMD_REGISTER_LOCAL_ADDR, "register_local_addr"}, + {IPA_CMD_UNREGISTER_LOCAL_ADDR, "unregister_local_addr"}, + {IPA_CMD_ADDRESS_CHANGE_NOTIF, "address_change_notification"}, + {IPA_CMD_UNKNOWN, "unknown"}, +}; + +const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd) +{ + int x; + + for (x = 0; x < ARRAY_SIZE(qeth_ipa_cmd_names) - 1; x++) + if (qeth_ipa_cmd_names[x].cmd == cmd) + return qeth_ipa_cmd_names[x].name; + return qeth_ipa_cmd_names[x].name; +} diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h new file mode 100644 index 000000000..aa5de1fe0 --- 
/dev/null +++ b/drivers/s390/net/qeth_core_mpc.h @@ -0,0 +1,910 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 2007 + * Author(s): Frank Pavlic <fpavlic@de.ibm.com>, + * Thomas Spatzier <tspat@de.ibm.com>, + * Frank Blaschka <frank.blaschka@de.ibm.com> + */ + +#ifndef __QETH_CORE_MPC_H__ +#define __QETH_CORE_MPC_H__ + +#include <asm/qeth.h> +#include <uapi/linux/if_ether.h> + +#define IPA_PDU_HEADER_SIZE 0x40 +#define QETH_IPA_PDU_LEN_TOTAL(buffer) (buffer + 0x0e) +#define QETH_IPA_PDU_LEN_PDU1(buffer) (buffer + 0x26) +#define QETH_IPA_PDU_LEN_PDU2(buffer) (buffer + 0x29) +#define QETH_IPA_PDU_LEN_PDU3(buffer) (buffer + 0x3a) + +extern unsigned char IPA_PDU_HEADER[]; +#define QETH_IPA_CMD_DEST_ADDR(buffer) (buffer + 0x2c) + +#define IPA_CMD_LENGTH (IPA_PDU_HEADER_SIZE + sizeof(struct qeth_ipa_cmd)) + +#define QETH_SEQ_NO_LENGTH 4 +#define QETH_MPC_TOKEN_LENGTH 4 +#define QETH_MCL_LENGTH 4 + +#define QETH_TIMEOUT (10 * HZ) +#define QETH_IPA_TIMEOUT (45 * HZ) +#define QETH_IDX_COMMAND_SEQNO 0xffff0000 + +#define QETH_CLEAR_CHANNEL_PARM -10 +#define QETH_HALT_CHANNEL_PARM -11 +#define QETH_RCD_PARM -12 + +static inline bool qeth_intparm_is_iob(unsigned long intparm) +{ + switch (intparm) { + case QETH_CLEAR_CHANNEL_PARM: + case QETH_HALT_CHANNEL_PARM: + case QETH_RCD_PARM: + case 0: + return false; + } + return true; +} + +/*****************************************************************************/ +/* IP Assist related definitions */ +/*****************************************************************************/ +#define IPA_CMD_INITIATOR_HOST 0x00 +#define IPA_CMD_INITIATOR_OSA 0x01 +#define IPA_CMD_INITIATOR_HOST_REPLY 0x80 +#define IPA_CMD_INITIATOR_OSA_REPLY 0x81 +#define IPA_CMD_PRIM_VERSION_NO 0x01 + +enum qeth_card_types { + QETH_CARD_TYPE_OSD = 1, + QETH_CARD_TYPE_IQD = 5, + QETH_CARD_TYPE_OSN = 6, + QETH_CARD_TYPE_OSM = 3, + QETH_CARD_TYPE_OSX = 2, +}; + +#define IS_IQD(card) ((card)->info.type == QETH_CARD_TYPE_IQD) +#define IS_OSN(card) ((card)->info.type == QETH_CARD_TYPE_OSN) + +#define QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE 0x18 +/* only the first two bytes are looked at in qeth_get_cardname_short */ +enum qeth_link_types { + QETH_LINK_TYPE_FAST_ETH = 0x01, + QETH_LINK_TYPE_HSTR = 0x02, + QETH_LINK_TYPE_GBIT_ETH = 0x03, + QETH_LINK_TYPE_OSN = 0x04, + QETH_LINK_TYPE_10GBIT_ETH = 0x10, + QETH_LINK_TYPE_LANE_ETH100 = 0x81, + QETH_LINK_TYPE_LANE_TR = 0x82, + QETH_LINK_TYPE_LANE_ETH1000 = 0x83, + QETH_LINK_TYPE_LANE = 0x88, +}; + +/* + * Routing stuff + */ +#define RESET_ROUTING_FLAG 0x10 /* indicate that routing type shall be set */ +enum qeth_routing_types { + /* TODO: set to bit flag used in IPA Command */ + NO_ROUTER = 0, + PRIMARY_ROUTER = 1, + SECONDARY_ROUTER = 2, + MULTICAST_ROUTER = 3, + PRIMARY_CONNECTOR = 4, + SECONDARY_CONNECTOR = 5, +}; + +/* IPA Commands */ +enum qeth_ipa_cmds { + IPA_CMD_STARTLAN = 0x01, + IPA_CMD_STOPLAN = 0x02, + IPA_CMD_SETVMAC = 0x21, + IPA_CMD_DELVMAC = 0x22, + IPA_CMD_SETGMAC = 0x23, + IPA_CMD_DELGMAC = 0x24, + IPA_CMD_SETVLAN = 0x25, + IPA_CMD_DELVLAN = 0x26, + IPA_CMD_VNICC = 0x2a, + IPA_CMD_SETBRIDGEPORT_OSA = 0x2b, + IPA_CMD_SETCCID = 0x41, + IPA_CMD_DELCCID = 0x42, + IPA_CMD_MODCCID = 0x43, + IPA_CMD_SETIP = 0xb1, + IPA_CMD_QIPASSIST = 0xb2, + IPA_CMD_SETASSPARMS = 0xb3, + IPA_CMD_SETIPM = 0xb4, + IPA_CMD_DELIPM = 0xb5, + IPA_CMD_SETRTG = 0xb6, + IPA_CMD_DELIP = 0xb7, + IPA_CMD_SETADAPTERPARMS = 0xb8, + IPA_CMD_SET_DIAG_ASS = 0xb9, + IPA_CMD_SETBRIDGEPORT_IQD = 0xbe, + IPA_CMD_CREATE_ADDR = 0xc3, + 
IPA_CMD_DESTROY_ADDR = 0xc4, + IPA_CMD_REGISTER_LOCAL_ADDR = 0xd1, + IPA_CMD_UNREGISTER_LOCAL_ADDR = 0xd2, + IPA_CMD_ADDRESS_CHANGE_NOTIF = 0xd3, + IPA_CMD_UNKNOWN = 0x00 +}; + +enum qeth_ip_ass_cmds { + IPA_CMD_ASS_START = 0x0001, + IPA_CMD_ASS_STOP = 0x0002, + IPA_CMD_ASS_CONFIGURE = 0x0003, + IPA_CMD_ASS_ENABLE = 0x0004, +}; + +enum qeth_arp_process_subcmds { + IPA_CMD_ASS_ARP_SET_NO_ENTRIES = 0x0003, + IPA_CMD_ASS_ARP_QUERY_CACHE = 0x0004, + IPA_CMD_ASS_ARP_ADD_ENTRY = 0x0005, + IPA_CMD_ASS_ARP_REMOVE_ENTRY = 0x0006, + IPA_CMD_ASS_ARP_FLUSH_CACHE = 0x0007, + IPA_CMD_ASS_ARP_QUERY_INFO = 0x0104, + IPA_CMD_ASS_ARP_QUERY_STATS = 0x0204, +}; + + +/* Return Codes for IPA Commands + * according to OSA card Specs */ + +enum qeth_ipa_return_codes { + IPA_RC_SUCCESS = 0x0000, + IPA_RC_NOTSUPP = 0x0001, + IPA_RC_IP_TABLE_FULL = 0x0002, + IPA_RC_UNKNOWN_ERROR = 0x0003, + IPA_RC_UNSUPPORTED_COMMAND = 0x0004, + IPA_RC_TRACE_ALREADY_ACTIVE = 0x0005, + IPA_RC_INVALID_FORMAT = 0x0006, + IPA_RC_DUP_IPV6_REMOTE = 0x0008, + IPA_RC_SBP_IQD_NOT_CONFIGURED = 0x000C, + IPA_RC_DUP_IPV6_HOME = 0x0010, + IPA_RC_UNREGISTERED_ADDR = 0x0011, + IPA_RC_NO_ID_AVAILABLE = 0x0012, + IPA_RC_ID_NOT_FOUND = 0x0013, + IPA_RC_SBP_IQD_ANO_DEV_PRIMARY = 0x0014, + IPA_RC_SBP_IQD_CURRENT_SECOND = 0x0018, + IPA_RC_SBP_IQD_LIMIT_SECOND = 0x001C, + IPA_RC_INVALID_IP_VERSION = 0x0020, + IPA_RC_SBP_IQD_CURRENT_PRIMARY = 0x0024, + IPA_RC_LAN_FRAME_MISMATCH = 0x0040, + IPA_RC_SBP_IQD_NO_QDIO_QUEUES = 0x00EB, + IPA_RC_L2_UNSUPPORTED_CMD = 0x2003, + IPA_RC_L2_DUP_MAC = 0x2005, + IPA_RC_L2_ADDR_TABLE_FULL = 0x2006, + IPA_RC_L2_DUP_LAYER3_MAC = 0x200a, + IPA_RC_L2_GMAC_NOT_FOUND = 0x200b, + IPA_RC_L2_MAC_NOT_AUTH_BY_HYP = 0x200c, + IPA_RC_L2_MAC_NOT_AUTH_BY_ADP = 0x200d, + IPA_RC_L2_MAC_NOT_FOUND = 0x2010, + IPA_RC_L2_INVALID_VLAN_ID = 0x2015, + IPA_RC_L2_DUP_VLAN_ID = 0x2016, + IPA_RC_L2_VLAN_ID_NOT_FOUND = 0x2017, + IPA_RC_L2_VLAN_ID_NOT_ALLOWED = 0x2050, + IPA_RC_VNICC_VNICBP = 0x20B0, + IPA_RC_SBP_OSA_NOT_CONFIGURED = 0x2B0C, + IPA_RC_SBP_OSA_OS_MISMATCH = 0x2B10, + IPA_RC_SBP_OSA_ANO_DEV_PRIMARY = 0x2B14, + IPA_RC_SBP_OSA_CURRENT_SECOND = 0x2B18, + IPA_RC_SBP_OSA_LIMIT_SECOND = 0x2B1C, + IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN = 0x2B20, + IPA_RC_SBP_OSA_CURRENT_PRIMARY = 0x2B24, + IPA_RC_SBP_OSA_NO_QDIO_QUEUES = 0x2BEB, + IPA_RC_DATA_MISMATCH = 0xe001, + IPA_RC_INVALID_MTU_SIZE = 0xe002, + IPA_RC_INVALID_LANTYPE = 0xe003, + IPA_RC_INVALID_LANNUM = 0xe004, + IPA_RC_DUPLICATE_IP_ADDRESS = 0xe005, + IPA_RC_IP_ADDR_TABLE_FULL = 0xe006, + IPA_RC_LAN_PORT_STATE_ERROR = 0xe007, + IPA_RC_SETIP_NO_STARTLAN = 0xe008, + IPA_RC_SETIP_ALREADY_RECEIVED = 0xe009, + IPA_RC_IP_ADDR_ALREADY_USED = 0xe00a, + IPA_RC_MC_ADDR_NOT_FOUND = 0xe00b, + IPA_RC_SETIP_INVALID_VERSION = 0xe00d, + IPA_RC_UNSUPPORTED_SUBCMD = 0xe00e, + IPA_RC_ARP_ASSIST_NO_ENABLE = 0xe00f, + IPA_RC_PRIMARY_ALREADY_DEFINED = 0xe010, + IPA_RC_SECOND_ALREADY_DEFINED = 0xe011, + IPA_RC_INVALID_SETRTG_INDICATOR = 0xe012, + IPA_RC_MC_ADDR_ALREADY_DEFINED = 0xe013, + IPA_RC_LAN_OFFLINE = 0xe080, + IPA_RC_VEPA_TO_VEB_TRANSITION = 0xe090, + IPA_RC_INVALID_IP_VERSION2 = 0xf001, + IPA_RC_ENOMEM = 0xfffe, + IPA_RC_FFFF = 0xffff +}; +/* for VNIC Characteristics */ +#define IPA_RC_VNICC_OOSEQ 0x0005 + +/* for SET_DIAGNOSTIC_ASSIST */ +#define IPA_RC_INVALID_SUBCMD IPA_RC_IP_TABLE_FULL +#define IPA_RC_HARDWARE_AUTH_ERROR IPA_RC_UNKNOWN_ERROR + +/* for SETBRIDGEPORT (double occupancies) */ +#define IPA_RC_SBP_IQD_OS_MISMATCH IPA_RC_DUP_IPV6_HOME +#define IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN 
IPA_RC_INVALID_IP_VERSION + +/* IPA function flags; each flag marks availability of respective function */ +enum qeth_ipa_funcs { + IPA_ARP_PROCESSING = 0x00000001L, + IPA_INBOUND_CHECKSUM = 0x00000002L, + IPA_OUTBOUND_CHECKSUM = 0x00000004L, + /* RESERVED = 0x00000008L,*/ + IPA_FILTERING = 0x00000010L, + IPA_IPV6 = 0x00000020L, + IPA_MULTICASTING = 0x00000040L, + IPA_IP_REASSEMBLY = 0x00000080L, + IPA_QUERY_ARP_COUNTERS = 0x00000100L, + IPA_QUERY_ARP_ADDR_INFO = 0x00000200L, + IPA_SETADAPTERPARMS = 0x00000400L, + IPA_VLAN_PRIO = 0x00000800L, + IPA_PASSTHRU = 0x00001000L, + IPA_FLUSH_ARP_SUPPORT = 0x00002000L, + IPA_FULL_VLAN = 0x00004000L, + IPA_INBOUND_PASSTHRU = 0x00008000L, + IPA_SOURCE_MAC = 0x00010000L, + IPA_OSA_MC_ROUTER = 0x00020000L, + IPA_QUERY_ARP_ASSIST = 0x00040000L, + IPA_INBOUND_TSO = 0x00080000L, + IPA_OUTBOUND_TSO = 0x00100000L, + IPA_INBOUND_CHECKSUM_V6 = 0x00400000L, + IPA_OUTBOUND_CHECKSUM_V6 = 0x00800000L, +}; + +/* SETIP/DELIP IPA Command: ***************************************************/ +enum qeth_ipa_setdelip_flags { + QETH_IPA_SETDELIP_DEFAULT = 0x00L, /* default */ + QETH_IPA_SETIP_VIPA_FLAG = 0x01L, /* no grat. ARP */ + QETH_IPA_SETIP_TAKEOVER_FLAG = 0x02L, /* nofail on grat. ARP */ + QETH_IPA_DELIP_ADDR_2_B_TAKEN_OVER = 0x20L, + QETH_IPA_DELIP_VIPA_FLAG = 0x40L, + QETH_IPA_DELIP_ADDR_NEEDS_SETIP = 0x80L, +}; + +/* SETADAPTER IPA Command: ****************************************************/ +enum qeth_ipa_setadp_cmd { + IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x00000001L, + IPA_SETADP_ALTER_MAC_ADDRESS = 0x00000002L, + IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x00000004L, + IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x00000008L, + IPA_SETADP_SET_ADDRESSING_MODE = 0x00000010L, + IPA_SETADP_SET_CONFIG_PARMS = 0x00000020L, + IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x00000040L, + IPA_SETADP_SET_BROADCAST_MODE = 0x00000080L, + IPA_SETADP_SEND_OSA_MESSAGE = 0x00000100L, + IPA_SETADP_SET_SNMP_CONTROL = 0x00000200L, + IPA_SETADP_QUERY_CARD_INFO = 0x00000400L, + IPA_SETADP_SET_PROMISC_MODE = 0x00000800L, + IPA_SETADP_SET_DIAG_ASSIST = 0x00002000L, + IPA_SETADP_SET_ACCESS_CONTROL = 0x00010000L, + IPA_SETADP_QUERY_OAT = 0x00080000L, + IPA_SETADP_QUERY_SWITCH_ATTRIBUTES = 0x00100000L, +}; +enum qeth_ipa_mac_ops { + CHANGE_ADDR_READ_MAC = 0, + CHANGE_ADDR_REPLACE_MAC = 1, + CHANGE_ADDR_ADD_MAC = 2, + CHANGE_ADDR_DEL_MAC = 4, + CHANGE_ADDR_RESET_MAC = 8, +}; +enum qeth_ipa_addr_ops { + CHANGE_ADDR_READ_ADDR = 0, + CHANGE_ADDR_ADD_ADDR = 1, + CHANGE_ADDR_DEL_ADDR = 2, + CHANGE_ADDR_FLUSH_ADDR_TABLE = 4, +}; +enum qeth_ipa_promisc_modes { + SET_PROMISC_MODE_OFF = 0, + SET_PROMISC_MODE_ON = 1, +}; +enum qeth_ipa_isolation_modes { + ISOLATION_MODE_NONE = 0x00000000L, + ISOLATION_MODE_FWD = 0x00000001L, + ISOLATION_MODE_DROP = 0x00000002L, +}; +enum qeth_ipa_set_access_mode_rc { + SET_ACCESS_CTRL_RC_SUCCESS = 0x0000, + SET_ACCESS_CTRL_RC_NOT_SUPPORTED = 0x0004, + SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED = 0x0008, + SET_ACCESS_CTRL_RC_ALREADY_ISOLATED = 0x0010, + SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER = 0x0014, + SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF = 0x0018, + SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED = 0x0022, + SET_ACCESS_CTRL_RC_REFLREL_FAILED = 0x0024, + SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED = 0x0028, +}; +enum qeth_card_info_card_type { + CARD_INFO_TYPE_1G_COPPER_A = 0x61, + CARD_INFO_TYPE_1G_FIBRE_A = 0x71, + CARD_INFO_TYPE_10G_FIBRE_A = 0x91, + CARD_INFO_TYPE_1G_COPPER_B = 0xb1, + CARD_INFO_TYPE_1G_FIBRE_B = 0xa1, + CARD_INFO_TYPE_10G_FIBRE_B = 0xc1, +}; +enum 
qeth_card_info_port_mode { + CARD_INFO_PORTM_HALFDUPLEX = 0x0002, + CARD_INFO_PORTM_FULLDUPLEX = 0x0003, +}; +enum qeth_card_info_port_speed { + CARD_INFO_PORTS_10M = 0x00000005, + CARD_INFO_PORTS_100M = 0x00000006, + CARD_INFO_PORTS_1G = 0x00000007, + CARD_INFO_PORTS_10G = 0x00000008, +}; + +/* (SET)DELIP(M) IPA stuff ***************************************************/ +struct qeth_ipacmd_setdelip4 { + __u8 ip_addr[4]; + __u8 mask[4]; + __u32 flags; +} __attribute__ ((packed)); + +struct qeth_ipacmd_setdelip6 { + __u8 ip_addr[16]; + __u8 mask[16]; + __u32 flags; +} __attribute__ ((packed)); + +struct qeth_ipacmd_setdelipm { + __u8 mac[6]; + __u8 padding[2]; + __u8 ip6[12]; + __u8 ip4[4]; +} __attribute__ ((packed)); + +struct qeth_ipacmd_layer2setdelmac { + __u32 mac_length; + __u8 mac[6]; +} __attribute__ ((packed)); + +struct qeth_ipacmd_layer2setdelvlan { + __u16 vlan_id; +} __attribute__ ((packed)); + + +struct qeth_ipacmd_setassparms_hdr { + __u32 assist_no; + __u16 length; + __u16 command_code; + __u16 return_code; + __u8 number_of_replies; + __u8 seq_no; +} __attribute__((packed)); + +struct qeth_arp_query_data { + __u16 request_bits; + __u16 reply_bits; + __u32 no_entries; + char data; /* only for replies */ +} __attribute__((packed)); + +/* used as parameter for arp_query reply */ +struct qeth_arp_query_info { + __u32 udata_len; + __u16 mask_bits; + __u32 udata_offset; + __u32 no_entries; + char *udata; +}; + +/* IPA set assist segmentation bit definitions for receive and + * transmit checksum offloading. + */ +enum qeth_ipa_checksum_bits { + QETH_IPA_CHECKSUM_IP_HDR = 0x0002, + QETH_IPA_CHECKSUM_UDP = 0x0008, + QETH_IPA_CHECKSUM_TCP = 0x0010, + QETH_IPA_CHECKSUM_LP2LP = 0x0020 +}; + +/* IPA Assist checksum offload reply layout. */ +struct qeth_checksum_cmd { + __u32 supported; + __u32 enabled; +} __packed; + +/* SETASSPARMS IPA Command: */ +struct qeth_ipacmd_setassparms { + struct qeth_ipacmd_setassparms_hdr hdr; + union { + __u32 flags_32bit; + struct qeth_checksum_cmd chksum; + struct qeth_arp_cache_entry add_arp_entry; + struct qeth_arp_query_data query_arp; + __u8 ip[16]; + } data; +} __attribute__ ((packed)); + + +/* SETRTG IPA Command: ****************************************************/ +struct qeth_set_routing { + __u8 type; +}; + +/* SETADAPTERPARMS IPA Command: *******************************************/ +struct qeth_query_cmds_supp { + __u32 no_lantypes_supp; + __u8 lan_type; + __u8 reserved1[3]; + __u32 supported_cmds; + __u8 reserved2[8]; +} __attribute__ ((packed)); + +struct qeth_change_addr { + u32 cmd; + u32 addr_size; + u32 no_macs; + u8 addr[ETH_ALEN]; +}; + +struct qeth_snmp_cmd { + __u8 token[16]; + __u32 request; + __u32 interface; + __u32 returncode; + __u32 firmwarelevel; + __u32 seqno; + __u8 data; +} __attribute__ ((packed)); + +struct qeth_snmp_ureq_hdr { + __u32 data_len; + __u32 req_len; + __u32 reserved1; + __u32 reserved2; +} __attribute__ ((packed)); + +struct qeth_snmp_ureq { + struct qeth_snmp_ureq_hdr hdr; + struct qeth_snmp_cmd cmd; +} __attribute__((packed)); + +/* SET_ACCESS_CONTROL: same format for request and reply */ +struct qeth_set_access_ctrl { + __u32 subcmd_code; + __u8 reserved[8]; +} __attribute__((packed)); + +struct qeth_query_oat { + __u32 subcmd_code; + __u8 reserved[12]; +} __packed; + +struct qeth_qoat_priv { + __u32 buffer_len; + __u32 response_len; + char *buffer; +}; + +struct qeth_query_card_info { + __u8 card_type; + __u8 reserved1; + __u16 port_mode; + __u32 port_speed; + __u32 reserved2; +}; + +#define 
QETH_SWITCH_FORW_802_1 0x00000001 +#define QETH_SWITCH_FORW_REFL_RELAY 0x00000002 +#define QETH_SWITCH_CAP_RTE 0x00000004 +#define QETH_SWITCH_CAP_ECP 0x00000008 +#define QETH_SWITCH_CAP_VDP 0x00000010 + +struct qeth_query_switch_attributes { + __u8 version; + __u8 reserved1; + __u16 reserved2; + __u32 capabilities; + __u32 settings; + __u8 reserved3[8]; +}; + +struct qeth_ipacmd_setadpparms_hdr { + __u32 supp_hw_cmds; + __u32 reserved1; + __u16 cmdlength; + __u16 reserved2; + __u32 command_code; + __u16 return_code; + __u8 used_total; + __u8 seq_no; + __u32 reserved3; +} __attribute__ ((packed)); + +struct qeth_ipacmd_setadpparms { + struct qeth_ipacmd_setadpparms_hdr hdr; + union { + struct qeth_query_cmds_supp query_cmds_supp; + struct qeth_change_addr change_addr; + struct qeth_snmp_cmd snmp; + struct qeth_set_access_ctrl set_access_ctrl; + struct qeth_query_oat query_oat; + struct qeth_query_card_info card_info; + struct qeth_query_switch_attributes query_switch_attributes; + __u32 mode; + } data; +} __attribute__ ((packed)); + +/* CREATE_ADDR IPA Command: ***********************************************/ +struct qeth_create_destroy_address { + __u8 unique_id[8]; +} __attribute__ ((packed)); + +/* SET DIAGNOSTIC ASSIST IPA Command: *************************************/ + +enum qeth_diags_cmds { + QETH_DIAGS_CMD_QUERY = 0x0001, + QETH_DIAGS_CMD_TRAP = 0x0002, + QETH_DIAGS_CMD_TRACE = 0x0004, + QETH_DIAGS_CMD_NOLOG = 0x0008, + QETH_DIAGS_CMD_DUMP = 0x0010, +}; + +enum qeth_diags_trace_types { + QETH_DIAGS_TYPE_HIPERSOCKET = 0x02, +}; + +enum qeth_diags_trace_cmds { + QETH_DIAGS_CMD_TRACE_ENABLE = 0x0001, + QETH_DIAGS_CMD_TRACE_DISABLE = 0x0002, + QETH_DIAGS_CMD_TRACE_MODIFY = 0x0004, + QETH_DIAGS_CMD_TRACE_REPLACE = 0x0008, + QETH_DIAGS_CMD_TRACE_QUERY = 0x0010, +}; + +enum qeth_diags_trap_action { + QETH_DIAGS_TRAP_ARM = 0x01, + QETH_DIAGS_TRAP_DISARM = 0x02, + QETH_DIAGS_TRAP_CAPTURE = 0x04, +}; + +struct qeth_ipacmd_diagass { + __u32 host_tod2; + __u32:32; + __u16 subcmd_len; + __u16:16; + __u32 subcmd; + __u8 type; + __u8 action; + __u16 options; + __u32 ext; + __u8 cdata[64]; +} __attribute__ ((packed)); + +/* VNIC Characteristics IPA Command: *****************************************/ +/* IPA commands/sub commands for VNICC */ +#define IPA_VNICC_QUERY_CHARS 0x00000000L +#define IPA_VNICC_QUERY_CMDS 0x00000001L +#define IPA_VNICC_ENABLE 0x00000002L +#define IPA_VNICC_DISABLE 0x00000004L +#define IPA_VNICC_SET_TIMEOUT 0x00000008L +#define IPA_VNICC_GET_TIMEOUT 0x00000010L + +/* VNICC flags */ +#define QETH_VNICC_FLOODING 0x80000000 +#define QETH_VNICC_MCAST_FLOODING 0x40000000 +#define QETH_VNICC_LEARNING 0x20000000 +#define QETH_VNICC_TAKEOVER_SETVMAC 0x10000000 +#define QETH_VNICC_TAKEOVER_LEARNING 0x08000000 +#define QETH_VNICC_BRIDGE_INVISIBLE 0x04000000 +#define QETH_VNICC_RX_BCAST 0x02000000 + +/* VNICC default values */ +#define QETH_VNICC_ALL 0xff000000 +#define QETH_VNICC_DEFAULT QETH_VNICC_RX_BCAST +/* default VNICC timeout in seconds */ +#define QETH_VNICC_DEFAULT_TIMEOUT 600 + +/* VNICC header */ +struct qeth_ipacmd_vnicc_hdr { + u32 sup; + u32 cur; +}; + +/* VNICC sub command header */ +struct qeth_vnicc_sub_hdr { + u16 data_length; + u16 reserved; + u32 sub_command; +}; + +/* query supported commands for VNIC characteristic */ +struct qeth_vnicc_query_cmds { + u32 vnic_char; + u32 sup_cmds; +}; + +/* enable/disable VNIC characteristic */ +struct qeth_vnicc_set_char { + u32 vnic_char; +}; + +/* get/set timeout for VNIC characteristic */ +struct qeth_vnicc_getset_timeout 
{ + u32 vnic_char; + u32 timeout; +}; + +/* complete VNICC IPA command message */ +struct qeth_ipacmd_vnicc { + struct qeth_ipacmd_vnicc_hdr hdr; + struct qeth_vnicc_sub_hdr sub_hdr; + union { + struct qeth_vnicc_query_cmds query_cmds; + struct qeth_vnicc_set_char set_char; + struct qeth_vnicc_getset_timeout getset_timeout; + }; +}; + +/* SETBRIDGEPORT IPA Command: *********************************************/ +enum qeth_ipa_sbp_cmd { + IPA_SBP_QUERY_COMMANDS_SUPPORTED = 0x00000000L, + IPA_SBP_RESET_BRIDGE_PORT_ROLE = 0x00000001L, + IPA_SBP_SET_PRIMARY_BRIDGE_PORT = 0x00000002L, + IPA_SBP_SET_SECONDARY_BRIDGE_PORT = 0x00000004L, + IPA_SBP_QUERY_BRIDGE_PORTS = 0x00000008L, + IPA_SBP_BRIDGE_PORT_STATE_CHANGE = 0x00000010L, +}; + +struct net_if_token { + __u16 devnum; + __u8 cssid; + __u8 iid; + __u8 ssid; + __u8 chpid; + __u16 chid; +} __packed; + +struct mac_addr_lnid { + __u8 mac[6]; + __u16 lnid; +} __packed; + +struct qeth_ipacmd_sbp_hdr { + __u32 supported_sbp_cmds; + __u32 enabled_sbp_cmds; + __u16 cmdlength; + __u16 reserved1; + __u32 command_code; + __u16 return_code; + __u8 used_total; + __u8 seq_no; + __u32 reserved2; +} __packed; + +struct qeth_sbp_query_cmds_supp { + __u32 supported_cmds; + __u32 reserved; +} __packed; + +struct qeth_sbp_reset_role { +} __packed; + +struct qeth_sbp_set_primary { + struct net_if_token token; +} __packed; + +struct qeth_sbp_set_secondary { +} __packed; + +struct qeth_sbp_port_entry { + __u8 role; + __u8 state; + __u8 reserved1; + __u8 reserved2; + struct net_if_token token; +} __packed; + +struct qeth_sbp_query_ports { + __u8 primary_bp_supported; + __u8 secondary_bp_supported; + __u8 num_entries; + __u8 entry_length; + struct qeth_sbp_port_entry entry[]; +} __packed; + +struct qeth_sbp_state_change { + __u8 primary_bp_supported; + __u8 secondary_bp_supported; + __u8 num_entries; + __u8 entry_length; + struct qeth_sbp_port_entry entry[]; +} __packed; + +struct qeth_ipacmd_setbridgeport { + struct qeth_ipacmd_sbp_hdr hdr; + union { + struct qeth_sbp_query_cmds_supp query_cmds_supp; + struct qeth_sbp_reset_role reset_role; + struct qeth_sbp_set_primary set_primary; + struct qeth_sbp_set_secondary set_secondary; + struct qeth_sbp_query_ports query_ports; + struct qeth_sbp_state_change state_change; + } data; +} __packed; + +/* ADDRESS_CHANGE_NOTIFICATION adapter-initiated "command" *******************/ +/* Bitmask for entry->change_code. Both bits may be raised. 
*/ +enum qeth_ipa_addr_change_code { + IPA_ADDR_CHANGE_CODE_VLANID = 0x01, + IPA_ADDR_CHANGE_CODE_MACADDR = 0x02, + IPA_ADDR_CHANGE_CODE_REMOVAL = 0x80, /* else addition */ +}; + +struct qeth_ipacmd_addr_change_entry { + struct net_if_token token; + struct mac_addr_lnid addr_lnid; + __u8 change_code; + __u8 reserved1; + __u16 reserved2; +} __packed; + +struct qeth_ipacmd_addr_change { + __u8 lost_event_mask; + __u8 reserved; + __u16 num_entries; + struct qeth_ipacmd_addr_change_entry entry[]; +} __packed; + +/* Header for each IPA command */ +struct qeth_ipacmd_hdr { + __u8 command; + __u8 initiator; + __u16 seqno; + __u16 return_code; + __u8 adapter_type; + __u8 rel_adapter_no; + __u8 prim_version_no; + __u8 param_count; + __u16 prot_version; + __u32 ipa_supported; + __u32 ipa_enabled; +} __attribute__ ((packed)); + +/* The IPA command itself */ +struct qeth_ipa_cmd { + struct qeth_ipacmd_hdr hdr; + union { + struct qeth_ipacmd_setdelip4 setdelip4; + struct qeth_ipacmd_setdelip6 setdelip6; + struct qeth_ipacmd_setdelipm setdelipm; + struct qeth_ipacmd_setassparms setassparms; + struct qeth_ipacmd_layer2setdelmac setdelmac; + struct qeth_ipacmd_layer2setdelvlan setdelvlan; + struct qeth_create_destroy_address create_destroy_addr; + struct qeth_ipacmd_setadpparms setadapterparms; + struct qeth_set_routing setrtg; + struct qeth_ipacmd_diagass diagass; + struct qeth_ipacmd_setbridgeport sbp; + struct qeth_ipacmd_addr_change addrchange; + struct qeth_ipacmd_vnicc vnicc; + } data; +} __attribute__ ((packed)); + +/* + * special command for ARP processing. + * this is not included in setassparms command before, because we get + * problem with the size of struct qeth_ipacmd_setassparms otherwise + */ +enum qeth_ipa_arp_return_codes { + QETH_IPA_ARP_RC_SUCCESS = 0x0000, + QETH_IPA_ARP_RC_FAILED = 0x0001, + QETH_IPA_ARP_RC_NOTSUPP = 0x0002, + QETH_IPA_ARP_RC_OUT_OF_RANGE = 0x0003, + QETH_IPA_ARP_RC_Q_NOTSUPP = 0x0004, + QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008, +}; + +extern const char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc); +extern const char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd); + +#define QETH_SETASS_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \ + sizeof(struct qeth_ipacmd_setassparms_hdr)) +#define QETH_IPA_ARP_DATA_POS(buffer) (buffer + IPA_PDU_HEADER_SIZE + \ + QETH_SETASS_BASE_LEN) +#define QETH_SETADP_BASE_LEN (sizeof(struct qeth_ipacmd_hdr) + \ + sizeof(struct qeth_ipacmd_setadpparms_hdr)) +#define QETH_SNMP_SETADP_CMDLENGTH 16 + +#define QETH_ARP_DATA_SIZE 3968 +#define QETH_ARP_CMD_LEN (QETH_ARP_DATA_SIZE + 8) +/* Helper functions */ +#define IS_IPA_REPLY(cmd) ((cmd->hdr.initiator == IPA_CMD_INITIATOR_HOST) || \ + (cmd->hdr.initiator == IPA_CMD_INITIATOR_OSA_REPLY)) + +/*****************************************************************************/ +/* END OF IP Assist related definitions */ +/*****************************************************************************/ + +extern unsigned char CM_ENABLE[]; +#define CM_ENABLE_SIZE 0x63 +#define QETH_CM_ENABLE_ISSUER_RM_TOKEN(buffer) (buffer + 0x2c) +#define QETH_CM_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53) +#define QETH_CM_ENABLE_USER_DATA(buffer) (buffer + 0x5b) + +#define QETH_CM_ENABLE_RESP_FILTER_TOKEN(buffer) \ + (PDU_ENCAPSULATION(buffer) + 0x13) + + +extern unsigned char CM_SETUP[]; +#define CM_SETUP_SIZE 0x64 +#define QETH_CM_SETUP_DEST_ADDR(buffer) (buffer + 0x2c) +#define QETH_CM_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51) +#define QETH_CM_SETUP_FILTER_TOKEN(buffer) (buffer + 0x5a) + +#define 
QETH_CM_SETUP_RESP_DEST_ADDR(buffer) \ + (PDU_ENCAPSULATION(buffer) + 0x1a) + +extern unsigned char ULP_ENABLE[]; +#define ULP_ENABLE_SIZE 0x6b +#define QETH_ULP_ENABLE_LINKNUM(buffer) (buffer + 0x61) +#define QETH_ULP_ENABLE_DEST_ADDR(buffer) (buffer + 0x2c) +#define QETH_ULP_ENABLE_FILTER_TOKEN(buffer) (buffer + 0x53) +#define QETH_ULP_ENABLE_PORTNAME_AND_LL(buffer) (buffer + 0x62) +#define QETH_ULP_ENABLE_RESP_FILTER_TOKEN(buffer) \ + (PDU_ENCAPSULATION(buffer) + 0x13) +#define QETH_ULP_ENABLE_RESP_MAX_MTU(buffer) \ + (PDU_ENCAPSULATION(buffer) + 0x1f) +#define QETH_ULP_ENABLE_RESP_DIFINFO_LEN(buffer) \ + (PDU_ENCAPSULATION(buffer) + 0x17) +#define QETH_ULP_ENABLE_RESP_LINK_TYPE(buffer) \ + (PDU_ENCAPSULATION(buffer) + 0x2b) +/* Layer 2 definitions */ +#define QETH_PROT_LAYER2 0x08 +#define QETH_PROT_TCPIP 0x03 +#define QETH_PROT_OSN2 0x0a +#define QETH_ULP_ENABLE_PROT_TYPE(buffer) (buffer + 0x50) +#define QETH_IPA_CMD_PROT_TYPE(buffer) (buffer + 0x19) + +extern unsigned char ULP_SETUP[]; +#define ULP_SETUP_SIZE 0x6c +#define QETH_ULP_SETUP_DEST_ADDR(buffer) (buffer + 0x2c) +#define QETH_ULP_SETUP_CONNECTION_TOKEN(buffer) (buffer + 0x51) +#define QETH_ULP_SETUP_FILTER_TOKEN(buffer) (buffer + 0x5a) +#define QETH_ULP_SETUP_CUA(buffer) (buffer + 0x68) +#define QETH_ULP_SETUP_REAL_DEVADDR(buffer) (buffer + 0x6a) + +#define QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(buffer) \ + (PDU_ENCAPSULATION(buffer) + 0x1a) + + +extern unsigned char DM_ACT[]; +#define DM_ACT_SIZE 0x55 +#define QETH_DM_ACT_DEST_ADDR(buffer) (buffer + 0x2c) +#define QETH_DM_ACT_CONNECTION_TOKEN(buffer) (buffer + 0x51) + + + +#define QETH_TRANSPORT_HEADER_SEQ_NO(buffer) (buffer + 4) +#define QETH_PDU_HEADER_SEQ_NO(buffer) (buffer + 0x1c) +#define QETH_PDU_HEADER_ACK_SEQ_NO(buffer) (buffer + 0x20) + +extern unsigned char IDX_ACTIVATE_READ[]; +extern unsigned char IDX_ACTIVATE_WRITE[]; + +#define IDX_ACTIVATE_SIZE 0x22 +#define QETH_IDX_ACT_PNO(buffer) (buffer+0x0b) +#define QETH_IDX_ACT_ISSUER_RM_TOKEN(buffer) (buffer + 0x0c) +#define QETH_IDX_NO_PORTNAME_REQUIRED(buffer) ((buffer)[0x0b] & 0x80) +#define QETH_IDX_ACT_FUNC_LEVEL(buffer) (buffer + 0x10) +#define QETH_IDX_ACT_DATASET_NAME(buffer) (buffer + 0x16) +#define QETH_IDX_ACT_QDIO_DEV_CUA(buffer) (buffer + 0x1e) +#define QETH_IDX_ACT_QDIO_DEV_REALADDR(buffer) (buffer + 0x20) +#define QETH_IS_IDX_ACT_POS_REPLY(buffer) (((buffer)[0x08] & 3) == 2) +#define QETH_IDX_REPLY_LEVEL(buffer) (buffer + 0x12) +#define QETH_IDX_ACT_CAUSE_CODE(buffer) (buffer)[0x09] +#define QETH_IDX_ACT_ERR_EXCL 0x19 +#define QETH_IDX_ACT_ERR_AUTH 0x1E +#define QETH_IDX_ACT_ERR_AUTH_USER 0x20 + +#define PDU_ENCAPSULATION(buffer) \ + (buffer + *(buffer + (*(buffer + 0x0b)) + \ + *(buffer + *(buffer + 0x0b) + 0x11) + 0x07)) + +#define IS_IPA(buffer) \ + ((buffer) && \ + (*(buffer + ((*(buffer + 0x0b)) + 4)) == 0xc1)) + +#endif diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c new file mode 100644 index 000000000..25d0be25b --- /dev/null +++ b/drivers/s390/net/qeth_core_sys.c @@ -0,0 +1,773 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 
2007 + * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, + * Frank Pavlic <fpavlic@de.ibm.com>, + * Thomas Spatzier <tspat@de.ibm.com>, + * Frank Blaschka <frank.blaschka@de.ibm.com> + */ + +#define KMSG_COMPONENT "qeth" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/list.h> +#include <linux/rwsem.h> +#include <asm/ebcdic.h> + +#include "qeth_core.h" + +static ssize_t qeth_dev_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + if (!card) + return -EINVAL; + + switch (card->state) { + case CARD_STATE_DOWN: + return sprintf(buf, "DOWN\n"); + case CARD_STATE_HARDSETUP: + return sprintf(buf, "HARDSETUP\n"); + case CARD_STATE_SOFTSETUP: + return sprintf(buf, "SOFTSETUP\n"); + case CARD_STATE_UP: + if (card->lan_online) + return sprintf(buf, "UP (LAN ONLINE)\n"); + else + return sprintf(buf, "UP (LAN OFFLINE)\n"); + case CARD_STATE_RECOVER: + return sprintf(buf, "RECOVER\n"); + default: + return sprintf(buf, "UNKNOWN\n"); + } +} + +static DEVICE_ATTR(state, 0444, qeth_dev_state_show, NULL); + +static ssize_t qeth_dev_chpid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + if (!card) + return -EINVAL; + + return sprintf(buf, "%02X\n", card->info.chpid); +} + +static DEVICE_ATTR(chpid, 0444, qeth_dev_chpid_show, NULL); + +static ssize_t qeth_dev_if_name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + if (!card) + return -EINVAL; + return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card)); +} + +static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL); + +static ssize_t qeth_dev_card_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + if (!card) + return -EINVAL; + + return sprintf(buf, "%s\n", qeth_get_cardname_short(card)); +} + +static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL); + +static const char *qeth_get_bufsize_str(struct qeth_card *card) +{ + if (card->qdio.in_buf_size == 16384) + return "16k"; + else if (card->qdio.in_buf_size == 24576) + return "24k"; + else if (card->qdio.in_buf_size == 32768) + return "32k"; + else if (card->qdio.in_buf_size == 40960) + return "40k"; + else + return "64k"; +} + +static ssize_t qeth_dev_inbuf_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + if (!card) + return -EINVAL; + + return sprintf(buf, "%s\n", qeth_get_bufsize_str(card)); +} + +static DEVICE_ATTR(inbuf_size, 0444, qeth_dev_inbuf_size_show, NULL); + +static ssize_t qeth_dev_portno_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + if (!card) + return -EINVAL; + + return sprintf(buf, "%i\n", card->dev->dev_port); +} + +static ssize_t qeth_dev_portno_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + char *tmp; + unsigned int portno, limit; + int rc = 0; + + if (!card) + return -EINVAL; + + mutex_lock(&card->conf_mutex); + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) { + rc = -EPERM; + goto out; + } + + portno = simple_strtoul(buf, &tmp, 16); + if (portno > QETH_MAX_PORTNO) { + rc = -EINVAL; + goto out; + } + limit = (card->ssqd.pcnt ? 
card->ssqd.pcnt - 1 : card->ssqd.pcnt); + if (portno > limit) { + rc = -EINVAL; + goto out; + } + card->dev->dev_port = portno; +out: + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static DEVICE_ATTR(portno, 0644, qeth_dev_portno_show, qeth_dev_portno_store); + +static ssize_t qeth_dev_portname_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "no portname required\n"); +} + +static ssize_t qeth_dev_portname_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + dev_warn_once(&card->gdev->dev, + "portname is deprecated and is ignored\n"); + return count; +} + +static DEVICE_ATTR(portname, 0644, qeth_dev_portname_show, + qeth_dev_portname_store); + +static ssize_t qeth_dev_prioqing_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + switch (card->qdio.do_prio_queueing) { + case QETH_PRIO_Q_ING_PREC: + return sprintf(buf, "%s\n", "by precedence"); + case QETH_PRIO_Q_ING_TOS: + return sprintf(buf, "%s\n", "by type of service"); + case QETH_PRIO_Q_ING_SKB: + return sprintf(buf, "%s\n", "by skb-priority"); + case QETH_PRIO_Q_ING_VLAN: + return sprintf(buf, "%s\n", "by VLAN headers"); + default: + return sprintf(buf, "always queue %i\n", + card->qdio.default_out_queue); + } +} + +static ssize_t qeth_dev_prioqing_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + int rc = 0; + + if (!card) + return -EINVAL; + + mutex_lock(&card->conf_mutex); + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) { + rc = -EPERM; + goto out; + } + + /* check if 1920 devices are supported , + * if though we have to permit priority queueing + */ + if (card->qdio.no_out_queues == 1) { + card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; + rc = -EPERM; + goto out; + } + + if (sysfs_streq(buf, "prio_queueing_prec")) { + card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_PREC; + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; + } else if (sysfs_streq(buf, "prio_queueing_skb")) { + card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_SKB; + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; + } else if (sysfs_streq(buf, "prio_queueing_tos")) { + card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_TOS; + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; + } else if (sysfs_streq(buf, "prio_queueing_vlan")) { + if (!card->options.layer2) { + rc = -ENOTSUPP; + goto out; + } + card->qdio.do_prio_queueing = QETH_PRIO_Q_ING_VLAN; + card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; + } else if (sysfs_streq(buf, "no_prio_queueing:0")) { + card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; + card->qdio.default_out_queue = 0; + } else if (sysfs_streq(buf, "no_prio_queueing:1")) { + card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; + card->qdio.default_out_queue = 1; + } else if (sysfs_streq(buf, "no_prio_queueing:2")) { + card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; + card->qdio.default_out_queue = 2; + } else if (sysfs_streq(buf, "no_prio_queueing:3")) { + if (card->info.type == QETH_CARD_TYPE_IQD) { + rc = -EPERM; + goto out; + } + card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; + card->qdio.default_out_queue = 3; + } else if (sysfs_streq(buf, "no_prio_queueing")) { + card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; + card->qdio.default_out_queue = 
QETH_DEFAULT_QUEUE; + } else + rc = -EINVAL; +out: + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static DEVICE_ATTR(priority_queueing, 0644, qeth_dev_prioqing_show, + qeth_dev_prioqing_store); + +static ssize_t qeth_dev_bufcnt_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return sprintf(buf, "%i\n", card->qdio.in_buf_pool.buf_count); +} + +static ssize_t qeth_dev_bufcnt_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + char *tmp; + int cnt, old_cnt; + int rc = 0; + + if (!card) + return -EINVAL; + + mutex_lock(&card->conf_mutex); + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) { + rc = -EPERM; + goto out; + } + + old_cnt = card->qdio.in_buf_pool.buf_count; + cnt = simple_strtoul(buf, &tmp, 10); + cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN : + ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt); + if (old_cnt != cnt) { + rc = qeth_realloc_buffer_pool(card, cnt); + } +out: + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static DEVICE_ATTR(buffer_count, 0644, qeth_dev_bufcnt_show, + qeth_dev_bufcnt_store); + +static ssize_t qeth_dev_recover_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + char *tmp; + int i; + + if (!card) + return -EINVAL; + + if (card->state != CARD_STATE_UP) + return -EPERM; + + i = simple_strtoul(buf, &tmp, 16); + if (i == 1) + qeth_schedule_recovery(card); + + return count; +} + +static DEVICE_ATTR(recover, 0200, NULL, qeth_dev_recover_store); + +static ssize_t qeth_dev_performance_stats_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return sprintf(buf, "%i\n", card->options.performance_stats ? 1:0); +} + +static ssize_t qeth_dev_performance_stats_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + char *tmp; + int i, rc = 0; + + if (!card) + return -EINVAL; + + mutex_lock(&card->conf_mutex); + i = simple_strtoul(buf, &tmp, 16); + if ((i == 0) || (i == 1)) { + if (i == card->options.performance_stats) + goto out; + card->options.performance_stats = i; + if (i == 0) + memset(&card->perf_stats, 0, + sizeof(struct qeth_perf_stats)); + card->perf_stats.initial_rx_packets = card->stats.rx_packets; + card->perf_stats.initial_tx_packets = card->stats.tx_packets; + } else + rc = -EINVAL; +out: + mutex_unlock(&card->conf_mutex); + return rc ? 
rc : count; +} + +static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show, + qeth_dev_performance_stats_store); + +static ssize_t qeth_dev_layer2_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return sprintf(buf, "%i\n", card->options.layer2); +} + +static ssize_t qeth_dev_layer2_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + struct net_device *ndev; + char *tmp; + int i, rc = 0; + enum qeth_discipline_id newdis; + + if (!card) + return -EINVAL; + + mutex_lock(&card->discipline_mutex); + if (card->state != CARD_STATE_DOWN) { + rc = -EPERM; + goto out; + } + + i = simple_strtoul(buf, &tmp, 16); + switch (i) { + case 0: + newdis = QETH_DISCIPLINE_LAYER3; + break; + case 1: + newdis = QETH_DISCIPLINE_LAYER2; + break; + default: + rc = -EINVAL; + goto out; + } + + if (card->options.layer2 == newdis) + goto out; + if (card->info.layer_enforced) { + /* fixed layer, can't switch */ + rc = -EOPNOTSUPP; + goto out; + } + + card->info.mac_bits = 0; + if (card->discipline) { + /* start with a new, pristine netdevice: */ + ndev = qeth_clone_netdev(card->dev); + if (!ndev) { + rc = -ENOMEM; + goto out; + } + + card->discipline->remove(card->gdev); + qeth_core_free_discipline(card); + card->options.layer2 = -1; + + free_netdev(card->dev); + card->dev = ndev; + } + + rc = qeth_core_load_discipline(card, newdis); + if (rc) + goto out; + + rc = card->discipline->setup(card->gdev); + if (rc) + qeth_core_free_discipline(card); +out: + mutex_unlock(&card->discipline_mutex); + return rc ? rc : count; +} + +static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show, + qeth_dev_layer2_store); + +#define ATTR_QETH_ISOLATION_NONE ("none") +#define ATTR_QETH_ISOLATION_FWD ("forward") +#define ATTR_QETH_ISOLATION_DROP ("drop") + +static ssize_t qeth_dev_isolation_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + switch (card->options.isolation) { + case ISOLATION_MODE_NONE: + return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE); + case ISOLATION_MODE_FWD: + return snprintf(buf, 9, "%s\n", ATTR_QETH_ISOLATION_FWD); + case ISOLATION_MODE_DROP: + return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_DROP); + default: + return snprintf(buf, 5, "%s\n", "N/A"); + } +} + +static ssize_t qeth_dev_isolation_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + enum qeth_ipa_isolation_modes isolation; + int rc = 0; + + if (!card) + return -EINVAL; + + mutex_lock(&card->conf_mutex); + if (card->info.type != QETH_CARD_TYPE_OSD && + card->info.type != QETH_CARD_TYPE_OSX) { + rc = -EOPNOTSUPP; + dev_err(&card->gdev->dev, "Adapter does not " + "support QDIO data connection isolation\n"); + goto out; + } + + /* parse input into isolation mode */ + if (sysfs_streq(buf, ATTR_QETH_ISOLATION_NONE)) { + isolation = ISOLATION_MODE_NONE; + } else if (sysfs_streq(buf, ATTR_QETH_ISOLATION_FWD)) { + isolation = ISOLATION_MODE_FWD; + } else if (sysfs_streq(buf, ATTR_QETH_ISOLATION_DROP)) { + isolation = ISOLATION_MODE_DROP; + } else { + rc = -EINVAL; + goto out; + } + rc = count; + + /* defer IP assist if device is offline (until discipline->set_online)*/ + card->options.prev_isolation = card->options.isolation; + 
card->options.isolation = isolation; + if (qeth_card_hw_is_reachable(card)) { + int ipa_rc = qeth_set_access_ctrl_online(card, 1); + if (ipa_rc != 0) + rc = ipa_rc; + } +out: + mutex_unlock(&card->conf_mutex); + return rc; +} + +static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show, + qeth_dev_isolation_store); + +static ssize_t qeth_dev_switch_attrs_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + struct qeth_switch_info sw_info; + int rc = 0; + + if (!card) + return -EINVAL; + + if (!qeth_card_hw_is_reachable(card)) + return sprintf(buf, "n/a\n"); + + rc = qeth_query_switch_attributes(card, &sw_info); + if (rc) + return rc; + + if (!sw_info.capabilities) + rc = sprintf(buf, "unknown"); + + if (sw_info.capabilities & QETH_SWITCH_FORW_802_1) + rc = sprintf(buf, (sw_info.settings & QETH_SWITCH_FORW_802_1 ? + "[802.1]" : "802.1")); + if (sw_info.capabilities & QETH_SWITCH_FORW_REFL_RELAY) + rc += sprintf(buf + rc, + (sw_info.settings & QETH_SWITCH_FORW_REFL_RELAY ? + " [rr]" : " rr")); + rc += sprintf(buf + rc, "\n"); + + return rc; +} + +static DEVICE_ATTR(switch_attrs, 0444, + qeth_dev_switch_attrs_show, NULL); + +static ssize_t qeth_hw_trap_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + if (card->info.hwtrap) + return snprintf(buf, 5, "arm\n"); + else + return snprintf(buf, 8, "disarm\n"); +} + +static ssize_t qeth_hw_trap_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + int rc = 0; + int state = 0; + + if (!card) + return -EINVAL; + + mutex_lock(&card->conf_mutex); + if (qeth_card_hw_is_reachable(card)) + state = 1; + + if (sysfs_streq(buf, "arm") && !card->info.hwtrap) { + if (state) { + if (qeth_is_diagass_supported(card, + QETH_DIAGS_CMD_TRAP)) { + rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM); + if (!rc) + card->info.hwtrap = 1; + } else + rc = -EINVAL; + } else + card->info.hwtrap = 1; + } else if (sysfs_streq(buf, "disarm") && card->info.hwtrap) { + if (state) { + rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); + if (!rc) + card->info.hwtrap = 0; + } else + card->info.hwtrap = 0; + } else if (sysfs_streq(buf, "trap") && state && card->info.hwtrap) + rc = qeth_hw_trap(card, QETH_DIAGS_TRAP_CAPTURE); + else + rc = -EINVAL; + + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static DEVICE_ATTR(hw_trap, 0644, qeth_hw_trap_show, + qeth_hw_trap_store); + +static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value) +{ + + if (!card) + return -EINVAL; + + return sprintf(buf, "%i\n", value); +} + +static ssize_t qeth_dev_blkt_store(struct qeth_card *card, + const char *buf, size_t count, int *value, int max_value) +{ + char *tmp; + int i, rc = 0; + + if (!card) + return -EINVAL; + + mutex_lock(&card->conf_mutex); + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) { + rc = -EPERM; + goto out; + } + i = simple_strtoul(buf, &tmp, 10); + if (i <= max_value) + *value = i; + else + rc = -EINVAL; +out: + mutex_unlock(&card->conf_mutex); + return rc ? 
rc : count; +} + +static ssize_t qeth_dev_blkt_total_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_dev_blkt_show(buf, card, card->info.blkt.time_total); +} + +static ssize_t qeth_dev_blkt_total_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_dev_blkt_store(card, buf, count, + &card->info.blkt.time_total, 5000); +} + + + +static DEVICE_ATTR(total, 0644, qeth_dev_blkt_total_show, + qeth_dev_blkt_total_store); + +static ssize_t qeth_dev_blkt_inter_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_dev_blkt_show(buf, card, card->info.blkt.inter_packet); +} + +static ssize_t qeth_dev_blkt_inter_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_dev_blkt_store(card, buf, count, + &card->info.blkt.inter_packet, 1000); +} + +static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show, + qeth_dev_blkt_inter_store); + +static ssize_t qeth_dev_blkt_inter_jumbo_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_dev_blkt_show(buf, card, + card->info.blkt.inter_packet_jumbo); +} + +static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + return qeth_dev_blkt_store(card, buf, count, + &card->info.blkt.inter_packet_jumbo, 1000); +} + +static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show, + qeth_dev_blkt_inter_jumbo_store); + +static struct attribute *qeth_blkt_device_attrs[] = { + &dev_attr_total.attr, + &dev_attr_inter.attr, + &dev_attr_inter_jumbo.attr, + NULL, +}; +const struct attribute_group qeth_device_blkt_group = { + .name = "blkt", + .attrs = qeth_blkt_device_attrs, +}; +EXPORT_SYMBOL_GPL(qeth_device_blkt_group); + +static struct attribute *qeth_device_attrs[] = { + &dev_attr_state.attr, + &dev_attr_chpid.attr, + &dev_attr_if_name.attr, + &dev_attr_card_type.attr, + &dev_attr_inbuf_size.attr, + &dev_attr_portno.attr, + &dev_attr_portname.attr, + &dev_attr_priority_queueing.attr, + &dev_attr_buffer_count.attr, + &dev_attr_recover.attr, + &dev_attr_performance_stats.attr, + &dev_attr_layer2.attr, + &dev_attr_isolation.attr, + &dev_attr_hw_trap.attr, + &dev_attr_switch_attrs.attr, + NULL, +}; +const struct attribute_group qeth_device_attr_group = { + .attrs = qeth_device_attrs, +}; +EXPORT_SYMBOL_GPL(qeth_device_attr_group); + +const struct attribute_group *qeth_generic_attr_groups[] = { + &qeth_device_attr_group, + &qeth_device_blkt_group, + NULL, +}; + +static struct attribute *qeth_osn_device_attrs[] = { + &dev_attr_state.attr, + &dev_attr_chpid.attr, + &dev_attr_if_name.attr, + &dev_attr_card_type.attr, + &dev_attr_buffer_count.attr, + &dev_attr_recover.attr, + NULL, +}; +static struct attribute_group qeth_osn_device_attr_group = { + .attrs = qeth_osn_device_attrs, +}; +const struct attribute_group *qeth_osn_attr_groups[] = { + &qeth_osn_device_attr_group, + NULL, +}; diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h new file mode 100644 index 000000000..ddc615b43 --- /dev/null +++ b/drivers/s390/net/qeth_l2.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ 
+/* + * Copyright IBM Corp. 2013 + * Author(s): Eugene Crosser <eugene.crosser@ru.ibm.com> + */ + +#ifndef __QETH_L2_H__ +#define __QETH_L2_H__ + +#include "qeth_core.h" + +extern const struct attribute_group *qeth_l2_attr_groups[]; + +int qeth_l2_create_device_attributes(struct device *); +void qeth_l2_remove_device_attributes(struct device *); +void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card); +int qeth_bridgeport_query_ports(struct qeth_card *card, + enum qeth_sbp_roles *role, + enum qeth_sbp_states *state); +int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); +int qeth_bridgeport_an_set(struct qeth_card *card, int enable); + +int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state); +int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state); +int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout); +int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout); +bool qeth_l2_vnicc_is_in_use(struct qeth_card *card); + +struct qeth_mac { + u8 mac_addr[ETH_ALEN]; + u8 disp_flag:2; + struct hlist_node hnode; +}; + +#endif /* __QETH_L2_H__ */ diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c new file mode 100644 index 000000000..8d30f9ac3 --- /dev/null +++ b/drivers/s390/net/qeth_l2_main.c @@ -0,0 +1,2405 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2007, 2009 + * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, + * Frank Pavlic <fpavlic@de.ibm.com>, + * Thomas Spatzier <tspat@de.ibm.com>, + * Frank Blaschka <frank.blaschka@de.ibm.com> + */ + +#define KMSG_COMPONENT "qeth" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/etherdevice.h> +#include <linux/list.h> +#include <linux/hash.h> +#include <linux/hashtable.h> +#include <asm/setup.h> +#include "qeth_core.h" +#include "qeth_l2.h" + +static int qeth_l2_set_offline(struct ccwgroup_device *); +static int qeth_l2_stop(struct net_device *); +static void qeth_bridgeport_query_support(struct qeth_card *card); +static void qeth_bridge_state_change(struct qeth_card *card, + struct qeth_ipa_cmd *cmd); +static void qeth_bridge_host_event(struct qeth_card *card, + struct qeth_ipa_cmd *cmd); +static void qeth_l2_vnicc_set_defaults(struct qeth_card *card); +static void qeth_l2_vnicc_init(struct qeth_card *card); +static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc, + u32 *timeout); + +static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no) +{ + struct qeth_card *card; + struct net_device *ndev; + __u16 temp_dev_no; + unsigned long flags; + struct ccw_dev_id read_devid; + + ndev = NULL; + memcpy(&temp_dev_no, read_dev_no, 2); + read_lock_irqsave(&qeth_core_card_list.rwlock, flags); + list_for_each_entry(card, &qeth_core_card_list.list, list) { + ccw_device_get_id(CARD_RDEV(card), &read_devid); + if (read_devid.devno == temp_dev_no) { + ndev = card->dev; + break; + } + } + read_unlock_irqrestore(&qeth_core_card_list.rwlock, flags); + return ndev; +} + +static int qeth_setdelmac_makerc(struct qeth_card *card, int retcode) +{ + int rc; + + if (retcode) + QETH_CARD_TEXT_(card, 2, "err%04x", retcode); + switch (retcode) { + case IPA_RC_SUCCESS: + rc = 0; + break; + case IPA_RC_L2_UNSUPPORTED_CMD: + rc = -EOPNOTSUPP; + break; + case IPA_RC_L2_ADDR_TABLE_FULL: + rc = -ENOSPC; + break; + case 
IPA_RC_L2_DUP_MAC: + case IPA_RC_L2_DUP_LAYER3_MAC: + rc = -EEXIST; + break; + case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP: + case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: + rc = -EPERM; + break; + case IPA_RC_L2_MAC_NOT_FOUND: + rc = -ENOENT; + break; + case -ENOMEM: + rc = -ENOMEM; + break; + default: + rc = -EIO; + break; + } + return rc; +} + +static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac, + enum qeth_ipa_cmds ipacmd) +{ + struct qeth_ipa_cmd *cmd; + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 2, "L2sdmac"); + iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + cmd->data.setdelmac.mac_length = ETH_ALEN; + ether_addr_copy(cmd->data.setdelmac.mac, mac); + return qeth_setdelmac_makerc(card, qeth_send_ipa_cmd(card, iob, + NULL, NULL)); +} + +static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) +{ + int rc; + + QETH_CARD_TEXT(card, 2, "L2Setmac"); + rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC); + if (rc == 0) { + dev_info(&card->gdev->dev, + "MAC address %pM successfully registered on device %s\n", + mac, card->dev->name); + } else { + switch (rc) { + case -EEXIST: + dev_warn(&card->gdev->dev, + "MAC address %pM already exists\n", mac); + break; + case -EPERM: + dev_warn(&card->gdev->dev, + "MAC address %pM is not authorized\n", mac); + break; + } + } + return rc; +} + +static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) +{ + enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ? + IPA_CMD_SETGMAC : IPA_CMD_SETVMAC; + int rc; + + QETH_CARD_TEXT(card, 2, "L2Wmac"); + rc = qeth_l2_send_setdelmac(card, mac, cmd); + if (rc == -EEXIST) + QETH_DBF_MESSAGE(2, "MAC %pM already registered on %s\n", + mac, QETH_CARD_IFNAME(card)); + else if (rc) + QETH_DBF_MESSAGE(2, "Failed to register MAC %pM on %s: %d\n", + mac, QETH_CARD_IFNAME(card), rc); + return rc; +} + +static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac) +{ + enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ? 
+ IPA_CMD_DELGMAC : IPA_CMD_DELVMAC; + int rc; + + QETH_CARD_TEXT(card, 2, "L2Rmac"); + rc = qeth_l2_send_setdelmac(card, mac, cmd); + if (rc) + QETH_DBF_MESSAGE(2, "Failed to delete MAC %pM on %s: %d\n", + mac, QETH_CARD_IFNAME(card), rc); + return rc; +} + +static void qeth_l2_del_all_macs(struct qeth_card *card) +{ + struct qeth_mac *mac; + struct hlist_node *tmp; + int i; + + spin_lock_bh(&card->mclock); + hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) { + hash_del(&mac->hnode); + kfree(mac); + } + spin_unlock_bh(&card->mclock); +} + +static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb) +{ + if (card->info.type == QETH_CARD_TYPE_OSN) + return RTN_UNICAST; + if (is_broadcast_ether_addr(skb->data)) + return RTN_BROADCAST; + if (is_multicast_ether_addr(skb->data)) + return RTN_MULTICAST; + return RTN_UNICAST; +} + +static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb, + int cast_type, unsigned int data_len) +{ + struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb); + + memset(hdr, 0, sizeof(struct qeth_hdr)); + hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2; + hdr->hdr.l2.pkt_length = data_len; + + /* set byte byte 3 to casting flags */ + if (cast_type == RTN_MULTICAST) + hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_MULTICAST; + else if (cast_type == RTN_BROADCAST) + hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_BROADCAST; + else + hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST; + + /* VSWITCH relies on the VLAN + * information to be present in + * the QDIO header */ + if (veth->h_vlan_proto == __constant_htons(ETH_P_8021Q)) { + hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN; + hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI); + } +} + +static int qeth_setdelvlan_makerc(struct qeth_card *card, int retcode) +{ + if (retcode) + QETH_CARD_TEXT_(card, 2, "err%04x", retcode); + + switch (retcode) { + case IPA_RC_SUCCESS: + return 0; + case IPA_RC_L2_INVALID_VLAN_ID: + return -EINVAL; + case IPA_RC_L2_DUP_VLAN_ID: + return -EEXIST; + case IPA_RC_L2_VLAN_ID_NOT_FOUND: + return -ENOENT; + case IPA_RC_L2_VLAN_ID_NOT_ALLOWED: + return -EPERM; + case -ENOMEM: + return -ENOMEM; + default: + return -EIO; + } +} + +static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + + QETH_CARD_TEXT(card, 2, "L2sdvcb"); + if (cmd->hdr.return_code) { + QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x.\n", + cmd->data.setdelvlan.vlan_id, + QETH_CARD_IFNAME(card), cmd->hdr.return_code); + QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command); + QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code); + } + return 0; +} + +static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i, + enum qeth_ipa_cmds ipacmd) +{ + struct qeth_ipa_cmd *cmd; + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd); + iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + cmd->data.setdelvlan.vlan_id = i; + return qeth_setdelvlan_makerc(card, qeth_send_ipa_cmd(card, iob, + qeth_l2_send_setdelvlan_cb, NULL)); +} + +static void qeth_l2_process_vlans(struct qeth_card *card) +{ + struct qeth_vlan_vid *id; + + QETH_CARD_TEXT(card, 3, "L2prcvln"); + mutex_lock(&card->vid_list_mutex); + list_for_each_entry(id, &card->vid_list, list) { + qeth_l2_send_setdelvlan(card, id->vid, IPA_CMD_SETVLAN); + } + mutex_unlock(&card->vid_list_mutex); +} + +static int 
qeth_l2_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct qeth_card *card = dev->ml_priv; + struct qeth_vlan_vid *id; + int rc; + + QETH_CARD_TEXT_(card, 4, "aid:%d", vid); + if (!vid) + return 0; + if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { + QETH_CARD_TEXT(card, 3, "aidREC"); + return 0; + } + id = kmalloc(sizeof(*id), GFP_KERNEL); + if (id) { + id->vid = vid; + rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN); + if (rc) { + kfree(id); + return rc; + } + mutex_lock(&card->vid_list_mutex); + list_add_tail(&id->list, &card->vid_list); + mutex_unlock(&card->vid_list_mutex); + } else { + return -ENOMEM; + } + return 0; +} + +static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct qeth_vlan_vid *id, *tmpid = NULL; + struct qeth_card *card = dev->ml_priv; + int rc = 0; + + QETH_CARD_TEXT_(card, 4, "kid:%d", vid); + if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { + QETH_CARD_TEXT(card, 3, "kidREC"); + return 0; + } + mutex_lock(&card->vid_list_mutex); + list_for_each_entry(id, &card->vid_list, list) { + if (id->vid == vid) { + list_del(&id->list); + tmpid = id; + break; + } + } + mutex_unlock(&card->vid_list_mutex); + if (tmpid) { + rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN); + kfree(tmpid); + } + return rc; +} + +static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) +{ + QETH_DBF_TEXT(SETUP , 2, "stopcard"); + QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); + + qeth_set_allowed_threads(card, 0, 1); + if (card->read.state == CH_STATE_UP && + card->write.state == CH_STATE_UP && + (card->state == CARD_STATE_UP)) { + if (recovery_mode && + card->info.type != QETH_CARD_TYPE_OSN) { + qeth_l2_stop(card->dev); + } else { + rtnl_lock(); + dev_close(card->dev); + rtnl_unlock(); + } + card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; + card->state = CARD_STATE_SOFTSETUP; + } + if (card->state == CARD_STATE_SOFTSETUP) { + qeth_l2_del_all_macs(card); + qeth_clear_ipacmd_list(card); + card->state = CARD_STATE_HARDSETUP; + } + if (card->state == CARD_STATE_HARDSETUP) { + qeth_qdio_clear_card(card, 0); + qeth_clear_qdio_buffers(card); + qeth_clear_working_pool_list(card); + card->state = CARD_STATE_DOWN; + } + if (card->state == CARD_STATE_DOWN) { + qeth_clear_cmd_buffers(&card->read); + qeth_clear_cmd_buffers(&card->write); + } +} + +static int qeth_l2_process_inbound_buffer(struct qeth_card *card, + int budget, int *done) +{ + int work_done = 0; + struct sk_buff *skb; + struct qeth_hdr *hdr; + unsigned int len; + + *done = 0; + WARN_ON_ONCE(!budget); + while (budget) { + skb = qeth_core_get_next_skb(card, + &card->qdio.in_q->bufs[card->rx.b_index], + &card->rx.b_element, &card->rx.e_offset, &hdr); + if (!skb) { + *done = 1; + break; + } + switch (hdr->hdr.l2.id) { + case QETH_HEADER_TYPE_LAYER2: + skb->protocol = eth_type_trans(skb, skb->dev); + qeth_rx_csum(card, skb, hdr->hdr.l2.flags[1]); + if (skb->protocol == htons(ETH_P_802_2)) + *((__u32 *)skb->cb) = ++card->seqno.pkt_seqno; + len = skb->len; + napi_gro_receive(&card->napi, skb); + break; + case QETH_HEADER_TYPE_OSN: + if (card->info.type == QETH_CARD_TYPE_OSN) { + skb_push(skb, sizeof(struct qeth_hdr)); + skb_copy_to_linear_data(skb, hdr, + sizeof(struct qeth_hdr)); + len = skb->len; + card->osn_info.data_cb(skb); + break; + } + /* else unknown */ + default: + dev_kfree_skb_any(skb); + QETH_CARD_TEXT(card, 3, "inbunkno"); + QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr)); + continue; + } + work_done++; + budget--; + 
card->stats.rx_packets++; + card->stats.rx_bytes += len; + } + return work_done; +} + +static int qeth_l2_request_initial_mac(struct qeth_card *card) +{ + int rc = 0; + + QETH_DBF_TEXT(SETUP, 2, "l2reqmac"); + QETH_DBF_TEXT_(SETUP, 2, "doL2%s", CARD_BUS_ID(card)); + + if (MACHINE_IS_VM) { + rc = qeth_vm_request_mac(card); + if (!rc) + goto out; + QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %s: x%x\n", + CARD_BUS_ID(card), rc); + QETH_DBF_TEXT_(SETUP, 2, "err%04x", rc); + /* fall back to alternative mechanism: */ + } + + if (card->info.type == QETH_CARD_TYPE_IQD || + card->info.type == QETH_CARD_TYPE_OSM || + card->info.type == QETH_CARD_TYPE_OSX || + card->info.guestlan) { + rc = qeth_setadpparms_change_macaddr(card); + if (!rc) + goto out; + QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %s: x%x\n", + CARD_BUS_ID(card), rc); + QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc); + /* fall back once more: */ + } + + /* some devices don't support a custom MAC address: */ + if (card->info.type == QETH_CARD_TYPE_OSM || + card->info.type == QETH_CARD_TYPE_OSX) + return (rc) ? rc : -EADDRNOTAVAIL; + eth_hw_addr_random(card->dev); + +out: + QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, card->dev->addr_len); + return 0; +} + +static int qeth_l2_set_mac_address(struct net_device *dev, void *p) +{ + struct sockaddr *addr = p; + struct qeth_card *card = dev->ml_priv; + u8 old_addr[ETH_ALEN]; + int rc = 0; + + QETH_CARD_TEXT(card, 3, "setmac"); + + if (card->info.type == QETH_CARD_TYPE_OSN || + card->info.type == QETH_CARD_TYPE_OSM || + card->info.type == QETH_CARD_TYPE_OSX) { + QETH_CARD_TEXT(card, 3, "setmcTYP"); + return -EOPNOTSUPP; + } + QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN); + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { + QETH_CARD_TEXT(card, 3, "setmcREC"); + return -ERESTARTSYS; + } + + /* avoid racing against concurrent state change: */ + if (!mutex_trylock(&card->conf_mutex)) + return -EAGAIN; + + if (!qeth_card_hw_is_reachable(card)) { + ether_addr_copy(dev->dev_addr, addr->sa_data); + goto out_unlock; + } + + /* don't register the same address twice */ + if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) && + (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) + goto out_unlock; + + /* add the new address, switch over, drop the old */ + rc = qeth_l2_send_setmac(card, addr->sa_data); + if (rc) + goto out_unlock; + ether_addr_copy(old_addr, dev->dev_addr); + ether_addr_copy(dev->dev_addr, addr->sa_data); + + if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED) + qeth_l2_remove_mac(card, old_addr); + card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; + +out_unlock: + mutex_unlock(&card->conf_mutex); + return rc; +} + +static void qeth_promisc_to_bridge(struct qeth_card *card) +{ + struct net_device *dev = card->dev; + enum qeth_ipa_promisc_modes promisc_mode; + int role; + int rc; + + QETH_CARD_TEXT(card, 3, "pmisc2br"); + + if (!card->options.sbp.reflect_promisc) + return; + promisc_mode = (dev->flags & IFF_PROMISC) ? SET_PROMISC_MODE_ON + : SET_PROMISC_MODE_OFF; + if (promisc_mode == card->info.promisc_mode) + return; + + if (promisc_mode == SET_PROMISC_MODE_ON) { + if (card->options.sbp.reflect_promisc_primary) + role = QETH_SBP_ROLE_PRIMARY; + else + role = QETH_SBP_ROLE_SECONDARY; + } else + role = QETH_SBP_ROLE_NONE; + + rc = qeth_bridgeport_setrole(card, role); + QETH_DBF_TEXT_(SETUP, 2, "bpm%c%04x", + (promisc_mode == SET_PROMISC_MODE_ON) ? 
'+' : '-', rc); + if (!rc) { + card->options.sbp.role = role; + card->info.promisc_mode = promisc_mode; + } + +} +/* A new MAC address is added to the hash table and marked to be written to + * the card only if it is not in the hash table already. + */ +static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha) +{ + u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2])); + struct qeth_mac *mac; + + hash_for_each_possible(card->mac_htable, mac, hnode, mac_hash) { + if (ether_addr_equal_64bits(ha->addr, mac->mac_addr)) { + mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING; + return; + } + } + + mac = kzalloc(sizeof(struct qeth_mac), GFP_ATOMIC); + if (!mac) + return; + + ether_addr_copy(mac->mac_addr, ha->addr); + mac->disp_flag = QETH_DISP_ADDR_ADD; + + hash_add(card->mac_htable, &mac->hnode, mac_hash); +} + +static void qeth_l2_set_rx_mode(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + struct netdev_hw_addr *ha; + struct qeth_mac *mac; + struct hlist_node *tmp; + int i; + int rc; + + if (card->info.type == QETH_CARD_TYPE_OSN) + return; + + QETH_CARD_TEXT(card, 3, "setmulti"); + if (qeth_threads_running(card, QETH_RECOVER_THREAD) && + (card->state != CARD_STATE_UP)) + return; + + spin_lock_bh(&card->mclock); + + netdev_for_each_mc_addr(ha, dev) + qeth_l2_add_mac(card, ha); + netdev_for_each_uc_addr(ha, dev) + qeth_l2_add_mac(card, ha); + + hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) { + switch (mac->disp_flag) { + case QETH_DISP_ADDR_DELETE: + qeth_l2_remove_mac(card, mac->mac_addr); + hash_del(&mac->hnode); + kfree(mac); + break; + case QETH_DISP_ADDR_ADD: + rc = qeth_l2_write_mac(card, mac->mac_addr); + if (rc) { + hash_del(&mac->hnode); + kfree(mac); + break; + } + /* fall through */ + default: + /* for next call to set_rx_mode(): */ + mac->disp_flag = QETH_DISP_ADDR_DELETE; + } + } + + spin_unlock_bh(&card->mclock); + + if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) + qeth_setadp_promisc_mode(card); + else + qeth_promisc_to_bridge(card); +} + +static int qeth_l2_xmit(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int cast_type, int ipv) +{ + const unsigned int proto_len = IS_IQD(card) ? ETH_HLEN : 0; + const unsigned int hw_hdr_len = sizeof(struct qeth_hdr); + unsigned int frame_len = skb->len; + unsigned int data_offset = 0; + struct qeth_hdr *hdr = NULL; + unsigned int hd_len = 0; + unsigned int elements; + int push_len, rc; + bool is_sg; + + rc = skb_cow_head(skb, hw_hdr_len); + if (rc) + return rc; + + push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, proto_len, + &elements); + if (push_len < 0) + return push_len; + if (!push_len) { + /* HW header needs its own buffer element. 
*/ + hd_len = hw_hdr_len + proto_len; + data_offset = proto_len; + } + qeth_l2_fill_header(hdr, skb, cast_type, frame_len); + if (skb->ip_summed == CHECKSUM_PARTIAL) { + qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv); + if (card->options.performance_stats) + card->perf_stats.tx_csum++; + } + + is_sg = skb_is_nonlinear(skb); + if (IS_IQD(card)) { + rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset, + hd_len); + } else { + /* TODO: drop skb_orphan() once TX completion is fast enough */ + skb_orphan(skb); + rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset, + hd_len, elements); + } + + if (!rc) { + if (card->options.performance_stats) { + card->perf_stats.buf_elements_sent += elements; + if (is_sg) + card->perf_stats.sg_skbs_sent++; + } + } else { + if (!push_len) + kmem_cache_free(qeth_core_header_cache, hdr); + if (rc == -EBUSY) + /* roll back to ETH header */ + skb_pull(skb, push_len); + } + return rc; +} + +static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue) +{ + unsigned int elements; + struct qeth_hdr *hdr; + + if (skb->protocol == htons(ETH_P_IPV6)) + return -EPROTONOSUPPORT; + + hdr = (struct qeth_hdr *)skb->data; + elements = qeth_get_elements_no(card, skb, 0, 0); + if (!elements) + return -E2BIG; + if (qeth_hdr_chk_and_bounce(skb, &hdr, sizeof(*hdr))) + return -EINVAL; + return qeth_do_send_packet(card, queue, skb, hdr, 0, 0, elements); +} + +static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + int cast_type = qeth_l2_get_cast_type(card, skb); + int ipv = qeth_get_ip_version(skb); + struct qeth_qdio_out_q *queue; + int tx_bytes = skb->len; + int rc; + + if ((card->state != CARD_STATE_UP) || !card->lan_online) { + card->stats.tx_carrier_errors++; + goto tx_drop; + } + + queue = qeth_get_tx_queue(card, skb, ipv, cast_type); + + if (card->options.performance_stats) { + card->perf_stats.outbound_cnt++; + card->perf_stats.outbound_start_time = qeth_get_micros(); + } + netif_stop_queue(dev); + + if (IS_OSN(card)) + rc = qeth_l2_xmit_osn(card, skb, queue); + else + rc = qeth_l2_xmit(card, skb, queue, cast_type, ipv); + + if (!rc) { + card->stats.tx_packets++; + card->stats.tx_bytes += tx_bytes; + if (card->options.performance_stats) + card->perf_stats.outbound_time += qeth_get_micros() - + card->perf_stats.outbound_start_time; + netif_wake_queue(dev); + return NETDEV_TX_OK; + } else if (rc == -EBUSY) { + return NETDEV_TX_BUSY; + } /* else fall through */ + +tx_drop: + card->stats.tx_dropped++; + card->stats.tx_errors++; + dev_kfree_skb_any(skb); + netif_wake_queue(dev); + return NETDEV_TX_OK; +} + +static int __qeth_l2_open(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + int rc = 0; + + QETH_CARD_TEXT(card, 4, "qethopen"); + if (card->state == CARD_STATE_UP) + return rc; + if (card->state != CARD_STATE_SOFTSETUP) + return -ENODEV; + + if ((card->info.type != QETH_CARD_TYPE_OSN) && + (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))) { + QETH_CARD_TEXT(card, 4, "nomacadr"); + return -EPERM; + } + card->data.state = CH_STATE_UP; + card->state = CARD_STATE_UP; + netif_start_queue(dev); + + if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { + napi_enable(&card->napi); + local_bh_disable(); + napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); + } else + rc = -EIO; + return rc; +} + +static int qeth_l2_open(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + 
QETH_CARD_TEXT(card, 5, "qethope_"); + if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { + QETH_CARD_TEXT(card, 3, "openREC"); + return -ERESTARTSYS; + } + return __qeth_l2_open(dev); +} + +static int qeth_l2_stop(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_CARD_TEXT(card, 4, "qethstop"); + netif_tx_disable(dev); + if (card->state == CARD_STATE_UP) { + card->state = CARD_STATE_SOFTSETUP; + napi_disable(&card->napi); + } + return 0; +} + +static const struct device_type qeth_l2_devtype = { + .name = "qeth_layer2", + .groups = qeth_l2_attr_groups, +}; + +static int qeth_l2_probe_device(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + int rc; + + if (gdev->dev.type == &qeth_generic_devtype) { + rc = qeth_l2_create_device_attributes(&gdev->dev); + if (rc) + return rc; + } + INIT_LIST_HEAD(&card->vid_list); + hash_init(card->mac_htable); + card->options.layer2 = 1; + card->info.hwtrap = 0; + qeth_l2_vnicc_set_defaults(card); + return 0; +} + +static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) +{ + struct qeth_card *card = dev_get_drvdata(&cgdev->dev); + + if (cgdev->dev.type == &qeth_generic_devtype) + qeth_l2_remove_device_attributes(&cgdev->dev); + qeth_set_allowed_threads(card, 0, 1); + wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); + + if (cgdev->state == CCWGROUP_ONLINE) + qeth_l2_set_offline(cgdev); + + cancel_work_sync(&card->close_dev_work); + if (qeth_netdev_is_registered(card->dev)) + unregister_netdev(card->dev); +} + +static const struct ethtool_ops qeth_l2_ethtool_ops = { + .get_link = ethtool_op_get_link, + .get_strings = qeth_core_get_strings, + .get_ethtool_stats = qeth_core_get_ethtool_stats, + .get_sset_count = qeth_core_get_sset_count, + .get_drvinfo = qeth_core_get_drvinfo, + .get_link_ksettings = qeth_core_ethtool_get_link_ksettings, +}; + +static const struct ethtool_ops qeth_l2_osn_ops = { + .get_strings = qeth_core_get_strings, + .get_ethtool_stats = qeth_core_get_ethtool_stats, + .get_sset_count = qeth_core_get_sset_count, + .get_drvinfo = qeth_core_get_drvinfo, +}; + +static const struct net_device_ops qeth_l2_netdev_ops = { + .ndo_open = qeth_l2_open, + .ndo_stop = qeth_l2_stop, + .ndo_get_stats = qeth_get_stats, + .ndo_start_xmit = qeth_l2_hard_start_xmit, + .ndo_features_check = qeth_features_check, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = qeth_l2_set_rx_mode, + .ndo_do_ioctl = qeth_do_ioctl, + .ndo_set_mac_address = qeth_l2_set_mac_address, + .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid, + .ndo_tx_timeout = qeth_tx_timeout, + .ndo_fix_features = qeth_fix_features, + .ndo_set_features = qeth_set_features +}; + +static int qeth_l2_setup_netdev(struct qeth_card *card) +{ + int rc; + + if (qeth_netdev_is_registered(card->dev)) + return 0; + + card->dev->priv_flags |= IFF_UNICAST_FLT; + card->dev->netdev_ops = &qeth_l2_netdev_ops; + if (card->info.type == QETH_CARD_TYPE_OSN) { + card->dev->ethtool_ops = &qeth_l2_osn_ops; + card->dev->flags |= IFF_NOARP; + } else { + card->dev->ethtool_ops = &qeth_l2_ethtool_ops; + card->dev->needed_headroom = sizeof(struct qeth_hdr); + } + + if (card->info.type == QETH_CARD_TYPE_OSM) + card->dev->features |= NETIF_F_VLAN_CHALLENGED; + else + card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) { + card->dev->features |= NETIF_F_SG; + /* OSA 3S and earlier has no RX/TX support 
*/ + if (qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM)) { + card->dev->hw_features |= NETIF_F_IP_CSUM; + card->dev->vlan_features |= NETIF_F_IP_CSUM; + } + } + if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) { + card->dev->hw_features |= NETIF_F_IPV6_CSUM; + card->dev->vlan_features |= NETIF_F_IPV6_CSUM; + } + if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM) || + qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6)) { + card->dev->hw_features |= NETIF_F_RXCSUM; + card->dev->vlan_features |= NETIF_F_RXCSUM; + } + + qeth_l2_request_initial_mac(card); + netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); + rc = register_netdev(card->dev); + if (rc) + card->dev->netdev_ops = NULL; + return rc; +} + +static int qeth_l2_start_ipassists(struct qeth_card *card) +{ + /* configure isolation level */ + if (qeth_set_access_ctrl_online(card, 0)) + return -ENODEV; + return 0; +} + +static void qeth_l2_trace_features(struct qeth_card *card) +{ + /* Set BridgePort features */ + QETH_CARD_TEXT(card, 2, "featuSBP"); + QETH_CARD_HEX(card, 2, &card->options.sbp.supported_funcs, + sizeof(card->options.sbp.supported_funcs)); + /* VNIC Characteristics features */ + QETH_CARD_TEXT(card, 2, "feaVNICC"); + QETH_CARD_HEX(card, 2, &card->options.vnicc.sup_chars, + sizeof(card->options.vnicc.sup_chars)); +} + +static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + int rc = 0; + enum qeth_card_states recover_flag; + + mutex_lock(&card->discipline_mutex); + mutex_lock(&card->conf_mutex); + QETH_DBF_TEXT(SETUP, 2, "setonlin"); + QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); + + recover_flag = card->state; + rc = qeth_core_hardsetup_card(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); + rc = -ENODEV; + goto out_remove; + } + qeth_bridgeport_query_support(card); + if (card->options.sbp.supported_funcs) + dev_info(&card->gdev->dev, + "The device represents a Bridge Capable Port\n"); + + rc = qeth_l2_setup_netdev(card); + if (rc) + goto out_remove; + + if (card->info.type != QETH_CARD_TYPE_OSN && + !qeth_l2_send_setmac(card, card->dev->dev_addr)) + card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; + + if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) { + if (card->info.hwtrap && + qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)) + card->info.hwtrap = 0; + } else + card->info.hwtrap = 0; + + /* for the rx_bcast characteristic, init VNICC after setmac */ + qeth_l2_vnicc_init(card); + + qeth_trace_features(card); + qeth_l2_trace_features(card); + + qeth_l2_setup_bridgeport_attrs(card); + + card->state = CARD_STATE_HARDSETUP; + qeth_print_status_message(card); + + /* softsetup */ + QETH_DBF_TEXT(SETUP, 2, "softsetp"); + + if ((card->info.type == QETH_CARD_TYPE_OSD) || + (card->info.type == QETH_CARD_TYPE_OSX)) { + rc = qeth_l2_start_ipassists(card); + if (rc) + goto out_remove; + } + + if (card->info.type != QETH_CARD_TYPE_OSN) + qeth_l2_process_vlans(card); + + netif_tx_disable(card->dev); + + rc = qeth_init_qdio_queues(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + rc = -ENODEV; + goto out_remove; + } + card->state = CARD_STATE_SOFTSETUP; + if (card->lan_online) + netif_carrier_on(card->dev); + else + netif_carrier_off(card->dev); + + qeth_set_allowed_threads(card, 0xffffffff, 0); + + qeth_enable_hw_features(card->dev); + if (recover_flag == CARD_STATE_RECOVER) { + if (recovery_mode && + card->info.type != QETH_CARD_TYPE_OSN) { + __qeth_l2_open(card->dev); + 
qeth_l2_set_rx_mode(card->dev); + } else { + rtnl_lock(); + dev_open(card->dev); + rtnl_unlock(); + } + } + /* let user_space know that device is online */ + kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); + mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); + return 0; + +out_remove: + qeth_l2_stop_card(card, 0); + ccw_device_set_offline(CARD_DDEV(card)); + ccw_device_set_offline(CARD_WDEV(card)); + ccw_device_set_offline(CARD_RDEV(card)); + qdio_free(CARD_DDEV(card)); + if (recover_flag == CARD_STATE_RECOVER) + card->state = CARD_STATE_RECOVER; + else + card->state = CARD_STATE_DOWN; + mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); + return rc; +} + +static int qeth_l2_set_online(struct ccwgroup_device *gdev) +{ + return __qeth_l2_set_online(gdev, 0); +} + +static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev, + int recovery_mode) +{ + struct qeth_card *card = dev_get_drvdata(&cgdev->dev); + int rc = 0, rc2 = 0, rc3 = 0; + enum qeth_card_states recover_flag; + + mutex_lock(&card->discipline_mutex); + mutex_lock(&card->conf_mutex); + QETH_DBF_TEXT(SETUP, 3, "setoffl"); + QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); + + netif_carrier_off(card->dev); + recover_flag = card->state; + if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) { + qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); + card->info.hwtrap = 1; + } + qeth_l2_stop_card(card, recovery_mode); + rc = ccw_device_set_offline(CARD_DDEV(card)); + rc2 = ccw_device_set_offline(CARD_WDEV(card)); + rc3 = ccw_device_set_offline(CARD_RDEV(card)); + if (!rc) + rc = (rc2) ? rc2 : rc3; + if (rc) + QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + qdio_free(CARD_DDEV(card)); + if (recover_flag == CARD_STATE_UP) + card->state = CARD_STATE_RECOVER; + /* let user_space know that device is offline */ + kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); + mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); + return 0; +} + +static int qeth_l2_set_offline(struct ccwgroup_device *cgdev) +{ + return __qeth_l2_set_offline(cgdev, 0); +} + +static int qeth_l2_recover(void *ptr) +{ + struct qeth_card *card; + int rc = 0; + + card = (struct qeth_card *) ptr; + QETH_CARD_TEXT(card, 2, "recover1"); + if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) + return 0; + QETH_CARD_TEXT(card, 2, "recover2"); + dev_warn(&card->gdev->dev, + "A recovery process has been started for the device\n"); + qeth_set_recovery_task(card); + __qeth_l2_set_offline(card->gdev, 1); + rc = __qeth_l2_set_online(card->gdev, 1); + if (!rc) + dev_info(&card->gdev->dev, + "Device successfully recovered!\n"); + else { + qeth_close_dev(card); + dev_warn(&card->gdev->dev, "The qeth device driver " + "failed to recover an error on the device\n"); + } + qeth_clear_recovery_task(card); + qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); + qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); + return 0; +} + +static int __init qeth_l2_init(void) +{ + pr_info("register layer 2 discipline\n"); + return 0; +} + +static void __exit qeth_l2_exit(void) +{ + pr_info("unregister layer 2 discipline\n"); +} + +static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + + netif_device_detach(card->dev); + qeth_set_allowed_threads(card, 0, 1); + wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); + if (gdev->state == CCWGROUP_OFFLINE) + return 0; + if (card->state == CARD_STATE_UP) { + if (card->info.hwtrap) + qeth_hw_trap(card, 
QETH_DIAGS_TRAP_DISARM); + __qeth_l2_set_offline(card->gdev, 1); + } else + __qeth_l2_set_offline(card->gdev, 0); + return 0; +} + +static int qeth_l2_pm_resume(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + int rc = 0; + + if (gdev->state == CCWGROUP_OFFLINE) + goto out; + + if (card->state == CARD_STATE_RECOVER) { + rc = __qeth_l2_set_online(card->gdev, 1); + if (rc) { + rtnl_lock(); + dev_close(card->dev); + rtnl_unlock(); + } + } else + rc = __qeth_l2_set_online(card->gdev, 0); +out: + qeth_set_allowed_threads(card, 0xffffffff, 0); + netif_device_attach(card->dev); + if (rc) + dev_warn(&card->gdev->dev, "The qeth device driver " + "failed to recover an error on the device\n"); + return rc; +} + +/* Returns zero if the command is successfully "consumed" */ +static int qeth_l2_control_event(struct qeth_card *card, + struct qeth_ipa_cmd *cmd) +{ + switch (cmd->hdr.command) { + case IPA_CMD_SETBRIDGEPORT_OSA: + case IPA_CMD_SETBRIDGEPORT_IQD: + if (cmd->data.sbp.hdr.command_code == + IPA_SBP_BRIDGE_PORT_STATE_CHANGE) { + qeth_bridge_state_change(card, cmd); + return 0; + } else + return 1; + case IPA_CMD_ADDRESS_CHANGE_NOTIF: + qeth_bridge_host_event(card, cmd); + return 0; + default: + return 1; + } +} + +struct qeth_discipline qeth_l2_discipline = { + .devtype = &qeth_l2_devtype, + .process_rx_buffer = qeth_l2_process_inbound_buffer, + .recover = qeth_l2_recover, + .setup = qeth_l2_probe_device, + .remove = qeth_l2_remove_device, + .set_online = qeth_l2_set_online, + .set_offline = qeth_l2_set_offline, + .freeze = qeth_l2_pm_suspend, + .thaw = qeth_l2_pm_resume, + .restore = qeth_l2_pm_resume, + .do_ioctl = NULL, + .control_event_handler = qeth_l2_control_event, +}; +EXPORT_SYMBOL_GPL(qeth_l2_discipline); + +static int qeth_osn_send_control_data(struct qeth_card *card, int len, + struct qeth_cmd_buffer *iob) +{ + struct qeth_channel *channel = iob->channel; + unsigned long flags; + int rc = 0; + + QETH_CARD_TEXT(card, 5, "osndctrd"); + + wait_event(card->wait_q, + atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); + qeth_prepare_control_data(card, len, iob); + QETH_CARD_TEXT(card, 6, "osnoirqp"); + spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); + rc = ccw_device_start_timeout(channel->ccwdev, channel->ccw, + (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT); + spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); + if (rc) { + QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " + "ccw_device_start rc = %i\n", rc); + QETH_CARD_TEXT_(card, 2, " err%d", rc); + qeth_release_buffer(channel, iob); + atomic_set(&channel->irq_pending, 0); + wake_up(&card->wait_q); + } + return rc; +} + +static int qeth_osn_send_ipa_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob, int data_len) +{ + u16 s1, s2; + + QETH_CARD_TEXT(card, 4, "osndipa"); + + qeth_prepare_ipa_cmd(card, iob); + s1 = (u16)(IPA_PDU_HEADER_SIZE + data_len); + s2 = (u16)data_len; + memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &s1, 2); + memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &s2, 2); + memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &s2, 2); + memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &s2, 2); + return qeth_osn_send_control_data(card, s1, iob); +} + +int qeth_osn_assist(struct net_device *dev, void *data, int data_len) +{ + struct qeth_cmd_buffer *iob; + struct qeth_card *card; + + if (!dev) + return -ENODEV; + card = dev->ml_priv; + if (!card) + return -ENODEV; + QETH_CARD_TEXT(card, 2, "osnsdmc"); + if (!qeth_card_hw_is_reachable(card)) + return -ENODEV; + iob = 
qeth_wait_for_buffer(&card->write); + memcpy(__ipa_cmd(iob), data, data_len); + return qeth_osn_send_ipa_cmd(card, iob, data_len); +} +EXPORT_SYMBOL(qeth_osn_assist); + +int qeth_osn_register(unsigned char *read_dev_no, struct net_device **dev, + int (*assist_cb)(struct net_device *, void *), + int (*data_cb)(struct sk_buff *)) +{ + struct qeth_card *card; + + *dev = qeth_l2_netdev_by_devno(read_dev_no); + if (*dev == NULL) + return -ENODEV; + card = (*dev)->ml_priv; + if (!card) + return -ENODEV; + QETH_CARD_TEXT(card, 2, "osnreg"); + if ((assist_cb == NULL) || (data_cb == NULL)) + return -EINVAL; + card->osn_info.assist_cb = assist_cb; + card->osn_info.data_cb = data_cb; + return 0; +} +EXPORT_SYMBOL(qeth_osn_register); + +void qeth_osn_deregister(struct net_device *dev) +{ + struct qeth_card *card; + + if (!dev) + return; + card = dev->ml_priv; + if (!card) + return; + QETH_CARD_TEXT(card, 2, "osndereg"); + card->osn_info.assist_cb = NULL; + card->osn_info.data_cb = NULL; + return; +} +EXPORT_SYMBOL(qeth_osn_deregister); + +/* SETBRIDGEPORT support, async notifications */ + +enum qeth_an_event_type {anev_reg_unreg, anev_abort, anev_reset}; + +/** + * qeth_bridge_emit_host_event() - bridgeport address change notification + * @card: qeth_card structure pointer, for udev events. + * @evtype: "normal" register/unregister, or abort, or reset. For abort + * and reset token and addr_lnid are unused and may be NULL. + * @code: event bitmask: high order bit 0x80 value 1 means removal of an + * object, 0 - addition of an object. + * 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC. + * @token: "network token" structure identifying physical address of the port. + * @addr_lnid: pointer to structure with MAC address and VLAN ID. + * + * This function is called when registrations and deregistrations are + * reported by the hardware, and also when notifications are enabled - + * for all currently registered addresses. + */ +static void qeth_bridge_emit_host_event(struct qeth_card *card, + enum qeth_an_event_type evtype, + u8 code, struct net_if_token *token, struct mac_addr_lnid *addr_lnid) +{ + char str[7][32]; + char *env[8]; + int i = 0; + + switch (evtype) { + case anev_reg_unreg: + snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=%s", + (code & IPA_ADDR_CHANGE_CODE_REMOVAL) + ? 
"deregister" : "register"); + env[i] = str[i]; i++; + if (code & IPA_ADDR_CHANGE_CODE_VLANID) { + snprintf(str[i], sizeof(str[i]), "VLAN=%d", + addr_lnid->lnid); + env[i] = str[i]; i++; + } + if (code & IPA_ADDR_CHANGE_CODE_MACADDR) { + snprintf(str[i], sizeof(str[i]), "MAC=%pM", + addr_lnid->mac); + env[i] = str[i]; i++; + } + snprintf(str[i], sizeof(str[i]), "NTOK_BUSID=%x.%x.%04x", + token->cssid, token->ssid, token->devnum); + env[i] = str[i]; i++; + snprintf(str[i], sizeof(str[i]), "NTOK_IID=%02x", token->iid); + env[i] = str[i]; i++; + snprintf(str[i], sizeof(str[i]), "NTOK_CHPID=%02x", + token->chpid); + env[i] = str[i]; i++; + snprintf(str[i], sizeof(str[i]), "NTOK_CHID=%04x", token->chid); + env[i] = str[i]; i++; + break; + case anev_abort: + snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=abort"); + env[i] = str[i]; i++; + break; + case anev_reset: + snprintf(str[i], sizeof(str[i]), "BRIDGEDHOST=reset"); + env[i] = str[i]; i++; + break; + } + env[i] = NULL; + kobject_uevent_env(&card->gdev->dev.kobj, KOBJ_CHANGE, env); +} + +struct qeth_bridge_state_data { + struct work_struct worker; + struct qeth_card *card; + struct qeth_sbp_state_change qports; +}; + +static void qeth_bridge_state_change_worker(struct work_struct *work) +{ + struct qeth_bridge_state_data *data = + container_of(work, struct qeth_bridge_state_data, worker); + /* We are only interested in the first entry - local port */ + struct qeth_sbp_port_entry *entry = &data->qports.entry[0]; + char env_locrem[32]; + char env_role[32]; + char env_state[32]; + char *env[] = { + env_locrem, + env_role, + env_state, + NULL + }; + + /* Role should not change by itself, but if it did, */ + /* information from the hardware is authoritative. */ + mutex_lock(&data->card->conf_mutex); + data->card->options.sbp.role = entry->role; + mutex_unlock(&data->card->conf_mutex); + + snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange"); + snprintf(env_role, sizeof(env_role), "ROLE=%s", + (entry->role == QETH_SBP_ROLE_NONE) ? "none" : + (entry->role == QETH_SBP_ROLE_PRIMARY) ? "primary" : + (entry->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" : + "<INVALID>"); + snprintf(env_state, sizeof(env_state), "STATE=%s", + (entry->state == QETH_SBP_STATE_INACTIVE) ? "inactive" : + (entry->state == QETH_SBP_STATE_STANDBY) ? "standby" : + (entry->state == QETH_SBP_STATE_ACTIVE) ? 
"active" : + "<INVALID>"); + kobject_uevent_env(&data->card->gdev->dev.kobj, + KOBJ_CHANGE, env); + kfree(data); +} + +static void qeth_bridge_state_change(struct qeth_card *card, + struct qeth_ipa_cmd *cmd) +{ + struct qeth_sbp_state_change *qports = + &cmd->data.sbp.data.state_change; + struct qeth_bridge_state_data *data; + int extrasize; + + QETH_CARD_TEXT(card, 2, "brstchng"); + if (qports->num_entries == 0) { + QETH_CARD_TEXT(card, 2, "BPempty"); + return; + } + if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { + QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length); + return; + } + extrasize = sizeof(struct qeth_sbp_port_entry) * qports->num_entries; + data = kzalloc(sizeof(struct qeth_bridge_state_data) + extrasize, + GFP_ATOMIC); + if (!data) { + QETH_CARD_TEXT(card, 2, "BPSalloc"); + return; + } + INIT_WORK(&data->worker, qeth_bridge_state_change_worker); + data->card = card; + memcpy(&data->qports, qports, + sizeof(struct qeth_sbp_state_change) + extrasize); + queue_work(qeth_wq, &data->worker); +} + +struct qeth_bridge_host_data { + struct work_struct worker; + struct qeth_card *card; + struct qeth_ipacmd_addr_change hostevs; +}; + +static void qeth_bridge_host_event_worker(struct work_struct *work) +{ + struct qeth_bridge_host_data *data = + container_of(work, struct qeth_bridge_host_data, worker); + int i; + + if (data->hostevs.lost_event_mask) { + dev_info(&data->card->gdev->dev, +"Address notification from the Bridge Port stopped %s (%s)\n", + data->card->dev->name, + (data->hostevs.lost_event_mask == 0x01) + ? "Overflow" + : (data->hostevs.lost_event_mask == 0x02) + ? "Bridge port state change" + : "Unknown reason"); + mutex_lock(&data->card->conf_mutex); + data->card->options.sbp.hostnotification = 0; + mutex_unlock(&data->card->conf_mutex); + qeth_bridge_emit_host_event(data->card, anev_abort, + 0, NULL, NULL); + } else + for (i = 0; i < data->hostevs.num_entries; i++) { + struct qeth_ipacmd_addr_change_entry *entry = + &data->hostevs.entry[i]; + qeth_bridge_emit_host_event(data->card, + anev_reg_unreg, + entry->change_code, + &entry->token, &entry->addr_lnid); + } + kfree(data); +} + +static void qeth_bridge_host_event(struct qeth_card *card, + struct qeth_ipa_cmd *cmd) +{ + struct qeth_ipacmd_addr_change *hostevs = + &cmd->data.addrchange; + struct qeth_bridge_host_data *data; + int extrasize; + + QETH_CARD_TEXT(card, 2, "brhostev"); + if (cmd->hdr.return_code != 0x0000) { + if (cmd->hdr.return_code == 0x0010) { + if (hostevs->lost_event_mask == 0x00) + hostevs->lost_event_mask = 0xff; + } else { + QETH_CARD_TEXT_(card, 2, "BPHe%04x", + cmd->hdr.return_code); + return; + } + } + extrasize = sizeof(struct qeth_ipacmd_addr_change_entry) * + hostevs->num_entries; + data = kzalloc(sizeof(struct qeth_bridge_host_data) + extrasize, + GFP_ATOMIC); + if (!data) { + QETH_CARD_TEXT(card, 2, "BPHalloc"); + return; + } + INIT_WORK(&data->worker, qeth_bridge_host_event_worker); + data->card = card; + memcpy(&data->hostevs, hostevs, + sizeof(struct qeth_ipacmd_addr_change) + extrasize); + queue_work(qeth_wq, &data->worker); +} + +/* SETBRIDGEPORT support; sending commands */ + +struct _qeth_sbp_cbctl { + u16 ipa_rc; + u16 cmd_rc; + union { + u32 supported; + struct { + enum qeth_sbp_roles *role; + enum qeth_sbp_states *state; + } qports; + } data; +}; + +/** + * qeth_bridgeport_makerc() - derive "traditional" error from hardware codes. + * @card: qeth_card structure pointer, for debug messages. + * @cbctl: state structure with hardware return codes. 
+ * @setcmd: IPA command code + * + * Returns negative errno-compatible error indication or 0 on success. + */ +static int qeth_bridgeport_makerc(struct qeth_card *card, + struct _qeth_sbp_cbctl *cbctl, enum qeth_ipa_sbp_cmd setcmd) +{ + int rc; + int is_iqd = (card->info.type == QETH_CARD_TYPE_IQD); + + if ((is_iqd && (cbctl->ipa_rc == IPA_RC_SUCCESS)) || + (!is_iqd && (cbctl->ipa_rc == cbctl->cmd_rc))) + switch (cbctl->cmd_rc) { + case IPA_RC_SUCCESS: + rc = 0; + break; + case IPA_RC_L2_UNSUPPORTED_CMD: + case IPA_RC_UNSUPPORTED_COMMAND: + rc = -EOPNOTSUPP; + break; + case IPA_RC_SBP_OSA_NOT_CONFIGURED: + case IPA_RC_SBP_IQD_NOT_CONFIGURED: + rc = -ENODEV; /* maybe not the best code here? */ + dev_err(&card->gdev->dev, + "The device is not configured as a Bridge Port\n"); + break; + case IPA_RC_SBP_OSA_OS_MISMATCH: + case IPA_RC_SBP_IQD_OS_MISMATCH: + rc = -EPERM; + dev_err(&card->gdev->dev, + "A Bridge Port is already configured by a different operating system\n"); + break; + case IPA_RC_SBP_OSA_ANO_DEV_PRIMARY: + case IPA_RC_SBP_IQD_ANO_DEV_PRIMARY: + switch (setcmd) { + case IPA_SBP_SET_PRIMARY_BRIDGE_PORT: + rc = -EEXIST; + dev_err(&card->gdev->dev, + "The LAN already has a primary Bridge Port\n"); + break; + case IPA_SBP_SET_SECONDARY_BRIDGE_PORT: + rc = -EBUSY; + dev_err(&card->gdev->dev, + "The device is already a primary Bridge Port\n"); + break; + default: + rc = -EIO; + } + break; + case IPA_RC_SBP_OSA_CURRENT_SECOND: + case IPA_RC_SBP_IQD_CURRENT_SECOND: + rc = -EBUSY; + dev_err(&card->gdev->dev, + "The device is already a secondary Bridge Port\n"); + break; + case IPA_RC_SBP_OSA_LIMIT_SECOND: + case IPA_RC_SBP_IQD_LIMIT_SECOND: + rc = -EEXIST; + dev_err(&card->gdev->dev, + "The LAN cannot have more secondary Bridge Ports\n"); + break; + case IPA_RC_SBP_OSA_CURRENT_PRIMARY: + case IPA_RC_SBP_IQD_CURRENT_PRIMARY: + rc = -EBUSY; + dev_err(&card->gdev->dev, + "The device is already a primary Bridge Port\n"); + break; + case IPA_RC_SBP_OSA_NOT_AUTHD_BY_ZMAN: + case IPA_RC_SBP_IQD_NOT_AUTHD_BY_ZMAN: + rc = -EACCES; + dev_err(&card->gdev->dev, + "The device is not authorized to be a Bridge Port\n"); + break; + default: + rc = -EIO; + } + else + switch (cbctl->ipa_rc) { + case IPA_RC_NOTSUPP: + rc = -EOPNOTSUPP; + break; + case IPA_RC_UNSUPPORTED_COMMAND: + rc = -EOPNOTSUPP; + break; + default: + rc = -EIO; + } + + if (rc) { + QETH_CARD_TEXT_(card, 2, "SBPi%04x", cbctl->ipa_rc); + QETH_CARD_TEXT_(card, 2, "SBPc%04x", cbctl->cmd_rc); + } + return rc; +} + +static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card, + enum qeth_ipa_sbp_cmd sbp_cmd, + unsigned int cmd_length) +{ + enum qeth_ipa_cmds ipa_cmd = (card->info.type == QETH_CARD_TYPE_IQD) ? 
+ IPA_CMD_SETBRIDGEPORT_IQD : + IPA_CMD_SETBRIDGEPORT_OSA; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + iob = qeth_get_ipacmd_buffer(card, ipa_cmd, 0); + if (!iob) + return iob; + cmd = __ipa_cmd(iob); + cmd->data.sbp.hdr.cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) + + cmd_length; + cmd->data.sbp.hdr.command_code = sbp_cmd; + cmd->data.sbp.hdr.used_total = 1; + cmd->data.sbp.hdr.seq_no = 1; + return iob; +} + +static int qeth_bridgeport_query_support_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param; + QETH_CARD_TEXT(card, 2, "brqsupcb"); + cbctl->ipa_rc = cmd->hdr.return_code; + cbctl->cmd_rc = cmd->data.sbp.hdr.return_code; + if ((cbctl->ipa_rc == 0) && (cbctl->cmd_rc == 0)) { + cbctl->data.supported = + cmd->data.sbp.data.query_cmds_supp.supported_cmds; + } else { + cbctl->data.supported = 0; + } + return 0; +} + +/** + * qeth_bridgeport_query_support() - store bitmask of supported subfunctions. + * @card: qeth_card structure pointer. + * + * Sets bitmask of supported setbridgeport subfunctions in the qeth_card + * structure: card->options.sbp.supported_funcs. + */ +static void qeth_bridgeport_query_support(struct qeth_card *card) +{ + struct qeth_cmd_buffer *iob; + struct _qeth_sbp_cbctl cbctl; + + QETH_CARD_TEXT(card, 2, "brqsuppo"); + iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_COMMANDS_SUPPORTED, + sizeof(struct qeth_sbp_query_cmds_supp)); + if (!iob) + return; + if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb, + (void *)&cbctl) || + qeth_bridgeport_makerc(card, &cbctl, + IPA_SBP_QUERY_COMMANDS_SUPPORTED)) { + /* non-zero makerc signifies failure, and produce messages */ + card->options.sbp.role = QETH_SBP_ROLE_NONE; + return; + } + card->options.sbp.supported_funcs = cbctl.data.supported; +} + +static int qeth_bridgeport_query_ports_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_sbp_query_ports *qports = &cmd->data.sbp.data.query_ports; + struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param; + + QETH_CARD_TEXT(card, 2, "brqprtcb"); + cbctl->ipa_rc = cmd->hdr.return_code; + cbctl->cmd_rc = cmd->data.sbp.hdr.return_code; + if ((cbctl->ipa_rc != 0) || (cbctl->cmd_rc != 0)) + return 0; + if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) { + cbctl->cmd_rc = 0xffff; + QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length); + return 0; + } + /* first entry contains the state of the local port */ + if (qports->num_entries > 0) { + if (cbctl->data.qports.role) + *cbctl->data.qports.role = qports->entry[0].role; + if (cbctl->data.qports.state) + *cbctl->data.qports.state = qports->entry[0].state; + } + return 0; +} + +/** + * qeth_bridgeport_query_ports() - query local bridgeport status. + * @card: qeth_card structure pointer. + * @role: Role of the port: 0-none, 1-primary, 2-secondary. + * @state: State of the port: 0-inactive, 1-standby, 2-active. + * + * Returns negative errno-compatible error indication or 0 on success. + * + * 'role' and 'state' are not updated in case of hardware operation failure. 
+ */ +int qeth_bridgeport_query_ports(struct qeth_card *card, + enum qeth_sbp_roles *role, enum qeth_sbp_states *state) +{ + int rc = 0; + struct qeth_cmd_buffer *iob; + struct _qeth_sbp_cbctl cbctl = { + .data = { + .qports = { + .role = role, + .state = state, + }, + }, + }; + + QETH_CARD_TEXT(card, 2, "brqports"); + if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS)) + return -EOPNOTSUPP; + iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_BRIDGE_PORTS, 0); + if (!iob) + return -ENOMEM; + rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb, + (void *)&cbctl); + if (rc < 0) + return rc; + return qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS); +} + +static int qeth_bridgeport_set_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; + struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param; + QETH_CARD_TEXT(card, 2, "brsetrcb"); + cbctl->ipa_rc = cmd->hdr.return_code; + cbctl->cmd_rc = cmd->data.sbp.hdr.return_code; + return 0; +} + +/** + * qeth_bridgeport_setrole() - Assign primary role to the port. + * @card: qeth_card structure pointer. + * @role: Role to assign. + * + * Returns negative errno-compatible error indication or 0 on success. + */ +int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) +{ + int rc = 0; + int cmdlength; + struct qeth_cmd_buffer *iob; + struct _qeth_sbp_cbctl cbctl; + enum qeth_ipa_sbp_cmd setcmd; + + QETH_CARD_TEXT(card, 2, "brsetrol"); + switch (role) { + case QETH_SBP_ROLE_NONE: + setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE; + cmdlength = sizeof(struct qeth_sbp_reset_role); + break; + case QETH_SBP_ROLE_PRIMARY: + setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT; + cmdlength = sizeof(struct qeth_sbp_set_primary); + break; + case QETH_SBP_ROLE_SECONDARY: + setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT; + cmdlength = sizeof(struct qeth_sbp_set_secondary); + break; + default: + return -EINVAL; + } + if (!(card->options.sbp.supported_funcs & setcmd)) + return -EOPNOTSUPP; + iob = qeth_sbp_build_cmd(card, setcmd, cmdlength); + if (!iob) + return -ENOMEM; + rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, + (void *)&cbctl); + if (rc < 0) + return rc; + return qeth_bridgeport_makerc(card, &cbctl, setcmd); +} + +/** + * qeth_anset_makerc() - derive "traditional" error from hardware codes. + * @card: qeth_card structure pointer, for debug messages. + * + * Returns negative errno-compatible error indication or 0 on success. 
+ */ +static int qeth_anset_makerc(struct qeth_card *card, int pnso_rc, u16 response) +{ + int rc; + + if (pnso_rc == 0) + switch (response) { + case 0x0001: + rc = 0; + break; + case 0x0004: + case 0x0100: + case 0x0106: + rc = -EOPNOTSUPP; + dev_err(&card->gdev->dev, + "Setting address notification failed\n"); + break; + case 0x0107: + rc = -EAGAIN; + break; + default: + rc = -EIO; + } + else + rc = -EIO; + + if (rc) { + QETH_CARD_TEXT_(card, 2, "SBPp%04x", pnso_rc); + QETH_CARD_TEXT_(card, 2, "SBPr%04x", response); + } + return rc; +} + +static void qeth_bridgeport_an_set_cb(void *priv, + enum qdio_brinfo_entry_type type, void *entry) +{ + struct qeth_card *card = (struct qeth_card *)priv; + struct qdio_brinfo_entry_l2 *l2entry; + u8 code; + + if (type != l2_addr_lnid) { + WARN_ON_ONCE(1); + return; + } + + l2entry = (struct qdio_brinfo_entry_l2 *)entry; + code = IPA_ADDR_CHANGE_CODE_MACADDR; + if (l2entry->addr_lnid.lnid < VLAN_N_VID) + code |= IPA_ADDR_CHANGE_CODE_VLANID; + qeth_bridge_emit_host_event(card, anev_reg_unreg, code, + (struct net_if_token *)&l2entry->nit, + (struct mac_addr_lnid *)&l2entry->addr_lnid); +} + +/** + * qeth_bridgeport_an_set() - Enable or disable bridgeport address notification + * @card: qeth_card structure pointer. + * @enable: 0 - disable, non-zero - enable notifications + * + * Returns negative errno-compatible error indication or 0 on success. + * + * On enable, emits a series of address notifications udev events for all + * currently registered hosts. + */ +int qeth_bridgeport_an_set(struct qeth_card *card, int enable) +{ + int rc; + u16 response; + struct ccw_device *ddev; + struct subchannel_id schid; + + if (!card) + return -EINVAL; + if (!card->options.sbp.supported_funcs) + return -EOPNOTSUPP; + ddev = CARD_DDEV(card); + ccw_device_get_schid(ddev, &schid); + + if (enable) { + qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL); + rc = qdio_pnso_brinfo(schid, 1, &response, + qeth_bridgeport_an_set_cb, card); + } else + rc = qdio_pnso_brinfo(schid, 0, &response, NULL, NULL); + return qeth_anset_makerc(card, rc, response); +} + +static bool qeth_bridgeport_is_in_use(struct qeth_card *card) +{ + return (card->options.sbp.role || card->options.sbp.reflect_promisc || + card->options.sbp.hostnotification); +} + +/* VNIC Characteristics support */ + +/* handle VNICC IPA command return codes; convert to error codes */ +static int qeth_l2_vnicc_makerc(struct qeth_card *card, int ipa_rc) +{ + int rc; + + switch (ipa_rc) { + case IPA_RC_SUCCESS: + return ipa_rc; + case IPA_RC_L2_UNSUPPORTED_CMD: + case IPA_RC_NOTSUPP: + rc = -EOPNOTSUPP; + break; + case IPA_RC_VNICC_OOSEQ: + rc = -EALREADY; + break; + case IPA_RC_VNICC_VNICBP: + rc = -EBUSY; + break; + case IPA_RC_L2_ADDR_TABLE_FULL: + rc = -ENOSPC; + break; + case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP: + rc = -EACCES; + break; + default: + rc = -EIO; + } + + QETH_CARD_TEXT_(card, 2, "err%04x", ipa_rc); + return rc; +} + +/* generic VNICC request call back control */ +struct _qeth_l2_vnicc_request_cbctl { + u32 sub_cmd; + struct { + u32 vnic_char; + u32 timeout; + } param; + struct { + union{ + u32 *sup_cmds; + u32 *timeout; + }; + } result; +}; + +/* generic VNICC request call back */ +static int qeth_l2_vnicc_request_cb(struct qeth_card *card, + struct qeth_reply *reply, + unsigned long data) +{ + struct _qeth_l2_vnicc_request_cbctl *cbctl = + (struct _qeth_l2_vnicc_request_cbctl *) reply->param; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; + struct qeth_ipacmd_vnicc *rep = &cmd->data.vnicc; 
+ + QETH_CARD_TEXT(card, 2, "vniccrcb"); + if (cmd->hdr.return_code) + return 0; + /* return results to caller */ + card->options.vnicc.sup_chars = rep->hdr.sup; + card->options.vnicc.cur_chars = rep->hdr.cur; + + if (cbctl->sub_cmd == IPA_VNICC_QUERY_CMDS) + *cbctl->result.sup_cmds = rep->query_cmds.sup_cmds; + + if (cbctl->sub_cmd == IPA_VNICC_GET_TIMEOUT) + *cbctl->result.timeout = rep->getset_timeout.timeout; + + return 0; +} + +/* generic VNICC request */ +static int qeth_l2_vnicc_request(struct qeth_card *card, + struct _qeth_l2_vnicc_request_cbctl *cbctl) +{ + struct qeth_ipacmd_vnicc *req; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + int rc; + + QETH_CARD_TEXT(card, 2, "vniccreq"); + + /* get new buffer for request */ + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_VNICC, 0); + if (!iob) + return -ENOMEM; + + /* create header for request */ + cmd = __ipa_cmd(iob); + req = &cmd->data.vnicc; + + /* create sub command header for request */ + req->sub_hdr.data_length = sizeof(req->sub_hdr); + req->sub_hdr.sub_command = cbctl->sub_cmd; + + /* create sub command specific request fields */ + switch (cbctl->sub_cmd) { + case IPA_VNICC_QUERY_CHARS: + break; + case IPA_VNICC_QUERY_CMDS: + req->sub_hdr.data_length += sizeof(req->query_cmds); + req->query_cmds.vnic_char = cbctl->param.vnic_char; + break; + case IPA_VNICC_ENABLE: + case IPA_VNICC_DISABLE: + req->sub_hdr.data_length += sizeof(req->set_char); + req->set_char.vnic_char = cbctl->param.vnic_char; + break; + case IPA_VNICC_SET_TIMEOUT: + req->getset_timeout.timeout = cbctl->param.timeout; + /* fallthrough */ + case IPA_VNICC_GET_TIMEOUT: + req->sub_hdr.data_length += sizeof(req->getset_timeout); + req->getset_timeout.vnic_char = cbctl->param.vnic_char; + break; + default: + qeth_release_buffer(iob->channel, iob); + return -EOPNOTSUPP; + } + + /* send request */ + rc = qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, + (void *) cbctl); + + return qeth_l2_vnicc_makerc(card, rc); +} + +/* VNICC query VNIC characteristics request */ +static int qeth_l2_vnicc_query_chars(struct qeth_card *card) +{ + struct _qeth_l2_vnicc_request_cbctl cbctl; + + /* prepare callback control */ + cbctl.sub_cmd = IPA_VNICC_QUERY_CHARS; + + QETH_CARD_TEXT(card, 2, "vniccqch"); + return qeth_l2_vnicc_request(card, &cbctl); +} + +/* VNICC query sub commands request */ +static int qeth_l2_vnicc_query_cmds(struct qeth_card *card, u32 vnic_char, + u32 *sup_cmds) +{ + struct _qeth_l2_vnicc_request_cbctl cbctl; + + /* prepare callback control */ + cbctl.sub_cmd = IPA_VNICC_QUERY_CMDS; + cbctl.param.vnic_char = vnic_char; + cbctl.result.sup_cmds = sup_cmds; + + QETH_CARD_TEXT(card, 2, "vniccqcm"); + return qeth_l2_vnicc_request(card, &cbctl); +} + +/* VNICC enable/disable characteristic request */ +static int qeth_l2_vnicc_set_char(struct qeth_card *card, u32 vnic_char, + u32 cmd) +{ + struct _qeth_l2_vnicc_request_cbctl cbctl; + + /* prepare callback control */ + cbctl.sub_cmd = cmd; + cbctl.param.vnic_char = vnic_char; + + QETH_CARD_TEXT(card, 2, "vniccedc"); + return qeth_l2_vnicc_request(card, &cbctl); +} + +/* VNICC get/set timeout for characteristic request */ +static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc, + u32 cmd, u32 *timeout) +{ + struct _qeth_l2_vnicc_request_cbctl cbctl; + + /* prepare callback control */ + cbctl.sub_cmd = cmd; + cbctl.param.vnic_char = vnicc; + if (cmd == IPA_VNICC_SET_TIMEOUT) + cbctl.param.timeout = *timeout; + if (cmd == IPA_VNICC_GET_TIMEOUT) + cbctl.result.timeout = timeout; + + 
QETH_CARD_TEXT(card, 2, "vniccgst"); + return qeth_l2_vnicc_request(card, &cbctl); +} + +/* set current VNICC flag state; called from sysfs store function */ +int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state) +{ + int rc = 0; + u32 cmd; + + QETH_CARD_TEXT(card, 2, "vniccsch"); + + /* check if characteristic and enable/disable are supported */ + if (!(card->options.vnicc.sup_chars & vnicc) || + !(card->options.vnicc.set_char_sup & vnicc)) + return -EOPNOTSUPP; + + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + + /* set enable/disable command and store wanted characteristic */ + if (state) { + cmd = IPA_VNICC_ENABLE; + card->options.vnicc.wanted_chars |= vnicc; + } else { + cmd = IPA_VNICC_DISABLE; + card->options.vnicc.wanted_chars &= ~vnicc; + } + + /* do we need to do anything? */ + if (card->options.vnicc.cur_chars == card->options.vnicc.wanted_chars) + return rc; + + /* if card is not ready, simply stop here */ + if (!qeth_card_hw_is_reachable(card)) { + if (state) + card->options.vnicc.cur_chars |= vnicc; + else + card->options.vnicc.cur_chars &= ~vnicc; + return rc; + } + + rc = qeth_l2_vnicc_set_char(card, vnicc, cmd); + if (rc) + card->options.vnicc.wanted_chars = + card->options.vnicc.cur_chars; + else { + /* successful online VNICC change; handle special cases */ + if (state && vnicc == QETH_VNICC_RX_BCAST) + card->options.vnicc.rx_bcast_enabled = true; + if (!state && vnicc == QETH_VNICC_LEARNING) + qeth_l2_vnicc_recover_timeout(card, vnicc, + &card->options.vnicc.learning_timeout); + } + + return rc; +} + +/* get current VNICC flag state; called from sysfs show function */ +int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 2, "vniccgch"); + + /* check if characteristic is supported */ + if (!(card->options.vnicc.sup_chars & vnicc)) + return -EOPNOTSUPP; + + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + + /* if card is ready, query current VNICC state */ + if (qeth_card_hw_is_reachable(card)) + rc = qeth_l2_vnicc_query_chars(card); + + *state = (card->options.vnicc.cur_chars & vnicc) ? true : false; + return rc; +} + +/* set VNICC timeout; called from sysfs store function. Currently, only learning + * supports timeout + */ +int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 2, "vniccsto"); + + /* check if characteristic and set_timeout are supported */ + if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) || + !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING)) + return -EOPNOTSUPP; + + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + + /* do we need to do anything? */ + if (card->options.vnicc.learning_timeout == timeout) + return rc; + + /* if card is not ready, simply store the value internally and return */ + if (!qeth_card_hw_is_reachable(card)) { + card->options.vnicc.learning_timeout = timeout; + return rc; + } + + /* send timeout value to card; if successful, store value internally */ + rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING, + IPA_VNICC_SET_TIMEOUT, &timeout); + if (!rc) + card->options.vnicc.learning_timeout = timeout; + + return rc; +} + +/* get current VNICC timeout; called from sysfs show function. 
Currently, only + * learning supports timeout + */ +int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 2, "vniccgto"); + + /* check if characteristic and get_timeout are supported */ + if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) || + !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING)) + return -EOPNOTSUPP; + + if (qeth_bridgeport_is_in_use(card)) + return -EBUSY; + + /* if card is ready, get timeout. Otherwise, just return stored value */ + *timeout = card->options.vnicc.learning_timeout; + if (qeth_card_hw_is_reachable(card)) + rc = qeth_l2_vnicc_getset_timeout(card, QETH_VNICC_LEARNING, + IPA_VNICC_GET_TIMEOUT, + timeout); + + return rc; +} + +/* check if VNICC is currently enabled */ +bool qeth_l2_vnicc_is_in_use(struct qeth_card *card) +{ + if (!card->options.vnicc.sup_chars) + return false; + /* default values are only OK if rx_bcast was not enabled by user + * or the card is offline. + */ + if (card->options.vnicc.cur_chars == QETH_VNICC_DEFAULT) { + if (!card->options.vnicc.rx_bcast_enabled || + !qeth_card_hw_is_reachable(card)) + return false; + } + return true; +} + +/* recover user timeout setting */ +static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc, + u32 *timeout) +{ + if (card->options.vnicc.sup_chars & vnicc && + card->options.vnicc.getset_timeout_sup & vnicc && + !qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT, + timeout)) + return false; + *timeout = QETH_VNICC_DEFAULT_TIMEOUT; + return true; +} + +/* recover user characteristic setting */ +static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc, + bool enable) +{ + u32 cmd = enable ? IPA_VNICC_ENABLE : IPA_VNICC_DISABLE; + + if (card->options.vnicc.sup_chars & vnicc && + card->options.vnicc.set_char_sup & vnicc && + !qeth_l2_vnicc_set_char(card, vnicc, cmd)) + return false; + card->options.vnicc.wanted_chars &= ~vnicc; + card->options.vnicc.wanted_chars |= QETH_VNICC_DEFAULT & vnicc; + return true; +} + +/* (re-)initialize VNICC */ +static void qeth_l2_vnicc_init(struct qeth_card *card) +{ + u32 *timeout = &card->options.vnicc.learning_timeout; + bool enable, error = false; + unsigned int chars_len, i; + unsigned long chars_tmp; + u32 sup_cmds, vnicc; + + QETH_CARD_TEXT(card, 2, "vniccini"); + /* reset rx_bcast */ + card->options.vnicc.rx_bcast_enabled = 0; + /* initial query and storage of VNIC characteristics */ + if (qeth_l2_vnicc_query_chars(card)) { + if (card->options.vnicc.wanted_chars != QETH_VNICC_DEFAULT || + *timeout != QETH_VNICC_DEFAULT_TIMEOUT) + dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n"); + /* fail quietly if user didn't change the default config */ + card->options.vnicc.sup_chars = 0; + card->options.vnicc.cur_chars = 0; + card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT; + return; + } + /* get supported commands for each supported characteristic */ + chars_tmp = card->options.vnicc.sup_chars; + chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE; + for_each_set_bit(i, &chars_tmp, chars_len) { + vnicc = BIT(i); + if (qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds)) { + sup_cmds = 0; + error = true; + } + if ((sup_cmds & IPA_VNICC_SET_TIMEOUT) && + (sup_cmds & IPA_VNICC_GET_TIMEOUT)) + card->options.vnicc.getset_timeout_sup |= vnicc; + else + card->options.vnicc.getset_timeout_sup &= ~vnicc; + if ((sup_cmds & IPA_VNICC_ENABLE) && + (sup_cmds & IPA_VNICC_DISABLE)) + card->options.vnicc.set_char_sup |= vnicc; + 
else + card->options.vnicc.set_char_sup &= ~vnicc; + } + /* enforce assumed default values and recover settings, if changed */ + error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING, + timeout); + chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT; + chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE; + for_each_set_bit(i, &chars_tmp, chars_len) { + vnicc = BIT(i); + enable = card->options.vnicc.wanted_chars & vnicc; + error |= qeth_l2_vnicc_recover_char(card, vnicc, enable); + } + if (error) + dev_err(&card->gdev->dev, "Configuring the VNIC characteristics failed\n"); +} + +/* configure default values of VNIC characteristics */ +static void qeth_l2_vnicc_set_defaults(struct qeth_card *card) +{ + /* characteristics values */ + card->options.vnicc.sup_chars = QETH_VNICC_ALL; + card->options.vnicc.cur_chars = QETH_VNICC_DEFAULT; + card->options.vnicc.learning_timeout = QETH_VNICC_DEFAULT_TIMEOUT; + /* supported commands */ + card->options.vnicc.set_char_sup = QETH_VNICC_ALL; + card->options.vnicc.getset_timeout_sup = QETH_VNICC_LEARNING; + /* settings wanted by users */ + card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT; +} + +module_init(qeth_l2_init); +module_exit(qeth_l2_exit); +MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); +MODULE_DESCRIPTION("qeth layer 2 discipline"); +MODULE_LICENSE("GPL"); diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c new file mode 100644 index 000000000..f2c3b127b --- /dev/null +++ b/drivers/s390/net/qeth_l2_sys.c @@ -0,0 +1,451 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2013 + * Author(s): Eugene Crosser <eugene.crosser@ru.ibm.com> + */ + +#include <linux/slab.h> +#include <asm/ebcdic.h> +#include "qeth_core.h" +#include "qeth_l2.h" + +static ssize_t qeth_bridge_port_role_state_show(struct device *dev, + struct device_attribute *attr, char *buf, + int show_state) +{ + struct qeth_card *card = dev_get_drvdata(dev); + enum qeth_sbp_states state = QETH_SBP_STATE_INACTIVE; + int rc = 0; + char *word; + + if (!card) + return -EINVAL; + + if (qeth_l2_vnicc_is_in_use(card)) + return sprintf(buf, "n/a (VNIC characteristics)\n"); + + if (qeth_card_hw_is_reachable(card) && + card->options.sbp.supported_funcs) + rc = qeth_bridgeport_query_ports(card, + &card->options.sbp.role, &state); + if (!rc) { + if (show_state) + switch (state) { + case QETH_SBP_STATE_INACTIVE: + word = "inactive"; break; + case QETH_SBP_STATE_STANDBY: + word = "standby"; break; + case QETH_SBP_STATE_ACTIVE: + word = "active"; break; + default: + rc = -EIO; + } + else + switch (card->options.sbp.role) { + case QETH_SBP_ROLE_NONE: + word = "none"; break; + case QETH_SBP_ROLE_PRIMARY: + word = "primary"; break; + case QETH_SBP_ROLE_SECONDARY: + word = "secondary"; break; + default: + rc = -EIO; + } + if (rc) + QETH_CARD_TEXT_(card, 2, "SBP%02x:%02x", + card->options.sbp.role, state); + else + rc = sprintf(buf, "%s\n", word); + } + + return rc; +} + +static ssize_t qeth_bridge_port_role_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (qeth_l2_vnicc_is_in_use(card)) + return sprintf(buf, "n/a (VNIC characteristics)\n"); + + return qeth_bridge_port_role_state_show(dev, attr, buf, 0); +} + +static ssize_t qeth_bridge_port_role_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + int rc = 0; + enum qeth_sbp_roles role; + + 
if (!card) + return -EINVAL; + if (sysfs_streq(buf, "primary")) + role = QETH_SBP_ROLE_PRIMARY; + else if (sysfs_streq(buf, "secondary")) + role = QETH_SBP_ROLE_SECONDARY; + else if (sysfs_streq(buf, "none")) + role = QETH_SBP_ROLE_NONE; + else + return -EINVAL; + + mutex_lock(&card->conf_mutex); + + if (qeth_l2_vnicc_is_in_use(card)) + rc = -EBUSY; + else if (card->options.sbp.reflect_promisc) + /* Forbid direct manipulation */ + rc = -EPERM; + else if (qeth_card_hw_is_reachable(card)) { + rc = qeth_bridgeport_setrole(card, role); + if (!rc) + card->options.sbp.role = role; + } else + card->options.sbp.role = role; + + mutex_unlock(&card->conf_mutex); + + return rc ? rc : count; +} + +static DEVICE_ATTR(bridge_role, 0644, qeth_bridge_port_role_show, + qeth_bridge_port_role_store); + +static ssize_t qeth_bridge_port_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (qeth_l2_vnicc_is_in_use(card)) + return sprintf(buf, "n/a (VNIC characteristics)\n"); + + return qeth_bridge_port_role_state_show(dev, attr, buf, 1); +} + +static DEVICE_ATTR(bridge_state, 0444, qeth_bridge_port_state_show, + NULL); + +static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + int enabled; + + if (!card) + return -EINVAL; + + if (qeth_l2_vnicc_is_in_use(card)) + return sprintf(buf, "n/a (VNIC characteristics)\n"); + + enabled = card->options.sbp.hostnotification; + + return sprintf(buf, "%d\n", enabled); +} + +static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + bool enable; + int rc; + + if (!card) + return -EINVAL; + + rc = kstrtobool(buf, &enable); + if (rc) + return rc; + + mutex_lock(&card->conf_mutex); + + if (qeth_l2_vnicc_is_in_use(card)) + rc = -EBUSY; + else if (qeth_card_hw_is_reachable(card)) { + rc = qeth_bridgeport_an_set(card, enable); + if (!rc) + card->options.sbp.hostnotification = enable; + } else + card->options.sbp.hostnotification = enable; + + mutex_unlock(&card->conf_mutex); + + return rc ? 
rc : count; +} + +static DEVICE_ATTR(bridge_hostnotify, 0644, + qeth_bridgeport_hostnotification_show, + qeth_bridgeport_hostnotification_store); + +static ssize_t qeth_bridgeport_reflect_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + char *state; + + if (!card) + return -EINVAL; + + if (qeth_l2_vnicc_is_in_use(card)) + return sprintf(buf, "n/a (VNIC characteristics)\n"); + + if (card->options.sbp.reflect_promisc) { + if (card->options.sbp.reflect_promisc_primary) + state = "primary"; + else + state = "secondary"; + } else + state = "none"; + + return sprintf(buf, "%s\n", state); +} + +static ssize_t qeth_bridgeport_reflect_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + int enable, primary; + int rc = 0; + + if (!card) + return -EINVAL; + + if (sysfs_streq(buf, "none")) { + enable = 0; + primary = 0; + } else if (sysfs_streq(buf, "primary")) { + enable = 1; + primary = 1; + } else if (sysfs_streq(buf, "secondary")) { + enable = 1; + primary = 0; + } else + return -EINVAL; + + mutex_lock(&card->conf_mutex); + + if (qeth_l2_vnicc_is_in_use(card)) + rc = -EBUSY; + else if (card->options.sbp.role != QETH_SBP_ROLE_NONE) + rc = -EPERM; + else { + card->options.sbp.reflect_promisc = enable; + card->options.sbp.reflect_promisc_primary = primary; + rc = 0; + } + + mutex_unlock(&card->conf_mutex); + + return rc ? rc : count; +} + +static DEVICE_ATTR(bridge_reflect_promisc, 0644, + qeth_bridgeport_reflect_show, + qeth_bridgeport_reflect_store); + +static struct attribute *qeth_l2_bridgeport_attrs[] = { + &dev_attr_bridge_role.attr, + &dev_attr_bridge_state.attr, + &dev_attr_bridge_hostnotify.attr, + &dev_attr_bridge_reflect_promisc.attr, + NULL, +}; + +static struct attribute_group qeth_l2_bridgeport_attr_group = { + .attrs = qeth_l2_bridgeport_attrs, +}; + +/** + * qeth_l2_setup_bridgeport_attrs() - set/restore attrs when turning online. + * @card: qeth_card structure pointer + * + * Note: this function is called with conf_mutex held by the caller + */ +void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card) +{ + int rc; + + if (!card) + return; + if (!card->options.sbp.supported_funcs) + return; + if (card->options.sbp.role != QETH_SBP_ROLE_NONE) { + /* Conditional to avoid spurious error messages */ + qeth_bridgeport_setrole(card, card->options.sbp.role); + /* Let the callback function refresh the stored role value. 
*/ + qeth_bridgeport_query_ports(card, + &card->options.sbp.role, NULL); + } + if (card->options.sbp.hostnotification) { + rc = qeth_bridgeport_an_set(card, 1); + if (rc) + card->options.sbp.hostnotification = 0; + } else + qeth_bridgeport_an_set(card, 0); +} + +/* VNIC CHARS support */ + +/* convert sysfs attr name to VNIC characteristic */ +static u32 qeth_l2_vnicc_sysfs_attr_to_char(const char *attr_name) +{ + if (sysfs_streq(attr_name, "flooding")) + return QETH_VNICC_FLOODING; + else if (sysfs_streq(attr_name, "mcast_flooding")) + return QETH_VNICC_MCAST_FLOODING; + else if (sysfs_streq(attr_name, "learning")) + return QETH_VNICC_LEARNING; + else if (sysfs_streq(attr_name, "takeover_setvmac")) + return QETH_VNICC_TAKEOVER_SETVMAC; + else if (sysfs_streq(attr_name, "takeover_learning")) + return QETH_VNICC_TAKEOVER_LEARNING; + else if (sysfs_streq(attr_name, "bridge_invisible")) + return QETH_VNICC_BRIDGE_INVISIBLE; + else if (sysfs_streq(attr_name, "rx_bcast")) + return QETH_VNICC_RX_BCAST; + + return 0; +} + +/* get current timeout setting */ +static ssize_t qeth_vnicc_timeout_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + u32 timeout; + int rc; + + if (!card) + return -EINVAL; + + rc = qeth_l2_vnicc_get_timeout(card, &timeout); + if (rc == -EBUSY) + return sprintf(buf, "n/a (BridgePort)\n"); + if (rc == -EOPNOTSUPP) + return sprintf(buf, "n/a\n"); + return rc ? rc : sprintf(buf, "%d\n", timeout); +} + +/* change timeout setting */ +static ssize_t qeth_vnicc_timeout_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + u32 timeout; + int rc; + + if (!card) + return -EINVAL; + + rc = kstrtou32(buf, 10, &timeout); + if (rc) + return rc; + + mutex_lock(&card->conf_mutex); + rc = qeth_l2_vnicc_set_timeout(card, timeout); + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +/* get current setting of characteristic */ +static ssize_t qeth_vnicc_char_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + bool state; + u32 vnicc; + int rc; + + if (!card) + return -EINVAL; + + vnicc = qeth_l2_vnicc_sysfs_attr_to_char(attr->attr.name); + rc = qeth_l2_vnicc_get_state(card, vnicc, &state); + + if (rc == -EBUSY) + return sprintf(buf, "n/a (BridgePort)\n"); + if (rc == -EOPNOTSUPP) + return sprintf(buf, "n/a\n"); + return rc ? rc : sprintf(buf, "%d\n", state); +} + +/* change setting of characteristic */ +static ssize_t qeth_vnicc_char_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + bool state; + u32 vnicc; + int rc; + + if (!card) + return -EINVAL; + + if (kstrtobool(buf, &state)) + return -EINVAL; + + vnicc = qeth_l2_vnicc_sysfs_attr_to_char(attr->attr.name); + mutex_lock(&card->conf_mutex); + rc = qeth_l2_vnicc_set_state(card, vnicc, state); + mutex_unlock(&card->conf_mutex); + + return rc ? 
rc : count; +} + +static DEVICE_ATTR(flooding, 0644, qeth_vnicc_char_show, qeth_vnicc_char_store); +static DEVICE_ATTR(mcast_flooding, 0644, qeth_vnicc_char_show, + qeth_vnicc_char_store); +static DEVICE_ATTR(learning, 0644, qeth_vnicc_char_show, qeth_vnicc_char_store); +static DEVICE_ATTR(learning_timeout, 0644, qeth_vnicc_timeout_show, + qeth_vnicc_timeout_store); +static DEVICE_ATTR(takeover_setvmac, 0644, qeth_vnicc_char_show, + qeth_vnicc_char_store); +static DEVICE_ATTR(takeover_learning, 0644, qeth_vnicc_char_show, + qeth_vnicc_char_store); +static DEVICE_ATTR(bridge_invisible, 0644, qeth_vnicc_char_show, + qeth_vnicc_char_store); +static DEVICE_ATTR(rx_bcast, 0644, qeth_vnicc_char_show, qeth_vnicc_char_store); + +static struct attribute *qeth_l2_vnicc_attrs[] = { + &dev_attr_flooding.attr, + &dev_attr_mcast_flooding.attr, + &dev_attr_learning.attr, + &dev_attr_learning_timeout.attr, + &dev_attr_takeover_setvmac.attr, + &dev_attr_takeover_learning.attr, + &dev_attr_bridge_invisible.attr, + &dev_attr_rx_bcast.attr, + NULL, +}; + +static struct attribute_group qeth_l2_vnicc_attr_group = { + .attrs = qeth_l2_vnicc_attrs, + .name = "vnicc", +}; + +static const struct attribute_group *qeth_l2_only_attr_groups[] = { + &qeth_l2_bridgeport_attr_group, + &qeth_l2_vnicc_attr_group, + NULL, +}; + +int qeth_l2_create_device_attributes(struct device *dev) +{ + return sysfs_create_groups(&dev->kobj, qeth_l2_only_attr_groups); +} + +void qeth_l2_remove_device_attributes(struct device *dev) +{ + sysfs_remove_groups(&dev->kobj, qeth_l2_only_attr_groups); +} + +const struct attribute_group *qeth_l2_attr_groups[] = { + &qeth_device_attr_group, + &qeth_device_blkt_group, + /* l2 specific, see qeth_l2_only_attr_groups: */ + &qeth_l2_bridgeport_attr_group, + &qeth_l2_vnicc_attr_group, + NULL, +}; diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h new file mode 100644 index 000000000..87659cfc9 --- /dev/null +++ b/drivers/s390/net/qeth_l3.h @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 
2007 + * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, + * Frank Pavlic <fpavlic@de.ibm.com>, + * Thomas Spatzier <tspat@de.ibm.com>, + * Frank Blaschka <frank.blaschka@de.ibm.com> + */ + +#ifndef __QETH_L3_H__ +#define __QETH_L3_H__ + +#include "qeth_core.h" +#include <linux/hashtable.h> + +#define QETH_SNIFF_AVAIL 0x0008 + +enum qeth_ip_types { + QETH_IP_TYPE_NORMAL, + QETH_IP_TYPE_VIPA, + QETH_IP_TYPE_RXIP, +}; + +struct qeth_ipaddr { + struct hlist_node hnode; + enum qeth_ip_types type; + unsigned char mac[ETH_ALEN]; + u8 is_multicast:1; + u8 in_progress:1; + u8 disp_flag:2; + u8 ipato:1; /* ucast only */ + + /* is changed only for normal ip addresses + * for non-normal addresses it always is 1 + */ + int ref_counter; + enum qeth_prot_versions proto; + union { + struct { + unsigned int addr; + unsigned int mask; + } a4; + struct { + struct in6_addr addr; + unsigned int pfxlen; + } a6; + } u; +}; + +static inline void qeth_l3_init_ipaddr(struct qeth_ipaddr *addr, + enum qeth_ip_types type, + enum qeth_prot_versions proto) +{ + memset(addr, 0, sizeof(*addr)); + addr->type = type; + addr->proto = proto; + addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; +} + +static inline bool qeth_l3_addr_match_ip(struct qeth_ipaddr *a1, + struct qeth_ipaddr *a2) +{ + if (a1->proto != a2->proto) + return false; + if (a1->proto == QETH_PROT_IPV6) + return ipv6_addr_equal(&a1->u.a6.addr, &a2->u.a6.addr); + return a1->u.a4.addr == a2->u.a4.addr; +} + +static inline bool qeth_l3_addr_match_all(struct qeth_ipaddr *a1, + struct qeth_ipaddr *a2) +{ + /* Assumes that the pair was obtained via qeth_l3_addr_find_by_ip(), + * so 'proto' and 'addr' match for sure. + * + * For ucast: + * - 'mac' is always 0. + * - 'mask'/'pfxlen' for RXIP/VIPA is always 0. For NORMAL, matching + * values are required to avoid mixups in takeover eligibility. + * + * For mcast, + * - 'mac' is mapped from the IP, and thus always matches. + * - 'mask'/'pfxlen' is always 0. 
+ */ + if (a1->type != a2->type) + return false; + if (a1->proto == QETH_PROT_IPV6) + return a1->u.a6.pfxlen == a2->u.a6.pfxlen; + return a1->u.a4.mask == a2->u.a4.mask; +} + +static inline u64 qeth_l3_ipaddr_hash(struct qeth_ipaddr *addr) +{ + u64 ret = 0; + u8 *point; + + if (addr->proto == QETH_PROT_IPV6) { + point = (u8 *) &addr->u.a6.addr; + ret = get_unaligned((u64 *)point) ^ + get_unaligned((u64 *) (point + 8)); + } + if (addr->proto == QETH_PROT_IPV4) { + point = (u8 *) &addr->u.a4.addr; + ret = get_unaligned((u32 *) point); + } + return ret; +} + +struct qeth_ipato_entry { + struct list_head entry; + enum qeth_prot_versions proto; + char addr[16]; + int mask_bits; +}; + +extern const struct attribute_group *qeth_l3_attr_groups[]; + +void qeth_l3_ipaddr_to_string(enum qeth_prot_versions, const __u8 *, char *); +int qeth_l3_create_device_attributes(struct device *); +void qeth_l3_remove_device_attributes(struct device *); +int qeth_l3_setrouting_v4(struct qeth_card *); +int qeth_l3_setrouting_v6(struct qeth_card *); +int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *); +int qeth_l3_del_ipato_entry(struct qeth_card *card, + enum qeth_prot_versions proto, u8 *addr, + int mask_bits); +void qeth_l3_update_ipato(struct qeth_card *card); +int qeth_l3_modify_hsuid(struct qeth_card *card, bool add); +int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip, + enum qeth_ip_types type, + enum qeth_prot_versions proto); + +#endif /* __QETH_L3_H__ */ diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c new file mode 100644 index 000000000..52e0ae4dc --- /dev/null +++ b/drivers/s390/net/qeth_l3_main.c @@ -0,0 +1,3000 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2007, 2009 + * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, + * Frank Pavlic <fpavlic@de.ibm.com>, + * Thomas Spatzier <tspat@de.ibm.com>, + * Frank Blaschka <frank.blaschka@de.ibm.com> + */ + +#define KMSG_COMPONENT "qeth" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/bitops.h> +#include <linux/string.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/etherdevice.h> +#include <linux/ip.h> +#include <linux/in.h> +#include <linux/ipv6.h> +#include <linux/inetdevice.h> +#include <linux/igmp.h> +#include <linux/slab.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include <linux/skbuff.h> + +#include <net/ip.h> +#include <net/arp.h> +#include <net/route.h> +#include <net/ipv6.h> +#include <net/ip6_route.h> +#include <net/ip6_fib.h> +#include <net/ip6_checksum.h> +#include <net/iucv/af_iucv.h> +#include <linux/hashtable.h> + +#include "qeth_l3.h" + + +static int qeth_l3_set_offline(struct ccwgroup_device *); +static int qeth_l3_stop(struct net_device *); +static void qeth_l3_set_rx_mode(struct net_device *dev); +static int qeth_l3_register_addr_entry(struct qeth_card *, + struct qeth_ipaddr *); +static int qeth_l3_deregister_addr_entry(struct qeth_card *, + struct qeth_ipaddr *); + +static void qeth_l3_ipaddr4_to_string(const __u8 *addr, char *buf) +{ + sprintf(buf, "%pI4", addr); +} + +static void qeth_l3_ipaddr6_to_string(const __u8 *addr, char *buf) +{ + sprintf(buf, "%pI6", addr); +} + +void qeth_l3_ipaddr_to_string(enum qeth_prot_versions proto, const __u8 *addr, + char *buf) +{ + if (proto == QETH_PROT_IPV4) + qeth_l3_ipaddr4_to_string(addr, buf); + else if (proto == QETH_PROT_IPV6) + qeth_l3_ipaddr6_to_string(addr, buf); +} + +static 
struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions prot) +{ + struct qeth_ipaddr *addr = kmalloc(sizeof(*addr), GFP_ATOMIC); + + if (addr) + qeth_l3_init_ipaddr(addr, QETH_IP_TYPE_NORMAL, prot); + return addr; +} + +static struct qeth_ipaddr *qeth_l3_find_addr_by_ip(struct qeth_card *card, + struct qeth_ipaddr *query) +{ + u64 key = qeth_l3_ipaddr_hash(query); + struct qeth_ipaddr *addr; + + if (query->is_multicast) { + hash_for_each_possible(card->ip_mc_htable, addr, hnode, key) + if (qeth_l3_addr_match_ip(addr, query)) + return addr; + } else { + hash_for_each_possible(card->ip_htable, addr, hnode, key) + if (qeth_l3_addr_match_ip(addr, query)) + return addr; + } + return NULL; +} + +static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) +{ + int i, j; + u8 octet; + + for (i = 0; i < len; ++i) { + octet = addr[i]; + for (j = 7; j >= 0; --j) { + bits[i*8 + j] = octet & 1; + octet >>= 1; + } + } +} + +static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, + struct qeth_ipaddr *addr) +{ + struct qeth_ipato_entry *ipatoe; + u8 addr_bits[128] = {0, }; + u8 ipatoe_bits[128] = {0, }; + int rc = 0; + + if (!card->ipato.enabled) + return false; + if (addr->type != QETH_IP_TYPE_NORMAL) + return false; + + qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits, + (addr->proto == QETH_PROT_IPV4)? 4:16); + list_for_each_entry(ipatoe, &card->ipato.entries, entry) { + if (addr->proto != ipatoe->proto) + continue; + qeth_l3_convert_addr_to_bits(ipatoe->addr, ipatoe_bits, + (ipatoe->proto == QETH_PROT_IPV4) ? + 4 : 16); + if (addr->proto == QETH_PROT_IPV4) + rc = !memcmp(addr_bits, ipatoe_bits, + min(32, ipatoe->mask_bits)); + else + rc = !memcmp(addr_bits, ipatoe_bits, + min(128, ipatoe->mask_bits)); + if (rc) + break; + } + /* invert? 
*/ + if ((addr->proto == QETH_PROT_IPV4) && card->ipato.invert4) + rc = !rc; + else if ((addr->proto == QETH_PROT_IPV6) && card->ipato.invert6) + rc = !rc; + + return rc; +} + +static int qeth_l3_delete_ip(struct qeth_card *card, + struct qeth_ipaddr *tmp_addr) +{ + int rc = 0; + struct qeth_ipaddr *addr; + + if (tmp_addr->type == QETH_IP_TYPE_RXIP) + QETH_CARD_TEXT(card, 2, "delrxip"); + else if (tmp_addr->type == QETH_IP_TYPE_VIPA) + QETH_CARD_TEXT(card, 2, "delvipa"); + else + QETH_CARD_TEXT(card, 2, "delip"); + + if (tmp_addr->proto == QETH_PROT_IPV4) + QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4); + else { + QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8); + QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); + } + + addr = qeth_l3_find_addr_by_ip(card, tmp_addr); + if (!addr || !qeth_l3_addr_match_all(addr, tmp_addr)) + return -ENOENT; + + addr->ref_counter--; + if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0) + return rc; + if (addr->in_progress) + return -EINPROGRESS; + + if (qeth_card_hw_is_reachable(card)) + rc = qeth_l3_deregister_addr_entry(card, addr); + + hash_del(&addr->hnode); + kfree(addr); + + return rc; +} + +static int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) +{ + int rc = 0; + struct qeth_ipaddr *addr; + char buf[40]; + + if (tmp_addr->type == QETH_IP_TYPE_RXIP) + QETH_CARD_TEXT(card, 2, "addrxip"); + else if (tmp_addr->type == QETH_IP_TYPE_VIPA) + QETH_CARD_TEXT(card, 2, "addvipa"); + else + QETH_CARD_TEXT(card, 2, "addip"); + + if (tmp_addr->proto == QETH_PROT_IPV4) + QETH_CARD_HEX(card, 4, &tmp_addr->u.a4.addr, 4); + else { + QETH_CARD_HEX(card, 4, &tmp_addr->u.a6.addr, 8); + QETH_CARD_HEX(card, 4, ((char *)&tmp_addr->u.a6.addr) + 8, 8); + } + + addr = qeth_l3_find_addr_by_ip(card, tmp_addr); + if (addr) { + if (tmp_addr->type != QETH_IP_TYPE_NORMAL) + return -EADDRINUSE; + if (qeth_l3_addr_match_all(addr, tmp_addr)) { + addr->ref_counter++; + return 0; + } + qeth_l3_ipaddr_to_string(tmp_addr->proto, (u8 *)&tmp_addr->u, + buf); + dev_warn(&card->gdev->dev, + "Registering IP address %s failed\n", buf); + return -EADDRINUSE; + } else { + addr = qeth_l3_get_addr_buffer(tmp_addr->proto); + if (!addr) + return -ENOMEM; + + memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr)); + addr->ref_counter = 1; + + if (qeth_l3_is_addr_covered_by_ipato(card, addr)) { + QETH_CARD_TEXT(card, 2, "tkovaddr"); + addr->ipato = 1; + } + hash_add(card->ip_htable, &addr->hnode, + qeth_l3_ipaddr_hash(addr)); + + if (!qeth_card_hw_is_reachable(card)) { + addr->disp_flag = QETH_DISP_ADDR_ADD; + return 0; + } + + /* qeth_l3_register_addr_entry can go to sleep + * if we add an IPV4 addr. It is caused by the reason + * that SETIP ipa cmd starts ARP stuff for IPV4 addr. 
+ * Thus we should unlock spinlock, and make a protection + * using in_progress variable to indicate that there is + * an hardware operation with this IPV4 address + */ + if (addr->proto == QETH_PROT_IPV4) { + addr->in_progress = 1; + spin_unlock_bh(&card->ip_lock); + rc = qeth_l3_register_addr_entry(card, addr); + spin_lock_bh(&card->ip_lock); + addr->in_progress = 0; + } else + rc = qeth_l3_register_addr_entry(card, addr); + + if (!rc || (rc == IPA_RC_DUPLICATE_IP_ADDRESS) || + (rc == IPA_RC_LAN_OFFLINE)) { + addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; + if (addr->ref_counter < 1) { + qeth_l3_deregister_addr_entry(card, addr); + hash_del(&addr->hnode); + kfree(addr); + } + } else { + hash_del(&addr->hnode); + kfree(addr); + } + } + return rc; +} + +static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover) +{ + struct qeth_ipaddr *addr; + struct hlist_node *tmp; + int i; + + QETH_CARD_TEXT(card, 4, "clearip"); + + spin_lock_bh(&card->ip_lock); + + hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { + if (!recover) { + hash_del(&addr->hnode); + kfree(addr); + continue; + } + addr->disp_flag = QETH_DISP_ADDR_ADD; + } + + spin_unlock_bh(&card->ip_lock); + + spin_lock_bh(&card->mclock); + + hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { + hash_del(&addr->hnode); + kfree(addr); + } + + spin_unlock_bh(&card->mclock); + + +} +static void qeth_l3_recover_ip(struct qeth_card *card) +{ + struct qeth_ipaddr *addr; + struct hlist_node *tmp; + int i; + int rc; + + QETH_CARD_TEXT(card, 4, "recovrip"); + + spin_lock_bh(&card->ip_lock); + + hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) { + if (addr->disp_flag == QETH_DISP_ADDR_ADD) { + if (addr->proto == QETH_PROT_IPV4) { + addr->in_progress = 1; + spin_unlock_bh(&card->ip_lock); + rc = qeth_l3_register_addr_entry(card, addr); + spin_lock_bh(&card->ip_lock); + addr->in_progress = 0; + } else + rc = qeth_l3_register_addr_entry(card, addr); + + if (!rc) { + addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; + if (addr->ref_counter < 1) + qeth_l3_delete_ip(card, addr); + } else { + hash_del(&addr->hnode); + kfree(addr); + } + } + } + + spin_unlock_bh(&card->ip_lock); + +} + +static int qeth_l3_send_setdelmc(struct qeth_card *card, + struct qeth_ipaddr *addr, int ipacmd) +{ + int rc; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_CARD_TEXT(card, 4, "setdelmc"); + + iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + ether_addr_copy(cmd->data.setdelipm.mac, addr->mac); + if (addr->proto == QETH_PROT_IPV6) + memcpy(cmd->data.setdelipm.ip6, &addr->u.a6.addr, + sizeof(struct in6_addr)); + else + memcpy(&cmd->data.setdelipm.ip4, &addr->u.a4.addr, 4); + + rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); + + return rc; +} + +static void qeth_l3_fill_netmask(u8 *netmask, unsigned int len) +{ + int i, j; + for (i = 0; i < 16; i++) { + j = (len) - (i * 8); + if (j >= 8) + netmask[i] = 0xff; + else if (j > 0) + netmask[i] = (u8)(0xFF00 >> j); + else + netmask[i] = 0; + } +} + +static u32 qeth_l3_get_setdelip_flags(struct qeth_ipaddr *addr, bool set) +{ + switch (addr->type) { + case QETH_IP_TYPE_RXIP: + return (set) ? QETH_IPA_SETIP_TAKEOVER_FLAG : 0; + case QETH_IP_TYPE_VIPA: + return (set) ? QETH_IPA_SETIP_VIPA_FLAG : + QETH_IPA_DELIP_VIPA_FLAG; + default: + return (set && addr->ipato) ? 
QETH_IPA_SETIP_TAKEOVER_FLAG : 0; + } +} + +static int qeth_l3_send_setdelip(struct qeth_card *card, + struct qeth_ipaddr *addr, + enum qeth_ipa_cmds ipacmd) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + __u8 netmask[16]; + u32 flags; + + QETH_CARD_TEXT(card, 4, "setdelip"); + + iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + + flags = qeth_l3_get_setdelip_flags(addr, ipacmd == IPA_CMD_SETIP); + QETH_CARD_TEXT_(card, 4, "flags%02X", flags); + + if (addr->proto == QETH_PROT_IPV6) { + memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr, + sizeof(struct in6_addr)); + qeth_l3_fill_netmask(netmask, addr->u.a6.pfxlen); + memcpy(cmd->data.setdelip6.mask, netmask, + sizeof(struct in6_addr)); + cmd->data.setdelip6.flags = flags; + } else { + memcpy(cmd->data.setdelip4.ip_addr, &addr->u.a4.addr, 4); + memcpy(cmd->data.setdelip4.mask, &addr->u.a4.mask, 4); + cmd->data.setdelip4.flags = flags; + } + + return qeth_send_ipa_cmd(card, iob, NULL, NULL); +} + +static int qeth_l3_send_setrouting(struct qeth_card *card, + enum qeth_routing_types type, enum qeth_prot_versions prot) +{ + int rc; + struct qeth_ipa_cmd *cmd; + struct qeth_cmd_buffer *iob; + + QETH_CARD_TEXT(card, 4, "setroutg"); + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + cmd->data.setrtg.type = (type); + rc = qeth_send_ipa_cmd(card, iob, NULL, NULL); + + return rc; +} + +static int qeth_l3_correct_routing_type(struct qeth_card *card, + enum qeth_routing_types *type, enum qeth_prot_versions prot) +{ + if (card->info.type == QETH_CARD_TYPE_IQD) { + switch (*type) { + case NO_ROUTER: + case PRIMARY_CONNECTOR: + case SECONDARY_CONNECTOR: + case MULTICAST_ROUTER: + return 0; + default: + goto out_inval; + } + } else { + switch (*type) { + case NO_ROUTER: + case PRIMARY_ROUTER: + case SECONDARY_ROUTER: + return 0; + case MULTICAST_ROUTER: + if (qeth_is_ipafunc_supported(card, prot, + IPA_OSA_MC_ROUTER)) + return 0; + default: + goto out_inval; + } + } +out_inval: + *type = NO_ROUTER; + return -EINVAL; +} + +int qeth_l3_setrouting_v4(struct qeth_card *card) +{ + int rc; + + QETH_CARD_TEXT(card, 3, "setrtg4"); + + rc = qeth_l3_correct_routing_type(card, &card->options.route4.type, + QETH_PROT_IPV4); + if (rc) + return rc; + + rc = qeth_l3_send_setrouting(card, card->options.route4.type, + QETH_PROT_IPV4); + if (rc) { + card->options.route4.type = NO_ROUTER; + QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type" + " on %s. Type set to 'no router'.\n", rc, + QETH_CARD_IFNAME(card)); + } + return rc; +} + +int qeth_l3_setrouting_v6(struct qeth_card *card) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 3, "setrtg6"); + + if (!qeth_is_supported(card, IPA_IPV6)) + return 0; + rc = qeth_l3_correct_routing_type(card, &card->options.route6.type, + QETH_PROT_IPV6); + if (rc) + return rc; + + rc = qeth_l3_send_setrouting(card, card->options.route6.type, + QETH_PROT_IPV6); + if (rc) { + card->options.route6.type = NO_ROUTER; + QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type" + " on %s. Type set to 'no router'.\n", rc, + QETH_CARD_IFNAME(card)); + } + return rc; +} + +/* + * IP address takeover related functions + */ + +/** + * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs. + * + * Caller must hold ip_lock. 
+ */ +void qeth_l3_update_ipato(struct qeth_card *card) +{ + struct qeth_ipaddr *addr; + unsigned int i; + + hash_for_each(card->ip_htable, i, addr, hnode) { + if (addr->type != QETH_IP_TYPE_NORMAL) + continue; + addr->ipato = qeth_l3_is_addr_covered_by_ipato(card, addr); + } +} + +static void qeth_l3_clear_ipato_list(struct qeth_card *card) +{ + struct qeth_ipato_entry *ipatoe, *tmp; + + spin_lock_bh(&card->ip_lock); + + list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { + list_del(&ipatoe->entry); + kfree(ipatoe); + } + + qeth_l3_update_ipato(card); + spin_unlock_bh(&card->ip_lock); +} + +int qeth_l3_add_ipato_entry(struct qeth_card *card, + struct qeth_ipato_entry *new) +{ + struct qeth_ipato_entry *ipatoe; + int rc = 0; + + QETH_CARD_TEXT(card, 2, "addipato"); + + spin_lock_bh(&card->ip_lock); + + list_for_each_entry(ipatoe, &card->ipato.entries, entry) { + if (ipatoe->proto != new->proto) + continue; + if (!memcmp(ipatoe->addr, new->addr, + (ipatoe->proto == QETH_PROT_IPV4)? 4:16) && + (ipatoe->mask_bits == new->mask_bits)) { + rc = -EEXIST; + break; + } + } + + if (!rc) { + list_add_tail(&new->entry, &card->ipato.entries); + qeth_l3_update_ipato(card); + } + + spin_unlock_bh(&card->ip_lock); + + return rc; +} + +int qeth_l3_del_ipato_entry(struct qeth_card *card, + enum qeth_prot_versions proto, u8 *addr, + int mask_bits) +{ + struct qeth_ipato_entry *ipatoe, *tmp; + int rc = -ENOENT; + + QETH_CARD_TEXT(card, 2, "delipato"); + + spin_lock_bh(&card->ip_lock); + + list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) { + if (ipatoe->proto != proto) + continue; + if (!memcmp(ipatoe->addr, addr, + (proto == QETH_PROT_IPV4)? 4:16) && + (ipatoe->mask_bits == mask_bits)) { + list_del(&ipatoe->entry); + qeth_l3_update_ipato(card); + kfree(ipatoe); + rc = 0; + } + } + + spin_unlock_bh(&card->ip_lock); + return rc; +} + +int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip, + enum qeth_ip_types type, + enum qeth_prot_versions proto) +{ + struct qeth_ipaddr addr; + int rc; + + qeth_l3_init_ipaddr(&addr, type, proto); + if (proto == QETH_PROT_IPV4) + memcpy(&addr.u.a4.addr, ip, 4); + else + memcpy(&addr.u.a6.addr, ip, 16); + + spin_lock_bh(&card->ip_lock); + rc = add ? qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr); + spin_unlock_bh(&card->ip_lock); + return rc; +} + +int qeth_l3_modify_hsuid(struct qeth_card *card, bool add) +{ + struct qeth_ipaddr addr; + int rc, i; + + qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6); + addr.u.a6.addr.s6_addr[0] = 0xfe; + addr.u.a6.addr.s6_addr[1] = 0x80; + for (i = 0; i < 8; i++) + addr.u.a6.addr.s6_addr[8+i] = card->options.hsuid[i]; + + spin_lock_bh(&card->ip_lock); + rc = add ? 
qeth_l3_add_ip(card, &addr) : qeth_l3_delete_ip(card, &addr); + spin_unlock_bh(&card->ip_lock); + return rc; +} + +static int qeth_l3_register_addr_entry(struct qeth_card *card, + struct qeth_ipaddr *addr) +{ + char buf[50]; + int rc = 0; + int cnt = 3; + + if (card->options.sniffer) + return 0; + + if (addr->proto == QETH_PROT_IPV4) { + QETH_CARD_TEXT(card, 2, "setaddr4"); + QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); + } else if (addr->proto == QETH_PROT_IPV6) { + QETH_CARD_TEXT(card, 2, "setaddr6"); + QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8); + QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8); + } else { + QETH_CARD_TEXT(card, 2, "setaddr?"); + QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr)); + } + do { + if (addr->is_multicast) + rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_SETIPM); + else + rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_SETIP); + if (rc) + QETH_CARD_TEXT(card, 2, "failed"); + } while ((--cnt > 0) && rc); + if (rc) { + QETH_CARD_TEXT(card, 2, "FAILED"); + qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); + dev_warn(&card->gdev->dev, + "Registering IP address %s failed\n", buf); + } + return rc; +} + +static int qeth_l3_deregister_addr_entry(struct qeth_card *card, + struct qeth_ipaddr *addr) +{ + int rc = 0; + + if (card->options.sniffer) + return 0; + + if (addr->proto == QETH_PROT_IPV4) { + QETH_CARD_TEXT(card, 2, "deladdr4"); + QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int)); + } else if (addr->proto == QETH_PROT_IPV6) { + QETH_CARD_TEXT(card, 2, "deladdr6"); + QETH_CARD_HEX(card, 3, &addr->u.a6.addr, 8); + QETH_CARD_HEX(card, 3, ((char *)&addr->u.a6.addr) + 8, 8); + } else { + QETH_CARD_TEXT(card, 2, "deladdr?"); + QETH_CARD_HEX(card, 3, addr, sizeof(struct qeth_ipaddr)); + } + if (addr->is_multicast) + rc = qeth_l3_send_setdelmc(card, addr, IPA_CMD_DELIPM); + else + rc = qeth_l3_send_setdelip(card, addr, IPA_CMD_DELIP); + if (rc) + QETH_CARD_TEXT(card, 2, "failed"); + + return rc; +} + +static int qeth_l3_setadapter_parms(struct qeth_card *card) +{ + int rc = 0; + + QETH_DBF_TEXT(SETUP, 2, "setadprm"); + + if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) { + rc = qeth_setadpparms_change_macaddr(card); + if (rc) + dev_warn(&card->gdev->dev, "Reading the adapter MAC" + " address failed\n"); + } + + return rc; +} + +static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card) +{ + int rc; + + QETH_CARD_TEXT(card, 3, "ipaarp"); + + if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { + dev_info(&card->gdev->dev, + "ARP processing not supported on %s!\n", + QETH_CARD_IFNAME(card)); + return 0; + } + rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, + IPA_CMD_ASS_START, 0); + if (rc) { + dev_warn(&card->gdev->dev, + "Starting ARP processing support for %s failed\n", + QETH_CARD_IFNAME(card)); + } + return rc; +} + +static int qeth_l3_start_ipa_source_mac(struct qeth_card *card) +{ + int rc; + + QETH_CARD_TEXT(card, 3, "stsrcmac"); + + if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { + dev_info(&card->gdev->dev, + "Inbound source MAC-address not supported on %s\n", + QETH_CARD_IFNAME(card)); + return -EOPNOTSUPP; + } + + rc = qeth_send_simple_setassparms(card, IPA_SOURCE_MAC, + IPA_CMD_ASS_START, 0); + if (rc) + dev_warn(&card->gdev->dev, + "Starting source MAC-address support for %s failed\n", + QETH_CARD_IFNAME(card)); + return rc; +} + +static int qeth_l3_start_ipa_vlan(struct qeth_card *card) +{ + int rc = 0; + + QETH_CARD_TEXT(card, 3, "strtvlan"); + + if (!qeth_is_supported(card, 
IPA_FULL_VLAN)) { + dev_info(&card->gdev->dev, + "VLAN not supported on %s\n", QETH_CARD_IFNAME(card)); + return -EOPNOTSUPP; + } + + rc = qeth_send_simple_setassparms(card, IPA_VLAN_PRIO, + IPA_CMD_ASS_START, 0); + if (rc) { + dev_warn(&card->gdev->dev, + "Starting VLAN support for %s failed\n", + QETH_CARD_IFNAME(card)); + } else { + dev_info(&card->gdev->dev, "VLAN enabled\n"); + } + return rc; +} + +static int qeth_l3_start_ipa_multicast(struct qeth_card *card) +{ + int rc; + + QETH_CARD_TEXT(card, 3, "stmcast"); + + if (!qeth_is_supported(card, IPA_MULTICASTING)) { + dev_info(&card->gdev->dev, + "Multicast not supported on %s\n", + QETH_CARD_IFNAME(card)); + return -EOPNOTSUPP; + } + + rc = qeth_send_simple_setassparms(card, IPA_MULTICASTING, + IPA_CMD_ASS_START, 0); + if (rc) { + dev_warn(&card->gdev->dev, + "Starting multicast support for %s failed\n", + QETH_CARD_IFNAME(card)); + } else { + dev_info(&card->gdev->dev, "Multicast enabled\n"); + card->dev->flags |= IFF_MULTICAST; + } + return rc; +} + +static int qeth_l3_softsetup_ipv6(struct qeth_card *card) +{ + int rc; + + QETH_CARD_TEXT(card, 3, "softipv6"); + + if (card->info.type == QETH_CARD_TYPE_IQD) + goto out; + + rc = qeth_send_simple_setassparms(card, IPA_IPV6, + IPA_CMD_ASS_START, 3); + if (rc) { + dev_err(&card->gdev->dev, + "Activating IPv6 support for %s failed\n", + QETH_CARD_IFNAME(card)); + return rc; + } + rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6, + IPA_CMD_ASS_START, 0); + if (rc) { + dev_err(&card->gdev->dev, + "Activating IPv6 support for %s failed\n", + QETH_CARD_IFNAME(card)); + return rc; + } + rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU, + IPA_CMD_ASS_START, 0); + if (rc) { + dev_warn(&card->gdev->dev, + "Enabling the passthrough mode for %s failed\n", + QETH_CARD_IFNAME(card)); + return rc; + } +out: + dev_info(&card->gdev->dev, "IPV6 enabled\n"); + return 0; +} + +static int qeth_l3_start_ipa_ipv6(struct qeth_card *card) +{ + QETH_CARD_TEXT(card, 3, "strtipv6"); + + if (!qeth_is_supported(card, IPA_IPV6)) { + dev_info(&card->gdev->dev, + "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card)); + return 0; + } + return qeth_l3_softsetup_ipv6(card); +} + +static int qeth_l3_start_ipa_broadcast(struct qeth_card *card) +{ + int rc; + + QETH_CARD_TEXT(card, 3, "stbrdcst"); + card->info.broadcast_capable = 0; + if (!qeth_is_supported(card, IPA_FILTERING)) { + dev_info(&card->gdev->dev, + "Broadcast not supported on %s\n", + QETH_CARD_IFNAME(card)); + rc = -EOPNOTSUPP; + goto out; + } + rc = qeth_send_simple_setassparms(card, IPA_FILTERING, + IPA_CMD_ASS_START, 0); + if (rc) { + dev_warn(&card->gdev->dev, "Enabling broadcast filtering for " + "%s failed\n", QETH_CARD_IFNAME(card)); + goto out; + } + + rc = qeth_send_simple_setassparms(card, IPA_FILTERING, + IPA_CMD_ASS_CONFIGURE, 1); + if (rc) { + dev_warn(&card->gdev->dev, + "Setting up broadcast filtering for %s failed\n", + QETH_CARD_IFNAME(card)); + goto out; + } + card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO; + dev_info(&card->gdev->dev, "Broadcast enabled\n"); + rc = qeth_send_simple_setassparms(card, IPA_FILTERING, + IPA_CMD_ASS_ENABLE, 1); + if (rc) { + dev_warn(&card->gdev->dev, "Setting up broadcast echo " + "filtering for %s failed\n", QETH_CARD_IFNAME(card)); + goto out; + } + card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO; +out: + if (card->info.broadcast_capable) + card->dev->flags |= IFF_BROADCAST; + else + card->dev->flags &= ~IFF_BROADCAST; + return rc; +} + +static int qeth_l3_start_ipassists(struct 
qeth_card *card) +{ + QETH_CARD_TEXT(card, 3, "strtipas"); + + if (qeth_set_access_ctrl_online(card, 0)) + return -EIO; + qeth_l3_start_ipa_arp_processing(card); /* go on*/ + qeth_l3_start_ipa_source_mac(card); /* go on*/ + qeth_l3_start_ipa_vlan(card); /* go on*/ + qeth_l3_start_ipa_multicast(card); /* go on*/ + qeth_l3_start_ipa_ipv6(card); /* go on*/ + qeth_l3_start_ipa_broadcast(card); /* go on*/ + return 0; +} + +static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + + cmd = (struct qeth_ipa_cmd *) data; + if (cmd->hdr.return_code == 0) + ether_addr_copy(card->dev->dev_addr, + cmd->data.create_destroy_addr.unique_id); + else + eth_random_addr(card->dev->dev_addr); + + return 0; +} + +static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card) +{ + int rc = 0; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(SETUP, 2, "hsrmac"); + + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, + QETH_PROT_IPV6); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = + card->info.unique_id; + + rc = qeth_send_ipa_cmd(card, iob, qeth_l3_iqd_read_initial_mac_cb, + NULL); + return rc; +} + +static int qeth_l3_get_unique_id_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + + cmd = (struct qeth_ipa_cmd *) data; + if (cmd->hdr.return_code == 0) + card->info.unique_id = *((__u16 *) + &cmd->data.create_destroy_addr.unique_id[6]); + else { + card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | + UNIQUE_ID_NOT_BY_CARD; + dev_warn(&card->gdev->dev, "The network adapter failed to " + "generate a unique ID\n"); + } + return 0; +} + +static int qeth_l3_get_unique_id(struct qeth_card *card) +{ + int rc = 0; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(SETUP, 2, "guniqeid"); + + if (!qeth_is_supported(card, IPA_IPV6)) { + card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | + UNIQUE_ID_NOT_BY_CARD; + return 0; + } + + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR, + QETH_PROT_IPV6); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + *((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) = + card->info.unique_id; + + rc = qeth_send_ipa_cmd(card, iob, qeth_l3_get_unique_id_cb, NULL); + return rc; +} + +static int +qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply, + unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + __u16 rc; + + QETH_DBF_TEXT(SETUP, 2, "diastrcb"); + + cmd = (struct qeth_ipa_cmd *)data; + rc = cmd->hdr.return_code; + if (rc) + QETH_CARD_TEXT_(card, 2, "dxter%x", rc); + switch (cmd->data.diagass.action) { + case QETH_DIAGS_CMD_TRACE_QUERY: + break; + case QETH_DIAGS_CMD_TRACE_DISABLE: + switch (rc) { + case 0: + case IPA_RC_INVALID_SUBCMD: + card->info.promisc_mode = SET_PROMISC_MODE_OFF; + dev_info(&card->gdev->dev, "The HiperSockets network " + "traffic analyzer is deactivated\n"); + break; + default: + break; + } + break; + case QETH_DIAGS_CMD_TRACE_ENABLE: + switch (rc) { + case 0: + card->info.promisc_mode = SET_PROMISC_MODE_ON; + dev_info(&card->gdev->dev, "The HiperSockets network " + "traffic analyzer is activated\n"); + break; + case IPA_RC_HARDWARE_AUTH_ERROR: + dev_warn(&card->gdev->dev, "The device is not " + "authorized to run as a HiperSockets network " + "traffic analyzer\n"); + break; + case IPA_RC_TRACE_ALREADY_ACTIVE: + dev_warn(&card->gdev->dev, "A HiperSockets 
" + "network traffic analyzer is already " + "active in the HiperSockets LAN\n"); + break; + default: + break; + } + break; + default: + QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n", + cmd->data.diagass.action, QETH_CARD_IFNAME(card)); + } + + return 0; +} + +static int +qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + QETH_DBF_TEXT(SETUP, 2, "diagtrac"); + + iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + cmd->data.diagass.subcmd_len = 16; + cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE; + cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET; + cmd->data.diagass.action = diags_cmd; + return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL); +} + +static void +qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev) +{ + struct ip_mc_list *im4; + struct qeth_ipaddr *tmp, *ipm; + + QETH_CARD_TEXT(card, 4, "addmc"); + + tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); + if (!tmp) + return; + + for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL; + im4 = rcu_dereference(im4->next_rcu)) { + ip_eth_mc_map(im4->multiaddr, tmp->mac); + tmp->u.a4.addr = be32_to_cpu(im4->multiaddr); + tmp->is_multicast = 1; + + ipm = qeth_l3_find_addr_by_ip(card, tmp); + if (ipm) { + /* for mcast, by-IP match means full match */ + ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; + } else { + ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4); + if (!ipm) + continue; + ether_addr_copy(ipm->mac, tmp->mac); + ipm->u.a4.addr = be32_to_cpu(im4->multiaddr); + ipm->is_multicast = 1; + ipm->disp_flag = QETH_DISP_ADDR_ADD; + hash_add(card->ip_mc_htable, + &ipm->hnode, qeth_l3_ipaddr_hash(ipm)); + } + } + + kfree(tmp); +} + +/* called with rcu_read_lock */ +static void qeth_l3_add_vlan_mc(struct qeth_card *card) +{ + struct in_device *in_dev; + u16 vid; + + QETH_CARD_TEXT(card, 4, "addmcvl"); + + if (!qeth_is_supported(card, IPA_FULL_VLAN)) + return; + + for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) { + struct net_device *netdev; + + netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), + vid); + if (netdev == NULL || + !(netdev->flags & IFF_UP)) + continue; + in_dev = __in_dev_get_rcu(netdev); + if (!in_dev) + continue; + qeth_l3_add_mc_to_hash(card, in_dev); + } +} + +static void qeth_l3_add_multicast_ipv4(struct qeth_card *card) +{ + struct in_device *in4_dev; + + QETH_CARD_TEXT(card, 4, "chkmcv4"); + + rcu_read_lock(); + in4_dev = __in_dev_get_rcu(card->dev); + if (in4_dev == NULL) + goto unlock; + qeth_l3_add_mc_to_hash(card, in4_dev); + qeth_l3_add_vlan_mc(card); +unlock: + rcu_read_unlock(); +} + +static void qeth_l3_add_mc6_to_hash(struct qeth_card *card, + struct inet6_dev *in6_dev) +{ + struct qeth_ipaddr *ipm; + struct ifmcaddr6 *im6; + struct qeth_ipaddr *tmp; + + QETH_CARD_TEXT(card, 4, "addmc6"); + + tmp = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); + if (!tmp) + return; + + for (im6 = in6_dev->mc_list; im6 != NULL; im6 = im6->next) { + ipv6_eth_mc_map(&im6->mca_addr, tmp->mac); + memcpy(&tmp->u.a6.addr, &im6->mca_addr.s6_addr, + sizeof(struct in6_addr)); + tmp->is_multicast = 1; + + ipm = qeth_l3_find_addr_by_ip(card, tmp); + if (ipm) { + /* for mcast, by-IP match means full match */ + ipm->disp_flag = QETH_DISP_ADDR_DO_NOTHING; + continue; + } + + ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); + if (!ipm) + continue; + + ether_addr_copy(ipm->mac, tmp->mac); + memcpy(&ipm->u.a6.addr, 
&im6->mca_addr.s6_addr, + sizeof(struct in6_addr)); + ipm->is_multicast = 1; + ipm->disp_flag = QETH_DISP_ADDR_ADD; + hash_add(card->ip_mc_htable, + &ipm->hnode, qeth_l3_ipaddr_hash(ipm)); + + } + kfree(tmp); +} + +/* called with rcu_read_lock */ +static void qeth_l3_add_vlan_mc6(struct qeth_card *card) +{ + struct inet6_dev *in_dev; + u16 vid; + + QETH_CARD_TEXT(card, 4, "admc6vl"); + + if (!qeth_is_supported(card, IPA_FULL_VLAN)) + return; + + for_each_set_bit(vid, card->active_vlans, VLAN_N_VID) { + struct net_device *netdev; + + netdev = __vlan_find_dev_deep_rcu(card->dev, htons(ETH_P_8021Q), + vid); + if (netdev == NULL || + !(netdev->flags & IFF_UP)) + continue; + in_dev = in6_dev_get(netdev); + if (!in_dev) + continue; + read_lock_bh(&in_dev->lock); + qeth_l3_add_mc6_to_hash(card, in_dev); + read_unlock_bh(&in_dev->lock); + in6_dev_put(in_dev); + } +} + +static void qeth_l3_add_multicast_ipv6(struct qeth_card *card) +{ + struct inet6_dev *in6_dev; + + QETH_CARD_TEXT(card, 4, "chkmcv6"); + + if (!qeth_is_supported(card, IPA_IPV6)) + return ; + in6_dev = in6_dev_get(card->dev); + if (!in6_dev) + return; + + rcu_read_lock(); + read_lock_bh(&in6_dev->lock); + qeth_l3_add_mc6_to_hash(card, in6_dev); + qeth_l3_add_vlan_mc6(card); + read_unlock_bh(&in6_dev->lock); + rcu_read_unlock(); + in6_dev_put(in6_dev); +} + +static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct qeth_card *card = dev->ml_priv; + + set_bit(vid, card->active_vlans); + return 0; +} + +static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_CARD_TEXT_(card, 4, "kid:%d", vid); + + if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { + QETH_CARD_TEXT(card, 3, "kidREC"); + return 0; + } + clear_bit(vid, card->active_vlans); + qeth_l3_set_rx_mode(dev); + return 0; +} + +static void qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, + struct qeth_hdr *hdr) +{ + if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) { + u16 prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? ETH_P_IPV6 : + ETH_P_IP; + unsigned char tg_addr[ETH_ALEN]; + + skb_reset_network_header(skb); + switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) { + case QETH_CAST_MULTICAST: + if (prot == ETH_P_IP) + ip_eth_mc_map(ip_hdr(skb)->daddr, tg_addr); + else + ipv6_eth_mc_map(&ipv6_hdr(skb)->daddr, tg_addr); + + card->stats.multicast++; + break; + case QETH_CAST_BROADCAST: + ether_addr_copy(tg_addr, card->dev->broadcast); + card->stats.multicast++; + break; + default: + if (card->options.sniffer) + skb->pkt_type = PACKET_OTHERHOST; + ether_addr_copy(tg_addr, card->dev->dev_addr); + } + + if (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_SRC_MAC_ADDR) + card->dev->header_ops->create(skb, card->dev, prot, + tg_addr, &hdr->hdr.l3.next_hop.rx.src_mac, + skb->len); + else + card->dev->header_ops->create(skb, card->dev, prot, + tg_addr, "FAKELL", skb->len); + } + + skb->protocol = eth_type_trans(skb, card->dev); + + /* copy VLAN tag from hdr into skb */ + if (!card->options.sniffer && + (hdr->hdr.l3.ext_flags & (QETH_HDR_EXT_VLAN_FRAME | + QETH_HDR_EXT_INCLUDE_VLAN_TAG))) { + u16 tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME) ? 
+ hdr->hdr.l3.vlan_id : + hdr->hdr.l3.next_hop.rx.vlan_id; + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); + } + + qeth_rx_csum(card, skb, hdr->hdr.l3.ext_flags); +} + +static int qeth_l3_process_inbound_buffer(struct qeth_card *card, + int budget, int *done) +{ + int work_done = 0; + struct sk_buff *skb; + struct qeth_hdr *hdr; + unsigned int len; + __u16 magic; + + *done = 0; + WARN_ON_ONCE(!budget); + while (budget) { + skb = qeth_core_get_next_skb(card, + &card->qdio.in_q->bufs[card->rx.b_index], + &card->rx.b_element, &card->rx.e_offset, &hdr); + if (!skb) { + *done = 1; + break; + } + switch (hdr->hdr.l3.id) { + case QETH_HEADER_TYPE_LAYER3: + magic = *(__u16 *)skb->data; + if ((card->info.type == QETH_CARD_TYPE_IQD) && + (magic == ETH_P_AF_IUCV)) { + skb->protocol = cpu_to_be16(ETH_P_AF_IUCV); + len = skb->len; + card->dev->header_ops->create(skb, card->dev, 0, + card->dev->dev_addr, "FAKELL", len); + skb_reset_mac_header(skb); + netif_receive_skb(skb); + } else { + qeth_l3_rebuild_skb(card, skb, hdr); + len = skb->len; + napi_gro_receive(&card->napi, skb); + } + break; + case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */ + skb->protocol = eth_type_trans(skb, skb->dev); + len = skb->len; + netif_receive_skb(skb); + break; + default: + dev_kfree_skb_any(skb); + QETH_CARD_TEXT(card, 3, "inbunkno"); + QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr)); + continue; + } + work_done++; + budget--; + card->stats.rx_packets++; + card->stats.rx_bytes += len; + } + return work_done; +} + +static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) +{ + QETH_DBF_TEXT(SETUP, 2, "stopcard"); + QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); + + qeth_set_allowed_threads(card, 0, 1); + if (card->options.sniffer && + (card->info.promisc_mode == SET_PROMISC_MODE_ON)) + qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); + if (card->read.state == CH_STATE_UP && + card->write.state == CH_STATE_UP && + (card->state == CARD_STATE_UP)) { + if (recovery_mode) + qeth_l3_stop(card->dev); + else { + rtnl_lock(); + dev_close(card->dev); + rtnl_unlock(); + } + card->state = CARD_STATE_SOFTSETUP; + } + if (card->state == CARD_STATE_SOFTSETUP) { + qeth_l3_clear_ip_htable(card, 1); + qeth_clear_ipacmd_list(card); + card->state = CARD_STATE_HARDSETUP; + } + if (card->state == CARD_STATE_HARDSETUP) { + qeth_qdio_clear_card(card, 0); + qeth_clear_qdio_buffers(card); + qeth_clear_working_pool_list(card); + card->state = CARD_STATE_DOWN; + } + if (card->state == CARD_STATE_DOWN) { + qeth_clear_cmd_buffers(&card->read); + qeth_clear_cmd_buffers(&card->write); + } +} + +/* + * test for and Switch promiscuous mode (on or off) + * either for guestlan or HiperSocket Sniffer + */ +static void +qeth_l3_handle_promisc_mode(struct qeth_card *card) +{ + struct net_device *dev = card->dev; + + if (((dev->flags & IFF_PROMISC) && + (card->info.promisc_mode == SET_PROMISC_MODE_ON)) || + (!(dev->flags & IFF_PROMISC) && + (card->info.promisc_mode == SET_PROMISC_MODE_OFF))) + return; + + if (card->info.guestlan) { /* Guestlan trace */ + if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) + qeth_setadp_promisc_mode(card); + } else if (card->options.sniffer && /* HiperSockets trace */ + qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { + if (dev->flags & IFF_PROMISC) { + QETH_CARD_TEXT(card, 3, "+promisc"); + qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE); + } else { + QETH_CARD_TEXT(card, 3, "-promisc"); + qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE); + } + } +} + +static void 
qeth_l3_set_rx_mode(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + struct qeth_ipaddr *addr; + struct hlist_node *tmp; + int i, rc; + + QETH_CARD_TEXT(card, 3, "setmulti"); + if (qeth_threads_running(card, QETH_RECOVER_THREAD) && + (card->state != CARD_STATE_UP)) + return; + if (!card->options.sniffer) { + spin_lock_bh(&card->mclock); + + qeth_l3_add_multicast_ipv4(card); + qeth_l3_add_multicast_ipv6(card); + + hash_for_each_safe(card->ip_mc_htable, i, tmp, addr, hnode) { + switch (addr->disp_flag) { + case QETH_DISP_ADDR_DELETE: + rc = qeth_l3_deregister_addr_entry(card, addr); + if (!rc || rc == IPA_RC_MC_ADDR_NOT_FOUND) { + hash_del(&addr->hnode); + kfree(addr); + } + break; + case QETH_DISP_ADDR_ADD: + rc = qeth_l3_register_addr_entry(card, addr); + if (rc && rc != IPA_RC_LAN_OFFLINE) { + hash_del(&addr->hnode); + kfree(addr); + break; + } + addr->ref_counter = 1; + /* fall through */ + default: + /* for next call to set_rx_mode(): */ + addr->disp_flag = QETH_DISP_ADDR_DELETE; + } + } + + spin_unlock_bh(&card->mclock); + + if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) + return; + } + qeth_l3_handle_promisc_mode(card); +} + +static const char *qeth_l3_arp_get_error_cause(int *rc) +{ + switch (*rc) { + case QETH_IPA_ARP_RC_FAILED: + *rc = -EIO; + return "operation failed"; + case QETH_IPA_ARP_RC_NOTSUPP: + *rc = -EOPNOTSUPP; + return "operation not supported"; + case QETH_IPA_ARP_RC_OUT_OF_RANGE: + *rc = -EINVAL; + return "argument out of range"; + case QETH_IPA_ARP_RC_Q_NOTSUPP: + *rc = -EOPNOTSUPP; + return "query operation not supported"; + case QETH_IPA_ARP_RC_Q_NO_DATA: + *rc = -ENOENT; + return "no query data available"; + default: + return "unknown error"; + } +} + +static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) +{ + int tmp; + int rc; + + QETH_CARD_TEXT(card, 3, "arpstnoe"); + + /* + * currently GuestLAN only supports the ARP assist function + * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_SET_NO_ENTRIES; + * thus we say EOPNOTSUPP for this ARP function + */ + if (card->info.guestlan) + return -EOPNOTSUPP; + if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { + return -EOPNOTSUPP; + } + rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, + IPA_CMD_ASS_ARP_SET_NO_ENTRIES, + no_entries); + if (rc) { + tmp = rc; + QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on " + "%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card), + qeth_l3_arp_get_error_cause(&rc), tmp, tmp); + } + return rc; +} + +static __u32 get_arp_entry_size(struct qeth_card *card, + struct qeth_arp_query_data *qdata, + struct qeth_arp_entrytype *type, __u8 strip_entries) +{ + __u32 rc; + __u8 is_hsi; + + is_hsi = qdata->reply_bits == 5; + if (type->ip == QETHARP_IP_ADDR_V4) { + QETH_CARD_TEXT(card, 4, "arpev4"); + if (strip_entries) { + rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5_short) : + sizeof(struct qeth_arp_qi_entry7_short); + } else { + rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5) : + sizeof(struct qeth_arp_qi_entry7); + } + } else if (type->ip == QETHARP_IP_ADDR_V6) { + QETH_CARD_TEXT(card, 4, "arpev6"); + if (strip_entries) { + rc = is_hsi ? + sizeof(struct qeth_arp_qi_entry5_short_ipv6) : + sizeof(struct qeth_arp_qi_entry7_short_ipv6); + } else { + rc = is_hsi ? 
+ sizeof(struct qeth_arp_qi_entry5_ipv6) : + sizeof(struct qeth_arp_qi_entry7_ipv6); + } + } else { + QETH_CARD_TEXT(card, 4, "arpinv"); + rc = 0; + } + + return rc; +} + +static int arpentry_matches_prot(struct qeth_arp_entrytype *type, __u16 prot) +{ + return (type->ip == QETHARP_IP_ADDR_V4 && prot == QETH_PROT_IPV4) || + (type->ip == QETHARP_IP_ADDR_V6 && prot == QETH_PROT_IPV6); +} + +static int qeth_l3_arp_query_cb(struct qeth_card *card, + struct qeth_reply *reply, unsigned long data) +{ + struct qeth_ipa_cmd *cmd; + struct qeth_arp_query_data *qdata; + struct qeth_arp_query_info *qinfo; + int i; + int e; + int entrybytes_done; + int stripped_bytes; + __u8 do_strip_entries; + + QETH_CARD_TEXT(card, 3, "arpquecb"); + + qinfo = (struct qeth_arp_query_info *) reply->param; + cmd = (struct qeth_ipa_cmd *) data; + QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.prot_version); + if (cmd->hdr.return_code) { + QETH_CARD_TEXT(card, 4, "arpcberr"); + QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code); + return 0; + } + if (cmd->data.setassparms.hdr.return_code) { + cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code; + QETH_CARD_TEXT(card, 4, "setaperr"); + QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code); + return 0; + } + qdata = &cmd->data.setassparms.data.query_arp; + QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries); + + do_strip_entries = (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) > 0; + stripped_bytes = do_strip_entries ? QETH_QARP_MEDIASPECIFIC_BYTES : 0; + entrybytes_done = 0; + for (e = 0; e < qdata->no_entries; ++e) { + char *cur_entry; + __u32 esize; + struct qeth_arp_entrytype *etype; + + cur_entry = &qdata->data + entrybytes_done; + etype = &((struct qeth_arp_qi_entry5 *) cur_entry)->type; + if (!arpentry_matches_prot(etype, cmd->hdr.prot_version)) { + QETH_CARD_TEXT(card, 4, "pmis"); + QETH_CARD_TEXT_(card, 4, "%i", etype->ip); + break; + } + esize = get_arp_entry_size(card, qdata, etype, + do_strip_entries); + QETH_CARD_TEXT_(card, 5, "esz%i", esize); + if (!esize) + break; + + if ((qinfo->udata_len - qinfo->udata_offset) < esize) { + QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM); + cmd->hdr.return_code = IPA_RC_ENOMEM; + goto out_error; + } + + memcpy(qinfo->udata + qinfo->udata_offset, + &qdata->data + entrybytes_done + stripped_bytes, + esize); + entrybytes_done += esize + stripped_bytes; + qinfo->udata_offset += esize; + ++qinfo->no_entries; + } + /* check if all replies received ... 
*/ + if (cmd->data.setassparms.hdr.seq_no < + cmd->data.setassparms.hdr.number_of_replies) + return 1; + QETH_CARD_TEXT_(card, 4, "nove%i", qinfo->no_entries); + memcpy(qinfo->udata, &qinfo->no_entries, 4); + /* keep STRIP_ENTRIES flag so the user program can distinguish + * stripped entries from normal ones */ + if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) + qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES; + memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2); + QETH_CARD_TEXT_(card, 4, "rc%i", 0); + return 0; +out_error: + i = 0; + memcpy(qinfo->udata, &i, 4); + return 0; +} + +static int qeth_l3_send_ipa_arp_cmd(struct qeth_card *card, + struct qeth_cmd_buffer *iob, int len, + int (*reply_cb)(struct qeth_card *, struct qeth_reply *, + unsigned long), + void *reply_param) +{ + QETH_CARD_TEXT(card, 4, "sendarp"); + + memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE); + memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data), + &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH); + return qeth_send_control_data(card, IPA_PDU_HEADER_SIZE + len, iob, + reply_cb, reply_param); +} + +static int qeth_l3_query_arp_cache_info(struct qeth_card *card, + enum qeth_prot_versions prot, + struct qeth_arp_query_info *qinfo) +{ + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + int tmp; + int rc; + + QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot); + + iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, + IPA_CMD_ASS_ARP_QUERY_INFO, + sizeof(struct qeth_arp_query_data) + - sizeof(char), + prot); + if (!iob) + return -ENOMEM; + cmd = __ipa_cmd(iob); + cmd->data.setassparms.data.query_arp.request_bits = 0x000F; + cmd->data.setassparms.data.query_arp.reply_bits = 0; + cmd->data.setassparms.data.query_arp.no_entries = 0; + rc = qeth_l3_send_ipa_arp_cmd(card, iob, + QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN, + qeth_l3_arp_query_cb, (void *)qinfo); + if (rc) { + tmp = rc; + QETH_DBF_MESSAGE(2, + "Error while querying ARP cache on %s: %s " + "(0x%x/%d)\n", QETH_CARD_IFNAME(card), + qeth_l3_arp_get_error_cause(&rc), tmp, tmp); + } + + return rc; +} + +static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) +{ + struct qeth_arp_query_info qinfo = {0, }; + int rc; + + QETH_CARD_TEXT(card, 3, "arpquery"); + + if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ + IPA_ARP_PROCESSING)) { + QETH_CARD_TEXT(card, 3, "arpqnsup"); + rc = -EOPNOTSUPP; + goto out; + } + /* get size of userspace buffer and mask_bits -> 6 bytes */ + if (copy_from_user(&qinfo, udata, 6)) { + rc = -EFAULT; + goto out; + } + qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL); + if (!qinfo.udata) { + rc = -ENOMEM; + goto out; + } + qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET; + rc = qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV4, &qinfo); + if (rc) { + if (copy_to_user(udata, qinfo.udata, 4)) + rc = -EFAULT; + goto free_and_out; + } + if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) { + /* fails in case of GuestLAN QDIO mode */ + qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6, &qinfo); + } + if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) { + QETH_CARD_TEXT(card, 4, "qactf"); + rc = -EFAULT; + goto free_and_out; + } + QETH_CARD_TEXT(card, 4, "qacts"); + +free_and_out: + kfree(qinfo.udata); +out: + return rc; +} + +static int qeth_l3_arp_add_entry(struct qeth_card *card, + struct qeth_arp_cache_entry *entry) +{ + struct qeth_cmd_buffer *iob; + char buf[16]; + int tmp; + int rc; + + QETH_CARD_TEXT(card, 3, "arpadent"); + + /* + * currently GuestLAN only supports the ARP assist function + * 
IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_ADD_ENTRY; + * thus we say EOPNOTSUPP for this ARP function + */ + if (card->info.guestlan) + return -EOPNOTSUPP; + if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { + return -EOPNOTSUPP; + } + + iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, + IPA_CMD_ASS_ARP_ADD_ENTRY, + sizeof(struct qeth_arp_cache_entry), + QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; + rc = qeth_send_setassparms(card, iob, + sizeof(struct qeth_arp_cache_entry), + (unsigned long) entry, + qeth_setassparms_cb, NULL); + if (rc) { + tmp = rc; + qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); + QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s " + "on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card), + qeth_l3_arp_get_error_cause(&rc), tmp, tmp); + } + return rc; +} + +static int qeth_l3_arp_remove_entry(struct qeth_card *card, + struct qeth_arp_cache_entry *entry) +{ + struct qeth_cmd_buffer *iob; + char buf[16] = {0, }; + int tmp; + int rc; + + QETH_CARD_TEXT(card, 3, "arprment"); + + /* + * currently GuestLAN only supports the ARP assist function + * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY; + * thus we say EOPNOTSUPP for this ARP function + */ + if (card->info.guestlan) + return -EOPNOTSUPP; + if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { + return -EOPNOTSUPP; + } + memcpy(buf, entry, 12); + iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, + IPA_CMD_ASS_ARP_REMOVE_ENTRY, + 12, + QETH_PROT_IPV4); + if (!iob) + return -ENOMEM; + rc = qeth_send_setassparms(card, iob, + 12, (unsigned long)buf, + qeth_setassparms_cb, NULL); + if (rc) { + tmp = rc; + memset(buf, 0, 16); + qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); + QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s" + " on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card), + qeth_l3_arp_get_error_cause(&rc), tmp, tmp); + } + return rc; +} + +static int qeth_l3_arp_flush_cache(struct qeth_card *card) +{ + int rc; + int tmp; + + QETH_CARD_TEXT(card, 3, "arpflush"); + + /* + * currently GuestLAN only supports the ARP assist function + * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_FLUSH_CACHE; + * thus we say EOPNOTSUPP for this ARP function + */ + if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD)) + return -EOPNOTSUPP; + if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { + return -EOPNOTSUPP; + } + rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING, + IPA_CMD_ASS_ARP_FLUSH_CACHE, 0); + if (rc) { + tmp = rc; + QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s " + "(0x%x/%d)\n", QETH_CARD_IFNAME(card), + qeth_l3_arp_get_error_cause(&rc), tmp, tmp); + } + return rc; +} + +static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct qeth_card *card = dev->ml_priv; + struct qeth_arp_cache_entry arp_entry; + int rc = 0; + + switch (cmd) { + case SIOC_QETH_ARP_SET_NO_ENTRIES: + if (!capable(CAP_NET_ADMIN)) { + rc = -EPERM; + break; + } + rc = qeth_l3_arp_set_no_entries(card, rq->ifr_ifru.ifru_ivalue); + break; + case SIOC_QETH_ARP_QUERY_INFO: + if (!capable(CAP_NET_ADMIN)) { + rc = -EPERM; + break; + } + rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data); + break; + case SIOC_QETH_ARP_ADD_ENTRY: + if (!capable(CAP_NET_ADMIN)) { + rc = -EPERM; + break; + } + if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data, + sizeof(struct qeth_arp_cache_entry))) + rc = -EFAULT; + else + rc = qeth_l3_arp_add_entry(card, &arp_entry); + break; + case SIOC_QETH_ARP_REMOVE_ENTRY: + if 
(!capable(CAP_NET_ADMIN)) { + rc = -EPERM; + break; + } + if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data, + sizeof(struct qeth_arp_cache_entry))) + rc = -EFAULT; + else + rc = qeth_l3_arp_remove_entry(card, &arp_entry); + break; + case SIOC_QETH_ARP_FLUSH_CACHE: + if (!capable(CAP_NET_ADMIN)) { + rc = -EPERM; + break; + } + rc = qeth_l3_arp_flush_cache(card); + break; + default: + rc = -EOPNOTSUPP; + } + return rc; +} + +static int qeth_l3_get_cast_type(struct sk_buff *skb) +{ + struct neighbour *n = NULL; + struct dst_entry *dst; + + rcu_read_lock(); + dst = skb_dst(skb); + if (dst) + n = dst_neigh_lookup_skb(dst, skb); + if (n) { + int cast_type = n->type; + + rcu_read_unlock(); + neigh_release(n); + if ((cast_type == RTN_BROADCAST) || + (cast_type == RTN_MULTICAST) || + (cast_type == RTN_ANYCAST)) + return cast_type; + return RTN_UNICAST; + } + rcu_read_unlock(); + + /* no neighbour (eg AF_PACKET), fall back to target's IP address ... */ + if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) + return ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ? + RTN_MULTICAST : RTN_UNICAST; + else if (be16_to_cpu(skb->protocol) == ETH_P_IP) + return ipv4_is_multicast(ip_hdr(skb)->daddr) ? + RTN_MULTICAST : RTN_UNICAST; + + /* ... and MAC address */ + if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, skb->dev->broadcast)) + return RTN_BROADCAST; + if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) + return RTN_MULTICAST; + + /* default to unicast */ + return RTN_UNICAST; +} + +static void qeth_l3_fill_af_iucv_hdr(struct qeth_hdr *hdr, struct sk_buff *skb, + unsigned int data_len) +{ + char daddr[16]; + struct af_iucv_trans_hdr *iucv_hdr; + + memset(hdr, 0, sizeof(struct qeth_hdr)); + hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; + hdr->hdr.l3.length = data_len; + hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST; + + iucv_hdr = (struct af_iucv_trans_hdr *)(skb_mac_header(skb) + ETH_HLEN); + memset(daddr, 0, sizeof(daddr)); + daddr[0] = 0xfe; + daddr[1] = 0x80; + memcpy(&daddr[8], iucv_hdr->destUserID, 8); + memcpy(hdr->hdr.l3.next_hop.ipv6_addr, daddr, 16); +} + +static u8 qeth_l3_cast_type_to_flag(int cast_type) +{ + if (cast_type == RTN_MULTICAST) + return QETH_CAST_MULTICAST; + if (cast_type == RTN_ANYCAST) + return QETH_CAST_ANYCAST; + if (cast_type == RTN_BROADCAST) + return QETH_CAST_BROADCAST; + return QETH_CAST_UNICAST; +} + +static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, + struct sk_buff *skb, int ipv, int cast_type, + unsigned int data_len) +{ + memset(hdr, 0, sizeof(struct qeth_hdr)); + hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; + hdr->hdr.l3.length = data_len; + + /* + * before we're going to overwrite this location with next hop ip. + * v6 uses passthrough, v4 sets the tag in the QDIO header. + */ + if (skb_vlan_tag_present(skb)) { + if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD)) + hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME; + else + hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG; + hdr->hdr.l3.vlan_id = skb_vlan_tag_get(skb); + } + + if (!skb_is_gso(skb) && skb->ip_summed == CHECKSUM_PARTIAL) { + qeth_tx_csum(skb, &hdr->hdr.l3.ext_flags, ipv); + if (card->options.performance_stats) + card->perf_stats.tx_csum++; + } + + /* OSA only: */ + if (!ipv) { + hdr->hdr.l3.flags = QETH_HDR_PASSTHRU; + if (ether_addr_equal_64bits(eth_hdr(skb)->h_dest, + skb->dev->broadcast)) + hdr->hdr.l3.flags |= QETH_CAST_BROADCAST; + else + hdr->hdr.l3.flags |= (cast_type == RTN_MULTICAST) ? 
+ QETH_CAST_MULTICAST : QETH_CAST_UNICAST; + return; + } + + hdr->hdr.l3.flags = qeth_l3_cast_type_to_flag(cast_type); + rcu_read_lock(); + if (ipv == 4) { + struct rtable *rt = skb_rtable(skb); + + *((__be32 *) &hdr->hdr.l3.next_hop.ipv4.addr) = (rt) ? + rt_nexthop(rt, ip_hdr(skb)->daddr) : + ip_hdr(skb)->daddr; + } else { + /* IPv6 */ + const struct rt6_info *rt = skb_rt6_info(skb); + const struct in6_addr *next_hop; + + if (rt && !ipv6_addr_any(&rt->rt6i_gateway)) + next_hop = &rt->rt6i_gateway; + else + next_hop = &ipv6_hdr(skb)->daddr; + memcpy(hdr->hdr.l3.next_hop.ipv6_addr, next_hop, 16); + + hdr->hdr.l3.flags |= QETH_HDR_IPV6; + if (card->info.type != QETH_CARD_TYPE_IQD) + hdr->hdr.l3.flags |= QETH_HDR_PASSTHRU; + } + rcu_read_unlock(); +} + +static void qeth_tso_fill_header(struct qeth_card *card, + struct qeth_hdr *qhdr, struct sk_buff *skb) +{ + struct qeth_hdr_tso *hdr = (struct qeth_hdr_tso *)qhdr; + struct tcphdr *tcph = tcp_hdr(skb); + struct iphdr *iph = ip_hdr(skb); + struct ipv6hdr *ip6h = ipv6_hdr(skb); + + /*fix header to TSO values ...*/ + hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO; + /*set values which are fix for the first approach ...*/ + hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso); + hdr->ext.imb_hdr_no = 1; + hdr->ext.hdr_type = 1; + hdr->ext.hdr_version = 1; + hdr->ext.hdr_len = 28; + /*insert non-fix values */ + hdr->ext.mss = skb_shinfo(skb)->gso_size; + hdr->ext.dg_hdr_len = (__u16)(ip_hdrlen(skb) + tcp_hdrlen(skb)); + hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len - + sizeof(struct qeth_hdr_tso)); + tcph->check = 0; + if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) { + ip6h->payload_len = 0; + tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, + 0, IPPROTO_TCP, 0); + } else { + /*OSA want us to set these values ...*/ + tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + 0, IPPROTO_TCP, 0); + iph->tot_len = 0; + iph->check = 0; + } +} + +/** + * qeth_l3_get_elements_no_tso() - find number of SBALEs for skb data for tso + * @card: qeth card structure, to check max. elems. + * @skb: SKB address + * @extra_elems: extra elems needed, to check against max. + * + * Returns the number of pages, and thus QDIO buffer elements, needed to cover + * skb data, including linear part and fragments, but excluding TCP header. + * (Exclusion of TCP header distinguishes it from qeth_get_elements_no().) + * Checks if the result plus extra_elems fits under the limit for the card. + * Returns 0 if it does not. + * Note: extra_elems is not included in the returned result. + */ +static int qeth_l3_get_elements_no_tso(struct qeth_card *card, + struct sk_buff *skb, int extra_elems) +{ + addr_t start = (addr_t)tcp_hdr(skb) + tcp_hdrlen(skb); + addr_t end = (addr_t)skb->data + skb_headlen(skb); + int elements = qeth_get_elements_for_frags(skb); + + if (start != end) + elements += qeth_get_elements_for_range(start, end); + + if ((elements + extra_elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { + QETH_DBF_MESSAGE(2, + "Invalid size of TSO IP packet (Number=%d / Length=%d). 
Discarded.\n", + elements + extra_elems, skb->len); + return 0; + } + return elements; +} + +static int qeth_l3_xmit_offload(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int ipv, + int cast_type) +{ + const unsigned int hw_hdr_len = sizeof(struct qeth_hdr); + unsigned int frame_len, elements; + unsigned char eth_hdr[ETH_HLEN]; + struct qeth_hdr *hdr = NULL; + unsigned int hd_len = 0; + int push_len, rc; + bool is_sg; + + /* re-use the L2 header area for the HW header: */ + rc = skb_cow_head(skb, hw_hdr_len - ETH_HLEN); + if (rc) + return rc; + skb_copy_from_linear_data(skb, eth_hdr, ETH_HLEN); + skb_pull(skb, ETH_HLEN); + frame_len = skb->len; + + push_len = qeth_add_hw_header(card, skb, &hdr, hw_hdr_len, 0, + &elements); + if (push_len < 0) + return push_len; + if (!push_len) { + /* hdr was added discontiguous from skb->data */ + hd_len = hw_hdr_len; + } + + if (skb->protocol == htons(ETH_P_AF_IUCV)) + qeth_l3_fill_af_iucv_hdr(hdr, skb, frame_len); + else + qeth_l3_fill_header(card, hdr, skb, ipv, cast_type, frame_len); + + is_sg = skb_is_nonlinear(skb); + if (IS_IQD(card)) { + rc = qeth_do_send_packet_fast(queue, skb, hdr, 0, hd_len); + } else { + /* TODO: drop skb_orphan() once TX completion is fast enough */ + skb_orphan(skb); + rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, + elements); + } + + if (!rc) { + if (card->options.performance_stats) { + card->perf_stats.buf_elements_sent += elements; + if (is_sg) + card->perf_stats.sg_skbs_sent++; + } + } else { + if (!push_len) + kmem_cache_free(qeth_core_header_cache, hdr); + if (rc == -EBUSY) { + /* roll back to ETH header */ + skb_pull(skb, push_len); + skb_push(skb, ETH_HLEN); + skb_copy_to_linear_data(skb, eth_hdr, ETH_HLEN); + } + } + return rc; +} + +static int qeth_l3_xmit(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int ipv, int cast_type) +{ + int elements, len, rc; + __be16 *tag; + struct qeth_hdr *hdr = NULL; + int hdr_elements = 0; + struct sk_buff *new_skb = NULL; + int tx_bytes = skb->len; + unsigned int hd_len; + bool use_tso, is_sg; + + /* Ignore segment size from skb_is_gso(), 1 page is always used. 
*/ + use_tso = skb_is_gso(skb) && + (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4); + + /* create a clone with writeable headroom */ + new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr_tso) + + VLAN_HLEN); + if (!new_skb) + return -ENOMEM; + + if (ipv == 4) { + skb_pull(new_skb, ETH_HLEN); + } else if (skb_vlan_tag_present(new_skb)) { + skb_push(new_skb, VLAN_HLEN); + skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4); + skb_copy_to_linear_data_offset(new_skb, 4, + new_skb->data + 8, 4); + skb_copy_to_linear_data_offset(new_skb, 8, + new_skb->data + 12, 4); + tag = (__be16 *)(new_skb->data + 12); + *tag = cpu_to_be16(ETH_P_8021Q); + *(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb)); + } + + /* fix hardware limitation: as long as we do not have sbal + * chaining we can not send long frag lists + */ + if ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) || + (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0))) { + rc = skb_linearize(new_skb); + + if (card->options.performance_stats) { + if (rc) + card->perf_stats.tx_linfail++; + else + card->perf_stats.tx_lin++; + } + if (rc) + goto out; + } + + if (use_tso) { + hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso)); + memset(hdr, 0, sizeof(struct qeth_hdr_tso)); + qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type, + new_skb->len - sizeof(struct qeth_hdr_tso)); + qeth_tso_fill_header(card, hdr, new_skb); + hdr_elements++; + } else { + hdr = skb_push(new_skb, sizeof(struct qeth_hdr)); + qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type, + new_skb->len - sizeof(struct qeth_hdr)); + } + + elements = use_tso ? + qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) : + qeth_get_elements_no(card, new_skb, hdr_elements, 0); + if (!elements) { + rc = -E2BIG; + goto out; + } + elements += hdr_elements; + + if (use_tso) { + hd_len = sizeof(struct qeth_hdr_tso) + + ip_hdrlen(new_skb) + tcp_hdrlen(new_skb); + len = hd_len; + } else { + hd_len = 0; + len = sizeof(struct qeth_hdr_layer3); + } + + if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) { + rc = -EINVAL; + goto out; + } + + is_sg = skb_is_nonlinear(new_skb); + rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, hd_len, + elements); +out: + if (!rc) { + if (new_skb != skb) + dev_kfree_skb_any(skb); + if (card->options.performance_stats) { + card->perf_stats.buf_elements_sent += elements; + if (is_sg) + card->perf_stats.sg_skbs_sent++; + if (use_tso) { + card->perf_stats.large_send_bytes += tx_bytes; + card->perf_stats.large_send_cnt++; + } + } + } else { + if (new_skb != skb) + dev_kfree_skb_any(new_skb); + } + return rc; +} + +static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + int cast_type = qeth_l3_get_cast_type(skb); + struct qeth_card *card = dev->ml_priv; + int ipv = qeth_get_ip_version(skb); + struct qeth_qdio_out_q *queue; + int tx_bytes = skb->len; + int rc; + + if (IS_IQD(card)) { + if (card->options.sniffer) + goto tx_drop; + if ((card->options.cq != QETH_CQ_ENABLED && !ipv) || + (card->options.cq == QETH_CQ_ENABLED && + skb->protocol != htons(ETH_P_AF_IUCV))) + goto tx_drop; + } + + if (card->state != CARD_STATE_UP || !card->lan_online) { + card->stats.tx_carrier_errors++; + goto tx_drop; + } + + if (cast_type == RTN_BROADCAST && !card->info.broadcast_capable) + goto tx_drop; + + queue = qeth_get_tx_queue(card, skb, ipv, cast_type); + + if (card->options.performance_stats) { + card->perf_stats.outbound_cnt++; + card->perf_stats.outbound_start_time = qeth_get_micros(); + } + netif_stop_queue(dev); 
+ + if (IS_IQD(card) || (!skb_is_gso(skb) && ipv == 4)) + rc = qeth_l3_xmit_offload(card, skb, queue, ipv, cast_type); + else + rc = qeth_l3_xmit(card, skb, queue, ipv, cast_type); + + if (!rc) { + card->stats.tx_packets++; + card->stats.tx_bytes += tx_bytes; + if (card->options.performance_stats) + card->perf_stats.outbound_time += qeth_get_micros() - + card->perf_stats.outbound_start_time; + netif_wake_queue(dev); + return NETDEV_TX_OK; + } else if (rc == -EBUSY) { + return NETDEV_TX_BUSY; + } /* else fall through */ + +tx_drop: + card->stats.tx_dropped++; + card->stats.tx_errors++; + dev_kfree_skb_any(skb); + netif_wake_queue(dev); + return NETDEV_TX_OK; +} + +static int __qeth_l3_open(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + int rc = 0; + + QETH_CARD_TEXT(card, 4, "qethopen"); + if (card->state == CARD_STATE_UP) + return rc; + if (card->state != CARD_STATE_SOFTSETUP) + return -ENODEV; + card->data.state = CH_STATE_UP; + card->state = CARD_STATE_UP; + netif_start_queue(dev); + + if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) { + napi_enable(&card->napi); + local_bh_disable(); + napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); + } else + rc = -EIO; + return rc; +} + +static int qeth_l3_open(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_CARD_TEXT(card, 5, "qethope_"); + if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { + QETH_CARD_TEXT(card, 3, "openREC"); + return -ERESTARTSYS; + } + return __qeth_l3_open(dev); +} + +static int qeth_l3_stop(struct net_device *dev) +{ + struct qeth_card *card = dev->ml_priv; + + QETH_CARD_TEXT(card, 4, "qethstop"); + netif_tx_disable(dev); + if (card->state == CARD_STATE_UP) { + card->state = CARD_STATE_SOFTSETUP; + napi_disable(&card->napi); + } + return 0; +} + +static const struct ethtool_ops qeth_l3_ethtool_ops = { + .get_link = ethtool_op_get_link, + .get_strings = qeth_core_get_strings, + .get_ethtool_stats = qeth_core_get_ethtool_stats, + .get_sset_count = qeth_core_get_sset_count, + .get_drvinfo = qeth_core_get_drvinfo, + .get_link_ksettings = qeth_core_ethtool_get_link_ksettings, +}; + +/* + * we need NOARP for IPv4 but we want neighbor solicitation for IPv6. Setting + * NOARP on the netdevice is no option because it also turns off neighbor + * solicitation. For IPv4 we install a neighbor_setup function. We don't want + * arp resolution but we want the hard header (packet socket will work + * e.g. 
tcpdump) + */ +static int qeth_l3_neigh_setup_noarp(struct neighbour *n) +{ + n->nud_state = NUD_NOARP; + memcpy(n->ha, "FAKELL", 6); + n->output = n->ops->connected_output; + return 0; +} + +static int +qeth_l3_neigh_setup(struct net_device *dev, struct neigh_parms *np) +{ + if (np->tbl->family == AF_INET) + np->neigh_setup = qeth_l3_neigh_setup_noarp; + + return 0; +} + +static const struct net_device_ops qeth_l3_netdev_ops = { + .ndo_open = qeth_l3_open, + .ndo_stop = qeth_l3_stop, + .ndo_get_stats = qeth_get_stats, + .ndo_start_xmit = qeth_l3_hard_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = qeth_l3_set_rx_mode, + .ndo_do_ioctl = qeth_do_ioctl, + .ndo_fix_features = qeth_fix_features, + .ndo_set_features = qeth_set_features, + .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, + .ndo_tx_timeout = qeth_tx_timeout, +}; + +static const struct net_device_ops qeth_l3_osa_netdev_ops = { + .ndo_open = qeth_l3_open, + .ndo_stop = qeth_l3_stop, + .ndo_get_stats = qeth_get_stats, + .ndo_start_xmit = qeth_l3_hard_start_xmit, + .ndo_features_check = qeth_features_check, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = qeth_l3_set_rx_mode, + .ndo_do_ioctl = qeth_do_ioctl, + .ndo_fix_features = qeth_fix_features, + .ndo_set_features = qeth_set_features, + .ndo_vlan_rx_add_vid = qeth_l3_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = qeth_l3_vlan_rx_kill_vid, + .ndo_tx_timeout = qeth_tx_timeout, + .ndo_neigh_setup = qeth_l3_neigh_setup, +}; + +static int qeth_l3_setup_netdev(struct qeth_card *card) +{ + int rc; + + if (qeth_netdev_is_registered(card->dev)) + return 0; + + if (card->info.type == QETH_CARD_TYPE_OSD || + card->info.type == QETH_CARD_TYPE_OSX) { + if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) || + (card->info.link_type == QETH_LINK_TYPE_HSTR)) { + pr_info("qeth_l3: ignoring TR device\n"); + return -ENODEV; + } + + card->dev->netdev_ops = &qeth_l3_osa_netdev_ops; + + /*IPv6 address autoconfiguration stuff*/ + qeth_l3_get_unique_id(card); + if (!(card->info.unique_id & UNIQUE_ID_NOT_BY_CARD)) + card->dev->dev_id = card->info.unique_id & 0xffff; + + if (!card->info.guestlan) { + card->dev->features |= NETIF_F_SG; + card->dev->hw_features |= NETIF_F_TSO | + NETIF_F_RXCSUM | NETIF_F_IP_CSUM; + card->dev->vlan_features |= NETIF_F_TSO | + NETIF_F_RXCSUM | NETIF_F_IP_CSUM; + } + + if (qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6)) { + card->dev->hw_features |= NETIF_F_IPV6_CSUM; + card->dev->vlan_features |= NETIF_F_IPV6_CSUM; + } + } else if (card->info.type == QETH_CARD_TYPE_IQD) { + card->dev->flags |= IFF_NOARP; + card->dev->netdev_ops = &qeth_l3_netdev_ops; + + rc = qeth_l3_iqd_read_initial_mac(card); + if (rc) + goto out; + + if (card->options.hsuid[0]) + memcpy(card->dev->perm_addr, card->options.hsuid, 9); + } else + return -ENODEV; + + card->dev->ethtool_ops = &qeth_l3_ethtool_ops; + card->dev->needed_headroom = sizeof(struct qeth_hdr) - ETH_HLEN; + card->dev->features |= NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + + netif_keep_dst(card->dev); + if (card->dev->hw_features & NETIF_F_TSO) + netif_set_gso_max_size(card->dev, + PAGE_SIZE * (QETH_MAX_BUFFER_ELEMENTS(card) - 1)); + + netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); + rc = register_netdev(card->dev); +out: + if (rc) + card->dev->netdev_ops = NULL; + return rc; +} + +static const struct device_type qeth_l3_devtype = { + .name = "qeth_layer3", + .groups = 
qeth_l3_attr_groups, +}; + +static int qeth_l3_probe_device(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + int rc; + + hash_init(card->ip_htable); + + if (gdev->dev.type == &qeth_generic_devtype) { + rc = qeth_l3_create_device_attributes(&gdev->dev); + if (rc) + return rc; + } + + hash_init(card->ip_mc_htable); + card->options.layer2 = 0; + card->info.hwtrap = 0; + return 0; +} + +static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) +{ + struct qeth_card *card = dev_get_drvdata(&cgdev->dev); + + if (cgdev->dev.type == &qeth_generic_devtype) + qeth_l3_remove_device_attributes(&cgdev->dev); + + qeth_set_allowed_threads(card, 0, 1); + wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); + + if (cgdev->state == CCWGROUP_ONLINE) + qeth_l3_set_offline(cgdev); + + cancel_work_sync(&card->close_dev_work); + if (qeth_netdev_is_registered(card->dev)) + unregister_netdev(card->dev); + qeth_l3_clear_ip_htable(card, 0); + qeth_l3_clear_ipato_list(card); +} + +static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + int rc = 0; + enum qeth_card_states recover_flag; + + mutex_lock(&card->discipline_mutex); + mutex_lock(&card->conf_mutex); + QETH_DBF_TEXT(SETUP, 2, "setonlin"); + QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); + + recover_flag = card->state; + rc = qeth_core_hardsetup_card(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); + rc = -ENODEV; + goto out_remove; + } + + rc = qeth_l3_setup_netdev(card); + if (rc) + goto out_remove; + + if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) { + if (card->info.hwtrap && + qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)) + card->info.hwtrap = 0; + } else + card->info.hwtrap = 0; + + card->state = CARD_STATE_HARDSETUP; + qeth_print_status_message(card); + + /* softsetup */ + QETH_DBF_TEXT(SETUP, 2, "softsetp"); + + rc = qeth_l3_setadapter_parms(card); + if (rc) + QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc); + if (!card->options.sniffer) { + rc = qeth_l3_start_ipassists(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); + goto out_remove; + } + rc = qeth_l3_setrouting_v4(card); + if (rc) + QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc); + rc = qeth_l3_setrouting_v6(card); + if (rc) + QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc); + } + netif_tx_disable(card->dev); + + rc = qeth_init_qdio_queues(card); + if (rc) { + QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); + rc = -ENODEV; + goto out_remove; + } + card->state = CARD_STATE_SOFTSETUP; + + qeth_set_allowed_threads(card, 0xffffffff, 0); + qeth_l3_recover_ip(card); + if (card->lan_online) + netif_carrier_on(card->dev); + else + netif_carrier_off(card->dev); + + qeth_enable_hw_features(card->dev); + if (recover_flag == CARD_STATE_RECOVER) { + rtnl_lock(); + if (recovery_mode) { + __qeth_l3_open(card->dev); + qeth_l3_set_rx_mode(card->dev); + } else { + dev_open(card->dev); + } + rtnl_unlock(); + } + qeth_trace_features(card); + /* let user_space know that device is online */ + kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); + mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); + return 0; +out_remove: + qeth_l3_stop_card(card, 0); + ccw_device_set_offline(CARD_DDEV(card)); + ccw_device_set_offline(CARD_WDEV(card)); + ccw_device_set_offline(CARD_RDEV(card)); + qdio_free(CARD_DDEV(card)); + if (recover_flag == CARD_STATE_RECOVER) + card->state = CARD_STATE_RECOVER; + else + card->state = CARD_STATE_DOWN; + 
mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); + return rc; +} + +static int qeth_l3_set_online(struct ccwgroup_device *gdev) +{ + return __qeth_l3_set_online(gdev, 0); +} + +static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev, + int recovery_mode) +{ + struct qeth_card *card = dev_get_drvdata(&cgdev->dev); + int rc = 0, rc2 = 0, rc3 = 0; + enum qeth_card_states recover_flag; + + mutex_lock(&card->discipline_mutex); + mutex_lock(&card->conf_mutex); + QETH_DBF_TEXT(SETUP, 3, "setoffl"); + QETH_DBF_HEX(SETUP, 3, &card, sizeof(void *)); + + netif_carrier_off(card->dev); + recover_flag = card->state; + if ((!recovery_mode && card->info.hwtrap) || card->info.hwtrap == 2) { + qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); + card->info.hwtrap = 1; + } + qeth_l3_stop_card(card, recovery_mode); + if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) { + rtnl_lock(); + call_netdevice_notifiers(NETDEV_REBOOT, card->dev); + rtnl_unlock(); + } + rc = ccw_device_set_offline(CARD_DDEV(card)); + rc2 = ccw_device_set_offline(CARD_WDEV(card)); + rc3 = ccw_device_set_offline(CARD_RDEV(card)); + if (!rc) + rc = (rc2) ? rc2 : rc3; + if (rc) + QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); + qdio_free(CARD_DDEV(card)); + if (recover_flag == CARD_STATE_UP) + card->state = CARD_STATE_RECOVER; + /* let user_space know that device is offline */ + kobject_uevent(&cgdev->dev.kobj, KOBJ_CHANGE); + mutex_unlock(&card->conf_mutex); + mutex_unlock(&card->discipline_mutex); + return 0; +} + +static int qeth_l3_set_offline(struct ccwgroup_device *cgdev) +{ + return __qeth_l3_set_offline(cgdev, 0); +} + +static int qeth_l3_recover(void *ptr) +{ + struct qeth_card *card; + int rc = 0; + + card = (struct qeth_card *) ptr; + QETH_CARD_TEXT(card, 2, "recover1"); + QETH_CARD_HEX(card, 2, &card, sizeof(void *)); + if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) + return 0; + QETH_CARD_TEXT(card, 2, "recover2"); + dev_warn(&card->gdev->dev, + "A recovery process has been started for the device\n"); + qeth_set_recovery_task(card); + __qeth_l3_set_offline(card->gdev, 1); + rc = __qeth_l3_set_online(card->gdev, 1); + if (!rc) + dev_info(&card->gdev->dev, + "Device successfully recovered!\n"); + else { + qeth_close_dev(card); + dev_warn(&card->gdev->dev, "The qeth device driver " + "failed to recover an error on the device\n"); + } + qeth_clear_recovery_task(card); + qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); + qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); + return 0; +} + +static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + + netif_device_detach(card->dev); + qeth_set_allowed_threads(card, 0, 1); + wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); + if (gdev->state == CCWGROUP_OFFLINE) + return 0; + if (card->state == CARD_STATE_UP) { + if (card->info.hwtrap) + qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); + __qeth_l3_set_offline(card->gdev, 1); + } else + __qeth_l3_set_offline(card->gdev, 0); + return 0; +} + +static int qeth_l3_pm_resume(struct ccwgroup_device *gdev) +{ + struct qeth_card *card = dev_get_drvdata(&gdev->dev); + int rc = 0; + + if (gdev->state == CCWGROUP_OFFLINE) + goto out; + + if (card->state == CARD_STATE_RECOVER) { + rc = __qeth_l3_set_online(card->gdev, 1); + if (rc) { + rtnl_lock(); + dev_close(card->dev); + rtnl_unlock(); + } + } else + rc = __qeth_l3_set_online(card->gdev, 0); +out: + qeth_set_allowed_threads(card, 0xffffffff, 0); + netif_device_attach(card->dev); 
+ if (rc) + dev_warn(&card->gdev->dev, "The qeth device driver " + "failed to recover an error on the device\n"); + return rc; +} + +/* Returns zero if the command is successfully "consumed" */ +static int qeth_l3_control_event(struct qeth_card *card, + struct qeth_ipa_cmd *cmd) +{ + return 1; +} + +struct qeth_discipline qeth_l3_discipline = { + .devtype = &qeth_l3_devtype, + .process_rx_buffer = qeth_l3_process_inbound_buffer, + .recover = qeth_l3_recover, + .setup = qeth_l3_probe_device, + .remove = qeth_l3_remove_device, + .set_online = qeth_l3_set_online, + .set_offline = qeth_l3_set_offline, + .freeze = qeth_l3_pm_suspend, + .thaw = qeth_l3_pm_resume, + .restore = qeth_l3_pm_resume, + .do_ioctl = qeth_l3_do_ioctl, + .control_event_handler = qeth_l3_control_event, +}; +EXPORT_SYMBOL_GPL(qeth_l3_discipline); + +static int qeth_l3_handle_ip_event(struct qeth_card *card, + struct qeth_ipaddr *addr, + unsigned long event) +{ + switch (event) { + case NETDEV_UP: + spin_lock_bh(&card->ip_lock); + qeth_l3_add_ip(card, addr); + spin_unlock_bh(&card->ip_lock); + return NOTIFY_OK; + case NETDEV_DOWN: + spin_lock_bh(&card->ip_lock); + qeth_l3_delete_ip(card, addr); + spin_unlock_bh(&card->ip_lock); + return NOTIFY_OK; + default: + return NOTIFY_DONE; + } +} + +static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev) +{ + if (is_vlan_dev(dev)) + dev = vlan_dev_real_dev(dev); + if (dev->netdev_ops == &qeth_l3_osa_netdev_ops || + dev->netdev_ops == &qeth_l3_netdev_ops) + return (struct qeth_card *) dev->ml_priv; + return NULL; +} + +static int qeth_l3_ip_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + + struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; + struct net_device *dev = ifa->ifa_dev->dev; + struct qeth_ipaddr addr; + struct qeth_card *card; + + if (dev_net(dev) != &init_net) + return NOTIFY_DONE; + + card = qeth_l3_get_card_from_dev(dev); + if (!card) + return NOTIFY_DONE; + QETH_CARD_TEXT(card, 3, "ipevent"); + + qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV4); + addr.u.a4.addr = be32_to_cpu(ifa->ifa_address); + addr.u.a4.mask = be32_to_cpu(ifa->ifa_mask); + + return qeth_l3_handle_ip_event(card, &addr, event); +} + +static struct notifier_block qeth_l3_ip_notifier = { + qeth_l3_ip_event, + NULL, +}; + +static int qeth_l3_ip6_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; + struct net_device *dev = ifa->idev->dev; + struct qeth_ipaddr addr; + struct qeth_card *card; + + card = qeth_l3_get_card_from_dev(dev); + if (!card) + return NOTIFY_DONE; + QETH_CARD_TEXT(card, 3, "ip6event"); + if (!qeth_is_supported(card, IPA_IPV6)) + return NOTIFY_DONE; + + qeth_l3_init_ipaddr(&addr, QETH_IP_TYPE_NORMAL, QETH_PROT_IPV6); + addr.u.a6.addr = ifa->addr; + addr.u.a6.pfxlen = ifa->prefix_len; + + return qeth_l3_handle_ip_event(card, &addr, event); +} + +static struct notifier_block qeth_l3_ip6_notifier = { + qeth_l3_ip6_event, + NULL, +}; + +static int qeth_l3_register_notifiers(void) +{ + int rc; + + QETH_DBF_TEXT(SETUP, 5, "regnotif"); + rc = register_inetaddr_notifier(&qeth_l3_ip_notifier); + if (rc) + return rc; + rc = register_inet6addr_notifier(&qeth_l3_ip6_notifier); + if (rc) { + unregister_inetaddr_notifier(&qeth_l3_ip_notifier); + return rc; + } + return 0; +} + +static void qeth_l3_unregister_notifiers(void) +{ + QETH_DBF_TEXT(SETUP, 5, "unregnot"); + WARN_ON(unregister_inetaddr_notifier(&qeth_l3_ip_notifier)); + 
WARN_ON(unregister_inet6addr_notifier(&qeth_l3_ip6_notifier)); +} + +static int __init qeth_l3_init(void) +{ + pr_info("register layer 3 discipline\n"); + return qeth_l3_register_notifiers(); +} + +static void __exit qeth_l3_exit(void) +{ + qeth_l3_unregister_notifiers(); + pr_info("unregister layer 3 discipline\n"); +} + +module_init(qeth_l3_init); +module_exit(qeth_l3_exit); +MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>"); +MODULE_DESCRIPTION("qeth layer 3 discipline"); +MODULE_LICENSE("GPL"); diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c new file mode 100644 index 000000000..45ac6d870 --- /dev/null +++ b/drivers/s390/net/qeth_l3_sys.c @@ -0,0 +1,1007 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2007 + * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, + * Frank Pavlic <fpavlic@de.ibm.com>, + * Thomas Spatzier <tspat@de.ibm.com>, + * Frank Blaschka <frank.blaschka@de.ibm.com> + */ + +#include <linux/slab.h> +#include <asm/ebcdic.h> +#include <linux/hashtable.h> +#include <linux/inet.h> +#include "qeth_l3.h" + +#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \ +struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store) + +static int qeth_l3_string_to_ipaddr(const char *buf, + enum qeth_prot_versions proto, u8 *addr) +{ + const char *end; + + if ((proto == QETH_PROT_IPV4 && !in4_pton(buf, -1, addr, -1, &end)) || + (proto == QETH_PROT_IPV6 && !in6_pton(buf, -1, addr, -1, &end))) + return -EINVAL; + return 0; +} + +static ssize_t qeth_l3_dev_route_show(struct qeth_card *card, + struct qeth_routing_info *route, char *buf) +{ + switch (route->type) { + case PRIMARY_ROUTER: + return sprintf(buf, "%s\n", "primary router"); + case SECONDARY_ROUTER: + return sprintf(buf, "%s\n", "secondary router"); + case MULTICAST_ROUTER: + if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) + return sprintf(buf, "%s\n", "multicast router+"); + else + return sprintf(buf, "%s\n", "multicast router"); + case PRIMARY_CONNECTOR: + if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) + return sprintf(buf, "%s\n", "primary connector+"); + else + return sprintf(buf, "%s\n", "primary connector"); + case SECONDARY_CONNECTOR: + if (card->info.broadcast_capable == QETH_BROADCAST_WITHOUT_ECHO) + return sprintf(buf, "%s\n", "secondary connector+"); + else + return sprintf(buf, "%s\n", "secondary connector"); + default: + return sprintf(buf, "%s\n", "no"); + } +} + +static ssize_t qeth_l3_dev_route4_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return qeth_l3_dev_route_show(card, &card->options.route4, buf); +} + +static ssize_t qeth_l3_dev_route_store(struct qeth_card *card, + struct qeth_routing_info *route, enum qeth_prot_versions prot, + const char *buf, size_t count) +{ + enum qeth_routing_types old_route_type = route->type; + int rc = 0; + + mutex_lock(&card->conf_mutex); + if (sysfs_streq(buf, "no_router")) { + route->type = NO_ROUTER; + } else if (sysfs_streq(buf, "primary_connector")) { + route->type = PRIMARY_CONNECTOR; + } else if (sysfs_streq(buf, "secondary_connector")) { + route->type = SECONDARY_CONNECTOR; + } else if (sysfs_streq(buf, "primary_router")) { + route->type = PRIMARY_ROUTER; + } else if (sysfs_streq(buf, "secondary_router")) { + route->type = SECONDARY_ROUTER; + } else if (sysfs_streq(buf, "multicast_router")) { + route->type = MULTICAST_ROUTER; + } else { + rc = -EINVAL; 
+ goto out; + } + if (qeth_card_hw_is_reachable(card) && + (old_route_type != route->type)) { + if (prot == QETH_PROT_IPV4) + rc = qeth_l3_setrouting_v4(card); + else if (prot == QETH_PROT_IPV6) + rc = qeth_l3_setrouting_v6(card); + } +out: + if (rc) + route->type = old_route_type; + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static ssize_t qeth_l3_dev_route4_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return qeth_l3_dev_route_store(card, &card->options.route4, + QETH_PROT_IPV4, buf, count); +} + +static DEVICE_ATTR(route4, 0644, qeth_l3_dev_route4_show, + qeth_l3_dev_route4_store); + +static ssize_t qeth_l3_dev_route6_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return qeth_l3_dev_route_show(card, &card->options.route6, buf); +} + +static ssize_t qeth_l3_dev_route6_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return qeth_l3_dev_route_store(card, &card->options.route6, + QETH_PROT_IPV6, buf, count); +} + +static DEVICE_ATTR(route6, 0644, qeth_l3_dev_route6_show, + qeth_l3_dev_route6_store); + +static ssize_t qeth_l3_dev_fake_broadcast_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return sprintf(buf, "%i\n", card->options.fake_broadcast? 1:0); +} + +static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + char *tmp; + int i, rc = 0; + + if (!card) + return -EINVAL; + + mutex_lock(&card->conf_mutex); + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) { + rc = -EPERM; + goto out; + } + + i = simple_strtoul(buf, &tmp, 16); + if ((i == 0) || (i == 1)) + card->options.fake_broadcast = i; + else + rc = -EINVAL; +out: + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show, + qeth_l3_dev_fake_broadcast_store); + +static ssize_t qeth_l3_dev_sniffer_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return sprintf(buf, "%i\n", card->options.sniffer ? 
1 : 0); +} + +static ssize_t qeth_l3_dev_sniffer_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + int rc = 0; + unsigned long i; + + if (!card) + return -EINVAL; + + if (card->info.type != QETH_CARD_TYPE_IQD) + return -EPERM; + if (card->options.cq == QETH_CQ_ENABLED) + return -EPERM; + + mutex_lock(&card->conf_mutex); + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) { + rc = -EPERM; + goto out; + } + + rc = kstrtoul(buf, 16, &i); + if (rc) { + rc = -EINVAL; + goto out; + } + switch (i) { + case 0: + card->options.sniffer = i; + break; + case 1: + qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd); + if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) { + card->options.sniffer = i; + if (card->qdio.init_pool.buf_count != + QETH_IN_BUF_COUNT_MAX) + qeth_realloc_buffer_pool(card, + QETH_IN_BUF_COUNT_MAX); + } else + rc = -EPERM; + break; + default: + rc = -EINVAL; + } +out: + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show, + qeth_l3_dev_sniffer_store); + + +static ssize_t qeth_l3_dev_hsuid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + char tmp_hsuid[9]; + + if (!card) + return -EINVAL; + + if (card->info.type != QETH_CARD_TYPE_IQD) + return -EPERM; + + memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid)); + EBCASC(tmp_hsuid, 8); + return sprintf(buf, "%s\n", tmp_hsuid); +} + +static ssize_t qeth_l3_dev_hsuid_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + char *tmp; + int rc; + + if (!card) + return -EINVAL; + + if (card->info.type != QETH_CARD_TYPE_IQD) + return -EPERM; + if (card->state != CARD_STATE_DOWN && + card->state != CARD_STATE_RECOVER) + return -EPERM; + if (card->options.sniffer) + return -EPERM; + if (card->options.cq == QETH_CQ_NOTAVAILABLE) + return -EPERM; + + tmp = strsep((char **)&buf, "\n"); + if (strlen(tmp) > 8) + return -EINVAL; + + if (card->options.hsuid[0]) + /* delete old ip address */ + qeth_l3_modify_hsuid(card, false); + + if (strlen(tmp) == 0) { + /* delete ip address only */ + card->options.hsuid[0] = '\0'; + memcpy(card->dev->perm_addr, card->options.hsuid, 9); + qeth_configure_cq(card, QETH_CQ_DISABLED); + return count; + } + + if (qeth_configure_cq(card, QETH_CQ_ENABLED)) + return -EPERM; + + snprintf(card->options.hsuid, sizeof(card->options.hsuid), + "%-8s", tmp); + ASCEBC(card->options.hsuid, 8); + memcpy(card->dev->perm_addr, card->options.hsuid, 9); + + rc = qeth_l3_modify_hsuid(card, true); + + return rc ? rc : count; +} + +static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show, + qeth_l3_dev_hsuid_store); + + +static struct attribute *qeth_l3_device_attrs[] = { + &dev_attr_route4.attr, + &dev_attr_route6.attr, + &dev_attr_fake_broadcast.attr, + &dev_attr_sniffer.attr, + &dev_attr_hsuid.attr, + NULL, +}; + +static const struct attribute_group qeth_l3_device_attr_group = { + .attrs = qeth_l3_device_attrs, +}; + +static ssize_t qeth_l3_dev_ipato_enable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return sprintf(buf, "%i\n", card->ipato.enabled? 
1:0); +} + +static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + bool enable; + int rc = 0; + + if (!card) + return -EINVAL; + + mutex_lock(&card->conf_mutex); + if ((card->state != CARD_STATE_DOWN) && + (card->state != CARD_STATE_RECOVER)) { + rc = -EPERM; + goto out; + } + + if (sysfs_streq(buf, "toggle")) { + enable = !card->ipato.enabled; + } else if (kstrtobool(buf, &enable)) { + rc = -EINVAL; + goto out; + } + + if (card->ipato.enabled != enable) { + card->ipato.enabled = enable; + spin_lock_bh(&card->ip_lock); + qeth_l3_update_ipato(card); + spin_unlock_bh(&card->ip_lock); + } +out: + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static QETH_DEVICE_ATTR(ipato_enable, enable, 0644, + qeth_l3_dev_ipato_enable_show, + qeth_l3_dev_ipato_enable_store); + +static ssize_t qeth_l3_dev_ipato_invert4_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return sprintf(buf, "%i\n", card->ipato.invert4? 1:0); +} + +static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + bool invert; + int rc = 0; + + if (!card) + return -EINVAL; + + mutex_lock(&card->conf_mutex); + if (sysfs_streq(buf, "toggle")) { + invert = !card->ipato.invert4; + } else if (kstrtobool(buf, &invert)) { + rc = -EINVAL; + goto out; + } + + if (card->ipato.invert4 != invert) { + card->ipato.invert4 = invert; + spin_lock_bh(&card->ip_lock); + qeth_l3_update_ipato(card); + spin_unlock_bh(&card->ip_lock); + } +out: + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644, + qeth_l3_dev_ipato_invert4_show, + qeth_l3_dev_ipato_invert4_store); + +static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card, + enum qeth_prot_versions proto) +{ + struct qeth_ipato_entry *ipatoe; + char addr_str[40]; + int entry_len; /* length of 1 entry string, differs between v4 and v6 */ + int i = 0; + + entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; + /* add strlen for "/<mask>\n" */ + entry_len += (proto == QETH_PROT_IPV4)? 5 : 6; + spin_lock_bh(&card->ip_lock); + list_for_each_entry(ipatoe, &card->ipato.entries, entry) { + if (ipatoe->proto != proto) + continue; + /* String must not be longer than PAGE_SIZE. So we check if + * string length gets near PAGE_SIZE. 
Then we can savely display + * the next IPv6 address (worst case, compared to IPv4) */ + if ((PAGE_SIZE - i) <= entry_len) + break; + qeth_l3_ipaddr_to_string(proto, ipatoe->addr, addr_str); + i += snprintf(buf + i, PAGE_SIZE - i, + "%s/%i\n", addr_str, ipatoe->mask_bits); + } + spin_unlock_bh(&card->ip_lock); + i += snprintf(buf + i, PAGE_SIZE - i, "\n"); + + return i; +} + +static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV4); +} + +static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto, + u8 *addr, int *mask_bits) +{ + const char *start, *end; + char *tmp; + char buffer[40] = {0, }; + + start = buf; + /* get address string */ + end = strchr(start, '/'); + if (!end || (end - start >= 40)) { + return -EINVAL; + } + strncpy(buffer, start, end - start); + if (qeth_l3_string_to_ipaddr(buffer, proto, addr)) { + return -EINVAL; + } + start = end + 1; + *mask_bits = simple_strtoul(start, &tmp, 10); + if (!strlen(start) || + (tmp == start) || + (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) { + return -EINVAL; + } + return 0; +} + +static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count, + struct qeth_card *card, enum qeth_prot_versions proto) +{ + struct qeth_ipato_entry *ipatoe; + u8 addr[16]; + int mask_bits; + int rc = 0; + + mutex_lock(&card->conf_mutex); + rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits); + if (rc) + goto out; + + ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL); + if (!ipatoe) { + rc = -ENOMEM; + goto out; + } + ipatoe->proto = proto; + memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16); + ipatoe->mask_bits = mask_bits; + + rc = qeth_l3_add_ipato_entry(card, ipatoe); + if (rc) + kfree(ipatoe); +out: + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static ssize_t qeth_l3_dev_ipato_add4_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4); +} + +static QETH_DEVICE_ATTR(ipato_add4, add4, 0644, + qeth_l3_dev_ipato_add4_show, + qeth_l3_dev_ipato_add4_store); + +static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count, + struct qeth_card *card, enum qeth_prot_versions proto) +{ + u8 addr[16]; + int mask_bits; + int rc = 0; + + mutex_lock(&card->conf_mutex); + rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits); + if (!rc) + rc = qeth_l3_del_ipato_entry(card, proto, addr, mask_bits); + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static ssize_t qeth_l3_dev_ipato_del4_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV4); +} + +static QETH_DEVICE_ATTR(ipato_del4, del4, 0200, NULL, + qeth_l3_dev_ipato_del4_store); + +static ssize_t qeth_l3_dev_ipato_invert6_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return sprintf(buf, "%i\n", card->ipato.invert6? 
1:0); +} + +static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + bool invert; + int rc = 0; + + if (!card) + return -EINVAL; + + mutex_lock(&card->conf_mutex); + if (sysfs_streq(buf, "toggle")) { + invert = !card->ipato.invert6; + } else if (kstrtobool(buf, &invert)) { + rc = -EINVAL; + goto out; + } + + if (card->ipato.invert6 != invert) { + card->ipato.invert6 = invert; + spin_lock_bh(&card->ip_lock); + qeth_l3_update_ipato(card); + spin_unlock_bh(&card->ip_lock); + } +out: + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644, + qeth_l3_dev_ipato_invert6_show, + qeth_l3_dev_ipato_invert6_store); + + +static ssize_t qeth_l3_dev_ipato_add6_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV6); +} + +static ssize_t qeth_l3_dev_ipato_add6_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV6); +} + +static QETH_DEVICE_ATTR(ipato_add6, add6, 0644, + qeth_l3_dev_ipato_add6_show, + qeth_l3_dev_ipato_add6_store); + +static ssize_t qeth_l3_dev_ipato_del6_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return qeth_l3_dev_ipato_del_store(buf, count, card, QETH_PROT_IPV6); +} + +static QETH_DEVICE_ATTR(ipato_del6, del6, 0200, NULL, + qeth_l3_dev_ipato_del6_store); + +static struct attribute *qeth_ipato_device_attrs[] = { + &dev_attr_ipato_enable.attr, + &dev_attr_ipato_invert4.attr, + &dev_attr_ipato_add4.attr, + &dev_attr_ipato_del4.attr, + &dev_attr_ipato_invert6.attr, + &dev_attr_ipato_add6.attr, + &dev_attr_ipato_del6.attr, + NULL, +}; + +static const struct attribute_group qeth_device_ipato_group = { + .name = "ipa_takeover", + .attrs = qeth_ipato_device_attrs, +}; + +static ssize_t qeth_l3_dev_ip_add_show(struct device *dev, char *buf, + enum qeth_prot_versions proto, + enum qeth_ip_types type) +{ + struct qeth_card *card = dev_get_drvdata(dev); + struct qeth_ipaddr *ipaddr; + char addr_str[40]; + int str_len = 0; + int entry_len; /* length of 1 entry string, differs between v4 and v6 */ + int i; + + if (!card) + return -EINVAL; + + entry_len = (proto == QETH_PROT_IPV4)? 12 : 40; + entry_len += 2; /* \n + terminator */ + spin_lock_bh(&card->ip_lock); + hash_for_each(card->ip_htable, i, ipaddr, hnode) { + if (ipaddr->proto != proto || ipaddr->type != type) + continue; + /* String must not be longer than PAGE_SIZE. So we check if + * string length gets near PAGE_SIZE. 
Then we can savely display + * the next IPv6 address (worst case, compared to IPv4) */ + if ((PAGE_SIZE - str_len) <= entry_len) + break; + qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u, + addr_str); + str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n", + addr_str); + } + spin_unlock_bh(&card->ip_lock); + str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n"); + + return str_len; +} + +static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4, + QETH_IP_TYPE_VIPA); +} + +static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto, + u8 *addr) +{ + if (qeth_l3_string_to_ipaddr(buf, proto, addr)) { + return -EINVAL; + } + return 0; +} + +static ssize_t qeth_l3_dev_vipa_add_store(const char *buf, size_t count, + struct qeth_card *card, enum qeth_prot_versions proto) +{ + u8 addr[16] = {0, }; + int rc; + + mutex_lock(&card->conf_mutex); + rc = qeth_l3_parse_vipae(buf, proto, addr); + if (!rc) + rc = qeth_l3_modify_rxip_vipa(card, true, addr, + QETH_IP_TYPE_VIPA, proto); + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV4); +} + +static QETH_DEVICE_ATTR(vipa_add4, add4, 0644, + qeth_l3_dev_vipa_add4_show, + qeth_l3_dev_vipa_add4_store); + +static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count, + struct qeth_card *card, enum qeth_prot_versions proto) +{ + u8 addr[16]; + int rc; + + mutex_lock(&card->conf_mutex); + rc = qeth_l3_parse_vipae(buf, proto, addr); + if (!rc) + rc = qeth_l3_modify_rxip_vipa(card, false, addr, + QETH_IP_TYPE_VIPA, proto); + mutex_unlock(&card->conf_mutex); + return rc ? 
rc : count; +} + +static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV4); +} + +static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL, + qeth_l3_dev_vipa_del4_store); + +static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6, + QETH_IP_TYPE_VIPA); +} + +static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return qeth_l3_dev_vipa_add_store(buf, count, card, QETH_PROT_IPV6); +} + +static QETH_DEVICE_ATTR(vipa_add6, add6, 0644, + qeth_l3_dev_vipa_add6_show, + qeth_l3_dev_vipa_add6_store); + +static ssize_t qeth_l3_dev_vipa_del6_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return qeth_l3_dev_vipa_del_store(buf, count, card, QETH_PROT_IPV6); +} + +static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL, + qeth_l3_dev_vipa_del6_store); + +static struct attribute *qeth_vipa_device_attrs[] = { + &dev_attr_vipa_add4.attr, + &dev_attr_vipa_del4.attr, + &dev_attr_vipa_add6.attr, + &dev_attr_vipa_del6.attr, + NULL, +}; + +static const struct attribute_group qeth_device_vipa_group = { + .name = "vipa", + .attrs = qeth_vipa_device_attrs, +}; + +static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV4, + QETH_IP_TYPE_RXIP); +} + +static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto, + u8 *addr) +{ + __be32 ipv4_addr; + struct in6_addr ipv6_addr; + + if (qeth_l3_string_to_ipaddr(buf, proto, addr)) { + return -EINVAL; + } + if (proto == QETH_PROT_IPV4) { + memcpy(&ipv4_addr, addr, sizeof(ipv4_addr)); + if (ipv4_is_multicast(ipv4_addr)) { + QETH_DBF_MESSAGE(2, "multicast rxip not supported.\n"); + return -EINVAL; + } + } else if (proto == QETH_PROT_IPV6) { + memcpy(&ipv6_addr, addr, sizeof(ipv6_addr)); + if (ipv6_addr_is_multicast(&ipv6_addr)) { + QETH_DBF_MESSAGE(2, "multicast rxip not supported.\n"); + return -EINVAL; + } + } + + return 0; +} + +static ssize_t qeth_l3_dev_rxip_add_store(const char *buf, size_t count, + struct qeth_card *card, enum qeth_prot_versions proto) +{ + u8 addr[16] = {0, }; + int rc; + + mutex_lock(&card->conf_mutex); + rc = qeth_l3_parse_rxipe(buf, proto, addr); + if (!rc) + rc = qeth_l3_modify_rxip_vipa(card, true, addr, + QETH_IP_TYPE_RXIP, proto); + mutex_unlock(&card->conf_mutex); + return rc ? 
rc : count; +} + +static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV4); +} + +static QETH_DEVICE_ATTR(rxip_add4, add4, 0644, + qeth_l3_dev_rxip_add4_show, + qeth_l3_dev_rxip_add4_store); + +static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count, + struct qeth_card *card, enum qeth_prot_versions proto) +{ + u8 addr[16]; + int rc; + + mutex_lock(&card->conf_mutex); + rc = qeth_l3_parse_rxipe(buf, proto, addr); + if (!rc) + rc = qeth_l3_modify_rxip_vipa(card, false, addr, + QETH_IP_TYPE_RXIP, proto); + mutex_unlock(&card->conf_mutex); + return rc ? rc : count; +} + +static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV4); +} + +static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL, + qeth_l3_dev_rxip_del4_store); + +static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return qeth_l3_dev_ip_add_show(dev, buf, QETH_PROT_IPV6, + QETH_IP_TYPE_RXIP); +} + +static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return qeth_l3_dev_rxip_add_store(buf, count, card, QETH_PROT_IPV6); +} + +static QETH_DEVICE_ATTR(rxip_add6, add6, 0644, + qeth_l3_dev_rxip_add6_show, + qeth_l3_dev_rxip_add6_store); + +static ssize_t qeth_l3_dev_rxip_del6_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct qeth_card *card = dev_get_drvdata(dev); + + if (!card) + return -EINVAL; + + return qeth_l3_dev_rxip_del_store(buf, count, card, QETH_PROT_IPV6); +} + +static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL, + qeth_l3_dev_rxip_del6_store); + +static struct attribute *qeth_rxip_device_attrs[] = { + &dev_attr_rxip_add4.attr, + &dev_attr_rxip_del4.attr, + &dev_attr_rxip_add6.attr, + &dev_attr_rxip_del6.attr, + NULL, +}; + +static const struct attribute_group qeth_device_rxip_group = { + .name = "rxip", + .attrs = qeth_rxip_device_attrs, +}; + +static const struct attribute_group *qeth_l3_only_attr_groups[] = { + &qeth_l3_device_attr_group, + &qeth_device_ipato_group, + &qeth_device_vipa_group, + &qeth_device_rxip_group, + NULL, +}; + +int qeth_l3_create_device_attributes(struct device *dev) +{ + return sysfs_create_groups(&dev->kobj, qeth_l3_only_attr_groups); +} + +void qeth_l3_remove_device_attributes(struct device *dev) +{ + sysfs_remove_groups(&dev->kobj, qeth_l3_only_attr_groups); +} + +const struct attribute_group *qeth_l3_attr_groups[] = { + &qeth_device_attr_group, + &qeth_device_blkt_group, + /* l3 specific, see qeth_l3_only_attr_groups: */ + &qeth_l3_device_attr_group, + &qeth_device_ipato_group, + &qeth_device_vipa_group, + &qeth_device_rxip_group, + NULL, +}; diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c new file mode 100644 index 000000000..066b5c3aa --- /dev/null +++ b/drivers/s390/net/smsgiucv.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * IUCV special message driver + * + * Copyright IBM Corp. 
2003, 2009 + * + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <net/iucv/iucv.h> +#include <asm/cpcmd.h> +#include <asm/ebcdic.h> +#include "smsgiucv.h" + +struct smsg_callback { + struct list_head list; + const char *prefix; + int len; + void (*callback)(const char *from, char *str); +}; + +MODULE_AUTHOR + ("(C) 2003 IBM Corporation by Martin Schwidefsky (schwidefsky@de.ibm.com)"); +MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver"); + +static struct iucv_path *smsg_path; +/* dummy device used as trigger for PM functions */ +static struct device *smsg_dev; + +static DEFINE_SPINLOCK(smsg_list_lock); +static LIST_HEAD(smsg_list); +static int iucv_path_connected; + +static int smsg_path_pending(struct iucv_path *, u8 *, u8 *); +static void smsg_message_pending(struct iucv_path *, struct iucv_message *); + +static struct iucv_handler smsg_handler = { + .path_pending = smsg_path_pending, + .message_pending = smsg_message_pending, +}; + +static int smsg_path_pending(struct iucv_path *path, u8 *ipvmid, u8 *ipuser) +{ + if (strncmp(ipvmid, "*MSG ", 8) != 0) + return -EINVAL; + /* Path pending from *MSG. */ + return iucv_path_accept(path, &smsg_handler, "SMSGIUCV ", NULL); +} + +static void smsg_message_pending(struct iucv_path *path, + struct iucv_message *msg) +{ + struct smsg_callback *cb; + unsigned char *buffer; + unsigned char sender[9]; + int rc, i; + + buffer = kmalloc(msg->length + 1, GFP_ATOMIC | GFP_DMA); + if (!buffer) { + iucv_message_reject(path, msg); + return; + } + rc = iucv_message_receive(path, msg, 0, buffer, msg->length, NULL); + if (rc == 0) { + buffer[msg->length] = 0; + EBCASC(buffer, msg->length); + memcpy(sender, buffer, 8); + sender[8] = 0; + /* Remove trailing whitespace from the sender name. 
*/ + for (i = 7; i >= 0; i--) { + if (sender[i] != ' ' && sender[i] != '\t') + break; + sender[i] = 0; + } + spin_lock(&smsg_list_lock); + list_for_each_entry(cb, &smsg_list, list) + if (strncmp(buffer + 8, cb->prefix, cb->len) == 0) { + cb->callback(sender, buffer + 8); + break; + } + spin_unlock(&smsg_list_lock); + } + kfree(buffer); +} + +int smsg_register_callback(const char *prefix, + void (*callback)(const char *from, char *str)) +{ + struct smsg_callback *cb; + + cb = kmalloc(sizeof(struct smsg_callback), GFP_KERNEL); + if (!cb) + return -ENOMEM; + cb->prefix = prefix; + cb->len = strlen(prefix); + cb->callback = callback; + spin_lock_bh(&smsg_list_lock); + list_add_tail(&cb->list, &smsg_list); + spin_unlock_bh(&smsg_list_lock); + return 0; +} + +void smsg_unregister_callback(const char *prefix, + void (*callback)(const char *from, + char *str)) +{ + struct smsg_callback *cb, *tmp; + + spin_lock_bh(&smsg_list_lock); + cb = NULL; + list_for_each_entry(tmp, &smsg_list, list) + if (tmp->callback == callback && + strcmp(tmp->prefix, prefix) == 0) { + cb = tmp; + list_del(&cb->list); + break; + } + spin_unlock_bh(&smsg_list_lock); + kfree(cb); +} + +static int smsg_pm_freeze(struct device *dev) +{ +#ifdef CONFIG_PM_DEBUG + printk(KERN_WARNING "smsg_pm_freeze\n"); +#endif + if (smsg_path && iucv_path_connected) { + iucv_path_sever(smsg_path, NULL); + iucv_path_connected = 0; + } + return 0; +} + +static int smsg_pm_restore_thaw(struct device *dev) +{ + int rc; + +#ifdef CONFIG_PM_DEBUG + printk(KERN_WARNING "smsg_pm_restore_thaw\n"); +#endif + if (smsg_path && !iucv_path_connected) { + memset(smsg_path, 0, sizeof(*smsg_path)); + smsg_path->msglim = 255; + smsg_path->flags = 0; + rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", + NULL, NULL, NULL); +#ifdef CONFIG_PM_DEBUG + if (rc) + printk(KERN_ERR + "iucv_path_connect returned with rc %i\n", rc); +#endif + if (!rc) + iucv_path_connected = 1; + cpcmd("SET SMSG IUCV", NULL, 0, NULL); + } + return 0; +} + +static const struct dev_pm_ops smsg_pm_ops = { + .freeze = smsg_pm_freeze, + .thaw = smsg_pm_restore_thaw, + .restore = smsg_pm_restore_thaw, +}; + +static struct device_driver smsg_driver = { + .owner = THIS_MODULE, + .name = SMSGIUCV_DRV_NAME, + .bus = &iucv_bus, + .pm = &smsg_pm_ops, +}; + +static void __exit smsg_exit(void) +{ + cpcmd("SET SMSG OFF", NULL, 0, NULL); + device_unregister(smsg_dev); + iucv_unregister(&smsg_handler, 1); + driver_unregister(&smsg_driver); +} + +static int __init smsg_init(void) +{ + int rc; + + if (!MACHINE_IS_VM) { + rc = -EPROTONOSUPPORT; + goto out; + } + rc = driver_register(&smsg_driver); + if (rc != 0) + goto out; + rc = iucv_register(&smsg_handler, 1); + if (rc) + goto out_driver; + smsg_path = iucv_path_alloc(255, 0, GFP_KERNEL); + if (!smsg_path) { + rc = -ENOMEM; + goto out_register; + } + rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", + NULL, NULL, NULL); + if (rc) + goto out_free_path; + else + iucv_path_connected = 1; + smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL); + if (!smsg_dev) { + rc = -ENOMEM; + goto out_free_path; + } + dev_set_name(smsg_dev, "smsg_iucv"); + smsg_dev->bus = &iucv_bus; + smsg_dev->parent = iucv_root; + smsg_dev->release = (void (*)(struct device *))kfree; + smsg_dev->driver = &smsg_driver; + rc = device_register(smsg_dev); + if (rc) + goto out_put; + + cpcmd("SET SMSG IUCV", NULL, 0, NULL); + return 0; + +out_put: + put_device(smsg_dev); +out_free_path: + iucv_path_free(smsg_path); + smsg_path = NULL; +out_register: + 
iucv_unregister(&smsg_handler, 1); +out_driver: + driver_unregister(&smsg_driver); +out: + return rc; +} + +module_init(smsg_init); +module_exit(smsg_exit); +MODULE_LICENSE("GPL"); + +EXPORT_SYMBOL(smsg_register_callback); +EXPORT_SYMBOL(smsg_unregister_callback); diff --git a/drivers/s390/net/smsgiucv.h b/drivers/s390/net/smsgiucv.h new file mode 100644 index 000000000..a0d6c6130 --- /dev/null +++ b/drivers/s390/net/smsgiucv.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * IUCV special message driver + * + * Copyright IBM Corp. 2003 + * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) + */ + +#define SMSGIUCV_DRV_NAME "SMSGIUCV" + +int smsg_register_callback(const char *, + void (*)(const char *, char *)); +void smsg_unregister_callback(const char *, + void (*)(const char *, char *)); + diff --git a/drivers/s390/net/smsgiucv_app.c b/drivers/s390/net/smsgiucv_app.c new file mode 100644 index 000000000..0a263999f --- /dev/null +++ b/drivers/s390/net/smsgiucv_app.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Deliver z/VM CP special messages (SMSG) as uevents. + * + * The driver registers for z/VM CP special messages with the + * "APP" prefix. Incoming messages are delivered to user space + * as uevents. + * + * Copyright IBM Corp. 2010 + * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> + * + */ +#define KMSG_COMPONENT "smsgiucv_app" +#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt + +#include <linux/ctype.h> +#include <linux/err.h> +#include <linux/device.h> +#include <linux/list.h> +#include <linux/kobject.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <net/iucv/iucv.h> +#include "smsgiucv.h" + +/* prefix used for SMSG registration */ +#define SMSG_PREFIX "APP" + +/* SMSG related uevent environment variables */ +#define ENV_SENDER_STR "SMSG_SENDER=" +#define ENV_SENDER_LEN (strlen(ENV_SENDER_STR) + 8 + 1) +#define ENV_PREFIX_STR "SMSG_ID=" +#define ENV_PREFIX_LEN (strlen(ENV_PREFIX_STR) + \ + strlen(SMSG_PREFIX) + 1) +#define ENV_TEXT_STR "SMSG_TEXT=" +#define ENV_TEXT_LEN(msg) (strlen(ENV_TEXT_STR) + strlen((msg)) + 1) + +/* z/VM user ID which is permitted to send SMSGs + * If the value is undefined or empty (""), special messages are + * accepted from any z/VM user ID. 
*/ +static char *sender; +module_param(sender, charp, 0400); +MODULE_PARM_DESC(sender, "z/VM user ID from which CP SMSGs are accepted"); + +/* SMSG device representation */ +static struct device *smsg_app_dev; + +/* list element for queuing received messages for delivery */ +struct smsg_app_event { + struct list_head list; + char *buf; + char *envp[4]; +}; + +/* queue for outgoing uevents */ +static LIST_HEAD(smsg_event_queue); +static DEFINE_SPINLOCK(smsg_event_queue_lock); + +static void smsg_app_event_free(struct smsg_app_event *ev) +{ + kfree(ev->buf); + kfree(ev); +} + +static struct smsg_app_event *smsg_app_event_alloc(const char *from, + const char *msg) +{ + struct smsg_app_event *ev; + + ev = kzalloc(sizeof(*ev), GFP_ATOMIC); + if (!ev) + return NULL; + + ev->buf = kzalloc(ENV_SENDER_LEN + ENV_PREFIX_LEN + + ENV_TEXT_LEN(msg), GFP_ATOMIC); + if (!ev->buf) { + kfree(ev); + return NULL; + } + + /* setting up environment pointers into buf */ + ev->envp[0] = ev->buf; + ev->envp[1] = ev->envp[0] + ENV_SENDER_LEN; + ev->envp[2] = ev->envp[1] + ENV_PREFIX_LEN; + ev->envp[3] = NULL; + + /* setting up environment: sender, prefix name, and message text */ + snprintf(ev->envp[0], ENV_SENDER_LEN, ENV_SENDER_STR "%s", from); + snprintf(ev->envp[1], ENV_PREFIX_LEN, ENV_PREFIX_STR "%s", SMSG_PREFIX); + snprintf(ev->envp[2], ENV_TEXT_LEN(msg), ENV_TEXT_STR "%s", msg); + + return ev; +} + +static void smsg_event_work_fn(struct work_struct *work) +{ + LIST_HEAD(event_queue); + struct smsg_app_event *p, *n; + struct device *dev; + + dev = get_device(smsg_app_dev); + if (!dev) + return; + + spin_lock_bh(&smsg_event_queue_lock); + list_splice_init(&smsg_event_queue, &event_queue); + spin_unlock_bh(&smsg_event_queue_lock); + + list_for_each_entry_safe(p, n, &event_queue, list) { + list_del(&p->list); + kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, p->envp); + smsg_app_event_free(p); + } + + put_device(dev); +} +static DECLARE_WORK(smsg_event_work, smsg_event_work_fn); + +static void smsg_app_callback(const char *from, char *msg) +{ + struct smsg_app_event *se; + + /* check if the originating z/VM user ID matches + * the configured sender. 
*/ + if (sender && strlen(sender) > 0 && strcmp(from, sender) != 0) + return; + + /* get start of message text (skip prefix and leading blanks) */ + msg += strlen(SMSG_PREFIX); + while (*msg && isspace(*msg)) + msg++; + if (*msg == '\0') + return; + + /* allocate event list element and its environment */ + se = smsg_app_event_alloc(from, msg); + if (!se) + return; + + /* queue event and schedule work function */ + spin_lock(&smsg_event_queue_lock); + list_add_tail(&se->list, &smsg_event_queue); + spin_unlock(&smsg_event_queue_lock); + + schedule_work(&smsg_event_work); + return; +} + +static int __init smsgiucv_app_init(void) +{ + struct device_driver *smsgiucv_drv; + int rc; + + if (!MACHINE_IS_VM) + return -ENODEV; + + smsg_app_dev = kzalloc(sizeof(*smsg_app_dev), GFP_KERNEL); + if (!smsg_app_dev) + return -ENOMEM; + + smsgiucv_drv = driver_find(SMSGIUCV_DRV_NAME, &iucv_bus); + if (!smsgiucv_drv) { + kfree(smsg_app_dev); + return -ENODEV; + } + + rc = dev_set_name(smsg_app_dev, KMSG_COMPONENT); + if (rc) { + kfree(smsg_app_dev); + goto fail; + } + smsg_app_dev->bus = &iucv_bus; + smsg_app_dev->parent = iucv_root; + smsg_app_dev->release = (void (*)(struct device *)) kfree; + smsg_app_dev->driver = smsgiucv_drv; + rc = device_register(smsg_app_dev); + if (rc) { + put_device(smsg_app_dev); + goto fail; + } + + /* convert sender to uppercase characters */ + if (sender) { + int len = strlen(sender); + while (len--) + sender[len] = toupper(sender[len]); + } + + /* register with the smsgiucv device driver */ + rc = smsg_register_callback(SMSG_PREFIX, smsg_app_callback); + if (rc) { + device_unregister(smsg_app_dev); + goto fail; + } + + rc = 0; +fail: + return rc; +} +module_init(smsgiucv_app_init); + +static void __exit smsgiucv_app_exit(void) +{ + /* unregister callback */ + smsg_unregister_callback(SMSG_PREFIX, smsg_app_callback); + + /* cancel pending work and flush any queued event work */ + cancel_work_sync(&smsg_event_work); + smsg_event_work_fn(&smsg_event_work); + + device_unregister(smsg_app_dev); +} +module_exit(smsgiucv_app_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Deliver z/VM CP SMSG as uevents"); +MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>"); |
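The smsgiucv_app.c module above is itself the reference consumer of the smsg_register_callback()/smsg_unregister_callback() interface declared in smsgiucv.h. A minimal sketch of another hypothetical consumer follows, assuming it is built in-tree next to smsgiucv.h; the "TEST" prefix and the my_smsg_* names are illustrative only and are not part of this commit:

#include <linux/kernel.h>
#include <linux/module.h>
#include "smsgiucv.h"

#define MY_PREFIX "TEST"

/*
 * Invoked from smsg_message_pending() while smsg_list_lock is held, so the
 * callback must not sleep. "from" is the z/VM sender ID with trailing blanks
 * stripped; "str" is the ASCII-converted message text and still begins with
 * the registered prefix (smsg_app_callback skips it the same way).
 */
static void my_smsg_callback(const char *from, char *str)
{
	pr_info("CP SMSG from %s: %s\n", from, str);
}

static int __init my_smsg_init(void)
{
	/* Only messages whose text starts with MY_PREFIX reach the callback. */
	return smsg_register_callback(MY_PREFIX, my_smsg_callback);
}

static void __exit my_smsg_exit(void)
{
	smsg_unregister_callback(MY_PREFIX, my_smsg_callback);
}

module_init(my_smsg_init);
module_exit(my_smsg_exit);
MODULE_LICENSE("GPL");

As with smsgiucv_app.c, such a module is only useful under z/VM: smsgiucv.c itself issues "SET SMSG IUCV" once its IUCV path to *MSG is connected, and it dispatches each received special message to the callbacks registered for a matching prefix.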