author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-08 04:15:15 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-08 04:15:15 +0000
commit | 68c1b0995e963349e50f8a8b00ebc140908f7882 (patch)
tree | ae3788542761985ee15e3d1ca439706040d1eb0c /drivers/net
parent | Releasing progress-linux version 4.19.269-1progress5u1. (diff)
download | linux-68c1b0995e963349e50f8a8b00ebc140908f7882.tar.xz, linux-68c1b0995e963349e50f8a8b00ebc140908f7882.zip
Merging upstream version 4.19.282.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net')
157 files changed, 3003 insertions, 6086 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index cab5c1cc9..4e4adacb5 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2096,10 +2096,10 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in /* called with rcu_read_lock() */ static int bond_miimon_inspect(struct bonding *bond) { + bool ignore_updelay = false; int link_state, commit = 0; struct list_head *iter; struct slave *slave; - bool ignore_updelay; ignore_updelay = !rcu_dereference(bond->curr_active_slave); @@ -3993,6 +3993,29 @@ err: bond_slave_arr_work_rearm(bond, 1); } +static void bond_skip_slave(struct bond_up_slave *slaves, + struct slave *skipslave) +{ + int idx; + + /* Rare situation where caller has asked to skip a specific + * slave but allocation failed (most likely!). BTW this is + * only possible when the call is initiated from + * __bond_release_one(). In this situation; overwrite the + * skipslave entry in the array with the last entry from the + * array to avoid a situation where the xmit path may choose + * this to-be-skipped slave to send a packet out. + */ + for (idx = 0; slaves && idx < slaves->count; idx++) { + if (skipslave == slaves->arr[idx]) { + slaves->arr[idx] = + slaves->arr[slaves->count - 1]; + slaves->count--; + break; + } + } +} + /* Build the usable slaves array in control path for modes that use xmit-hash * to determine the slave interface - * (a) BOND_MODE_8023AD @@ -4063,27 +4086,9 @@ int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave) if (old_arr) kfree_rcu(old_arr, rcu); out: - if (ret != 0 && skipslave) { - int idx; - - /* Rare situation where caller has asked to skip a specific - * slave but allocation failed (most likely!). BTW this is - * only possible when the call is initiated from - * __bond_release_one(). In this situation; overwrite the - * skipslave entry in the array with the last entry from the - * array to avoid a situation where the xmit path may choose - * this to-be-skipped slave to send a packet out. - */ - old_arr = rtnl_dereference(bond->slave_arr); - for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) { - if (skipslave == old_arr->arr[idx]) { - old_arr->arr[idx] = - old_arr->arr[old_arr->count-1]; - old_arr->count--; - break; - } - } - } + if (ret != 0 && skipslave) + bond_skip_slave(rtnl_dereference(bond->slave_arr), skipslave); + return ret; } diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index ffdee5aeb..d46599871 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c @@ -290,7 +290,6 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: - cf->data[3] = ecc & SJA1000_ECC_SEG; break; } @@ -298,6 +297,9 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, if (!(ecc & SJA1000_ECC_DIR)) cf->data[2] |= CAN_ERR_PROT_TX; + /* Bit stream position in CAN frame as the error was detected */ + cf->data[3] = ecc & SJA1000_ECC_SEG; + if (priv->can.state == CAN_STATE_ERROR_WARNING || priv->can.state == CAN_STATE_ERROR_PASSIVE) { cf->data[1] = (txerr > rxerr) ? 
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h index 62958f04a..5699531f8 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb.h +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb.h @@ -76,6 +76,14 @@ struct kvaser_usb_tx_urb_context { int dlc; }; +struct kvaser_usb_busparams { + __le32 bitrate; + u8 tseg1; + u8 tseg2; + u8 sjw; + u8 nsamples; +} __packed; + struct kvaser_usb { struct usb_device *udev; struct usb_interface *intf; @@ -104,13 +112,19 @@ struct kvaser_usb_net_priv { struct can_priv can; struct can_berr_counter bec; + /* subdriver-specific data */ + void *sub_priv; + struct kvaser_usb *dev; struct net_device *netdev; int channel; - struct completion start_comp, stop_comp, flush_comp; + struct completion start_comp, stop_comp, flush_comp, + get_busparams_comp; struct usb_anchor tx_submitted; + struct kvaser_usb_busparams busparams_nominal, busparams_data; + spinlock_t tx_contexts_lock; /* lock for active_tx_contexts */ int active_tx_contexts; struct kvaser_usb_tx_urb_context tx_contexts[]; @@ -120,11 +134,15 @@ struct kvaser_usb_net_priv { * struct kvaser_usb_dev_ops - Device specific functions * @dev_set_mode: used for can.do_set_mode * @dev_set_bittiming: used for can.do_set_bittiming + * @dev_get_busparams: readback arbitration busparams * @dev_set_data_bittiming: used for can.do_set_data_bittiming + * @dev_get_data_busparams: readback data busparams * @dev_get_berr_counter: used for can.do_get_berr_counter * * @dev_setup_endpoints: setup USB in and out endpoints * @dev_init_card: initialize card + * @dev_init_channel: initialize channel + * @dev_remove_channel: uninitialize channel * @dev_get_software_info: get software info * @dev_get_software_details: get software details * @dev_get_card_info: get card info @@ -140,12 +158,18 @@ struct kvaser_usb_net_priv { */ struct kvaser_usb_dev_ops { int (*dev_set_mode)(struct net_device *netdev, enum can_mode mode); - int (*dev_set_bittiming)(struct net_device *netdev); - int (*dev_set_data_bittiming)(struct net_device *netdev); + int (*dev_set_bittiming)(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams); + int (*dev_get_busparams)(struct kvaser_usb_net_priv *priv); + int (*dev_set_data_bittiming)(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams); + int (*dev_get_data_busparams)(struct kvaser_usb_net_priv *priv); int (*dev_get_berr_counter)(const struct net_device *netdev, struct can_berr_counter *bec); int (*dev_setup_endpoints)(struct kvaser_usb *dev); int (*dev_init_card)(struct kvaser_usb *dev); + int (*dev_init_channel)(struct kvaser_usb_net_priv *priv); + void (*dev_remove_channel)(struct kvaser_usb_net_priv *priv); int (*dev_get_software_info)(struct kvaser_usb *dev); int (*dev_get_software_details)(struct kvaser_usb *dev); int (*dev_get_card_info)(struct kvaser_usb *dev); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index 379df22d5..da449d046 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -416,10 +416,6 @@ static int kvaser_usb_open(struct net_device *netdev) if (err) return err; - err = kvaser_usb_setup_rx_urbs(dev); - if (err) - goto error; - err = ops->dev_set_opt_mode(priv); if (err) goto error; @@ -510,6 +506,93 @@ static int kvaser_usb_close(struct net_device *netdev) return 0; } +static int kvaser_usb_set_bittiming(struct net_device *netdev) +{ + struct 
kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct kvaser_usb *dev = priv->dev; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; + struct can_bittiming *bt = &priv->can.bittiming; + + struct kvaser_usb_busparams busparams; + int tseg1 = bt->prop_seg + bt->phase_seg1; + int tseg2 = bt->phase_seg2; + int sjw = bt->sjw; + int err = -EOPNOTSUPP; + + busparams.bitrate = cpu_to_le32(bt->bitrate); + busparams.sjw = (u8)sjw; + busparams.tseg1 = (u8)tseg1; + busparams.tseg2 = (u8)tseg2; + if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) + busparams.nsamples = 3; + else + busparams.nsamples = 1; + + err = ops->dev_set_bittiming(netdev, &busparams); + if (err) + return err; + + err = kvaser_usb_setup_rx_urbs(priv->dev); + if (err) + return err; + + err = ops->dev_get_busparams(priv); + if (err) { + /* Treat EOPNOTSUPP as success */ + if (err == -EOPNOTSUPP) + err = 0; + return err; + } + + if (memcmp(&busparams, &priv->busparams_nominal, + sizeof(priv->busparams_nominal)) != 0) + err = -EINVAL; + + return err; +} + +static int kvaser_usb_set_data_bittiming(struct net_device *netdev) +{ + struct kvaser_usb_net_priv *priv = netdev_priv(netdev); + struct kvaser_usb *dev = priv->dev; + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; + struct can_bittiming *dbt = &priv->can.data_bittiming; + + struct kvaser_usb_busparams busparams; + int tseg1 = dbt->prop_seg + dbt->phase_seg1; + int tseg2 = dbt->phase_seg2; + int sjw = dbt->sjw; + int err; + + if (!ops->dev_set_data_bittiming || + !ops->dev_get_data_busparams) + return -EOPNOTSUPP; + + busparams.bitrate = cpu_to_le32(dbt->bitrate); + busparams.sjw = (u8)sjw; + busparams.tseg1 = (u8)tseg1; + busparams.tseg2 = (u8)tseg2; + busparams.nsamples = 1; + + err = ops->dev_set_data_bittiming(netdev, &busparams); + if (err) + return err; + + err = kvaser_usb_setup_rx_urbs(priv->dev); + if (err) + return err; + + err = ops->dev_get_data_busparams(priv); + if (err) + return err; + + if (memcmp(&busparams, &priv->busparams_data, + sizeof(priv->busparams_data)) != 0) + err = -EINVAL; + + return err; +} + static void kvaser_usb_write_bulk_callback(struct urb *urb) { struct kvaser_usb_tx_urb_context *context = urb->context; @@ -645,6 +728,7 @@ static const struct net_device_ops kvaser_usb_netdev_ops = { static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev) { + const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; int i; for (i = 0; i < dev->nchannels; i++) { @@ -660,6 +744,9 @@ static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev) if (!dev->nets[i]) continue; + if (ops->dev_remove_channel) + ops->dev_remove_channel(dev->nets[i]); + free_candev(dev->nets[i]->netdev); } } @@ -692,6 +779,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) init_completion(&priv->start_comp); init_completion(&priv->stop_comp); init_completion(&priv->flush_comp); + init_completion(&priv->get_busparams_comp); priv->can.ctrlmode_supported = 0; priv->dev = dev; @@ -704,7 +792,7 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) priv->can.state = CAN_STATE_STOPPED; priv->can.clock.freq = dev->cfg->clock.freq; priv->can.bittiming_const = dev->cfg->bittiming_const; - priv->can.do_set_bittiming = ops->dev_set_bittiming; + priv->can.do_set_bittiming = kvaser_usb_set_bittiming; priv->can.do_set_mode = ops->dev_set_mode; if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) || (priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP)) @@ -716,7 +804,7 @@ static int 
kvaser_usb_init_one(struct kvaser_usb *dev, int channel) if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) { priv->can.data_bittiming_const = dev->cfg->data_bittiming_const; - priv->can.do_set_data_bittiming = ops->dev_set_data_bittiming; + priv->can.do_set_data_bittiming = kvaser_usb_set_data_bittiming; } netdev->flags |= IFF_ECHO; @@ -728,17 +816,26 @@ static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) dev->nets[channel] = priv; + if (ops->dev_init_channel) { + err = ops->dev_init_channel(priv); + if (err) + goto err; + } + err = register_candev(netdev); if (err) { dev_err(&dev->intf->dev, "Failed to register CAN device\n"); - free_candev(netdev); - dev->nets[channel] = NULL; - return err; + goto err; } netdev_dbg(netdev, "device registered\n"); return 0; + +err: + free_candev(netdev); + dev->nets[channel] = NULL; + return err; } static int kvaser_usb_probe(struct usb_interface *intf, diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index 45d278724..233bbfeaa 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -43,6 +43,8 @@ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc; /* Minihydra command IDs */ #define CMD_SET_BUSPARAMS_REQ 16 +#define CMD_GET_BUSPARAMS_REQ 17 +#define CMD_GET_BUSPARAMS_RESP 18 #define CMD_GET_CHIP_STATE_REQ 19 #define CMD_CHIP_STATE_EVENT 20 #define CMD_SET_DRIVERMODE_REQ 21 @@ -193,21 +195,26 @@ struct kvaser_cmd_chip_state_event { #define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO 0x01 #define KVASER_USB_HYDRA_BUS_MODE_NONISO 0x02 struct kvaser_cmd_set_busparams { - __le32 bitrate; - u8 tseg1; - u8 tseg2; - u8 sjw; - u8 nsamples; + struct kvaser_usb_busparams busparams_nominal; u8 reserved0[4]; - __le32 bitrate_d; - u8 tseg1_d; - u8 tseg2_d; - u8 sjw_d; - u8 nsamples_d; + struct kvaser_usb_busparams busparams_data; u8 canfd_mode; u8 reserved1[7]; } __packed; +/* Busparam type */ +#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN 0x00 +#define KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD 0x01 +struct kvaser_cmd_get_busparams_req { + u8 type; + u8 reserved[27]; +} __packed; + +struct kvaser_cmd_get_busparams_res { + struct kvaser_usb_busparams busparams; + u8 reserved[20]; +} __packed; + /* Ctrl modes */ #define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01 #define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02 @@ -278,6 +285,8 @@ struct kvaser_cmd { struct kvaser_cmd_error_event error_event; struct kvaser_cmd_set_busparams set_busparams_req; + struct kvaser_cmd_get_busparams_req get_busparams_req; + struct kvaser_cmd_get_busparams_res get_busparams_res; struct kvaser_cmd_chip_state_event chip_state_event; @@ -293,6 +302,7 @@ struct kvaser_cmd { #define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1) #define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4) #define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5) +#define KVASER_USB_HYDRA_CF_FLAG_TX_ACK BIT(6) /* CAN frame flags. 
Used in ext_rx_can and ext_tx_can */ #define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12) #define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13) @@ -359,6 +369,10 @@ struct kvaser_cmd_ext { } __packed; } __packed; +struct kvaser_usb_net_hydra_priv { + int pending_get_busparams_type; +}; + static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = { .name = "kvaser_usb_kcan", .tseg1_min = 1, @@ -504,6 +518,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, u8 cmd_no, int channel) { struct kvaser_cmd *cmd; + size_t cmd_len; int err; cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); @@ -511,6 +526,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, return -ENOMEM; cmd->header.cmd_no = cmd_no; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); if (channel < 0) { kvaser_usb_hydra_set_cmd_dest_he (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); @@ -527,7 +543,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) goto end; @@ -543,6 +559,7 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv, { struct kvaser_cmd *cmd; struct kvaser_usb *dev = priv->dev; + size_t cmd_len; int err; cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC); @@ -550,14 +567,14 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv, return -ENOMEM; cmd->header.cmd_no = cmd_no; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd_async(priv, cmd, - kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd_async(priv, cmd, cmd_len); if (err) kfree(cmd); @@ -701,6 +718,7 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev, { struct kvaser_usb_dev_card_data *card_data = &dev->card_data; struct kvaser_cmd *cmd; + size_t cmd_len; u32 value = 0; u32 mask = 0; u16 cap_cmd_res; @@ -712,13 +730,14 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev, return -ENOMEM; cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req); kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) goto end; @@ -812,6 +831,39 @@ static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev, complete(&priv->flush_comp); } +static void kvaser_usb_hydra_get_busparams_reply(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv; + struct kvaser_usb_net_hydra_priv *hydra; + + priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); + if (!priv) + return; + + hydra = priv->sub_priv; + if (!hydra) + return; + + switch (hydra->pending_get_busparams_type) { + case KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN: + memcpy(&priv->busparams_nominal, &cmd->get_busparams_res.busparams, + sizeof(priv->busparams_nominal)); + break; + case KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD: + memcpy(&priv->busparams_data, &cmd->get_busparams_res.busparams, + sizeof(priv->busparams_nominal)); + break; + 
default: + dev_warn(&dev->intf->dev, "Unknown get_busparams_type %d\n", + hydra->pending_get_busparams_type); + break; + } + hydra->pending_get_busparams_type = -1; + + complete(&priv->get_busparams_comp); +} + static void kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv, u8 bus_status, @@ -1099,6 +1151,7 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev, struct kvaser_usb_net_priv *priv; unsigned long irq_flags; bool one_shot_fail = false; + bool is_err_frame = false; u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd); priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); @@ -1117,10 +1170,13 @@ static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev, kvaser_usb_hydra_one_shot_fail(priv, cmd_ext); one_shot_fail = true; } + + is_err_frame = flags & KVASER_USB_HYDRA_CF_FLAG_TX_ACK && + flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME; } context = &priv->tx_contexts[transid % dev->max_tx_urbs]; - if (!one_shot_fail) { + if (!one_shot_fail && !is_err_frame) { struct net_device_stats *stats = &priv->netdev->stats; stats->tx_packets++; @@ -1294,6 +1350,10 @@ static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev, kvaser_usb_hydra_state_event(dev, cmd); break; + case CMD_GET_BUSPARAMS_RESP: + kvaser_usb_hydra_get_busparams_reply(dev, cmd); + break; + case CMD_ERROR_EVENT: kvaser_usb_hydra_error_event(dev, cmd); break; @@ -1494,15 +1554,61 @@ static int kvaser_usb_hydra_set_mode(struct net_device *netdev, return err; } -static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev) +static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv, + int busparams_type) +{ + struct kvaser_usb *dev = priv->dev; + struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv; + struct kvaser_cmd *cmd; + size_t cmd_len; + int err; + + if (!hydra) + return -EINVAL; + + cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); + kvaser_usb_hydra_set_cmd_dest_he + (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); + kvaser_usb_hydra_set_cmd_transid + (cmd, kvaser_usb_hydra_get_next_transid(dev)); + cmd->get_busparams_req.type = busparams_type; + hydra->pending_get_busparams_type = busparams_type; + + reinit_completion(&priv->get_busparams_comp); + + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); + if (err) + return err; + + if (!wait_for_completion_timeout(&priv->get_busparams_comp, + msecs_to_jiffies(KVASER_USB_TIMEOUT))) + return -ETIMEDOUT; + + return err; +} + +static int kvaser_usb_hydra_get_nominal_busparams(struct kvaser_usb_net_priv *priv) +{ + return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN); +} + +static int kvaser_usb_hydra_get_data_busparams(struct kvaser_usb_net_priv *priv) +{ + return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD); +} + +static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams) { struct kvaser_cmd *cmd; struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct can_bittiming *bt = &priv->can.bittiming; struct kvaser_usb *dev = priv->dev; - int tseg1 = bt->prop_seg + bt->phase_seg1; - int tseg2 = bt->phase_seg2; - int sjw = bt->sjw; + size_t cmd_len; int err; cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); @@ -1510,33 +1616,29 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev) return -ENOMEM; 
cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ; - cmd->set_busparams_req.bitrate = cpu_to_le32(bt->bitrate); - cmd->set_busparams_req.sjw = (u8)sjw; - cmd->set_busparams_req.tseg1 = (u8)tseg1; - cmd->set_busparams_req.tseg2 = (u8)tseg2; - cmd->set_busparams_req.nsamples = 1; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); + memcpy(&cmd->set_busparams_req.busparams_nominal, busparams, + sizeof(cmd->set_busparams_req.busparams_nominal)); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); kfree(cmd); return err; } -static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev) +static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams) { struct kvaser_cmd *cmd; struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct can_bittiming *dbt = &priv->can.data_bittiming; struct kvaser_usb *dev = priv->dev; - int tseg1 = dbt->prop_seg + dbt->phase_seg1; - int tseg2 = dbt->phase_seg2; - int sjw = dbt->sjw; + size_t cmd_len; int err; cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); @@ -1544,11 +1646,9 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev) return -ENOMEM; cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ; - cmd->set_busparams_req.bitrate_d = cpu_to_le32(dbt->bitrate); - cmd->set_busparams_req.sjw_d = (u8)sjw; - cmd->set_busparams_req.tseg1_d = (u8)tseg1; - cmd->set_busparams_req.tseg2_d = (u8)tseg2; - cmd->set_busparams_req.nsamples_d = 1; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); + memcpy(&cmd->set_busparams_req.busparams_data, busparams, + sizeof(cmd->set_busparams_req.busparams_data)); if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) @@ -1564,7 +1664,7 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev) kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); kfree(cmd); @@ -1655,6 +1755,19 @@ static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev) return 0; } +static int kvaser_usb_hydra_init_channel(struct kvaser_usb_net_priv *priv) +{ + struct kvaser_usb_net_hydra_priv *hydra; + + hydra = devm_kzalloc(&priv->dev->intf->dev, sizeof(*hydra), GFP_KERNEL); + if (!hydra) + return -ENOMEM; + + priv->sub_priv = hydra; + + return 0; +} + static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev) { struct kvaser_cmd cmd; @@ -1679,6 +1792,7 @@ static int kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev) static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) { struct kvaser_cmd *cmd; + size_t cmd_len; int err; u32 flags; struct kvaser_usb_dev_card_data *card_data = &dev->card_data; @@ -1688,6 +1802,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) return -ENOMEM; cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); cmd->sw_detail_req.use_ext_cmd = 1; kvaser_usb_hydra_set_cmd_dest_he (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); @@ -1695,7 +1810,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); - err = kvaser_usb_send_cmd(dev, 
cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) goto end; @@ -1811,6 +1926,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) { struct kvaser_usb *dev = priv->dev; struct kvaser_cmd *cmd; + size_t cmd_len; int err; if ((priv->can.ctrlmode & @@ -1826,6 +1942,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) return -ENOMEM; cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ; + cmd_len = kvaser_usb_hydra_cmd_size(cmd); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid @@ -1835,7 +1952,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) else cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL; - err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); + err = kvaser_usb_send_cmd(dev, cmd, cmd_len); kfree(cmd); return err; @@ -1997,10 +2114,13 @@ kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv, const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = { .dev_set_mode = kvaser_usb_hydra_set_mode, .dev_set_bittiming = kvaser_usb_hydra_set_bittiming, + .dev_get_busparams = kvaser_usb_hydra_get_nominal_busparams, .dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming, + .dev_get_data_busparams = kvaser_usb_hydra_get_data_busparams, .dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter, .dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints, .dev_init_card = kvaser_usb_hydra_init_card, + .dev_init_channel = kvaser_usb_hydra_init_channel, .dev_get_software_info = kvaser_usb_hydra_get_software_info, .dev_get_software_details = kvaser_usb_hydra_get_software_details, .dev_get_card_info = kvaser_usb_hydra_get_card_info, diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index 15380cc08..f06d63db9 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -20,6 +20,7 @@ #include <linux/string.h> #include <linux/types.h> #include <linux/usb.h> +#include <linux/workqueue.h> #include <linux/can.h> #include <linux/can/dev.h> @@ -55,6 +56,9 @@ #define CMD_RX_EXT_MESSAGE 14 #define CMD_TX_EXT_MESSAGE 15 #define CMD_SET_BUS_PARAMS 16 +#define CMD_GET_BUS_PARAMS 17 +#define CMD_GET_BUS_PARAMS_REPLY 18 +#define CMD_GET_CHIP_STATE 19 #define CMD_CHIP_STATE_EVENT 20 #define CMD_SET_CTRL_MODE 21 #define CMD_RESET_CHIP 24 @@ -69,10 +73,13 @@ #define CMD_GET_CARD_INFO_REPLY 35 #define CMD_GET_SOFTWARE_INFO 38 #define CMD_GET_SOFTWARE_INFO_REPLY 39 +#define CMD_ERROR_EVENT 45 #define CMD_FLUSH_QUEUE 48 #define CMD_TX_ACKNOWLEDGE 50 #define CMD_CAN_ERROR_EVENT 51 #define CMD_FLUSH_QUEUE_REPLY 68 +#define CMD_GET_CAPABILITIES_REQ 95 +#define CMD_GET_CAPABILITIES_RESP 96 #define CMD_LEAF_LOG_MESSAGE 106 @@ -82,6 +89,8 @@ #define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5) #define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6) +#define KVASER_USB_LEAF_SWOPTION_EXT_CAP BIT(12) + /* error factors */ #define M16C_EF_ACKE BIT(0) #define M16C_EF_CRCE BIT(1) @@ -156,11 +165,7 @@ struct usbcan_cmd_softinfo { struct kvaser_cmd_busparams { u8 tid; u8 channel; - __le32 bitrate; - u8 tseg1; - u8 tseg2; - u8 sjw; - u8 no_samp; + struct kvaser_usb_busparams busparams; } __packed; struct kvaser_cmd_tx_can { @@ -229,7 +234,7 @@ struct kvaser_cmd_tx_acknowledge_header { u8 tid; } __packed; -struct leaf_cmd_error_event { +struct 
leaf_cmd_can_error_event { u8 tid; u8 flags; __le16 time[3]; @@ -241,7 +246,7 @@ struct leaf_cmd_error_event { u8 error_factor; } __packed; -struct usbcan_cmd_error_event { +struct usbcan_cmd_can_error_event { u8 tid; u8 padding; u8 tx_errors_count_ch0; @@ -253,6 +258,28 @@ struct usbcan_cmd_error_event { __le16 time; } __packed; +/* CMD_ERROR_EVENT error codes */ +#define KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL 0x8 +#define KVASER_USB_LEAF_ERROR_EVENT_PARAM 0x9 + +struct leaf_cmd_error_event { + u8 tid; + u8 error_code; + __le16 timestamp[3]; + __le16 padding; + __le16 info1; + __le16 info2; +} __packed; + +struct usbcan_cmd_error_event { + u8 tid; + u8 error_code; + __le16 info1; + __le16 info2; + __le16 timestamp; + __le16 padding; +} __packed; + struct kvaser_cmd_ctrl_mode { u8 tid; u8 channel; @@ -277,6 +304,28 @@ struct leaf_cmd_log_message { u8 data[8]; } __packed; +/* Sub commands for cap_req and cap_res */ +#define KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE 0x02 +#define KVASER_USB_LEAF_CAP_CMD_ERR_REPORT 0x05 +struct kvaser_cmd_cap_req { + __le16 padding0; + __le16 cap_cmd; + __le16 padding1; + __le16 channel; +} __packed; + +/* Status codes for cap_res */ +#define KVASER_USB_LEAF_CAP_STAT_OK 0x00 +#define KVASER_USB_LEAF_CAP_STAT_NOT_IMPL 0x01 +#define KVASER_USB_LEAF_CAP_STAT_UNAVAIL 0x02 +struct kvaser_cmd_cap_res { + __le16 padding; + __le16 cap_cmd; + __le16 status; + __le32 mask; + __le32 value; +} __packed; + struct kvaser_cmd { u8 len; u8 id; @@ -292,14 +341,18 @@ struct kvaser_cmd { struct leaf_cmd_softinfo softinfo; struct leaf_cmd_rx_can rx_can; struct leaf_cmd_chip_state_event chip_state_event; - struct leaf_cmd_error_event error_event; + struct leaf_cmd_can_error_event can_error_event; struct leaf_cmd_log_message log_message; + struct leaf_cmd_error_event error_event; + struct kvaser_cmd_cap_req cap_req; + struct kvaser_cmd_cap_res cap_res; } __packed leaf; union { struct usbcan_cmd_softinfo softinfo; struct usbcan_cmd_rx_can rx_can; struct usbcan_cmd_chip_state_event chip_state_event; + struct usbcan_cmd_can_error_event can_error_event; struct usbcan_cmd_error_event error_event; } __packed usbcan; @@ -322,7 +375,10 @@ static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = { [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can), [CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message), [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event), - [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event), + [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.can_error_event), + [CMD_GET_CAPABILITIES_RESP] = kvaser_fsize(u.leaf.cap_res), + [CMD_GET_BUS_PARAMS_REPLY] = kvaser_fsize(u.busparams), + [CMD_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event), /* ignored events: */ [CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY, }; @@ -336,7 +392,8 @@ static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = { [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can), [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can), [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event), - [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event), + [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event), + [CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event), /* ignored events: */ [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY, }; @@ -364,6 +421,12 @@ struct kvaser_usb_err_summary { }; }; +struct kvaser_usb_net_leaf_priv { + struct kvaser_usb_net_priv *net; + + struct delayed_work chip_state_req_work; +}; + static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = { .name = 
"kvaser_usb_ucii", .tseg1_min = 4, @@ -607,6 +670,9 @@ static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev, dev->fw_version = le32_to_cpu(softinfo->fw_version); dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx); + if (sw_options & KVASER_USB_LEAF_SWOPTION_EXT_CAP) + dev->card_data.capabilities |= KVASER_USB_CAP_EXT_CAP; + if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) { /* Firmware expects bittiming parameters calculated for 16MHz * clock, regardless of the actual clock @@ -694,6 +760,116 @@ static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev) return 0; } +static int kvaser_usb_leaf_get_single_capability(struct kvaser_usb *dev, + u16 cap_cmd_req, u16 *status) +{ + struct kvaser_usb_dev_card_data *card_data = &dev->card_data; + struct kvaser_cmd *cmd; + u32 value = 0; + u32 mask = 0; + u16 cap_cmd_res; + int err; + int i; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->id = CMD_GET_CAPABILITIES_REQ; + cmd->u.leaf.cap_req.cap_cmd = cpu_to_le16(cap_cmd_req); + cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_cap_req); + + err = kvaser_usb_send_cmd(dev, cmd, cmd->len); + if (err) + goto end; + + err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd); + if (err) + goto end; + + *status = le16_to_cpu(cmd->u.leaf.cap_res.status); + + if (*status != KVASER_USB_LEAF_CAP_STAT_OK) + goto end; + + cap_cmd_res = le16_to_cpu(cmd->u.leaf.cap_res.cap_cmd); + switch (cap_cmd_res) { + case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE: + case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT: + value = le32_to_cpu(cmd->u.leaf.cap_res.value); + mask = le32_to_cpu(cmd->u.leaf.cap_res.mask); + break; + default: + dev_warn(&dev->intf->dev, "Unknown capability command %u\n", + cap_cmd_res); + break; + } + + for (i = 0; i < dev->nchannels; i++) { + if (BIT(i) & (value & mask)) { + switch (cap_cmd_res) { + case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE: + card_data->ctrlmode_supported |= + CAN_CTRLMODE_LISTENONLY; + break; + case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT: + card_data->capabilities |= + KVASER_USB_CAP_BERR_CAP; + break; + } + } + } + +end: + kfree(cmd); + + return err; +} + +static int kvaser_usb_leaf_get_capabilities_leaf(struct kvaser_usb *dev) +{ + int err; + u16 status; + + if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) { + dev_info(&dev->intf->dev, + "No extended capability support. 
Upgrade device firmware.\n"); + return 0; + } + + err = kvaser_usb_leaf_get_single_capability(dev, + KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE, + &status); + if (err) + return err; + if (status) + dev_info(&dev->intf->dev, + "KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE failed %u\n", + status); + + err = kvaser_usb_leaf_get_single_capability(dev, + KVASER_USB_LEAF_CAP_CMD_ERR_REPORT, + &status); + if (err) + return err; + if (status) + dev_info(&dev->intf->dev, + "KVASER_USB_LEAF_CAP_CMD_ERR_REPORT failed %u\n", + status); + + return 0; +} + +static int kvaser_usb_leaf_get_capabilities(struct kvaser_usb *dev) +{ + int err = 0; + + if (dev->driver_info->family == KVASER_LEAF) + err = kvaser_usb_leaf_get_capabilities_leaf(dev); + + return err; +} + static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { @@ -722,7 +898,7 @@ static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, context = &priv->tx_contexts[tid % dev->max_tx_urbs]; /* Sometimes the state change doesn't come after a bus-off event */ - if (priv->can.restart_ms && priv->can.state >= CAN_STATE_BUS_OFF) { + if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) { struct sk_buff *skb; struct can_frame *cf; @@ -778,6 +954,16 @@ static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv, return err; } +static void kvaser_usb_leaf_chip_state_req_work(struct work_struct *work) +{ + struct kvaser_usb_net_leaf_priv *leaf = + container_of(work, struct kvaser_usb_net_leaf_priv, + chip_state_req_work.work); + struct kvaser_usb_net_priv *priv = leaf->net; + + kvaser_usb_leaf_simple_cmd_async(priv, CMD_GET_CHIP_STATE); +} + static void kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, const struct kvaser_usb_err_summary *es, @@ -796,20 +982,16 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, new_state = CAN_STATE_BUS_OFF; } else if (es->status & M16C_STATE_BUS_PASSIVE) { new_state = CAN_STATE_ERROR_PASSIVE; - } else if (es->status & M16C_STATE_BUS_ERROR) { + } else if ((es->status & M16C_STATE_BUS_ERROR) && + cur_state >= CAN_STATE_BUS_OFF) { /* Guard against spurious error events after a busoff */ - if (cur_state < CAN_STATE_BUS_OFF) { - if (es->txerr >= 128 || es->rxerr >= 128) - new_state = CAN_STATE_ERROR_PASSIVE; - else if (es->txerr >= 96 || es->rxerr >= 96) - new_state = CAN_STATE_ERROR_WARNING; - else if (cur_state > CAN_STATE_ERROR_ACTIVE) - new_state = CAN_STATE_ERROR_ACTIVE; - } - } - - if (!es->status) + } else if (es->txerr >= 128 || es->rxerr >= 128) { + new_state = CAN_STATE_ERROR_PASSIVE; + } else if (es->txerr >= 96 || es->rxerr >= 96) { + new_state = CAN_STATE_ERROR_WARNING; + } else { new_state = CAN_STATE_ERROR_ACTIVE; + } if (new_state != cur_state) { tx_state = (es->txerr >= es->rxerr) ? 
new_state : 0; @@ -819,7 +1001,7 @@ kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, } if (priv->can.restart_ms && - cur_state >= CAN_STATE_BUS_OFF && + cur_state == CAN_STATE_BUS_OFF && new_state < CAN_STATE_BUS_OFF) priv->can.can_stats.restarts++; @@ -853,6 +1035,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, struct sk_buff *skb; struct net_device_stats *stats; struct kvaser_usb_net_priv *priv; + struct kvaser_usb_net_leaf_priv *leaf; enum can_state old_state, new_state; if (es->channel >= dev->nchannels) { @@ -862,8 +1045,13 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, } priv = dev->nets[es->channel]; + leaf = priv->sub_priv; stats = &priv->netdev->stats; + /* Ignore e.g. state change to bus-off reported just after stopping */ + if (!netif_running(priv->netdev)) + return; + /* Update all of the CAN interface's state and error counters before * trying any memory allocation that can actually fail with -ENOMEM. * @@ -878,6 +1066,14 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf); new_state = priv->can.state; + /* If there are errors, request status updates periodically as we do + * not get automatic notifications of improved state. + */ + if (new_state < CAN_STATE_BUS_OFF && + (es->rxerr || es->txerr || new_state == CAN_STATE_ERROR_PASSIVE)) + schedule_delayed_work(&leaf->chip_state_req_work, + msecs_to_jiffies(500)); + skb = alloc_can_err_skb(priv->netdev, &cf); if (!skb) { stats->rx_dropped++; @@ -895,7 +1091,7 @@ static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, } if (priv->can.restart_ms && - old_state >= CAN_STATE_BUS_OFF && + old_state == CAN_STATE_BUS_OFF && new_state < CAN_STATE_BUS_OFF) { cf->can_id |= CAN_ERR_RESTARTED; netif_carrier_on(priv->netdev); @@ -995,11 +1191,11 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev, case CMD_CAN_ERROR_EVENT: es.channel = 0; - es.status = cmd->u.usbcan.error_event.status_ch0; - es.txerr = cmd->u.usbcan.error_event.tx_errors_count_ch0; - es.rxerr = cmd->u.usbcan.error_event.rx_errors_count_ch0; + es.status = cmd->u.usbcan.can_error_event.status_ch0; + es.txerr = cmd->u.usbcan.can_error_event.tx_errors_count_ch0; + es.rxerr = cmd->u.usbcan.can_error_event.rx_errors_count_ch0; es.usbcan.other_ch_status = - cmd->u.usbcan.error_event.status_ch1; + cmd->u.usbcan.can_error_event.status_ch1; kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); /* The USBCAN firmware supports up to 2 channels. 
@@ -1007,13 +1203,13 @@ static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev, */ if (dev->nchannels == MAX_USBCAN_NET_DEVICES) { es.channel = 1; - es.status = cmd->u.usbcan.error_event.status_ch1; + es.status = cmd->u.usbcan.can_error_event.status_ch1; es.txerr = - cmd->u.usbcan.error_event.tx_errors_count_ch1; + cmd->u.usbcan.can_error_event.tx_errors_count_ch1; es.rxerr = - cmd->u.usbcan.error_event.rx_errors_count_ch1; + cmd->u.usbcan.can_error_event.rx_errors_count_ch1; es.usbcan.other_ch_status = - cmd->u.usbcan.error_event.status_ch0; + cmd->u.usbcan.can_error_event.status_ch0; kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); } break; @@ -1030,11 +1226,11 @@ static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev, switch (cmd->id) { case CMD_CAN_ERROR_EVENT: - es.channel = cmd->u.leaf.error_event.channel; - es.status = cmd->u.leaf.error_event.status; - es.txerr = cmd->u.leaf.error_event.tx_errors_count; - es.rxerr = cmd->u.leaf.error_event.rx_errors_count; - es.leaf.error_factor = cmd->u.leaf.error_event.error_factor; + es.channel = cmd->u.leaf.can_error_event.channel; + es.status = cmd->u.leaf.can_error_event.status; + es.txerr = cmd->u.leaf.can_error_event.tx_errors_count; + es.rxerr = cmd->u.leaf.can_error_event.rx_errors_count; + es.leaf.error_factor = cmd->u.leaf.can_error_event.error_factor; break; case CMD_LEAF_LOG_MESSAGE: es.channel = cmd->u.leaf.log_message.channel; @@ -1166,6 +1362,74 @@ static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, netif_rx(skb); } +static void kvaser_usb_leaf_error_event_parameter(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + u16 info1 = 0; + + switch (dev->driver_info->family) { + case KVASER_LEAF: + info1 = le16_to_cpu(cmd->u.leaf.error_event.info1); + break; + case KVASER_USBCAN: + info1 = le16_to_cpu(cmd->u.usbcan.error_event.info1); + break; + } + + /* info1 will contain the offending cmd_no */ + switch (info1) { + case CMD_SET_CTRL_MODE: + dev_warn(&dev->intf->dev, + "CMD_SET_CTRL_MODE error in parameter\n"); + break; + + case CMD_SET_BUS_PARAMS: + dev_warn(&dev->intf->dev, + "CMD_SET_BUS_PARAMS error in parameter\n"); + break; + + default: + dev_warn(&dev->intf->dev, + "Unhandled parameter error event cmd_no (%u)\n", + info1); + break; + } +} + +static void kvaser_usb_leaf_error_event(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + u8 error_code = 0; + + switch (dev->driver_info->family) { + case KVASER_LEAF: + error_code = cmd->u.leaf.error_event.error_code; + break; + case KVASER_USBCAN: + error_code = cmd->u.usbcan.error_event.error_code; + break; + } + + switch (error_code) { + case KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL: + /* Received additional CAN message, when firmware TX queue is + * already full. Something is wrong with the driver. + * This should never happen! 
+ */ + dev_err(&dev->intf->dev, + "Received error event TX_QUEUE_FULL\n"); + break; + case KVASER_USB_LEAF_ERROR_EVENT_PARAM: + kvaser_usb_leaf_error_event_parameter(dev, cmd); + break; + + default: + dev_warn(&dev->intf->dev, + "Unhandled error event (%d)\n", error_code); + break; + } +} + static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { @@ -1206,6 +1470,25 @@ static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev, complete(&priv->stop_comp); } +static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev, + const struct kvaser_cmd *cmd) +{ + struct kvaser_usb_net_priv *priv; + u8 channel = cmd->u.busparams.channel; + + if (channel >= dev->nchannels) { + dev_err(&dev->intf->dev, + "Invalid channel number (%d)\n", channel); + return; + } + + priv = dev->nets[channel]; + memcpy(&priv->busparams_nominal, &cmd->u.busparams.busparams, + sizeof(priv->busparams_nominal)); + + complete(&priv->get_busparams_comp); +} + static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { @@ -1244,6 +1527,14 @@ static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, kvaser_usb_leaf_tx_acknowledge(dev, cmd); break; + case CMD_ERROR_EVENT: + kvaser_usb_leaf_error_event(dev, cmd); + break; + + case CMD_GET_BUS_PARAMS_REPLY: + kvaser_usb_leaf_get_busparams_reply(dev, cmd); + break; + /* Ignored commands */ case CMD_USBCAN_CLOCK_OVERFLOW_EVENT: if (dev->driver_info->family != KVASER_USBCAN) @@ -1340,10 +1631,13 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv) static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv) { + struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; int err; reinit_completion(&priv->stop_comp); + cancel_delayed_work(&leaf->chip_state_req_work); + err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP, priv->channel); if (err) @@ -1390,10 +1684,35 @@ static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev) return 0; } -static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev) +static int kvaser_usb_leaf_init_channel(struct kvaser_usb_net_priv *priv) +{ + struct kvaser_usb_net_leaf_priv *leaf; + + leaf = devm_kzalloc(&priv->dev->intf->dev, sizeof(*leaf), GFP_KERNEL); + if (!leaf) + return -ENOMEM; + + leaf->net = priv; + INIT_DELAYED_WORK(&leaf->chip_state_req_work, + kvaser_usb_leaf_chip_state_req_work); + + priv->sub_priv = leaf; + + return 0; +} + +static void kvaser_usb_leaf_remove_channel(struct kvaser_usb_net_priv *priv) +{ + struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; + + if (leaf) + cancel_delayed_work_sync(&leaf->chip_state_req_work); +} + +static int kvaser_usb_leaf_set_bittiming(const struct net_device *netdev, + const struct kvaser_usb_busparams *busparams) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); - struct can_bittiming *bt = &priv->can.bittiming; struct kvaser_usb *dev = priv->dev; struct kvaser_cmd *cmd; int rc; @@ -1406,15 +1725,8 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev) cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams); cmd->u.busparams.channel = priv->channel; cmd->u.busparams.tid = 0xff; - cmd->u.busparams.bitrate = cpu_to_le32(bt->bitrate); - cmd->u.busparams.sjw = bt->sjw; - cmd->u.busparams.tseg1 = bt->prop_seg + bt->phase_seg1; - cmd->u.busparams.tseg2 = bt->phase_seg2; - - if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) - cmd->u.busparams.no_samp = 3; - else - 
cmd->u.busparams.no_samp = 1; + memcpy(&cmd->u.busparams.busparams, busparams, + sizeof(cmd->u.busparams.busparams)); rc = kvaser_usb_send_cmd(dev, cmd, cmd->len); @@ -1422,6 +1734,27 @@ static int kvaser_usb_leaf_set_bittiming(struct net_device *netdev) return rc; } +static int kvaser_usb_leaf_get_busparams(struct kvaser_usb_net_priv *priv) +{ + int err; + + if (priv->dev->driver_info->family == KVASER_USBCAN) + return -EOPNOTSUPP; + + reinit_completion(&priv->get_busparams_comp); + + err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_GET_BUS_PARAMS, + priv->channel); + if (err) + return err; + + if (!wait_for_completion_timeout(&priv->get_busparams_comp, + msecs_to_jiffies(KVASER_USB_TIMEOUT))) + return -ETIMEDOUT; + + return 0; +} + static int kvaser_usb_leaf_set_mode(struct net_device *netdev, enum can_mode mode) { @@ -1483,14 +1816,18 @@ static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev) const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = { .dev_set_mode = kvaser_usb_leaf_set_mode, .dev_set_bittiming = kvaser_usb_leaf_set_bittiming, + .dev_get_busparams = kvaser_usb_leaf_get_busparams, .dev_set_data_bittiming = NULL, + .dev_get_data_busparams = NULL, .dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter, .dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints, .dev_init_card = kvaser_usb_leaf_init_card, + .dev_init_channel = kvaser_usb_leaf_init_channel, + .dev_remove_channel = kvaser_usb_leaf_remove_channel, .dev_get_software_info = kvaser_usb_leaf_get_software_info, .dev_get_software_details = NULL, .dev_get_card_info = kvaser_usb_leaf_get_card_info, - .dev_get_capabilities = NULL, + .dev_get_capabilities = kvaser_usb_leaf_get_capabilities, .dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode, .dev_start_chip = kvaser_usb_leaf_start_chip, .dev_stop_chip = kvaser_usb_leaf_stop_chip, diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index ea1de0600..093458fbd 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -58,6 +58,10 @@ #define MCBA_VER_REQ_USB 1 #define MCBA_VER_REQ_CAN 2 +/* Drive the CAN_RES signal LOW "0" to activate R24 and R25 */ +#define MCBA_VER_TERMINATION_ON 0 +#define MCBA_VER_TERMINATION_OFF 1 + #define MCBA_SIDL_EXID_MASK 0x8 #define MCBA_DLC_MASK 0xf #define MCBA_DLC_RTR_MASK 0x40 @@ -480,7 +484,7 @@ static void mcba_usb_process_ka_usb(struct mcba_priv *priv, priv->usb_ka_first_pass = false; } - if (msg->termination_state) + if (msg->termination_state == MCBA_VER_TERMINATION_ON) priv->can.termination = MCBA_TERMINATION_ENABLED; else priv->can.termination = MCBA_TERMINATION_DISABLED; @@ -800,9 +804,9 @@ static int mcba_set_termination(struct net_device *netdev, u16 term) }; if (term == MCBA_TERMINATION_ENABLED) - usb_msg.termination = 1; + usb_msg.termination = MCBA_VER_TERMINATION_ON; else - usb_msg.termination = 0; + usb_msg.termination = MCBA_VER_TERMINATION_OFF; mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg); diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c index c628d0980..1d52cb3e4 100644 --- a/drivers/net/dsa/b53/b53_mmap.c +++ b/drivers/net/dsa/b53/b53_mmap.c @@ -215,6 +215,18 @@ static int b53_mmap_write64(struct b53_device *dev, u8 page, u8 reg, return 0; } +static int b53_mmap_phy_read16(struct b53_device *dev, int addr, int reg, + u16 *value) +{ + return -EIO; +} + +static int b53_mmap_phy_write16(struct b53_device *dev, int addr, int reg, + u16 value) +{ + return -EIO; +} + static const struct b53_io_ops b53_mmap_ops = { .read8 = 
b53_mmap_read8, .read16 = b53_mmap_read16, @@ -226,6 +238,8 @@ static const struct b53_io_ops b53_mmap_ops = { .write32 = b53_mmap_write32, .write48 = b53_mmap_write48, .write64 = b53_mmap_write64, + .phy_read16 = b53_mmap_phy_read16, + .phy_write16 = b53_mmap_phy_write16, }; static int b53_mmap_probe(struct platform_device *pdev) diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c index 03dc075ff..f976b3d64 100644 --- a/drivers/net/dsa/lan9303-core.c +++ b/drivers/net/dsa/lan9303-core.c @@ -1010,9 +1010,11 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port, ret = lan9303_read_switch_port( chip, port, lan9303_mib[u].offset, ®); - if (ret) + if (ret) { dev_warn(chip->dev, "Reading status port %d reg %u failed\n", port, lan9303_mib[u].offset); + reg = 0; + } data[u] = reg; } } diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index be064bcfd..6b310f723 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2237,9 +2237,14 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port) * If this is the upstream port for this switch, enable * forwarding of unknown unicasts and multicasts. */ - reg = MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP | - MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP | + reg = MV88E6185_PORT_CTL0_USE_TAG | MV88E6185_PORT_CTL0_USE_IP | MV88E6XXX_PORT_CTL0_STATE_FORWARDING; + /* Forward any IPv4 IGMP or IPv6 MLD frames received + * by a USER port to the CPU port to allow snooping. + */ + if (dsa_is_user_port(ds, port)) + reg |= MV88E6XXX_PORT_CTL0_IGMP_MLD_SNOOP; + err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_CTL0, reg); if (err) return err; diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index d3d44e07a..414b99082 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -825,7 +825,7 @@ lance_start_xmit(struct sk_buff *skb, struct net_device *dev) lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len ); head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP; dev->stats.tx_bytes += skb->len; - dev_kfree_skb( skb ); + dev_consume_skb_irq(skb); lp->cur_tx++; while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) { lp->cur_tx -= TX_RING_SIZE; diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c index b56d84c7d..a21f4f82a 100644 --- a/drivers/net/ethernet/amd/lance.c +++ b/drivers/net/ethernet/amd/lance.c @@ -997,7 +997,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb, skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len); lp->tx_ring[entry].base = ((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000; - dev_kfree_skb(skb); + dev_consume_skb_irq(skb); } else { lp->tx_skbuff[entry] = skb; lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 4666084ed..9d6fe5a89 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c @@ -524,19 +524,28 @@ static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata) netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n"); } +static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata) +{ + unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; + + /* From MAC ver 30H the TFCR is per priority, instead of per queue */ + if 
(XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30) + return max_q_count; + else + return min_t(unsigned int, pdata->tx_q_count, max_q_count); +} + static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) { - unsigned int max_q_count, q_count; unsigned int reg, reg_val; - unsigned int i; + unsigned int i, q_count; /* Clear MTL flow control */ for (i = 0; i < pdata->rx_q_count; i++) XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0); /* Clear MAC flow control */ - max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; - q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); + q_count = xgbe_get_fc_queue_count(pdata); reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) { reg_val = XGMAC_IOREAD(pdata, reg); @@ -553,9 +562,8 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) { struct ieee_pfc *pfc = pdata->pfc; struct ieee_ets *ets = pdata->ets; - unsigned int max_q_count, q_count; unsigned int reg, reg_val; - unsigned int i; + unsigned int i, q_count; /* Set MTL flow control */ for (i = 0; i < pdata->rx_q_count; i++) { @@ -579,8 +587,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) } /* Set MAC flow control */ - max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; - q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); + q_count = xgbe_get_fc_queue_count(pdata); reg = MAC_Q0TFCR; for (i = 0; i < q_count; i++) { reg_val = XGMAC_IOREAD(pdata, reg); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 35659f0db..c1fb1e625 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -1140,6 +1140,9 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata) devm_free_irq(pdata->dev, pdata->dev_irq, pdata); + tasklet_kill(&pdata->tasklet_dev); + tasklet_kill(&pdata->tasklet_ecc); + if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) devm_free_irq(pdata->dev, pdata->ecc_irq, pdata); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c index 4d9062d35..530043742 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c @@ -447,8 +447,10 @@ static void xgbe_i2c_stop(struct xgbe_prv_data *pdata) xgbe_i2c_disable(pdata); xgbe_i2c_clear_all_interrupts(pdata); - if (pdata->dev_irq != pdata->i2c_irq) + if (pdata->dev_irq != pdata->i2c_irq) { devm_free_irq(pdata->dev, pdata->i2c_irq, pdata); + tasklet_kill(&pdata->tasklet_i2c); + } } static int xgbe_i2c_start(struct xgbe_prv_data *pdata) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 156a0bc8a..7840eb4cd 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -496,6 +496,7 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata, reg |= XGBE_KR_TRAINING_ENABLE; reg |= XGBE_KR_TRAINING_START; XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); + pdata->kr_start_time = jiffies; netif_dbg(pdata, link, pdata->netdev, "KR training initiated\n"); @@ -632,6 +633,8 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata) xgbe_switch_mode(pdata); + pdata->an_result = XGBE_AN_READY; + xgbe_an_restart(pdata); return XGBE_AN_INCOMPAT_LINK; @@ -1275,9 +1278,30 @@ static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata) static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata) { unsigned long link_timeout; + unsigned long kr_time; + int wait; 
link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
if (time_after(jiffies, link_timeout)) {
+ if ((xgbe_cur_mode(pdata) == XGBE_MODE_KR) &&
+ pdata->phy.autoneg == AUTONEG_ENABLE) {
+ /* AN restart should not happen while KR training is in progress.
+ * The while loop ensures no AN restart during KR training,
+ * waits up to 500ms and AN restart is triggered only if KR
+ * training is failed.
+ */
+ wait = XGBE_KR_TRAINING_WAIT_ITER;
+ while (wait--) {
+ kr_time = pdata->kr_start_time +
+ msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
+ if (time_after(jiffies, kr_time))
+ break;
+ /* AN restart is not required, if AN result is COMPLETE */
+ if (pdata->an_result == XGBE_AN_COMPLETE)
+ return;
+ usleep_range(10000, 11000);
+ }
+ }
netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
xgbe_phy_config_aneg(pdata);
}
@@ -1390,8 +1414,10 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
/* Disable auto-negotiation */
xgbe_an_disable_all(pdata);
- if (pdata->dev_irq != pdata->an_irq)
+ if (pdata->dev_irq != pdata->an_irq) {
devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+ tasklet_kill(&pdata->tasklet_an);
+ }
pdata->phy_if.phy_impl.stop(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index d54e6e138..e32649c25 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -188,6 +188,7 @@ enum xgbe_sfp_cable {
XGBE_SFP_CABLE_UNKNOWN = 0,
XGBE_SFP_CABLE_ACTIVE,
XGBE_SFP_CABLE_PASSIVE,
+ XGBE_SFP_CABLE_FIBER,
};
enum xgbe_sfp_base {
@@ -235,10 +236,7 @@ enum xgbe_sfp_speed {
#define XGBE_SFP_BASE_BR 12
#define XGBE_SFP_BASE_BR_1GBE_MIN 0x0a
-#define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d
#define XGBE_SFP_BASE_BR_10GBE_MIN 0x64
-#define XGBE_SFP_BASE_BR_10GBE_MAX 0x68
-#define XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX 0x78
#define XGBE_SFP_BASE_CU_CABLE_LEN 18
@@ -825,29 +823,22 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
enum xgbe_sfp_speed sfp_speed)
{
- u8 *sfp_base, min, max;
+ u8 *sfp_base, min;
sfp_base = sfp_eeprom->base;
switch (sfp_speed) {
case XGBE_SFP_SPEED_1000:
min = XGBE_SFP_BASE_BR_1GBE_MIN;
- max = XGBE_SFP_BASE_BR_1GBE_MAX;
break;
case XGBE_SFP_SPEED_10000:
min = XGBE_SFP_BASE_BR_10GBE_MIN;
- if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
- XGBE_MOLEX_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN) == 0)
- max = XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX;
- else
- max = XGBE_SFP_BASE_BR_10GBE_MAX;
break;
default:
return false;
}
- return ((sfp_base[XGBE_SFP_BASE_BR] >= min) &&
- (sfp_base[XGBE_SFP_BASE_BR] <= max));
+ return sfp_base[XGBE_SFP_BASE_BR] >= min;
}
static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
@@ -1136,16 +1127,18 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
- /* Assume ACTIVE cable unless told it is PASSIVE */
+ /* Assume FIBER cable unless told otherwise */
if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) {
phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE;
phy_data->sfp_cable_len = sfp_base[XGBE_SFP_BASE_CU_CABLE_LEN];
- } else {
+ } else if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_ACTIVE) {
phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE;
+ } else {
+ phy_data->sfp_cable = XGBE_SFP_CABLE_FIBER;
}
/* Determine the type of SFP */
- if (phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE &&
+ if (phy_data->sfp_cable != XGBE_SFP_CABLE_FIBER &&
xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 0c93a552b..729307a96 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -290,6 +290,7 @@
/* Auto-negotiation */
#define XGBE_AN_MS_TIMEOUT 500
#define XGBE_LINK_TIMEOUT 5
+#define XGBE_KR_TRAINING_WAIT_ITER 50
#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
@@ -1266,6 +1267,7 @@ struct xgbe_prv_data {
unsigned int parallel_detect;
unsigned int fec_ability;
unsigned long an_start;
+ unsigned long kr_start_time;
enum xgbe_an_mode an_mode;
/* I2C support */
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index ab6ce8554..a0ce69990 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1510,7 +1510,7 @@ static void bmac_tx_timeout(struct timer_list *t)
i = bp->tx_empty;
++dev->stats.tx_errors;
if (i != bp->tx_fill) {
- dev_kfree_skb(bp->tx_bufs[i]);
+ dev_kfree_skb_irq(bp->tx_bufs[i]);
bp->tx_bufs[i] = NULL;
if (++i >= N_TX_RING) i = 0;
bp->tx_empty = i;
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index 68b9ee489..8f5e0cc00 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -840,7 +840,7 @@ static void mace_tx_timeout(struct timer_list *t)
if (mp->tx_bad_runt) {
mp->tx_bad_runt = 0;
} else if (i != mp->tx_fill) {
- dev_kfree_skb(mp->tx_bufs[i]);
+ dev_kfree_skb_irq(mp->tx_bufs[i]);
if (++i >= N_TX_RING) i = 0;
mp->tx_empty = i;
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 77de92eb0..b880c0b96 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -228,12 +228,12 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
- if (ci->pkg == BCMA_PKG_ID_BCM47188 ||
- ci->pkg == BCMA_PKG_ID_BCM47186) {
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
}
- if (ci->pkg == BCMA_PKG_ID_BCM5358)
+ if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358)
bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
break;
case BCMA_CHIP_ID_BCM53573:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index dc1062122..d79281c6d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6118,10 +6118,14 @@ int bnxt_reserve_rings(struct bnxt *bp)
netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
return rc;
}
- if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
+ if (tcs && (bp->tx_nr_rings_per_tc * tcs !=
+ bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
netdev_err(bp->dev, "tx ring reservation failure\n");
netdev_reset_tc(bp->dev);
- bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
+ if (bp->tx_nr_rings_xdp)
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
+ else
+ bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
return -ENOMEM;
}
bp->num_stat_ctxs = bp->cp_nr_rings;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 96ef2dd46..84bcb3ce0 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1825,6 +1825,14 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
__func__, p_index, ring->c_index, ring->read_ptr, dma_length_status);
+ if (unlikely(len > RX_BUF_LENGTH)) {
+ netif_err(priv, rx_status, dev, "oversized packet\n");
+ dev->stats.rx_length_errors++;
+ dev->stats.rx_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
netif_err(priv, rx_status, dev, "dropping fragmented packet!\n");
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index d1ca3d3f5..2cf144bbe 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -11189,7 +11189,7 @@ static void tg3_reset_task(struct work_struct *work)
rtnl_lock();
tg3_full_lock(tp, 0);
- if (!netif_running(tp->dev)) {
+ if (tp->pcierr_recovery || !netif_running(tp->dev)) {
tg3_flag_clear(tp, RESET_TASK_PENDING);
tg3_full_unlock(tp);
rtnl_unlock();
@@ -18240,6 +18240,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
netdev_info(netdev, "PCI I/O error detected\n");
+ /* Want to make sure that the reset task doesn't run */
+ tg3_reset_task_cancel(tp);
+
rtnl_lock();
/* Could be second call or maybe we don't have netdev yet */
@@ -18256,9 +18259,6 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
tg3_timer_stop(tp);
- /* Want to make sure that the reset task doesn't run */
- tg3_reset_task_cancel(tp);
-
netif_device_detach(netdev);
/* Clean up software state, even if MMIO is blocked */
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 50331b202..d58f5bbb8 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -707,6 +707,10 @@ static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
}
#endif
addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+#ifdef CONFIG_MACB_USE_HWSTAMP
+ if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
+ addr &= ~GEM_BIT(DMA_RXVALID);
+#endif
return addr;
}
@@ -1738,7 +1742,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) || skb_is_nonlinear(*skb);
int padlen = ETH_ZLEN - (*skb)->len;
- int headroom = skb_headroom(*skb);
int tailroom = skb_tailroom(*skb);
struct sk_buff *nskb;
u32 fcs;
@@ -1752,9 +1755,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
/* FCS could be appeded to tailroom. */
if (tailroom >= ETH_FCS_LEN)
goto add_fcs;
- /* FCS could be appeded by moving data to headroom. */
- else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
- padlen = 0;
/* No room for FCS, need to reallocate skb. */
else
padlen = ETH_FCS_LEN;
@@ -1763,10 +1763,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
padlen += ETH_FCS_LEN;
}
- if (!cloned && headroom + tailroom >= padlen) {
- (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
- skb_set_tail_pointer(*skb, (*skb)->len);
- } else {
+ if (cloned || tailroom < padlen) {
nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
if (!nskb)
return -ENOMEM;
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 5a847941c..f7d126a26 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -558,11 +558,11 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
+ spin_unlock_irqrestore(&bp->lock, flags);
+
/* free the buffer */
dev_kfree_skb(skb);
- spin_unlock_irqrestore(&bp->lock, flags);
-
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 1ab613eb5..b542aba6f 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -235,20 +235,27 @@ config I40E_DCB
If unsure, say N.
+# this is here to allow seamless migration from I40EVF --> IAVF name
+# so that CONFIG_IAVF symbol will always mirror the state of CONFIG_I40EVF
+config IAVF
+ tristate
config I40EVF
tristate "Intel(R) Ethernet Adaptive Virtual Function support"
+ select IAVF
depends on PCI_MSI
---help---
This driver supports virtual functions for Intel XL710,
- X710, X722, and all devices advertising support for Intel
- Ethernet Adaptive Virtual Function devices. For more
+ X710, X722, XXV710, and all devices advertising support for
+ Intel Ethernet Adaptive Virtual Function devices. For more
information on how to identify your adapter, go to the Adapter
& Driver ID Guide that can be located at:
- <http://support.intel.com>
+ <https://support.intel.com>
+
+ This driver was formerly named i40evf.
To compile this driver as a module, choose M here. The module
- will be called i40evf. MSI-X interrupt support is required
+ will be called iavf. MSI-X interrupt support is required
for this driver to work correctly.
config ICE diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile index 807a4f8c7..b91153df6 100644 --- a/drivers/net/ethernet/intel/Makefile +++ b/drivers/net/ethernet/intel/Makefile @@ -12,6 +12,6 @@ obj-$(CONFIG_IXGBE) += ixgbe/ obj-$(CONFIG_IXGBEVF) += ixgbevf/ obj-$(CONFIG_I40E) += i40e/ obj-$(CONFIG_IXGB) += ixgb/ -obj-$(CONFIG_I40EVF) += i40evf/ +obj-$(CONFIG_IAVF) += iavf/ obj-$(CONFIG_FM10K) += fm10k/ obj-$(CONFIG_ICE) += ice/ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 0629f87a2..202f734f8 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -5230,31 +5230,6 @@ static void e1000_watchdog_task(struct work_struct *work) ew32(TARC(0), tarc0); } - /* disable TSO for pcie and 10/100 speeds, to avoid - * some hardware issues - */ - if (!(adapter->flags & FLAG_TSO_FORCE)) { - switch (adapter->link_speed) { - case SPEED_10: - case SPEED_100: - e_info("10/100 speed: disabling TSO\n"); - netdev->features &= ~NETIF_F_TSO; - netdev->features &= ~NETIF_F_TSO6; - break; - case SPEED_1000: - netdev->features |= NETIF_F_TSO; - netdev->features |= NETIF_F_TSO6; - break; - default: - /* oops */ - break; - } - if (hw->mac.type == e1000_pch_spt) { - netdev->features &= ~NETIF_F_TSO; - netdev->features &= ~NETIF_F_TSO6; - } - } - /* enable transmits in the hardware, need to do this * after setting TARC(0) */ @@ -7191,6 +7166,32 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) NETIF_F_RXCSUM | NETIF_F_HW_CSUM); + /* disable TSO for pcie and 10/100 speeds to avoid + * some hardware issues and for i219 to fix transfer + * speed being capped at 60% + */ + if (!(adapter->flags & FLAG_TSO_FORCE)) { + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + e_info("10/100 speed: disabling TSO\n"); + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + break; + case SPEED_1000: + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + break; + default: + /* oops */ + break; + } + if (hw->mac.type == e1000_pch_spt) { + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + } + } + /* Set user-changeable features (subset of all device features) */ netdev->hw_features = netdev->features; netdev->hw_features |= NETIF_F_RXFCS; diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c index ef4d3762b..ca229b0ef 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.c +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c @@ -44,7 +44,7 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw, return 0; } -struct i40e_diag_reg_test_info i40e_reg_list[] = { +const struct i40e_diag_reg_test_info i40e_reg_list[] = { /* offset mask elements stride */ {I40E_QTX_CTL(0), 0x0000FFBF, 1, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)}, @@ -78,27 +78,28 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw) { i40e_status ret_code = 0; u32 reg, mask; + u32 elements; u32 i, j; for (i = 0; i40e_reg_list[i].offset != 0 && !ret_code; i++) { + elements = i40e_reg_list[i].elements; /* set actual reg range for dynamically allocated resources */ if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) && hw->func_caps.num_tx_qp != 0) - i40e_reg_list[i].elements = hw->func_caps.num_tx_qp; + elements = hw->func_caps.num_tx_qp; if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) || i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) || i40e_reg_list[i].offset == 
I40E_PFINT_ITRN(2, 0) || i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) || i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) && hw->func_caps.num_msix_vectors != 0) - i40e_reg_list[i].elements = - hw->func_caps.num_msix_vectors - 1; + elements = hw->func_caps.num_msix_vectors - 1; /* test register access */ mask = i40e_reg_list[i].mask; - for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) { + for (j = 0; j < elements && !ret_code; j++) { reg = i40e_reg_list[i].offset + (j * i40e_reg_list[i].stride); ret_code = i40e_diag_reg_pattern_test(hw, reg, mask); diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.h b/drivers/net/ethernet/intel/i40e/i40e_diag.h index c3340f320..1db7c6d57 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_diag.h +++ b/drivers/net/ethernet/intel/i40e/i40e_diag.h @@ -20,7 +20,7 @@ struct i40e_diag_reg_test_info { u32 stride; /* bytes between each element */ }; -extern struct i40e_diag_reg_test_info i40e_reg_list[]; +extern const struct i40e_diag_reg_test_info i40e_reg_list[]; i40e_status i40e_diag_reg_test(struct i40e_hw *hw); i40e_status i40e_diag_eeprom_test(struct i40e_hw *hw); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 8a5baaf40..a90872053 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2671,7 +2671,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu) struct i40e_pf *pf = vsi->back; if (i40e_enabled_xdp_vsi(vsi)) { - int frame_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + int frame_size = new_mtu + I40E_PACKET_HDR_PAD; if (frame_size > i40e_max_xdp_frame_size(vsi)) return -EINVAL; @@ -9702,8 +9702,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) pf->hw.aq.asq_last_status)); } /* reinit the misc interrupt */ - if (pf->flags & I40E_FLAG_MSIX_ENABLED) + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { ret = i40e_setup_misc_vector(pf); + if (ret) + goto end_unlock; + } /* Add a filter to drop all Flow control frames from any VSI from being * transmitted. 
By doing so we stop a malicious VF from sending out @@ -11834,6 +11837,8 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, } br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { __u16 mode; @@ -12482,15 +12487,15 @@ static int i40e_add_vsi(struct i40e_vsi *vsi) vsi->id = ctxt.vsi_number; } - vsi->active_filters = 0; - clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); spin_lock_bh(&vsi->mac_filter_hash_lock); + vsi->active_filters = 0; /* If macvlan filters already exist, force them to get loaded */ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { f->state = I40E_FILTER_NEW; f_count++; } spin_unlock_bh(&vsi->mac_filter_hash_lock); + clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); if (f_count) { vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 240083201..1527c67b4 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2595,7 +2595,7 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, !is_multicast_ether_addr(addr) && vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) { dev_err(&pf->pdev->dev, - "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n"); + "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); return -EPERM; } } @@ -4019,9 +4019,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) mac, vf_id); } - /* Force the VF driver stop so it has to reload with new MAC address */ + /* Force the VF interface down so it has to bring up with new MAC + * address + */ i40e_vc_disable_vf(vf); - dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); + dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n"); error_param: return ret; diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile deleted file mode 100644 index 3c5c6e962..000000000 --- a/drivers/net/ethernet/intel/i40evf/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -# Copyright(c) 2013 - 2018 Intel Corporation. - -# -## Makefile for the Intel(R) 40GbE VF driver -# -# - -ccflags-y += -I$(src) -subdir-ccflags-y += -I$(src) - -obj-$(CONFIG_I40EVF) += i40evf.o - -i40evf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \ - i40e_txrx.o i40e_common.o i40e_adminq.o i40evf_client.o - diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h deleted file mode 100644 index 5fd852946..000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h +++ /dev/null @@ -1,2717 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_ADMINQ_CMD_H_ -#define _I40E_ADMINQ_CMD_H_ - -/* This header file defines the i40e Admin Queue commands and is shared between - * i40e Firmware and Software. - * - * This file needs to comply with the Linux Kernel coding style. - */ - -#define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR_X722 0x0005 -#define I40E_FW_API_VERSION_MINOR_X710 0x0007 - -#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? 
\ - I40E_FW_API_VERSION_MINOR_X710 : \ - I40E_FW_API_VERSION_MINOR_X722) - -/* API version 1.7 implements additional link and PHY-specific APIs */ -#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007 - -struct i40e_aq_desc { - __le16 flags; - __le16 opcode; - __le16 datalen; - __le16 retval; - __le32 cookie_high; - __le32 cookie_low; - union { - struct { - __le32 param0; - __le32 param1; - __le32 param2; - __le32 param3; - } internal; - struct { - __le32 param0; - __le32 param1; - __le32 addr_high; - __le32 addr_low; - } external; - u8 raw[16]; - } params; -}; - -/* Flags sub-structure - * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | - * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | - */ - -/* command flags and offsets*/ -#define I40E_AQ_FLAG_DD_SHIFT 0 -#define I40E_AQ_FLAG_CMP_SHIFT 1 -#define I40E_AQ_FLAG_ERR_SHIFT 2 -#define I40E_AQ_FLAG_VFE_SHIFT 3 -#define I40E_AQ_FLAG_LB_SHIFT 9 -#define I40E_AQ_FLAG_RD_SHIFT 10 -#define I40E_AQ_FLAG_VFC_SHIFT 11 -#define I40E_AQ_FLAG_BUF_SHIFT 12 -#define I40E_AQ_FLAG_SI_SHIFT 13 -#define I40E_AQ_FLAG_EI_SHIFT 14 -#define I40E_AQ_FLAG_FE_SHIFT 15 - -#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ -#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ -#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ -#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ -#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ -#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ -#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ -#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ -#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ -#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ -#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ - -/* error codes */ -enum i40e_admin_queue_err { - I40E_AQ_RC_OK = 0, /* success */ - I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ - I40E_AQ_RC_ENOENT = 2, /* No such element */ - I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ - I40E_AQ_RC_EINTR = 4, /* operation interrupted */ - I40E_AQ_RC_EIO = 5, /* I/O error */ - I40E_AQ_RC_ENXIO = 6, /* No such resource */ - I40E_AQ_RC_E2BIG = 7, /* Arg too long */ - I40E_AQ_RC_EAGAIN = 8, /* Try again */ - I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ - I40E_AQ_RC_EACCES = 10, /* Permission denied */ - I40E_AQ_RC_EFAULT = 11, /* Bad address */ - I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ - I40E_AQ_RC_EEXIST = 13, /* object already exists */ - I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ - I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ - I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ - I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ - I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ - I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ - I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ - I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ - I40E_AQ_RC_EFBIG = 22, /* File too large */ -}; - -/* Admin Queue command opcodes */ -enum i40e_admin_queue_opc { - /* aq commands */ - i40e_aqc_opc_get_version = 0x0001, - i40e_aqc_opc_driver_version = 0x0002, - i40e_aqc_opc_queue_shutdown = 0x0003, - i40e_aqc_opc_set_pf_context = 0x0004, - - /* resource ownership */ - i40e_aqc_opc_request_resource = 0x0008, - i40e_aqc_opc_release_resource = 0x0009, - - i40e_aqc_opc_list_func_capabilities = 0x000A, - i40e_aqc_opc_list_dev_capabilities = 0x000B, - - /* Proxy commands */ - 
i40e_aqc_opc_set_proxy_config = 0x0104, - i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105, - - /* LAA */ - i40e_aqc_opc_mac_address_read = 0x0107, - i40e_aqc_opc_mac_address_write = 0x0108, - - /* PXE */ - i40e_aqc_opc_clear_pxe_mode = 0x0110, - - /* WoL commands */ - i40e_aqc_opc_set_wol_filter = 0x0120, - i40e_aqc_opc_get_wake_reason = 0x0121, - - /* internal switch commands */ - i40e_aqc_opc_get_switch_config = 0x0200, - i40e_aqc_opc_add_statistics = 0x0201, - i40e_aqc_opc_remove_statistics = 0x0202, - i40e_aqc_opc_set_port_parameters = 0x0203, - i40e_aqc_opc_get_switch_resource_alloc = 0x0204, - i40e_aqc_opc_set_switch_config = 0x0205, - i40e_aqc_opc_rx_ctl_reg_read = 0x0206, - i40e_aqc_opc_rx_ctl_reg_write = 0x0207, - - i40e_aqc_opc_add_vsi = 0x0210, - i40e_aqc_opc_update_vsi_parameters = 0x0211, - i40e_aqc_opc_get_vsi_parameters = 0x0212, - - i40e_aqc_opc_add_pv = 0x0220, - i40e_aqc_opc_update_pv_parameters = 0x0221, - i40e_aqc_opc_get_pv_parameters = 0x0222, - - i40e_aqc_opc_add_veb = 0x0230, - i40e_aqc_opc_update_veb_parameters = 0x0231, - i40e_aqc_opc_get_veb_parameters = 0x0232, - - i40e_aqc_opc_delete_element = 0x0243, - - i40e_aqc_opc_add_macvlan = 0x0250, - i40e_aqc_opc_remove_macvlan = 0x0251, - i40e_aqc_opc_add_vlan = 0x0252, - i40e_aqc_opc_remove_vlan = 0x0253, - i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, - i40e_aqc_opc_add_tag = 0x0255, - i40e_aqc_opc_remove_tag = 0x0256, - i40e_aqc_opc_add_multicast_etag = 0x0257, - i40e_aqc_opc_remove_multicast_etag = 0x0258, - i40e_aqc_opc_update_tag = 0x0259, - i40e_aqc_opc_add_control_packet_filter = 0x025A, - i40e_aqc_opc_remove_control_packet_filter = 0x025B, - i40e_aqc_opc_add_cloud_filters = 0x025C, - i40e_aqc_opc_remove_cloud_filters = 0x025D, - i40e_aqc_opc_clear_wol_switch_filters = 0x025E, - - i40e_aqc_opc_add_mirror_rule = 0x0260, - i40e_aqc_opc_delete_mirror_rule = 0x0261, - - /* Dynamic Device Personalization */ - i40e_aqc_opc_write_personalization_profile = 0x0270, - i40e_aqc_opc_get_personalization_profile_list = 0x0271, - - /* DCB commands */ - i40e_aqc_opc_dcb_ignore_pfc = 0x0301, - i40e_aqc_opc_dcb_updated = 0x0302, - i40e_aqc_opc_set_dcb_parameters = 0x0303, - - /* TX scheduler */ - i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, - i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, - i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, - i40e_aqc_opc_query_vsi_bw_config = 0x0408, - i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, - i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, - - i40e_aqc_opc_enable_switching_comp_ets = 0x0413, - i40e_aqc_opc_modify_switching_comp_ets = 0x0414, - i40e_aqc_opc_disable_switching_comp_ets = 0x0415, - i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, - i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, - i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, - i40e_aqc_opc_query_port_ets_config = 0x0419, - i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, - i40e_aqc_opc_suspend_port_tx = 0x041B, - i40e_aqc_opc_resume_port_tx = 0x041C, - i40e_aqc_opc_configure_partition_bw = 0x041D, - /* hmc */ - i40e_aqc_opc_query_hmc_resource_profile = 0x0500, - i40e_aqc_opc_set_hmc_resource_profile = 0x0501, - - /* phy commands*/ - i40e_aqc_opc_get_phy_abilities = 0x0600, - i40e_aqc_opc_set_phy_config = 0x0601, - i40e_aqc_opc_set_mac_config = 0x0603, - i40e_aqc_opc_set_link_restart_an = 0x0605, - i40e_aqc_opc_get_link_status = 0x0607, - i40e_aqc_opc_set_phy_int_mask = 0x0613, - i40e_aqc_opc_get_local_advt_reg = 0x0614, - i40e_aqc_opc_set_local_advt_reg = 0x0615, - 
i40e_aqc_opc_get_partner_advt = 0x0616, - i40e_aqc_opc_set_lb_modes = 0x0618, - i40e_aqc_opc_get_phy_wol_caps = 0x0621, - i40e_aqc_opc_set_phy_debug = 0x0622, - i40e_aqc_opc_upload_ext_phy_fm = 0x0625, - i40e_aqc_opc_run_phy_activity = 0x0626, - i40e_aqc_opc_set_phy_register = 0x0628, - i40e_aqc_opc_get_phy_register = 0x0629, - - /* NVM commands */ - i40e_aqc_opc_nvm_read = 0x0701, - i40e_aqc_opc_nvm_erase = 0x0702, - i40e_aqc_opc_nvm_update = 0x0703, - i40e_aqc_opc_nvm_config_read = 0x0704, - i40e_aqc_opc_nvm_config_write = 0x0705, - i40e_aqc_opc_oem_post_update = 0x0720, - i40e_aqc_opc_thermal_sensor = 0x0721, - - /* virtualization commands */ - i40e_aqc_opc_send_msg_to_pf = 0x0801, - i40e_aqc_opc_send_msg_to_vf = 0x0802, - i40e_aqc_opc_send_msg_to_peer = 0x0803, - - /* alternate structure */ - i40e_aqc_opc_alternate_write = 0x0900, - i40e_aqc_opc_alternate_write_indirect = 0x0901, - i40e_aqc_opc_alternate_read = 0x0902, - i40e_aqc_opc_alternate_read_indirect = 0x0903, - i40e_aqc_opc_alternate_write_done = 0x0904, - i40e_aqc_opc_alternate_set_mode = 0x0905, - i40e_aqc_opc_alternate_clear_port = 0x0906, - - /* LLDP commands */ - i40e_aqc_opc_lldp_get_mib = 0x0A00, - i40e_aqc_opc_lldp_update_mib = 0x0A01, - i40e_aqc_opc_lldp_add_tlv = 0x0A02, - i40e_aqc_opc_lldp_update_tlv = 0x0A03, - i40e_aqc_opc_lldp_delete_tlv = 0x0A04, - i40e_aqc_opc_lldp_stop = 0x0A05, - i40e_aqc_opc_lldp_start = 0x0A06, - - /* Tunnel commands */ - i40e_aqc_opc_add_udp_tunnel = 0x0B00, - i40e_aqc_opc_del_udp_tunnel = 0x0B01, - i40e_aqc_opc_set_rss_key = 0x0B02, - i40e_aqc_opc_set_rss_lut = 0x0B03, - i40e_aqc_opc_get_rss_key = 0x0B04, - i40e_aqc_opc_get_rss_lut = 0x0B05, - - /* Async Events */ - i40e_aqc_opc_event_lan_overflow = 0x1001, - - /* OEM commands */ - i40e_aqc_opc_oem_parameter_change = 0xFE00, - i40e_aqc_opc_oem_device_status_change = 0xFE01, - i40e_aqc_opc_oem_ocsd_initialize = 0xFE02, - i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, - - /* debug commands */ - i40e_aqc_opc_debug_read_reg = 0xFF03, - i40e_aqc_opc_debug_write_reg = 0xFF04, - i40e_aqc_opc_debug_modify_reg = 0xFF07, - i40e_aqc_opc_debug_dump_internals = 0xFF08, -}; - -/* command structures and indirect data structures */ - -/* Structure naming conventions: - * - no suffix for direct command descriptor structures - * - _data for indirect sent data - * - _resp for indirect return data (data which is both will use _data) - * - _completion for direct return data - * - _element_ for repeated elements (may also be _data or _resp) - * - * Command structures are expected to overlay the params.raw member of the basic - * descriptor, and as such cannot exceed 16 bytes in length. - */ - -/* This macro is used to generate a compilation error if a structure - * is not exactly the correct length. It gives a divide by zero error if the - * structure is not of the correct size, otherwise it creates an enum that is - * never used. - */ -#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \ - { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } - -/* This macro is used extensively to ensure that command structures are 16 - * bytes in length as they have to map to the raw array of that size. 
- */ -#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) - -/* internal (0x00XX) commands */ - -/* Get version (direct 0x0001) */ -struct i40e_aqc_get_version { - __le32 rom_ver; - __le32 fw_build; - __le16 fw_major; - __le16 fw_minor; - __le16 api_major; - __le16 api_minor; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_version); - -/* Send driver version (indirect 0x0002) */ -struct i40e_aqc_driver_version { - u8 driver_major_ver; - u8 driver_minor_ver; - u8 driver_build_ver; - u8 driver_subbuild_ver; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_driver_version); - -/* Queue Shutdown (direct 0x0003) */ -struct i40e_aqc_queue_shutdown { - __le32 driver_unloading; -#define I40E_AQ_DRIVER_UNLOADING 0x1 - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); - -/* Set PF context (0x0004, direct) */ -struct i40e_aqc_set_pf_context { - u8 pf_id; - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context); - -/* Request resource ownership (direct 0x0008) - * Release resource ownership (direct 0x0009) - */ -#define I40E_AQ_RESOURCE_NVM 1 -#define I40E_AQ_RESOURCE_SDP 2 -#define I40E_AQ_RESOURCE_ACCESS_READ 1 -#define I40E_AQ_RESOURCE_ACCESS_WRITE 2 -#define I40E_AQ_RESOURCE_NVM_READ_TIMEOUT 3000 -#define I40E_AQ_RESOURCE_NVM_WRITE_TIMEOUT 180000 - -struct i40e_aqc_request_resource { - __le16 resource_id; - __le16 access_type; - __le32 timeout; - __le32 resource_number; - u8 reserved[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_request_resource); - -/* Get function capabilities (indirect 0x000A) - * Get device capabilities (indirect 0x000B) - */ -struct i40e_aqc_list_capabilites { - u8 command_flags; -#define I40E_AQ_LIST_CAP_PF_INDEX_EN 1 - u8 pf_index; - u8 reserved[2]; - __le32 count; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_list_capabilites); - -struct i40e_aqc_list_capabilities_element_resp { - __le16 id; - u8 major_rev; - u8 minor_rev; - __le32 number; - __le32 logical_id; - __le32 phys_id; - u8 reserved[16]; -}; - -/* list of caps */ - -#define I40E_AQ_CAP_ID_SWITCH_MODE 0x0001 -#define I40E_AQ_CAP_ID_MNG_MODE 0x0002 -#define I40E_AQ_CAP_ID_NPAR_ACTIVE 0x0003 -#define I40E_AQ_CAP_ID_OS2BMC_CAP 0x0004 -#define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005 -#define I40E_AQ_CAP_ID_ALTERNATE_RAM 0x0006 -#define I40E_AQ_CAP_ID_WOL_AND_PROXY 0x0008 -#define I40E_AQ_CAP_ID_SRIOV 0x0012 -#define I40E_AQ_CAP_ID_VF 0x0013 -#define I40E_AQ_CAP_ID_VMDQ 0x0014 -#define I40E_AQ_CAP_ID_8021QBG 0x0015 -#define I40E_AQ_CAP_ID_8021QBR 0x0016 -#define I40E_AQ_CAP_ID_VSI 0x0017 -#define I40E_AQ_CAP_ID_DCB 0x0018 -#define I40E_AQ_CAP_ID_FCOE 0x0021 -#define I40E_AQ_CAP_ID_ISCSI 0x0022 -#define I40E_AQ_CAP_ID_RSS 0x0040 -#define I40E_AQ_CAP_ID_RXQ 0x0041 -#define I40E_AQ_CAP_ID_TXQ 0x0042 -#define I40E_AQ_CAP_ID_MSIX 0x0043 -#define I40E_AQ_CAP_ID_VF_MSIX 0x0044 -#define I40E_AQ_CAP_ID_FLOW_DIRECTOR 0x0045 -#define I40E_AQ_CAP_ID_1588 0x0046 -#define I40E_AQ_CAP_ID_IWARP 0x0051 -#define I40E_AQ_CAP_ID_LED 0x0061 -#define I40E_AQ_CAP_ID_SDP 0x0062 -#define I40E_AQ_CAP_ID_MDIO 0x0063 -#define I40E_AQ_CAP_ID_WSR_PROT 0x0064 -#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080 -#define I40E_AQ_CAP_ID_FLEX10 0x00F1 -#define I40E_AQ_CAP_ID_CEM 0x00F2 - -/* Set CPPM Configuration (direct 0x0103) */ -struct i40e_aqc_cppm_configuration { - __le16 command_flags; -#define I40E_AQ_CPPM_EN_LTRC 0x0800 -#define I40E_AQ_CPPM_EN_DMCTH 0x1000 -#define I40E_AQ_CPPM_EN_DMCTLX 0x2000 -#define I40E_AQ_CPPM_EN_HPTC 0x4000 -#define 
I40E_AQ_CPPM_EN_DMARC 0x8000 - __le16 ttlx; - __le32 dmacr; - __le16 dmcth; - u8 hptc; - u8 reserved; - __le32 pfltrc; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration); - -/* Set ARP Proxy command / response (indirect 0x0104) */ -struct i40e_aqc_arp_proxy_data { - __le16 command_flags; -#define I40E_AQ_ARP_INIT_IPV4 0x0800 -#define I40E_AQ_ARP_UNSUP_CTL 0x1000 -#define I40E_AQ_ARP_ENA 0x2000 -#define I40E_AQ_ARP_ADD_IPV4 0x4000 -#define I40E_AQ_ARP_DEL_IPV4 0x8000 - __le16 table_id; - __le32 enabled_offloads; -#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020 -#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800 - __le32 ip_addr; - u8 mac_addr[6]; - u8 reserved[2]; -}; - -I40E_CHECK_STRUCT_LEN(0x14, i40e_aqc_arp_proxy_data); - -/* Set NS Proxy Table Entry Command (indirect 0x0105) */ -struct i40e_aqc_ns_proxy_data { - __le16 table_idx_mac_addr_0; - __le16 table_idx_mac_addr_1; - __le16 table_idx_ipv6_0; - __le16 table_idx_ipv6_1; - __le16 control; -#define I40E_AQ_NS_PROXY_ADD_0 0x0001 -#define I40E_AQ_NS_PROXY_DEL_0 0x0002 -#define I40E_AQ_NS_PROXY_ADD_1 0x0004 -#define I40E_AQ_NS_PROXY_DEL_1 0x0008 -#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010 -#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020 -#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040 -#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080 -#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100 -#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200 -#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400 -#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800 -#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000 - u8 mac_addr_0[6]; - u8 mac_addr_1[6]; - u8 local_mac_addr[6]; - u8 ipv6_addr_0[16]; /* Warning! spec specifies BE byte order */ - u8 ipv6_addr_1[16]; -}; - -I40E_CHECK_STRUCT_LEN(0x3c, i40e_aqc_ns_proxy_data); - -/* Manage LAA Command (0x0106) - obsolete */ -struct i40e_aqc_mng_laa { - __le16 command_flags; -#define I40E_AQ_LAA_FLAG_WR 0x8000 - u8 reserved[2]; - __le32 sal; - __le16 sah; - u8 reserved2[6]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_mng_laa); - -/* Manage MAC Address Read Command (indirect 0x0107) */ -struct i40e_aqc_mac_address_read { - __le16 command_flags; -#define I40E_AQC_LAN_ADDR_VALID 0x10 -#define I40E_AQC_SAN_ADDR_VALID 0x20 -#define I40E_AQC_PORT_ADDR_VALID 0x40 -#define I40E_AQC_WOL_ADDR_VALID 0x80 -#define I40E_AQC_MC_MAG_EN_VALID 0x100 -#define I40E_AQC_ADDR_VALID_MASK 0x3F0 - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_read); - -struct i40e_aqc_mac_address_read_data { - u8 pf_lan_mac[6]; - u8 pf_san_mac[6]; - u8 port_mac[6]; - u8 pf_wol_mac[6]; -}; - -I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data); - -/* Manage MAC Address Write Command (0x0108) */ -struct i40e_aqc_mac_address_write { - __le16 command_flags; -#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000 -#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000 -#define I40E_AQC_WRITE_TYPE_PORT 0x8000 -#define I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG 0xC000 -#define I40E_AQC_WRITE_TYPE_MASK 0xC000 - - __le16 mac_sah; - __le32 mac_sal; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_mac_address_write); - -/* PXE commands (0x011x) */ - -/* Clear PXE Command and response (direct 0x0110) */ -struct i40e_aqc_clear_pxe { - u8 rx_cnt; - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe); - -/* Set WoL Filter (0x0120) */ - -struct i40e_aqc_set_wol_filter { - __le16 filter_index; -#define I40E_AQC_MAX_NUM_WOL_FILTERS 8 -#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15 -#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \ - 
I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT) - -#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0 -#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \ - I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT) - __le16 cmd_flags; -#define I40E_AQC_SET_WOL_FILTER 0x8000 -#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000 -#define I40E_AQC_SET_WOL_FILTER_WOL_PRESERVE_ON_PFR 0x2000 -#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0 -#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1 - __le16 valid_flags; -#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000 -#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000 - u8 reserved[2]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter); - -struct i40e_aqc_set_wol_filter_data { - u8 filter[128]; - u8 mask[16]; -}; - -I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data); - -/* Get Wake Reason (0x0121) */ - -struct i40e_aqc_get_wake_reason_completion { - u8 reserved_1[2]; - __le16 wake_reason; -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0 -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \ - I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT) -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8 -#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \ - I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT) - u8 reserved_2[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion); - -/* Switch configuration commands (0x02xx) */ - -/* Used by many indirect commands that only pass an seid and a buffer in the - * command - */ -struct i40e_aqc_switch_seid { - __le16 seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_switch_seid); - -/* Get Switch Configuration command (indirect 0x0200) - * uses i40e_aqc_switch_seid for the descriptor - */ -struct i40e_aqc_get_switch_config_header_resp { - __le16 num_reported; - __le16 num_total; - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_config_header_resp); - -struct i40e_aqc_switch_config_element_resp { - u8 element_type; -#define I40E_AQ_SW_ELEM_TYPE_MAC 1 -#define I40E_AQ_SW_ELEM_TYPE_PF 2 -#define I40E_AQ_SW_ELEM_TYPE_VF 3 -#define I40E_AQ_SW_ELEM_TYPE_EMP 4 -#define I40E_AQ_SW_ELEM_TYPE_BMC 5 -#define I40E_AQ_SW_ELEM_TYPE_PV 16 -#define I40E_AQ_SW_ELEM_TYPE_VEB 17 -#define I40E_AQ_SW_ELEM_TYPE_PA 18 -#define I40E_AQ_SW_ELEM_TYPE_VSI 19 - u8 revision; -#define I40E_AQ_SW_ELEM_REV_1 1 - __le16 seid; - __le16 uplink_seid; - __le16 downlink_seid; - u8 reserved[3]; - u8 connection_type; -#define I40E_AQ_CONN_TYPE_REGULAR 0x1 -#define I40E_AQ_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_CONN_TYPE_CASCADED 0x3 - __le16 scheduler_id; - __le16 element_info; -}; - -I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_config_element_resp); - -/* Get Switch Configuration (indirect 0x0200) - * an array of elements are returned in the response buffer - * the first in the array is the header, remainder are elements - */ -struct i40e_aqc_get_switch_config_resp { - struct i40e_aqc_get_switch_config_header_resp header; - struct i40e_aqc_switch_config_element_resp element[1]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_switch_config_resp); - -/* Add Statistics (direct 0x0201) - * Remove Statistics (direct 0x0202) - */ -struct i40e_aqc_add_remove_statistics { - __le16 seid; - __le16 vlan; - __le16 stat_index; - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_statistics); - -/* Set Port Parameters command (direct 0x0203) */ -struct 
i40e_aqc_set_port_parameters { - __le16 command_flags; -#define I40E_AQ_SET_P_PARAMS_SAVE_BAD_PACKETS 1 -#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */ -#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4 - __le16 bad_frame_vsi; -#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0 -#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF - __le16 default_seid; /* reserved for command */ - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_port_parameters); - -/* Get Switch Resource Allocation (indirect 0x0204) */ -struct i40e_aqc_get_switch_resource_alloc { - u8 num_entries; /* reserved for command */ - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_switch_resource_alloc); - -/* expect an array of these structs in the response buffer */ -struct i40e_aqc_switch_resource_alloc_element_resp { - u8 resource_type; -#define I40E_AQ_RESOURCE_TYPE_VEB 0x0 -#define I40E_AQ_RESOURCE_TYPE_VSI 0x1 -#define I40E_AQ_RESOURCE_TYPE_MACADDR 0x2 -#define I40E_AQ_RESOURCE_TYPE_STAG 0x3 -#define I40E_AQ_RESOURCE_TYPE_ETAG 0x4 -#define I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH 0x5 -#define I40E_AQ_RESOURCE_TYPE_UNICAST_HASH 0x6 -#define I40E_AQ_RESOURCE_TYPE_VLAN 0x7 -#define I40E_AQ_RESOURCE_TYPE_VSI_LIST_ENTRY 0x8 -#define I40E_AQ_RESOURCE_TYPE_ETAG_LIST_ENTRY 0x9 -#define I40E_AQ_RESOURCE_TYPE_VLAN_STAT_POOL 0xA -#define I40E_AQ_RESOURCE_TYPE_MIRROR_RULE 0xB -#define I40E_AQ_RESOURCE_TYPE_QUEUE_SETS 0xC -#define I40E_AQ_RESOURCE_TYPE_VLAN_FILTERS 0xD -#define I40E_AQ_RESOURCE_TYPE_INNER_MAC_FILTERS 0xF -#define I40E_AQ_RESOURCE_TYPE_IP_FILTERS 0x10 -#define I40E_AQ_RESOURCE_TYPE_GRE_VN_KEYS 0x11 -#define I40E_AQ_RESOURCE_TYPE_VN2_KEYS 0x12 -#define I40E_AQ_RESOURCE_TYPE_TUNNEL_PORTS 0x13 - u8 reserved1; - __le16 guaranteed; - __le16 total; - __le16 used; - __le16 total_unalloced; - u8 reserved2[6]; -}; - -I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp); - -/* Set Switch Configuration (direct 0x0205) */ -struct i40e_aqc_set_switch_config { - __le16 flags; -/* flags used for both fields below */ -#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001 -#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002 - __le16 valid_flags; - /* The ethertype in switch_tag is dropped on ingress and used - * internally by the switch. Set this to zero for the default - * of 0x88a8 (802.1ad). Should be zero for firmware API - * versions lower than 1.7. - */ - __le16 switch_tag; - /* The ethertypes in first_tag and second_tag are used to - * match the outer and inner VLAN tags (respectively) when HW - * double VLAN tagging is enabled via the set port parameters - * AQ command. Otherwise these are both ignored. Set them to - * zero for their defaults of 0x8100 (802.1Q). Should be zero - * for firmware API versions lower than 1.7. 
- */ - __le16 first_tag; - __le16 second_tag; - u8 reserved[6]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); - -/* Read Receive control registers (direct 0x0206) - * Write Receive control registers (direct 0x0207) - * used for accessing Rx control registers that can be - * slow and need special handling when under high Rx load - */ -struct i40e_aqc_rx_ctl_reg_read_write { - __le32 reserved1; - __le32 address; - __le32 reserved2; - __le32 value; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write); - -/* Add VSI (indirect 0x0210) - * this indirect command uses struct i40e_aqc_vsi_properties_data - * as the indirect buffer (128 bytes) - * - * Update VSI (indirect 0x211) - * uses the same data structure as Add VSI - * - * Get VSI (indirect 0x0212) - * uses the same completion and data structure as Add VSI - */ -struct i40e_aqc_add_get_update_vsi { - __le16 uplink_seid; - u8 connection_type; -#define I40E_AQ_VSI_CONN_TYPE_NORMAL 0x1 -#define I40E_AQ_VSI_CONN_TYPE_DEFAULT 0x2 -#define I40E_AQ_VSI_CONN_TYPE_CASCADED 0x3 - u8 reserved1; - u8 vf_id; - u8 reserved2; - __le16 vsi_flags; -#define I40E_AQ_VSI_TYPE_SHIFT 0x0 -#define I40E_AQ_VSI_TYPE_MASK (0x3 << I40E_AQ_VSI_TYPE_SHIFT) -#define I40E_AQ_VSI_TYPE_VF 0x0 -#define I40E_AQ_VSI_TYPE_VMDQ2 0x1 -#define I40E_AQ_VSI_TYPE_PF 0x2 -#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3 -#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4 - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi); - -struct i40e_aqc_add_get_update_vsi_completion { - __le16 seid; - __le16 vsi_number; - __le16 vsi_used; - __le16 vsi_free; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_get_update_vsi_completion); - -struct i40e_aqc_vsi_properties_data { - /* first 96 byte are written by SW */ - __le16 valid_sections; -#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 -#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 -#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 -#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 -#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 -#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 -#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 -#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 -#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 -#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 - /* switch section */ - __le16 switch_id; /* 12bit id combined with flags below */ -#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 -#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) -#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 -#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 -#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 - u8 sw_reserved[2]; - /* security section */ - u8 sec_flags; -#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 -#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 -#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 - u8 sec_reserved; - /* VLAN section */ - __le16 pvid; /* VLANS include priority bits */ - __le16 fcoe_pvid; - u8 port_vlan_flags; -#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 -#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ - I40E_AQ_VSI_PVLAN_MODE_SHIFT) -#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 -#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 -#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 -#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 -#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 -#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ - I40E_AQ_VSI_PVLAN_EMOD_SHIFT) -#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 -#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 -#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 -#define 
I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 - u8 pvlan_reserved[3]; - /* ingress egress up sections */ - __le32 ingress_table; /* bitmap, 3 bits per up */ -#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 -#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 -#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 -#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 -#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 -#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 -#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 -#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) -#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 -#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ - I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) - __le32 egress_table; /* same defines as for ingress table */ - /* cascaded PV section */ - __le16 cas_pv_tag; - u8 cas_pv_flags; -#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ - I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) -#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 -#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 -#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 -#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 -#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 -#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 - u8 cas_pv_reserved; - /* queue mapping section */ - __le16 mapping_flags; -#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 -#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 - __le16 queue_mapping[16]; -#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 -#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) - __le16 tc_mapping[8]; -#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 -#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ - I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) -#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 -#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ - I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) - /* queueing option section */ - u8 queueing_opt_flags; -#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 -#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 -#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 -#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 -#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 -#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 - u8 queueing_opt_reserved[3]; - /* scheduler section */ - u8 up_enable_bits; - u8 sched_reserved; - /* outer up section */ - __le32 outer_up_table; /* same structure and defines as ingress tbl */ - u8 cmd_reserved[8]; - /* last 32 bytes are written by FW */ - __le16 qs_handle[8]; -#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF - __le16 stat_counter_idx; - __le16 sched_id; - u8 resp_reserved[12]; -}; - -I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); - -/* Add Port Virtualizer (direct 0x0220) - * also used for update PV (direct 0x0221) but only flags are used - * (IS_CTRL_PORT only works on add PV) - */ -struct i40e_aqc_add_update_pv { - __le16 command_flags; -#define I40E_AQC_PV_FLAG_PV_TYPE 0x1 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_STAG_EN 0x2 -#define I40E_AQC_PV_FLAG_FWD_UNKNOWN_ETAG_EN 0x4 -#define I40E_AQC_PV_FLAG_IS_CTRL_PORT 0x8 - __le16 uplink_seid; - __le16 connected_seid; - u8 reserved[10]; -}; - 
-I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv); - -struct i40e_aqc_add_update_pv_completion { - /* reserved for update; for add also encodes error if rc == ENOSPC */ - __le16 pv_seid; -#define I40E_AQC_PV_ERR_FLAG_NO_PV 0x1 -#define I40E_AQC_PV_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_PV_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_PV_ERR_FLAG_NO_ENTRY 0x8 - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_update_pv_completion); - -/* Get PV Params (direct 0x0222) - * uses i40e_aqc_switch_seid for the descriptor - */ - -struct i40e_aqc_get_pv_params_completion { - __le16 seid; - __le16 default_stag; - __le16 pv_flags; /* same flags as add_pv */ -#define I40E_AQC_GET_PV_PV_TYPE 0x1 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_STAG 0x2 -#define I40E_AQC_GET_PV_FRWD_UNKNOWN_ETAG 0x4 - u8 reserved[8]; - __le16 default_port_seid; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_pv_params_completion); - -/* Add VEB (direct 0x0230) */ -struct i40e_aqc_add_veb { - __le16 uplink_seid; - __le16 downlink_seid; - __le16 veb_flags; -#define I40E_AQC_ADD_VEB_FLOATING 0x1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT 1 -#define I40E_AQC_ADD_VEB_PORT_TYPE_MASK (0x3 << \ - I40E_AQC_ADD_VEB_PORT_TYPE_SHIFT) -#define I40E_AQC_ADD_VEB_PORT_TYPE_DEFAULT 0x2 -#define I40E_AQC_ADD_VEB_PORT_TYPE_DATA 0x4 -#define I40E_AQC_ADD_VEB_ENABLE_L2_FILTER 0x8 /* deprecated */ -#define I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS 0x10 - u8 enable_tcs; - u8 reserved[9]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb); - -struct i40e_aqc_add_veb_completion { - u8 reserved[6]; - __le16 switch_seid; - /* also encodes error if rc == ENOSPC; codes are the same as add_pv */ - __le16 veb_seid; -#define I40E_AQC_VEB_ERR_FLAG_NO_VEB 0x1 -#define I40E_AQC_VEB_ERR_FLAG_NO_SCHED 0x2 -#define I40E_AQC_VEB_ERR_FLAG_NO_COUNTER 0x4 -#define I40E_AQC_VEB_ERR_FLAG_NO_ENTRY 0x8 - __le16 statistic_index; - __le16 vebs_used; - __le16 vebs_free; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_veb_completion); - -/* Get VEB Parameters (direct 0x0232) - * uses i40e_aqc_switch_seid for the descriptor - */ -struct i40e_aqc_get_veb_parameters_completion { - __le16 seid; - __le16 switch_id; - __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ - __le16 statistic_index; - __le16 vebs_used; - __le16 vebs_free; - u8 reserved[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); - -/* Delete Element (direct 0x0243) - * uses the generic i40e_aqc_switch_seid - */ - -/* Add MAC-VLAN (indirect 0x0250) */ - -/* used for the command for most vlan commands */ -struct i40e_aqc_macvlan { - __le16 num_addresses; - __le16 seid[3]; -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) -#define I40E_AQC_MACVLAN_CMD_SEID_VALID 0x8000 - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_macvlan); - -/* indirect data for command and response */ -struct i40e_aqc_add_macvlan_element_data { - u8 mac_addr[6]; - __le16 vlan_tag; - __le16 flags; -#define I40E_AQC_MACVLAN_ADD_PERFECT_MATCH 0x0001 -#define I40E_AQC_MACVLAN_ADD_HASH_MATCH 0x0002 -#define I40E_AQC_MACVLAN_ADD_IGNORE_VLAN 0x0004 -#define I40E_AQC_MACVLAN_ADD_TO_QUEUE 0x0008 -#define I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC 0x0010 - __le16 queue_number; -#define I40E_AQC_MACVLAN_CMD_QUEUE_SHIFT 0 -#define I40E_AQC_MACVLAN_CMD_QUEUE_MASK (0x7FF << \ - I40E_AQC_MACVLAN_CMD_SEID_NUM_SHIFT) - /* response section */ - u8 match_method; -#define I40E_AQC_MM_PERFECT_MATCH 0x01 -#define 
I40E_AQC_MM_HASH_MATCH 0x02 -#define I40E_AQC_MM_ERR_NO_RES 0xFF - u8 reserved1[3]; -}; - -struct i40e_aqc_add_remove_macvlan_completion { - __le16 perfect_mac_used; - __le16 perfect_mac_free; - __le16 unicast_hash_free; - __le16 multicast_hash_free; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_macvlan_completion); - -/* Remove MAC-VLAN (indirect 0x0251) - * uses i40e_aqc_macvlan for the descriptor - * data points to an array of num_addresses of elements - */ - -struct i40e_aqc_remove_macvlan_element_data { - u8 mac_addr[6]; - __le16 vlan_tag; - u8 flags; -#define I40E_AQC_MACVLAN_DEL_PERFECT_MATCH 0x01 -#define I40E_AQC_MACVLAN_DEL_HASH_MATCH 0x02 -#define I40E_AQC_MACVLAN_DEL_IGNORE_VLAN 0x08 -#define I40E_AQC_MACVLAN_DEL_ALL_VSIS 0x10 - u8 reserved[3]; - /* reply section */ - u8 error_code; -#define I40E_AQC_REMOVE_MACVLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_MACVLAN_FAIL 0xFF - u8 reply_reserved[3]; -}; - -/* Add VLAN (indirect 0x0252) - * Remove VLAN (indirect 0x0253) - * use the generic i40e_aqc_macvlan for the command - */ -struct i40e_aqc_add_remove_vlan_element_data { - __le16 vlan_tag; - u8 vlan_flags; -/* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_LOCAL 0x1 -#define I40E_AQC_ADD_PVLAN_TYPE_SHIFT 1 -#define I40E_AQC_ADD_PVLAN_TYPE_MASK (0x3 << I40E_AQC_ADD_PVLAN_TYPE_SHIFT) -#define I40E_AQC_ADD_PVLAN_TYPE_REGULAR 0x0 -#define I40E_AQC_ADD_PVLAN_TYPE_PRIMARY 0x2 -#define I40E_AQC_ADD_PVLAN_TYPE_SECONDARY 0x4 -#define I40E_AQC_VLAN_PTYPE_SHIFT 3 -#define I40E_AQC_VLAN_PTYPE_MASK (0x3 << I40E_AQC_VLAN_PTYPE_SHIFT) -#define I40E_AQC_VLAN_PTYPE_REGULAR_VSI 0x0 -#define I40E_AQC_VLAN_PTYPE_PROMISC_VSI 0x8 -#define I40E_AQC_VLAN_PTYPE_COMMUNITY_VSI 0x10 -#define I40E_AQC_VLAN_PTYPE_ISOLATED_VSI 0x18 -/* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_ALL 0x1 - u8 reserved; - u8 result; -/* flags for add VLAN */ -#define I40E_AQC_ADD_VLAN_SUCCESS 0x0 -#define I40E_AQC_ADD_VLAN_FAIL_REQUEST 0xFE -#define I40E_AQC_ADD_VLAN_FAIL_RESOURCE 0xFF -/* flags for remove VLAN */ -#define I40E_AQC_REMOVE_VLAN_SUCCESS 0x0 -#define I40E_AQC_REMOVE_VLAN_FAIL 0xFF - u8 reserved1[3]; -}; - -struct i40e_aqc_add_remove_vlan_completion { - u8 reserved[4]; - __le16 vlans_used; - __le16 vlans_free; - __le32 addr_high; - __le32 addr_low; -}; - -/* Set VSI Promiscuous Modes (direct 0x0254) */ -struct i40e_aqc_set_vsi_promiscuous_modes { - __le16 promiscuous_flags; - __le16 valid_flags; -/* flags used for both fields above */ -#define I40E_AQC_SET_VSI_PROMISC_UNICAST 0x01 -#define I40E_AQC_SET_VSI_PROMISC_MULTICAST 0x02 -#define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 -#define I40E_AQC_SET_VSI_DEFAULT 0x08 -#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 -#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 - __le16 seid; -#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF - __le16 vlan_tag; -#define I40E_AQC_SET_VSI_VLAN_MASK 0x0FFF -#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes); - -/* Add S/E-tag command (direct 0x0255) - * Uses generic i40e_aqc_add_remove_tag_completion for completion - */ -struct i40e_aqc_add_tag { - __le16 flags; -#define I40E_AQC_ADD_TAG_FLAG_TO_QUEUE 0x0001 - __le16 seid; -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_TAG_CMD_SEID_NUM_SHIFT) - __le16 tag; - __le16 queue_number; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_tag); - -struct i40e_aqc_add_remove_tag_completion { - u8 
reserved[12]; - __le16 tags_used; - __le16 tags_free; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_tag_completion); - -/* Remove S/E-tag command (direct 0x0256) - * Uses generic i40e_aqc_add_remove_tag_completion for completion - */ -struct i40e_aqc_remove_tag { - __le16 seid; -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_REMOVE_TAG_CMD_SEID_NUM_SHIFT) - __le16 tag; - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_tag); - -/* Add multicast E-Tag (direct 0x0257) - * del multicast E-Tag (direct 0x0258) only uses pv_seid and etag fields - * and no external data - */ -struct i40e_aqc_add_remove_mcast_etag { - __le16 pv_seid; - __le16 etag; - u8 num_unicast_etags; - u8 reserved[3]; - __le32 addr_high; /* address of array of 2-byte s-tags */ - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag); - -struct i40e_aqc_add_remove_mcast_etag_completion { - u8 reserved[4]; - __le16 mcast_etags_used; - __le16 mcast_etags_free; - __le32 addr_high; - __le32 addr_low; - -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_mcast_etag_completion); - -/* Update S/E-Tag (direct 0x0259) */ -struct i40e_aqc_update_tag { - __le16 seid; -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_UPDATE_TAG_CMD_SEID_NUM_SHIFT) - __le16 old_tag; - __le16 new_tag; - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag); - -struct i40e_aqc_update_tag_completion { - u8 reserved[12]; - __le16 tags_used; - __le16 tags_free; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_update_tag_completion); - -/* Add Control Packet filter (direct 0x025A) - * Remove Control Packet filter (direct 0x025B) - * uses the i40e_aqc_add_oveb_cloud, - * and the generic direct completion structure - */ -struct i40e_aqc_add_remove_control_packet_filter { - u8 mac[6]; - __le16 etype; - __le16 flags; -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC 0x0001 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP 0x0002 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE 0x0004 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TX 0x0008 -#define I40E_AQC_ADD_CONTROL_PACKET_FLAGS_RX 0x0000 - __le16 seid; -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_CONTROL_PACKET_CMD_SEID_NUM_SHIFT) - __le16 queue; - u8 reserved[2]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter); - -struct i40e_aqc_add_remove_control_packet_filter_completion { - __le16 mac_etype_used; - __le16 etype_used; - __le16 mac_etype_free; - __le16 etype_free; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_control_packet_filter_completion); - -/* Add Cloud filters (indirect 0x025C) - * Remove Cloud filters (indirect 0x025D) - * uses the i40e_aqc_add_remove_cloud_filters, - * and the generic indirect completion structure - */ -struct i40e_aqc_add_remove_cloud_filters { - u8 num_filters; - u8 reserved; - __le16 seid; -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \ - I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT) - u8 big_buffer_flag; -#define I40E_AQC_ADD_CLOUD_CMD_BB 1 - u8 reserved2[3]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters); - -struct i40e_aqc_cloud_filters_element_data { - u8 outer_mac[6]; - u8 inner_mac[6]; - __le16 inner_vlan; - union { - struct { - u8 reserved[12]; - u8 
data[4]; - } v4; - struct { - u8 data[16]; - } v6; - struct { - __le16 data[8]; - } raw_v6; - } ipaddr; - __le16 flags; -#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \ - I40E_AQC_ADD_CLOUD_FILTER_SHIFT) -/* 0x0000 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001 -/* 0x0002 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004 -/* 0x0005 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_TEN_ID 0x0006 -/* 0x0007 reserved */ -/* 0x0008 reserved */ -#define I40E_AQC_ADD_CLOUD_FILTER_OMAC 0x0009 -#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A -#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B -#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C -/* 0x0010 to 0x0017 is for custom filters */ -#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */ -#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */ -#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */ - -#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080 -#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6 -#define I40E_AQC_ADD_CLOUD_VNK_MASK 0x00C0 -#define I40E_AQC_ADD_CLOUD_FLAGS_IPV4 0 -#define I40E_AQC_ADD_CLOUD_FLAGS_IPV6 0x0100 - -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT 9 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK 0x1E00 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN 0 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC 1 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE 2 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_RESERVED 4 -#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN_GPE 5 - -#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_MAC 0x2000 -#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_INNER_MAC 0x4000 -#define I40E_AQC_ADD_CLOUD_FLAGS_SHARED_OUTER_IP 0x8000 - - __le32 tenant_id; - u8 reserved[4]; - __le16 queue_number; -#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0 -#define I40E_AQC_ADD_CLOUD_QUEUE_MASK (0x7FF << \ - I40E_AQC_ADD_CLOUD_QUEUE_SHIFT) - u8 reserved2[14]; - /* response section */ - u8 allocation_result; -#define I40E_AQC_ADD_CLOUD_FILTER_SUCCESS 0x0 -#define I40E_AQC_ADD_CLOUD_FILTER_FAIL 0xFF - u8 response_reserved[7]; -}; - -I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data); - -/* i40e_aqc_cloud_filters_element_bb is used when - * I40E_AQC_ADD_CLOUD_CMD_BB flag is set. 
- */ -struct i40e_aqc_cloud_filters_element_bb { - struct i40e_aqc_cloud_filters_element_data element; - u16 general_fields[32]; -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29 -#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30 -}; - -I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb); - -struct i40e_aqc_remove_cloud_filters_completion { - __le16 perfect_ovlan_used; - __le16 perfect_ovlan_free; - __le16 vlan_used; - __le16 vlan_free; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion); - -/* Replace filter Command 0x025F - * uses the i40e_aqc_replace_cloud_filters, - * and the generic indirect completion structure - */ -struct i40e_filter_data { - u8 filter_type; - u8 input[3]; -}; - -I40E_CHECK_STRUCT_LEN(4, i40e_filter_data); - -struct i40e_aqc_replace_cloud_filters_cmd { - u8 valid_flags; -#define I40E_AQC_REPLACE_L1_FILTER 0x0 -#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1 -#define I40E_AQC_GET_CLOUD_FILTERS 0x2 -#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4 -#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8 - u8 old_filter_type; - u8 new_filter_type; - u8 tr_bit; - u8 reserved[4]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd); - -struct i40e_aqc_replace_cloud_filters_cmd_buf { - u8 data[32]; -/* Filter type INPUT codes*/ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED BIT(7) - -/* Field Vector offsets */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11 -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12 -/* big FLU */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14 -/* big FLU */ -#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15 - -#define 
I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37 - struct i40e_filter_data filters[8]; -}; - -I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf); - -/* Add Mirror Rule (indirect or direct 0x0260) - * Delete Mirror Rule (indirect or direct 0x0261) - * note: some rule types (4,5) do not use an external buffer. - * take care to set the flags correctly. - */ -struct i40e_aqc_add_delete_mirror_rule { - __le16 seid; - __le16 rule_type; -#define I40E_AQC_MIRROR_RULE_TYPE_SHIFT 0 -#define I40E_AQC_MIRROR_RULE_TYPE_MASK (0x7 << \ - I40E_AQC_MIRROR_RULE_TYPE_SHIFT) -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_INGRESS 1 -#define I40E_AQC_MIRROR_RULE_TYPE_VPORT_EGRESS 2 -#define I40E_AQC_MIRROR_RULE_TYPE_VLAN 3 -#define I40E_AQC_MIRROR_RULE_TYPE_ALL_INGRESS 4 -#define I40E_AQC_MIRROR_RULE_TYPE_ALL_EGRESS 5 - __le16 num_entries; - __le16 destination; /* VSI for add, rule id for delete */ - __le32 addr_high; /* address of array of 2-byte VSI or VLAN ids */ - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule); - -struct i40e_aqc_add_delete_mirror_rule_completion { - u8 reserved[2]; - __le16 rule_id; /* only used on add */ - __le16 mirror_rules_used; - __le16 mirror_rules_free; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion); - -/* Dynamic Device Personalization */ -struct i40e_aqc_write_personalization_profile { - u8 flags; - u8 reserved[3]; - __le32 profile_track_id; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_write_personalization_profile); - -struct i40e_aqc_write_ddp_resp { - __le32 error_offset; - __le32 error_info; - __le32 addr_high; - __le32 addr_low; -}; - -struct i40e_aqc_get_applied_profiles { - u8 flags; -#define I40E_AQC_GET_DDP_GET_CONF 0x1 -#define I40E_AQC_GET_DDP_GET_RDPU_CONF 0x2 - u8 rsv[3]; - __le32 reserved; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_applied_profiles); - -/* DCB 0x03xx*/ - -/* PFC Ignore (direct 0x0301) - * the command and response use the same descriptor structure - */ -struct i40e_aqc_pfc_ignore { - u8 tc_bitmap; - u8 command_flags; /* unused on response */ -#define I40E_AQC_PFC_IGNORE_SET 0x80 -#define I40E_AQC_PFC_IGNORE_CLEAR 0x0 - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_pfc_ignore); - -/* DCB Update (direct 0x0302) uses the i40e_aq_desc structure - * with no parameters - */ - -/* TX scheduler 0x04xx */ - -/* Almost all the indirect commands use - * this generic struct to pass the SEID in param0 - */ -struct i40e_aqc_tx_sched_ind { - __le16 vsi_seid; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_tx_sched_ind); - -/* Several commands respond with a set of queue set handles */ -struct i40e_aqc_qs_handles_resp { - __le16 qs_handles[8]; -}; - -/* Configure VSI BW limits (direct 0x0400) */ -struct i40e_aqc_configure_vsi_bw_limit { - __le16 vsi_seid; - u8 reserved[2]; - __le16 credit; - u8 reserved1[2]; - u8 max_credit; /* 0-3, limit = 2^max */ - u8 reserved2[7]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_vsi_bw_limit); - -/* Configure VSI Bandwidth Limit per Traffic Type (indirect 0x0406) - * responds with i40e_aqc_qs_handles_resp - */ -struct i40e_aqc_configure_vsi_ets_sla_bw_data { - u8 tc_valid_bits; - u8 reserved[15]; - __le16 tc_bw_credits[8]; /* FW writesback QS handles here */ - - /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved1[28]; -}; - -I40E_CHECK_STRUCT_LEN(0x40, 
i40e_aqc_configure_vsi_ets_sla_bw_data); - -/* Configure VSI Bandwidth Allocation per Traffic Type (indirect 0x0407) - * responds with i40e_aqc_qs_handles_resp - */ -struct i40e_aqc_configure_vsi_tc_bw_data { - u8 tc_valid_bits; - u8 reserved[3]; - u8 tc_bw_credits[8]; - u8 reserved1[4]; - __le16 qs_handles[8]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_vsi_tc_bw_data); - -/* Query vsi bw configuration (indirect 0x0408) */ -struct i40e_aqc_query_vsi_bw_config_resp { - u8 tc_valid_bits; - u8 tc_suspended_bits; - u8 reserved[14]; - __le16 qs_handles[8]; - u8 reserved1[4]; - __le16 port_bw_limit; - u8 reserved2[2]; - u8 max_bw; /* 0-3, limit = 2^max */ - u8 reserved3[23]; -}; - -I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_vsi_bw_config_resp); - -/* Query VSI Bandwidth Allocation per Traffic Type (indirect 0x040A) */ -struct i40e_aqc_query_vsi_ets_sla_config_resp { - u8 tc_valid_bits; - u8 reserved[3]; - u8 share_credits[8]; - __le16 credits[8]; - - /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_vsi_ets_sla_config_resp); - -/* Configure Switching Component Bandwidth Limit (direct 0x0410) */ -struct i40e_aqc_configure_switching_comp_bw_limit { - __le16 seid; - u8 reserved[2]; - __le16 credit; - u8 reserved1[2]; - u8 max_bw; /* 0-3, limit = 2^max */ - u8 reserved2[7]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit); - -/* Enable Physical Port ETS (indirect 0x0413) - * Modify Physical Port ETS (indirect 0x0414) - * Disable Physical Port ETS (indirect 0x0415) - */ -struct i40e_aqc_configure_switching_comp_ets_data { - u8 reserved[4]; - u8 tc_valid_bits; - u8 seepage; -#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1 - u8 tc_strict_priority_flags; - u8 reserved1[17]; - u8 tc_bw_share_credits[8]; - u8 reserved2[96]; -}; - -I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_configure_switching_comp_ets_data); - -/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */ -struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { - u8 tc_valid_bits; - u8 reserved[15]; - __le16 tc_bw_credit[8]; - - /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved1[28]; -}; - -I40E_CHECK_STRUCT_LEN(0x40, - i40e_aqc_configure_switching_comp_ets_bw_limit_data); - -/* Configure Switching Component Bandwidth Allocation per Tc - * (indirect 0x0417) - */ -struct i40e_aqc_configure_switching_comp_bw_config_data { - u8 tc_valid_bits; - u8 reserved[2]; - u8 absolute_credits; /* bool */ - u8 tc_bw_share_credits[8]; - u8 reserved1[20]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_configure_switching_comp_bw_config_data); - -/* Query Switching Component Configuration (indirect 0x0418) */ -struct i40e_aqc_query_switching_comp_ets_config_resp { - u8 tc_valid_bits; - u8 reserved[35]; - __le16 port_bw_limit; - u8 reserved1[2]; - u8 tc_bw_max; /* 0-3, limit = 2^max */ - u8 reserved2[23]; -}; - -I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_query_switching_comp_ets_config_resp); - -/* Query PhysicalPort ETS Configuration (indirect 0x0419) */ -struct i40e_aqc_query_port_ets_config_resp { - u8 reserved[4]; - u8 tc_valid_bits; - u8 reserved1; - u8 tc_strict_priority_bits; - u8 reserved2; - u8 tc_bw_share_credits[8]; - __le16 tc_bw_limits[8]; - - /* 4 bits per tc 0-7, 4th bit reserved, limit = 2^max */ - __le16 tc_bw_max[2]; - u8 reserved3[32]; -}; - -I40E_CHECK_STRUCT_LEN(0x44, i40e_aqc_query_port_ets_config_resp); - -/* Query Switching Component Bandwidth Allocation per Traffic Type - 
* (indirect 0x041A) - */ -struct i40e_aqc_query_switching_comp_bw_config_resp { - u8 tc_valid_bits; - u8 reserved[2]; - u8 absolute_credits_enable; /* bool */ - u8 tc_bw_share_credits[8]; - __le16 tc_bw_limits[8]; - - /* 4 bits per tc 0-7, 4th bit is reserved, limit = 2^max */ - __le16 tc_bw_max[2]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_query_switching_comp_bw_config_resp); - -/* Suspend/resume port TX traffic - * (direct 0x041B and 0x041C) uses the generic SEID struct - */ - -/* Configure partition BW - * (indirect 0x041D) - */ -struct i40e_aqc_configure_partition_bw_data { - __le16 pf_valid_bits; - u8 min_bw[16]; /* guaranteed bandwidth */ - u8 max_bw[16]; /* bandwidth limit */ -}; - -I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); - -/* Get and set the active HMC resource profile and status. - * (direct 0x0500) and (direct 0x0501) - */ -struct i40e_aq_get_set_hmc_resource_profile { - u8 pm_profile; - u8 pe_vf_enabled; - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); - -enum i40e_aq_hmc_profile { - /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ - I40E_HMC_PROFILE_DEFAULT = 1, - I40E_HMC_PROFILE_FAVOR_VF = 2, - I40E_HMC_PROFILE_EQUAL = 3, -}; - -/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ - -/* set in param0 for get phy abilities to report qualified modules */ -#define I40E_AQ_PHY_REPORT_QUALIFIED_MODULES 0x0001 -#define I40E_AQ_PHY_REPORT_INITIAL_VALUES 0x0002 - -enum i40e_aq_phy_type { - I40E_PHY_TYPE_SGMII = 0x0, - I40E_PHY_TYPE_1000BASE_KX = 0x1, - I40E_PHY_TYPE_10GBASE_KX4 = 0x2, - I40E_PHY_TYPE_10GBASE_KR = 0x3, - I40E_PHY_TYPE_40GBASE_KR4 = 0x4, - I40E_PHY_TYPE_XAUI = 0x5, - I40E_PHY_TYPE_XFI = 0x6, - I40E_PHY_TYPE_SFI = 0x7, - I40E_PHY_TYPE_XLAUI = 0x8, - I40E_PHY_TYPE_XLPPI = 0x9, - I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA, - I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB, - I40E_PHY_TYPE_10GBASE_AOC = 0xC, - I40E_PHY_TYPE_40GBASE_AOC = 0xD, - I40E_PHY_TYPE_UNRECOGNIZED = 0xE, - I40E_PHY_TYPE_UNSUPPORTED = 0xF, - I40E_PHY_TYPE_100BASE_TX = 0x11, - I40E_PHY_TYPE_1000BASE_T = 0x12, - I40E_PHY_TYPE_10GBASE_T = 0x13, - I40E_PHY_TYPE_10GBASE_SR = 0x14, - I40E_PHY_TYPE_10GBASE_LR = 0x15, - I40E_PHY_TYPE_10GBASE_SFPP_CU = 0x16, - I40E_PHY_TYPE_10GBASE_CR1 = 0x17, - I40E_PHY_TYPE_40GBASE_CR4 = 0x18, - I40E_PHY_TYPE_40GBASE_SR4 = 0x19, - I40E_PHY_TYPE_40GBASE_LR4 = 0x1A, - I40E_PHY_TYPE_1000BASE_SX = 0x1B, - I40E_PHY_TYPE_1000BASE_LX = 0x1C, - I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D, - I40E_PHY_TYPE_20GBASE_KR2 = 0x1E, - I40E_PHY_TYPE_25GBASE_KR = 0x1F, - I40E_PHY_TYPE_25GBASE_CR = 0x20, - I40E_PHY_TYPE_25GBASE_SR = 0x21, - I40E_PHY_TYPE_25GBASE_LR = 0x22, - I40E_PHY_TYPE_25GBASE_AOC = 0x23, - I40E_PHY_TYPE_25GBASE_ACC = 0x24, - I40E_PHY_TYPE_MAX, - I40E_PHY_TYPE_NOT_SUPPORTED_HIGH_TEMP = 0xFD, - I40E_PHY_TYPE_EMPTY = 0xFE, - I40E_PHY_TYPE_DEFAULT = 0xFF, -}; - -#define I40E_LINK_SPEED_100MB_SHIFT 0x1 -#define I40E_LINK_SPEED_1000MB_SHIFT 0x2 -#define I40E_LINK_SPEED_10GB_SHIFT 0x3 -#define I40E_LINK_SPEED_40GB_SHIFT 0x4 -#define I40E_LINK_SPEED_20GB_SHIFT 0x5 -#define I40E_LINK_SPEED_25GB_SHIFT 0x6 - -enum i40e_aq_link_speed { - I40E_LINK_SPEED_UNKNOWN = 0, - I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT), - I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT), - I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT), - I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT), - I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT), - I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT), -}; - 
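Illustrative sketch (not part of the diff above): the I40E_LINK_SPEED_* values removed here are one-hot bits built from the corresponding *_SHIFT defines, so a link_speed byte reported by firmware can be translated to Mb/s with a simple switch. The helper below is hypothetical and only mirrors the deleted definitions.

#include <stdint.h>

#define BIT(n) (1U << (n))	/* same expansion the deleted header relies on */

/* Hypothetical helper: map an i40e_aq_link_speed bitmap value to Mb/s. */
static uint32_t i40e_aq_speed_to_mbps(uint8_t link_speed)
{
	switch (link_speed) {
	case BIT(1):	/* I40E_LINK_SPEED_100MB  */ return 100;
	case BIT(2):	/* I40E_LINK_SPEED_1GB    */ return 1000;
	case BIT(3):	/* I40E_LINK_SPEED_10GB   */ return 10000;
	case BIT(4):	/* I40E_LINK_SPEED_40GB   */ return 40000;
	case BIT(5):	/* I40E_LINK_SPEED_20GB   */ return 20000;
	case BIT(6):	/* I40E_LINK_SPEED_25GB   */ return 25000;
	default:	/* I40E_LINK_SPEED_UNKNOWN */ return 0;
	}
}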
-struct i40e_aqc_module_desc { - u8 oui[3]; - u8 reserved1; - u8 part_number[16]; - u8 revision[4]; - u8 reserved2[8]; -}; - -I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_module_desc); - -struct i40e_aq_get_phy_abilities_resp { - __le32 phy_type; /* bitmap using the above enum for offsets */ - u8 link_speed; /* bitmap using the above enum bit patterns */ - u8 abilities; -#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01 -#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02 -#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04 -#define I40E_AQ_PHY_LINK_ENABLED 0x08 -#define I40E_AQ_PHY_AN_ENABLED 0x10 -#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20 -#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40 -#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80 - __le16 eee_capability; -#define I40E_AQ_EEE_100BASE_TX 0x0002 -#define I40E_AQ_EEE_1000BASE_T 0x0004 -#define I40E_AQ_EEE_10GBASE_T 0x0008 -#define I40E_AQ_EEE_1000BASE_KX 0x0010 -#define I40E_AQ_EEE_10GBASE_KX4 0x0020 -#define I40E_AQ_EEE_10GBASE_KR 0x0040 - __le32 eeer_val; - u8 d3_lpan; -#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01 - u8 phy_type_ext; -#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01 -#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02 -#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 -#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 -#define I40E_AQ_PHY_TYPE_EXT_25G_AOC 0x10 -#define I40E_AQ_PHY_TYPE_EXT_25G_ACC 0x20 - u8 fec_cfg_curr_mod_ext_info; -#define I40E_AQ_ENABLE_FEC_KR 0x01 -#define I40E_AQ_ENABLE_FEC_RS 0x02 -#define I40E_AQ_REQUEST_FEC_KR 0x04 -#define I40E_AQ_REQUEST_FEC_RS 0x08 -#define I40E_AQ_ENABLE_FEC_AUTO 0x10 -#define I40E_AQ_FEC -#define I40E_AQ_MODULE_TYPE_EXT_MASK 0xE0 -#define I40E_AQ_MODULE_TYPE_EXT_SHIFT 5 - - u8 ext_comp_code; - u8 phy_id[4]; - u8 module_type[3]; - u8 qualified_module_count; -#define I40E_AQ_PHY_MAX_QMS 16 - struct i40e_aqc_module_desc qualified_module[I40E_AQ_PHY_MAX_QMS]; -}; - -I40E_CHECK_STRUCT_LEN(0x218, i40e_aq_get_phy_abilities_resp); - -/* Set PHY Config (direct 0x0601) */ -struct i40e_aq_set_phy_config { /* same bits as above in all */ - __le32 phy_type; - u8 link_speed; - u8 abilities; -/* bits 0-2 use the values from get_phy_abilities_resp */ -#define I40E_AQ_PHY_ENABLE_LINK 0x08 -#define I40E_AQ_PHY_ENABLE_AN 0x10 -#define I40E_AQ_PHY_ENABLE_ATOMIC_LINK 0x20 - __le16 eee_capability; - __le32 eeer; - u8 low_power_ctrl; - u8 phy_type_ext; -#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0X01 -#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0X02 -#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04 -#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08 - u8 fec_config; -#define I40E_AQ_SET_FEC_ABILITY_KR BIT(0) -#define I40E_AQ_SET_FEC_ABILITY_RS BIT(1) -#define I40E_AQ_SET_FEC_REQUEST_KR BIT(2) -#define I40E_AQ_SET_FEC_REQUEST_RS BIT(3) -#define I40E_AQ_SET_FEC_AUTO BIT(4) -#define I40E_AQ_PHY_FEC_CONFIG_SHIFT 0x0 -#define I40E_AQ_PHY_FEC_CONFIG_MASK (0x1F << I40E_AQ_PHY_FEC_CONFIG_SHIFT) - u8 reserved; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config); - -/* Set MAC Config command data structure (direct 0x0603) */ -struct i40e_aq_set_mac_config { - __le16 max_frame_size; - u8 params; -#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04 -#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78 -#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7 -#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6 -#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5 -#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4 
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3 -#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2 -#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1 - u8 tx_timer_priority; /* bitmap */ - __le16 tx_timer_value; - __le16 fc_refresh_threshold; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aq_set_mac_config); - -/* Restart Auto-Negotiation (direct 0x605) */ -struct i40e_aqc_set_link_restart_an { - u8 command; -#define I40E_AQ_PHY_RESTART_AN 0x02 -#define I40E_AQ_PHY_LINK_ENABLE 0x04 - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_link_restart_an); - -/* Get Link Status cmd & response data structure (direct 0x0607) */ -struct i40e_aqc_get_link_status { - __le16 command_flags; /* only field set on command */ -#define I40E_AQ_LSE_MASK 0x3 -#define I40E_AQ_LSE_NOP 0x0 -#define I40E_AQ_LSE_DISABLE 0x2 -#define I40E_AQ_LSE_ENABLE 0x3 -/* only response uses this flag */ -#define I40E_AQ_LSE_IS_ENABLED 0x1 - u8 phy_type; /* i40e_aq_phy_type */ - u8 link_speed; /* i40e_aq_link_speed */ - u8 link_info; -#define I40E_AQ_LINK_UP 0x01 /* obsolete */ -#define I40E_AQ_LINK_UP_FUNCTION 0x01 -#define I40E_AQ_LINK_FAULT 0x02 -#define I40E_AQ_LINK_FAULT_TX 0x04 -#define I40E_AQ_LINK_FAULT_RX 0x08 -#define I40E_AQ_LINK_FAULT_REMOTE 0x10 -#define I40E_AQ_LINK_UP_PORT 0x20 -#define I40E_AQ_MEDIA_AVAILABLE 0x40 -#define I40E_AQ_SIGNAL_DETECT 0x80 - u8 an_info; -#define I40E_AQ_AN_COMPLETED 0x01 -#define I40E_AQ_LP_AN_ABILITY 0x02 -#define I40E_AQ_PD_FAULT 0x04 -#define I40E_AQ_FEC_EN 0x08 -#define I40E_AQ_PHY_LOW_POWER 0x10 -#define I40E_AQ_LINK_PAUSE_TX 0x20 -#define I40E_AQ_LINK_PAUSE_RX 0x40 -#define I40E_AQ_QUALIFIED_MODULE 0x80 - u8 ext_info; -#define I40E_AQ_LINK_PHY_TEMP_ALARM 0x01 -#define I40E_AQ_LINK_XCESSIVE_ERRORS 0x02 -#define I40E_AQ_LINK_TX_SHIFT 0x02 -#define I40E_AQ_LINK_TX_MASK (0x03 << I40E_AQ_LINK_TX_SHIFT) -#define I40E_AQ_LINK_TX_ACTIVE 0x00 -#define I40E_AQ_LINK_TX_DRAINED 0x01 -#define I40E_AQ_LINK_TX_FLUSHED 0x03 -#define I40E_AQ_LINK_FORCED_40G 0x10 -/* 25G Error Codes */ -#define I40E_AQ_25G_NO_ERR 0X00 -#define I40E_AQ_25G_NOT_PRESENT 0X01 -#define I40E_AQ_25G_NVM_CRC_ERR 0X02 -#define I40E_AQ_25G_SBUS_UCODE_ERR 0X03 -#define I40E_AQ_25G_SERDES_UCODE_ERR 0X04 -#define I40E_AQ_25G_NIMB_UCODE_ERR 0X05 - u8 loopback; /* use defines from i40e_aqc_set_lb_mode */ -/* Since firmware API 1.7 loopback field keeps power class info as well */ -#define I40E_AQ_LOOPBACK_MASK 0x07 -#define I40E_AQ_PWR_CLASS_SHIFT_LB 6 -#define I40E_AQ_PWR_CLASS_MASK_LB (0x03 << I40E_AQ_PWR_CLASS_SHIFT_LB) - __le16 max_frame_size; - u8 config; -#define I40E_AQ_CONFIG_FEC_KR_ENA 0x01 -#define I40E_AQ_CONFIG_FEC_RS_ENA 0x02 -#define I40E_AQ_CONFIG_CRC_ENA 0x04 -#define I40E_AQ_CONFIG_PACING_MASK 0x78 - union { - struct { - u8 power_desc; -#define I40E_AQ_LINK_POWER_CLASS_1 0x00 -#define I40E_AQ_LINK_POWER_CLASS_2 0x01 -#define I40E_AQ_LINK_POWER_CLASS_3 0x02 -#define I40E_AQ_LINK_POWER_CLASS_4 0x03 -#define I40E_AQ_PWR_CLASS_MASK 0x03 - u8 reserved[4]; - }; - struct { - u8 link_type[4]; - u8 link_type_ext; - }; - }; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status); - -/* Set event mask command (direct 0x613) */ -struct i40e_aqc_set_phy_int_mask { - u8 reserved[8]; - __le16 event_mask; -#define I40E_AQ_EVENT_LINK_UPDOWN 0x0002 -#define I40E_AQ_EVENT_MEDIA_NA 0x0004 -#define I40E_AQ_EVENT_LINK_FAULT 0x0008 -#define I40E_AQ_EVENT_PHY_TEMP_ALARM 0x0010 -#define I40E_AQ_EVENT_EXCESSIVE_ERRORS 0x0020 -#define I40E_AQ_EVENT_SIGNAL_DETECT 0x0040 -#define I40E_AQ_EVENT_AN_COMPLETED 0x0080 -#define 
I40E_AQ_EVENT_MODULE_QUAL_FAIL 0x0100 -#define I40E_AQ_EVENT_PORT_TX_SUSPENDED 0x0200 - u8 reserved1[6]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_int_mask); - -/* Get Local AN advt register (direct 0x0614) - * Set Local AN advt register (direct 0x0615) - * Get Link Partner AN advt register (direct 0x0616) - */ -struct i40e_aqc_an_advt_reg { - __le32 local_an_reg0; - __le16 local_an_reg1; - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_an_advt_reg); - -/* Set Loopback mode (0x0618) */ -struct i40e_aqc_set_lb_mode { - __le16 lb_mode; -#define I40E_AQ_LB_PHY_LOCAL 0x01 -#define I40E_AQ_LB_PHY_REMOTE 0x02 -#define I40E_AQ_LB_MAC_LOCAL 0x04 - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode); - -/* Set PHY Debug command (0x0622) */ -struct i40e_aqc_set_phy_debug { - u8 command_flags; -#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \ - I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT) -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 -#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 -#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug); - -enum i40e_aq_phy_reg_type { - I40E_AQC_PHY_REG_INTERNAL = 0x1, - I40E_AQC_PHY_REG_EXERNAL_BASET = 0x2, - I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3 -}; - -/* Run PHY Activity (0x0626) */ -struct i40e_aqc_run_phy_activity { - __le16 activity_id; - u8 flags; - u8 reserved1; - __le32 control; - __le32 data; - u8 reserved2[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity); - -/* Set PHY Register command (0x0628) */ -/* Get PHY Register command (0x0629) */ -struct i40e_aqc_phy_register_access { - u8 phy_interface; -#define I40E_AQ_PHY_REG_ACCESS_INTERNAL 0 -#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1 -#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2 - u8 dev_address; - u8 reserved1[2]; - __le32 reg_address; - __le32 reg_value; - u8 reserved2[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access); - -/* NVM Read command (indirect 0x0701) - * NVM Erase commands (direct 0x0702) - * NVM Update commands (indirect 0x0703) - */ -struct i40e_aqc_nvm_update { - u8 command_flags; -#define I40E_AQ_NVM_LAST_CMD 0x01 -#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20 -#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40 -#define I40E_AQ_NVM_FLASH_ONLY 0x80 -#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1 -#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03 -#define I40E_AQ_NVM_PRESERVATION_FLAGS_SELECTED 0x03 -#define I40E_AQ_NVM_PRESERVATION_FLAGS_ALL 0x01 - u8 module_pointer; - __le16 length; - __le32 offset; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update); - -/* NVM Config Read (indirect 0x0704) */ -struct i40e_aqc_nvm_config_read { - __le16 cmd_flags; -#define I40E_AQ_ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1 -#define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0 -#define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1 - __le16 element_count; - __le16 element_id; /* Feature/field ID */ - __le16 element_id_msw; /* MSWord of field ID */ - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read); - -/* NVM Config Write (indirect 0x0705) */ -struct i40e_aqc_nvm_config_write { - __le16 cmd_flags; - __le16 element_count; - u8 reserved[4]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); - -/* Used for 0x0704 as well as for 0x0705 
commands */ -#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 -#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ - BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) -#define I40E_AQ_ANVM_FEATURE 0 -#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT) -struct i40e_aqc_nvm_config_data_feature { - __le16 feature_id; -#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 -#define I40E_AQ_ANVM_FEATURE_OPTION_DWORD_MAP 0x08 -#define I40E_AQ_ANVM_FEATURE_OPTION_POR_CSR 0x10 - __le16 feature_options; - __le16 feature_selection; -}; - -I40E_CHECK_STRUCT_LEN(0x6, i40e_aqc_nvm_config_data_feature); - -struct i40e_aqc_nvm_config_data_immediate_field { - __le32 field_id; - __le32 field_value; - __le16 field_options; - __le16 reserved; -}; - -I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); - -/* OEM Post Update (indirect 0x0720) - * no command data struct used - */ - struct i40e_aqc_nvm_oem_post_update { -#define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01 - u8 sel_data; - u8 reserved[7]; -}; - -I40E_CHECK_STRUCT_LEN(0x8, i40e_aqc_nvm_oem_post_update); - -struct i40e_aqc_nvm_oem_post_update_buffer { - u8 str_len; - u8 dev_addr; - __le16 eeprom_addr; - u8 data[36]; -}; - -I40E_CHECK_STRUCT_LEN(0x28, i40e_aqc_nvm_oem_post_update_buffer); - -/* Thermal Sensor (indirect 0x0721) - * read or set thermal sensor configs and values - * takes a sensor and command specific data buffer, not detailed here - */ -struct i40e_aqc_thermal_sensor { - u8 sensor_action; -#define I40E_AQ_THERMAL_SENSOR_READ_CONFIG 0 -#define I40E_AQ_THERMAL_SENSOR_SET_CONFIG 1 -#define I40E_AQ_THERMAL_SENSOR_READ_TEMP 2 - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_thermal_sensor); - -/* Send to PF command (indirect 0x0801) id is only used by PF - * Send to VF command (indirect 0x0802) id is only used by PF - * Send to Peer PF command (indirect 0x0803) - */ -struct i40e_aqc_pf_vf_message { - __le32 id; - u8 reserved[4]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); - -/* Alternate structure */ - -/* Direct write (direct 0x0900) - * Direct read (direct 0x0902) - */ -struct i40e_aqc_alternate_write { - __le32 address0; - __le32 data0; - __le32 address1; - __le32 data1; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write); - -/* Indirect write (indirect 0x0901) - * Indirect read (indirect 0x0903) - */ - -struct i40e_aqc_alternate_ind_write { - __le32 address; - __le32 length; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_ind_write); - -/* Done alternate write (direct 0x0904) - * uses i40e_aq_desc - */ -struct i40e_aqc_alternate_write_done { - __le16 cmd_flags; -#define I40E_AQ_ALTERNATE_MODE_BIOS_MASK 1 -#define I40E_AQ_ALTERNATE_MODE_BIOS_LEGACY 0 -#define I40E_AQ_ALTERNATE_MODE_BIOS_UEFI 1 -#define I40E_AQ_ALTERNATE_RESET_NEEDED 2 - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_write_done); - -/* Set OEM mode (direct 0x0905) */ -struct i40e_aqc_alternate_set_mode { - __le32 mode; -#define I40E_AQ_ALTERNATE_MODE_NONE 0 -#define I40E_AQ_ALTERNATE_MODE_OEM 1 - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_alternate_set_mode); - -/* Clear port Alternate RAM (direct 0x0906) uses i40e_aq_desc */ - -/* async events 0x10xx */ - -/* Lan Queue Overflow Event (direct, 0x1001) */ -struct i40e_aqc_lan_overflow { - __le32 prtdcb_rupto; - __le32 otx_ctl; - u8 reserved[8]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lan_overflow); - -/* Get LLDP MIB (indirect 0x0A00) 
*/ -struct i40e_aqc_lldp_get_mib { - u8 type; - u8 reserved1; -#define I40E_AQ_LLDP_MIB_TYPE_MASK 0x3 -#define I40E_AQ_LLDP_MIB_LOCAL 0x0 -#define I40E_AQ_LLDP_MIB_REMOTE 0x1 -#define I40E_AQ_LLDP_MIB_LOCAL_AND_REMOTE 0x2 -#define I40E_AQ_LLDP_BRIDGE_TYPE_MASK 0xC -#define I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT 0x2 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE 0x0 -#define I40E_AQ_LLDP_BRIDGE_TYPE_NON_TPMR 0x1 -#define I40E_AQ_LLDP_TX_SHIFT 0x4 -#define I40E_AQ_LLDP_TX_MASK (0x03 << I40E_AQ_LLDP_TX_SHIFT) -/* TX pause flags use I40E_AQ_LINK_TX_* above */ - __le16 local_len; - __le16 remote_len; - u8 reserved2[2]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_get_mib); - -/* Configure LLDP MIB Change Event (direct 0x0A01) - * also used for the event (with type in the command field) - */ -struct i40e_aqc_lldp_update_mib { - u8 command; -#define I40E_AQ_LLDP_MIB_UPDATE_ENABLE 0x0 -#define I40E_AQ_LLDP_MIB_UPDATE_DISABLE 0x1 - u8 reserved[7]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_mib); - -/* Add LLDP TLV (indirect 0x0A02) - * Delete LLDP TLV (indirect 0x0A04) - */ -struct i40e_aqc_lldp_add_tlv { - u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ - u8 reserved1[1]; - __le16 len; - u8 reserved2[4]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_add_tlv); - -/* Update LLDP TLV (indirect 0x0A03) */ -struct i40e_aqc_lldp_update_tlv { - u8 type; /* only nearest bridge and non-TPMR from 0x0A00 */ - u8 reserved; - __le16 old_len; - __le16 new_offset; - __le16 new_len; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv); - -/* Stop LLDP (direct 0x0A05) */ -struct i40e_aqc_lldp_stop { - u8 command; -#define I40E_AQ_LLDP_AGENT_STOP 0x0 -#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1 - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop); - -/* Start LLDP (direct 0x0A06) */ - -struct i40e_aqc_lldp_start { - u8 command; -#define I40E_AQ_LLDP_AGENT_START 0x1 - u8 reserved[15]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start); - -/* Set DCB (direct 0x0303) */ -struct i40e_aqc_set_dcb_parameters { - u8 command; -#define I40E_AQ_DCB_SET_AGENT 0x1 -#define I40E_DCB_VALID 0x1 - u8 valid_flags; - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_set_dcb_parameters); - -/* Apply MIB changes (0x0A07) - * uses the generic struc as it contains no data - */ - -/* Add Udp Tunnel command and completion (direct 0x0B00) */ -struct i40e_aqc_add_udp_tunnel { - __le16 udp_port; - u8 reserved0[3]; - u8 protocol_type; -#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00 -#define I40E_AQC_TUNNEL_TYPE_NGE 0x01 -#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10 -#define I40E_AQC_TUNNEL_TYPE_VXLAN_GPE 0x11 - u8 reserved1[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); - -struct i40e_aqc_add_udp_tunnel_completion { - __le16 udp_port; - u8 filter_entry_index; - u8 multiple_pfs; -#define I40E_AQC_SINGLE_PF 0x0 -#define I40E_AQC_MULTIPLE_PFS 0x1 - u8 total_filters; - u8 reserved[11]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel_completion); - -/* remove UDP Tunnel command (0x0B01) */ -struct i40e_aqc_remove_udp_tunnel { - u8 reserved[2]; - u8 index; /* 0 to 15 */ - u8 reserved2[13]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_udp_tunnel); - -struct i40e_aqc_del_udp_tunnel_completion { - __le16 udp_port; - u8 index; /* 0 to 15 */ - u8 multiple_pfs; - u8 total_filters_used; - u8 reserved1[11]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion); 
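Illustrative sketch (not part of the diff): nearly every indirect admin-queue command in this header (LLDP get MIB, PF/VF messages, the RSS key/LUT commands that follow) ends with an addr_high/addr_low pair, which is simply a 64-bit DMA buffer address split into two 32-bit words. The stand-alone helper below is hypothetical; the driver itself uses upper_32_bits()/lower_32_bits() together with cpu_to_le32().

#include <stdint.h>

/* Hypothetical helper: split a 64-bit buffer address into the
 * addr_high/addr_low words used by the indirect AQ command structs.
 * The little-endian conversion (cpu_to_le32) is omitted for brevity.
 */
static void i40e_fill_indirect_addr(uint32_t *addr_high, uint32_t *addr_low,
				    uint64_t dma_addr)
{
	*addr_high = (uint32_t)(dma_addr >> 32);
	*addr_low  = (uint32_t)(dma_addr & 0xFFFFFFFFu);
}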
- -struct i40e_aqc_get_set_rss_key { -#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15) -#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 -#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ - I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) - __le16 vsi_id; - u8 reserved[6]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key); - -struct i40e_aqc_get_set_rss_key_data { - u8 standard_rss_key[0x28]; - u8 extended_hash_key[0xc]; -}; - -I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); - -struct i40e_aqc_get_set_rss_lut { -#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15) -#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 -#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ - I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) - __le16 vsi_id; -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \ - BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) - -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 -#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 - __le16 flags; - u8 reserved[4]; - __le32 addr_high; - __le32 addr_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut); - -/* tunnel key structure 0x0B10 */ - -struct i40e_aqc_tunnel_key_structure_A0 { - __le16 key1_off; - __le16 key1_len; - __le16 key2_off; - __le16 key2_len; - __le16 flags; -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 -/* response flags */ -#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 -#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 - u8 resreved[6]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure_A0); - -struct i40e_aqc_tunnel_key_structure { - u8 key1_off; - u8 key2_off; - u8 key1_len; /* 0 to 15 */ - u8 key2_len; /* 0 to 15 */ - u8 flags; -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDE 0x01 -/* response flags */ -#define I40E_AQC_TUNNEL_KEY_STRUCT_SUCCESS 0x01 -#define I40E_AQC_TUNNEL_KEY_STRUCT_MODIFIED 0x02 -#define I40E_AQC_TUNNEL_KEY_STRUCT_OVERRIDDEN 0x03 - u8 network_key_index; -#define I40E_AQC_NETWORK_KEY_INDEX_VXLAN 0x0 -#define I40E_AQC_NETWORK_KEY_INDEX_NGE 0x1 -#define I40E_AQC_NETWORK_KEY_INDEX_FLEX_MAC_IN_UDP 0x2 -#define I40E_AQC_NETWORK_KEY_INDEX_GRE 0x3 - u8 reserved[10]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_tunnel_key_structure); - -/* OEM mode commands (direct 0xFE0x) */ -struct i40e_aqc_oem_param_change { - __le32 param_type; -#define I40E_AQ_OEM_PARAM_TYPE_PF_CTL 0 -#define I40E_AQ_OEM_PARAM_TYPE_BW_CTL 1 -#define I40E_AQ_OEM_PARAM_MAC 2 - __le32 param_value1; - __le16 param_value2; - u8 reserved[6]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_param_change); - -struct i40e_aqc_oem_state_change { - __le32 state; -#define I40E_AQ_OEM_STATE_LINK_DOWN 0x0 -#define I40E_AQ_OEM_STATE_LINK_UP 0x1 - u8 reserved[12]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_oem_state_change); - -/* Initialize OCSD (0xFE02, direct) */ -struct i40e_aqc_opc_oem_ocsd_initialize { - u8 type_status; - u8 reserved1[3]; - __le32 ocsd_memory_block_addr_high; - __le32 ocsd_memory_block_addr_low; - __le32 requested_update_interval; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocsd_initialize); - -/* Initialize OCBB (0xFE03, direct) */ -struct i40e_aqc_opc_oem_ocbb_initialize { - u8 type_status; - u8 reserved1[3]; - __le32 ocbb_memory_block_addr_high; - __le32 ocbb_memory_block_addr_low; - u8 reserved2[4]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_opc_oem_ocbb_initialize); - -/* debug commands */ - -/* get device id (0xFF00) uses the generic structure */ - -/* set test more (0xFF01, internal) */ - -struct i40e_acq_set_test_mode { - u8 mode; 
-#define I40E_AQ_TEST_PARTIAL 0 -#define I40E_AQ_TEST_FULL 1 -#define I40E_AQ_TEST_NVM 2 - u8 reserved[3]; - u8 command; -#define I40E_AQ_TEST_OPEN 0 -#define I40E_AQ_TEST_CLOSE 1 -#define I40E_AQ_TEST_INC 2 - u8 reserved2[3]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_acq_set_test_mode); - -/* Debug Read Register command (0xFF03) - * Debug Write Register command (0xFF04) - */ -struct i40e_aqc_debug_reg_read_write { - __le32 reserved; - __le32 address; - __le32 value_high; - __le32 value_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_reg_read_write); - -/* Scatter/gather Reg Read (indirect 0xFF05) - * Scatter/gather Reg Write (indirect 0xFF06) - */ - -/* i40e_aq_desc is used for the command */ -struct i40e_aqc_debug_reg_sg_element_data { - __le32 address; - __le32 value; -}; - -/* Debug Modify register (direct 0xFF07) */ -struct i40e_aqc_debug_modify_reg { - __le32 address; - __le32 value; - __le32 clear_mask; - __le32 set_mask; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_reg); - -/* dump internal data (0xFF08, indirect) */ - -#define I40E_AQ_CLUSTER_ID_AUX 0 -#define I40E_AQ_CLUSTER_ID_SWITCH_FLU 1 -#define I40E_AQ_CLUSTER_ID_TXSCHED 2 -#define I40E_AQ_CLUSTER_ID_HMC 3 -#define I40E_AQ_CLUSTER_ID_MAC0 4 -#define I40E_AQ_CLUSTER_ID_MAC1 5 -#define I40E_AQ_CLUSTER_ID_MAC2 6 -#define I40E_AQ_CLUSTER_ID_MAC3 7 -#define I40E_AQ_CLUSTER_ID_DCB 8 -#define I40E_AQ_CLUSTER_ID_EMP_MEM 9 -#define I40E_AQ_CLUSTER_ID_PKT_BUF 10 -#define I40E_AQ_CLUSTER_ID_ALTRAM 11 - -struct i40e_aqc_debug_dump_internals { - u8 cluster_id; - u8 table_id; - __le16 data_size; - __le32 idx; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_dump_internals); - -struct i40e_aqc_debug_modify_internals { - u8 cluster_id; - u8 cluster_specific_params[7]; - __le32 address_high; - __le32 address_low; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aqc_debug_modify_internals); - -#endif /* _I40E_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h deleted file mode 100644 index 1c78de838..000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h +++ /dev/null @@ -1,215 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ - -#ifndef _I40E_HMC_H_ -#define _I40E_HMC_H_ - -#define I40E_HMC_MAX_BP_COUNT 512 - -/* forward-declare the HW struct for the compiler */ -struct i40e_hw; - -#define I40E_HMC_INFO_SIGNATURE 0x484D5347 /* HMSG */ -#define I40E_HMC_PD_CNT_IN_SD 512 -#define I40E_HMC_DIRECT_BP_SIZE 0x200000 /* 2M */ -#define I40E_HMC_PAGED_BP_SIZE 4096 -#define I40E_HMC_PD_BP_BUF_ALIGNMENT 4096 -#define I40E_FIRST_VF_FPM_ID 16 - -struct i40e_hmc_obj_info { - u64 base; /* base addr in FPM */ - u32 max_cnt; /* max count available for this hmc func */ - u32 cnt; /* count of objects driver actually wants to create */ - u64 size; /* size in bytes of one object */ -}; - -enum i40e_sd_entry_type { - I40E_SD_TYPE_INVALID = 0, - I40E_SD_TYPE_PAGED = 1, - I40E_SD_TYPE_DIRECT = 2 -}; - -struct i40e_hmc_bp { - enum i40e_sd_entry_type entry_type; - struct i40e_dma_mem addr; /* populate to be used by hw */ - u32 sd_pd_index; - u32 ref_cnt; -}; - -struct i40e_hmc_pd_entry { - struct i40e_hmc_bp bp; - u32 sd_index; - bool rsrc_pg; - bool valid; -}; - -struct i40e_hmc_pd_table { - struct i40e_dma_mem pd_page_addr; /* populate to be used by hw */ - struct i40e_hmc_pd_entry *pd_entry; /* [512] for sw book keeping */ - struct i40e_virt_mem pd_entry_virt_mem; /* virt mem for pd_entry */ - - u32 ref_cnt; - u32 sd_index; -}; - -struct i40e_hmc_sd_entry { - enum i40e_sd_entry_type entry_type; - bool valid; - - union { - struct i40e_hmc_pd_table pd_table; - struct i40e_hmc_bp bp; - } u; -}; - -struct i40e_hmc_sd_table { - struct i40e_virt_mem addr; /* used to track sd_entry allocations */ - u32 sd_cnt; - u32 ref_cnt; - struct i40e_hmc_sd_entry *sd_entry; /* (sd_cnt*512) entries max */ -}; - -struct i40e_hmc_info { - u32 signature; - /* equals to pci func num for PF and dynamically allocated for VFs */ - u8 hmc_fn_id; - u16 first_sd_index; /* index of the first available SD */ - - /* hmc objects */ - struct i40e_hmc_obj_info *hmc_obj; - struct i40e_virt_mem hmc_obj_virt_mem; - struct i40e_hmc_sd_table sd_table; -}; - -#define I40E_INC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt++) -#define I40E_INC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt++) -#define I40E_INC_BP_REFCNT(bp) ((bp)->ref_cnt++) - -#define I40E_DEC_SD_REFCNT(sd_table) ((sd_table)->ref_cnt--) -#define I40E_DEC_PD_REFCNT(pd_table) ((pd_table)->ref_cnt--) -#define I40E_DEC_BP_REFCNT(bp) ((bp)->ref_cnt--) - -/** - * I40E_SET_PF_SD_ENTRY - marks the sd entry as valid in the hardware - * @hw: pointer to our hw struct - * @pa: pointer to physical address - * @sd_index: segment descriptor index - * @type: if sd entry is direct or paged - **/ -#define I40E_SET_PF_SD_ENTRY(hw, pa, sd_index, type) \ -{ \ - u32 val1, val2, val3; \ - val1 = (u32)(upper_32_bits(pa)); \ - val2 = (u32)(pa) | (I40E_HMC_MAX_BP_COUNT << \ - I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ - ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) << \ - I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) | \ - BIT(I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT); \ - val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ - wr32((hw), I40E_PFHMC_SDDATAHIGH, val1); \ - wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ - wr32((hw), I40E_PFHMC_SDCMD, val3); \ -} - -/** - * I40E_CLEAR_PF_SD_ENTRY - marks the sd entry as invalid in the hardware - * @hw: pointer to our hw struct - * @sd_index: segment descriptor index - * @type: if sd entry is direct or paged - **/ -#define I40E_CLEAR_PF_SD_ENTRY(hw, sd_index, type) \ -{ \ - u32 val2, val3; \ - val2 = (I40E_HMC_MAX_BP_COUNT << \ - I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) | \ - ((((type) == I40E_SD_TYPE_PAGED) ? 
0 : 1) << \ - I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT); \ - val3 = (sd_index) | BIT_ULL(I40E_PFHMC_SDCMD_PMSDWR_SHIFT); \ - wr32((hw), I40E_PFHMC_SDDATAHIGH, 0); \ - wr32((hw), I40E_PFHMC_SDDATALOW, val2); \ - wr32((hw), I40E_PFHMC_SDCMD, val3); \ -} - -/** - * I40E_INVALIDATE_PF_HMC_PD - Invalidates the pd cache in the hardware - * @hw: pointer to our hw struct - * @sd_idx: segment descriptor index - * @pd_idx: page descriptor index - **/ -#define I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, pd_idx) \ - wr32((hw), I40E_PFHMC_PDINV, \ - (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \ - ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT))) - -/** - * I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit - * @hmc_info: pointer to the HMC configuration information structure - * @type: type of HMC resources we're searching - * @index: starting index for the object - * @cnt: number of objects we're trying to create - * @sd_idx: pointer to return index of the segment descriptor in question - * @sd_limit: pointer to return the maximum number of segment descriptors - * - * This function calculates the segment descriptor index and index limit - * for the resource defined by i40e_hmc_rsrc_type. - **/ -#define I40E_FIND_SD_INDEX_LIMIT(hmc_info, type, index, cnt, sd_idx, sd_limit)\ -{ \ - u64 fpm_addr, fpm_limit; \ - fpm_addr = (hmc_info)->hmc_obj[(type)].base + \ - (hmc_info)->hmc_obj[(type)].size * (index); \ - fpm_limit = fpm_addr + (hmc_info)->hmc_obj[(type)].size * (cnt);\ - *(sd_idx) = (u32)(fpm_addr / I40E_HMC_DIRECT_BP_SIZE); \ - *(sd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_DIRECT_BP_SIZE); \ - /* add one more to the limit to correct our range */ \ - *(sd_limit) += 1; \ -} - -/** - * I40E_FIND_PD_INDEX_LIMIT - finds page descriptor index limit - * @hmc_info: pointer to the HMC configuration information struct - * @type: HMC resource type we're examining - * @idx: starting index for the object - * @cnt: number of objects we're trying to create - * @pd_index: pointer to return page descriptor index - * @pd_limit: pointer to return page descriptor index limit - * - * Calculates the page descriptor index and index limit for the resource - * defined by i40e_hmc_rsrc_type. 
- **/ -#define I40E_FIND_PD_INDEX_LIMIT(hmc_info, type, idx, cnt, pd_index, pd_limit)\ -{ \ - u64 fpm_adr, fpm_limit; \ - fpm_adr = (hmc_info)->hmc_obj[(type)].base + \ - (hmc_info)->hmc_obj[(type)].size * (idx); \ - fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt); \ - *(pd_index) = (u32)(fpm_adr / I40E_HMC_PAGED_BP_SIZE); \ - *(pd_limit) = (u32)((fpm_limit - 1) / I40E_HMC_PAGED_BP_SIZE); \ - /* add one more to the limit to correct our range */ \ - *(pd_limit) += 1; \ -} -i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 sd_index, - enum i40e_sd_entry_type type, - u64 direct_mode_sz); - -i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 pd_index, - struct i40e_dma_mem *rsrc_pg); -i40e_status i40e_remove_pd_bp(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx); -i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info, - u32 idx); -i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx, bool is_pf); -i40e_status i40e_prep_remove_pd_page(struct i40e_hmc_info *hmc_info, - u32 idx); -i40e_status i40e_remove_pd_page_new(struct i40e_hw *hw, - struct i40e_hmc_info *hmc_info, - u32 idx, bool is_pf); - -#endif /* _I40E_HMC_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h deleted file mode 100644 index 82b00f70a..000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h +++ /dev/null @@ -1,158 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_LAN_HMC_H_ -#define _I40E_LAN_HMC_H_ - -/* forward-declare the HW struct for the compiler */ -struct i40e_hw; - -/* HMC element context information */ - -/* Rx queue context data - * - * The sizes of the variables may be larger than needed due to crossing byte - * boundaries. If we do not have the width of the variable set to the correct - * size then we could end up shifting bits off the top of the variable when the - * variable is at the top of a byte and crosses over into the next byte. - */ -struct i40e_hmc_obj_rxq { - u16 head; - u16 cpuid; /* bigger than needed, see above for reason */ - u64 base; - u16 qlen; -#define I40E_RXQ_CTX_DBUFF_SHIFT 7 - u16 dbuff; /* bigger than needed, see above for reason */ -#define I40E_RXQ_CTX_HBUFF_SHIFT 6 - u16 hbuff; /* bigger than needed, see above for reason */ - u8 dtype; - u8 dsize; - u8 crcstrip; - u8 fc_ena; - u8 l2tsel; - u8 hsplit_0; - u8 hsplit_1; - u8 showiv; - u32 rxmax; /* bigger than needed, see above for reason */ - u8 tphrdesc_ena; - u8 tphwdesc_ena; - u8 tphdata_ena; - u8 tphhead_ena; - u16 lrxqthresh; /* bigger than needed, see above for reason */ - u8 prefena; /* NOTE: normally must be set to 1 at init */ -}; - -/* Tx queue context data -* -* The sizes of the variables may be larger than needed due to crossing byte -* boundaries. If we do not have the width of the variable set to the correct -* size then we could end up shifting bits off the top of the variable when the -* variable is at the top of a byte and crosses over into the next byte. 
-*/ -struct i40e_hmc_obj_txq { - u16 head; - u8 new_context; - u64 base; - u8 fc_ena; - u8 timesync_ena; - u8 fd_ena; - u8 alt_vlan_ena; - u16 thead_wb; - u8 cpuid; - u8 head_wb_ena; - u16 qlen; - u8 tphrdesc_ena; - u8 tphrpacket_ena; - u8 tphwdesc_ena; - u64 head_wb_addr; - u32 crc; - u16 rdylist; - u8 rdylist_act; -}; - -/* for hsplit_0 field of Rx HMC context */ -enum i40e_hmc_obj_rx_hsplit_0 { - I40E_HMC_OBJ_RX_HSPLIT_0_NO_SPLIT = 0, - I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 = 1, - I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP = 2, - I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP = 4, - I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP = 8, -}; - -/* fcoe_cntx and fcoe_filt are for debugging purpose only */ -struct i40e_hmc_obj_fcoe_cntx { - u32 rsv[32]; -}; - -struct i40e_hmc_obj_fcoe_filt { - u32 rsv[8]; -}; - -/* Context sizes for LAN objects */ -enum i40e_hmc_lan_object_size { - I40E_HMC_LAN_OBJ_SZ_8 = 0x3, - I40E_HMC_LAN_OBJ_SZ_16 = 0x4, - I40E_HMC_LAN_OBJ_SZ_32 = 0x5, - I40E_HMC_LAN_OBJ_SZ_64 = 0x6, - I40E_HMC_LAN_OBJ_SZ_128 = 0x7, - I40E_HMC_LAN_OBJ_SZ_256 = 0x8, - I40E_HMC_LAN_OBJ_SZ_512 = 0x9, -}; - -#define I40E_HMC_L2OBJ_BASE_ALIGNMENT 512 -#define I40E_HMC_OBJ_SIZE_TXQ 128 -#define I40E_HMC_OBJ_SIZE_RXQ 32 -#define I40E_HMC_OBJ_SIZE_FCOE_CNTX 128 -#define I40E_HMC_OBJ_SIZE_FCOE_FILT 64 - -enum i40e_hmc_lan_rsrc_type { - I40E_HMC_LAN_FULL = 0, - I40E_HMC_LAN_TX = 1, - I40E_HMC_LAN_RX = 2, - I40E_HMC_FCOE_CTX = 3, - I40E_HMC_FCOE_FILT = 4, - I40E_HMC_LAN_MAX = 5 -}; - -enum i40e_hmc_model { - I40E_HMC_MODEL_DIRECT_PREFERRED = 0, - I40E_HMC_MODEL_DIRECT_ONLY = 1, - I40E_HMC_MODEL_PAGED_ONLY = 2, - I40E_HMC_MODEL_UNKNOWN, -}; - -struct i40e_hmc_lan_create_obj_info { - struct i40e_hmc_info *hmc_info; - u32 rsrc_type; - u32 start_idx; - u32 count; - enum i40e_sd_entry_type entry_type; - u64 direct_mode_sz; -}; - -struct i40e_hmc_lan_delete_obj_info { - struct i40e_hmc_info *hmc_info; - u32 rsrc_type; - u32 start_idx; - u32 count; -}; - -i40e_status i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num, - u32 rxq_num, u32 fcoe_cntx_num, - u32 fcoe_filt_num); -i40e_status i40e_configure_lan_hmc(struct i40e_hw *hw, - enum i40e_hmc_model model); -i40e_status i40e_shutdown_lan_hmc(struct i40e_hw *hw); - -i40e_status i40e_clear_lan_tx_queue_context(struct i40e_hw *hw, - u16 queue); -i40e_status i40e_set_lan_tx_queue_context(struct i40e_hw *hw, - u16 queue, - struct i40e_hmc_obj_txq *s); -i40e_status i40e_clear_lan_rx_queue_context(struct i40e_hw *hw, - u16 queue); -i40e_status i40e_set_lan_rx_queue_context(struct i40e_hw *hw, - u16 queue, - struct i40e_hmc_obj_rxq *s); - -#endif /* _I40E_LAN_HMC_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h deleted file mode 100644 index a358f4b9d..000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h +++ /dev/null @@ -1,130 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_PROTOTYPE_H_ -#define _I40E_PROTOTYPE_H_ - -#include "i40e_type.h" -#include "i40e_alloc.h" -#include <linux/avf/virtchnl.h> - -/* Prototypes for shared code functions that are not in - * the standard function pointer structures. These are - * mostly because they are needed even before the init - * has happened and will assist in the early SW and FW - * setup. 
- */ - -/* adminq functions */ -i40e_status i40evf_init_adminq(struct i40e_hw *hw); -i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw); -void i40e_adminq_init_ring_data(struct i40e_hw *hw); -i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, - struct i40e_arq_event_info *e, - u16 *events_pending); -i40e_status i40evf_asq_send_command(struct i40e_hw *hw, - struct i40e_aq_desc *desc, - void *buff, /* can be NULL */ - u16 buff_size, - struct i40e_asq_cmd_details *cmd_details); -bool i40evf_asq_done(struct i40e_hw *hw); - -/* debug function for adminq */ -void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, - void *desc, void *buffer, u16 buf_len); - -void i40e_idle_aq(struct i40e_hw *hw); -void i40evf_resume_aq(struct i40e_hw *hw); -bool i40evf_check_asq_alive(struct i40e_hw *hw); -i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); -const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); -const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err); - -i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, - bool pf_lut, u8 *lut, u16 lut_size); -i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, - bool pf_lut, u8 *lut, u16 lut_size); -i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_get_set_rss_key_data *key); -i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw, - u16 seid, - struct i40e_aqc_get_set_rss_key_data *key); - -i40e_status i40e_set_mac_type(struct i40e_hw *hw); - -extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[]; - -static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) -{ - return i40evf_ptype_lookup[ptype]; -} - -/* prototype for functions used for SW locks */ - -/* i40e_common for VF drivers*/ -void i40e_vf_parse_hw_config(struct i40e_hw *hw, - struct virtchnl_vf_resource *msg); -i40e_status i40e_vf_reset(struct i40e_hw *hw); -i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, - enum virtchnl_ops v_opcode, - i40e_status v_retval, - u8 *msg, u16 msglen, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_set_filter_control(struct i40e_hw *hw, - struct i40e_filter_control_settings *settings); -i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, - u8 *mac_addr, u16 ethtype, u16 flags, - u16 vsi_seid, u16 queue, bool is_add, - struct i40e_control_filter_stats *stats, - struct i40e_asq_cmd_details *cmd_details); -void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, - u16 vsi_seid); -i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw, - u32 reg_addr, u32 *reg_val, - struct i40e_asq_cmd_details *cmd_details); -u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr); -i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw, - u32 reg_addr, u32 reg_val, - struct i40e_asq_cmd_details *cmd_details); -void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val); -i40e_status i40e_aq_set_phy_register(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, - u32 reg_addr, u32 reg_val, - struct i40e_asq_cmd_details *cmd_details); -i40e_status i40e_aq_get_phy_register(struct i40e_hw *hw, - u8 phy_select, u8 dev_addr, - u32 reg_addr, u32 *reg_val, - struct i40e_asq_cmd_details *cmd_details); - -i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page, - u16 reg, u8 phy_addr, u16 *value); -i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, - u16 reg, u8 phy_addr, u16 value); -i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 
page, u16 reg, - u8 phy_addr, u16 *value); -i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page, u16 reg, - u8 phy_addr, u16 value); -u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num); -i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw, - u32 time, u32 interval); -i40e_status i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff, - u16 buff_size, u32 track_id, - u32 *error_offset, u32 *error_info, - struct i40e_asq_cmd_details * - cmd_details); -i40e_status i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff, - u16 buff_size, u8 flags, - struct i40e_asq_cmd_details * - cmd_details); -struct i40e_generic_seg_header * -i40evf_find_segment_in_package(u32 segment_type, - struct i40e_package_header *pkg_header); -enum i40e_status_code -i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *i40e_seg, - u32 track_id); -enum i40e_status_code -i40evf_add_pinfo_to_list(struct i40e_hw *hw, - struct i40e_profile_segment *profile, - u8 *profile_info_sec, u32 track_id); -#endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h deleted file mode 100644 index 49e1f57d9..000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_register.h +++ /dev/null @@ -1,313 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_REGISTER_H_ -#define _I40E_REGISTER_H_ - -#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */ -#define I40E_VFMSIX_PBA1_MAX_INDEX 19 -#define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0 -#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT) -#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TADD1_MAX_INDEX 639 -#define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0 -#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT) -#define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2 -#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT) -#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TMSG1_MAX_INDEX 639 -#define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0 -#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT) -#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TUADD1_MAX_INDEX 639 -#define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0 -#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT) -#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639 -#define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0 -#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT) -#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ -#define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0 -#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT) -#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ -#define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0 -#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT) -#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */ -#define I40E_VF_ARQH1_ARQH_SHIFT 0 -#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT) -#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ -#define 
I40E_VF_ARQLEN1_ARQLEN_SHIFT 0 -#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT) -#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28 -#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT) -#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29 -#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT) -#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 -#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) -#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 -#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) -#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ -#define I40E_VF_ARQT1_ARQT_SHIFT 0 -#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT) -#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ -#define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0 -#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT) -#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ -#define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0 -#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT) -#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */ -#define I40E_VF_ATQH1_ATQH_SHIFT 0 -#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT) -#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ -#define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0 -#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT) -#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28 -#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT) -#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29 -#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT) -#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 -#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) -#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 -#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) -#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ -#define I40E_VF_ATQT1_ATQT_SHIFT 0 -#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT) -#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */ -#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0 -#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT) -#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */ -#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK 
I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT) -#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ -#define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15 -#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0 -#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1 -#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 -#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT) -#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3 -#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5 -#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24 -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT) -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25 -#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT) -#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31 -#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT) -#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */ -#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT) -#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31 -#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT) -#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */ -#define I40E_VFINT_ICR01_INTEVENT_SHIFT 0 -#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1 -#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2 -#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3 -#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT) -#define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4 -#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT) -#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25 -#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT) -#define I40E_VFINT_ICR01_ADMINQ_SHIFT 30 -#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT) -#define I40E_VFINT_ICR01_SWINT_SHIFT 31 -#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT) -#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */ -#define I40E_VFINT_ITR01_MAX_INDEX 2 -#define I40E_VFINT_ITR01_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT) -#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */ -#define I40E_VFINT_ITRN1_MAX_INDEX 2 -#define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0 -#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT) -#define 
I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: CORER */ -#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2 -#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT) -#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_QRX_TAIL1_MAX_INDEX 15 -#define I40E_QRX_TAIL1_TAIL_SHIFT 0 -#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT) -#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */ -#define I40E_QTX_TAIL1_MAX_INDEX 15 -#define I40E_QTX_TAIL1_TAIL_SHIFT 0 -#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT) -#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */ -#define I40E_VFMSIX_PBA_PENBIT_SHIFT 0 -#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT) -#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TADD_MAX_INDEX 16 -#define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0 -#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT) -#define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2 -#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT) -#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TMSG_MAX_INDEX 16 -#define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0 -#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT) -#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TUADD_MAX_INDEX 16 -#define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0 -#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT) -#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */ -#define I40E_VFMSIX_TVCTRL_MAX_INDEX 16 -#define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0 -#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT) -#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */ -#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0 -#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT) -#define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4 -#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT) -#define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8 -#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT) -#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */ -#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0 -#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT) -#define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4 -#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT) -#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8 -#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16 -#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT) -#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24 -#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT) -#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ -#define I40E_VFQF_HENA_MAX_INDEX 1 -#define 
I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0 -#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT) -#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ -#define I40E_VFQF_HKEY_MAX_INDEX 12 -#define I40E_VFQF_HKEY_KEY_0_SHIFT 0 -#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT) -#define I40E_VFQF_HKEY_KEY_1_SHIFT 8 -#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT) -#define I40E_VFQF_HKEY_KEY_2_SHIFT 16 -#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT) -#define I40E_VFQF_HKEY_KEY_3_SHIFT 24 -#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT) -#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ -#define I40E_VFQF_HLUT_MAX_INDEX 15 -#define I40E_VFQF_HLUT_LUT0_SHIFT 0 -#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT) -#define I40E_VFQF_HLUT_LUT1_SHIFT 8 -#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT) -#define I40E_VFQF_HLUT_LUT2_SHIFT 16 -#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT) -#define I40E_VFQF_HLUT_LUT3_SHIFT 24 -#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT) -#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */ -#define I40E_VFQF_HREGION_MAX_INDEX 7 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT) -#define I40E_VFQF_HREGION_REGION_0_SHIFT 1 -#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT) -#define I40E_VFQF_HREGION_REGION_1_SHIFT 5 -#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT) -#define I40E_VFQF_HREGION_REGION_2_SHIFT 9 -#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT) -#define I40E_VFQF_HREGION_REGION_3_SHIFT 13 -#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT) -#define I40E_VFQF_HREGION_REGION_4_SHIFT 17 -#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT) -#define I40E_VFQF_HREGION_REGION_5_SHIFT 21 -#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT) -#define I40E_VFQF_HREGION_REGION_6_SHIFT 25 -#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT) -#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28 -#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, 
I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT) -#define I40E_VFQF_HREGION_REGION_7_SHIFT 29 -#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT) -#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT) -#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 -#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) -#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */ -#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0 -#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT) -#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */ -#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0 -#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT) -#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */ -#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0 -#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT) -#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */ -#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0 -#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4 -#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16 -#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT) -#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31 -#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT) -#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */ -#define I40E_VFPE_CQACK1_PECQID_SHIFT 0 -#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT) -#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */ -#define I40E_VFPE_CQARM1_PECQID_SHIFT 0 -#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT) -#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */ -#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0 -#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT) -#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */ -#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0 -#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT) -#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16 -#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT) -#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */ -#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0 -#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT) -#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31 -#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT) -#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */ -#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0 -#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT) -#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16 -#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT) -#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */ -#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0 -#define 
I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT) -#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */ -#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0 -#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT) -#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */ -#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0 -#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT) -#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */ -#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0 -#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT) -#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20 -#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT) -#endif /* _I40E_REGISTER_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h deleted file mode 100644 index 094387db3..000000000 --- a/drivers/net/ethernet/intel/i40evf/i40e_type.h +++ /dev/null @@ -1,1496 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* Copyright(c) 2013 - 2018 Intel Corporation. */ - -#ifndef _I40E_TYPE_H_ -#define _I40E_TYPE_H_ - -#include "i40e_status.h" -#include "i40e_osdep.h" -#include "i40e_register.h" -#include "i40e_adminq.h" -#include "i40e_hmc.h" -#include "i40e_lan_hmc.h" -#include "i40e_devids.h" - -/* I40E_MASK is a macro used on 32 bit registers */ -#define I40E_MASK(mask, shift) ((u32)(mask) << (shift)) - -#define I40E_MAX_VSI_QP 16 -#define I40E_MAX_VF_VSI 3 -#define I40E_MAX_CHAINED_RX_BUFFERS 5 -#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16 - -/* Max default timeout in ms, */ -#define I40E_MAX_NVM_TIMEOUT 18000 - -/* Max timeout in ms for the phy to respond */ -#define I40E_MAX_PHY_TIMEOUT 500 - -/* Switch from ms to the 1usec global time (this is the GTIME resolution) */ -#define I40E_MS_TO_GTIME(time) ((time) * 1000) - -/* forward declaration */ -struct i40e_hw; -typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); - -/* Data type manipulation macros. */ - -#define I40E_DESC_UNUSED(R) \ - ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \ - (R)->next_to_clean - (R)->next_to_use - 1) - -/* bitfields for Tx queue mapping in QTX_CTL */ -#define I40E_QTX_CTL_VF_QUEUE 0x0 -#define I40E_QTX_CTL_VM_QUEUE 0x1 -#define I40E_QTX_CTL_PF_QUEUE 0x2 - -/* debug masks - set these bits in hw->debug_mask to control output */ -enum i40e_debug_mask { - I40E_DEBUG_INIT = 0x00000001, - I40E_DEBUG_RELEASE = 0x00000002, - - I40E_DEBUG_LINK = 0x00000010, - I40E_DEBUG_PHY = 0x00000020, - I40E_DEBUG_HMC = 0x00000040, - I40E_DEBUG_NVM = 0x00000080, - I40E_DEBUG_LAN = 0x00000100, - I40E_DEBUG_FLOW = 0x00000200, - I40E_DEBUG_DCB = 0x00000400, - I40E_DEBUG_DIAG = 0x00000800, - I40E_DEBUG_FD = 0x00001000, - I40E_DEBUG_PACKAGE = 0x00002000, - - I40E_DEBUG_AQ_MESSAGE = 0x01000000, - I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, - I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, - I40E_DEBUG_AQ_COMMAND = 0x06000000, - I40E_DEBUG_AQ = 0x0F000000, - - I40E_DEBUG_USER = 0xF0000000, - - I40E_DEBUG_ALL = 0xFFFFFFFF -}; - -/* These are structs for managing the hardware information and the operations. - * The structures of function pointers are filled out at init time when we - * know for sure exactly which hardware we're working with. 
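[Editorial note: every register accessor in the headers above is built from a SHIFT/MASK pair via I40E_MASK(), and ring occupancy is computed with I40E_DESC_UNUSED(). The standalone sketch below is not part of this commit; it mirrors those two macro definitions from the deleted i40e_type.h, while the field layout, ring counters and sample values are invented purely for illustration.]

/* Illustrative sketch only -- not part of this diff. Mirrors the
 * I40E_MASK() and I40E_DESC_UNUSED() expressions from the deleted
 * i40e_type.h; the demo register field and ring values are made up.
 */
#include <stdio.h>

#define I40E_MASK(mask, shift)	((unsigned int)(mask) << (shift))

/* Hypothetical 3-bit field occupying bits 5..7 of a 32-bit register */
#define DEMO_FIELD_SHIFT	5
#define DEMO_FIELD_MASK		I40E_MASK(0x7, DEMO_FIELD_SHIFT)

struct demo_ring {
	unsigned short count;		/* descriptors in the ring */
	unsigned short next_to_use;	/* producer index */
	unsigned short next_to_clean;	/* consumer index */
};

/* Same expression as the driver's I40E_DESC_UNUSED(R) */
#define DEMO_DESC_UNUSED(R) \
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	 (R)->next_to_clean - (R)->next_to_use - 1)

int main(void)
{
	unsigned int reg = 0;
	struct demo_ring ring = { .count = 512, .next_to_use = 10,
				  .next_to_clean = 4 };

	/* Write value 5 into the field: clear the field, then OR in the
	 * shifted value -- the usual way the SHIFT/MASK pairs are applied.
	 */
	reg &= ~DEMO_FIELD_MASK;
	reg |= (5u << DEMO_FIELD_SHIFT) & DEMO_FIELD_MASK;

	/* Read the field back: mask first, then shift down. Prints 5. */
	printf("field = %u\n", (reg & DEMO_FIELD_MASK) >> DEMO_FIELD_SHIFT);

	/* Producer ahead of consumer in a 512-entry ring:
	 * 512 + 4 - 10 - 1 = 505 free descriptors.
	 */
	printf("unused descriptors = %d\n", DEMO_DESC_UNUSED(&ring));
	return 0;
}

[Keeping every register field behind a SHIFT/MASK pair lets the layout change between silicon revisions without touching the call sites, which is why the headers consist almost entirely of such pairs.]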
This gives us the - * flexibility of using the same main driver code but adapting to slightly - * different hardware needs as new parts are developed. For this architecture, - * the Firmware and AdminQ are intended to insulate the driver from most of the - * future changes, but these structures will also do part of the job. - */ -enum i40e_mac_type { - I40E_MAC_UNKNOWN = 0, - I40E_MAC_XL710, - I40E_MAC_VF, - I40E_MAC_X722, - I40E_MAC_X722_VF, - I40E_MAC_GENERIC, -}; - -enum i40e_media_type { - I40E_MEDIA_TYPE_UNKNOWN = 0, - I40E_MEDIA_TYPE_FIBER, - I40E_MEDIA_TYPE_BASET, - I40E_MEDIA_TYPE_BACKPLANE, - I40E_MEDIA_TYPE_CX4, - I40E_MEDIA_TYPE_DA, - I40E_MEDIA_TYPE_VIRTUAL -}; - -enum i40e_fc_mode { - I40E_FC_NONE = 0, - I40E_FC_RX_PAUSE, - I40E_FC_TX_PAUSE, - I40E_FC_FULL, - I40E_FC_PFC, - I40E_FC_DEFAULT -}; - -enum i40e_set_fc_aq_failures { - I40E_SET_FC_AQ_FAIL_NONE = 0, - I40E_SET_FC_AQ_FAIL_GET = 1, - I40E_SET_FC_AQ_FAIL_SET = 2, - I40E_SET_FC_AQ_FAIL_UPDATE = 4, - I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6 -}; - -enum i40e_vsi_type { - I40E_VSI_MAIN = 0, - I40E_VSI_VMDQ1 = 1, - I40E_VSI_VMDQ2 = 2, - I40E_VSI_CTRL = 3, - I40E_VSI_FCOE = 4, - I40E_VSI_MIRROR = 5, - I40E_VSI_SRIOV = 6, - I40E_VSI_FDIR = 7, - I40E_VSI_TYPE_UNKNOWN -}; - -enum i40e_queue_type { - I40E_QUEUE_TYPE_RX = 0, - I40E_QUEUE_TYPE_TX, - I40E_QUEUE_TYPE_PE_CEQ, - I40E_QUEUE_TYPE_UNKNOWN -}; - -struct i40e_link_status { - enum i40e_aq_phy_type phy_type; - enum i40e_aq_link_speed link_speed; - u8 link_info; - u8 an_info; - u8 req_fec_info; - u8 fec_info; - u8 ext_info; - u8 loopback; - /* is Link Status Event notification to SW enabled */ - bool lse_enable; - u16 max_frame_size; - bool crc_enable; - u8 pacing; - u8 requested_speeds; - u8 module_type[3]; - /* 1st byte: module identifier */ -#define I40E_MODULE_TYPE_SFP 0x03 -#define I40E_MODULE_TYPE_QSFP 0x0D - /* 2nd byte: ethernet compliance codes for 10/40G */ -#define I40E_MODULE_TYPE_40G_ACTIVE 0x01 -#define I40E_MODULE_TYPE_40G_LR4 0x02 -#define I40E_MODULE_TYPE_40G_SR4 0x04 -#define I40E_MODULE_TYPE_40G_CR4 0x08 -#define I40E_MODULE_TYPE_10G_BASE_SR 0x10 -#define I40E_MODULE_TYPE_10G_BASE_LR 0x20 -#define I40E_MODULE_TYPE_10G_BASE_LRM 0x40 -#define I40E_MODULE_TYPE_10G_BASE_ER 0x80 - /* 3rd byte: ethernet compliance codes for 1G */ -#define I40E_MODULE_TYPE_1000BASE_SX 0x01 -#define I40E_MODULE_TYPE_1000BASE_LX 0x02 -#define I40E_MODULE_TYPE_1000BASE_CX 0x04 -#define I40E_MODULE_TYPE_1000BASE_T 0x08 -}; - -struct i40e_phy_info { - struct i40e_link_status link_info; - struct i40e_link_status link_info_old; - bool get_link_info; - enum i40e_media_type media_type; - /* all the phy types the NVM is capable of */ - u64 phy_types; -}; - -#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII) -#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) -#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) -#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) -#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) -#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI) -#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI) -#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI) -#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI) -#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI) -#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) -#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) -#define I40E_CAP_PHY_TYPE_10GBASE_AOC 
BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) -#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) -#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX) -#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T) -#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T) -#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) -#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) -#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) -#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) -#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) -#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) -#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) -#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) -#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) -#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \ - BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) -#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) -/* Defining the macro I40E_TYPE_OFFSET to implement a bit shift for some - * PHY types. There is an unused bit (31) in the I40E_CAP_PHY_TYPE_* bit - * fields but no corresponding gap in the i40e_aq_phy_type enumeration. So, - * a shift is needed to adjust for this with values larger than 31. The - * only affected values are I40E_PHY_TYPE_25GBASE_*. - */ -#define I40E_PHY_TYPE_OFFSET 1 -#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_PHY_TYPE_25GBASE_KR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_PHY_TYPE_25GBASE_CR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_PHY_TYPE_25GBASE_SR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_PHY_TYPE_25GBASE_LR + \ - I40E_PHY_TYPE_OFFSET) -#define I40E_HW_CAP_MAX_GPIO 30 -/* Capabilities of a PF or a VF or the whole device */ -struct i40e_hw_capabilities { - u32 switch_mode; -#define I40E_NVM_IMAGE_TYPE_EVB 0x0 -#define I40E_NVM_IMAGE_TYPE_CLOUD 0x2 -#define I40E_NVM_IMAGE_TYPE_UDP_CLOUD 0x3 - - u32 management_mode; - u32 mng_protocols_over_mctp; -#define I40E_MNG_PROTOCOL_PLDM 0x2 -#define I40E_MNG_PROTOCOL_OEM_COMMANDS 0x4 -#define I40E_MNG_PROTOCOL_NCSI 0x8 - u32 npar_enable; - u32 os2bmc; - u32 valid_functions; - bool sr_iov_1_1; - bool vmdq; - bool evb_802_1_qbg; /* Edge Virtual Bridging */ - bool evb_802_1_qbh; /* Bridge Port Extension */ - bool dcb; - bool fcoe; - bool iscsi; /* Indicates iSCSI enabled */ - bool flex10_enable; - bool flex10_capable; - u32 flex10_mode; -#define I40E_FLEX10_MODE_UNKNOWN 0x0 -#define I40E_FLEX10_MODE_DCC 0x1 -#define I40E_FLEX10_MODE_DCI 0x2 - - u32 flex10_status; -#define I40E_FLEX10_STATUS_DCC_ERROR 0x1 -#define I40E_FLEX10_STATUS_VC_MODE 0x2 - - bool sec_rev_disabled; - bool update_disabled; -#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1 -#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2 - - bool mgmt_cem; - bool ieee_1588; - bool iwarp; - bool fd; - u32 fd_filters_guaranteed; - u32 fd_filters_best_effort; - bool rss; - u32 rss_table_size; - u32 rss_table_entry_width; - bool led[I40E_HW_CAP_MAX_GPIO]; - bool sdp[I40E_HW_CAP_MAX_GPIO]; - u32 nvm_image_type; - u32 num_flow_director_filters; - u32 num_vfs; - u32 vf_base_id; - u32 num_vsis; - u32 num_rx_qp; - u32 num_tx_qp; - u32 base_queue; - u32 num_msix_vectors; - u32 num_msix_vectors_vf; - u32 led_pin_num; - u32 sdp_pin_num; - u32 
mdio_port_num; - u32 mdio_port_mode; - u8 rx_buf_chain_len; - u32 enabled_tcmap; - u32 maxtc; - u64 wr_csr_prot; -}; - -struct i40e_mac_info { - enum i40e_mac_type type; - u8 addr[ETH_ALEN]; - u8 perm_addr[ETH_ALEN]; - u8 san_addr[ETH_ALEN]; - u16 max_fcoeq; -}; - -enum i40e_aq_resources_ids { - I40E_NVM_RESOURCE_ID = 1 -}; - -enum i40e_aq_resource_access_type { - I40E_RESOURCE_READ = 1, - I40E_RESOURCE_WRITE -}; - -struct i40e_nvm_info { - u64 hw_semaphore_timeout; /* usec global time (GTIME resolution) */ - u32 timeout; /* [ms] */ - u16 sr_size; /* Shadow RAM size in words */ - bool blank_nvm_mode; /* is NVM empty (no FW present)*/ - u16 version; /* NVM package version */ - u32 eetrack; /* NVM data version */ - u32 oem_ver; /* OEM version info */ -}; - -/* definitions used in NVM update support */ - -enum i40e_nvmupd_cmd { - I40E_NVMUPD_INVALID, - I40E_NVMUPD_READ_CON, - I40E_NVMUPD_READ_SNT, - I40E_NVMUPD_READ_LCB, - I40E_NVMUPD_READ_SA, - I40E_NVMUPD_WRITE_ERA, - I40E_NVMUPD_WRITE_CON, - I40E_NVMUPD_WRITE_SNT, - I40E_NVMUPD_WRITE_LCB, - I40E_NVMUPD_WRITE_SA, - I40E_NVMUPD_CSUM_CON, - I40E_NVMUPD_CSUM_SA, - I40E_NVMUPD_CSUM_LCB, - I40E_NVMUPD_STATUS, - I40E_NVMUPD_EXEC_AQ, - I40E_NVMUPD_GET_AQ_RESULT, - I40E_NVMUPD_GET_AQ_EVENT, -}; - -enum i40e_nvmupd_state { - I40E_NVMUPD_STATE_INIT, - I40E_NVMUPD_STATE_READING, - I40E_NVMUPD_STATE_WRITING, - I40E_NVMUPD_STATE_INIT_WAIT, - I40E_NVMUPD_STATE_WRITE_WAIT, - I40E_NVMUPD_STATE_ERROR -}; - -/* nvm_access definition and its masks/shifts need to be accessible to - * application, core driver, and shared code. Where is the right file? - */ -#define I40E_NVM_READ 0xB -#define I40E_NVM_WRITE 0xC - -#define I40E_NVM_MOD_PNT_MASK 0xFF - -#define I40E_NVM_TRANS_SHIFT 8 -#define I40E_NVM_TRANS_MASK (0xf << I40E_NVM_TRANS_SHIFT) -#define I40E_NVM_PRESERVATION_FLAGS_SHIFT 12 -#define I40E_NVM_PRESERVATION_FLAGS_MASK \ - (0x3 << I40E_NVM_PRESERVATION_FLAGS_SHIFT) -#define I40E_NVM_PRESERVATION_FLAGS_SELECTED 0x01 -#define I40E_NVM_PRESERVATION_FLAGS_ALL 0x02 -#define I40E_NVM_CON 0x0 -#define I40E_NVM_SNT 0x1 -#define I40E_NVM_LCB 0x2 -#define I40E_NVM_SA (I40E_NVM_SNT | I40E_NVM_LCB) -#define I40E_NVM_ERA 0x4 -#define I40E_NVM_CSUM 0x8 -#define I40E_NVM_AQE 0xe -#define I40E_NVM_EXEC 0xf - -#define I40E_NVM_ADAPT_SHIFT 16 -#define I40E_NVM_ADAPT_MASK (0xffff << I40E_NVM_ADAPT_SHIFT) - -#define I40E_NVMUPD_MAX_DATA 4096 -#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */ - -struct i40e_nvm_access { - u32 command; - u32 config; - u32 offset; /* in bytes */ - u32 data_size; /* in bytes */ - u8 data[1]; -}; - -/* (Q)SFP module access definitions */ -#define I40E_I2C_EEPROM_DEV_ADDR 0xA0 -#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2 -#define I40E_MODULE_TYPE_ADDR 0x00 -#define I40E_MODULE_REVISION_ADDR 0x01 -#define I40E_MODULE_SFF_8472_COMP 0x5E -#define I40E_MODULE_SFF_8472_SWAP 0x5C -#define I40E_MODULE_SFF_ADDR_MODE 0x04 -#define I40E_MODULE_TYPE_QSFP_PLUS 0x0D -#define I40E_MODULE_TYPE_QSFP28 0x11 -#define I40E_MODULE_QSFP_MAX_LEN 640 - -/* PCI bus types */ -enum i40e_bus_type { - i40e_bus_type_unknown = 0, - i40e_bus_type_pci, - i40e_bus_type_pcix, - i40e_bus_type_pci_express, - i40e_bus_type_reserved -}; - -/* PCI bus speeds */ -enum i40e_bus_speed { - i40e_bus_speed_unknown = 0, - i40e_bus_speed_33 = 33, - i40e_bus_speed_66 = 66, - i40e_bus_speed_100 = 100, - i40e_bus_speed_120 = 120, - i40e_bus_speed_133 = 133, - i40e_bus_speed_2500 = 2500, - i40e_bus_speed_5000 = 5000, - i40e_bus_speed_8000 = 8000, - i40e_bus_speed_reserved -}; - -/* PCI bus widths */ 
-enum i40e_bus_width { - i40e_bus_width_unknown = 0, - i40e_bus_width_pcie_x1 = 1, - i40e_bus_width_pcie_x2 = 2, - i40e_bus_width_pcie_x4 = 4, - i40e_bus_width_pcie_x8 = 8, - i40e_bus_width_32 = 32, - i40e_bus_width_64 = 64, - i40e_bus_width_reserved -}; - -/* Bus parameters */ -struct i40e_bus_info { - enum i40e_bus_speed speed; - enum i40e_bus_width width; - enum i40e_bus_type type; - - u16 func; - u16 device; - u16 lan_id; - u16 bus_id; -}; - -/* Flow control (FC) parameters */ -struct i40e_fc_info { - enum i40e_fc_mode current_mode; /* FC mode in effect */ - enum i40e_fc_mode requested_mode; /* FC mode requested by caller */ -}; - -#define I40E_MAX_TRAFFIC_CLASS 8 -#define I40E_MAX_USER_PRIORITY 8 -#define I40E_DCBX_MAX_APPS 32 -#define I40E_LLDPDU_SIZE 1500 - -/* IEEE 802.1Qaz ETS Configuration data */ -struct i40e_ieee_ets_config { - u8 willing; - u8 cbs; - u8 maxtcs; - u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; - u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; - u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; -}; - -/* IEEE 802.1Qaz ETS Recommendation data */ -struct i40e_ieee_ets_recommend { - u8 prioritytable[I40E_MAX_TRAFFIC_CLASS]; - u8 tcbwtable[I40E_MAX_TRAFFIC_CLASS]; - u8 tsatable[I40E_MAX_TRAFFIC_CLASS]; -}; - -/* IEEE 802.1Qaz PFC Configuration data */ -struct i40e_ieee_pfc_config { - u8 willing; - u8 mbc; - u8 pfccap; - u8 pfcenable; -}; - -/* IEEE 802.1Qaz Application Priority data */ -struct i40e_ieee_app_priority_table { - u8 priority; - u8 selector; - u16 protocolid; -}; - -struct i40e_dcbx_config { - u32 numapps; - u32 tlv_status; /* CEE mode TLV status */ - struct i40e_ieee_ets_config etscfg; - struct i40e_ieee_ets_recommend etsrec; - struct i40e_ieee_pfc_config pfc; - struct i40e_ieee_app_priority_table app[I40E_DCBX_MAX_APPS]; -}; - -/* Port hardware description */ -struct i40e_hw { - u8 __iomem *hw_addr; - void *back; - - /* subsystem structs */ - struct i40e_phy_info phy; - struct i40e_mac_info mac; - struct i40e_bus_info bus; - struct i40e_nvm_info nvm; - struct i40e_fc_info fc; - - /* pci info */ - u16 device_id; - u16 vendor_id; - u16 subsystem_device_id; - u16 subsystem_vendor_id; - u8 revision_id; - u8 port; - bool adapter_stopped; - - /* capabilities for entire device and PCI func */ - struct i40e_hw_capabilities dev_caps; - struct i40e_hw_capabilities func_caps; - - /* Flow Director shared filter space */ - u16 fdir_shared_filter_count; - - /* device profile info */ - u8 pf_id; - u16 main_vsi_seid; - - /* for multi-function MACs */ - u16 partition_id; - u16 num_partitions; - u16 num_ports; - - /* Closest numa node to the device */ - u16 numa_node; - - /* Admin Queue info */ - struct i40e_adminq_info aq; - - /* state of nvm update process */ - enum i40e_nvmupd_state nvmupd_state; - struct i40e_aq_desc nvm_wb_desc; - struct i40e_aq_desc nvm_aq_event_desc; - struct i40e_virt_mem nvm_buff; - bool nvm_release_on_done; - u16 nvm_wait_opcode; - - /* HMC info */ - struct i40e_hmc_info hmc; /* HMC info struct */ - - /* LLDP/DCBX Status */ - u16 dcbx_status; - -#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1) -#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) - - /* DCBX info */ - struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */ - struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */ - struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */ - - /* Used in set switch config AQ command */ - u16 switch_tag; - u16 first_tag; - u16 second_tag; - - /* debug mask */ - u32 debug_mask; - char err_str[16]; -}; - -static inline bool i40e_is_vf(struct i40e_hw *hw) 
-{ - return (hw->mac.type == I40E_MAC_VF || - hw->mac.type == I40E_MAC_X722_VF); -} - -struct i40e_driver_version { - u8 major_version; - u8 minor_version; - u8 build_version; - u8 subbuild_version; - u8 driver_string[32]; -}; - -/* RX Descriptors */ -union i40e_16byte_rx_desc { - struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - } read; - struct { - struct { - struct { - union { - __le16 mirroring_status; - __le16 fcoe_ctx_id; - } mirr_fcoe; - __le16 l2tag1; - } lo_dword; - union { - __le32 rss; /* RSS Hash */ - __le32 fd_id; /* Flow director filter id */ - __le32 fcoe_param; /* FCoE DDP Context id */ - } hi_dword; - } qword0; - struct { - /* ext status/error/pktype/length */ - __le64 status_error_len; - } qword1; - } wb; /* writeback */ -}; - -union i40e_32byte_rx_desc { - struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - /* bit 0 of hdr_buffer_addr is DD bit */ - __le64 rsvd1; - __le64 rsvd2; - } read; - struct { - struct { - struct { - union { - __le16 mirroring_status; - __le16 fcoe_ctx_id; - } mirr_fcoe; - __le16 l2tag1; - } lo_dword; - union { - __le32 rss; /* RSS Hash */ - __le32 fcoe_param; /* FCoE DDP Context id */ - /* Flow director filter id in case of - * Programming status desc WB - */ - __le32 fd_id; - } hi_dword; - } qword0; - struct { - /* status/error/pktype/length */ - __le64 status_error_len; - } qword1; - struct { - __le16 ext_status; /* extended status */ - __le16 rsvd; - __le16 l2tag2_1; - __le16 l2tag2_2; - } qword2; - struct { - union { - __le32 flex_bytes_lo; - __le32 pe_status; - } lo_dword; - union { - __le32 flex_bytes_hi; - __le32 fd_id; - } hi_dword; - } qword3; - } wb; /* writeback */ -}; - -enum i40e_rx_desc_status_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_DESC_STATUS_DD_SHIFT = 0, - I40E_RX_DESC_STATUS_EOF_SHIFT = 1, - I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, - I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3, - I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, - I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ - I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, - /* Note: Bit 8 is reserved in X710 and XL710 */ - I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, - I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ - I40E_RX_DESC_STATUS_FLM_SHIFT = 11, - I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ - I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, - I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, - I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ - /* Note: For non-tunnel packets INT_UDP_0 is the right status for - * UDP header - */ - I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, - I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */ -}; - -#define I40E_RXD_QW1_STATUS_SHIFT 0 -#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \ - << I40E_RXD_QW1_STATUS_SHIFT) - -#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT -#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ - I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) - -#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT -#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \ - BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) - -enum i40e_rx_desc_fltstat_values { - I40E_RX_DESC_FLTSTAT_NO_DATA = 0, - I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? 
FD_ID : RSV */ - I40E_RX_DESC_FLTSTAT_RSV = 2, - I40E_RX_DESC_FLTSTAT_RSS_HASH = 3, -}; - -#define I40E_RXD_QW1_ERROR_SHIFT 19 -#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT) - -enum i40e_rx_desc_error_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_DESC_ERROR_RXE_SHIFT = 0, - I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1, - I40E_RX_DESC_ERROR_HBO_SHIFT = 2, - I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */ - I40E_RX_DESC_ERROR_IPE_SHIFT = 3, - I40E_RX_DESC_ERROR_L4E_SHIFT = 4, - I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, - I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6, - I40E_RX_DESC_ERROR_PPRS_SHIFT = 7 -}; - -enum i40e_rx_desc_error_l3l4e_fcoe_masks { - I40E_RX_DESC_ERROR_L3L4E_NONE = 0, - I40E_RX_DESC_ERROR_L3L4E_PROT = 1, - I40E_RX_DESC_ERROR_L3L4E_FC = 2, - I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3, - I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 -}; - -#define I40E_RXD_QW1_PTYPE_SHIFT 30 -#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) - -/* Packet type non-ip values */ -enum i40e_rx_l2_ptype { - I40E_RX_PTYPE_L2_RESERVED = 0, - I40E_RX_PTYPE_L2_MAC_PAY2 = 1, - I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, - I40E_RX_PTYPE_L2_FIP_PAY2 = 3, - I40E_RX_PTYPE_L2_OUI_PAY2 = 4, - I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, - I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, - I40E_RX_PTYPE_L2_ECP_PAY2 = 7, - I40E_RX_PTYPE_L2_EVB_PAY2 = 8, - I40E_RX_PTYPE_L2_QCN_PAY2 = 9, - I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, - I40E_RX_PTYPE_L2_ARP = 11, - I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, - I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, - I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, - I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, - I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, - I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, - I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, - I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, - I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, - I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, - I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, - I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, - I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, - I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 -}; - -struct i40e_rx_ptype_decoded { - u32 ptype:8; - u32 known:1; - u32 outer_ip:1; - u32 outer_ip_ver:1; - u32 outer_frag:1; - u32 tunnel_type:3; - u32 tunnel_end_prot:2; - u32 tunnel_end_frag:1; - u32 inner_prot:4; - u32 payload_layer:3; -}; - -enum i40e_rx_ptype_outer_ip { - I40E_RX_PTYPE_OUTER_L2 = 0, - I40E_RX_PTYPE_OUTER_IP = 1 -}; - -enum i40e_rx_ptype_outer_ip_ver { - I40E_RX_PTYPE_OUTER_NONE = 0, - I40E_RX_PTYPE_OUTER_IPV4 = 0, - I40E_RX_PTYPE_OUTER_IPV6 = 1 -}; - -enum i40e_rx_ptype_outer_fragmented { - I40E_RX_PTYPE_NOT_FRAG = 0, - I40E_RX_PTYPE_FRAG = 1 -}; - -enum i40e_rx_ptype_tunnel_type { - I40E_RX_PTYPE_TUNNEL_NONE = 0, - I40E_RX_PTYPE_TUNNEL_IP_IP = 1, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, -}; - -enum i40e_rx_ptype_tunnel_end_prot { - I40E_RX_PTYPE_TUNNEL_END_NONE = 0, - I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, - I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, -}; - -enum i40e_rx_ptype_inner_prot { - I40E_RX_PTYPE_INNER_PROT_NONE = 0, - I40E_RX_PTYPE_INNER_PROT_UDP = 1, - I40E_RX_PTYPE_INNER_PROT_TCP = 2, - I40E_RX_PTYPE_INNER_PROT_SCTP = 3, - I40E_RX_PTYPE_INNER_PROT_ICMP = 4, - I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 -}; - -enum i40e_rx_ptype_payload_layer { - I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, -}; - -#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 -#define I40E_RXD_QW1_LENGTH_PBUF_MASK 
(0x3FFFULL << \ - I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - -#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 -#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ - I40E_RXD_QW1_LENGTH_HBUF_SHIFT) - -#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 -#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) - -enum i40e_rx_desc_ext_status_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0, - I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, - I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ - I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ - I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, - I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, - I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, -}; - -enum i40e_rx_desc_pe_status_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ - I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */ - I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */ - I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24, - I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25, - I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26, - I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27, - I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28, - I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 -}; - -#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 -#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 - -#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 -#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \ - I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT) - -#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19 -#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \ - I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT) - -enum i40e_rx_prog_status_desc_status_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0, - I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */ -}; - -enum i40e_rx_prog_status_desc_prog_id_masks { - I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1, - I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2, - I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4, -}; - -enum i40e_rx_prog_status_desc_error_bits { - /* Note: These are predefined bit offsets */ - I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, - I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, - I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, - I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 -}; - -/* TX Descriptor */ -struct i40e_tx_desc { - __le64 buffer_addr; /* Address of descriptor's data buf */ - __le64 cmd_type_offset_bsz; -}; - -#define I40E_TXD_QW1_DTYPE_SHIFT 0 -#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) - -enum i40e_tx_desc_dtype_value { - I40E_TX_DESC_DTYPE_DATA = 0x0, - I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ - I40E_TX_DESC_DTYPE_CONTEXT = 0x1, - I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2, - I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8, - I40E_TX_DESC_DTYPE_DDP_CTX = 0x9, - I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB, - I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC, - I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD, - I40E_TX_DESC_DTYPE_DESC_DONE = 0xF -}; - -#define I40E_TXD_QW1_CMD_SHIFT 4 -#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) - -enum i40e_tx_desc_cmd_bits { - I40E_TX_DESC_CMD_EOP = 0x0001, - I40E_TX_DESC_CMD_RS = 0x0002, - I40E_TX_DESC_CMD_ICRC = 0x0004, - I40E_TX_DESC_CMD_IL2TAG1 = 0x0008, - I40E_TX_DESC_CMD_DUMMY = 0x0010, - I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */ - I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ - I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ - 
I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ - I40E_TX_DESC_CMD_FCOET = 0x0080, - I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */ - I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */ -}; - -#define I40E_TXD_QW1_OFFSET_SHIFT 16 -#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ - I40E_TXD_QW1_OFFSET_SHIFT) - -enum i40e_tx_desc_length_fields { - /* Note: These are predefined bit offsets */ - I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */ - I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */ - I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */ -}; - -#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34 -#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ - I40E_TXD_QW1_TX_BUF_SZ_SHIFT) - -#define I40E_TXD_QW1_L2TAG1_SHIFT 48 -#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT) - -/* Context descriptors */ -struct i40e_tx_context_desc { - __le32 tunneling_params; - __le16 l2tag2; - __le16 rsvd; - __le64 type_cmd_tso_mss; -}; - -#define I40E_TXD_CTX_QW1_DTYPE_SHIFT 0 -#define I40E_TXD_CTX_QW1_DTYPE_MASK (0xFUL << I40E_TXD_CTX_QW1_DTYPE_SHIFT) - -#define I40E_TXD_CTX_QW1_CMD_SHIFT 4 -#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) - -enum i40e_tx_ctx_desc_cmd_bits { - I40E_TX_CTX_DESC_TSO = 0x01, - I40E_TX_CTX_DESC_TSYN = 0x02, - I40E_TX_CTX_DESC_IL2TAG2 = 0x04, - I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, - I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00, - I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10, - I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20, - I40E_TX_CTX_DESC_SWTCH_VSI = 0x30, - I40E_TX_CTX_DESC_SWPE = 0x40 -}; - -#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30 -#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ - I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) - -#define I40E_TXD_CTX_QW1_MSS_SHIFT 50 -#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ - I40E_TXD_CTX_QW1_MSS_SHIFT) - -#define I40E_TXD_CTX_QW1_VSI_SHIFT 50 -#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT) - -#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0 -#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ - I40E_TXD_CTX_QW0_EXT_IP_SHIFT) - -enum i40e_tx_ctx_desc_eipt_offload { - I40E_TX_CTX_EXT_IP_NONE = 0x0, - I40E_TX_CTX_EXT_IP_IPV6 = 0x1, - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2, - I40E_TX_CTX_EXT_IP_IPV4 = 0x3 -}; - -#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 -#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ - I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT) - -#define I40E_TXD_CTX_QW0_NATT_SHIFT 9 -#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) - -#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) -#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) - -#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 -#define I40E_TXD_CTX_QW0_EIP_NOINC_MASK \ - BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) - -#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK - -#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12 -#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \ - I40E_TXD_CTX_QW0_NATLEN_SHIFT) - -#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19 -#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ - I40E_TXD_CTX_QW0_DECTTL_SHIFT) - -#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23 -#define I40E_TXD_CTX_QW0_L4T_CS_MASK 
BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT) -struct i40e_filter_program_desc { - __le32 qindex_flex_ptype_vsi; - __le32 rsvd; - __le32 dtype_cmd_cntindex; - __le32 fd_id; -}; -#define I40E_TXD_FLTR_QW0_QINDEX_SHIFT 0 -#define I40E_TXD_FLTR_QW0_QINDEX_MASK (0x7FFUL << \ - I40E_TXD_FLTR_QW0_QINDEX_SHIFT) -#define I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT 11 -#define I40E_TXD_FLTR_QW0_FLEXOFF_MASK (0x7UL << \ - I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) -#define I40E_TXD_FLTR_QW0_PCTYPE_SHIFT 17 -#define I40E_TXD_FLTR_QW0_PCTYPE_MASK (0x3FUL << \ - I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) - -/* Packet Classifier Types for filters */ -enum i40e_filter_pctype { - /* Note: Values 0-28 are reserved for future use. - * Value 29, 30, 32 are not supported on XL710 and X710. - */ - I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, - I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, - I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, - I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, - I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, - I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, - I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, - I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, - /* Note: Values 37-38 are reserved for future use. - * Value 39, 40, 42 are not supported on XL710 and X710. - */ - I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, - I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, - I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, - I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, - I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, - I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, - I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, - I40E_FILTER_PCTYPE_FRAG_IPV6 = 46, - /* Note: Value 47 is reserved for future use */ - I40E_FILTER_PCTYPE_FCOE_OX = 48, - I40E_FILTER_PCTYPE_FCOE_RX = 49, - I40E_FILTER_PCTYPE_FCOE_OTHER = 50, - /* Note: Values 51-62 are reserved for future use */ - I40E_FILTER_PCTYPE_L2_PAYLOAD = 63, -}; - -enum i40e_filter_program_desc_dest { - I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET = 0x0, - I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX = 0x1, - I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_OTHER = 0x2, -}; - -enum i40e_filter_program_desc_fd_status { - I40E_FILTER_PROGRAM_DESC_FD_STATUS_NONE = 0x0, - I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID = 0x1, - I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID_4FLEX_BYTES = 0x2, - I40E_FILTER_PROGRAM_DESC_FD_STATUS_8FLEX_BYTES = 0x3, -}; - -#define I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT 23 -#define I40E_TXD_FLTR_QW0_DEST_VSI_MASK (0x1FFUL << \ - I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) - -#define I40E_TXD_FLTR_QW1_CMD_SHIFT 4 -#define I40E_TXD_FLTR_QW1_CMD_MASK (0xFFFFULL << \ - I40E_TXD_FLTR_QW1_CMD_SHIFT) - -#define I40E_TXD_FLTR_QW1_PCMD_SHIFT (0x0ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_PCMD_MASK (0x7ULL << I40E_TXD_FLTR_QW1_PCMD_SHIFT) - -enum i40e_filter_program_desc_pcmd { - I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE = 0x1, - I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE = 0x2, -}; - -#define I40E_TXD_FLTR_QW1_DEST_SHIFT (0x3ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_DEST_MASK (0x3ULL << I40E_TXD_FLTR_QW1_DEST_SHIFT) - -#define I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT (0x7ULL + I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_CNT_ENA_MASK BIT_ULL(I40E_TXD_FLTR_QW1_CNT_ENA_SHIFT) - -#define I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT (0x9ULL + \ - I40E_TXD_FLTR_QW1_CMD_SHIFT) -#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \ - I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) - -#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20 -#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \ - I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) - -enum i40e_filter_type { - 
I40E_FLOW_DIRECTOR_FLTR = 0, - I40E_PE_QUAD_HASH_FLTR = 1, - I40E_ETHERTYPE_FLTR, - I40E_FCOE_CTX_FLTR, - I40E_MAC_VLAN_FLTR, - I40E_HASH_FLTR -}; - -struct i40e_vsi_context { - u16 seid; - u16 uplink_seid; - u16 vsi_number; - u16 vsis_allocated; - u16 vsis_unallocated; - u16 flags; - u8 pf_num; - u8 vf_num; - u8 connection_type; - struct i40e_aqc_vsi_properties_data info; -}; - -struct i40e_veb_context { - u16 seid; - u16 uplink_seid; - u16 veb_number; - u16 vebs_allocated; - u16 vebs_unallocated; - u16 flags; - struct i40e_aqc_get_veb_parameters_completion info; -}; - -/* Statistics collected by each port, VSI, VEB, and S-channel */ -struct i40e_eth_stats { - u64 rx_bytes; /* gorc */ - u64 rx_unicast; /* uprc */ - u64 rx_multicast; /* mprc */ - u64 rx_broadcast; /* bprc */ - u64 rx_discards; /* rdpc */ - u64 rx_unknown_protocol; /* rupp */ - u64 tx_bytes; /* gotc */ - u64 tx_unicast; /* uptc */ - u64 tx_multicast; /* mptc */ - u64 tx_broadcast; /* bptc */ - u64 tx_discards; /* tdpc */ - u64 tx_errors; /* tepc */ -}; - -/* Statistics collected per VEB per TC */ -struct i40e_veb_tc_stats { - u64 tc_rx_packets[I40E_MAX_TRAFFIC_CLASS]; - u64 tc_rx_bytes[I40E_MAX_TRAFFIC_CLASS]; - u64 tc_tx_packets[I40E_MAX_TRAFFIC_CLASS]; - u64 tc_tx_bytes[I40E_MAX_TRAFFIC_CLASS]; -}; - -/* Statistics collected by the MAC */ -struct i40e_hw_port_stats { - /* eth stats collected by the port */ - struct i40e_eth_stats eth; - - /* additional port specific stats */ - u64 tx_dropped_link_down; /* tdold */ - u64 crc_errors; /* crcerrs */ - u64 illegal_bytes; /* illerrc */ - u64 error_bytes; /* errbc */ - u64 mac_local_faults; /* mlfc */ - u64 mac_remote_faults; /* mrfc */ - u64 rx_length_errors; /* rlec */ - u64 link_xon_rx; /* lxonrxc */ - u64 link_xoff_rx; /* lxoffrxc */ - u64 priority_xon_rx[8]; /* pxonrxc[8] */ - u64 priority_xoff_rx[8]; /* pxoffrxc[8] */ - u64 link_xon_tx; /* lxontxc */ - u64 link_xoff_tx; /* lxofftxc */ - u64 priority_xon_tx[8]; /* pxontxc[8] */ - u64 priority_xoff_tx[8]; /* pxofftxc[8] */ - u64 priority_xon_2_xoff[8]; /* pxon2offc[8] */ - u64 rx_size_64; /* prc64 */ - u64 rx_size_127; /* prc127 */ - u64 rx_size_255; /* prc255 */ - u64 rx_size_511; /* prc511 */ - u64 rx_size_1023; /* prc1023 */ - u64 rx_size_1522; /* prc1522 */ - u64 rx_size_big; /* prc9522 */ - u64 rx_undersize; /* ruc */ - u64 rx_fragments; /* rfc */ - u64 rx_oversize; /* roc */ - u64 rx_jabber; /* rjc */ - u64 tx_size_64; /* ptc64 */ - u64 tx_size_127; /* ptc127 */ - u64 tx_size_255; /* ptc255 */ - u64 tx_size_511; /* ptc511 */ - u64 tx_size_1023; /* ptc1023 */ - u64 tx_size_1522; /* ptc1522 */ - u64 tx_size_big; /* ptc9522 */ - u64 mac_short_packet_dropped; /* mspdc */ - u64 checksum_error; /* xec */ - /* flow director stats */ - u64 fd_atr_match; - u64 fd_sb_match; - u64 fd_atr_tunnel_match; - u32 fd_atr_status; - u32 fd_sb_status; - /* EEE LPI */ - u32 tx_lpi_status; - u32 rx_lpi_status; - u64 tx_lpi_count; /* etlpic */ - u64 rx_lpi_count; /* erlpic */ -}; - -/* Checksum and Shadow RAM pointers */ -#define I40E_SR_NVM_CONTROL_WORD 0x00 -#define I40E_EMP_MODULE_PTR 0x0F -#define I40E_SR_EMP_MODULE_PTR 0x48 -#define I40E_NVM_OEM_VER_OFF 0x83 -#define I40E_SR_NVM_DEV_STARTER_VERSION 0x18 -#define I40E_SR_NVM_WAKE_ON_LAN 0x19 -#define I40E_SR_ALTERNATE_SAN_MAC_ADDRESS_PTR 0x27 -#define I40E_SR_NVM_EETRACK_LO 0x2D -#define I40E_SR_NVM_EETRACK_HI 0x2E -#define I40E_SR_VPD_PTR 0x2F -#define I40E_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E -#define I40E_SR_SW_CHECKSUM_WORD 0x3F - -/* Auxiliary field, mask and shift definition for Shadow 
RAM and NVM Flash */ -#define I40E_SR_VPD_MODULE_MAX_SIZE 1024 -#define I40E_SR_PCIE_ALT_MODULE_MAX_SIZE 1024 -#define I40E_SR_CONTROL_WORD_1_SHIFT 0x06 -#define I40E_SR_CONTROL_WORD_1_MASK (0x03 << I40E_SR_CONTROL_WORD_1_SHIFT) -#define I40E_SR_CONTROL_WORD_1_NVM_BANK_VALID BIT(5) -#define I40E_SR_NVM_MAP_STRUCTURE_TYPE BIT(12) -#define I40E_PTR_TYPE BIT(15) - -/* Shadow RAM related */ -#define I40E_SR_SECTOR_SIZE_IN_WORDS 0x800 -#define I40E_SR_WORDS_IN_1KB 512 -/* Checksum should be calculated such that after adding all the words, - * including the checksum word itself, the sum should be 0xBABA. - */ -#define I40E_SR_SW_CHECKSUM_BASE 0xBABA - -#define I40E_SRRD_SRCTL_ATTEMPTS 100000 - -enum i40e_switch_element_types { - I40E_SWITCH_ELEMENT_TYPE_MAC = 1, - I40E_SWITCH_ELEMENT_TYPE_PF = 2, - I40E_SWITCH_ELEMENT_TYPE_VF = 3, - I40E_SWITCH_ELEMENT_TYPE_EMP = 4, - I40E_SWITCH_ELEMENT_TYPE_BMC = 6, - I40E_SWITCH_ELEMENT_TYPE_PE = 16, - I40E_SWITCH_ELEMENT_TYPE_VEB = 17, - I40E_SWITCH_ELEMENT_TYPE_PA = 18, - I40E_SWITCH_ELEMENT_TYPE_VSI = 19, -}; - -/* Supported EtherType filters */ -enum i40e_ether_type_index { - I40E_ETHER_TYPE_1588 = 0, - I40E_ETHER_TYPE_FIP = 1, - I40E_ETHER_TYPE_OUI_EXTENDED = 2, - I40E_ETHER_TYPE_MAC_CONTROL = 3, - I40E_ETHER_TYPE_LLDP = 4, - I40E_ETHER_TYPE_EVB_PROTOCOL1 = 5, - I40E_ETHER_TYPE_EVB_PROTOCOL2 = 6, - I40E_ETHER_TYPE_QCN_CNM = 7, - I40E_ETHER_TYPE_8021X = 8, - I40E_ETHER_TYPE_ARP = 9, - I40E_ETHER_TYPE_RSV1 = 10, - I40E_ETHER_TYPE_RSV2 = 11, -}; - -/* Filter context base size is 1K */ -#define I40E_HASH_FILTER_BASE_SIZE 1024 -/* Supported Hash filter values */ -enum i40e_hash_filter_size { - I40E_HASH_FILTER_SIZE_1K = 0, - I40E_HASH_FILTER_SIZE_2K = 1, - I40E_HASH_FILTER_SIZE_4K = 2, - I40E_HASH_FILTER_SIZE_8K = 3, - I40E_HASH_FILTER_SIZE_16K = 4, - I40E_HASH_FILTER_SIZE_32K = 5, - I40E_HASH_FILTER_SIZE_64K = 6, - I40E_HASH_FILTER_SIZE_128K = 7, - I40E_HASH_FILTER_SIZE_256K = 8, - I40E_HASH_FILTER_SIZE_512K = 9, - I40E_HASH_FILTER_SIZE_1M = 10, -}; - -/* DMA context base size is 0.5K */ -#define I40E_DMA_CNTX_BASE_SIZE 512 -/* Supported DMA context values */ -enum i40e_dma_cntx_size { - I40E_DMA_CNTX_SIZE_512 = 0, - I40E_DMA_CNTX_SIZE_1K = 1, - I40E_DMA_CNTX_SIZE_2K = 2, - I40E_DMA_CNTX_SIZE_4K = 3, - I40E_DMA_CNTX_SIZE_8K = 4, - I40E_DMA_CNTX_SIZE_16K = 5, - I40E_DMA_CNTX_SIZE_32K = 6, - I40E_DMA_CNTX_SIZE_64K = 7, - I40E_DMA_CNTX_SIZE_128K = 8, - I40E_DMA_CNTX_SIZE_256K = 9, -}; - -/* Supported Hash look up table (LUT) sizes */ -enum i40e_hash_lut_size { - I40E_HASH_LUT_SIZE_128 = 0, - I40E_HASH_LUT_SIZE_512 = 1, -}; - -/* Structure to hold a per PF filter control settings */ -struct i40e_filter_control_settings { - /* number of PE Quad Hash filter buckets */ - enum i40e_hash_filter_size pe_filt_num; - /* number of PE Quad Hash contexts */ - enum i40e_dma_cntx_size pe_cntx_num; - /* number of FCoE filter buckets */ - enum i40e_hash_filter_size fcoe_filt_num; - /* number of FCoE DDP contexts */ - enum i40e_dma_cntx_size fcoe_cntx_num; - /* size of the Hash LUT */ - enum i40e_hash_lut_size hash_lut_size; - /* enable FDIR filters for PF and its VFs */ - bool enable_fdir; - /* enable Ethertype filters for PF and its VFs */ - bool enable_ethtype; - /* enable MAC/VLAN filters for PF and its VFs */ - bool enable_macvlan; -}; - -/* Structure to hold device level control filter counts */ -struct i40e_control_filter_stats { - u16 mac_etype_used; /* Used perfect match MAC/EtherType filters */ - u16 etype_used; /* Used perfect EtherType filters */ - u16 
mac_etype_free; /* Un-used perfect match MAC/EtherType filters */ - u16 etype_free; /* Un-used perfect EtherType filters */ -}; - -enum i40e_reset_type { - I40E_RESET_POR = 0, - I40E_RESET_CORER = 1, - I40E_RESET_GLOBR = 2, - I40E_RESET_EMPR = 3, -}; - -/* IEEE 802.1AB LLDP Agent Variables from NVM */ -#define I40E_NVM_LLDP_CFG_PTR 0x06 -#define I40E_SR_LLDP_CFG_PTR 0x31 - -/* RSS Hash Table Size */ -#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 - -/* INPUT SET MASK for RSS, flow director and flexible payload */ -#define I40E_FD_INSET_L3_SRC_SHIFT 47 -#define I40E_FD_INSET_L3_SRC_WORD_MASK (0x3ULL << \ - I40E_FD_INSET_L3_SRC_SHIFT) -#define I40E_FD_INSET_L3_DST_SHIFT 35 -#define I40E_FD_INSET_L3_DST_WORD_MASK (0x3ULL << \ - I40E_FD_INSET_L3_DST_SHIFT) -#define I40E_FD_INSET_L4_SRC_SHIFT 34 -#define I40E_FD_INSET_L4_SRC_WORD_MASK (0x1ULL << \ - I40E_FD_INSET_L4_SRC_SHIFT) -#define I40E_FD_INSET_L4_DST_SHIFT 33 -#define I40E_FD_INSET_L4_DST_WORD_MASK (0x1ULL << \ - I40E_FD_INSET_L4_DST_SHIFT) -#define I40E_FD_INSET_VERIFY_TAG_SHIFT 31 -#define I40E_FD_INSET_VERIFY_TAG_WORD_MASK (0x3ULL << \ - I40E_FD_INSET_VERIFY_TAG_SHIFT) - -#define I40E_FD_INSET_FLEX_WORD50_SHIFT 17 -#define I40E_FD_INSET_FLEX_WORD50_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD50_SHIFT) -#define I40E_FD_INSET_FLEX_WORD51_SHIFT 16 -#define I40E_FD_INSET_FLEX_WORD51_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD51_SHIFT) -#define I40E_FD_INSET_FLEX_WORD52_SHIFT 15 -#define I40E_FD_INSET_FLEX_WORD52_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD52_SHIFT) -#define I40E_FD_INSET_FLEX_WORD53_SHIFT 14 -#define I40E_FD_INSET_FLEX_WORD53_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD53_SHIFT) -#define I40E_FD_INSET_FLEX_WORD54_SHIFT 13 -#define I40E_FD_INSET_FLEX_WORD54_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD54_SHIFT) -#define I40E_FD_INSET_FLEX_WORD55_SHIFT 12 -#define I40E_FD_INSET_FLEX_WORD55_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD55_SHIFT) -#define I40E_FD_INSET_FLEX_WORD56_SHIFT 11 -#define I40E_FD_INSET_FLEX_WORD56_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD56_SHIFT) -#define I40E_FD_INSET_FLEX_WORD57_SHIFT 10 -#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \ - I40E_FD_INSET_FLEX_WORD57_SHIFT) - -/* Version format for Dynamic Device Personalization(DDP) */ -struct i40e_ddp_version { - u8 major; - u8 minor; - u8 update; - u8 draft; -}; - -#define I40E_DDP_NAME_SIZE 32 - -/* Package header */ -struct i40e_package_header { - struct i40e_ddp_version version; - u32 segment_count; - u32 segment_offset[1]; -}; - -/* Generic segment header */ -struct i40e_generic_seg_header { -#define SEGMENT_TYPE_METADATA 0x00000001 -#define SEGMENT_TYPE_NOTES 0x00000002 -#define SEGMENT_TYPE_I40E 0x00000011 -#define SEGMENT_TYPE_X722 0x00000012 - u32 type; - struct i40e_ddp_version version; - u32 size; - char name[I40E_DDP_NAME_SIZE]; -}; - -struct i40e_metadata_segment { - struct i40e_generic_seg_header header; - struct i40e_ddp_version version; - u32 track_id; - char name[I40E_DDP_NAME_SIZE]; -}; - -struct i40e_device_id_entry { - u32 vendor_dev_id; - u32 sub_vendor_dev_id; -}; - -struct i40e_profile_segment { - struct i40e_generic_seg_header header; - struct i40e_ddp_version version; - char name[I40E_DDP_NAME_SIZE]; - u32 device_table_count; - struct i40e_device_id_entry device_table[1]; -}; - -struct i40e_section_table { - u32 section_count; - u32 section_offset[1]; -}; - -struct i40e_profile_section_header { - u16 tbl_size; - u16 data_end; - struct { -#define SECTION_TYPE_INFO 0x00000010 -#define SECTION_TYPE_MMIO 0x00000800 -#define 
SECTION_TYPE_AQ 0x00000801 -#define SECTION_TYPE_NOTE 0x80000000 -#define SECTION_TYPE_NAME 0x80000001 - u32 type; - u32 offset; - u32 size; - } section; -}; - -struct i40e_profile_info { - u32 track_id; - struct i40e_ddp_version version; - u8 op; -#define I40E_DDP_ADD_TRACKID 0x01 -#define I40E_DDP_REMOVE_TRACKID 0x02 - u8 reserved[7]; - u8 name[I40E_DDP_NAME_SIZE]; -}; -#endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile new file mode 100644 index 000000000..1b050d9d5 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2013 - 2018 Intel Corporation. +# +# Makefile for the Intel(R) Ethernet Adaptive Virtual Function (iavf) +# driver +# +# + +ccflags-y += -I$(src) +subdir-ccflags-y += -I$(src) + +obj-$(CONFIG_IAVF) += iavf.o + +iavf-objs := i40evf_main.o i40evf_ethtool.o i40evf_virtchnl.o \ + i40e_txrx.o i40e_common.o i40e_adminq.o i40evf_client.o diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/iavf/i40e_adminq.c index 21a0dbf6c..32e0e2d9c 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c +++ b/drivers/net/ethernet/intel/iavf/i40e_adminq.c @@ -8,16 +8,6 @@ #include "i40e_prototype.h" /** - * i40e_is_nvm_update_op - return true if this is an NVM update operation - * @desc: API request descriptor - **/ -static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) -{ - return (desc->opcode == i40e_aqc_opc_nvm_erase) || - (desc->opcode == i40e_aqc_opc_nvm_update); -} - -/** * i40e_adminq_init_regs - Initialize AdminQ registers * @hw: pointer to the hardware structure * @@ -569,9 +559,6 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw) i40e_shutdown_asq(hw); i40e_shutdown_arq(hw); - if (hw->nvm_buff.va) - i40e_free_virt_mem(hw, &hw->nvm_buff); - return ret_code; } @@ -951,17 +938,3 @@ clean_arq_element_err: return ret_code; } - -void i40evf_resume_aq(struct i40e_hw *hw) -{ - /* Registers are reset after PF reset */ - hw->aq.asq.next_to_use = 0; - hw->aq.asq.next_to_clean = 0; - - i40e_config_asq_regs(hw); - - hw->aq.arq.next_to_use = 0; - hw->aq.arq.next_to_clean = 0; - - i40e_config_arq_regs(hw); -} diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/iavf/i40e_adminq.h index 1f264b9b6..1f264b9b6 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h +++ b/drivers/net/ethernet/intel/iavf/i40e_adminq.h diff --git a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h new file mode 100644 index 000000000..493bdc533 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h @@ -0,0 +1,528 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _I40E_ADMINQ_CMD_H_ +#define _I40E_ADMINQ_CMD_H_ + +/* This header file defines the i40e Admin Queue commands and is shared between + * i40e Firmware and Software. + * + * This file needs to comply with the Linux Kernel coding style. + */ + +#define I40E_FW_API_VERSION_MAJOR 0x0001 +#define I40E_FW_API_VERSION_MINOR_X722 0x0005 +#define I40E_FW_API_VERSION_MINOR_X710 0x0007 + +#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? 
\ + I40E_FW_API_VERSION_MINOR_X710 : \ + I40E_FW_API_VERSION_MINOR_X722) + +/* API version 1.7 implements additional link and PHY-specific APIs */ +#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007 + +struct i40e_aq_desc { + __le16 flags; + __le16 opcode; + __le16 datalen; + __le16 retval; + __le32 cookie_high; + __le32 cookie_low; + union { + struct { + __le32 param0; + __le32 param1; + __le32 param2; + __le32 param3; + } internal; + struct { + __le32 param0; + __le32 param1; + __le32 addr_high; + __le32 addr_low; + } external; + u8 raw[16]; + } params; +}; + +/* Flags sub-structure + * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 | + * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE | + */ + +/* command flags and offsets*/ +#define I40E_AQ_FLAG_DD_SHIFT 0 +#define I40E_AQ_FLAG_CMP_SHIFT 1 +#define I40E_AQ_FLAG_ERR_SHIFT 2 +#define I40E_AQ_FLAG_VFE_SHIFT 3 +#define I40E_AQ_FLAG_LB_SHIFT 9 +#define I40E_AQ_FLAG_RD_SHIFT 10 +#define I40E_AQ_FLAG_VFC_SHIFT 11 +#define I40E_AQ_FLAG_BUF_SHIFT 12 +#define I40E_AQ_FLAG_SI_SHIFT 13 +#define I40E_AQ_FLAG_EI_SHIFT 14 +#define I40E_AQ_FLAG_FE_SHIFT 15 + +#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */ +#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */ +#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */ +#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */ +#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */ +#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */ +#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */ +#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */ +#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */ +#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */ +#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */ + +/* error codes */ +enum i40e_admin_queue_err { + I40E_AQ_RC_OK = 0, /* success */ + I40E_AQ_RC_EPERM = 1, /* Operation not permitted */ + I40E_AQ_RC_ENOENT = 2, /* No such element */ + I40E_AQ_RC_ESRCH = 3, /* Bad opcode */ + I40E_AQ_RC_EINTR = 4, /* operation interrupted */ + I40E_AQ_RC_EIO = 5, /* I/O error */ + I40E_AQ_RC_ENXIO = 6, /* No such resource */ + I40E_AQ_RC_E2BIG = 7, /* Arg too long */ + I40E_AQ_RC_EAGAIN = 8, /* Try again */ + I40E_AQ_RC_ENOMEM = 9, /* Out of memory */ + I40E_AQ_RC_EACCES = 10, /* Permission denied */ + I40E_AQ_RC_EFAULT = 11, /* Bad address */ + I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */ + I40E_AQ_RC_EEXIST = 13, /* object already exists */ + I40E_AQ_RC_EINVAL = 14, /* Invalid argument */ + I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */ + I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */ + I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */ + I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */ + I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */ + I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */ + I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */ + I40E_AQ_RC_EFBIG = 22, /* File too large */ +}; + +/* Admin Queue command opcodes */ +enum i40e_admin_queue_opc { + /* aq commands */ + i40e_aqc_opc_get_version = 0x0001, + i40e_aqc_opc_driver_version = 0x0002, + i40e_aqc_opc_queue_shutdown = 0x0003, + i40e_aqc_opc_set_pf_context = 0x0004, + + /* resource ownership */ + i40e_aqc_opc_request_resource = 0x0008, + i40e_aqc_opc_release_resource = 0x0009, + + i40e_aqc_opc_list_func_capabilities = 0x000A, + i40e_aqc_opc_list_dev_capabilities = 0x000B, + + /* Proxy commands */ + 
i40e_aqc_opc_set_proxy_config = 0x0104, + i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105, + + /* LAA */ + i40e_aqc_opc_mac_address_read = 0x0107, + i40e_aqc_opc_mac_address_write = 0x0108, + + /* PXE */ + i40e_aqc_opc_clear_pxe_mode = 0x0110, + + /* WoL commands */ + i40e_aqc_opc_set_wol_filter = 0x0120, + i40e_aqc_opc_get_wake_reason = 0x0121, + + /* internal switch commands */ + i40e_aqc_opc_get_switch_config = 0x0200, + i40e_aqc_opc_add_statistics = 0x0201, + i40e_aqc_opc_remove_statistics = 0x0202, + i40e_aqc_opc_set_port_parameters = 0x0203, + i40e_aqc_opc_get_switch_resource_alloc = 0x0204, + i40e_aqc_opc_set_switch_config = 0x0205, + i40e_aqc_opc_rx_ctl_reg_read = 0x0206, + i40e_aqc_opc_rx_ctl_reg_write = 0x0207, + + i40e_aqc_opc_add_vsi = 0x0210, + i40e_aqc_opc_update_vsi_parameters = 0x0211, + i40e_aqc_opc_get_vsi_parameters = 0x0212, + + i40e_aqc_opc_add_pv = 0x0220, + i40e_aqc_opc_update_pv_parameters = 0x0221, + i40e_aqc_opc_get_pv_parameters = 0x0222, + + i40e_aqc_opc_add_veb = 0x0230, + i40e_aqc_opc_update_veb_parameters = 0x0231, + i40e_aqc_opc_get_veb_parameters = 0x0232, + + i40e_aqc_opc_delete_element = 0x0243, + + i40e_aqc_opc_add_macvlan = 0x0250, + i40e_aqc_opc_remove_macvlan = 0x0251, + i40e_aqc_opc_add_vlan = 0x0252, + i40e_aqc_opc_remove_vlan = 0x0253, + i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254, + i40e_aqc_opc_add_tag = 0x0255, + i40e_aqc_opc_remove_tag = 0x0256, + i40e_aqc_opc_add_multicast_etag = 0x0257, + i40e_aqc_opc_remove_multicast_etag = 0x0258, + i40e_aqc_opc_update_tag = 0x0259, + i40e_aqc_opc_add_control_packet_filter = 0x025A, + i40e_aqc_opc_remove_control_packet_filter = 0x025B, + i40e_aqc_opc_add_cloud_filters = 0x025C, + i40e_aqc_opc_remove_cloud_filters = 0x025D, + i40e_aqc_opc_clear_wol_switch_filters = 0x025E, + + i40e_aqc_opc_add_mirror_rule = 0x0260, + i40e_aqc_opc_delete_mirror_rule = 0x0261, + + /* Dynamic Device Personalization */ + i40e_aqc_opc_write_personalization_profile = 0x0270, + i40e_aqc_opc_get_personalization_profile_list = 0x0271, + + /* DCB commands */ + i40e_aqc_opc_dcb_ignore_pfc = 0x0301, + i40e_aqc_opc_dcb_updated = 0x0302, + i40e_aqc_opc_set_dcb_parameters = 0x0303, + + /* TX scheduler */ + i40e_aqc_opc_configure_vsi_bw_limit = 0x0400, + i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406, + i40e_aqc_opc_configure_vsi_tc_bw = 0x0407, + i40e_aqc_opc_query_vsi_bw_config = 0x0408, + i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A, + i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410, + + i40e_aqc_opc_enable_switching_comp_ets = 0x0413, + i40e_aqc_opc_modify_switching_comp_ets = 0x0414, + i40e_aqc_opc_disable_switching_comp_ets = 0x0415, + i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416, + i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417, + i40e_aqc_opc_query_switching_comp_ets_config = 0x0418, + i40e_aqc_opc_query_port_ets_config = 0x0419, + i40e_aqc_opc_query_switching_comp_bw_config = 0x041A, + i40e_aqc_opc_suspend_port_tx = 0x041B, + i40e_aqc_opc_resume_port_tx = 0x041C, + i40e_aqc_opc_configure_partition_bw = 0x041D, + /* hmc */ + i40e_aqc_opc_query_hmc_resource_profile = 0x0500, + i40e_aqc_opc_set_hmc_resource_profile = 0x0501, + + /* phy commands*/ + i40e_aqc_opc_get_phy_abilities = 0x0600, + i40e_aqc_opc_set_phy_config = 0x0601, + i40e_aqc_opc_set_mac_config = 0x0603, + i40e_aqc_opc_set_link_restart_an = 0x0605, + i40e_aqc_opc_get_link_status = 0x0607, + i40e_aqc_opc_set_phy_int_mask = 0x0613, + i40e_aqc_opc_get_local_advt_reg = 0x0614, + i40e_aqc_opc_set_local_advt_reg = 0x0615, + 
i40e_aqc_opc_get_partner_advt = 0x0616, + i40e_aqc_opc_set_lb_modes = 0x0618, + i40e_aqc_opc_get_phy_wol_caps = 0x0621, + i40e_aqc_opc_set_phy_debug = 0x0622, + i40e_aqc_opc_upload_ext_phy_fm = 0x0625, + i40e_aqc_opc_run_phy_activity = 0x0626, + i40e_aqc_opc_set_phy_register = 0x0628, + i40e_aqc_opc_get_phy_register = 0x0629, + + /* NVM commands */ + i40e_aqc_opc_nvm_read = 0x0701, + i40e_aqc_opc_nvm_erase = 0x0702, + i40e_aqc_opc_nvm_update = 0x0703, + i40e_aqc_opc_nvm_config_read = 0x0704, + i40e_aqc_opc_nvm_config_write = 0x0705, + i40e_aqc_opc_oem_post_update = 0x0720, + i40e_aqc_opc_thermal_sensor = 0x0721, + + /* virtualization commands */ + i40e_aqc_opc_send_msg_to_pf = 0x0801, + i40e_aqc_opc_send_msg_to_vf = 0x0802, + i40e_aqc_opc_send_msg_to_peer = 0x0803, + + /* alternate structure */ + i40e_aqc_opc_alternate_write = 0x0900, + i40e_aqc_opc_alternate_write_indirect = 0x0901, + i40e_aqc_opc_alternate_read = 0x0902, + i40e_aqc_opc_alternate_read_indirect = 0x0903, + i40e_aqc_opc_alternate_write_done = 0x0904, + i40e_aqc_opc_alternate_set_mode = 0x0905, + i40e_aqc_opc_alternate_clear_port = 0x0906, + + /* LLDP commands */ + i40e_aqc_opc_lldp_get_mib = 0x0A00, + i40e_aqc_opc_lldp_update_mib = 0x0A01, + i40e_aqc_opc_lldp_add_tlv = 0x0A02, + i40e_aqc_opc_lldp_update_tlv = 0x0A03, + i40e_aqc_opc_lldp_delete_tlv = 0x0A04, + i40e_aqc_opc_lldp_stop = 0x0A05, + i40e_aqc_opc_lldp_start = 0x0A06, + + /* Tunnel commands */ + i40e_aqc_opc_add_udp_tunnel = 0x0B00, + i40e_aqc_opc_del_udp_tunnel = 0x0B01, + i40e_aqc_opc_set_rss_key = 0x0B02, + i40e_aqc_opc_set_rss_lut = 0x0B03, + i40e_aqc_opc_get_rss_key = 0x0B04, + i40e_aqc_opc_get_rss_lut = 0x0B05, + + /* Async Events */ + i40e_aqc_opc_event_lan_overflow = 0x1001, + + /* OEM commands */ + i40e_aqc_opc_oem_parameter_change = 0xFE00, + i40e_aqc_opc_oem_device_status_change = 0xFE01, + i40e_aqc_opc_oem_ocsd_initialize = 0xFE02, + i40e_aqc_opc_oem_ocbb_initialize = 0xFE03, + + /* debug commands */ + i40e_aqc_opc_debug_read_reg = 0xFF03, + i40e_aqc_opc_debug_write_reg = 0xFF04, + i40e_aqc_opc_debug_modify_reg = 0xFF07, + i40e_aqc_opc_debug_dump_internals = 0xFF08, +}; + +/* command structures and indirect data structures */ + +/* Structure naming conventions: + * - no suffix for direct command descriptor structures + * - _data for indirect sent data + * - _resp for indirect return data (data which is both will use _data) + * - _completion for direct return data + * - _element_ for repeated elements (may also be _data or _resp) + * + * Command structures are expected to overlay the params.raw member of the basic + * descriptor, and as such cannot exceed 16 bytes in length. + */ + +/* This macro is used to generate a compilation error if a structure + * is not exactly the correct length. It gives a divide by zero error if the + * structure is not of the correct size, otherwise it creates an enum that is + * never used. + */ +#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \ + { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) } + +/* This macro is used extensively to ensure that command structures are 16 + * bytes in length as they have to map to the raw array of that size. 
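[Editor's aside, not part of the patch: two things the comments above describe can be shown in one small standalone sketch: direct-command structures are overlaid on the descriptor's 16-byte params.raw area, and the divide-by-zero trick in I40E_CHECK_STRUCT_LEN turns a size mismatch into a compile error. struct demo_desc, struct demo_cmd, fill_cmd() and the local macro copies are illustrative stand-ins using stdint.h types, not the driver's own definitions.]

#include <stdint.h>

#define CHECK_STRUCT_LEN(n, X) enum static_assert_enum_##X \
        { static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
#define CHECK_CMD_LENGTH(X) CHECK_STRUCT_LEN(16, X)

struct demo_desc {               /* trimmed-down stand-in for i40e_aq_desc */
        uint16_t flags;
        uint16_t opcode;
        uint16_t datalen;
        uint16_t retval;
        uint32_t cookie_high;
        uint32_t cookie_low;
        uint8_t  raw[16];        /* params.raw: command structs overlay this */
};

struct demo_cmd {                /* hypothetical 16-byte direct command payload */
        uint32_t param0;
        uint32_t param1;
        uint32_t addr_high;
        uint32_t addr_low;
};

CHECK_CMD_LENGTH(demo_cmd);      /* compiles only if the struct is exactly 16 bytes;
                                  * a wrong size makes the enum initializer divide by zero */

static void fill_cmd(struct demo_desc *desc, uint32_t addr_hi, uint32_t addr_lo)
{
        /* the overlay: reinterpret the raw parameter area as the command */
        struct demo_cmd *cmd = (struct demo_cmd *)desc->raw;

        cmd->addr_high = addr_hi;
        cmd->addr_low  = addr_lo;
}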
+ */ +#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X) + +/* Queue Shutdown (direct 0x0003) */ +struct i40e_aqc_queue_shutdown { + __le32 driver_unloading; +#define I40E_AQ_DRIVER_UNLOADING 0x1 + u8 reserved[12]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown); + +struct i40e_aqc_vsi_properties_data { + /* first 96 byte are written by SW */ + __le16 valid_sections; +#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001 +#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002 +#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004 +#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008 +#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010 +#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020 +#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040 +#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080 +#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100 +#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200 + /* switch section */ + __le16 switch_id; /* 12bit id combined with flags below */ +#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000 +#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT) +#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000 +#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000 +#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000 + u8 sw_reserved[2]; + /* security section */ + u8 sec_flags; +#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02 +#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04 + u8 sec_reserved; + /* VLAN section */ + __le16 pvid; /* VLANS include priority bits */ + __le16 fcoe_pvid; + u8 port_vlan_flags; +#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00 +#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \ + I40E_AQ_VSI_PVLAN_MODE_SHIFT) +#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01 +#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02 +#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03 +#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04 +#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03 +#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \ + I40E_AQ_VSI_PVLAN_EMOD_SHIFT) +#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0 +#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08 +#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10 +#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18 + u8 pvlan_reserved[3]; + /* ingress egress up sections */ + __le32 ingress_table; /* bitmap, 3 bits per up */ +#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0 +#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP0_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3 +#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP1_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6 +#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP2_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9 +#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP3_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12 +#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP4_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15 +#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP5_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18 +#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP6_SHIFT) +#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21 +#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \ + I40E_AQ_VSI_UP_TABLE_UP7_SHIFT) + __le32 egress_table; /* same defines as for ingress table */ + /* cascaded PV section */ + __le16 cas_pv_tag; + u8 cas_pv_flags; +#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00 +#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \ + I40E_AQ_VSI_CAS_PV_TAGX_SHIFT) +#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00 
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01 +#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02 +#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10 +#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20 +#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40 + u8 cas_pv_reserved; + /* queue mapping section */ + __le16 mapping_flags; +#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0 +#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1 + __le16 queue_mapping[16]; +#define I40E_AQ_VSI_QUEUE_SHIFT 0x0 +#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT) + __le16 tc_mapping[8]; +#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0 +#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \ + I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) +#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9 +#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \ + I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) + /* queueing option section */ + u8 queueing_opt_flags; +#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04 +#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08 +#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10 +#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00 +#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40 + u8 queueing_opt_reserved[3]; + /* scheduler section */ + u8 up_enable_bits; + u8 sched_reserved; + /* outer up section */ + __le32 outer_up_table; /* same structure and defines as ingress tbl */ + u8 cmd_reserved[8]; + /* last 32 bytes are written by FW */ + __le16 qs_handle[8]; +#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF + __le16 stat_counter_idx; + __le16 sched_id; + u8 resp_reserved[12]; +}; + +I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data); + +/* Get VEB Parameters (direct 0x0232) + * uses i40e_aqc_switch_seid for the descriptor + */ +struct i40e_aqc_get_veb_parameters_completion { + __le16 seid; + __le16 switch_id; + __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */ + __le16 statistic_index; + __le16 vebs_used; + __le16 vebs_free; + u8 reserved[4]; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion); + +#define I40E_LINK_SPEED_100MB_SHIFT 0x1 +#define I40E_LINK_SPEED_1000MB_SHIFT 0x2 +#define I40E_LINK_SPEED_10GB_SHIFT 0x3 +#define I40E_LINK_SPEED_40GB_SHIFT 0x4 +#define I40E_LINK_SPEED_20GB_SHIFT 0x5 +#define I40E_LINK_SPEED_25GB_SHIFT 0x6 + +enum i40e_aq_link_speed { + I40E_LINK_SPEED_UNKNOWN = 0, + I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT), + I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT), + I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT), + I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT), + I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT), + I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT), +}; + +/* Send to PF command (indirect 0x0801) id is only used by PF + * Send to VF command (indirect 0x0802) id is only used by PF + * Send to Peer PF command (indirect 0x0803) + */ +struct i40e_aqc_pf_vf_message { + __le32 id; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message); + +struct i40e_aqc_get_set_rss_key { +#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15) +#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) + __le16 vsi_id; + u8 reserved[6]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key); + +struct i40e_aqc_get_set_rss_key_data { + u8 standard_rss_key[0x28]; + u8 extended_hash_key[0xc]; +}; + +I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data); + +struct i40e_aqc_get_set_rss_lut { 
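[Editor's aside, not part of the patch: the RSS key/LUT commands above pack a 10-bit VSI number plus a "VSI valid" flag into the little-endian vsi_id word. A hedged sketch of that packing follows; pack_rss_vsi_id() is an illustrative name, and a plain uint16_t stands in for the __le16/cpu_to_le16() conversion the driver actually performs before sending.]

#include <stdint.h>

#define SET_RSS_KEY_VSI_VALID    (1u << 15)   /* mirrors I40E_AQC_SET_RSS_KEY_VSI_VALID */
#define SET_RSS_KEY_VSI_ID_SHIFT 0
#define SET_RSS_KEY_VSI_ID_MASK  (0x3FFu << SET_RSS_KEY_VSI_ID_SHIFT)

/* Build the host-order value for the command's vsi_id field; the real
 * driver additionally converts it with cpu_to_le16(). */
static uint16_t pack_rss_vsi_id(uint16_t vsi_number)
{
        return (uint16_t)(((vsi_number << SET_RSS_KEY_VSI_ID_SHIFT) &
                           SET_RSS_KEY_VSI_ID_MASK) |
                          SET_RSS_KEY_VSI_VALID);
}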
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15) +#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \ + I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) + __le16 vsi_id; +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \ + BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) + +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0 +#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1 + __le16 flags; + u8 reserved[4]; + __le32 addr_high; + __le32 addr_low; +}; + +I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut); +#endif /* _I40E_ADMINQ_CMD_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/iavf/i40e_alloc.h index cb8689222..cb8689222 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h +++ b/drivers/net/ethernet/intel/iavf/i40e_alloc.h diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/iavf/i40e_common.c index eea280ba4..f34091d96 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_common.c +++ b/drivers/net/ethernet/intel/iavf/i40e_common.c @@ -525,7 +525,6 @@ i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw, return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); } - /* The i40evf_ptype_lookup table is used to convert from the 8-bit ptype in the * hardware to a bit-field that can be used by SW to more easily determine the * packet type. @@ -892,135 +891,6 @@ struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = { }; /** - * i40evf_aq_rx_ctl_read_register - use FW to read from an Rx control register - * @hw: pointer to the hw struct - * @reg_addr: register address - * @reg_val: ptr to register value - * @cmd_details: pointer to command details structure or NULL - * - * Use the firmware to read the Rx control register, - * especially useful if the Rx unit is under heavy pressure - **/ -i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw, - u32 reg_addr, u32 *reg_val, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp = - (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; - i40e_status status; - - if (!reg_val) - return I40E_ERR_PARAM; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_rx_ctl_reg_read); - - cmd_resp->address = cpu_to_le32(reg_addr); - - status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details); - - if (status == 0) - *reg_val = le32_to_cpu(cmd_resp->value); - - return status; -} - -/** - * i40evf_read_rx_ctl - read from an Rx control register - * @hw: pointer to the hw struct - * @reg_addr: register address - **/ -u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr) -{ - i40e_status status = 0; - bool use_register; - int retry = 5; - u32 val = 0; - - use_register = (((hw->aq.api_maj_ver == 1) && - (hw->aq.api_min_ver < 5)) || - (hw->mac.type == I40E_MAC_X722)); - if (!use_register) { -do_retry: - status = i40evf_aq_rx_ctl_read_register(hw, reg_addr, - &val, NULL); - if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { - usleep_range(1000, 2000); - retry--; - goto do_retry; - } - } - - /* if the AQ access failed, try the old-fashioned way */ - if (status || use_register) - val = rd32(hw, reg_addr); - - return val; -} - -/** - * i40evf_aq_rx_ctl_write_register - * @hw: pointer to the hw struct - * @reg_addr: register address - * @reg_val: register value - * @cmd_details: pointer to command details structure or NULL - * - * Use the firmware to write to an Rx control register, - * especially useful if the Rx unit is 
under heavy pressure - **/ -i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw, - u32 reg_addr, u32 reg_val, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_rx_ctl_reg_read_write *cmd = - (struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_rx_ctl_reg_write); - - cmd->address = cpu_to_le32(reg_addr); - cmd->value = cpu_to_le32(reg_val); - - status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details); - - return status; -} - -/** - * i40evf_write_rx_ctl - write to an Rx control register - * @hw: pointer to the hw struct - * @reg_addr: register address - * @reg_val: register value - **/ -void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val) -{ - i40e_status status = 0; - bool use_register; - int retry = 5; - - use_register = (((hw->aq.api_maj_ver == 1) && - (hw->aq.api_min_ver < 5)) || - (hw->mac.type == I40E_MAC_X722)); - if (!use_register) { -do_retry: - status = i40evf_aq_rx_ctl_write_register(hw, reg_addr, - reg_val, NULL); - if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) { - usleep_range(1000, 2000); - retry--; - goto do_retry; - } - } - - /* if the AQ access failed, try the old-fashioned way */ - if (status || use_register) - wr32(hw, reg_addr, reg_val); -} - -/** * i40e_aq_send_msg_to_pf * @hw: pointer to the hardware structure * @v_opcode: opcodes for VF-PF communication @@ -1110,211 +980,3 @@ i40e_status i40e_vf_reset(struct i40e_hw *hw) return i40e_aq_send_msg_to_pf(hw, VIRTCHNL_OP_RESET_VF, 0, NULL, 0, NULL); } - -/** - * i40evf_aq_write_ddp - Write dynamic device personalization (ddp) - * @hw: pointer to the hw struct - * @buff: command buffer (size in bytes = buff_size) - * @buff_size: buffer size in bytes - * @track_id: package tracking id - * @error_offset: returns error offset - * @error_info: returns error information - * @cmd_details: pointer to command details structure or NULL - **/ -enum -i40e_status_code i40evf_aq_write_ddp(struct i40e_hw *hw, void *buff, - u16 buff_size, u32 track_id, - u32 *error_offset, u32 *error_info, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aqc_write_personalization_profile *cmd = - (struct i40e_aqc_write_personalization_profile *) - &desc.params.raw; - struct i40e_aqc_write_ddp_resp *resp; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_write_personalization_profile); - - desc.flags |= cpu_to_le16(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD); - if (buff_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - - desc.datalen = cpu_to_le16(buff_size); - - cmd->profile_track_id = cpu_to_le32(track_id); - - status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details); - if (!status) { - resp = (struct i40e_aqc_write_ddp_resp *)&desc.params.raw; - if (error_offset) - *error_offset = le32_to_cpu(resp->error_offset); - if (error_info) - *error_info = le32_to_cpu(resp->error_info); - } - - return status; -} - -/** - * i40evf_aq_get_ddp_list - Read dynamic device personalization (ddp) - * @hw: pointer to the hw struct - * @buff: command buffer (size in bytes = buff_size) - * @buff_size: buffer size in bytes - * @flags: AdminQ command flags - * @cmd_details: pointer to command details structure or NULL - **/ -enum -i40e_status_code i40evf_aq_get_ddp_list(struct i40e_hw *hw, void *buff, - u16 buff_size, u8 flags, - struct i40e_asq_cmd_details *cmd_details) -{ - struct 
i40e_aq_desc desc; - struct i40e_aqc_get_applied_profiles *cmd = - (struct i40e_aqc_get_applied_profiles *)&desc.params.raw; - i40e_status status; - - i40evf_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_get_personalization_profile_list); - - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF); - if (buff_size > I40E_AQ_LARGE_BUF) - desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB); - desc.datalen = cpu_to_le16(buff_size); - - cmd->flags = flags; - - status = i40evf_asq_send_command(hw, &desc, buff, buff_size, cmd_details); - - return status; -} - -/** - * i40evf_find_segment_in_package - * @segment_type: the segment type to search for (i.e., SEGMENT_TYPE_I40E) - * @pkg_hdr: pointer to the package header to be searched - * - * This function searches a package file for a particular segment type. On - * success it returns a pointer to the segment header, otherwise it will - * return NULL. - **/ -struct i40e_generic_seg_header * -i40evf_find_segment_in_package(u32 segment_type, - struct i40e_package_header *pkg_hdr) -{ - struct i40e_generic_seg_header *segment; - u32 i; - - /* Search all package segments for the requested segment type */ - for (i = 0; i < pkg_hdr->segment_count; i++) { - segment = - (struct i40e_generic_seg_header *)((u8 *)pkg_hdr + - pkg_hdr->segment_offset[i]); - - if (segment->type == segment_type) - return segment; - } - - return NULL; -} - -/** - * i40evf_write_profile - * @hw: pointer to the hardware structure - * @profile: pointer to the profile segment of the package to be downloaded - * @track_id: package tracking id - * - * Handles the download of a complete package. - */ -enum i40e_status_code -i40evf_write_profile(struct i40e_hw *hw, struct i40e_profile_segment *profile, - u32 track_id) -{ - i40e_status status = 0; - struct i40e_section_table *sec_tbl; - struct i40e_profile_section_header *sec = NULL; - u32 dev_cnt; - u32 vendor_dev_id; - u32 *nvm; - u32 section_size = 0; - u32 offset = 0, info = 0; - u32 i; - - dev_cnt = profile->device_table_count; - - for (i = 0; i < dev_cnt; i++) { - vendor_dev_id = profile->device_table[i].vendor_dev_id; - if ((vendor_dev_id >> 16) == PCI_VENDOR_ID_INTEL) - if (hw->device_id == (vendor_dev_id & 0xFFFF)) - break; - } - if (i == dev_cnt) { - i40e_debug(hw, I40E_DEBUG_PACKAGE, "Device doesn't support DDP"); - return I40E_ERR_DEVICE_NOT_SUPPORTED; - } - - nvm = (u32 *)&profile->device_table[dev_cnt]; - sec_tbl = (struct i40e_section_table *)&nvm[nvm[0] + 1]; - - for (i = 0; i < sec_tbl->section_count; i++) { - sec = (struct i40e_profile_section_header *)((u8 *)profile + - sec_tbl->section_offset[i]); - - /* Skip 'AQ', 'note' and 'name' sections */ - if (sec->section.type != SECTION_TYPE_MMIO) - continue; - - section_size = sec->section.size + - sizeof(struct i40e_profile_section_header); - - /* Write profile */ - status = i40evf_aq_write_ddp(hw, (void *)sec, (u16)section_size, - track_id, &offset, &info, NULL); - if (status) { - i40e_debug(hw, I40E_DEBUG_PACKAGE, - "Failed to write profile: offset %d, info %d", - offset, info); - break; - } - } - return status; -} - -/** - * i40evf_add_pinfo_to_list - * @hw: pointer to the hardware structure - * @profile: pointer to the profile segment of the package - * @profile_info_sec: buffer for information section - * @track_id: package tracking id - * - * Register a profile to the list of loaded profiles. 
- */ -enum i40e_status_code -i40evf_add_pinfo_to_list(struct i40e_hw *hw, - struct i40e_profile_segment *profile, - u8 *profile_info_sec, u32 track_id) -{ - i40e_status status = 0; - struct i40e_profile_section_header *sec = NULL; - struct i40e_profile_info *pinfo; - u32 offset = 0, info = 0; - - sec = (struct i40e_profile_section_header *)profile_info_sec; - sec->tbl_size = 1; - sec->data_end = sizeof(struct i40e_profile_section_header) + - sizeof(struct i40e_profile_info); - sec->section.type = SECTION_TYPE_INFO; - sec->section.offset = sizeof(struct i40e_profile_section_header); - sec->section.size = sizeof(struct i40e_profile_info); - pinfo = (struct i40e_profile_info *)(profile_info_sec + - sec->section.offset); - pinfo->track_id = track_id; - pinfo->version = profile->version; - pinfo->op = I40E_DDP_ADD_TRACKID; - memcpy(pinfo->name, profile->name, I40E_DDP_NAME_SIZE); - - status = i40evf_aq_write_ddp(hw, (void *)sec, sec->data_end, - track_id, &offset, &info, NULL); - return status; -} diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/iavf/i40e_devids.h index f300bf271..f300bf271 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h +++ b/drivers/net/ethernet/intel/iavf/i40e_devids.h diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/iavf/i40e_osdep.h index 3ddddb464..3ddddb464 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h +++ b/drivers/net/ethernet/intel/iavf/i40e_osdep.h diff --git a/drivers/net/ethernet/intel/iavf/i40e_prototype.h b/drivers/net/ethernet/intel/iavf/i40e_prototype.h new file mode 100644 index 000000000..ef7f74489 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_prototype.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _I40E_PROTOTYPE_H_ +#define _I40E_PROTOTYPE_H_ + +#include "i40e_type.h" +#include "i40e_alloc.h" +#include <linux/avf/virtchnl.h> + +/* Prototypes for shared code functions that are not in + * the standard function pointer structures. These are + * mostly because they are needed even before the init + * has happened and will assist in the early SW and FW + * setup. 
+ */ + +/* adminq functions */ +i40e_status i40evf_init_adminq(struct i40e_hw *hw); +i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw); +void i40e_adminq_init_ring_data(struct i40e_hw *hw); +i40e_status i40evf_clean_arq_element(struct i40e_hw *hw, + struct i40e_arq_event_info *e, + u16 *events_pending); +i40e_status i40evf_asq_send_command(struct i40e_hw *hw, + struct i40e_aq_desc *desc, + void *buff, /* can be NULL */ + u16 buff_size, + struct i40e_asq_cmd_details *cmd_details); +bool i40evf_asq_done(struct i40e_hw *hw); + +/* debug function for adminq */ +void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, + void *desc, void *buffer, u16 buf_len); + +void i40e_idle_aq(struct i40e_hw *hw); +void i40evf_resume_aq(struct i40e_hw *hw); +bool i40evf_check_asq_alive(struct i40e_hw *hw); +i40e_status i40evf_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); +const char *i40evf_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err); +const char *i40evf_stat_str(struct i40e_hw *hw, i40e_status stat_err); + +i40e_status i40evf_aq_get_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +i40e_status i40evf_aq_set_rss_lut(struct i40e_hw *hw, u16 seid, + bool pf_lut, u8 *lut, u16 lut_size); +i40e_status i40evf_aq_get_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); +i40e_status i40evf_aq_set_rss_key(struct i40e_hw *hw, + u16 seid, + struct i40e_aqc_get_set_rss_key_data *key); + +i40e_status i40e_set_mac_type(struct i40e_hw *hw); + +extern struct i40e_rx_ptype_decoded i40evf_ptype_lookup[]; + +static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) +{ + return i40evf_ptype_lookup[ptype]; +} + +/* i40e_common for VF drivers*/ +void i40e_vf_parse_hw_config(struct i40e_hw *hw, + struct virtchnl_vf_resource *msg); +i40e_status i40e_vf_reset(struct i40e_hw *hw); +i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw, + enum virtchnl_ops v_opcode, + i40e_status v_retval, u8 *msg, u16 msglen, + struct i40e_asq_cmd_details *cmd_details); +#endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/i40e_register.h b/drivers/net/ethernet/intel/iavf/i40e_register.h new file mode 100644 index 000000000..20b464ac1 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_register.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. 
*/ + +#ifndef _I40E_REGISTER_H_ +#define _I40E_REGISTER_H_ + +#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */ +#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */ +#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */ +#define I40E_VF_ARQH1_ARQH_SHIFT 0 +#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT) +#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */ +#define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28 +#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT) +#define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29 +#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT) +#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30 +#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT) +#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31 +#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT) +#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */ +#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */ +#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */ +#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */ +#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */ +#define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28 +#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT) +#define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29 +#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT) +#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30 +#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT) +#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31 +#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT) +#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */ +#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */ +#define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0 +#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT) +#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */ +#define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */ +#define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0 +#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT) +#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2 +#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT) +#define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3 +#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) +#define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5 +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24 +#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT) +#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */ +#define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30 +#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT) +#define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31 +#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */ +#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */ +#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) 
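[Editor's aside, not part of the patch: the SHIFT/MASK pairs in this register header all follow the same pattern: mask the field in place, then shift it down to its value. A small hedged sketch using the VF reset-state field follows; vfr_state() is an illustrative helper (the driver typically reads the register with rd32() and does the equivalent masking inline), and uint32_t stands in for the kernel's u32.]

#include <stdint.h>

#define I40E_MASK(mask, shift)  ((uint32_t)(mask) << (shift))  /* same shape as the driver's macro */
#define VFGEN_RSTAT_VFR_STATE_SHIFT 0
#define VFGEN_RSTAT_VFR_STATE_MASK  I40E_MASK(0x3, VFGEN_RSTAT_VFR_STATE_SHIFT)

/* Extract the 2-bit VF reset state from a raw I40E_VFGEN_RSTAT value. */
static inline uint32_t vfr_state(uint32_t rstat)
{
        return (rstat & VFGEN_RSTAT_VFR_STATE_MASK) >> VFGEN_RSTAT_VFR_STATE_SHIFT;
}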
/* _i=0...15 */ /* Reset: PFR */ +#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ +#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */ +#define I40E_VFQF_HKEY_MAX_INDEX 12 +#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */ +#define I40E_VFQF_HLUT_MAX_INDEX 15 +#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30 +#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT) +#endif /* _I40E_REGISTER_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/iavf/i40e_status.h index 77be0702d..77be0702d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_status.h +++ b/drivers/net/ethernet/intel/iavf/i40e_status.h diff --git a/drivers/net/ethernet/intel/i40evf/i40e_trace.h b/drivers/net/ethernet/intel/iavf/i40e_trace.h index d7a4e6882..d7a4e6882 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_trace.h +++ b/drivers/net/ethernet/intel/iavf/i40e_trace.h diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/iavf/i40e_txrx.c index 1bf9734ae..d4bd06adc 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/iavf/i40e_txrx.c @@ -1062,7 +1062,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); - if (ring->netdev->features & NETIF_F_RXHASH) + if (!(ring->netdev->features & NETIF_F_RXHASH)) return; if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/iavf/i40e_txrx.h index 3b5a63b32..3b5a63b32 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/iavf/i40e_txrx.h diff --git a/drivers/net/ethernet/intel/iavf/i40e_type.h b/drivers/net/ethernet/intel/iavf/i40e_type.h new file mode 100644 index 000000000..8f1344094 --- /dev/null +++ b/drivers/net/ethernet/intel/iavf/i40e_type.h @@ -0,0 +1,719 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2013 - 2018 Intel Corporation. */ + +#ifndef _I40E_TYPE_H_ +#define _I40E_TYPE_H_ + +#include "i40e_status.h" +#include "i40e_osdep.h" +#include "i40e_register.h" +#include "i40e_adminq.h" +#include "i40e_devids.h" + +#define I40E_RXQ_CTX_DBUFF_SHIFT 7 + +/* I40E_MASK is a macro used on 32 bit registers */ +#define I40E_MASK(mask, shift) ((u32)(mask) << (shift)) + +#define I40E_MAX_VSI_QP 16 +#define I40E_MAX_VF_VSI 3 +#define I40E_MAX_CHAINED_RX_BUFFERS 5 + +/* forward declaration */ +struct i40e_hw; +typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *); + +/* Data type manipulation macros. */ + +#define I40E_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) ? 
0 : (R)->count) + \ + (R)->next_to_clean - (R)->next_to_use - 1) + +/* bitfields for Tx queue mapping in QTX_CTL */ +#define I40E_QTX_CTL_VF_QUEUE 0x0 +#define I40E_QTX_CTL_VM_QUEUE 0x1 +#define I40E_QTX_CTL_PF_QUEUE 0x2 + +/* debug masks - set these bits in hw->debug_mask to control output */ +enum i40e_debug_mask { + I40E_DEBUG_INIT = 0x00000001, + I40E_DEBUG_RELEASE = 0x00000002, + + I40E_DEBUG_LINK = 0x00000010, + I40E_DEBUG_PHY = 0x00000020, + I40E_DEBUG_HMC = 0x00000040, + I40E_DEBUG_NVM = 0x00000080, + I40E_DEBUG_LAN = 0x00000100, + I40E_DEBUG_FLOW = 0x00000200, + I40E_DEBUG_DCB = 0x00000400, + I40E_DEBUG_DIAG = 0x00000800, + I40E_DEBUG_FD = 0x00001000, + I40E_DEBUG_PACKAGE = 0x00002000, + + I40E_DEBUG_AQ_MESSAGE = 0x01000000, + I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000, + I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000, + I40E_DEBUG_AQ_COMMAND = 0x06000000, + I40E_DEBUG_AQ = 0x0F000000, + + I40E_DEBUG_USER = 0xF0000000, + + I40E_DEBUG_ALL = 0xFFFFFFFF +}; + +/* These are structs for managing the hardware information and the operations. + * The structures of function pointers are filled out at init time when we + * know for sure exactly which hardware we're working with. This gives us the + * flexibility of using the same main driver code but adapting to slightly + * different hardware needs as new parts are developed. For this architecture, + * the Firmware and AdminQ are intended to insulate the driver from most of the + * future changes, but these structures will also do part of the job. + */ +enum i40e_mac_type { + I40E_MAC_UNKNOWN = 0, + I40E_MAC_XL710, + I40E_MAC_VF, + I40E_MAC_X722, + I40E_MAC_X722_VF, + I40E_MAC_GENERIC, +}; + +enum i40e_vsi_type { + I40E_VSI_MAIN = 0, + I40E_VSI_VMDQ1 = 1, + I40E_VSI_VMDQ2 = 2, + I40E_VSI_CTRL = 3, + I40E_VSI_FCOE = 4, + I40E_VSI_MIRROR = 5, + I40E_VSI_SRIOV = 6, + I40E_VSI_FDIR = 7, + I40E_VSI_TYPE_UNKNOWN +}; + +enum i40e_queue_type { + I40E_QUEUE_TYPE_RX = 0, + I40E_QUEUE_TYPE_TX, + I40E_QUEUE_TYPE_PE_CEQ, + I40E_QUEUE_TYPE_UNKNOWN +}; + +#define I40E_HW_CAP_MAX_GPIO 30 +/* Capabilities of a PF or a VF or the whole device */ +struct i40e_hw_capabilities { + bool dcb; + bool fcoe; + u32 num_vsis; + u32 num_rx_qp; + u32 num_tx_qp; + u32 base_queue; + u32 num_msix_vectors_vf; +}; + +struct i40e_mac_info { + enum i40e_mac_type type; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + u8 san_addr[ETH_ALEN]; + u16 max_fcoeq; +}; + +/* PCI bus types */ +enum i40e_bus_type { + i40e_bus_type_unknown = 0, + i40e_bus_type_pci, + i40e_bus_type_pcix, + i40e_bus_type_pci_express, + i40e_bus_type_reserved +}; + +/* PCI bus speeds */ +enum i40e_bus_speed { + i40e_bus_speed_unknown = 0, + i40e_bus_speed_33 = 33, + i40e_bus_speed_66 = 66, + i40e_bus_speed_100 = 100, + i40e_bus_speed_120 = 120, + i40e_bus_speed_133 = 133, + i40e_bus_speed_2500 = 2500, + i40e_bus_speed_5000 = 5000, + i40e_bus_speed_8000 = 8000, + i40e_bus_speed_reserved +}; + +/* PCI bus widths */ +enum i40e_bus_width { + i40e_bus_width_unknown = 0, + i40e_bus_width_pcie_x1 = 1, + i40e_bus_width_pcie_x2 = 2, + i40e_bus_width_pcie_x4 = 4, + i40e_bus_width_pcie_x8 = 8, + i40e_bus_width_32 = 32, + i40e_bus_width_64 = 64, + i40e_bus_width_reserved +}; + +/* Bus parameters */ +struct i40e_bus_info { + enum i40e_bus_speed speed; + enum i40e_bus_width width; + enum i40e_bus_type type; + + u16 func; + u16 device; + u16 lan_id; + u16 bus_id; +}; + +#define I40E_MAX_TRAFFIC_CLASS 8 +#define I40E_MAX_USER_PRIORITY 8 +/* Port hardware description */ +struct i40e_hw { + u8 __iomem *hw_addr; + void *back; + + /* 
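[Editor's aside, not part of the patch: I40E_DESC_UNUSED() above reports how many descriptors in a ring are still free, keeping one slot permanently unused so that equal indices always mean "empty". A self-contained sketch with a worked example follows; struct demo_ring and desc_unused() are illustrative stand-ins for the driver's ring structure and macro.]

#include <stdint.h>

struct demo_ring {
        uint16_t count;          /* total descriptors in the ring */
        uint16_t next_to_use;    /* where software writes the next descriptor */
        uint16_t next_to_clean;  /* where software reclaims completed descriptors */
};

/* Same arithmetic as I40E_DESC_UNUSED(): add 'count' only when the use
 * index has not wrapped past the clean index, then subtract the one
 * reserved slot that keeps "full" distinguishable from "empty". */
static uint16_t desc_unused(const struct demo_ring *r)
{
        return ((r->next_to_clean > r->next_to_use) ? 0 : r->count) +
               r->next_to_clean - r->next_to_use - 1;
}

/* Example: count = 16, next_to_use = 10, next_to_clean = 4
 *   -> 16 + 4 - 10 - 1 = 9 free descriptors (6 in flight + 1 reserved). */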
subsystem structs */ + struct i40e_mac_info mac; + struct i40e_bus_info bus; + + /* pci info */ + u16 device_id; + u16 vendor_id; + u16 subsystem_device_id; + u16 subsystem_vendor_id; + u8 revision_id; + + /* capabilities for entire device and PCI func */ + struct i40e_hw_capabilities dev_caps; + + /* Admin Queue info */ + struct i40e_adminq_info aq; + + /* debug mask */ + u32 debug_mask; + char err_str[16]; +}; + +static inline bool i40e_is_vf(struct i40e_hw *hw) +{ + return (hw->mac.type == I40E_MAC_VF || + hw->mac.type == I40E_MAC_X722_VF); +} + +struct i40e_driver_version { + u8 major_version; + u8 minor_version; + u8 build_version; + u8 subbuild_version; + u8 driver_string[32]; +}; + +/* RX Descriptors */ +union i40e_16byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fd_id; /* Flow director filter id */ + __le32 fcoe_param; /* FCoE DDP Context id */ + } hi_dword; + } qword0; + struct { + /* ext status/error/pktype/length */ + __le64 status_error_len; + } qword1; + } wb; /* writeback */ +}; + +union i40e_32byte_rx_desc { + struct { + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + /* bit 0 of hdr_buffer_addr is DD bit */ + __le64 rsvd1; + __le64 rsvd2; + } read; + struct { + struct { + struct { + union { + __le16 mirroring_status; + __le16 fcoe_ctx_id; + } mirr_fcoe; + __le16 l2tag1; + } lo_dword; + union { + __le32 rss; /* RSS Hash */ + __le32 fcoe_param; /* FCoE DDP Context id */ + /* Flow director filter id in case of + * Programming status desc WB + */ + __le32 fd_id; + } hi_dword; + } qword0; + struct { + /* status/error/pktype/length */ + __le64 status_error_len; + } qword1; + struct { + __le16 ext_status; /* extended status */ + __le16 rsvd; + __le16 l2tag2_1; + __le16 l2tag2_2; + } qword2; + struct { + union { + __le32 flex_bytes_lo; + __le32 pe_status; + } lo_dword; + union { + __le32 flex_bytes_hi; + __le32 fd_id; + } hi_dword; + } qword3; + } wb; /* writeback */ +}; + +enum i40e_rx_desc_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_STATUS_DD_SHIFT = 0, + I40E_RX_DESC_STATUS_EOF_SHIFT = 1, + I40E_RX_DESC_STATUS_L2TAG1P_SHIFT = 2, + I40E_RX_DESC_STATUS_L3L4P_SHIFT = 3, + I40E_RX_DESC_STATUS_CRCP_SHIFT = 4, + I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */ + I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7, + /* Note: Bit 8 is reserved in X710 and XL710 */ + I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8, + I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */ + I40E_RX_DESC_STATUS_FLM_SHIFT = 11, + I40E_RX_DESC_STATUS_FLTSTAT_SHIFT = 12, /* 2 BITS */ + I40E_RX_DESC_STATUS_LPBK_SHIFT = 14, + I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15, + I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */ + /* Note: For non-tunnel packets INT_UDP_0 is the right status for + * UDP header + */ + I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18, + I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! 
*/ +}; + +#define I40E_RXD_QW1_STATUS_SHIFT 0 +#define I40E_RXD_QW1_STATUS_MASK ((BIT(I40E_RX_DESC_STATUS_LAST) - 1) \ + << I40E_RXD_QW1_STATUS_SHIFT) + +#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT +#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \ + I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT) + +#define I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT I40E_RX_DESC_STATUS_TSYNVALID_SHIFT +#define I40E_RXD_QW1_STATUS_TSYNVALID_MASK \ + BIT_ULL(I40E_RXD_QW1_STATUS_TSYNVALID_SHIFT) + +enum i40e_rx_desc_fltstat_values { + I40E_RX_DESC_FLTSTAT_NO_DATA = 0, + I40E_RX_DESC_FLTSTAT_RSV_FD_ID = 1, /* 16byte desc? FD_ID : RSV */ + I40E_RX_DESC_FLTSTAT_RSV = 2, + I40E_RX_DESC_FLTSTAT_RSS_HASH = 3, +}; + +#define I40E_RXD_QW1_ERROR_SHIFT 19 +#define I40E_RXD_QW1_ERROR_MASK (0xFFUL << I40E_RXD_QW1_ERROR_SHIFT) + +enum i40e_rx_desc_error_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_ERROR_RXE_SHIFT = 0, + I40E_RX_DESC_ERROR_RECIPE_SHIFT = 1, + I40E_RX_DESC_ERROR_HBO_SHIFT = 2, + I40E_RX_DESC_ERROR_L3L4E_SHIFT = 3, /* 3 BITS */ + I40E_RX_DESC_ERROR_IPE_SHIFT = 3, + I40E_RX_DESC_ERROR_L4E_SHIFT = 4, + I40E_RX_DESC_ERROR_EIPE_SHIFT = 5, + I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6, + I40E_RX_DESC_ERROR_PPRS_SHIFT = 7 +}; + +enum i40e_rx_desc_error_l3l4e_fcoe_masks { + I40E_RX_DESC_ERROR_L3L4E_NONE = 0, + I40E_RX_DESC_ERROR_L3L4E_PROT = 1, + I40E_RX_DESC_ERROR_L3L4E_FC = 2, + I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR = 3, + I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN = 4 +}; + +#define I40E_RXD_QW1_PTYPE_SHIFT 30 +#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) + +/* Packet type non-ip values */ +enum i40e_rx_l2_ptype { + I40E_RX_PTYPE_L2_RESERVED = 0, + I40E_RX_PTYPE_L2_MAC_PAY2 = 1, + I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, + I40E_RX_PTYPE_L2_FIP_PAY2 = 3, + I40E_RX_PTYPE_L2_OUI_PAY2 = 4, + I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, + I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, + I40E_RX_PTYPE_L2_ECP_PAY2 = 7, + I40E_RX_PTYPE_L2_EVB_PAY2 = 8, + I40E_RX_PTYPE_L2_QCN_PAY2 = 9, + I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, + I40E_RX_PTYPE_L2_ARP = 11, + I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, + I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, + I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, + I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, + I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, + I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, + I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, + I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, + I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, + I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, + I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, + I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, + I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, + I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 +}; + +struct i40e_rx_ptype_decoded { + u32 ptype:8; + u32 known:1; + u32 outer_ip:1; + u32 outer_ip_ver:1; + u32 outer_frag:1; + u32 tunnel_type:3; + u32 tunnel_end_prot:2; + u32 tunnel_end_frag:1; + u32 inner_prot:4; + u32 payload_layer:3; +}; + +enum i40e_rx_ptype_outer_ip { + I40E_RX_PTYPE_OUTER_L2 = 0, + I40E_RX_PTYPE_OUTER_IP = 1 +}; + +enum i40e_rx_ptype_outer_ip_ver { + I40E_RX_PTYPE_OUTER_NONE = 0, + I40E_RX_PTYPE_OUTER_IPV4 = 0, + I40E_RX_PTYPE_OUTER_IPV6 = 1 +}; + +enum i40e_rx_ptype_outer_fragmented { + I40E_RX_PTYPE_NOT_FRAG = 0, + I40E_RX_PTYPE_FRAG = 1 +}; + +enum i40e_rx_ptype_tunnel_type { + I40E_RX_PTYPE_TUNNEL_NONE = 0, + I40E_RX_PTYPE_TUNNEL_IP_IP = 1, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, + I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, +}; + +enum i40e_rx_ptype_tunnel_end_prot { + I40E_RX_PTYPE_TUNNEL_END_NONE = 0, + 
I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, + I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, +}; + +enum i40e_rx_ptype_inner_prot { + I40E_RX_PTYPE_INNER_PROT_NONE = 0, + I40E_RX_PTYPE_INNER_PROT_UDP = 1, + I40E_RX_PTYPE_INNER_PROT_TCP = 2, + I40E_RX_PTYPE_INNER_PROT_SCTP = 3, + I40E_RX_PTYPE_INNER_PROT_ICMP = 4, + I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 +}; + +enum i40e_rx_ptype_payload_layer { + I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, + I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, +}; + +#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 +#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ + I40E_RXD_QW1_LENGTH_PBUF_SHIFT) + +#define I40E_RXD_QW1_LENGTH_HBUF_SHIFT 52 +#define I40E_RXD_QW1_LENGTH_HBUF_MASK (0x7FFULL << \ + I40E_RXD_QW1_LENGTH_HBUF_SHIFT) + +#define I40E_RXD_QW1_LENGTH_SPH_SHIFT 63 +#define I40E_RXD_QW1_LENGTH_SPH_MASK BIT_ULL(I40E_RXD_QW1_LENGTH_SPH_SHIFT) + +enum i40e_rx_desc_ext_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT = 0, + I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1, + I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */ + I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */ + I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9, + I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10, + I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11, +}; + +enum i40e_rx_desc_pe_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_DESC_PE_STATUS_QPID_SHIFT = 0, /* 18 BITS */ + I40E_RX_DESC_PE_STATUS_L4PORT_SHIFT = 0, /* 16 BITS */ + I40E_RX_DESC_PE_STATUS_IPINDEX_SHIFT = 16, /* 8 BITS */ + I40E_RX_DESC_PE_STATUS_QPIDHIT_SHIFT = 24, + I40E_RX_DESC_PE_STATUS_APBVTHIT_SHIFT = 25, + I40E_RX_DESC_PE_STATUS_PORTV_SHIFT = 26, + I40E_RX_DESC_PE_STATUS_URG_SHIFT = 27, + I40E_RX_DESC_PE_STATUS_IPFRAG_SHIFT = 28, + I40E_RX_DESC_PE_STATUS_IPOPT_SHIFT = 29 +}; + +#define I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT 38 +#define I40E_RX_PROG_STATUS_DESC_LENGTH 0x2000000 + +#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT 2 +#define I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK (0x7UL << \ + I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT) + +#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT 19 +#define I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK (0x3FUL << \ + I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT) + +enum i40e_rx_prog_status_desc_status_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_PROG_STATUS_DESC_DD_SHIFT = 0, + I40E_RX_PROG_STATUS_DESC_PROG_ID_SHIFT = 2 /* 3 BITS */ +}; + +enum i40e_rx_prog_status_desc_prog_id_masks { + I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS = 1, + I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS = 2, + I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS = 4, +}; + +enum i40e_rx_prog_status_desc_error_bits { + /* Note: These are predefined bit offsets */ + I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT = 0, + I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT = 1, + I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT = 2, + I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT = 3 +}; + +/* TX Descriptor */ +struct i40e_tx_desc { + __le64 buffer_addr; /* Address of descriptor's data buf */ + __le64 cmd_type_offset_bsz; +}; + +#define I40E_TXD_QW1_DTYPE_SHIFT 0 +#define I40E_TXD_QW1_DTYPE_MASK (0xFUL << I40E_TXD_QW1_DTYPE_SHIFT) + +enum i40e_tx_desc_dtype_value { + I40E_TX_DESC_DTYPE_DATA = 0x0, + I40E_TX_DESC_DTYPE_NOP = 0x1, /* same as Context desc */ + I40E_TX_DESC_DTYPE_CONTEXT = 0x1, + I40E_TX_DESC_DTYPE_FCOE_CTX = 0x2, + I40E_TX_DESC_DTYPE_FILTER_PROG = 0x8, + I40E_TX_DESC_DTYPE_DDP_CTX = 0x9, + 
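[Editor's aside, not part of the patch: the Rx writeback path recovers everything it needs from qword1 (status_error_len) with the shift/mask pairs defined above. The hedged sketch below decodes the DD bit, the 8-bit ptype, and the packet-buffer length; decode_rx_qword1() and struct rx_meta are illustrative names, and the caller is assumed to have already applied le64_to_cpu() as the driver does.]

#include <stdint.h>

#define RXD_QW1_STATUS_DD       (1ULL << 0)                        /* I40E_RX_DESC_STATUS_DD_SHIFT */
#define RXD_QW1_PTYPE_SHIFT     30
#define RXD_QW1_PTYPE_MASK      (0xFFULL << RXD_QW1_PTYPE_SHIFT)
#define RXD_QW1_LEN_PBUF_SHIFT  38
#define RXD_QW1_LEN_PBUF_MASK   (0x3FFFULL << RXD_QW1_LEN_PBUF_SHIFT)

struct rx_meta {
        uint8_t  ptype;   /* 8-bit hardware packet type */
        uint16_t size;    /* packet buffer length in bytes */
        int      done;    /* DD bit: descriptor has been written back */
};

static struct rx_meta decode_rx_qword1(uint64_t qword1_cpu)
{
        struct rx_meta m;

        m.done  = (qword1_cpu & RXD_QW1_STATUS_DD) != 0;
        m.ptype = (uint8_t)((qword1_cpu & RXD_QW1_PTYPE_MASK) >> RXD_QW1_PTYPE_SHIFT);
        m.size  = (uint16_t)((qword1_cpu & RXD_QW1_LEN_PBUF_MASK) >> RXD_QW1_LEN_PBUF_SHIFT);
        return m;
}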
I40E_TX_DESC_DTYPE_FLEX_DATA = 0xB, + I40E_TX_DESC_DTYPE_FLEX_CTX_1 = 0xC, + I40E_TX_DESC_DTYPE_FLEX_CTX_2 = 0xD, + I40E_TX_DESC_DTYPE_DESC_DONE = 0xF +}; + +#define I40E_TXD_QW1_CMD_SHIFT 4 +#define I40E_TXD_QW1_CMD_MASK (0x3FFUL << I40E_TXD_QW1_CMD_SHIFT) + +enum i40e_tx_desc_cmd_bits { + I40E_TX_DESC_CMD_EOP = 0x0001, + I40E_TX_DESC_CMD_RS = 0x0002, + I40E_TX_DESC_CMD_ICRC = 0x0004, + I40E_TX_DESC_CMD_IL2TAG1 = 0x0008, + I40E_TX_DESC_CMD_DUMMY = 0x0010, + I40E_TX_DESC_CMD_IIPT_NONIP = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV6 = 0x0020, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV4 = 0x0040, /* 2 BITS */ + I40E_TX_DESC_CMD_IIPT_IPV4_CSUM = 0x0060, /* 2 BITS */ + I40E_TX_DESC_CMD_FCOET = 0x0080, + I40E_TX_DESC_CMD_L4T_EOFT_UNK = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_TCP = 0x0100, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_SCTP = 0x0200, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_UDP = 0x0300, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_N = 0x0000, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_T = 0x0100, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI = 0x0200, /* 2 BITS */ + I40E_TX_DESC_CMD_L4T_EOFT_EOF_A = 0x0300, /* 2 BITS */ +}; + +#define I40E_TXD_QW1_OFFSET_SHIFT 16 +#define I40E_TXD_QW1_OFFSET_MASK (0x3FFFFULL << \ + I40E_TXD_QW1_OFFSET_SHIFT) + +enum i40e_tx_desc_length_fields { + /* Note: These are predefined bit offsets */ + I40E_TX_DESC_LENGTH_MACLEN_SHIFT = 0, /* 7 BITS */ + I40E_TX_DESC_LENGTH_IPLEN_SHIFT = 7, /* 7 BITS */ + I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT = 14 /* 4 BITS */ +}; + +#define I40E_TXD_QW1_TX_BUF_SZ_SHIFT 34 +#define I40E_TXD_QW1_TX_BUF_SZ_MASK (0x3FFFULL << \ + I40E_TXD_QW1_TX_BUF_SZ_SHIFT) + +#define I40E_TXD_QW1_L2TAG1_SHIFT 48 +#define I40E_TXD_QW1_L2TAG1_MASK (0xFFFFULL << I40E_TXD_QW1_L2TAG1_SHIFT) + +/* Context descriptors */ +struct i40e_tx_context_desc { + __le32 tunneling_params; + __le16 l2tag2; + __le16 rsvd; + __le64 type_cmd_tso_mss; +}; + +#define I40E_TXD_CTX_QW1_CMD_SHIFT 4 +#define I40E_TXD_CTX_QW1_CMD_MASK (0xFFFFUL << I40E_TXD_CTX_QW1_CMD_SHIFT) + +enum i40e_tx_ctx_desc_cmd_bits { + I40E_TX_CTX_DESC_TSO = 0x01, + I40E_TX_CTX_DESC_TSYN = 0x02, + I40E_TX_CTX_DESC_IL2TAG2 = 0x04, + I40E_TX_CTX_DESC_IL2TAG2_IL2H = 0x08, + I40E_TX_CTX_DESC_SWTCH_NOTAG = 0x00, + I40E_TX_CTX_DESC_SWTCH_UPLINK = 0x10, + I40E_TX_CTX_DESC_SWTCH_LOCAL = 0x20, + I40E_TX_CTX_DESC_SWTCH_VSI = 0x30, + I40E_TX_CTX_DESC_SWPE = 0x40 +}; + +#define I40E_TXD_CTX_QW1_TSO_LEN_SHIFT 30 +#define I40E_TXD_CTX_QW1_TSO_LEN_MASK (0x3FFFFULL << \ + I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) + +#define I40E_TXD_CTX_QW1_MSS_SHIFT 50 +#define I40E_TXD_CTX_QW1_MSS_MASK (0x3FFFULL << \ + I40E_TXD_CTX_QW1_MSS_SHIFT) + +#define I40E_TXD_CTX_QW1_VSI_SHIFT 50 +#define I40E_TXD_CTX_QW1_VSI_MASK (0x1FFULL << I40E_TXD_CTX_QW1_VSI_SHIFT) + +#define I40E_TXD_CTX_QW0_EXT_IP_SHIFT 0 +#define I40E_TXD_CTX_QW0_EXT_IP_MASK (0x3ULL << \ + I40E_TXD_CTX_QW0_EXT_IP_SHIFT) + +enum i40e_tx_ctx_desc_eipt_offload { + I40E_TX_CTX_EXT_IP_NONE = 0x0, + I40E_TX_CTX_EXT_IP_IPV6 = 0x1, + I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM = 0x2, + I40E_TX_CTX_EXT_IP_IPV4 = 0x3 +}; + +#define I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT 2 +#define I40E_TXD_CTX_QW0_EXT_IPLEN_MASK (0x3FULL << \ + I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT) + +#define I40E_TXD_CTX_QW0_NATT_SHIFT 9 +#define I40E_TXD_CTX_QW0_NATT_MASK (0x3ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) + +#define I40E_TXD_CTX_UDP_TUNNELING BIT_ULL(I40E_TXD_CTX_QW0_NATT_SHIFT) +#define I40E_TXD_CTX_GRE_TUNNELING (0x2ULL << I40E_TXD_CTX_QW0_NATT_SHIFT) + +#define I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT 11 +#define 
I40E_TXD_CTX_QW0_EIP_NOINC_MASK \ + BIT_ULL(I40E_TXD_CTX_QW0_EIP_NOINC_SHIFT) + +#define I40E_TXD_CTX_EIP_NOINC_IPID_CONST I40E_TXD_CTX_QW0_EIP_NOINC_MASK + +#define I40E_TXD_CTX_QW0_NATLEN_SHIFT 12 +#define I40E_TXD_CTX_QW0_NATLEN_MASK (0X7FULL << \ + I40E_TXD_CTX_QW0_NATLEN_SHIFT) + +#define I40E_TXD_CTX_QW0_DECTTL_SHIFT 19 +#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \ + I40E_TXD_CTX_QW0_DECTTL_SHIFT) + +#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23 +#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT) + +/* Packet Classifier Types for filters */ +enum i40e_filter_pctype { + /* Note: Values 0-28 are reserved for future use. + * Value 29, 30, 32 are not supported on XL710 and X710. + */ + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30, + I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31, + I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32, + I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33, + I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34, + I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35, + I40E_FILTER_PCTYPE_FRAG_IPV4 = 36, + /* Note: Values 37-38 are reserved for future use. + * Value 39, 40, 42 are not supported on XL710 and X710. + */ + I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39, + I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40, + I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41, + I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42, + I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43, + I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44, + I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45, + I40E_FILTER_PCTYPE_FRAG_IPV6 = 46, + /* Note: Value 47 is reserved for future use */ + I40E_FILTER_PCTYPE_FCOE_OX = 48, + I40E_FILTER_PCTYPE_FCOE_RX = 49, + I40E_FILTER_PCTYPE_FCOE_OTHER = 50, + /* Note: Values 51-62 are reserved for future use */ + I40E_FILTER_PCTYPE_L2_PAYLOAD = 63, +}; + + +struct i40e_vsi_context { + u16 seid; + u16 uplink_seid; + u16 vsi_number; + u16 vsis_allocated; + u16 vsis_unallocated; + u16 flags; + u8 pf_num; + u8 vf_num; + u8 connection_type; + struct i40e_aqc_vsi_properties_data info; +}; + +struct i40e_veb_context { + u16 seid; + u16 uplink_seid; + u16 veb_number; + u16 vebs_allocated; + u16 vebs_unallocated; + u16 flags; + struct i40e_aqc_get_veb_parameters_completion info; +}; + +/* Statistics collected by each port, VSI, VEB, and S-channel */ +struct i40e_eth_stats { + u64 rx_bytes; /* gorc */ + u64 rx_unicast; /* uprc */ + u64 rx_multicast; /* mprc */ + u64 rx_broadcast; /* bprc */ + u64 rx_discards; /* rdpc */ + u64 rx_unknown_protocol; /* rupp */ + u64 tx_bytes; /* gotc */ + u64 tx_unicast; /* uptc */ + u64 tx_multicast; /* mptc */ + u64 tx_broadcast; /* bptc */ + u64 tx_discards; /* tdpc */ + u64 tx_errors; /* tepc */ +}; +#endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/iavf/i40evf.h index 96e537a35..96e537a35 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/iavf/i40evf.h diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/iavf/i40evf_client.c index 3cc9d60d0..3cc9d60d0 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c +++ b/drivers/net/ethernet/intel/iavf/i40evf_client.c diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.h b/drivers/net/ethernet/intel/iavf/i40evf_client.h index 5585f3620..5585f3620 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_client.h +++ b/drivers/net/ethernet/intel/iavf/i40evf_client.h diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c 
b/drivers/net/ethernet/intel/iavf/i40evf_ethtool.c index 69efe0aec..69efe0aec 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/i40evf_ethtool.c diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/iavf/i40evf_main.c index 5a6e579e9..60c2e5df5 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/iavf/i40evf_main.c @@ -17,20 +17,20 @@ static int i40evf_close(struct net_device *netdev); char i40evf_driver_name[] = "i40evf"; static const char i40evf_driver_string[] = - "Intel(R) 40-10 Gigabit Virtual Function Network Driver"; + "Intel(R) Ethernet Adaptive Virtual Function Network Driver"; #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 3 #define DRV_VERSION_MINOR 2 -#define DRV_VERSION_BUILD 2 +#define DRV_VERSION_BUILD 3 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_BUILD) \ DRV_KERN const char i40evf_driver_version[] = DRV_VERSION; static const char i40evf_copyright[] = - "Copyright (c) 2013 - 2015 Intel Corporation."; + "Copyright (c) 2013 - 2018 Intel Corporation."; /* i40evf_pci_tbl - PCI Device ID Table * @@ -51,6 +51,7 @@ static const struct pci_device_id i40evf_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, i40evf_pci_tbl); +MODULE_ALIAS("i40evf"); MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c index 94dabc9d8..6579dabab 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/i40evf_virtchnl.c @@ -1362,8 +1362,15 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter, memcpy(adapter->vf_res, msg, min(msglen, len)); i40evf_validate_num_queues(adapter); i40e_vf_parse_hw_config(&adapter->hw, adapter->vf_res); - /* restore current mac address */ - ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + if (is_zero_ether_addr(adapter->hw.mac.addr)) { + /* restore current mac address */ + ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr); + } else { + /* refresh current mac address if changed */ + ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr); + ether_addr_copy(netdev->perm_addr, + adapter->hw.mac.addr); + } i40evf_process_config(adapter); } break; diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index aacfa5fcd..6f9d563de 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1211,8 +1211,12 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, if (!q_vector) { q_vector = kzalloc(size, GFP_KERNEL); } else if (size > ksize(q_vector)) { - kfree_rcu(q_vector, rcu); - q_vector = kzalloc(size, GFP_KERNEL); + struct igb_q_vector *new_q_vector; + + new_q_vector = kzalloc(size, GFP_KERNEL); + if (new_q_vector) + kfree_rcu(q_vector, rcu); + q_vector = new_q_vector; } else { memset(q_vector, 0, size); } @@ -3696,9 +3700,7 @@ static void igb_remove(struct pci_dev *pdev) igb_release_hw_control(adapter); #ifdef CONFIG_PCI_IOV - rtnl_lock(); igb_disable_sriov(pdev); - rtnl_unlock(); #endif unregister_netdev(netdev); @@ -7165,7 +7167,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) { struct e1000_hw *hw = &adapter->hw; unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; - 
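/*
 * Editorial note on the igb_vf_reset_msg() hunk here: declaring the mailbox
 * buffer as "u32 reg, msgbuf[3] = {};" value-initializes every element, so
 * any bytes of msgbuf[] that the function does not explicitly overwrite
 * before the mailbox write are zero rather than leftover stack data. Both
 * spellings below zero the whole array:
 *
 *   u32 msgbuf[3] = {};      // empty initializer (GNU C / C23)
 *   u32 msgbuf[3] = { 0 };   // portable C89/C99 form
 */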
u32 reg, msgbuf[3]; + u32 reg, msgbuf[3] = {}; u8 *addr = (u8 *)(&msgbuf[1]); /* process all the same items cleared in a function level reset */ diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index df827c254..70f5f28bf 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1070,7 +1070,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) igbvf_intr_msix_rx, 0, adapter->rx_ring->name, netdev); if (err) - goto out; + goto free_irq_tx; adapter->rx_ring->itr_register = E1000_EITR(vector); adapter->rx_ring->itr_val = adapter->current_itr; @@ -1079,10 +1079,14 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter) err = request_irq(adapter->msix_entries[vector].vector, igbvf_msix_other, 0, netdev->name, netdev); if (err) - goto out; + goto free_irq_rx; igbvf_configure_msix(adapter); return 0; +free_irq_rx: + free_irq(adapter->msix_entries[--vector].vector, netdev); +free_irq_tx: + free_irq(adapter->msix_entries[--vector].vector, netdev); out: return err; } diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c index b8ba3f94c..a47a2e3e5 100644 --- a/drivers/net/ethernet/intel/igbvf/vf.c +++ b/drivers/net/ethernet/intel/igbvf/vf.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2009 - 2018 Intel Corporation. */ +#include <linux/etherdevice.h> + #include "vf.h" static s32 e1000_check_for_link_vf(struct e1000_hw *hw); @@ -131,11 +133,16 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw) /* set our "perm_addr" based on info provided by PF */ ret_val = mbx->ops.read_posted(hw, msgbuf, 3); if (!ret_val) { - if (msgbuf[0] == (E1000_VF_RESET | - E1000_VT_MSGTYPE_ACK)) + switch (msgbuf[0]) { + case E1000_VF_RESET | E1000_VT_MSGTYPE_ACK: memcpy(hw->mac.perm_addr, addr, ETH_ALEN); - else + break; + case E1000_VF_RESET | E1000_VT_MSGTYPE_NACK: + eth_zero_addr(hw->mac.perm_addr); + break; + default: ret_val = -E1000_ERR_MAC_INIT; + } } } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index fd1311681..f1a4b11ce 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -542,6 +542,20 @@ struct mvneta_rx_desc { }; #endif +enum mvneta_tx_buf_type { + MVNETA_TYPE_SKB, + MVNETA_TYPE_XDP_TX, + MVNETA_TYPE_XDP_NDO, +}; + +struct mvneta_tx_buf { + enum mvneta_tx_buf_type type; + union { + struct xdp_frame *xdpf; + struct sk_buff *skb; + }; +}; + struct mvneta_tx_queue { /* Number of this TX queue, in the range 0-7 */ u8 id; @@ -557,8 +571,8 @@ struct mvneta_tx_queue { int tx_stop_threshold; int tx_wake_threshold; - /* Array of transmitted skb */ - struct sk_buff **tx_skb; + /* Array of transmitted buffers */ + struct mvneta_tx_buf *buf; /* Index of last TX DMA descriptor that was inserted */ int txq_put_index; @@ -1767,14 +1781,9 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp, int i; for (i = 0; i < num; i++) { + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index]; struct mvneta_tx_desc *tx_desc = txq->descs + txq->txq_get_index; - struct sk_buff *skb = txq->tx_skb[txq->txq_get_index]; - - if (skb) { - bytes_compl += skb->len; - pkts_compl++; - } mvneta_txq_inc_get(txq); @@ -1782,9 +1791,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp, dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr, tx_desc->data_size, DMA_TO_DEVICE); - if (!skb) + if (!buf->skb) continue; - dev_kfree_skb_any(skb); + + bytes_compl += 
buf->skb->len; + pkts_compl++; + dev_kfree_skb_any(buf->skb); } netdev_tx_completed_queue(nq, pkts_compl, bytes_compl); @@ -2238,16 +2250,19 @@ static inline void mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_port *pp, struct mvneta_tx_queue *txq) { - struct mvneta_tx_desc *tx_desc; int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; + struct mvneta_tx_desc *tx_desc; - txq->tx_skb[txq->txq_put_index] = NULL; tx_desc = mvneta_txq_next_desc_get(txq); tx_desc->data_size = hdr_len; tx_desc->command = mvneta_skb_tx_csum(pp, skb); tx_desc->command |= MVNETA_TXD_F_DESC; tx_desc->buf_phys_addr = txq->tso_hdrs_phys + txq->txq_put_index * TSO_HEADER_SIZE; + buf->type = MVNETA_TYPE_SKB; + buf->skb = NULL; + mvneta_txq_inc_put(txq); } @@ -2256,6 +2271,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, struct sk_buff *skb, char *data, int size, bool last_tcp, bool is_last) { + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; struct mvneta_tx_desc *tx_desc; tx_desc = mvneta_txq_next_desc_get(txq); @@ -2269,7 +2285,8 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, } tx_desc->command = 0; - txq->tx_skb[txq->txq_put_index] = NULL; + buf->type = MVNETA_TYPE_SKB; + buf->skb = NULL; if (last_tcp) { /* last descriptor in the TCP packet */ @@ -2277,7 +2294,7 @@ mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq, /* last descriptor in SKB */ if (is_last) - txq->tx_skb[txq->txq_put_index] = skb; + buf->skb = skb; } mvneta_txq_inc_put(txq); return 0; @@ -2362,6 +2379,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, int i, nr_frags = skb_shinfo(skb)->nr_frags; for (i = 0; i < nr_frags; i++) { + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; void *addr = page_address(frag->page.p) + frag->page_offset; @@ -2381,12 +2399,13 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, if (i == nr_frags - 1) { /* Last descriptor */ tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD; - txq->tx_skb[txq->txq_put_index] = skb; + buf->skb = skb; } else { /* Descriptor in the middle: Not First, Not Last */ tx_desc->command = 0; - txq->tx_skb[txq->txq_put_index] = NULL; + buf->skb = NULL; } + buf->type = MVNETA_TYPE_SKB; mvneta_txq_inc_put(txq); } @@ -2414,6 +2433,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) struct mvneta_port *pp = netdev_priv(dev); u16 txq_id = skb_get_queue_mapping(skb); struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; + struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index]; struct mvneta_tx_desc *tx_desc; int len = skb->len; int frags = 0; @@ -2446,16 +2466,17 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev) goto out; } + buf->type = MVNETA_TYPE_SKB; if (frags == 1) { /* First and Last descriptor */ tx_cmd |= MVNETA_TXD_FLZ_DESC; tx_desc->command = tx_cmd; - txq->tx_skb[txq->txq_put_index] = skb; + buf->skb = skb; mvneta_txq_inc_put(txq); } else { /* First but not Last */ tx_cmd |= MVNETA_TXD_F_DESC; - txq->tx_skb[txq->txq_put_index] = NULL; + buf->skb = NULL; mvneta_txq_inc_put(txq); tx_desc->command = tx_cmd; /* Continue with other skb fragments */ @@ -3000,9 +3021,8 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp, txq->last_desc = txq->size - 1; - txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb), - GFP_KERNEL); - if (!txq->tx_skb) { + txq->buf 
= kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL); + if (!txq->buf) { dma_free_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, txq->descs, txq->descs_phys); @@ -3014,7 +3034,7 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp, txq->size * TSO_HEADER_SIZE, &txq->tso_hdrs_phys, GFP_KERNEL); if (!txq->tso_hdrs) { - kfree(txq->tx_skb); + kfree(txq->buf); dma_free_coherent(pp->dev->dev.parent, txq->size * MVNETA_DESC_ALIGNED_SIZE, txq->descs, txq->descs_phys); @@ -3069,7 +3089,7 @@ static void mvneta_txq_sw_deinit(struct mvneta_port *pp, { struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); - kfree(txq->tx_skb); + kfree(txq->buf); if (txq->tso_hdrs) dma_free_coherent(pp->dev->dev.parent, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index ef9f932f0..5a2feadd8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -564,7 +564,7 @@ static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer, } else { cur_string = mlx5_tracer_message_get(tracer, tracer_event); if (!cur_string) { - pr_debug("%s Got string event for unknown string tdsm: %d\n", + pr_debug("%s Got string event for unknown string tmsn: %d\n", __func__, tracer_event->string_event.tmsn); return -1; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index 722998d68..6f1f53f91 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -109,12 +109,14 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev, if (!MLX5_CAP_GEN(priv->mdev, ets)) return -EOPNOTSUPP; - ets->ets_cap = mlx5_max_tc(priv->mdev) + 1; - for (i = 0; i < ets->ets_cap; i++) { + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]); if (err) return err; + } + ets->ets_cap = mlx5_max_tc(priv->mdev) + 1; + for (i = 0; i < ets->ets_cap; i++) { err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 0fd62510f..dee65443d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -395,8 +395,8 @@ static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, static const struct ptp_clock_info mlx5_ptp_clock_info = { .owner = THIS_MODULE, - .name = "mlx5_p2p", - .max_adj = 100000000, + .name = "mlx5_ptp", + .max_adj = 50000000, .n_alarm = 0, .n_ext_ts = 0, .n_per_out = 0, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index a2b25afa2..e09bd0599 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1683,7 +1683,7 @@ static void mlx5_core_verify_params(void) } } -static int __init init(void) +static int __init mlx5_init(void) { int err; @@ -1708,7 +1708,7 @@ err_debug: return err; } -static void __exit cleanup(void) +static void __exit mlx5_cleanup(void) { #ifdef CONFIG_MLX5_CORE_EN mlx5e_cleanup(); @@ -1717,5 +1717,5 @@ static void __exit cleanup(void) mlx5_unregister_debugfs(); } -module_init(init); -module_exit(cleanup); +module_init(mlx5_init); +module_exit(mlx5_cleanup); diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 9c3653e06..fc880c024 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -164,7 +164,8 @@ static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr) fp = list_entry(dev->priv.free_list.next, struct fw_page, list); n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask)); if (n >= MLX5_NUM_4K_IN_PAGE) { - mlx5_core_warn(dev, "alloc 4k bug\n"); + mlx5_core_warn(dev, "alloc 4k bug: fw page = 0x%llx, n = %u, bitmask: %lu, max num of 4K pages: %d\n", + fp->addr, n, fp->bitmask, MLX5_NUM_4K_IN_PAGE); return -ENOENT; } clear_bit(n, &fp->bitmask); diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c index 0094b92a2..31c0d6ee8 100644 --- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c +++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_tlv_multi.c @@ -62,6 +62,8 @@ mlxfw_mfa2_tlv_next(const struct mlxfw_mfa2_file *mfa2_file, if (tlv->type == MLXFW_MFA2_TLV_MULTI_PART) { multi = mlxfw_mfa2_tlv_multi_get(mfa2_file, tlv); + if (!multi) + return NULL; tlv_len = NLA_ALIGN(tlv_len + be16_to_cpu(multi->total_len)); } diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 3bc570c46..8296b0aa4 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -3960,6 +3960,7 @@ abort_with_slices: myri10ge_free_slices(mgp); abort_with_firmware: + kfree(mgp->msix_vectors); myri10ge_dummy_rdma(mgp, 0); abort_with_ioremap: diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index 69282f31d..fe54bcab7 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -255,7 +255,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) */ laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE); - if (!laddr) { + if (dma_mapping_error(lp->device, laddr)) { pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name); dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -473,7 +473,7 @@ static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp, *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE), SONIC_RBSIZE, DMA_FROM_DEVICE); - if (!*new_addr) { + if (dma_mapping_error(lp->device, *new_addr)) { dev_kfree_skb(*new_skb); *new_skb = NULL; return false; diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 0a8483c61..b42f81d0c 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -2375,7 +2375,7 @@ static void free_tx_buffers(struct s2io_nic *nic) skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j); if (skb) { swstats->mem_freed += skb->truesize; - dev_kfree_skb(skb); + dev_kfree_skb_irq(skb); cnt++; } } diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c index 4e417f839..f7b354e79 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -874,7 +874,6 @@ area_cache_get(struct nfp_cpp *cpp, u32 id, } /* Adjust the start address to be cache size aligned */ - cache->id = id; cache->addr = addr & ~(u64)(cache->size - 1); /* Re-init to the new ID and 
address */ @@ -894,6 +893,8 @@ area_cache_get(struct nfp_cpp *cpp, u32 id, return NULL; } + cache->id = id; + exit: /* Adjust offset */ *offset = addr - cache->addr; diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index e50fc8f71..7e5beb413 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -4062,6 +4062,11 @@ static int qed_init_wfq_param(struct qed_hwfn *p_hwfn, num_vports = p_hwfn->qm_info.num_vports; + if (num_vports < 2) { + DP_NOTICE(p_hwfn, "Unexpected num_vports: %d\n", num_vports); + return -EINVAL; + } + /* Accounting for the vports which are configured for WFQ explicitly */ for (i = 0; i < num_vports; i++) { u32 tmp_speed; diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index 402c1c3d8..5c8eaded6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -4403,6 +4403,9 @@ qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate) } vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true); + if (!vf) + return -EINVAL; + vport_id = vf->vport_id; return qed_configure_vport_wfq(cdev, vport_id, rate); @@ -5142,7 +5145,7 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) /* Validate that the VF has a configured vport */ vf = qed_iov_get_vf_info(hwfn, i, true); - if (!vf->vport_instance) + if (!vf || !vf->vport_instance) continue; memset(¶ms, 0, sizeof(params)); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 102862150..85419b825 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2525,7 +2525,13 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac) goto disable_mbx_intr; qlcnic_83xx_clear_function_resources(adapter); - qlcnic_dcb_enable(adapter->dcb); + + err = qlcnic_dcb_enable(adapter->dcb); + if (err) { + qlcnic_dcb_free(adapter->dcb); + goto disable_mbx_intr; + } + qlcnic_83xx_initialize_nic(adapter, 1); qlcnic_dcb_get_info(adapter->dcb); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index d344e9d43..d3030bd96 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -629,7 +629,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev) int i, err, ring; if (dev->flags & QLCNIC_NEED_FLR) { - pci_reset_function(dev->pdev); + err = pci_reset_function(dev->pdev); + if (err) { + dev_err(&dev->pdev->dev, + "Adapter reset failed (%d). 
Please reboot\n", + err); + return err; + } dev->flags &= ~QLCNIC_NEED_FLR; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h index 0a9d24e86..eb8000d9b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h @@ -42,11 +42,6 @@ struct qlcnic_dcb { unsigned long state; }; -static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb) -{ - kfree(dcb); -} - static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb) { if (dcb && dcb->ops->get_hw_capability) @@ -113,9 +108,8 @@ static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb) dcb->ops->init_dcbnl_ops(dcb); } -static inline void qlcnic_dcb_enable(struct qlcnic_dcb *dcb) +static inline int qlcnic_dcb_enable(struct qlcnic_dcb *dcb) { - if (dcb && qlcnic_dcb_attach(dcb)) - qlcnic_clear_dcb_ops(dcb); + return dcb ? qlcnic_dcb_attach(dcb) : 0; } #endif diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 43920374b..a0c427f3b 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2638,7 +2638,13 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) "Device does not support MSI interrupts\n"); if (qlcnic_82xx_check(adapter)) { - qlcnic_dcb_enable(adapter->dcb); + err = qlcnic_dcb_enable(adapter->dcb); + if (err) { + qlcnic_dcb_free(adapter->dcb); + dev_err(&pdev->dev, "Failed to enable DCB\n"); + goto err_out_free_hw; + } + qlcnic_dcb_get_info(adapter->dcb); err = qlcnic_setup_intr(adapter); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 98275f18a..d28c4436f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -222,6 +222,8 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs) return 0; qlcnic_destroy_async_wq: + while (i--) + kfree(sriov->vf_info[i].vp); destroy_workqueue(bc->bc_async_wq); qlcnic_destroy_trans_wq: diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 76a9b37c8..3c764c28d 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -752,9 +752,15 @@ static int emac_remove(struct platform_device *pdev) struct net_device *netdev = dev_get_drvdata(&pdev->dev); struct emac_adapter *adpt = netdev_priv(netdev); + netif_carrier_off(netdev); + netif_tx_disable(netdev); + unregister_netdev(netdev); netif_napi_del(&adpt->rx_q.napi); + free_irq(adpt->irq.irq, &adpt->irq); + cancel_work_sync(&adpt->work_thread); + emac_clks_teardown(adpt); put_device(&adpt->phydev->mdio.dev); diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 2199bd08f..e377c1f68 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -1184,10 +1184,12 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "Failed to register net device\n"); - goto err_out_mdio_unregister; + goto err_out_phy_disconnect; } return 0; +err_out_phy_disconnect: + phy_disconnect(dev->phydev); err_out_mdio_unregister: mdiobus_unregister(lp->mii_bus); err_out_mdio: @@ -1211,6 +1213,7 @@ static void r6040_remove_one(struct pci_dev *pdev) struct r6040_private *lp = 
netdev_priv(dev); unregister_netdev(dev); + phy_disconnect(dev->phydev); mdiobus_unregister(lp->mii_bus); mdiobus_free(lp->mii_bus); netif_napi_del(&lp->napi); diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 9077014f6..a1906804c 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -738,14 +738,14 @@ static void ravb_error_interrupt(struct net_device *ndev) ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS); if (eis & EIS_QFS) { ris2 = ravb_read(ndev, RIS2); - ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED), + ravb_write(ndev, ~(RIS2_QFF0 | RIS2_QFF1 | RIS2_RFFF | RIS2_RESERVED), RIS2); /* Receive Descriptor Empty int */ if (ris2 & RIS2_QFF0) priv->stats[RAVB_BE].rx_over_errors++; - /* Receive Descriptor Empty int */ + /* Receive Descriptor Empty int */ if (ris2 & RIS2_QFF1) priv->stats[RAVB_NC].rx_over_errors++; @@ -2199,11 +2199,11 @@ static int ravb_remove(struct platform_device *pdev) priv->desc_bat_dma); /* Set reset mode */ ravb_write(ndev, CCC_OPC_RESET, CCC); - pm_runtime_put_sync(&pdev->dev); unregister_netdev(ndev); netif_napi_del(&priv->napi[RAVB_NC]); netif_napi_del(&priv->napi[RAVB_BE]); ravb_mdio_release(priv); + pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); free_netdev(ndev); platform_set_drvdata(pdev, NULL); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c index e436fa160..59165c056 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c @@ -520,9 +520,9 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, return 0; } - val |= PPSCMDx(index, 0x2); val |= TRGTMODSELx(index, 0x2); val |= PPSEN0; + writel(val, ioaddr + MAC_PPS_CONTROL); writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index)); @@ -547,6 +547,7 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index, writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index)); /* Finally, activate it */ + val |= PPSCMDx(index, 0x2); writel(val, ioaddr + MAC_PPS_CONTROL); return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index 08a058e1b..541c0449f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c @@ -53,7 +53,8 @@ static void config_sub_second_increment(void __iomem *ioaddr, if (!(value & PTP_TCR_TSCTRLSSR)) data = (data * 1000) / 465; - data &= PTP_SSIR_SSINC_MASK; + if (data > PTP_SSIR_SSINC_MAX) + data = PTP_SSIR_SSINC_MAX; reg_value = data; if (gmac4) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 9e040eb62..f57761a03 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -518,7 +518,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst"); plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode"); - if (plat->force_thresh_dma_mode) { + if (plat->force_thresh_dma_mode && plat->force_sf_dma_mode) { plat->force_sf_dma_mode = 0; pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set."); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index ecccf895f..306ebf1a4 100644 
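/*
 * Editorial worked example for the config_sub_second_increment() change
 * above and the PTP_SSIR_SSINC_MAX rename just below: with an 8-bit SSINC
 * field, masking an out-of-range sub-second increment silently wraps it
 * (0x119 & 0xff == 0x19), programming a much smaller increment than
 * intended, while clamping keeps the largest representable value
 * (min(0x119, 0xff) == 0xff).
 */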
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h @@ -66,7 +66,7 @@ #define PTP_TCR_TSENMACADDR BIT(18) /* SSIR defines */ -#define PTP_SSIR_SSINC_MASK 0xff +#define PTP_SSIR_SSINC_MAX 0xff #define GMAC4_PTP_SSIR_SSINC_SHIFT 16 #endif /* __STMMAC_PTP_H__ */ diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index 644e42c18..1c9522ad3 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -291,6 +291,9 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) hp = mdesc_grab(); + if (!hp) + return -ENODEV; + rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len); err = -ENODEV; if (!rmac) { diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 605c4d15b..1693a7032 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -4505,7 +4505,7 @@ static int niu_alloc_channels(struct niu *np) err = niu_rbr_fill(np, rp, GFP_KERNEL); if (err) - return err; + goto out_err; } tx_rings = kcalloc(num_tx_rings, sizeof(struct tx_ring_info), diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index 590172818..3a1f0653c 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -432,6 +432,9 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) hp = mdesc_grab(); + if (!hp) + return -ENODEV; + vp = vnet_find_parent(hp, vdev->mp, vdev); if (IS_ERR(vp)) { pr_err("Cannot find port parent vnet\n"); diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 609986521..7fdbd2ff5 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1276,7 +1276,7 @@ out: } /* Submit the packet */ -static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct netcp_intf *netcp = netdev_priv(ndev); struct netcp_stats *tx_stats = &netcp->stats; diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c index 75237c81c..572294678 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c @@ -330,15 +330,17 @@ static int gelic_card_init_chain(struct gelic_card *card, /* set up the hardware pointers in each descriptor */ for (i = 0; i < no; i++, descr++) { + dma_addr_t cpu_addr; + gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE); - descr->bus_addr = - dma_map_single(ctodev(card), descr, - GELIC_DESCR_SIZE, - DMA_BIDIRECTIONAL); - if (!descr->bus_addr) + cpu_addr = dma_map_single(ctodev(card), descr, + GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL); + + if (dma_mapping_error(ctodev(card), cpu_addr)) goto iommu_error; + descr->bus_addr = cpu_to_be32(cpu_addr); descr->next = descr + 1; descr->prev = descr - 1; } @@ -378,28 +380,30 @@ iommu_error: * * allocates a new rx skb, iommu-maps it and attaches it to the descriptor. * Activate the descriptor state-wise + * + * Gelic RX sk_buffs must be aligned to GELIC_NET_RXBUF_ALIGN and the length + * must be a multiple of GELIC_NET_RXBUF_ALIGN. 
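 * As a worked example (editorial, using the GELIC_NET_* values defined in
 * the ps3_gelic_net.h hunk further down): GELIC_NET_MAX_FRAME is 2312 and
 * GELIC_NET_RXBUF_ALIGN is 128, so rx_skb_size below is
 * ALIGN(2312, 128) + 128 - 1 = 2432 + 127 = 2559 bytes, and the
 * skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset) call moves
 * skb->data up to the next 128-byte boundary when the allocation is not
 * already aligned.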
*/ static int gelic_descr_prepare_rx(struct gelic_card *card, struct gelic_descr *descr) { + static const unsigned int rx_skb_size = + ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) + + GELIC_NET_RXBUF_ALIGN - 1; + dma_addr_t cpu_addr; int offset; - unsigned int bufsize; if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE) dev_info(ctodev(card), "%s: ERROR status\n", __func__); - /* we need to round up the buffer size to a multiple of 128 */ - bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN); - /* and we need to have it 128 byte aligned, therefore we allocate a - * bit more */ - descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1); + descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size); if (!descr->skb) { descr->buf_addr = 0; /* tell DMAC don't touch memory */ dev_info(ctodev(card), "%s:allocate skb failed !!\n", __func__); return -ENOMEM; } - descr->buf_size = cpu_to_be32(bufsize); + descr->buf_size = cpu_to_be32(rx_skb_size); descr->dmac_cmd_status = 0; descr->result_size = 0; descr->valid_size = 0; @@ -410,11 +414,10 @@ static int gelic_descr_prepare_rx(struct gelic_card *card, if (offset) skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset); /* io-mmu-map the skb */ - descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card), - descr->skb->data, - GELIC_NET_MAX_MTU, - DMA_FROM_DEVICE)); - if (!descr->buf_addr) { + cpu_addr = dma_map_single(ctodev(card), descr->skb->data, + GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE); + descr->buf_addr = cpu_to_be32(cpu_addr); + if (dma_mapping_error(ctodev(card), cpu_addr)) { dev_kfree_skb_any(descr->skb); descr->skb = NULL; dev_info(ctodev(card), @@ -794,7 +797,7 @@ static int gelic_descr_prepare_tx(struct gelic_card *card, buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE); - if (!buf) { + if (dma_mapping_error(ctodev(card), buf)) { dev_err(ctodev(card), "dma map 2 failed (%p, %i). Dropping packet\n", skb->data, skb->len); @@ -930,7 +933,7 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr, data_error = be32_to_cpu(descr->data_error); /* unmap skb buffer */ dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr), - GELIC_NET_MAX_MTU, + GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE); skb_put(skb, be32_to_cpu(descr->valid_size)? 
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h index fbbf9b54b..0e592fc19 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h +++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h @@ -32,8 +32,9 @@ #define GELIC_NET_RX_DESCRIPTORS 128 /* num of descriptors */ #define GELIC_NET_TX_DESCRIPTORS 128 /* num of descriptors */ -#define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN -#define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN +#define GELIC_NET_MAX_FRAME 2312 +#define GELIC_NET_MAX_MTU 2294 +#define GELIC_NET_MIN_MTU 64 #define GELIC_NET_RXBUF_ALIGN 128 #define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ #define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 4e1504587..e3f0beaa7 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -543,7 +543,7 @@ static void xemaclite_tx_timeout(struct net_device *dev) xemaclite_enable_interrupts(lp); if (lp->deferred_skb) { - dev_kfree_skb(lp->deferred_skb); + dev_kfree_skb_irq(lp->deferred_skb); lp->deferred_skb = NULL; dev->stats.tx_errors++; } diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index fd5288ff5..e3438cef5 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -503,6 +503,11 @@ static void xirc2ps_detach(struct pcmcia_device *link) { struct net_device *dev = link->priv; + struct local_info *local = netdev_priv(dev); + + netif_carrier_off(dev); + netif_tx_disable(dev); + cancel_work_sync(&local->tx_timeout_task); dev_dbg(&link->dev, "detach\n"); diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 3b48c8905..7f14aad1c 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c @@ -3844,10 +3844,24 @@ static int dfx_init(void) int status; status = pci_register_driver(&dfx_pci_driver); - if (!status) - status = eisa_driver_register(&dfx_eisa_driver); - if (!status) - status = tc_register_driver(&dfx_tc_driver); + if (status) + goto err_pci_register; + + status = eisa_driver_register(&dfx_eisa_driver); + if (status) + goto err_eisa_register; + + status = tc_register_driver(&dfx_tc_driver); + if (status) + goto err_tc_register; + + return 0; + +err_tc_register: + eisa_driver_unregister(&dfx_eisa_driver); +err_eisa_register: + pci_unregister_driver(&dfx_pci_driver); +err_pci_register: return status; } diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index 1e62d0073..787eaf3f4 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c @@ -772,7 +772,7 @@ static void epp_bh(struct work_struct *work) * ===================== network driver interface ========================= */ -static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t baycom_send_packet(struct sk_buff *skb, struct net_device *dev) { struct baycom_state *bc = netdev_priv(dev); diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c index 6c03932d8..3dc4eb841 100644 --- a/drivers/net/hamradio/scc.c +++ b/drivers/net/hamradio/scc.c @@ -300,12 +300,12 @@ static inline void scc_discard_buffers(struct scc_channel *scc) spin_lock_irqsave(&scc->lock, flags); if (scc->tx_buff != NULL) { - dev_kfree_skb(scc->tx_buff); + dev_kfree_skb_irq(scc->tx_buff); scc->tx_buff = NULL; } while (!skb_queue_empty(&scc->tx_queue)) - dev_kfree_skb(skb_dequeue(&scc->tx_queue)); + 
dev_kfree_skb_irq(skb_dequeue(&scc->tx_queue)); spin_unlock_irqrestore(&scc->lock, flags); } @@ -1667,7 +1667,7 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev) if (skb_queue_len(&scc->tx_queue) > scc->dev->tx_queue_len) { struct sk_buff *skb_del; skb_del = skb_dequeue(&scc->tx_queue); - dev_kfree_skb(skb_del); + dev_kfree_skb_irq(skb_del); } skb_queue_tail(&scc->tx_queue, skb); netif_trans_update(dev); diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index 917edb3d0..f75faec23 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -1943,10 +1943,9 @@ static int ca8210_skb_tx( struct ca8210_priv *priv ) { - int status; struct ieee802154_hdr header = { }; struct secspec secspec; - unsigned int mac_len; + int mac_len, status; dev_dbg(&priv->spi->dev, "%s called\n", __func__); @@ -1954,6 +1953,8 @@ static int ca8210_skb_tx( * packet */ mac_len = ieee802154_hdr_peek_addrs(skb, &header); + if (mac_len < 0) + return mac_len; secspec.security_level = header.sec.level; secspec.key_id_mode = header.sec.key_id_mode; diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index d192936b7..786391859 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -210,7 +210,7 @@ static __net_init int loopback_net_init(struct net *net) int err; err = -ENOMEM; - dev = alloc_netdev(0, "lo", NET_NAME_UNKNOWN, loopback_setup); + dev = alloc_netdev(0, "lo", NET_NAME_PREDICTABLE, loopback_setup); if (!dev) goto out; diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 33974e751..ca0053775 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -140,7 +140,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data, enqueue_again: rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); if (rc) { - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); ndev->stats.rx_errors++; ndev->stats.rx_fifo_errors++; } @@ -195,7 +195,7 @@ static void ntb_netdev_tx_handler(struct ntb_transport_qp *qp, void *qp_data, ndev->stats.tx_aborted_errors++; } - dev_kfree_skb(skb); + dev_kfree_skb_any(skb); if (ntb_transport_tx_free_entry(dev->qp) >= tx_start) { /* Make sure anybody stopping the queue after this sees the new diff --git a/drivers/net/phy/mdio-thunder.c b/drivers/net/phy/mdio-thunder.c index c0c922eff..959bf3421 100644 --- a/drivers/net/phy/mdio-thunder.c +++ b/drivers/net/phy/mdio-thunder.c @@ -107,6 +107,7 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev, if (i >= ARRAY_SIZE(nexus->buses)) break; } + fwnode_handle_put(fwn); return 0; err_release_regions: diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 1d1fbd7bd..550806351 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -103,7 +103,12 @@ EXPORT_SYMBOL(mdiobus_unregister_device); struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr) { - struct mdio_device *mdiodev = bus->mdio_map[addr]; + struct mdio_device *mdiodev; + + if (addr < 0 || addr >= ARRAY_SIZE(bus->mdio_map)) + return NULL; + + mdiodev = bus->mdio_map[addr]; if (!mdiodev) return NULL; diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index 7ceebbc4b..75546829b 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -246,11 +246,26 @@ static struct phy_driver meson_gxl_phy[] = { .config_intr = meson_gxl_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, + .read_mmd = genphy_read_mmd_unsupported, + .write_mmd = 
genphy_write_mmd_unsupported, + }, { + PHY_ID_MATCH_EXACT(0x01803301), + .name = "Meson G12A Internal PHY", + .features = PHY_BASIC_FEATURES, + .flags = PHY_IS_INTERNAL, + .soft_reset = genphy_soft_reset, + .ack_interrupt = meson_gxl_ack_interrupt, + .config_intr = meson_gxl_config_intr, + .suspend = genphy_suspend, + .resume = genphy_resume, + .read_mmd = genphy_read_mmd_unsupported, + .write_mmd = genphy_write_mmd_unsupported, }, }; static struct mdio_device_id __maybe_unused meson_gxl_tbl[] = { { 0x01814400, 0xfffffff0 }, + { PHY_ID_MATCH_VENDOR(0x01803301) }, { } }; diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index c32820838..fd7c9f5ff 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -112,8 +112,11 @@ static int lan911x_config_init(struct phy_device *phydev) static int lan87xx_read_status(struct phy_device *phydev) { struct smsc_phy_priv *priv = phydev->priv; + int err; - int err = genphy_read_status(phydev); + err = genphy_read_status(phydev); + if (err) + return err; if (!phydev->link && priv->energy_enable) { int i; diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index bd6084e31..f44dc80ed 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -91,6 +91,7 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev) if (!priv->phy_dev->drv) { dev_info(dev, "Attached phy not ready\n"); + put_device(&priv->phy_dev->mdio.dev); return -EPROBE_DEFER; } diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 3f335b57d..220b28711 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1528,6 +1528,8 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb) int len; unsigned char *cp; + skb->dev = ppp->dev; + if (proto < 0x8000) { #ifdef CONFIG_PPP_FILTER /* check if we should pass this packet */ diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 5194b2ccd..e61f02f76 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -256,6 +256,9 @@ struct tun_struct { struct tun_prog __rcu *steering_prog; struct tun_prog __rcu *filter_prog; struct ethtool_link_ksettings link_ksettings; + /* init args */ + struct file *file; + struct ifreq *ifr; }; struct veth { @@ -281,6 +284,9 @@ void *tun_ptr_to_xdp(void *ptr) } EXPORT_SYMBOL(tun_ptr_to_xdp); +static void tun_flow_init(struct tun_struct *tun); +static void tun_flow_uninit(struct tun_struct *tun); + static int tun_napi_receive(struct napi_struct *napi, int budget) { struct tun_file *tfile = container_of(napi, struct tun_file, napi); @@ -1038,6 +1044,49 @@ static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) static const struct ethtool_ops tun_ethtool_ops; +static int tun_net_init(struct net_device *dev) +{ + struct tun_struct *tun = netdev_priv(dev); + struct ifreq *ifr = tun->ifr; + int err; + + tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); + if (!tun->pcpu_stats) + return -ENOMEM; + + spin_lock_init(&tun->lock); + + err = security_tun_dev_alloc_security(&tun->security); + if (err < 0) { + free_percpu(tun->pcpu_stats); + return err; + } + + tun_flow_init(tun); + + dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | + TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX; + dev->features = dev->hw_features | NETIF_F_LLTX; + dev->vlan_features = dev->features & + ~(NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_STAG_TX); + + tun->flags = (tun->flags & ~TUN_FEATURES) | + (ifr->ifr_flags & TUN_FEATURES); + + 
INIT_LIST_HEAD(&tun->disabled); + err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI, + ifr->ifr_flags & IFF_NAPI_FRAGS, false); + if (err < 0) { + tun_flow_uninit(tun); + security_tun_dev_free_security(tun->security); + free_percpu(tun->pcpu_stats); + return err; + } + return 0; +} + /* Net device detach from fd. */ static void tun_net_uninit(struct net_device *dev) { @@ -1268,6 +1317,7 @@ static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) } static const struct net_device_ops tun_netdev_ops = { + .ndo_init = tun_net_init, .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, @@ -1347,6 +1397,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp) } static const struct net_device_ops tap_netdev_ops = { + .ndo_init = tun_net_init, .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, .ndo_stop = tun_net_close, @@ -1386,7 +1437,7 @@ static void tun_flow_uninit(struct tun_struct *tun) #define MAX_MTU 65535 /* Initialize net device. */ -static void tun_net_init(struct net_device *dev) +static void tun_net_initialize(struct net_device *dev) { struct tun_struct *tun = netdev_priv(dev); @@ -2658,9 +2709,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) if (!dev) return -ENOMEM; - err = dev_get_valid_name(net, dev, name); - if (err < 0) - goto err_free_dev; dev_net_set(dev, net); dev->rtnl_link_ops = &tun_link_ops; @@ -2679,41 +2727,16 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) tun->rx_batched = 0; RCU_INIT_POINTER(tun->steering_prog, NULL); - tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats); - if (!tun->pcpu_stats) { - err = -ENOMEM; - goto err_free_dev; - } - - spin_lock_init(&tun->lock); - - err = security_tun_dev_alloc_security(&tun->security); - if (err < 0) - goto err_free_stat; - - tun_net_init(dev); - tun_flow_init(tun); - - dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | - TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_TX; - dev->features = dev->hw_features | NETIF_F_LLTX; - dev->vlan_features = dev->features & - ~(NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_STAG_TX); + tun->ifr = ifr; + tun->file = file; - tun->flags = (tun->flags & ~TUN_FEATURES) | - (ifr->ifr_flags & TUN_FEATURES); - - INIT_LIST_HEAD(&tun->disabled); - err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI, - ifr->ifr_flags & IFF_NAPI_FRAGS, false); - if (err < 0) - goto err_free_flow; + tun_net_initialize(dev); err = register_netdevice(tun->dev); - if (err < 0) - goto err_detach; + if (err < 0) { + free_netdev(dev); + return err; + } /* free_netdev() won't check refcnt, to aovid race * with dev_put() we need publish tun after registration. 
*/ @@ -2732,20 +2755,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) strcpy(ifr->ifr_name, tun->dev->name); return 0; - -err_detach: - tun_detach_all(dev); - /* register_netdevice() already called tun_free_netdev() */ - goto err_free_dev; - -err_free_flow: - tun_flow_uninit(tun); - security_tun_dev_free_security(tun->security); -err_free_stat: - free_percpu(tun->pcpu_stats); -err_free_dev: - free_netdev(dev); - return err; } static void tun_get_iff(struct net *net, struct tun_struct *tun, diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c index 41bac861c..72a93dc2d 100644 --- a/drivers/net/usb/cdc_mbim.c +++ b/drivers/net/usb/cdc_mbim.c @@ -665,6 +665,11 @@ static const struct usb_device_id mbim_devs[] = { .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, }, + /* Telit FE990 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1081, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle, + }, + /* default entry */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&cdc_mbim_info_zlp, diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c index 0cc6993c2..93ee909c7 100644 --- a/drivers/net/usb/kalmia.c +++ b/drivers/net/usb/kalmia.c @@ -69,8 +69,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len, init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT); if (status != 0) { netdev_err(dev->net, - "Error sending init packet. Status %i, length %i\n", - status, act_len); + "Error sending init packet. Status %i\n", + status); return status; } else if (act_len != init_msg_len) { @@ -87,8 +87,8 @@ kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len, if (status != 0) netdev_err(dev->net, - "Error receiving init result. Status %i, length %i\n", - status, act_len); + "Error receiving init result. 
Status %i\n", + status); else if (act_len != expected_len) netdev_err(dev->net, "Unexpected init result length: %i\n", act_len); diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c index 6fe59373c..8bab7306e 100644 --- a/drivers/net/usb/plusb.c +++ b/drivers/net/usb/plusb.c @@ -69,9 +69,7 @@ static inline int pl_vendor_req(struct usbnet *dev, u8 req, u8 val, u8 index) { - return usbnet_read_cmd(dev, req, - USB_DIR_IN | USB_TYPE_VENDOR | - USB_RECIP_DEVICE, + return usbnet_write_cmd(dev, req, USB_TYPE_VENDOR | USB_RECIP_DEVICE, val, index, NULL, 0); } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 24ce49b31..541793224 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1322,6 +1322,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index ab41a63aa..497d6bcdc 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -267,7 +267,8 @@ static int rndis_query(struct usbnet *dev, struct usb_interface *intf, off = le32_to_cpu(u.get_c->offset); len = le32_to_cpu(u.get_c->len); - if (unlikely((8 + off + len) > CONTROL_BUFFER_SIZE)) + if (unlikely((off > CONTROL_BUFFER_SIZE - 8) || + (len > CONTROL_BUFFER_SIZE - 8 - off))) goto response_error; if (*reply_len != -1 && len != *reply_len) diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 8b9fd4e07..313a4b0ed 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -2213,6 +2213,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING; align_count = (4 - ((size + RXW_PADDING) % 4)) % 4; + if (unlikely(size > skb->len)) { + netif_dbg(dev, rx_err, dev->net, + "size err rx_cmd_a=0x%08x\n", + rx_cmd_a); + return 0; + } + if (unlikely(rx_cmd_a & RX_CMD_A_RED)) { netif_dbg(dev, rx_err, dev->net, "Error rx_cmd_a=0x%08x\n", rx_cmd_a); diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 4f29010e1..085048686 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1950,6 +1950,12 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) size = (u16)((header & RX_STS_FL_) >> 16); align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4; + if (unlikely(size > skb->len)) { + netif_dbg(dev, rx_err, dev->net, + "size err header=0x%08x\n", header); + return 0; + } + if (unlikely(header & RX_STS_ES_)) { netif_dbg(dev, rx_err, dev->net, "Error header=0x%08x\n", header); diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c index 83640628c..8bee8286e 100644 --- a/drivers/net/usb/sr9700.c +++ b/drivers/net/usb/sr9700.c @@ -410,7 +410,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb) /* ignore the CRC length */ len = (skb->data[1] | (skb->data[2] << 8)) - 4; - if (len > ETH_FRAME_LEN || len > skb->len) + if (len > ETH_FRAME_LEN || len > skb->len || len < 0) return 0; /* the last packet of current skb */ diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 406ef4cc6..0cd46735e 100644 --- 
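/*
 * Editorial sketch of the bounds-check pattern used in the rndis_query()
 * hunk above: instead of testing "8 + off + len > CONTROL_BUFFER_SIZE",
 * which can wrap around when off and len come straight from the
 * device-controlled response, each value is compared against what remains
 * of the buffer. The helper name below is illustrative, not from the
 * driver:
 *
 *   static bool rndis_resp_fits(u32 off, u32 len, u32 buf_size)
 *   {
 *           if (off > buf_size - 8)
 *                   return false;   // offset alone already past the end
 *           if (len > buf_size - 8 - off)
 *                   return false;   // payload would run past the end
 *           return true;
 *   }
 *
 * with buf_size == CONTROL_BUFFER_SIZE and the 8 accounting for the header
 * bytes in front of the returned data, exactly as in the patched check.
 */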
a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -600,8 +600,13 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, int page_off, unsigned int *len) { - struct page *page = alloc_page(GFP_ATOMIC); + int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + struct page *page; + if (page_off + *len + tailroom > PAGE_SIZE) + return NULL; + + page = alloc_page(GFP_ATOMIC); if (!page) return NULL; @@ -609,7 +614,6 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, page_off += *len; while (--*num_buf) { - int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); unsigned int buflen; void *buf; int off; diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index 2a3f0f1a2..bc249f81c 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -2617,6 +2617,7 @@ fst_remove_one(struct pci_dev *pdev) for (i = 0; i < card->nports; i++) { struct net_device *dev = port_to_dev(&card->ports[i]); unregister_hdlc_device(dev); + free_netdev(dev); } fst_disable_intr(card); @@ -2637,6 +2638,7 @@ fst_remove_one(struct pci_dev *pdev) card->tx_dma_handle_card); } fst_card_array[card->card_no] = NULL; + kfree(card); } static struct pci_driver fst_driver = { diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 58e189ec6..5d3cf354f 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -241,6 +241,11 @@ static void ar5523_cmd_tx_cb(struct urb *urb) } } +static void ar5523_cancel_tx_cmd(struct ar5523 *ar) +{ + usb_kill_urb(ar->tx_cmd.urb_tx); +} + static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata, int ilen, void *odata, int olen, int flags) { @@ -280,6 +285,7 @@ static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata, } if (!wait_for_completion_timeout(&cmd->done, 2 * HZ)) { + ar5523_cancel_tx_cmd(ar); cmd->odata = NULL; ar5523_err(ar, "timeout waiting for command %02x reply\n", code); diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index caece8339..92757495c 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c @@ -3739,18 +3739,22 @@ static struct pci_driver ath10k_pci_driver = { static int __init ath10k_pci_init(void) { - int ret; + int ret1, ret2; - ret = pci_register_driver(&ath10k_pci_driver); - if (ret) + ret1 = pci_register_driver(&ath10k_pci_driver); + if (ret1) printk(KERN_ERR "failed to register ath10k pci driver: %d\n", - ret); + ret1); - ret = ath10k_ahb_init(); - if (ret) - printk(KERN_ERR "ahb init failed: %d\n", ret); + ret2 = ath10k_ahb_init(); + if (ret2) + printk(KERN_ERR "ahb init failed: %d\n", ret2); - return ret; + if (ret1 && ret2) + return ret1; + + /* registered to at least one bus */ + return 0; } module_init(ath10k_pci_init); diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index c8c7afe0e..e23d58f83 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c @@ -244,11 +244,11 @@ static inline void ath9k_skb_queue_complete(struct hif_device_usb *hif_dev, ath9k_htc_txcompletion_cb(hif_dev->htc_handle, skb, txok); if (txok) { - TX_STAT_INC(skb_success); - TX_STAT_ADD(skb_success_bytes, ln); + TX_STAT_INC(hif_dev, skb_success); + TX_STAT_ADD(hif_dev, skb_success_bytes, ln); } else - TX_STAT_INC(skb_failed); + TX_STAT_INC(hif_dev, skb_failed); } } @@ -302,7 +302,7 @@ static void hif_usb_tx_cb(struct urb *urb) 
hif_dev->tx.tx_buf_cnt++; if (!(hif_dev->tx.flags & HIF_USB_TX_STOP)) __hif_usb_tx(hif_dev); /* Check for pending SKBs */ - TX_STAT_INC(buf_completed); + TX_STAT_INC(hif_dev, buf_completed); spin_unlock(&hif_dev->tx.tx_lock); } @@ -353,7 +353,7 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev) tx_buf->len += tx_buf->offset; __skb_queue_tail(&tx_buf->skb_queue, nskb); - TX_STAT_INC(skb_queued); + TX_STAT_INC(hif_dev, skb_queued); } usb_fill_bulk_urb(tx_buf->urb, hif_dev->udev, @@ -368,11 +368,10 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev) __skb_queue_head_init(&tx_buf->skb_queue); list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf); hif_dev->tx.tx_buf_cnt++; + } else { + TX_STAT_INC(hif_dev, buf_queued); } - if (!ret) - TX_STAT_INC(buf_queued); - return ret; } @@ -515,7 +514,7 @@ static void hif_usb_sta_drain(void *hif_handle, u8 idx) ath9k_htc_txcompletion_cb(hif_dev->htc_handle, skb, false); hif_dev->tx.tx_skb_cnt--; - TX_STAT_INC(skb_failed); + TX_STAT_INC(hif_dev, skb_failed); } } @@ -562,11 +561,11 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, memcpy(ptr, skb->data, rx_remain_len); rx_pkt_len += rx_remain_len; - hif_dev->rx_remain_len = 0; skb_put(remain_skb, rx_pkt_len); skb_pool[pool_index++] = remain_skb; - + hif_dev->remain_skb = NULL; + hif_dev->rx_remain_len = 0; } else { index = rx_remain_len; } @@ -585,16 +584,21 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, pkt_len = get_unaligned_le16(ptr + index); pkt_tag = get_unaligned_le16(ptr + index + 2); + /* It is supposed that if we have an invalid pkt_tag or + * pkt_len then the whole input SKB is considered invalid + * and dropped; the associated packets already in skb_pool + * are dropped, too. + */ if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) { - RX_STAT_INC(skb_dropped); - return; + RX_STAT_INC(hif_dev, skb_dropped); + goto invalid_pkt; } if (pkt_len > 2 * MAX_RX_BUF_SIZE) { dev_err(&hif_dev->udev->dev, "ath9k_htc: invalid pkt_len (%x)\n", pkt_len); - RX_STAT_INC(skb_dropped); - return; + RX_STAT_INC(hif_dev, skb_dropped); + goto invalid_pkt; } pad_len = 4 - (pkt_len & 0x3); @@ -606,11 +610,6 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, if (index > MAX_RX_BUF_SIZE) { spin_lock(&hif_dev->rx_lock); - hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE; - hif_dev->rx_transfer_len = - MAX_RX_BUF_SIZE - chk_idx - 4; - hif_dev->rx_pad_len = pad_len; - nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC); if (!nskb) { dev_err(&hif_dev->udev->dev, @@ -618,8 +617,14 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, spin_unlock(&hif_dev->rx_lock); goto err; } + + hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE; + hif_dev->rx_transfer_len = + MAX_RX_BUF_SIZE - chk_idx - 4; + hif_dev->rx_pad_len = pad_len; + skb_reserve(nskb, 32); - RX_STAT_INC(skb_allocated); + RX_STAT_INC(hif_dev, skb_allocated); memcpy(nskb->data, &(skb->data[chk_idx+4]), hif_dev->rx_transfer_len); @@ -640,7 +645,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, goto err; } skb_reserve(nskb, 32); - RX_STAT_INC(skb_allocated); + RX_STAT_INC(hif_dev, skb_allocated); memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len); skb_put(nskb, pkt_len); @@ -650,11 +655,18 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev, err: for (i = 0; i < pool_index; i++) { - RX_STAT_ADD(skb_completed_bytes, skb_pool[i]->len); + RX_STAT_ADD(hif_dev, skb_completed_bytes, skb_pool[i]->len); ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i], 
skb_pool[i]->len, USB_WLAN_RX_PIPE); - RX_STAT_INC(skb_completed); + RX_STAT_INC(hif_dev, skb_completed); } + return; +invalid_pkt: + for (i = 0; i < pool_index; i++) { + dev_kfree_skb_any(skb_pool[i]); + RX_STAT_INC(hif_dev, skb_dropped); + } + return; } static void ath9k_hif_usb_rx_cb(struct urb *urb) @@ -709,14 +721,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) struct rx_buf *rx_buf = (struct rx_buf *)urb->context; struct hif_device_usb *hif_dev = rx_buf->hif_dev; struct sk_buff *skb = rx_buf->skb; - struct sk_buff *nskb; int ret; if (!skb) return; if (!hif_dev) - goto free; + goto free_skb; switch (urb->status) { case 0: @@ -725,7 +736,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) case -ECONNRESET: case -ENODEV: case -ESHUTDOWN: - goto free; + goto free_skb; default: skb_reset_tail_pointer(skb); skb_trim(skb, 0); @@ -736,25 +747,27 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb) if (likely(urb->actual_length != 0)) { skb_put(skb, urb->actual_length); - /* Process the command first */ + /* + * Process the command first. + * skb is either freed here or passed to be + * managed to another callback function. + */ ath9k_htc_rx_msg(hif_dev->htc_handle, skb, skb->len, USB_REG_IN_PIPE); - - nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC); - if (!nskb) { + skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC); + if (!skb) { dev_err(&hif_dev->udev->dev, "ath9k_htc: REG_IN memory allocation failure\n"); - urb->context = NULL; - return; + goto free_rx_buf; } - rx_buf->skb = nskb; + rx_buf->skb = skb; usb_fill_int_urb(urb, hif_dev->udev, usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE), - nskb->data, MAX_REG_IN_BUF_SIZE, + skb->data, MAX_REG_IN_BUF_SIZE, ath9k_hif_usb_reg_in_cb, rx_buf, 1); } @@ -763,12 +776,13 @@ resubmit: ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) { usb_unanchor_urb(urb); - goto free; + goto free_skb; } return; -free: +free_skb: kfree_skb(skb); +free_rx_buf: kfree(rx_buf); urb->context = NULL; } @@ -781,14 +795,10 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev) spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); list_for_each_entry_safe(tx_buf, tx_buf_tmp, &hif_dev->tx.tx_buf, list) { - usb_get_urb(tx_buf->urb); - spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); - usb_kill_urb(tx_buf->urb); list_del(&tx_buf->list); usb_free_urb(tx_buf->urb); kfree(tx_buf->buf); kfree(tx_buf); - spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); } spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); @@ -1330,10 +1340,24 @@ static int send_eject_command(struct usb_interface *interface) static int ath9k_hif_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { + struct usb_endpoint_descriptor *bulk_in, *bulk_out, *int_in, *int_out; struct usb_device *udev = interface_to_usbdev(interface); + struct usb_host_interface *alt; struct hif_device_usb *hif_dev; int ret = 0; + /* Verify the expected endpoints are present */ + alt = interface->cur_altsetting; + if (usb_find_common_endpoints(alt, &bulk_in, &bulk_out, &int_in, &int_out) < 0 || + usb_endpoint_num(bulk_in) != USB_WLAN_RX_PIPE || + usb_endpoint_num(bulk_out) != USB_WLAN_TX_PIPE || + usb_endpoint_num(int_in) != USB_REG_IN_PIPE || + usb_endpoint_num(int_out) != USB_REG_OUT_PIPE) { + dev_err(&udev->dev, + "ath9k_htc: Device endpoint numbers are not the expected ones\n"); + return -ENODEV; + } + if (id->driver_info == STORAGE_DEVICE) return send_eject_command(interface); diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h index 
81107100e..232e93dfb 100644 --- a/drivers/net/wireless/ath/ath9k/htc.h +++ b/drivers/net/wireless/ath/ath9k/htc.h @@ -325,14 +325,18 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb) } #ifdef CONFIG_ATH9K_HTC_DEBUGFS -#define __STAT_SAFE(expr) (hif_dev->htc_handle->drv_priv ? (expr) : 0) -#define TX_STAT_INC(c) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.tx_stats.c++) -#define TX_STAT_ADD(c, a) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.tx_stats.c += a) -#define RX_STAT_INC(c) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c++) -#define RX_STAT_ADD(c, a) __STAT_SAFE(hif_dev->htc_handle->drv_priv->debug.skbrx_stats.c += a) -#define CAB_STAT_INC priv->debug.tx_stats.cab_queued++ - -#define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++) +#define __STAT_SAFE(hif_dev, expr) do { ((hif_dev)->htc_handle->drv_priv ? (expr) : 0); } while (0) +#define CAB_STAT_INC(priv) do { ((priv)->debug.tx_stats.cab_queued++); } while (0) +#define TX_QSTAT_INC(priv, q) do { ((priv)->debug.tx_stats.queue_stats[q]++); } while (0) + +#define TX_STAT_INC(hif_dev, c) \ + __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.tx_stats.c++) +#define TX_STAT_ADD(hif_dev, c, a) \ + __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.tx_stats.c += a) +#define RX_STAT_INC(hif_dev, c) \ + __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.skbrx_stats.c++) +#define RX_STAT_ADD(hif_dev, c, a) \ + __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.skbrx_stats.c += a) void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv, struct ath_rx_status *rs); @@ -372,13 +376,13 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw, struct ethtool_stats *stats, u64 *data); #else -#define TX_STAT_INC(c) do { } while (0) -#define TX_STAT_ADD(c, a) do { } while (0) -#define RX_STAT_INC(c) do { } while (0) -#define RX_STAT_ADD(c, a) do { } while (0) -#define CAB_STAT_INC do { } while (0) +#define TX_STAT_INC(hif_dev, c) do { } while (0) +#define TX_STAT_ADD(hif_dev, c, a) do { } while (0) +#define RX_STAT_INC(hif_dev, c) do { } while (0) +#define RX_STAT_ADD(hif_dev, c, a) do { } while (0) -#define TX_QSTAT_INC(c) do { } while (0) +#define CAB_STAT_INC(priv) +#define TX_QSTAT_INC(priv, c) static inline void ath9k_htc_err_stat_rx(struct ath9k_htc_priv *priv, struct ath_rx_status *rs) diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 3cd3f3ca1..979ac31a7 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -106,20 +106,20 @@ static inline enum htc_endpoint_id get_htc_epid(struct ath9k_htc_priv *priv, switch (qnum) { case 0: - TX_QSTAT_INC(IEEE80211_AC_VO); + TX_QSTAT_INC(priv, IEEE80211_AC_VO); epid = priv->data_vo_ep; break; case 1: - TX_QSTAT_INC(IEEE80211_AC_VI); + TX_QSTAT_INC(priv, IEEE80211_AC_VI); epid = priv->data_vi_ep; break; case 2: - TX_QSTAT_INC(IEEE80211_AC_BE); + TX_QSTAT_INC(priv, IEEE80211_AC_BE); epid = priv->data_be_ep; break; case 3: default: - TX_QSTAT_INC(IEEE80211_AC_BK); + TX_QSTAT_INC(priv, IEEE80211_AC_BK); epid = priv->data_bk_ep; break; } @@ -323,7 +323,7 @@ static void ath9k_htc_tx_data(struct ath9k_htc_priv *priv, memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr)); if (is_cab) { - CAB_STAT_INC; + CAB_STAT_INC(priv); tx_ctl->epid = priv->cab_ep; return; } diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c index 6d69cf69f..6331c9808 100644 --- 
a/drivers/net/wireless/ath/ath9k/htc_hst.c +++ b/drivers/net/wireless/ath/ath9k/htc_hst.c @@ -394,7 +394,7 @@ static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle, * HTC Messages are handled directly here and the obtained SKB * is freed. * - * Service messages (Data, WMI) passed to the corresponding + * Service messages (Data, WMI) are passed to the corresponding * endpoint RX handlers, which have to free the SKB. */ void ath9k_htc_rx_msg(struct htc_target *htc_handle, @@ -481,6 +481,8 @@ invalid: if (endpoint->ep_callbacks.rx) endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv, skb, epid); + else + goto invalid; } } diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c index 066677bb8..e4ea6f5cc 100644 --- a/drivers/net/wireless/ath/ath9k/wmi.c +++ b/drivers/net/wireless/ath/ath9k/wmi.c @@ -338,6 +338,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id, if (!time_left) { ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n", wmi_cmd_to_name(cmd_id)); + wmi->last_seq_id = 0; mutex_unlock(&wmi->op_mutex); kfree_skb(skb); return -ETIMEDOUT; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index 8510d207e..3626ea9be 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -273,6 +273,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) err); goto done; } + buf[sizeof(buf) - 1] = '\0'; ptr = (char *)buf; strsep(&ptr, "\n"); @@ -289,15 +290,17 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) if (err) { brcmf_dbg(TRACE, "retrieving clmver failed, %d\n", err); } else { + buf[sizeof(buf) - 1] = '\0'; clmver = (char *)buf; - /* store CLM version for adding it to revinfo debugfs file */ - memcpy(ifp->drvr->clmver, clmver, sizeof(ifp->drvr->clmver)); /* Replace all newline/linefeed characters with space * character */ strreplace(clmver, '\n', ' '); + /* store CLM version for adding it to revinfo debugfs file */ + memcpy(ifp->drvr->clmver, clmver, sizeof(ifp->drvr->clmver)); + brcmf_dbg(INFO, "CLM version = %s\n", clmver); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index 31bf2eb47..6fd155187 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -313,6 +313,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb, brcmf_err("%s: failed to expand headroom\n", brcmf_ifname(ifp)); atomic_inc(&drvr->bus_if->stats.pktcow_failed); + dev_kfree_skb(skb); goto done; } } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 4e5a6c311..d8460835f 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -648,6 +648,11 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev, char end = '\0'; size_t reqsz; + if (chiprev >= BITS_PER_TYPE(u32)) { + brcmf_err("Invalid chip revision %u\n", chiprev); + return NULL; + } + for (i = 0; i < table_size; i++) { if (mapping_table[i].chipid == chip && mapping_table[i].revmask & BIT(chiprev)) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index 768a99c15..e81e892dd 100644 --- 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c @@ -339,8 +339,11 @@ brcmf_msgbuf_alloc_pktid(struct device *dev, count++; } while (count < pktids->array_size); - if (count == pktids->array_size) + if (count == pktids->array_size) { + dma_unmap_single(dev, *physaddr, skb->len - data_offset, + pktids->direction); return -ENOMEM; + } array[*idx].data_offset = data_offset; array[*idx].physaddr = *physaddr; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index a21529d2c..6ee04af85 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -612,7 +612,7 @@ static int brcmf_pcie_exit_download_state(struct brcmf_pciedev_info *devinfo, } if (!brcmf_chip_set_active(devinfo->ci, resetintr)) - return -EINVAL; + return -EIO; return 0; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 0a96c1071..6eb1daf51 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -3337,6 +3337,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus, /* Take arm out of reset */ if (!brcmf_chip_set_active(bus->ci, rstvec)) { brcmf_err("error getting out of ARM core reset\n"); + bcmerror = -EIO; goto err; } diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index a3a470976..68c4ce352 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -2308,10 +2308,11 @@ static int ipw2100_alloc_skb(struct ipw2100_priv *priv, return -ENOMEM; packet->rxp = (struct ipw2100_rx *)packet->skb->data; - packet->dma_addr = pci_map_single(priv->pci_dev, packet->skb->data, + packet->dma_addr = dma_map_single(&priv->pci_dev->dev, + packet->skb->data, sizeof(struct ipw2100_rx), - PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(priv->pci_dev, packet->dma_addr)) { + DMA_FROM_DEVICE); + if (dma_mapping_error(&priv->pci_dev->dev, packet->dma_addr)) { dev_kfree_skb(packet->skb); return -ENOMEM; } @@ -2492,9 +2493,8 @@ static void isr_rx(struct ipw2100_priv *priv, int i, return; } - pci_unmap_single(priv->pci_dev, - packet->dma_addr, - sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE); + dma_unmap_single(&priv->pci_dev->dev, packet->dma_addr, + sizeof(struct ipw2100_rx), DMA_FROM_DEVICE); skb_put(packet->skb, status->frame_size); @@ -2576,8 +2576,8 @@ static void isr_rx_monitor(struct ipw2100_priv *priv, int i, return; } - pci_unmap_single(priv->pci_dev, packet->dma_addr, - sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE); + dma_unmap_single(&priv->pci_dev->dev, packet->dma_addr, + sizeof(struct ipw2100_rx), DMA_FROM_DEVICE); memmove(packet->skb->data + sizeof(struct ipw_rt_hdr), packet->skb->data, status->frame_size); @@ -2702,9 +2702,9 @@ static void __ipw2100_rx_process(struct ipw2100_priv *priv) /* Sync the DMA for the RX buffer so CPU is sure to get * the correct values */ - pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr, - sizeof(struct ipw2100_rx), - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&priv->pci_dev->dev, packet->dma_addr, + sizeof(struct ipw2100_rx), + DMA_FROM_DEVICE); if (unlikely(ipw2100_corruption_check(priv, i))) { ipw2100_corruption_detected(priv, i); @@ -2936,9 +2936,8 @@ static int __ipw2100_tx_process(struct 
ipw2100_priv *priv) (packet->index + 1 + i) % txq->entries, tbd->host_addr, tbd->buf_length); - pci_unmap_single(priv->pci_dev, - tbd->host_addr, - tbd->buf_length, PCI_DMA_TODEVICE); + dma_unmap_single(&priv->pci_dev->dev, tbd->host_addr, + tbd->buf_length, DMA_TO_DEVICE); } libipw_txb_free(packet->info.d_struct.txb); @@ -3178,15 +3177,13 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv) tbd->buf_length = packet->info.d_struct.txb-> fragments[i]->len - LIBIPW_3ADDR_LEN; - tbd->host_addr = pci_map_single(priv->pci_dev, + tbd->host_addr = dma_map_single(&priv->pci_dev->dev, packet->info.d_struct. - txb->fragments[i]-> - data + + txb->fragments[i]->data + LIBIPW_3ADDR_LEN, tbd->buf_length, - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(priv->pci_dev, - tbd->host_addr)) { + DMA_TO_DEVICE); + if (dma_mapping_error(&priv->pci_dev->dev, tbd->host_addr)) { IPW_DEBUG_TX("dma mapping error\n"); break; } @@ -3195,10 +3192,10 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv) txq->next, tbd->host_addr, tbd->buf_length); - pci_dma_sync_single_for_device(priv->pci_dev, - tbd->host_addr, - tbd->buf_length, - PCI_DMA_TODEVICE); + dma_sync_single_for_device(&priv->pci_dev->dev, + tbd->host_addr, + tbd->buf_length, + DMA_TO_DEVICE); txq->next++; txq->next %= txq->entries; @@ -3453,9 +3450,9 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv) return -ENOMEM; for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) { - v = pci_zalloc_consistent(priv->pci_dev, - sizeof(struct ipw2100_cmd_header), - &p); + v = dma_alloc_coherent(&priv->pci_dev->dev, + sizeof(struct ipw2100_cmd_header), &p, + GFP_KERNEL); if (!v) { printk(KERN_ERR DRV_NAME ": " "%s: PCI alloc failed for msg " @@ -3474,11 +3471,10 @@ static int ipw2100_msg_allocate(struct ipw2100_priv *priv) return 0; for (j = 0; j < i; j++) { - pci_free_consistent(priv->pci_dev, - sizeof(struct ipw2100_cmd_header), - priv->msg_buffers[j].info.c_struct.cmd, - priv->msg_buffers[j].info.c_struct. - cmd_phys); + dma_free_coherent(&priv->pci_dev->dev, + sizeof(struct ipw2100_cmd_header), + priv->msg_buffers[j].info.c_struct.cmd, + priv->msg_buffers[j].info.c_struct.cmd_phys); } kfree(priv->msg_buffers); @@ -3509,11 +3505,10 @@ static void ipw2100_msg_free(struct ipw2100_priv *priv) return; for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) { - pci_free_consistent(priv->pci_dev, - sizeof(struct ipw2100_cmd_header), - priv->msg_buffers[i].info.c_struct.cmd, - priv->msg_buffers[i].info.c_struct. 
- cmd_phys); + dma_free_coherent(&priv->pci_dev->dev, + sizeof(struct ipw2100_cmd_header), + priv->msg_buffers[i].info.c_struct.cmd, + priv->msg_buffers[i].info.c_struct.cmd_phys); } kfree(priv->msg_buffers); @@ -4336,7 +4331,8 @@ static int status_queue_allocate(struct ipw2100_priv *priv, int entries) IPW_DEBUG_INFO("enter\n"); q->size = entries * sizeof(struct ipw2100_status); - q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic); + q->drv = dma_alloc_coherent(&priv->pci_dev->dev, q->size, &q->nic, + GFP_KERNEL); if (!q->drv) { IPW_DEBUG_WARNING("Can not allocate status queue.\n"); return -ENOMEM; @@ -4352,9 +4348,10 @@ static void status_queue_free(struct ipw2100_priv *priv) IPW_DEBUG_INFO("enter\n"); if (priv->status_queue.drv) { - pci_free_consistent(priv->pci_dev, priv->status_queue.size, - priv->status_queue.drv, - priv->status_queue.nic); + dma_free_coherent(&priv->pci_dev->dev, + priv->status_queue.size, + priv->status_queue.drv, + priv->status_queue.nic); priv->status_queue.drv = NULL; } @@ -4370,7 +4367,8 @@ static int bd_queue_allocate(struct ipw2100_priv *priv, q->entries = entries; q->size = entries * sizeof(struct ipw2100_bd); - q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic); + q->drv = dma_alloc_coherent(&priv->pci_dev->dev, q->size, &q->nic, + GFP_KERNEL); if (!q->drv) { IPW_DEBUG_INFO ("can't allocate shared memory for buffer descriptors\n"); @@ -4390,7 +4388,8 @@ static void bd_queue_free(struct ipw2100_priv *priv, struct ipw2100_bd_queue *q) return; if (q->drv) { - pci_free_consistent(priv->pci_dev, q->size, q->drv, q->nic); + dma_free_coherent(&priv->pci_dev->dev, q->size, q->drv, + q->nic); q->drv = NULL; } @@ -4450,9 +4449,9 @@ static int ipw2100_tx_allocate(struct ipw2100_priv *priv) } for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) { - v = pci_alloc_consistent(priv->pci_dev, - sizeof(struct ipw2100_data_header), - &p); + v = dma_alloc_coherent(&priv->pci_dev->dev, + sizeof(struct ipw2100_data_header), &p, + GFP_KERNEL); if (!v) { printk(KERN_ERR DRV_NAME ": %s: PCI alloc failed for tx " "buffers.\n", @@ -4472,11 +4471,10 @@ static int ipw2100_tx_allocate(struct ipw2100_priv *priv) return 0; for (j = 0; j < i; j++) { - pci_free_consistent(priv->pci_dev, - sizeof(struct ipw2100_data_header), - priv->tx_buffers[j].info.d_struct.data, - priv->tx_buffers[j].info.d_struct. - data_phys); + dma_free_coherent(&priv->pci_dev->dev, + sizeof(struct ipw2100_data_header), + priv->tx_buffers[j].info.d_struct.data, + priv->tx_buffers[j].info.d_struct.data_phys); } kfree(priv->tx_buffers); @@ -4553,12 +4551,10 @@ static void ipw2100_tx_free(struct ipw2100_priv *priv) priv->tx_buffers[i].info.d_struct.txb = NULL; } if (priv->tx_buffers[i].info.d_struct.data) - pci_free_consistent(priv->pci_dev, - sizeof(struct ipw2100_data_header), - priv->tx_buffers[i].info.d_struct. - data, - priv->tx_buffers[i].info.d_struct. 
- data_phys); + dma_free_coherent(&priv->pci_dev->dev, + sizeof(struct ipw2100_data_header), + priv->tx_buffers[i].info.d_struct.data, + priv->tx_buffers[i].info.d_struct.data_phys); } kfree(priv->tx_buffers); @@ -4621,9 +4617,10 @@ static int ipw2100_rx_allocate(struct ipw2100_priv *priv) return 0; for (j = 0; j < i; j++) { - pci_unmap_single(priv->pci_dev, priv->rx_buffers[j].dma_addr, + dma_unmap_single(&priv->pci_dev->dev, + priv->rx_buffers[j].dma_addr, sizeof(struct ipw2100_rx_packet), - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); dev_kfree_skb(priv->rx_buffers[j].skb); } @@ -4675,10 +4672,10 @@ static void ipw2100_rx_free(struct ipw2100_priv *priv) for (i = 0; i < RX_QUEUE_LENGTH; i++) { if (priv->rx_buffers[i].rxp) { - pci_unmap_single(priv->pci_dev, + dma_unmap_single(&priv->pci_dev->dev, priv->rx_buffers[i].dma_addr, sizeof(struct ipw2100_rx), - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); dev_kfree_skb(priv->rx_buffers[i].skb); } } @@ -6214,7 +6211,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev, pci_set_master(pci_dev); pci_set_drvdata(pci_dev, priv); - err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32)); if (err) { printk(KERN_WARNING DRV_NAME "Error calling pci_set_dma_mask.\n"); diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 04aee2fdb..c6f2cc308 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -3456,9 +3456,10 @@ static void ipw_rx_queue_reset(struct ipw_priv *priv, /* In the reset function, these buffers may have been allocated * to an SKB, so we need to unmap and free potential storage */ if (rxq->pool[i].skb != NULL) { - pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, - IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); - dev_kfree_skb(rxq->pool[i].skb); + dma_unmap_single(&priv->pci_dev->dev, + rxq->pool[i].dma_addr, + IPW_RX_BUF_SIZE, DMA_FROM_DEVICE); + dev_kfree_skb_irq(rxq->pool[i].skb); rxq->pool[i].skb = NULL; } list_add_tail(&rxq->pool[i].list, &rxq->rx_used); @@ -3790,7 +3791,8 @@ static int ipw_queue_tx_init(struct ipw_priv *priv, } q->bd = - pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr); + dma_alloc_coherent(&dev->dev, sizeof(q->bd[0]) * count, + &q->q.dma_addr, GFP_KERNEL); if (!q->bd) { IPW_ERROR("pci_alloc_consistent(%zd) failed\n", sizeof(q->bd[0]) * count); @@ -3832,9 +3834,10 @@ static void ipw_queue_tx_free_tfd(struct ipw_priv *priv, /* unmap chunks if any */ for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) { - pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]), + dma_unmap_single(&dev->dev, + le32_to_cpu(bd->u.data.chunk_ptr[i]), le16_to_cpu(bd->u.data.chunk_len[i]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); if (txq->txb[txq->q.last_used]) { libipw_txb_free(txq->txb[txq->q.last_used]); txq->txb[txq->q.last_used] = NULL; @@ -3866,8 +3869,8 @@ static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq) } /* free buffers belonging to queue itself */ - pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd, - q->dma_addr); + dma_free_coherent(&dev->dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd, + q->dma_addr); kfree(txq->txb); /* 0 fill whole structure */ @@ -5212,8 +5215,8 @@ static void ipw_rx_queue_replenish(void *data) list_del(element); rxb->dma_addr = - pci_map_single(priv->pci_dev, rxb->skb->data, - IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + dma_map_single(&priv->pci_dev->dev, rxb->skb->data, + IPW_RX_BUF_SIZE, 
DMA_FROM_DEVICE); list_add_tail(&rxb->list, &rxq->rx_free); rxq->free_count++; @@ -5246,8 +5249,9 @@ static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq) for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { if (rxq->pool[i].skb != NULL) { - pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr, - IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + dma_unmap_single(&priv->pci_dev->dev, + rxq->pool[i].dma_addr, + IPW_RX_BUF_SIZE, DMA_FROM_DEVICE); dev_kfree_skb(rxq->pool[i].skb); } } @@ -8285,9 +8289,8 @@ static void ipw_rx(struct ipw_priv *priv) } priv->rxq->queue[i] = NULL; - pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, - IPW_RX_BUF_SIZE, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&priv->pci_dev->dev, rxb->dma_addr, + IPW_RX_BUF_SIZE, DMA_FROM_DEVICE); pkt = (struct ipw_rx_packet *)rxb->skb->data; IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n", @@ -8439,8 +8442,8 @@ static void ipw_rx(struct ipw_priv *priv) rxb->skb = NULL; } - pci_unmap_single(priv->pci_dev, rxb->dma_addr, - IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); + dma_unmap_single(&priv->pci_dev->dev, rxb->dma_addr, + IPW_RX_BUF_SIZE, DMA_FROM_DEVICE); list_add_tail(&rxb->list, &priv->rxq->rx_used); i = (i + 1) % RX_QUEUE_SIZE; @@ -10239,11 +10242,10 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb, txb->fragments[i]->len - hdr_len); tfd->u.data.chunk_ptr[i] = - cpu_to_le32(pci_map_single - (priv->pci_dev, - txb->fragments[i]->data + hdr_len, - txb->fragments[i]->len - hdr_len, - PCI_DMA_TODEVICE)); + cpu_to_le32(dma_map_single(&priv->pci_dev->dev, + txb->fragments[i]->data + hdr_len, + txb->fragments[i]->len - hdr_len, + DMA_TO_DEVICE)); tfd->u.data.chunk_len[i] = cpu_to_le16(txb->fragments[i]->len - hdr_len); } @@ -10273,10 +10275,10 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb, dev_kfree_skb_any(txb->fragments[i]); txb->fragments[i] = skb; tfd->u.data.chunk_ptr[i] = - cpu_to_le32(pci_map_single - (priv->pci_dev, skb->data, - remaining_bytes, - PCI_DMA_TODEVICE)); + cpu_to_le32(dma_map_single(&priv->pci_dev->dev, + skb->data, + remaining_bytes, + DMA_TO_DEVICE)); le32_add_cpu(&tfd->u.data.num_chunks, 1); } @@ -11429,9 +11431,14 @@ static int ipw_wdev_init(struct net_device *dev) set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); /* With that information in place, we can now register the wiphy... 
*/ - if (wiphy_register(wdev->wiphy)) - rc = -EIO; + rc = wiphy_register(wdev->wiphy); + if (rc) + goto out; + + return 0; out: + kfree(priv->ieee->a_band.channels); + kfree(priv->ieee->bg_band.channels); return rc; } @@ -11649,9 +11656,9 @@ static int ipw_pci_probe(struct pci_dev *pdev, pci_set_master(pdev); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n"); goto out_pci_disable_device; diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index b536ec20e..d51a23815 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -3400,10 +3400,12 @@ static DEVICE_ATTR(dump_errors, 0200, NULL, il3945_dump_error_log); * *****************************************************************************/ -static void +static int il3945_setup_deferred_work(struct il_priv *il) { il->workqueue = create_singlethread_workqueue(DRV_NAME); + if (!il->workqueue) + return -ENOMEM; init_waitqueue_head(&il->wait_command_queue); @@ -3422,6 +3424,8 @@ il3945_setup_deferred_work(struct il_priv *il) tasklet_init(&il->irq_tasklet, il3945_irq_tasklet, (unsigned long)il); + + return 0; } static void @@ -3743,7 +3747,10 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } il_set_rxon_channel(il, &il->bands[NL80211_BAND_2GHZ].channels[5]); - il3945_setup_deferred_work(il); + err = il3945_setup_deferred_work(il); + if (err) + goto out_remove_sysfs; + il3945_setup_handlers(il); il_power_initialize(il); @@ -3755,7 +3762,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = il3945_setup_mac(il); if (err) - goto out_remove_sysfs; + goto out_destroy_workqueue; err = il_dbgfs_register(il, DRV_NAME); if (err) @@ -3767,9 +3774,10 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; -out_remove_sysfs: +out_destroy_workqueue: destroy_workqueue(il->workqueue); il->workqueue = NULL; +out_remove_sysfs: sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group); out_release_irq: free_irq(il->pci_dev->irq, il); diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 6fc51c74c..4970c19df 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -6236,10 +6236,12 @@ out: mutex_unlock(&il->mutex); } -static void +static int il4965_setup_deferred_work(struct il_priv *il) { il->workqueue = create_singlethread_workqueue(DRV_NAME); + if (!il->workqueue) + return -ENOMEM; init_waitqueue_head(&il->wait_command_queue); @@ -6260,6 +6262,8 @@ il4965_setup_deferred_work(struct il_priv *il) tasklet_init(&il->irq_tasklet, il4965_irq_tasklet, (unsigned long)il); + + return 0; } static void @@ -6649,7 +6653,10 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_disable_msi; } - il4965_setup_deferred_work(il); + err = il4965_setup_deferred_work(il); + if (err) + goto out_free_irq; + il4965_setup_handlers(il); /********************************************* @@ -6687,6 +6694,7 @@ il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) out_destroy_workqueue: destroy_workqueue(il->workqueue); il->workqueue = NULL; +out_free_irq: 
free_irq(il->pci_dev->irq, il); out_disable_msi: pci_disable_msi(il->pci_dev); diff --git a/drivers/net/wireless/intersil/orinoco/hw.c b/drivers/net/wireless/intersil/orinoco/hw.c index 61af5a28f..af49aa421 100644 --- a/drivers/net/wireless/intersil/orinoco/hw.c +++ b/drivers/net/wireless/intersil/orinoco/hw.c @@ -931,6 +931,8 @@ int __orinoco_hw_setup_enc(struct orinoco_private *priv) err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFAUTHENTICATION_AGERE, auth_flag); + if (err) + return err; } err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFWEPENABLED_AGERE, diff --git a/drivers/net/wireless/marvell/libertas/cmdresp.c b/drivers/net/wireless/marvell/libertas/cmdresp.c index b73d08381..5908f07d6 100644 --- a/drivers/net/wireless/marvell/libertas/cmdresp.c +++ b/drivers/net/wireless/marvell/libertas/cmdresp.c @@ -48,7 +48,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv, /* Free Tx and Rx packets */ spin_lock_irqsave(&priv->driver_lock, flags); - kfree_skb(priv->currenttxskb); + dev_kfree_skb_irq(priv->currenttxskb); priv->currenttxskb = NULL; priv->tx_pending_len = 0; spin_unlock_irqrestore(&priv->driver_lock, flags); diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c index d75763410..885abbd66 100644 --- a/drivers/net/wireless/marvell/libertas/if_usb.c +++ b/drivers/net/wireless/marvell/libertas/if_usb.c @@ -633,7 +633,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, priv->resp_len[i] = (recvlength - MESSAGE_HEADER_LEN); memcpy(priv->resp_buf[i], recvbuff + MESSAGE_HEADER_LEN, priv->resp_len[i]); - kfree_skb(skb); + dev_kfree_skb_irq(skb); lbs_notify_command_response(priv, i); spin_unlock_irqrestore(&priv->driver_lock, flags); diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c index f22e1c220..997c246b9 100644 --- a/drivers/net/wireless/marvell/libertas/main.c +++ b/drivers/net/wireless/marvell/libertas/main.c @@ -216,7 +216,7 @@ int lbs_stop_iface(struct lbs_private *priv) spin_lock_irqsave(&priv->driver_lock, flags); priv->iface_running = false; - kfree_skb(priv->currenttxskb); + dev_kfree_skb_irq(priv->currenttxskb); priv->currenttxskb = NULL; priv->tx_pending_len = 0; spin_unlock_irqrestore(&priv->driver_lock, flags); @@ -869,6 +869,7 @@ static int lbs_init_adapter(struct lbs_private *priv) ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL); if (ret) { pr_err("Out of memory allocating event FIFO buffer\n"); + lbs_free_cmd_buffer(priv); goto out; } diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c index 60941c319..5e7edc030 100644 --- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c +++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c @@ -616,7 +616,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, spin_lock_irqsave(&priv->driver_lock, flags); memcpy(priv->cmd_resp_buff, recvbuff + MESSAGE_HEADER_LEN, recvlength - MESSAGE_HEADER_LEN); - kfree_skb(skb); + dev_kfree_skb_irq(skb); lbtf_cmd_response_rx(priv); spin_unlock_irqrestore(&priv->driver_lock, flags); } diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c index 5dcc305cc..452ac56cd 100644 --- a/drivers/net/wireless/marvell/mwifiex/11n.c +++ b/drivers/net/wireless/marvell/mwifiex/11n.c @@ -901,7 +901,7 @@ mwifiex_send_delba_txbastream_tbl(struct mwifiex_private *priv, u8 tid) */ void 
mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *adapter) { - u8 i; + u8 i, j; u32 tx_win_size; struct mwifiex_private *priv; @@ -932,8 +932,8 @@ void mwifiex_update_ampdu_txwinsize(struct mwifiex_adapter *adapter) if (tx_win_size != priv->add_ba_param.tx_win_size) { if (!priv->media_connected) continue; - for (i = 0; i < MAX_NUM_TID; i++) - mwifiex_send_delba_txbastream_tbl(priv, i); + for (j = 0; j < MAX_NUM_TID; j++) + mwifiex_send_delba_txbastream_tbl(priv, j); } } } diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c index 0773d8107..a484dc6ad 100644 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c @@ -58,6 +58,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = { }; static const struct of_device_id mwifiex_sdio_of_match_table[] = { + { .compatible = "marvell,sd8787" }, { .compatible = "marvell,sd8897" }, { .compatible = "marvell,sd8997" }, { } diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h index bd28deff9..921a226b1 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h @@ -1136,6 +1136,15 @@ enum bt_mp_oper_opcode_8723b { BT_MP_OP_ENABLE_CFO_TRACKING = 0x24, }; +enum rtl8xxxu_bw_mode { + RTL8XXXU_CHANNEL_WIDTH_20 = 0, + RTL8XXXU_CHANNEL_WIDTH_40 = 1, + RTL8XXXU_CHANNEL_WIDTH_80 = 2, + RTL8XXXU_CHANNEL_WIDTH_160 = 3, + RTL8XXXU_CHANNEL_WIDTH_80_80 = 4, + RTL8XXXU_CHANNEL_WIDTH_MAX = 5, +}; + struct rtl8723bu_c2h { u8 id; u8 seq; @@ -1186,7 +1195,7 @@ struct rtl8723bu_c2h { u8 dummy3_0; } __packed ra_report; }; -}; +} __packed; struct rtl8xxxu_fileops; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c index 837a1b9d1..eb8f046ae 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c @@ -1679,6 +1679,11 @@ static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv) val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1); val8 &= ~BIT(0); rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8); + + /* + * Fix transmission failure of rtl8192e. 
+ */ + rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00); } struct rtl8xxxu_fileops rtl8192eu_fops = { diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 38f06ee98..9c811fe30 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -1614,18 +1614,18 @@ static void rtl8xxxu_print_chipinfo(struct rtl8xxxu_priv *priv) static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) { struct device *dev = &priv->udev->dev; - u32 val32, bonding; + u32 val32, bonding, sys_cfg; u16 val16; - val32 = rtl8xxxu_read32(priv, REG_SYS_CFG); - priv->chip_cut = (val32 & SYS_CFG_CHIP_VERSION_MASK) >> + sys_cfg = rtl8xxxu_read32(priv, REG_SYS_CFG); + priv->chip_cut = (sys_cfg & SYS_CFG_CHIP_VERSION_MASK) >> SYS_CFG_CHIP_VERSION_SHIFT; - if (val32 & SYS_CFG_TRP_VAUX_EN) { + if (sys_cfg & SYS_CFG_TRP_VAUX_EN) { dev_info(dev, "Unsupported test chip\n"); return -ENOTSUPP; } - if (val32 & SYS_CFG_BT_FUNC) { + if (sys_cfg & SYS_CFG_BT_FUNC) { if (priv->chip_cut >= 3) { sprintf(priv->chip_name, "8723BU"); priv->rtl_chip = RTL8723B; @@ -1647,7 +1647,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) if (val32 & MULTI_GPS_FUNC_EN) priv->has_gps = 1; priv->is_multi_func = 1; - } else if (val32 & SYS_CFG_TYPE_ID) { + } else if (sys_cfg & SYS_CFG_TYPE_ID) { bonding = rtl8xxxu_read32(priv, REG_HPON_FSM); bonding &= HPON_FSM_BONDING_MASK; if (priv->fops->tx_desc_size == @@ -1695,7 +1695,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) case RTL8188E: case RTL8192E: case RTL8723B: - switch (val32 & SYS_CFG_VENDOR_EXT_MASK) { + switch (sys_cfg & SYS_CFG_VENDOR_EXT_MASK) { case SYS_CFG_VENDOR_ID_TSMC: sprintf(priv->chip_vendor, "TSMC"); break; @@ -1712,7 +1712,7 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) } break; default: - if (val32 & SYS_CFG_VENDOR_ID) { + if (sys_cfg & SYS_CFG_VENDOR_ID) { sprintf(priv->chip_vendor, "UMC"); priv->vendor_umc = 1; } else { @@ -4333,7 +4333,7 @@ void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv, u32 ramask, int sgi) { struct h2c_cmd h2c; - u8 bw = 0; + u8 bw = RTL8XXXU_CHANNEL_WIDTH_20; memset(&h2c, 0, sizeof(struct h2c_cmd)); @@ -4375,12 +4375,9 @@ void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv, void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv, u8 macid, bool connect) { -#ifdef RTL8XXXU_GEN2_REPORT_CONNECT /* - * Barry Day reports this causes issues with 8192eu and 8723bu - * devices reconnecting. The reason for this is unclear, but - * until it is better understood, leave the code in place but - * disabled, so it is not lost. + * The firmware turns on the rate control when it knows it's + * connected to a network. 
*/ struct h2c_cmd h2c; @@ -4393,7 +4390,6 @@ void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv, h2c.media_status_rpt.parm &= ~BIT(0); rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt)); -#endif } void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv) @@ -5105,7 +5101,7 @@ static void rtl8xxxu_queue_rx_urb(struct rtl8xxxu_priv *priv, pending = priv->rx_urb_pending_count; } else { skb = (struct sk_buff *)rx_urb->urb.context; - dev_kfree_skb(skb); + dev_kfree_skb_irq(skb); usb_free_urb(&rx_urb->urb); } @@ -5504,7 +5500,6 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed) { struct rtl8xxxu_priv *priv = hw->priv; struct device *dev = &priv->udev->dev; - u16 val16; int ret = 0, channel; bool ht40; @@ -5514,14 +5509,6 @@ static int rtl8xxxu_config(struct ieee80211_hw *hw, u32 changed) __func__, hw->conf.chandef.chan->hw_value, changed, hw->conf.chandef.width); - if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) { - val16 = ((hw->conf.long_frame_max_tx_count << - RETRY_LIMIT_LONG_SHIFT) & RETRY_LIMIT_LONG_MASK) | - ((hw->conf.short_frame_max_tx_count << - RETRY_LIMIT_SHORT_SHIFT) & RETRY_LIMIT_SHORT_MASK); - rtl8xxxu_write16(priv, REG_RETRY_LIMIT, val16); - } - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { switch (hw->conf.chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index 176deb2b5..502ac10cf 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -1608,7 +1608,7 @@ static void _rtl8821ae_phy_txpower_by_rate_configuration(struct ieee80211_hw *hw } /* string is in decimal */ -static bool _rtl8812ae_get_integer_from_string(char *str, u8 *pint) +static bool _rtl8812ae_get_integer_from_string(const char *str, u8 *pint) { u16 i = 0; *pint = 0; @@ -1626,18 +1626,6 @@ static bool _rtl8812ae_get_integer_from_string(char *str, u8 *pint) return true; } -static bool _rtl8812ae_eq_n_byte(u8 *str1, u8 *str2, u32 num) -{ - if (num == 0) - return false; - while (num > 0) { - num--; - if (str1[num] != str2[num]) - return false; - } - return true; -} - static s8 _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw, u8 band, u8 channel) { @@ -1664,10 +1652,11 @@ static s8 _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw, return channel_index; } -static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregulation, - u8 *pband, u8 *pbandwidth, - u8 *prate_section, u8 *prf_path, - u8 *pchannel, u8 *ppower_limit) +static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, + const char *pregulation, + const char *pband, const char *pbandwidth, + const char *prate_section, const char *prf_path, + const char *pchannel, const char *ppower_limit) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &rtlpriv->phy; @@ -1675,8 +1664,8 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul u8 channel_index; s8 power_limit = 0, prev_power_limit, ret; - if (!_rtl8812ae_get_integer_from_string((char *)pchannel, &channel) || - !_rtl8812ae_get_integer_from_string((char *)ppower_limit, + if (!_rtl8812ae_get_integer_from_string(pchannel, &channel) || + !_rtl8812ae_get_integer_from_string(ppower_limit, &power_limit)) { RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Illegal index of pwr_lmt table [chnl %d][val %d]\n", @@ -1686,42 +1675,42 @@ static void _rtl8812ae_phy_set_txpower_limit(struct 
ieee80211_hw *hw, u8 *pregul power_limit = power_limit > MAX_POWER_INDEX ? MAX_POWER_INDEX : power_limit; - if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("FCC"), 3)) + if (strcmp(pregulation, "FCC") == 0) regulation = 0; - else if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("MKK"), 3)) + else if (strcmp(pregulation, "MKK") == 0) regulation = 1; - else if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("ETSI"), 4)) + else if (strcmp(pregulation, "ETSI") == 0) regulation = 2; - else if (_rtl8812ae_eq_n_byte(pregulation, (u8 *)("WW13"), 4)) + else if (strcmp(pregulation, "WW13") == 0) regulation = 3; - if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("CCK"), 3)) + if (strcmp(prate_section, "CCK") == 0) rate_section = 0; - else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("OFDM"), 4)) + else if (strcmp(prate_section, "OFDM") == 0) rate_section = 1; - else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("HT"), 2) && - _rtl8812ae_eq_n_byte(prf_path, (u8 *)("1T"), 2)) + else if (strcmp(prate_section, "HT") == 0 && + strcmp(prf_path, "1T") == 0) rate_section = 2; - else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("HT"), 2) && - _rtl8812ae_eq_n_byte(prf_path, (u8 *)("2T"), 2)) + else if (strcmp(prate_section, "HT") == 0 && + strcmp(prf_path, "2T") == 0) rate_section = 3; - else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("VHT"), 3) && - _rtl8812ae_eq_n_byte(prf_path, (u8 *)("1T"), 2)) + else if (strcmp(prate_section, "VHT") == 0 && + strcmp(prf_path, "1T") == 0) rate_section = 4; - else if (_rtl8812ae_eq_n_byte(prate_section, (u8 *)("VHT"), 3) && - _rtl8812ae_eq_n_byte(prf_path, (u8 *)("2T"), 2)) + else if (strcmp(prate_section, "VHT") == 0 && + strcmp(prf_path, "2T") == 0) rate_section = 5; - if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("20M"), 3)) + if (strcmp(pbandwidth, "20M") == 0) bandwidth = 0; - else if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("40M"), 3)) + else if (strcmp(pbandwidth, "40M") == 0) bandwidth = 1; - else if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("80M"), 3)) + else if (strcmp(pbandwidth, "80M") == 0) bandwidth = 2; - else if (_rtl8812ae_eq_n_byte(pbandwidth, (u8 *)("160M"), 4)) + else if (strcmp(pbandwidth, "160M") == 0) bandwidth = 3; - if (_rtl8812ae_eq_n_byte(pband, (u8 *)("2.4G"), 4)) { + if (strcmp(pband, "2.4G") == 0) { ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw, BAND_ON_2_4G, channel); @@ -1745,7 +1734,7 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul regulation, bandwidth, rate_section, channel_index, rtlphy->txpwr_limit_2_4g[regulation][bandwidth] [rate_section][channel_index][RF90_PATH_A]); - } else if (_rtl8812ae_eq_n_byte(pband, (u8 *)("5G"), 2)) { + } else if (strcmp(pband, "5G") == 0) { ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw, BAND_ON_5G, channel); @@ -1776,10 +1765,10 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul } static void _rtl8812ae_phy_config_bb_txpwr_lmt(struct ieee80211_hw *hw, - u8 *regulation, u8 *band, - u8 *bandwidth, u8 *rate_section, - u8 *rf_path, u8 *channel, - u8 *power_limit) + const char *regulation, const char *band, + const char *bandwidth, const char *rate_section, + const char *rf_path, const char *channel, + const char *power_limit) { _rtl8812ae_phy_set_txpower_limit(hw, regulation, band, bandwidth, rate_section, rf_path, channel, @@ -1792,7 +1781,7 @@ static void _rtl8821ae_phy_read_and_config_txpwr_lmt(struct ieee80211_hw *hw) struct rtl_hal *rtlhal = rtl_hal(rtlpriv); u32 i = 0; u32 array_len; - u8 **array; + const char **array; if (rtlhal->hw_type 
== HARDWARE_TYPE_RTL8812AE) { array_len = RTL8812AE_TXPWR_LMT_ARRAY_LEN; @@ -1806,13 +1795,13 @@ static void _rtl8821ae_phy_read_and_config_txpwr_lmt(struct ieee80211_hw *hw) "\n"); for (i = 0; i < array_len; i += 7) { - u8 *regulation = array[i]; - u8 *band = array[i+1]; - u8 *bandwidth = array[i+2]; - u8 *rate = array[i+3]; - u8 *rf_path = array[i+4]; - u8 *chnl = array[i+5]; - u8 *val = array[i+6]; + const char *regulation = array[i]; + const char *band = array[i+1]; + const char *bandwidth = array[i+2]; + const char *rate = array[i+3]; + const char *rf_path = array[i+4]; + const char *chnl = array[i+5]; + const char *val = array[i+6]; _rtl8812ae_phy_config_bb_txpwr_lmt(hw, regulation, band, bandwidth, rate, rf_path, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c index ac44fd5d0..e1e7fa990 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.c @@ -2917,7 +2917,7 @@ u32 RTL8821AE_AGC_TAB_1TARRAYLEN = ARRAY_SIZE(RTL8821AE_AGC_TAB_ARRAY); * TXPWR_LMT.TXT ******************************************************************************/ -u8 *RTL8812AE_TXPWR_LMT[] = { +const char *RTL8812AE_TXPWR_LMT[] = { "FCC", "2.4G", "20M", "CCK", "1T", "01", "36", "ETSI", "2.4G", "20M", "CCK", "1T", "01", "32", "MKK", "2.4G", "20M", "CCK", "1T", "01", "32", @@ -3486,7 +3486,7 @@ u8 *RTL8812AE_TXPWR_LMT[] = { u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN = ARRAY_SIZE(RTL8812AE_TXPWR_LMT); -u8 *RTL8821AE_TXPWR_LMT[] = { +const char *RTL8821AE_TXPWR_LMT[] = { "FCC", "2.4G", "20M", "CCK", "1T", "01", "32", "ETSI", "2.4G", "20M", "CCK", "1T", "01", "32", "MKK", "2.4G", "20M", "CCK", "1T", "01", "32", diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h index 36c2388b6..f8550a012 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/table.h @@ -52,7 +52,7 @@ extern u32 RTL8821AE_AGC_TAB_ARRAY[]; extern u32 RTL8812AE_AGC_TAB_1TARRAYLEN; extern u32 RTL8812AE_AGC_TAB_ARRAY[]; extern u32 RTL8812AE_TXPWR_LMT_ARRAY_LEN; -extern u8 *RTL8812AE_TXPWR_LMT[]; +extern const char *RTL8812AE_TXPWR_LMT[]; extern u32 RTL8821AE_TXPWR_LMT_ARRAY_LEN; -extern u8 *RTL8821AE_TXPWR_LMT[]; +extern const char *RTL8821AE_TXPWR_LMT[]; #endif diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 51e4e92d9..0bbeb61ec 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -712,8 +712,8 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len) struct rndis_query *get; struct rndis_query_c *get_c; } u; - int ret, buflen; - int resplen, respoffs, copylen; + int ret; + size_t buflen, resplen, respoffs, copylen; buflen = *len + sizeof(*u.get); if (buflen < CONTROL_BUFFER_SIZE) @@ -748,22 +748,15 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len) if (respoffs > buflen) { /* Device returned data offset outside buffer, error. */ - netdev_dbg(dev->net, "%s(%s): received invalid " - "data offset: %d > %d\n", __func__, - oid_to_string(oid), respoffs, buflen); + netdev_dbg(dev->net, + "%s(%s): received invalid data offset: %zu > %zu\n", + __func__, oid_to_string(oid), respoffs, buflen); ret = -EINVAL; goto exit_unlock; } - if ((resplen + respoffs) > buflen) { - /* Device would have returned more data if buffer would - * have been big enough. 
Copy just the bits that we got. - */ - copylen = buflen - respoffs; - } else { - copylen = resplen; - } + copylen = min(resplen, buflen - respoffs); if (copylen > *len) copylen = *len; diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c index c8ba148f8..acf4d8cb4 100644 --- a/drivers/net/wireless/rsi/rsi_91x_coex.c +++ b/drivers/net/wireless/rsi/rsi_91x_coex.c @@ -160,6 +160,7 @@ int rsi_coex_attach(struct rsi_common *common) rsi_coex_scheduler_thread, "Coex-Tx-Thread")) { rsi_dbg(ERR_ZONE, "%s: Unable to init tx thrd\n", __func__); + kfree(coex_cb); return -EINVAL; } return 0; diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c index c6c29034b..a939b552a 100644 --- a/drivers/net/wireless/rsi/rsi_91x_core.c +++ b/drivers/net/wireless/rsi/rsi_91x_core.c @@ -466,7 +466,9 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb) tid, 0); } } - if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { + + if (IEEE80211_SKB_CB(skb)->control.flags & + IEEE80211_TX_CTRL_PORT_CTRL_PROTO) { q_num = MGMT_SOFT_Q; skb->priority = q_num; } diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c index 2cb7cca4e..b445a52fb 100644 --- a/drivers/net/wireless/rsi/rsi_91x_hal.c +++ b/drivers/net/wireless/rsi/rsi_91x_hal.c @@ -152,12 +152,16 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) u8 header_size; u8 vap_id = 0; u8 dword_align_bytes; + bool tx_eapol; u16 seq_num; info = IEEE80211_SKB_CB(skb); vif = info->control.vif; tx_params = (struct skb_info *)info->driver_data; + tx_eapol = IEEE80211_SKB_CB(skb)->control.flags & + IEEE80211_TX_CTRL_PORT_CTRL_PROTO; + header_size = FRAME_DESC_SZ + sizeof(struct rsi_xtended_desc); if (header_size > skb_headroom(skb)) { rsi_dbg(ERR_ZONE, "%s: Unable to send pkt\n", __func__); @@ -221,7 +225,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb) } } - if (skb->protocol == cpu_to_be16(ETH_P_PAE)) { + if (tx_eapol) { rsi_dbg(INFO_ZONE, "*** Tx EAPOL ***\n"); data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE); diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c index f33ece937..cfde9b94b 100644 --- a/drivers/net/wireless/wl3501_cs.c +++ b/drivers/net/wireless/wl3501_cs.c @@ -1329,7 +1329,7 @@ static netdev_tx_t wl3501_hard_start_xmit(struct sk_buff *skb, } else { ++dev->stats.tx_packets; dev->stats.tx_bytes += skb->len; - kfree_skb(skb); + dev_kfree_skb_irq(skb); if (this->tx_buffer_cnt < 2) netif_stop_queue(dev); diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 92d30ebdb..2b984c5ba 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -166,7 +166,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */ struct pending_tx_info pending_tx_info[MAX_PENDING_REQS]; grant_handle_t grant_tx_handle[MAX_PENDING_REQS]; - struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS]; + struct gnttab_copy tx_copy_ops[2 * MAX_PENDING_REQS]; struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS]; struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS]; /* passed to gnttab_[un]map_refs with pages under (un)mapping */ diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index fc389f2bb..d2b79d7c0 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -327,6 +327,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue, struct xenvif_tx_cb { 
u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1]; u8 copy_count; + u32 split_mask; }; #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb) @@ -354,6 +355,8 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size) struct sk_buff *skb = alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC | __GFP_NOWARN); + + BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb)); if (unlikely(skb == NULL)) return NULL; @@ -389,11 +392,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue, nr_slots = shinfo->nr_frags + 1; copy_count(skb) = 0; + XENVIF_TX_CB(skb)->split_mask = 0; /* Create copy ops for exactly data_len bytes into the skb head. */ __skb_put(skb, data_len); while (data_len > 0) { int amount = data_len > txp->size ? txp->size : data_len; + bool split = false; cop->source.u.ref = txp->gref; cop->source.domid = queue->vif->domid; @@ -406,6 +411,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue, cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb) - data_len); + /* Don't cross local page boundary! */ + if (cop->dest.offset + amount > XEN_PAGE_SIZE) { + amount = XEN_PAGE_SIZE - cop->dest.offset; + XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb); + split = true; + } + cop->len = amount; cop->flags = GNTCOPY_source_gref; @@ -413,7 +425,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue, pending_idx = queue->pending_ring[index]; callback_param(queue, pending_idx).ctx = NULL; copy_pending_idx(skb, copy_count(skb)) = pending_idx; - copy_count(skb)++; + if (!split) + copy_count(skb)++; cop++; data_len -= amount; @@ -434,7 +447,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue, nr_slots--; } else { /* The copy op partially covered the tx_request. - * The remainder will be mapped. + * The remainder will be mapped or copied in the next + * iteration. */ txp->offset += amount; txp->size -= amount; @@ -532,6 +546,13 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue, pending_idx = copy_pending_idx(skb, i); newerr = (*gopp_copy)->status; + + /* Split copies need to be handled together. */ + if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) { + (*gopp_copy)++; + if (!newerr) + newerr = (*gopp_copy)->status; + } if (likely(!newerr)) { /* The first frag might still have this slot mapped */ if (i < copy_count(skb) - 1 || !sharedslot) @@ -968,10 +989,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, /* No crossing a page as the payload mustn't fragment. */ if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) { - netdev_err(queue->vif->dev, - "txreq.offset: %u, size: %u, end: %lu\n", - txreq.offset, txreq.size, - (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size); + netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n", + txreq.offset, txreq.size); xenvif_fatal_tx_err(queue->vif); break; } |