Diffstat
-rw-r--r-- | drivers/net/ethernet/broadcom/tg3.c | 136
1 file changed, 108 insertions, 28 deletions
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index f1c8ff5b63..f52830dfb2 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6314,6 +6314,46 @@ err_out:
 	return -EOPNOTSUPP;
 }
 
+static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
+				     struct skb_shared_hwtstamps *timestamp)
+{
+	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
+	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
+					   tp->ptp_adjust);
+}
+
+static void tg3_read_tx_tstamp(struct tg3 *tp, u64 *hwclock)
+{
+	*hwclock = tr32(TG3_TX_TSTAMP_LSB);
+	*hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
+}
+
+static long tg3_ptp_ts_aux_work(struct ptp_clock_info *ptp)
+{
+	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+	struct skb_shared_hwtstamps timestamp;
+	u64 hwclock;
+
+	if (tp->ptp_txts_retrycnt > 2)
+		goto done;
+
+	tg3_read_tx_tstamp(tp, &hwclock);
+
+	if (hwclock != tp->pre_tx_ts) {
+		tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
+		skb_tstamp_tx(tp->tx_tstamp_skb, &timestamp);
+		goto done;
+	}
+	tp->ptp_txts_retrycnt++;
+	return HZ / 10;
+done:
+	dev_consume_skb_any(tp->tx_tstamp_skb);
+	tp->tx_tstamp_skb = NULL;
+	tp->ptp_txts_retrycnt = 0;
+	tp->pre_tx_ts = 0;
+	return -1;
+}
+
 static const struct ptp_clock_info tg3_ptp_caps = {
 	.owner		= THIS_MODULE,
 	.name		= "tg3 clock",
@@ -6325,19 +6365,12 @@ static const struct ptp_clock_info tg3_ptp_caps = {
 	.pps		= 0,
 	.adjfine	= tg3_ptp_adjfine,
 	.adjtime	= tg3_ptp_adjtime,
+	.do_aux_work	= tg3_ptp_ts_aux_work,
 	.gettimex64	= tg3_ptp_gettimex,
 	.settime64	= tg3_ptp_settime,
 	.enable		= tg3_ptp_enable,
 };
 
-static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
-				     struct skb_shared_hwtstamps *timestamp)
-{
-	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
-	timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
-					   tp->ptp_adjust);
-}
-
 /* tp->lock must be held */
 static void tg3_ptp_init(struct tg3 *tp)
 {
@@ -6368,6 +6401,8 @@ static void tg3_ptp_fini(struct tg3 *tp)
 	ptp_clock_unregister(tp->ptp_clock);
 	tp->ptp_clock = NULL;
 	tp->ptp_adjust = 0;
+	dev_consume_skb_any(tp->tx_tstamp_skb);
+	tp->tx_tstamp_skb = NULL;
 }
 
 static inline int tg3_irq_sync(struct tg3 *tp)
@@ -6546,6 +6581,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
 
 	while (sw_idx != hw_idx) {
 		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
+		bool complete_skb_later = false;
 		struct sk_buff *skb = ri->skb;
 		int i, tx_bug = 0;
 
@@ -6556,12 +6592,17 @@ static void tg3_tx(struct tg3_napi *tnapi)
 
 		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
 			struct skb_shared_hwtstamps timestamp;
-			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
-			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
+			u64 hwclock;
 
-			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
-
-			skb_tstamp_tx(skb, &timestamp);
+			tg3_read_tx_tstamp(tp, &hwclock);
+			if (hwclock != tp->pre_tx_ts) {
+				tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
+				skb_tstamp_tx(skb, &timestamp);
+				tp->pre_tx_ts = 0;
+			} else {
+				tp->tx_tstamp_skb = skb;
+				complete_skb_later = true;
+			}
 		}
 		dma_unmap_single(&tp->pdev->dev,
 				 dma_unmap_addr(ri, mapping),
@@ -6599,7 +6640,10 @@ static void tg3_tx(struct tg3_napi *tnapi)
 		pkts_compl++;
 		bytes_compl += skb->len;
 
-		dev_consume_skb_any(skb);
+		if (!complete_skb_later)
+			dev_consume_skb_any(skb);
+		else
+			ptp_schedule_worker(tp->ptp_clock, 0);
 
 		if (unlikely(tx_bug)) {
 			tg3_tx_recover(tp);
@@ -6611,9 +6655,9 @@
 
 	tnapi->tx_cons = sw_idx;
 
-	/* Need to make the tx_cons update visible to tg3_start_xmit()
+	/* Need to make the tx_cons update visible to __tg3_start_xmit()
 	 * before checking for netif_queue_stopped().  Without the
-	 * memory barrier, there is a small possibility that tg3_start_xmit()
+	 * memory barrier, there is a small possibility that __tg3_start_xmit()
 	 * will miss it and cause the queue to be stopped forever.
 	 */
 	smp_mb();
@@ -7853,7 +7897,7 @@ static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
 	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
 }
 
-static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
+static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);
 
 /* Use GSO to workaround all TSO packets that meet HW bug conditions
  * indicated in tg3_tx_frag_set()
@@ -7889,7 +7933,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
 
 	skb_list_walk_safe(segs, seg, next) {
 		skb_mark_not_on_list(seg);
-		tg3_start_xmit(seg, tp->dev);
+		__tg3_start_xmit(seg, tp->dev);
 	}
 
 tg3_tso_bug_end:
@@ -7899,7 +7943,7 @@ tg3_tso_bug_end:
 }
 
 /* hard_start_xmit for all devices */
-static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct tg3 *tp = netdev_priv(dev);
 	u32 len, entry, base_flags, mss, vlan = 0;
@@ -8038,8 +8082,13 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
 	    tg3_flag(tp, TX_TSTAMP_EN)) {
-		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
-		base_flags |= TXD_FLAG_HWTSTAMP;
+		tg3_full_lock(tp, 0);
+		if (!tp->pre_tx_ts) {
+			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+			base_flags |= TXD_FLAG_HWTSTAMP;
+			tg3_read_tx_tstamp(tp, &tp->pre_tx_ts);
+		}
+		tg3_full_unlock(tp);
 	}
 
 	len = skb_headlen(skb);
@@ -8143,11 +8192,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			netif_tx_wake_queue(txq);
 	}
 
-	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
-		/* Packets are ready, update Tx producer idx on card. */
-		tw32_tx_mbox(tnapi->prodmbox, entry);
-	}
-
 	return NETDEV_TX_OK;
 
 dma_error:
@@ -8160,6 +8204,42 @@ drop_nofree:
 	return NETDEV_TX_OK;
 }
 
+static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct netdev_queue *txq;
+	u16 skb_queue_mapping;
+	netdev_tx_t ret;
+
+	skb_queue_mapping = skb_get_queue_mapping(skb);
+	txq = netdev_get_tx_queue(dev, skb_queue_mapping);
+
+	ret = __tg3_start_xmit(skb, dev);
+
+	/* Notify the hardware that packets are ready by updating the TX ring
+	 * tail pointer. We respect netdev_xmit_more() thus avoiding poking
+	 * the hardware for every packet. To guarantee forward progress the TX
+	 * ring must be drained when it is full as indicated by
+	 * netif_xmit_stopped(). This needs to happen even when the current
+	 * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
+	 * queued by previous __tg3_start_xmit() calls might get stuck in
+	 * the queue forever.
+	 */
+	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
+		struct tg3_napi *tnapi;
+		struct tg3 *tp;
+
+		tp = netdev_priv(dev);
+		tnapi = &tp->napi[skb_queue_mapping];
+
+		if (tg3_flag(tp, ENABLE_TSS))
+			tnapi++;
+
+		tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
+	}
+
+	return ret;
+}
+
 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
 {
 	if (enable) {
@@ -17044,7 +17124,7 @@ static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
 	    !tg3_flag(tp, PCI_EXPRESS))
 		goto out;
 
-#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
+#if defined(CONFIG_PPC64) || defined(CONFIG_PARISC)
 	goal = BOUNDARY_MULTI_CACHELINE;
 #else
 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
@@ -17719,7 +17799,7 @@ static int tg3_init_one(struct pci_dev *pdev,
 	 * device behind the EPB cannot support DMA addresses > 40-bit.
	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
 	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
-	 * do DMA address check in tg3_start_xmit().
+	 * do DMA address check in __tg3_start_xmit().
 	 */
 	if (tg3_flag(tp, IS_5788))
 		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
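
Note (not part of the patch): the retry path added above in tg3_ptp_ts_aux_work() only comes into play when an application has asked for hardware TX timestamps, which is what causes SKBTX_HW_TSTAMP to be set on outgoing skbs and the TX_TSTAMP_EN flag checked in __tg3_start_xmit() to be enabled. A minimal userspace sketch of that request, using the generic SIOCSHWTSTAMP and SO_TIMESTAMPING interfaces, is given below; the interface name "eth0" and the omission of the recvmsg(MSG_ERRQUEUE) read-back of the completed timestamp are assumptions made for brevity.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Enable hardware TX timestamping on an (assumed) tg3 interface "eth0".
 * Returns 0 on success, -1 on failure.
 */
static int enable_hw_tx_timestamps(int sock)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,	/* driver turns on its TX timestamping */
		.rx_filter = HWTSTAMP_FILTER_NONE,
	};
	struct ifreq ifr;
	int flags;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	/* Ask the driver to start stamping transmitted packets. */
	if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		return -1;
	}

	/* Ask the stack to mark this socket's skbs for hardware timestamping
	 * and to report the raw hardware timestamp; the driver delivers it
	 * via skb_tstamp_tx() and it arrives on the socket error queue.
	 */
	flags = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
	if (setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
		       &flags, sizeof(flags)) < 0) {
		perror("SO_TIMESTAMPING");
		return -1;
	}
	return 0;
}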