author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-08-07 13:11:27 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-08-07 13:11:27 +0000
commit    | 34996e42f82bfd60bc2c191e5cae3c6ab233ec6c (patch)
tree      | 62db60558cbf089714b48daeabca82bf2b20b20e /net/rxrpc/input.c
parent    | Adding debian version 6.8.12-1. (diff)
download  | linux-34996e42f82bfd60bc2c191e5cae3c6ab233ec6c.tar.xz
          | linux-34996e42f82bfd60bc2c191e5cae3c6ab233ec6c.zip
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'net/rxrpc/input.c')
-rw-r--r-- | net/rxrpc/input.c | 94
1 file changed, 41 insertions, 53 deletions
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 5dfda1ac51..16d49a861d 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -223,7 +223,7 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
 	list_for_each_entry_rcu(txb, &call->tx_buffer, call_link, false) {
 		if (before_eq(txb->seq, call->acks_hard_ack))
 			continue;
-		if (test_bit(RXRPC_TXBUF_LAST, &txb->flags)) {
+		if (txb->flags & RXRPC_LAST_PACKET) {
 			set_bit(RXRPC_CALL_TX_LAST, &call->flags);
 			rot_last = true;
 		}
@@ -263,6 +263,9 @@ static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
 {
 	ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
 
+	call->resend_at = KTIME_MAX;
+	trace_rxrpc_timer_can(call, rxrpc_timer_trace_resend);
+
 	if (unlikely(call->cong_last_nack)) {
 		rxrpc_free_skb(call->cong_last_nack, rxrpc_skb_put_last_nack);
 		call->cong_last_nack = NULL;
@@ -299,15 +302,11 @@ static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
 static bool rxrpc_receiving_reply(struct rxrpc_call *call)
 {
 	struct rxrpc_ack_summary summary = { 0 };
-	unsigned long now, timo;
 	rxrpc_seq_t top = READ_ONCE(call->tx_top);
 
 	if (call->ackr_reason) {
-		now = jiffies;
-		timo = now + MAX_JIFFY_OFFSET;
-
-		WRITE_ONCE(call->delay_ack_at, timo);
-		trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
+		call->delay_ack_at = KTIME_MAX;
+		trace_rxrpc_timer_can(call, rxrpc_timer_trace_delayed_ack);
 	}
 
 	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
@@ -340,7 +339,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
 
 	case RXRPC_CALL_SERVER_RECV_REQUEST:
 		rxrpc_set_call_state(call, RXRPC_CALL_SERVER_ACK_REQUEST);
-		call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
+		call->expect_req_by = KTIME_MAX;
 		rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_processing_op);
 		break;
 
@@ -613,14 +612,12 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
 
 	case RXRPC_CALL_SERVER_RECV_REQUEST: {
 		unsigned long timo = READ_ONCE(call->next_req_timo);
-		unsigned long now, expect_req_by;
 
 		if (timo) {
-			now = jiffies;
-			expect_req_by = now + timo;
-			WRITE_ONCE(call->expect_req_by, expect_req_by);
-			rxrpc_reduce_call_timer(call, expect_req_by, now,
-						rxrpc_timer_set_for_idle);
+			ktime_t delay = ms_to_ktime(timo);
+
+			call->expect_req_by = ktime_add(ktime_get_real(), delay);
+			trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_idle);
 		}
 		break;
 	}
@@ -734,20 +731,19 @@ static rxrpc_seq_t rxrpc_input_check_prev_ack(struct rxrpc_call *call,
 					      rxrpc_seq_t seq)
 {
 	struct sk_buff *skb = call->cong_last_nack;
-	struct rxrpc_ackpacket ack;
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	unsigned int i, new_acks = 0, retained_nacks = 0;
-	rxrpc_seq_t old_seq = sp->first_ack;
-	u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(ack);
+	rxrpc_seq_t old_seq = sp->ack.first_ack;
+	u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);
 
-	if (after_eq(seq, old_seq + sp->nr_acks)) {
-		summary->nr_new_acks += sp->nr_nacks;
-		summary->nr_new_acks += seq - (old_seq + sp->nr_acks);
+	if (after_eq(seq, old_seq + sp->ack.nr_acks)) {
+		summary->nr_new_acks += sp->ack.nr_nacks;
+		summary->nr_new_acks += seq - (old_seq + sp->ack.nr_acks);
 		summary->nr_retained_nacks = 0;
 	} else if (seq == old_seq) {
-		summary->nr_retained_nacks = sp->nr_nacks;
+		summary->nr_retained_nacks = sp->ack.nr_nacks;
 	} else {
-		for (i = 0; i < sp->nr_acks; i++) {
+		for (i = 0; i < sp->ack.nr_acks; i++) {
 			if (acks[i] == RXRPC_ACK_TYPE_NACK) {
 				if (before(old_seq + i, seq))
 					new_acks++;
@@ -760,7 +756,7 @@ static rxrpc_seq_t rxrpc_input_check_prev_ack(struct rxrpc_call *call,
 		summary->nr_retained_nacks = retained_nacks;
 	}
 
-	return old_seq + sp->nr_acks;
+	return old_seq + sp->ack.nr_acks;
 }
 
 /*
@@ -780,10 +776,10 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call,
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	unsigned int i, old_nacks = 0;
-	rxrpc_seq_t lowest_nak = seq + sp->nr_acks;
+	rxrpc_seq_t lowest_nak = seq + sp->ack.nr_acks;
 	u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);
 
-	for (i = 0; i < sp->nr_acks; i++) {
+	for (i = 0; i < sp->ack.nr_acks; i++) {
 		if (acks[i] == RXRPC_ACK_TYPE_ACK) {
 			summary->nr_acks++;
 			if (after_eq(seq, since))
@@ -795,7 +791,7 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call,
 				old_nacks++;
 			} else {
 				summary->nr_new_nacks++;
-				sp->nr_nacks++;
+				sp->ack.nr_nacks++;
 			}
 
 			if (before(seq, lowest_nak))
@@ -856,7 +852,6 @@ static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
 static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 {
 	struct rxrpc_ack_summary summary = { 0 };
-	struct rxrpc_ackpacket ack;
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 	struct rxrpc_acktrailer trailer;
 	rxrpc_serial_t ack_serial, acked_serial;
@@ -865,29 +860,24 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 
 	_enter("");
 
-	offset = sizeof(struct rxrpc_wire_header);
-	if (skb_copy_bits(skb, offset, &ack, sizeof(ack)) < 0)
-		return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack);
-	offset += sizeof(ack);
-
-	ack_serial = sp->hdr.serial;
-	acked_serial = ntohl(ack.serial);
-	first_soft_ack = ntohl(ack.firstPacket);
-	prev_pkt = ntohl(ack.previousPacket);
-	hard_ack = first_soft_ack - 1;
-	nr_acks = ack.nAcks;
-	sp->first_ack = first_soft_ack;
-	sp->nr_acks = nr_acks;
-	summary.ack_reason = (ack.reason < RXRPC_ACK__INVALID ?
-			      ack.reason : RXRPC_ACK__INVALID);
+	offset = sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);
+
+	ack_serial = sp->hdr.serial;
+	acked_serial = sp->ack.acked_serial;
+	first_soft_ack = sp->ack.first_ack;
+	prev_pkt = sp->ack.prev_ack;
+	nr_acks = sp->ack.nr_acks;
+	hard_ack = first_soft_ack - 1;
+	summary.ack_reason = (sp->ack.reason < RXRPC_ACK__INVALID ?
+			      sp->ack.reason : RXRPC_ACK__INVALID);
 
 	trace_rxrpc_rx_ack(call, ack_serial, acked_serial, first_soft_ack,
 			   prev_pkt, summary.ack_reason, nr_acks);
-	rxrpc_inc_stat(call->rxnet, stat_rx_acks[ack.reason]);
+	rxrpc_inc_stat(call->rxnet, stat_rx_acks[summary.ack_reason]);
 
 	if (acked_serial != 0) {
-		switch (ack.reason) {
+		switch (summary.ack_reason) {
 		case RXRPC_ACK_PING_RESPONSE:
 			rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
 						 rxrpc_rtt_rx_ping_response);
@@ -907,7 +897,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 	 * indicates that the client address changed due to NAT. The server
 	 * lost the call because it switched to a different peer.
 	 */
-	if (unlikely(ack.reason == RXRPC_ACK_EXCEEDS_WINDOW) &&
+	if (unlikely(summary.ack_reason == RXRPC_ACK_EXCEEDS_WINDOW) &&
 	    first_soft_ack == 1 &&
 	    prev_pkt == 0 &&
 	    rxrpc_is_client_call(call)) {
@@ -920,7 +910,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 	 * indicate a change of address. However, we can retransmit the call
 	 * if we still have it buffered to the beginning.
 	 */
-	if (unlikely(ack.reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
+	if (unlikely(summary.ack_reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
 	    first_soft_ack == 1 &&
 	    prev_pkt == 0 &&
 	    call->acks_hard_ack == 0 &&
@@ -961,7 +951,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 	call->acks_first_seq = first_soft_ack;
 	call->acks_prev_seq = prev_pkt;
 
-	switch (ack.reason) {
+	switch (summary.ack_reason) {
 	case RXRPC_ACK_PING:
 		break;
 	default:
@@ -1018,7 +1008,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 	rxrpc_congestion_management(call, skb, &summary, acked_serial);
 
 send_response:
-	if (ack.reason == RXRPC_ACK_PING)
+	if (summary.ack_reason == RXRPC_ACK_PING)
 		rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, ack_serial,
 			       rxrpc_propose_ack_respond_to_ping);
 	else if (sp->hdr.flags & RXRPC_REQUEST_ACK)
@@ -1069,12 +1059,10 @@ void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb)
 
 	timo = READ_ONCE(call->next_rx_timo);
 	if (timo) {
-		unsigned long now = jiffies, expect_rx_by;
+		ktime_t delay = ms_to_ktime(timo);
 
-		expect_rx_by = now + timo;
-		WRITE_ONCE(call->expect_rx_by, expect_rx_by);
-		rxrpc_reduce_call_timer(call, expect_rx_by, now,
-					rxrpc_timer_set_for_normal);
+		call->expect_rx_by = ktime_add(ktime_get_real(), delay);
+		trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_expect_rx);
 	}
 
 	switch (sp->hdr.type) {
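The recurring change in this diff is the move from jiffies-based relative deadlines (computed as `jiffies + timo`, stored with WRITE_ONCE() and armed through rxrpc_reduce_call_timer()) to absolute ktime_t expiry values, where KTIME_MAX means "timer not running" and the new trace_rxrpc_timer_set()/trace_rxrpc_timer_can() tracepoints record arming and cancelling. The sketch below is not taken from the patch; it only illustrates that conversion pattern with the generic linux/ktime.h helpers, using a hypothetical example_timer_state structure and leaving the rxrpc-specific tracepoints as comments.

    #include <linux/ktime.h>

    /* Hypothetical holder for an absolute expiry, standing in for fields such
     * as call->expect_req_by, call->expect_rx_by or call->resend_at.
     */
    struct example_timer_state {
            ktime_t expiry;         /* absolute time; KTIME_MAX == disarmed */
    };

    /* Arm the timer 'timeout_ms' milliseconds from now: the shape used in
     * rxrpc_input_data() and rxrpc_input_call_packet() after this merge.
     */
    static void example_timer_arm(struct example_timer_state *ts,
                                  unsigned long timeout_ms)
    {
            ktime_t delay = ms_to_ktime(timeout_ms);

            ts->expiry = ktime_add(ktime_get_real(), delay);
            /* the real code also emits trace_rxrpc_timer_set() here */
    }

    /* Disarm the timer, as rxrpc_end_tx_phase() now does for the resend
     * deadline and rxrpc_receiving_reply() does for the delayed-ACK deadline.
     */
    static void example_timer_cancel(struct example_timer_state *ts)
    {
            ts->expiry = KTIME_MAX;
            /* the real code also emits trace_rxrpc_timer_can() here */
    }

In this scheme the helpers only record the absolute deadline; the actual timer is then driven elsewhere from the earliest of the stored ktime_t values, rather than being repeatedly shortened at every call site as the old rxrpc_reduce_call_timer() code did.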