On-the-fly TCP timeout tuning for linux-3.2
commit 99f490afdc91d4965aafd5629567ef02c515e169
Author: Alexey Ivanov <rbtz@yandex-team.ru>
Date: Tue Apr 17 16:55:10 2012 +0400
YANDEX: tcpm: Added TCP sysctl tunables
I made the following constants tunable for internal testing:
TCP_DELACK_MIN -> net.ipv4.tcp_delack_min
TCP_DELACK_MAX -> net.ipv4.tcp_delack_max
TCP_ATO_MIN -> net.ipv4.tcp_ato_min
TCP_RTO_MAX -> net.ipv4.tcp_rto_max
TCP_RTO_MIN -> net.ipv4.tcp_rto_min
TCP_TIMEOUT_INIT -> net.ipv4.tcp_timeout_init
TCP_SYNQ_INTERVAL -> net.ipv4.tcp_synq_interval
- delack: delayed ACK timeout;
- ato: quick ACK timeout;
- rto: retransmit timeout;
- timeout_init: SYN/SYN-ACK retransmit timeout;
- synq_interval: multiplier of the SYN-queue traversal speed.
For more info, see Google's proposal:
http://www.ietf.org/mail-archive/web/tcpm/current/msg04707.html
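
With the patch applied, the tunables appear under /proc/sys/net/ipv4 and take values in milliseconds (the ctl_table entries below use proc_dointvec_ms_jiffies, which converts ms to jiffies on write). A minimal, hypothetical userspace sketch of driving them; the helper name and the 50ms value are illustrative, not part of the patch:

#include <stdio.h>

/* Write a millisecond value to one of the new tunables under
 * /proc/sys/net/ipv4. Returns 0 on success, nonzero on failure. */
static int set_tcp_tunable(const char *name, int ms)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/net/ipv4/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%d\n", ms);
	return fclose(f);
}

int main(void)
{
	/* e.g. drop the minimum RTO from the stock 200ms to 50ms */
	return set_tcp_tunable("tcp_rto_min", 50);
}

Equivalently, from a shell: sysctl -w net.ipv4.tcp_rto_min=50.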
diff --git a/include/net/tcp.h b/include/net/tcp.h
index bb18c4d..60b7480 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -251,6 +251,13 @@ extern int sysctl_tcp_max_ssthresh;
extern int sysctl_tcp_cookie_size;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
+extern int sysctl_tcp_synq_interval;
+extern int sysctl_tcp_rto_min;
+extern int sysctl_tcp_rto_max;
+extern int sysctl_tcp_delack_min;
+extern int sysctl_tcp_delack_max;
+extern int sysctl_tcp_ato_min;
+extern int sysctl_tcp_timeout_init;
extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
@@ -346,7 +353,7 @@ static inline void tcp_dec_quickack_mode(struct sock *sk,
if (pkts >= icsk->icsk_ack.quick) {
icsk->icsk_ack.quick = 0;
/* Leaving quickack mode we deflate ATO. */
- icsk->icsk_ack.ato = TCP_ATO_MIN;
+ icsk->icsk_ack.ato = sysctl_tcp_ato_min;
} else
icsk->icsk_ack.quick -= pkts;
}
@@ -538,8 +545,8 @@ extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
static inline void tcp_bound_rto(const struct sock *sk)
{
- if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
- inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
+ if (inet_csk(sk)->icsk_rto > sysctl_tcp_rto_max)
+ inet_csk(sk)->icsk_rto = sysctl_tcp_rto_max;
}
static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
@@ -574,7 +581,7 @@ static inline void tcp_fast_path_check(struct sock *sk)
static inline u32 tcp_rto_min(struct sock *sk)
{
const struct dst_entry *dst = __sk_dst_get(sk);
- u32 rto_min = TCP_RTO_MIN;
+ u32 rto_min = sysctl_tcp_rto_min;
if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
@@ -866,7 +873,7 @@ static inline void tcp_check_probe_timer(struct sock *sk)
if (!tp->packets_out && !icsk->icsk_pending)
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- icsk->icsk_rto, TCP_RTO_MAX);
+ icsk->icsk_rto, sysctl_tcp_rto_max);
}
static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
@@ -950,7 +957,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
if (!inet_csk_ack_scheduled(sk))
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
(3 * tcp_rto_min(sk)) / 4,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
return 1;
}
@@ -1101,8 +1108,8 @@ static inline void tcp_mib_init(struct net *net)
{
/* See RFC 2012 */
TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
- TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
- TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
+ TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, sysctl_tcp_rto_min*1000/HZ);
+ TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, sysctl_tcp_rto_max*1000/HZ);
TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 769c0e9..1f495f5 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -183,7 +183,7 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
/*
* This (misnamed) value is the age of syncookie which is permitted.
- * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
+ * Its ideal value should be dependent on sysctl_tcp_timeout_init and
* sysctl_tcp_retries1. It's a rather complicated formula (exponential
* backoff) to compute at runtime so it's currently hardcoded here.
*/
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 69fd720..ea5acfb 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -638,6 +638,55 @@ static struct ctl_table ipv4_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
+ {
+ .procname = "tcp_rto_min",
+ .data = &sysctl_tcp_rto_min,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ },
+ {
+ .procname = "tcp_rto_max",
+ .data = &sysctl_tcp_rto_max,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ },
+ {
+ .procname = "tcp_delack_min",
+ .data = &sysctl_tcp_delack_min,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ },
+ {
+ .procname = "tcp_delack_max",
+ .data = &sysctl_tcp_delack_max,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ },
+ {
+ .procname = "tcp_ato_min",
+ .data = &sysctl_tcp_ato_min,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ },
+ {
+ .procname = "tcp_timeout_init",
+ .data = &sysctl_tcp_timeout_init,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ },
+ {
+ .procname = "tcp_synq_interval",
+ .data = &sysctl_tcp_synq_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ },
{
.procname = "udp_mem",
.data = &sysctl_udp_mem,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 34f5db1..63b915d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2350,8 +2350,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
case TCP_DEFER_ACCEPT:
/* Translate value in seconds to number of retransmits */
icsk->icsk_accept_queue.rskq_defer_accept =
- secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
- TCP_RTO_MAX / HZ);
+ secs_to_retrans(val, sysctl_tcp_timeout_init / HZ,
+ sysctl_tcp_rto_max / HZ);
break;
case TCP_WINDOW_CLAMP:
@@ -2539,7 +2539,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
break;
case TCP_DEFER_ACCEPT:
val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
- TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
+ sysctl_tcp_timeout_init / HZ, sysctl_tcp_rto_max / HZ);
break;
case TCP_WINDOW_CLAMP:
val = tp->window_clamp;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e4d1e4a..9a83207 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -187,7 +187,7 @@ static void tcp_enter_quickack_mode(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_incr_quickack(sk);
icsk->icsk_ack.pingpong = 0;
- icsk->icsk_ack.ato = TCP_ATO_MIN;
+ icsk->icsk_ack.ato = sysctl_tcp_ato_min;
}
/* Send ACKs quickly, if "quick" count is not exhausted
@@ -592,13 +592,13 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
* delayed ACK engine.
*/
tcp_incr_quickack(sk);
- icsk->icsk_ack.ato = TCP_ATO_MIN;
+ icsk->icsk_ack.ato = sysctl_tcp_ato_min;
} else {
int m = now - icsk->icsk_ack.lrcvtime;
- if (m <= TCP_ATO_MIN / 2) {
+ if (m <= sysctl_tcp_ato_min / 2) {
/* The fastest case is the first. */
- icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
+ icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + sysctl_tcp_ato_min / 2;
} else if (m < icsk->icsk_ack.ato) {
icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
if (icsk->icsk_ack.ato > icsk->icsk_rto)
@@ -715,7 +715,7 @@ static inline void tcp_set_rto(struct sock *sk)
* with correct one. It is exactly, which we pretend to do.
*/
- /* NOTE: clamping at TCP_RTO_MIN is not required, current algo
+ /* NOTE: clamping at sysctl_tcp_rto_min is not required, current algo
* guarantees that rto is higher.
*/
tcp_bound_rto(sk);
@@ -2309,7 +2309,7 @@ static int tcp_check_sack_reneging(struct sock *sk, int flag)
icsk->icsk_retransmits++;
tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- icsk->icsk_rto, TCP_RTO_MAX);
+ icsk->icsk_rto, sysctl_tcp_rto_max);
return 1;
}
return 0;
@@ -3272,7 +3272,7 @@ static void tcp_rearm_rto(struct sock *sk)
inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
} else {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+ inet_csk(sk)->icsk_rto, sysctl_tcp_rto_max);
}
}
@@ -3483,8 +3483,8 @@ static void tcp_ack_probe(struct sock *sk)
*/
} else {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
- TCP_RTO_MAX);
+ min(icsk->icsk_rto << icsk->icsk_backoff, sysctl_tcp_rto_max),
+ sysctl_tcp_rto_max);
}
}
@@ -5705,11 +5705,11 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
*/
inet_csk_schedule_ack(sk);
icsk->icsk_ack.lrcvtime = tcp_time_stamp;
- icsk->icsk_ack.ato = TCP_ATO_MIN;
+ icsk->icsk_ack.ato = sysctl_tcp_ato_min;
tcp_incr_quickack(sk);
tcp_enter_quickack_mode(sk);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
- TCP_DELACK_MAX, TCP_RTO_MAX);
+ sysctl_tcp_delack_max, sysctl_tcp_rto_max);
discard:
__kfree_skb(skb);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index de69cec..af66ca9 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -431,7 +431,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
icsk->icsk_backoff--;
inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
- TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
+ sysctl_tcp_timeout_init) << icsk->icsk_backoff;
tcp_bound_rto(sk);
skb = tcp_write_queue_head(sk);
@@ -442,7 +442,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
if (remaining) {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- remaining, TCP_RTO_MAX);
+ remaining, sysctl_tcp_rto_max);
} else {
/* RTO revert clocked out retransmission.
* Will retransmit now */
@@ -1410,7 +1410,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
want_cookie)
goto drop_and_free;
- inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+ inet_csk_reqsk_queue_hash_add(sk, req, sysctl_tcp_timeout_init);
return 0;
drop_and_release:
@@ -1877,8 +1877,8 @@ static int tcp_v4_init_sock(struct sock *sk)
tcp_init_xmit_timers(sk);
tcp_prequeue_init(tp);
- icsk->icsk_rto = TCP_TIMEOUT_INIT;
- tp->mdev = TCP_TIMEOUT_INIT;
+ icsk->icsk_rto = sysctl_tcp_timeout_init;
+ tp->mdev = sysctl_tcp_timeout_init;
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 66363b6..63b9c69 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -474,8 +474,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
tcp_init_wl(newtp, treq->rcv_isn);
newtp->srtt = 0;
- newtp->mdev = TCP_TIMEOUT_INIT;
- newicsk->icsk_rto = TCP_TIMEOUT_INIT;
+ newtp->mdev = sysctl_tcp_timeout_init;
+ newicsk->icsk_rto = sysctl_tcp_timeout_init;
newtp->packets_out = 0;
newtp->retrans_out = 0;
@@ -584,7 +584,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
* it can be estimated (approximately)
* from another data.
*/
- tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
+ tmp_opt.ts_recent_stamp = get_seconds() - ((sysctl_tcp_timeout_init/HZ)<<req->retrans);
paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
}
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 097e0c7..fee48d6 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -80,7 +80,7 @@ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
tp->packets_out += tcp_skb_pcount(skb);
if (!prior_packets)
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+ inet_csk(sk)->icsk_rto, sysctl_tcp_rto_max);
}
/* SND.NXT, if window was not shrunk.
@@ -2305,7 +2305,7 @@ begin_fwd:
if (skb == tcp_write_queue_head(sk))
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto,
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
}
@@ -2607,7 +2607,7 @@ static void tcp_connect_init(struct sock *sk)
tp->rcv_wup = 0;
tp->copied_seq = 0;
- inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
+ inet_csk(sk)->icsk_rto = sysctl_tcp_timeout_init;
inet_csk(sk)->icsk_retransmits = 0;
tcp_clear_retrans(tp);
}
@@ -2653,7 +2653,7 @@ int tcp_connect(struct sock *sk)
/* Timer for repeating the SYN until an answer. */
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
- inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
+ inet_csk(sk)->icsk_rto, sysctl_tcp_rto_max);
return 0;
}
EXPORT_SYMBOL(tcp_connect);
@@ -2668,13 +2668,13 @@ void tcp_send_delayed_ack(struct sock *sk)
int ato = icsk->icsk_ack.ato;
unsigned long timeout;
- if (ato > TCP_DELACK_MIN) {
+ if (ato > sysctl_tcp_delack_min) {
const struct tcp_sock *tp = tcp_sk(sk);
int max_ato = HZ / 2;
if (icsk->icsk_ack.pingpong ||
(icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
- max_ato = TCP_DELACK_MAX;
+ max_ato = sysctl_tcp_delack_max;
/* Slow path, intersegment interval is "high". */
@@ -2683,7 +2683,7 @@ void tcp_send_delayed_ack(struct sock *sk)
* directly.
*/
if (tp->srtt) {
- int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);
+ int rtt = max(tp->srtt >> 3, sysctl_tcp_delack_min);
if (rtt < max_ato)
max_ato = rtt;
@@ -2730,9 +2730,9 @@ void tcp_send_ack(struct sock *sk)
buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
if (buff == NULL) {
inet_csk_schedule_ack(sk);
- inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
+ inet_csk(sk)->icsk_ack.ato = sysctl_tcp_ato_min;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
- TCP_DELACK_MAX, TCP_RTO_MAX);
+ sysctl_tcp_delack_max, sysctl_tcp_rto_max);
return;
}
@@ -2844,8 +2844,8 @@ void tcp_send_probe0(struct sock *sk)
icsk->icsk_backoff++;
icsk->icsk_probes_out++;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
- min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
- TCP_RTO_MAX);
+ min(icsk->icsk_rto << icsk->icsk_backoff, sysctl_tcp_rto_max),
+ sysctl_tcp_rto_max);
} else {
/* If packet was not sent due to local congestion,
* do not backoff and do not remember icsk_probes_out.
@@ -2858,6 +2858,6 @@ void tcp_send_probe0(struct sock *sk)
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
min(icsk->icsk_rto << icsk->icsk_backoff,
TCP_RESOURCE_PROBE_INTERVAL),
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
}
}
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 2e0f0af..9dc5781 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -32,6 +32,21 @@ int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
int sysctl_tcp_orphan_retries __read_mostly;
int sysctl_tcp_thin_linear_timeouts __read_mostly;
+int sysctl_tcp_timeout_init __read_mostly = TCP_TIMEOUT_INIT;
+EXPORT_SYMBOL(sysctl_tcp_timeout_init);
+int sysctl_tcp_rto_min __read_mostly = TCP_RTO_MIN;
+EXPORT_SYMBOL(sysctl_tcp_rto_min);
+int sysctl_tcp_rto_max __read_mostly = TCP_RTO_MAX;
+EXPORT_SYMBOL(sysctl_tcp_rto_max);
+int sysctl_tcp_delack_min __read_mostly = TCP_DELACK_MIN;
+EXPORT_SYMBOL(sysctl_tcp_delack_min);
+int sysctl_tcp_delack_max __read_mostly = TCP_DELACK_MAX;
+EXPORT_SYMBOL(sysctl_tcp_delack_max);
+int sysctl_tcp_ato_min __read_mostly = TCP_ATO_MIN;
+EXPORT_SYMBOL(sysctl_tcp_ato_min);
+int sysctl_tcp_synq_interval __read_mostly = TCP_SYNQ_INTERVAL;
+EXPORT_SYMBOL(sysctl_tcp_synq_interval);
+
static void tcp_write_timer(unsigned long);
static void tcp_delack_timer(unsigned long);
static void tcp_keepalive_timer (unsigned long data);
@@ -70,7 +85,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
/* If peer does not open window for long time, or did not transmit
* anything for long time, penalize it. */
- if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
+ if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*sysctl_tcp_rto_max || !do_reset)
shift++;
/* If some dubious ICMP arrived, penalize even more. */
@@ -135,8 +150,8 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
/* This function calculates a "timeout" which is equivalent to the timeout of a
* TCP connection after "boundary" unsuccessful, exponentially backed-off
- * retransmissions with an initial RTO of TCP_RTO_MIN or TCP_TIMEOUT_INIT if
- * syn_set flag is set.
+ * retransmissions with an initial RTO of sysctl_tcp_rto_min or
+ * sysctl_tcp_timeout_init if syn_set flag is set.
*/
static bool retransmits_timed_out(struct sock *sk,
unsigned int boundary,
@@ -144,7 +159,7 @@ static bool retransmits_timed_out(struct sock *sk,
bool syn_set)
{
unsigned int linear_backoff_thresh, start_ts;
- unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
+ unsigned int rto_base = syn_set ? sysctl_tcp_timeout_init : sysctl_tcp_rto_min;
if (!inet_csk(sk)->icsk_retransmits)
return false;
@@ -155,13 +170,13 @@ static bool retransmits_timed_out(struct sock *sk,
start_ts = tcp_sk(sk)->retrans_stamp;
if (likely(timeout == 0)) {
- linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);
+ linear_backoff_thresh = ilog2(sysctl_tcp_rto_max/rto_base);
if (boundary <= linear_backoff_thresh)
timeout = ((2 << boundary) - 1) * rto_base;
else
timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
- (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
+ (boundary - linear_backoff_thresh) * sysctl_tcp_rto_max;
}
return (tcp_time_stamp - start_ts) >= timeout;
}
@@ -188,7 +203,7 @@ static int tcp_write_timeout(struct sock *sk)
retry_until = sysctl_tcp_retries2;
if (sock_flag(sk, SOCK_DEAD)) {
- const int alive = (icsk->icsk_rto < TCP_RTO_MAX);
+ const int alive = (icsk->icsk_rto < sysctl_tcp_rto_max);
retry_until = tcp_orphan_retries(sk, alive);
do_reset = alive ||
@@ -219,7 +234,7 @@ static void tcp_delack_timer(unsigned long data)
/* Try again later. */
icsk->icsk_ack.blocked = 1;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
- sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
+ sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + sysctl_tcp_delack_min);
goto out_unlock;
}
@@ -254,7 +269,7 @@ static void tcp_delack_timer(unsigned long data)
* deflate ATO.
*/
icsk->icsk_ack.pingpong = 0;
- icsk->icsk_ack.ato = TCP_ATO_MIN;
+ icsk->icsk_ack.ato = sysctl_tcp_ato_min;
}
tcp_send_ack(sk);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
@@ -297,7 +312,7 @@ static void tcp_probe_timer(struct sock *sk)
max_probes = sysctl_tcp_retries2;
if (sock_flag(sk, SOCK_DEAD)) {
- const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX);
+ const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < sysctl_tcp_rto_max);
max_probes = tcp_orphan_retries(sk, alive);
@@ -348,7 +363,7 @@ void tcp_retransmit_timer(struct sock *sk)
inet->inet_num, tp->snd_una, tp->snd_nxt);
}
#endif
- if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
+ if (tcp_time_stamp - tp->rcv_tstamp > sysctl_tcp_rto_max) {
tcp_write_err(sk);
goto out;
}
@@ -397,7 +412,7 @@ void tcp_retransmit_timer(struct sock *sk)
icsk->icsk_retransmits = 1;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
- TCP_RTO_MAX);
+ sysctl_tcp_rto_max);
goto out;
}
@@ -434,12 +449,12 @@ out_reset_timer:
tcp_stream_is_thin(tp) &&
icsk->icsk_backoff = 0;
- icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
+ icsk->icsk_rto = min(__tcp_set_rto(tp), sysctl_tcp_rto_max);
} else {
/* Use normal (exponential) backoff */
- icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
+ icsk->icsk_rto = min(icsk->icsk_rto << 1, sysctl_tcp_rto_max);
}
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, sysctl_tcp_rto_max);
if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
__sk_dst_reset(sk);
@@ -492,8 +507,8 @@ out_unlock:
static void tcp_synack_timer(struct sock *sk)
{
- inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
- TCP_TIMEOUT_INIT, TCP_RTO_MAX);
+ inet_csk_reqsk_queue_prune(sk, sysctl_tcp_synq_interval,
+ sysctl_tcp_timeout_init, sysctl_tcp_rto_max);
}
void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 5a0d664..187ec44 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -41,7 +41,7 @@ static __u16 const msstab[] = {
/*
* This (misnamed) value is the age of syncookie which is permitted.
- * Its ideal value should be dependent on TCP_TIMEOUT_INIT and
+ * Its ideal value should be dependent on sysctl_tcp_timeout_init and
* sysctl_tcp_retries1. It's a rather complicated formula (exponential
* backoff) to compute at runtime so it's currently hardcoded here.
*/
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index b859e4a..fe9925e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1332,7 +1332,7 @@ have_isn:
want_cookie)
goto drop_and_free;
- inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+ inet6_csk_reqsk_queue_hash_add(sk, req, sysctl_tcp_timeout_init);
return 0;
drop_and_release:
@@ -1948,8 +1948,8 @@ static int tcp_v6_init_sock(struct sock *sk)
tcp_init_xmit_timers(sk);
tcp_prequeue_init(tp);
- icsk->icsk_rto = TCP_TIMEOUT_INIT;
- tp->mdev = TCP_TIMEOUT_INIT;
+ icsk->icsk_rto = sysctl_tcp_timeout_init;
+ tp->mdev = sysctl_tcp_timeout_init;
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control
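
As a sanity check on the tunables' interaction, the give-up time computed by retransmits_timed_out() in the tcp_timer.c hunk above can be re-derived in userspace. A hedged sketch, assuming HZ=1000 so jiffies equal milliseconds; the defaults mirror TCP_RTO_MIN=200ms, TCP_RTO_MAX=120s, and tcp_retries2=15:

#include <stdio.h>

/* Userspace stand-in for the kernel's ilog2() on positive values. */
static unsigned int ilog2(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Total time until "boundary" exponentially backed-off retransmissions
 * expire: the RTO doubles from rto_base until it is clamped at rto_max,
 * after which each further retransmission adds a flat rto_max. */
static unsigned int retrans_timeout(unsigned int boundary,
				    unsigned int rto_base,
				    unsigned int rto_max)
{
	unsigned int linear_backoff_thresh = ilog2(rto_max / rto_base);

	if (boundary <= linear_backoff_thresh)
		return ((2 << boundary) - 1) * rto_base;
	return ((2 << linear_backoff_thresh) - 1) * rto_base +
	       (boundary - linear_backoff_thresh) * rto_max;
}

int main(void)
{
	/* Stock values: rto_min=200ms, rto_max=120000ms, boundary=15 */
	printf("%u ms\n", retrans_timeout(15, 200, 120000));
	return 0;
}

This prints 924600 ms (~15.4 minutes), the familiar default write timeout; lowering net.ipv4.tcp_rto_max shrinks it roughly linearly once the backoff is clamped.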