Skip to content

Instantly share code, notes, and snippets.

@0x7f454c46
Created September 18, 2023 18:24
Show Gist options
  • Save 0x7f454c46/fe546b9cf323ca21acc3d0eabbd41236 to your computer and use it in GitHub Desktop.
TCP-AO v11...v12
1: a291f3b54022 = 1: 435d4f9ce486 net/tcp: Prepare tcp_md5sig_pool for TCP-AO
2: 6b2f304dfa84 = 2: af930060117e net/tcp: Add TCP-AO config and structures
3: a067d79b4f1a = 3: 000a493414ce net/tcp: Introduce TCP_AO setsockopt()s
4: 296293cbde93 = 4: beff7a212fe1 net/tcp: Prevent TCP-MD5 with TCP-AO being set
5: e24e9dfc9d4c = 5: 7e877c760a84 net/tcp: Calculate TCP-AO traffic keys
6: 8db71a6fa739 ! 6: d60355172baf net/tcp: Add TCP-AO sign to outgoing packets
@@ include/net/tcp.h: struct tcp_sock_af_ops {
#endif
};
+@@ include/net/tcp.h: static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
+ }
+ #endif
+
++struct tcp_key {
++ union {
++ struct tcp_ao_key *ao_key;
++ struct tcp_md5sig_key *md5_key;
++ };
++ enum {
++ TCP_KEY_NONE = 0,
++ TCP_KEY_MD5,
++ TCP_KEY_AO,
++ } type;
++};
++
++static inline void tcp_get_current_key(const struct sock *sk,
++ struct tcp_key *out)
++{
++#if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG)
++ const struct tcp_sock *tp = tcp_sk(sk);
++#endif
++#ifdef CONFIG_TCP_AO
++ struct tcp_ao_info *ao;
++
++ ao = rcu_dereference_protected(tp->ao_info, lockdep_sock_is_held(sk));
++ if (ao) {
++ out->ao_key = READ_ONCE(ao->current_key);
++ out->type = TCP_KEY_AO;
++ return;
++ }
++#endif
++#ifdef CONFIG_TCP_MD5SIG
++ if (static_branch_unlikely(&tcp_md5_needed.key) &&
++ rcu_access_pointer(tp->md5sig_info)) {
++ out->md5_key = tp->af_specific->md5_lookup(sk, sk);
++ if (out->md5_key) {
++ out->type = TCP_KEY_MD5;
++ return;
++ }
++ }
++#endif
++ out->type = TCP_KEY_NONE;
++}
++
++static inline bool tcp_key_is_md5(const struct tcp_key *key)
++{
++#ifdef CONFIG_TCP_MD5SIG
++ if (static_branch_unlikely(&tcp_md5_needed.key) &&
++ key->type == TCP_KEY_MD5)
++ return true;
++#endif
++ return false;
++}
++
++static inline bool tcp_key_is_ao(const struct tcp_key *key)
++{
++#ifdef CONFIG_TCP_AO
++ if (key->type == TCP_KEY_AO)
++ return true;
++#endif
++ return false;
++}
++
+ int tcpv4_offload_init(void);
+
+ void tcp_v4_init(void);
## include/net/tcp_ao.h ##
@@ include/net/tcp_ao.h: struct tcp6_ao_context {
@@ net/ipv4/tcp_output.c: static void bpf_skops_write_hdr_opt(struct sock *sk, stru
static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp,
- struct tcp_out_options *opts)
+ struct tcp_out_options *opts,
-+ struct tcp_ao_key *ao_key)
++ struct tcp_key *key)
{
__be32 *ptr = (__be32 *)(th + 1);
u16 options = opts->options; /* mungable copy */
-@@ net/ipv4/tcp_output.c: static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp,
+
+- if (unlikely(OPTION_MD5 & options)) {
++ if (tcp_key_is_md5(key)) {
+ *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+ (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
+ /* overload cookie hash location */
opts->hash_location = (__u8 *)ptr;
ptr += 4;
- }
+- }
++ } else if (tcp_key_is_ao(key)) {
+#ifdef CONFIG_TCP_AO
-+ if (unlikely(OPTION_AO & options)) {
+ struct tcp_ao_key *rnext_key;
+ struct tcp_ao_info *ao_info;
+ u8 maclen;
-+ if (WARN_ON_ONCE(!ao_key))
-+ goto out_ao;
+ ao_info = rcu_dereference_check(tp->ao_info,
+ lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk));
+ rnext_key = READ_ONCE(ao_info->rnext_key);
+ if (WARN_ON_ONCE(!rnext_key))
+ goto out_ao;
-+ maclen = tcp_ao_maclen(ao_key);
++ maclen = tcp_ao_maclen(key->ao_key);
+ *ptr++ = htonl((TCPOPT_AO << 24) |
-+ (tcp_ao_len(ao_key) << 16) |
-+ (ao_key->sndid << 8) |
++ (tcp_ao_len(key->ao_key) << 16) |
++ (key->ao_key->sndid << 8) |
+ (rnext_key->rcvid));
+ opts->hash_location = (__u8 *)ptr;
+ ptr += maclen / sizeof(*ptr);
@@ net/ipv4/tcp_output.c: static void tcp_options_write(struct tcphdr *th, struct t
+ memset(ptr, TCPOPT_NOP, sizeof(*ptr));
+ ptr++;
+ }
-+ }
+out_ao:
+#endif
++ }
if (unlikely(opts->mss)) {
*ptr++ = htonl((TCPOPT_MSS << 24) |
(TCPOLEN_MSS << 16) |
@@ net/ipv4/tcp_output.c: static void mptcp_set_option_cond(const struct request_so
static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
struct tcp_out_options *opts,
- struct tcp_md5sig_key **md5)
-+ struct tcp_md5sig_key **md5,
-+ struct tcp_ao_key *ao_key)
++ struct tcp_key *key)
{
struct tcp_sock *tp = tcp_sk(sk);
unsigned int remaining = MAX_TCP_OPTION_SPACE;
-@@ net/ipv4/tcp_output.c: static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
+ struct tcp_fastopen_request *fastopen = tp->fastopen_req;
++ bool timestamps;
+
+- *md5 = NULL;
+-#ifdef CONFIG_TCP_MD5SIG
+- if (static_branch_unlikely(&tcp_md5_needed.key) &&
+- rcu_access_pointer(tp->md5sig_info)) {
+- *md5 = tp->af_specific->md5_lookup(sk, sk);
+- if (*md5) {
+- opts->options |= OPTION_MD5;
+- remaining -= TCPOLEN_MD5SIG_ALIGNED;
++ /* Better than switch (key.type) as it has static branches */
++ if (tcp_key_is_md5(key)) {
++ timestamps = false;
++ opts->options |= OPTION_MD5;
++ remaining -= TCPOLEN_MD5SIG_ALIGNED;
++ } else {
++ timestamps = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps);
++ if (tcp_key_is_ao(key)) {
++ opts->options |= OPTION_AO;
++ remaining -= tcp_ao_len(key->ao_key);
}
}
- #endif
-+#ifdef CONFIG_TCP_AO
-+ if (ao_key) {
-+ opts->options |= OPTION_AO;
-+ remaining -= tcp_ao_len(ao_key);
-+ }
-+#endif
+-#endif
/* We always get an MSS option. The option bytes which will be seen in
* normal data packets should timestamps be used, must be in the MSS
+@@ net/ipv4/tcp_output.c: static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
+ opts->mss = tcp_advertise_mss(sk);
+ remaining -= TCPOLEN_MSS_ALIGNED;
+
+- if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps) && !*md5)) {
++ if (likely(timestamps)) {
+ opts->options |= OPTION_TS;
+ opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
+ opts->tsecr = tp->rx_opt.ts_recent;
@@ net/ipv4/tcp_output.c: static unsigned int tcp_synack_options(const struct sock *sk,
*/
static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
struct tcp_out_options *opts,
- struct tcp_md5sig_key **md5)
-+ struct tcp_md5sig_key **md5,
-+ struct tcp_ao_key *ao_key)
++ struct tcp_key *key)
{
struct tcp_sock *tp = tcp_sk(sk);
unsigned int size = 0;
@@ net/ipv4/tcp_output.c: static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
- }
- }
- #endif
-+#ifdef CONFIG_TCP_AO
-+ if (ao_key) {
+
+ opts->options = 0;
+
+- *md5 = NULL;
+-#ifdef CONFIG_TCP_MD5SIG
+- if (static_branch_unlikely(&tcp_md5_needed.key) &&
+- rcu_access_pointer(tp->md5sig_info)) {
+- *md5 = tp->af_specific->md5_lookup(sk, sk);
+- if (*md5) {
+- opts->options |= OPTION_MD5;
+- size += TCPOLEN_MD5SIG_ALIGNED;
+- }
++ /* Better than switch (key.type) as it has static branches */
++ if (tcp_key_is_md5(key)) {
++ opts->options |= OPTION_MD5;
++ size += TCPOLEN_MD5SIG_ALIGNED;
++ } else if (tcp_key_is_ao(key)) {
+ opts->options |= OPTION_AO;
-+ size += tcp_ao_len(ao_key);
-+ }
-+#endif
++ size += tcp_ao_len(key->ao_key);
+ }
+-#endif
if (likely(tp->rx_opt.tstamp_ok)) {
opts->options |= OPTION_TS;
@@ net/ipv4/tcp_output.c: static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
- int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
- {
- const struct inet_connection_sock *icsk = inet_csk(sk);
-+ struct tcp_ao_key *ao_key = NULL;
- struct inet_sock *inet;
- struct tcp_sock *tp;
- struct tcp_skb_cb *tcb;
-@@ net/ipv4/tcp_output.c: static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+ struct tcp_out_options opts;
unsigned int tcp_options_size, tcp_header_size;
struct sk_buff *oskb = NULL;
- struct tcp_md5sig_key *md5;
-+#ifdef CONFIG_TCP_AO
-+ struct tcp_ao_info *ao;
-+#endif
+- struct tcp_md5sig_key *md5;
++ struct tcp_key key;
struct tcphdr *th;
u64 prior_wstamp;
int err;
@@ net/ipv4/tcp_output.c: static int __tcp_transmit_skb(struct sock *sk, struct sk_
tcb = TCP_SKB_CB(skb);
memset(&opts, 0, sizeof(opts));
-+#ifdef CONFIG_TCP_AO
-+ ao = rcu_dereference_protected(tcp_sk(sk)->ao_info,
-+ lockdep_sock_is_held(sk));
-+ if (ao)
-+ ao_key = READ_ONCE(ao->current_key);
-+#endif
++ tcp_get_current_key(sk, &key);
if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
- tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5);
-+ tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5, ao_key);
++ tcp_options_size = tcp_syn_options(sk, skb, &opts, &key);
} else {
- tcp_options_size = tcp_established_options(sk, skb, &opts,
+- tcp_options_size = tcp_established_options(sk, skb, &opts,
- &md5);
-+ &md5, ao_key);
++ tcp_options_size = tcp_established_options(sk, skb, &opts, &key);
/* Force a PSH flag on all (GSO) packets to expedite GRO flush
* at receiver : This slightly improve GRO performance.
* Note that we do not force the PSH flag for non GSO packets,
@@ net/ipv4/tcp_output.c: static int __tcp_transmit_skb(struct sock *sk, struct sk_
}
- tcp_options_write(th, tp, &opts);
-+ tcp_options_write(th, tp, &opts, ao_key);
++ tcp_options_write(th, tp, &opts, &key);
++ if (tcp_key_is_md5(&key)) {
#ifdef CONFIG_TCP_MD5SIG
- /* Calculate the MD5 hash, as we have all we need now */
-@@ net/ipv4/tcp_output.c: static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
- md5, sk, skb);
- }
+- /* Calculate the MD5 hash, as we have all we need now */
+- if (md5) {
++ /* Calculate the MD5 hash, as we have all we need now */
+ sk_gso_disable(sk);
+ tp->af_specific->calc_md5_hash(opts.hash_location,
+- md5, sk, skb);
+- }
++ key.md5_key, sk, skb);
#endif
++ } else if (tcp_key_is_ao(&key)) {
+#ifdef CONFIG_TCP_AO
-+ if (ao) {
-+ u8 *traffic_key;
++ struct tcp_ao_info *ao;
+ void *tkey_buf = NULL;
++ u8 *traffic_key;
+ __be32 disn;
+
-+ sk_gso_disable(sk);
++ ao = rcu_dereference_protected(tcp_sk(sk)->ao_info,
++ lockdep_sock_is_held(sk));
+ if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
+ if (tcb->tcp_flags & TCPHDR_ACK)
+ disn = ao->risn;
+ else
+ disn = 0;
+
-+ tkey_buf = kmalloc(tcp_ao_digest_size(ao_key), GFP_ATOMIC);
-+ if (!tkey_buf)
++ tkey_buf = kmalloc(tcp_ao_digest_size(key.ao_key),
++ GFP_ATOMIC);
++ if (!tkey_buf) {
++ kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
+ return -ENOMEM;
++ }
+ traffic_key = tkey_buf;
-+ tp->af_specific->ao_calc_key_sk(ao_key, traffic_key,
++ tp->af_specific->ao_calc_key_sk(key.ao_key, traffic_key,
+ sk, ao->lisn, disn, true);
+ } else {
-+ traffic_key = snd_other_key(ao_key);
++ traffic_key = snd_other_key(key.ao_key);
+ }
-+ tp->af_specific->calc_ao_hash(opts.hash_location, ao_key, sk, skb,
-+ traffic_key,
++ tp->af_specific->calc_ao_hash(opts.hash_location, key.ao_key,
++ sk, skb, traffic_key,
+ opts.hash_location - (u8 *)th, 0);
+ kfree(tkey_buf);
-+ }
+#endif
++ }
/* BPF prog is the last one writing header option */
bpf_skops_write_hdr_opt(sk, skb, NULL, NULL, 0, &opts);
@@ net/ipv4/tcp_output.c: unsigned int tcp_current_mss(struct sock *sk)
- {
- const struct tcp_sock *tp = tcp_sk(sk);
- const struct dst_entry *dst = __sk_dst_get(sk);
-+ struct tcp_ao_key *ao_key = NULL;
u32 mss_now;
unsigned int header_len;
struct tcp_out_options opts;
- struct tcp_md5sig_key *md5;
-+#ifdef CONFIG_TCP_AO
-+ struct tcp_ao_info *ao_info;
-+#endif
+- struct tcp_md5sig_key *md5;
++ struct tcp_key key;
mss_now = tp->mss_cache;
@@ net/ipv4/tcp_output.c: unsigned int tcp_current_mss(struct sock *sk)
}
-
- header_len = tcp_established_options(sk, NULL, &opts, &md5) +
-+#ifdef CONFIG_TCP_AO
-+ ao_info = rcu_dereference_check(tp->ao_info, lockdep_sock_is_held(sk));
-+ if (ao_info)
-+ /* TODO: verify if we can access current_key or we need to pass
-+ * it from every caller of tcp_current_mss instead. The reason
-+ * is that the current_key pointer can change asynchronously
-+ * from the rx path.
-+ */
-+ ao_key = READ_ONCE(ao_info->current_key);
-+#endif
-+ header_len = tcp_established_options(sk, NULL, &opts, &md5, ao_key) +
++ tcp_get_current_key(sk, &key);
++ header_len = tcp_established_options(sk, NULL, &opts, &key) +
sizeof(struct tcphdr);
/* The mss_cache is sized based on tp->tcp_header_len, which assumes
* some common options. If this is an odd packet (because we have SACK
@@ net/ipv4/tcp_output.c: struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
+ const struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_md5sig_key *md5 = NULL;
+ struct tcp_out_options opts;
++ struct tcp_key key = {};
+ struct sk_buff *skb;
+ int tcp_header_size;
+ struct tcphdr *th;
+@@ net/ipv4/tcp_output.c: struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
+ #ifdef CONFIG_TCP_MD5SIG
+ rcu_read_lock();
+ md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
++ if (md5)
++ key.type = TCP_KEY_MD5;
+ #endif
+ skb_set_hash(skb, READ_ONCE(tcp_rsk(req)->txhash), PKT_HASH_TYPE_L4);
+ /* bpf program will be interested in the tcp_flags */
+@@ net/ipv4/tcp_output.c: struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
th->window = htons(min(req->rsk_rcv_wnd, 65535U));
- tcp_options_write(th, NULL, &opts);
-+ tcp_options_write(th, NULL, &opts, NULL);
++ tcp_options_write(th, NULL, &opts, &key);
th->doff = (tcp_header_size >> 2);
TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
7: 91df6553c815 = 7: e43c749c7a8c net/tcp: Add tcp_parse_auth_options()
8: 52a4789a5f2f ! 8: 73e52504575d net/tcp: Add AO sign to RST packets
@@ Commit message
Signed-off-by: Dmitry Safonov <dima@arista.com>
Acked-by: David Ahern <dsahern@kernel.org>
+ ## include/net/tcp.h ##
+@@ include/net/tcp.h: static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
+
+ struct tcp_key {
+ union {
+- struct tcp_ao_key *ao_key;
++ struct {
++ struct tcp_ao_key *ao_key;
++ u32 sne;
++ char *traffic_key;
++ u8 rcv_next;
++ };
+ struct tcp_md5sig_key *md5_key;
+ };
+ enum {
+
## include/net/tcp_ao.h ##
@@ include/net/tcp_ao.h: int tcp_ao_hash_skb(unsigned short int family,
const u8 *tkey, int hash_offset, u32 sne);
@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_send_reset(const struct sock *sk, struct
## net/ipv6/tcp_ipv6.c ##
@@ net/ipv6/tcp_ipv6.c: const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
+
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
u32 ack, u32 win, u32 tsval, u32 tsecr,
- int oif, struct tcp_md5sig_key *key, int rst,
+- int oif, struct tcp_md5sig_key *key, int rst,
- u8 tclass, __be32 label, u32 priority, u32 txhash)
-+ u8 tclass, __be32 label, u32 priority, u32 txhash,
-+ struct tcp_ao_key *ao_key, char *tkey,
-+ u8 rcv_next, u32 ao_sne)
++ int oif, int rst, u8 tclass, __be32 label,
++ u32 priority, u32 txhash, struct tcp_key *key)
{
const struct tcphdr *th = tcp_hdr(skb);
struct tcphdr *t1;
@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
- if (key)
+
+ if (tsecr)
+ tot_len += TCPOLEN_TSTAMP_ALIGNED;
+-#ifdef CONFIG_TCP_MD5SIG
+- if (key)
++ if (tcp_key_is_md5(key))
tot_len += TCPOLEN_MD5SIG_ALIGNED;
- #endif
-+#ifdef CONFIG_TCP_AO
-+ if (ao_key)
-+ tot_len += tcp_ao_len(ao_key);
-+#endif
-+#if defined(CONFIG_TCP_MD5SIG) && defined(CONFIG_TCP_AO)
-+ WARN_ON_ONCE(key && ao_key);
-+#endif
+-#endif
++ if (tcp_key_is_ao(key))
++ tot_len += tcp_ao_len(key->ao_key);
#ifdef CONFIG_MPTCP
- if (rst && !key) {
+- if (rst && !key) {
++ if (rst && !tcp_key_is_md5(key)) {
+ mrst = mptcp_reset_option(skb);
+
+ if (mrst)
@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
+ *topt++ = mrst;
+
+ #ifdef CONFIG_TCP_MD5SIG
+- if (key) {
++ if (tcp_key_is_md5(key)) {
+ *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+ (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
+- tcp_v6_md5_hash_hdr((__u8 *)topt, key,
++ tcp_v6_md5_hash_hdr((__u8 *)topt, key->md5_key,
+ &ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, t1);
}
#endif
+#ifdef CONFIG_TCP_AO
-+ if (ao_key) {
-+ *topt++ = htonl((TCPOPT_AO << 24) | (tcp_ao_len(ao_key) << 16) |
-+ (ao_key->sndid << 8) | (rcv_next));
++ if (tcp_key_is_ao(key)) {
++ *topt++ = htonl((TCPOPT_AO << 24) |
++ (tcp_ao_len(key->ao_key) << 16) |
++ (key->ao_key->sndid << 8) |
++ (key->rcv_next));
+
-+ tcp_ao_hash_hdr(AF_INET6, (char *)topt, ao_key, tkey,
++ tcp_ao_hash_hdr(AF_INET6, (char *)topt, key->ao_key,
++ key->traffic_key,
+ (union tcp_ao_addr *)&ipv6_hdr(skb)->saddr,
+ (union tcp_ao_addr *)&ipv6_hdr(skb)->daddr,
-+ t1, ao_sne);
++ t1, key->sne);
+ }
+#endif
@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_send_reset(const struct sock *sk, struct
- struct tcp_md5sig_key *key = NULL;
-#ifdef CONFIG_TCP_MD5SIG
const __u8 *md5_hash_location = NULL;
-+ u32 seq = 0, ack_seq = 0, ao_sne = 0;
+- unsigned char newhash[16];
+- int genhash;
+- struct sock *sk1 = NULL;
++#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
+ bool allocated_traffic_key = false;
-+ struct tcp_md5sig_key *key = NULL;
-+ struct tcp_ao_key *ao_key = NULL;
-+ const struct tcp_ao_hdr *aoh;
-+ char *traffic_key = NULL;
-+ __be32 label = 0;
-+ u32 priority = 0;
-+ struct net *net;
-+ u8 rcv_next = 0;
-+ u32 txhash = 0;
-+ int oif = 0;
-+#ifdef CONFIG_TCP_MD5SIG
- unsigned char newhash[16];
- int genhash;
- struct sock *sk1 = NULL;
#endif
-- __be32 label = 0;
-- u32 priority = 0;
-- struct net *net;
-- u32 txhash = 0;
-- int oif = 0;
++ const struct tcp_ao_hdr *aoh;
++ struct tcp_key key = {};
++ u32 seq = 0, ack_seq = 0;
+ __be32 label = 0;
+ u32 priority = 0;
+ struct net *net;
+ u32 txhash = 0;
+ int oif = 0;
++#ifdef CONFIG_TCP_MD5SIG
++ unsigned char newhash[16];
++ int genhash;
++ struct sock *sk1 = NULL;
++#endif
if (th->rst)
return;
@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_send_reset(const struct sock *sk, struct
if (sk && sk_fullsock(sk)) {
int l3index;
+@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
+ * in an L3 domain and inet_iif is set to it.
+ */
+ l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
+- key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index);
++ key.md5_key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index);
++ if (key.md5_key)
++ key.type = TCP_KEY_MD5;
+ } else if (md5_hash_location) {
+ int dif = tcp_v6_iif_l3_slave(skb);
+ int sdif = tcp_v6_sdif(skb);
+@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
+ */
+ l3index = tcp_v6_sdif(skb) ? dif : 0;
+
+- key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index);
+- if (!key)
++ key.md5_key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index);
++ if (!key.md5_key)
+ goto out;
++ key.type = TCP_KEY_MD5;
+
+- genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
++ genhash = tcp_v6_md5_hash_skb(newhash, key.md5_key, NULL, skb);
+ if (genhash || memcmp(md5_hash_location, newhash, 16) != 0)
+ goto out;
+ }
@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
(th->doff << 2);
@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_send_reset(const struct sock *sk, struct
+
+ l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
+ if (tcp_ao_prepare_reset(sk, skb, aoh, l3index,
-+ &ao_key, &traffic_key,
++ &key.ao_key, &key.traffic_key,
+ &allocated_traffic_key,
-+ &rcv_next, &ao_sne))
++ &key.rcv_next, &key.sne))
+ goto out;
++ key.type = TCP_KEY_AO;
+ }
+#endif
+
@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_send_reset(const struct sock *sk, struct
oif = sk->sk_bound_dev_if;
if (sk_fullsock(sk)) {
@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
+ label = ip6_flowlabel(ipv6h);
}
- tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1,
+- tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1,
- ipv6_get_dsfield(ipv6h), label, priority, txhash);
++ tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, 1,
+ ipv6_get_dsfield(ipv6h), label, priority, txhash,
-+ ao_key, traffic_key, rcv_next, ao_sne);
++ &key);
-#ifdef CONFIG_TCP_MD5SIG
+#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
out:
+ if (allocated_traffic_key)
-+ kfree(traffic_key);
++ kfree(key.traffic_key);
rcu_read_unlock();
#endif
}
-@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
+
+ static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
+ u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
+- struct tcp_md5sig_key *key, u8 tclass,
++ struct tcp_md5sig_key *md5_key, u8 tclass,
__be32 label, u32 priority, u32 txhash)
{
- tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
+- tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
- tclass, label, priority, txhash);
-+ tclass, label, priority, txhash, NULL, NULL, 0, 0);
++ struct tcp_key key = {
++ .md5_key = md5_key,
++ .type = md5_key ? TCP_KEY_MD5 : TCP_KEY_NONE,
++ };
++
++ tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, 0,
++ tclass, label, priority, txhash, &key);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
9: 03d42e48c538 ! 9: 2e9617ed61b0 net/tcp: Add TCP-AO sign to twsk
@@ net/ipv4/tcp_ao.c: static struct tcp_ao_info *setsockopt_ao_info(struct sock *sk
}
## net/ipv4/tcp_ipv4.c ##
-@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_send_ack(const struct sock *sk,
+@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
+ static void tcp_v4_send_ack(const struct sock *sk,
struct sk_buff *skb, u32 seq, u32 ack,
u32 win, u32 tsval, u32 tsecr, int oif,
- struct tcp_md5sig_key *key,
-+ struct tcp_ao_key *ao_key,
-+ u8 *traffic_key,
-+ u8 rcv_next,
-+ u32 ao_sne,
+- struct tcp_md5sig_key *key,
++ struct tcp_key *key,
int reply_flags, u8 tos, u32 txhash)
{
const struct tcphdr *th = tcp_hdr(skb);
@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_send_ack(const struct sock *sk,
struct net *net = sock_net(sk);
struct ip_reply_arg arg;
@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_send_ack(const struct sock *sk,
- key, ip_hdr(skb)->saddr,
+ rep.th.window = htons(win);
+
+ #ifdef CONFIG_TCP_MD5SIG
+- if (key) {
++ if (tcp_key_is_md5(key)) {
+ int offset = (tsecr) ? 3 : 0;
+
+ rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
+@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_send_ack(const struct sock *sk,
+ rep.th.doff = arg.iov[0].iov_len/4;
+
+ tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
+- key, ip_hdr(skb)->saddr,
++ key->md5_key, ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, &rep.th);
}
+#endif
+#ifdef CONFIG_TCP_AO
-+ if (ao_key) {
++ if (tcp_key_is_ao(key)) {
+ int offset = (tsecr) ? 3 : 0;
+
+ rep.opt[offset++] = htonl((TCPOPT_AO << 24) |
-+ (tcp_ao_len(ao_key) << 16) |
-+ (ao_key->sndid << 8) | rcv_next);
-+ arg.iov[0].iov_len += round_up(tcp_ao_len(ao_key), 4);
++ (tcp_ao_len(key->ao_key) << 16) |
++ (key->ao_key->sndid << 8) |
++ key->rcv_next);
++ arg.iov[0].iov_len += round_up(tcp_ao_len(key->ao_key), 4);
+ rep.th.doff = arg.iov[0].iov_len / 4;
+
+ tcp_ao_hash_hdr(AF_INET, (char *)&rep.opt[offset],
-+ ao_key, traffic_key,
++ key->ao_key, key->traffic_key,
+ (union tcp_ao_addr *)&ip_hdr(skb)->saddr,
+ (union tcp_ao_addr *)&ip_hdr(skb)->daddr,
-+ &rep.th, ao_sne);
++ &rep.th, key->sne);
+ }
-+ WARN_ON_ONCE(key && ao_key);
#endif
arg.flags = reply_flags;
arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_timewait_ack(struct sock *sk, struct sk_
{
struct inet_timewait_sock *tw = inet_twsk(sk);
struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
-+ struct tcp_ao_key *ao_key = NULL;
-+ u8 *traffic_key = NULL;
-+ u8 rcv_next = 0;
-+ u32 ao_sne = 0;
++ struct tcp_key key = {};
+#ifdef CONFIG_TCP_AO
+ struct tcp_ao_info *ao_info;
+
@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_timewait_ack(struct sock *sk, struct sk_
+ if (ao_info) {
+ const struct tcp_ao_hdr *aoh;
+
-+ if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
-+ goto out; /* something is wrong with the sign */
++ if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh)) {
++ inet_twsk_put(tw);
++ return;
++ }
+
+ if (aoh)
-+ ao_key = tcp_ao_established_key(ao_info, aoh->rnext_keyid, -1);
++ key.ao_key = tcp_ao_established_key(ao_info, aoh->rnext_keyid, -1);
+ }
-+ if (ao_key) {
++ if (key.ao_key) {
+ struct tcp_ao_key *rnext_key;
+
-+ traffic_key = snd_other_key(ao_key);
++ key.traffic_key = snd_other_key(key.ao_key);
+ rnext_key = READ_ONCE(ao_info->rnext_key);
-+ rcv_next = rnext_key->rcvid;
-+ }
++ key.rcv_next = rnext_key->rcvid;
++ key.type = TCP_KEY_AO;
++#else
++ if (0) {
+#endif
++#ifdef CONFIG_TCP_MD5SIG
++ } else if (static_branch_unlikely(&tcp_md5_needed.key)) {
++ key.md5_key = tcp_twsk_md5_key(tcptw);
++ if (key.md5_key)
++ key.type = TCP_KEY_MD5;
++#endif
++ }
tcp_v4_send_ack(sk, skb,
tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
-@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
+ tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+ tcp_time_stamp_raw() + tcptw->tw_ts_offset,
tcptw->tw_ts_recent,
- tw->tw_bound_dev_if,
- tcp_twsk_md5_key(tcptw),
-+ ao_key, traffic_key, rcv_next, ao_sne,
+- tw->tw_bound_dev_if,
+- tcp_twsk_md5_key(tcptw),
++ tw->tw_bound_dev_if, &key,
tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
tw->tw_tos,
- tw->tw_txhash
- );
+- tw->tw_txhash
+- );
++ tw->tw_txhash);
-+#ifdef CONFIG_TCP_AO
-+out:
-+#endif
inet_twsk_put(tw);
}
+@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
+ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req)
+ {
+- const union tcp_md5_addr *addr;
+- int l3index;
++ struct tcp_key key = {};
+ /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
+ * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+ * exception of <SYN> segments, MUST be right-shifted by
+ * Rcv.Wind.Shift bits:
+ */
+- addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
+- l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
++#ifdef CONFIG_TCP_MD5SIG
++ if (static_branch_unlikely(&tcp_md5_needed.key)) {
++ const union tcp_md5_addr *addr;
++ int l3index;
++
++ addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
++ l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
++ key.md5_key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
++ if (key.md5_key)
++ key.type = TCP_KEY_MD5;
++ }
++#endif
+ tcp_v4_send_ack(sk, skb, seq,
+ tcp_rsk(req)->rcv_nxt,
+ req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
+ tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
READ_ONCE(req->ts_recent),
- 0,
- tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
-+ NULL, NULL, 0, 0,
+- 0,
+- tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
++ 0, &key,
inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
ip_hdr(skb)->tos,
READ_ONCE(tcp_rsk(req)->txhash));
@@ net/ipv4/tcp_output.c: int tcp_connect(struct sock *sk)
kfree(rcu_replace_pointer(tp->md5sig_info, NULL,
## net/ipv6/tcp_ipv6.c ##
+@@ net/ipv6/tcp_ipv6.c: static int tcp_v6_md5_hash_skb(char *md5_hash,
+ memset(md5_hash, 0, 16);
+ return 1;
+ }
+-#else /* CONFIG_TCP_MD5SIG */
+-static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
+- const struct in6_addr *addr,
+- int l3index)
+-{
+- return NULL;
+-}
+ #endif
+
+ static void tcp_v6_init_req(struct request_sock *req,
@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
+
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
- struct tcp_md5sig_key *key, u8 tclass,
-- __be32 label, u32 priority, u32 txhash)
-+ __be32 label, u32 priority, u32 txhash,
-+ struct tcp_ao_key *ao_key, char *tkey,
-+ u8 rcv_next, u32 ao_sne)
+- struct tcp_md5sig_key *md5_key, u8 tclass,
++ struct tcp_key *key, u8 tclass,
+ __be32 label, u32 priority, u32 txhash)
{
- tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
-- tclass, label, priority, txhash, NULL, NULL, 0, 0);
-+ tclass, label, priority, txhash,
-+ ao_key, tkey, rcv_next, ao_sne);
+- struct tcp_key key = {
+- .md5_key = md5_key,
+- .type = md5_key ? TCP_KEY_MD5 : TCP_KEY_NONE,
+- };
+-
+ tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, 0,
+- tclass, label, priority, txhash, &key);
++ tclass, label, priority, txhash, key);
}
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
struct inet_timewait_sock *tw = inet_twsk(sk);
struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
-+ struct tcp_ao_key *ao_key = NULL;
-+ u8 *traffic_key = NULL;
-+ u8 rcv_next = 0;
-+ u32 ao_sne = 0;
++ struct tcp_key key = {};
+#ifdef CONFIG_TCP_AO
+ struct tcp_ao_info *ao_info;
+
@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_send_reset(const struct sock *sk, struct
+ /* Invalid TCP option size or twice included auth */
+ if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
+ goto out;
-+ if (aoh)
-+ ao_key = tcp_ao_established_key(ao_info, aoh->rnext_keyid, -1);
++ if (aoh) {
++ key.ao_key = tcp_ao_established_key(ao_info,
++ aoh->rnext_keyid, -1);
++ }
+ }
-+ if (ao_key) {
++ if (key.ao_key) {
+ struct tcp_ao_key *rnext_key;
+
-+ traffic_key = snd_other_key(ao_key);
++ key.traffic_key = snd_other_key(key.ao_key);
+ /* rcv_next switches to our rcv_next */
+ rnext_key = READ_ONCE(ao_info->rnext_key);
-+ rcv_next = rnext_key->rcvid;
-+ }
++ key.rcv_next = rnext_key->rcvid;
++ key.type = TCP_KEY_AO;
++#else
++ if (0) {
+#endif
++#ifdef CONFIG_TCP_MD5SIG
++ } else if (static_branch_unlikely(&tcp_md5_needed.key)) {
++ key.md5_key = tcp_twsk_md5_key(tcptw);
++ if (key.md5_key)
++ key.type = TCP_KEY_MD5;
++#endif
++ }
tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
tcp_time_stamp_raw() + tcptw->tw_ts_offset,
- tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
+- tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
++ tcptw->tw_ts_recent, tw->tw_bound_dev_if, &key,
tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority,
-- tw->tw_txhash);
-+ tw->tw_txhash, ao_key, traffic_key, rcv_next, ao_sne);
+ tw->tw_txhash);
+#ifdef CONFIG_TCP_AO
+out:
@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_send_reset(const struct sock *sk, struct
inet_twsk_put(tw);
}
+ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req)
+ {
+- int l3index;
++ struct tcp_key key = {};
+
+- l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
++#ifdef CONFIG_TCP_MD5SIG
++ if (static_branch_unlikely(&tcp_md5_needed.key)) {
++ int l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
++
++ key.md5_key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr,
++ l3index);
++ if (key.md5_key)
++ key.type = TCP_KEY_MD5;
++ }
++#endif
+
+ /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
+ * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
- tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
- ipv6_get_dsfield(ipv6_hdr(skb)), 0,
+ req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
+ tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
+ READ_ONCE(req->ts_recent), sk->sk_bound_dev_if,
+- tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
+- ipv6_get_dsfield(ipv6_hdr(skb)), 0,
++ &key, ipv6_get_dsfield(ipv6_hdr(skb)), 0,
READ_ONCE(sk->sk_priority),
-- READ_ONCE(tcp_rsk(req)->txhash));
-+ READ_ONCE(tcp_rsk(req)->txhash),
-+ NULL, NULL, 0, 0);
+ READ_ONCE(tcp_rsk(req)->txhash));
}
-
-
10: 2bfe0ad0ddbd ! 10: 168a943888aa net/tcp: Wire TCP-AO to request sockets
@@ net/ipv4/tcp_input.c: int tcp_conn_request(struct request_sock_ops *rsk_ops,
tcp_rsk(req)->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
## net/ipv4/tcp_ipv4.c ##
-@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
- static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
- struct request_sock *req)
- {
-+ struct tcp_md5sig_key *md5_key = NULL;
-+ struct tcp_ao_key *ao_key = NULL;
- const union tcp_md5_addr *addr;
-- int l3index;
-+ u8 *traffic_key = NULL;
-+ u8 keyid = 0;
-+#ifdef CONFIG_TCP_AO
-+ const struct tcp_ao_hdr *aoh;
-+#endif
-
- /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
- * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
- */
+@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
tcp_sk(sk)->snd_nxt;
-+ addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
-+ if (tcp_rsk_used_ao(req)) {
+- /* RFC 7323 2.3
+- * The window field (SEG.WND) of every outgoing segment, with the
+- * exception of <SYN> segments, MUST be right-shifted by
+- * Rcv.Wind.Shift bits:
+- */
+#ifdef CONFIG_TCP_AO
++ if (tcp_rsk_used_ao(req)) {
++ const union tcp_md5_addr *addr;
++ const struct tcp_ao_hdr *aoh;
++
+ /* Invalid TCP option size or twice included auth */
+ if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
+ return;
-+
+ if (!aoh)
+ return;
+
-+ ao_key = tcp_ao_do_lookup(sk, addr, AF_INET, aoh->rnext_keyid, -1);
-+ if (unlikely(!ao_key)) {
++ addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
++ key.ao_key = tcp_ao_do_lookup(sk, addr, AF_INET,
++ aoh->rnext_keyid, -1);
++ if (unlikely(!key.ao_key)) {
+ /* Send ACK with any matching MKT for the peer */
-+ ao_key = tcp_ao_do_lookup(sk, addr, AF_INET, -1, -1);
++ key.ao_key = tcp_ao_do_lookup(sk, addr, AF_INET, -1, -1);
+ /* Matching key disappeared (user removed the key?)
+ * let the handshake timeout.
+ */
-+ if (!ao_key) {
++ if (!key.ao_key) {
+ net_info_ratelimited("TCP-AO key for (%pI4, %d)->(%pI4, %d) suddenly disappeared, won't ACK new connection\n",
+ addr,
+ ntohs(tcp_hdr(skb)->source),
@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_timewait_ack(struct sock *sk, struct sk_
+ return;
+ }
+ }
-+ traffic_key = kmalloc(tcp_ao_digest_size(ao_key), GFP_ATOMIC);
-+ if (!traffic_key)
++ key.traffic_key = kmalloc(tcp_ao_digest_size(key.ao_key), GFP_ATOMIC);
++ if (!key.traffic_key)
+ return;
+
-+ keyid = aoh->keyid;
-+ tcp_v4_ao_calc_key_rsk(ao_key, traffic_key, req);
++ key.type = TCP_KEY_AO;
++ key.rcv_next = aoh->keyid;
++ tcp_v4_ao_calc_key_rsk(key.ao_key, key.traffic_key, req);
++#else
++ if (0) {
+#endif
-+ } else {
-+ int l3index;
-+
-+ l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
-+ md5_key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
+ #ifdef CONFIG_TCP_MD5SIG
+- if (static_branch_unlikely(&tcp_md5_needed.key)) {
++ } else if (static_branch_unlikely(&tcp_md5_needed.key)) {
+ const union tcp_md5_addr *addr;
+ int l3index;
+
+@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+ key.md5_key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
+ if (key.md5_key)
+ key.type = TCP_KEY_MD5;
+- }
+ #endif
+ }
- /* RFC 7323 2.3
- * The window field (SEG.WND) of every outgoing segment, with the
- * exception of <SYN> segments, MUST be right-shifted by
- * Rcv.Wind.Shift bits:
- */
-- addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
-- l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
++
++ /* RFC 7323 2.3
++ * The window field (SEG.WND) of every outgoing segment, with the
++ * exception of <SYN> segments, MUST be right-shifted by
++ * Rcv.Wind.Shift bits:
++ */
tcp_v4_send_ack(sk, skb, seq,
tcp_rsk(req)->rcv_nxt,
req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
- tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
- READ_ONCE(req->ts_recent),
- 0,
-- tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
-- NULL, NULL, 0, 0,
-+ md5_key, ao_key, traffic_key, keyid, 0,
+@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
ip_hdr(skb)->tos,
READ_ONCE(tcp_rsk(req)->txhash));
-+ kfree(traffic_key);
++ if (tcp_key_is_ao(&key))
++ kfree(key.traffic_key);
}
/*
@@ net/ipv4/tcp_output.c: static void bpf_skops_write_hdr_opt(struct sock *sk, stru
static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp,
+ const struct tcp_request_sock *tcprsk,
struct tcp_out_options *opts,
- struct tcp_ao_key *ao_key)
+ struct tcp_key *key)
{
@@ net/ipv4/tcp_output.c: static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp,
- }
+ ptr += 4;
+ } else if (tcp_key_is_ao(key)) {
#ifdef CONFIG_TCP_AO
- if (unlikely(OPTION_AO & options)) {
- struct tcp_ao_key *rnext_key;
- struct tcp_ao_info *ao_info;
- u8 maclen;
+- u8 maclen;
++ u8 maclen = tcp_ao_maclen(key->ao_key);
- if (WARN_ON_ONCE(!ao_key))
- goto out_ao;
- ao_info = rcu_dereference_check(tp->ao_info,
-- lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk));
-- rnext_key = READ_ONCE(ao_info->rnext_key);
-- if (WARN_ON_ONCE(!rnext_key))
-- goto out_ao;
- maclen = tcp_ao_maclen(ao_key);
-- *ptr++ = htonl((TCPOPT_AO << 24) |
-- (tcp_ao_len(ao_key) << 16) |
-- (ao_key->sndid << 8) |
-- (rnext_key->rcvid));
-+
+ if (tcprsk) {
+ u8 aolen = maclen + sizeof(struct tcp_ao_hdr);
+
+ *ptr++ = htonl((TCPOPT_AO << 24) | (aolen << 16) |
+ (tcprsk->ao_keyid << 8) |
+ (tcprsk->ao_rcv_next));
-+ } else if (WARN_ON_ONCE(!tp)) {
-+ goto out_ao;
+ } else {
+ struct tcp_ao_key *rnext_key;
+ struct tcp_ao_info *ao_info;
+
+ ao_info = rcu_dereference_check(tp->ao_info,
-+ lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk));
+ lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk));
+- rnext_key = READ_ONCE(ao_info->rnext_key);
+- if (WARN_ON_ONCE(!rnext_key))
+- goto out_ao;
+- maclen = tcp_ao_maclen(key->ao_key);
+- *ptr++ = htonl((TCPOPT_AO << 24) |
+- (tcp_ao_len(key->ao_key) << 16) |
+- (key->ao_key->sndid << 8) |
+- (rnext_key->rcvid));
+ rnext_key = READ_ONCE(ao_info->rnext_key);
+ if (WARN_ON_ONCE(!rnext_key))
+ goto out_ao;
+ *ptr++ = htonl((TCPOPT_AO << 24) |
-+ (tcp_ao_len(ao_key) << 16) |
-+ (ao_key->sndid << 8) |
++ (tcp_ao_len(key->ao_key) << 16) |
++ (key->ao_key->sndid << 8) |
+ (rnext_key->rcvid));
+ }
opts->hash_location = (__u8 *)ptr;
@@ net/ipv4/tcp_output.c: static int __tcp_transmit_skb(struct sock *sk, struct sk_
th->window = htons(min(tp->rcv_wnd, 65535U));
}
-- tcp_options_write(th, tp, &opts, ao_key);
-+ tcp_options_write(th, tp, NULL, &opts, ao_key);
+- tcp_options_write(th, tp, &opts, &key);
++ tcp_options_write(th, tp, NULL, &opts, &key);
+ if (tcp_key_is_md5(&key)) {
#ifdef CONFIG_TCP_MD5SIG
- /* Calculate the MD5 hash, as we have all we need now */
@@ net/ipv4/tcp_output.c: struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
th->window = htons(min(req->rsk_rcv_wnd, 65535U));
-- tcp_options_write(th, NULL, &opts, NULL);
-+ tcp_options_write(th, NULL, NULL, &opts, NULL);
+- tcp_options_write(th, NULL, &opts, &key);
++ tcp_options_write(th, NULL, NULL, &opts, &key);
th->doff = (tcp_header_size >> 2);
TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
@@ net/ipv6/tcp_ipv6.c: const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops
#ifdef CONFIG_SYN_COOKIES
.cookie_init_seq = cookie_v6_init_sequence,
#endif
-@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
- static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
- struct request_sock *req)
+@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
{
-+ struct tcp_md5sig_key *md5_key = NULL;
-+ struct tcp_ao_key *ao_key = NULL;
-+ const struct in6_addr *addr;
-+ u8 *traffic_key = NULL;
-+ u8 keyid = 0;
- int l3index;
+ struct tcp_key key = {};
- l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
-+ addr = &ipv6_hdr(skb)->saddr;
-+
-+ if (tcp_rsk_used_ao(req)) {
+#ifdef CONFIG_TCP_AO
++ if (tcp_rsk_used_ao(req)) {
++ const struct in6_addr *addr = &ipv6_hdr(skb)->saddr;
+ const struct tcp_ao_hdr *aoh;
++ int l3index;
+
++ l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
+ /* Invalid TCP option size or twice included auth */
+ if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
+ return;
+ if (!aoh)
+ return;
-+ ao_key = tcp_v6_ao_do_lookup(sk, addr, aoh->rnext_keyid, -1);
-+ if (unlikely(!ao_key)) {
++ key.ao_key = tcp_v6_ao_do_lookup(sk, addr, aoh->rnext_keyid, -1);
++ if (unlikely(!key.ao_key)) {
+ /* Send ACK with any matching MKT for the peer */
-+ ao_key = tcp_v6_ao_do_lookup(sk, addr, -1, -1);
++ key.ao_key = tcp_v6_ao_do_lookup(sk, addr, -1, -1);
+ /* Matching key disappeared (user removed the key?)
+ * let the handshake timeout.
+ */
-+ if (!ao_key) {
++ if (!key.ao_key) {
+ net_info_ratelimited("TCP-AO key for (%pI6, %d)->(%pI6, %d) suddenly disappeared, won't ACK new connection\n",
+ addr,
+ ntohs(tcp_hdr(skb)->source),
@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_timewait_ack(struct sock *sk, struct sk_
+ return;
+ }
+ }
-+ traffic_key = kmalloc(tcp_ao_digest_size(ao_key), GFP_ATOMIC);
-+ if (!traffic_key)
++ key.traffic_key = kmalloc(tcp_ao_digest_size(key.ao_key), GFP_ATOMIC);
++ if (!key.traffic_key)
+ return;
+
-+ keyid = aoh->keyid;
-+ tcp_v6_ao_calc_key_rsk(ao_key, traffic_key, req);
++ key.type = TCP_KEY_AO;
++ key.rcv_next = aoh->keyid;
++ tcp_v6_ao_calc_key_rsk(key.ao_key, key.traffic_key, req);
++#else
++ if (0) {
+#endif
-+ } else {
-+ md5_key = tcp_v6_md5_do_lookup(sk, addr, l3index);
+ #ifdef CONFIG_TCP_MD5SIG
+- if (static_branch_unlikely(&tcp_md5_needed.key)) {
++ } else if (static_branch_unlikely(&tcp_md5_needed.key)) {
+ int l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
+
+ key.md5_key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr,
+ l3index);
+ if (key.md5_key)
+ key.type = TCP_KEY_MD5;
+- }
+ #endif
+ }
/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
* sk->sk_state == TCP_SYN_RECV -> for Fast Open.
@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
- req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
- tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
- READ_ONCE(req->ts_recent), sk->sk_bound_dev_if,
-- tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
-+ md5_key,
- ipv6_get_dsfield(ipv6_hdr(skb)), 0,
+ &key, ipv6_get_dsfield(ipv6_hdr(skb)), 0,
READ_ONCE(sk->sk_priority),
- READ_ONCE(tcp_rsk(req)->txhash),
-- NULL, NULL, 0, 0);
-+ ao_key, traffic_key, keyid, 0);
-+ kfree(traffic_key);
+ READ_ONCE(tcp_rsk(req)->txhash));
++ if (tcp_key_is_ao(&key))
++ kfree(key.traffic_key);
}
11: 515597b76231 ! 11: e968cc683e6f net/tcp: Sign SYN-ACK segments with TCP-AO
@@ net/ipv4/tcp_ipv4.c: const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops
## net/ipv4/tcp_output.c ##
@@ net/ipv4/tcp_output.c: static unsigned int tcp_synack_options(const struct sock *sk,
+ struct request_sock *req,
unsigned int mss, struct sk_buff *skb,
struct tcp_out_options *opts,
- const struct tcp_md5sig_key *md5,
-+ const struct tcp_ao_key *ao,
+- const struct tcp_md5sig_key *md5,
++ const struct tcp_key *key,
struct tcp_fastopen_cookie *foc,
enum tcp_synack_type synack_type,
struct sk_buff *syn_skb)
@@ net/ipv4/tcp_output.c: static unsigned int tcp_synack_options(const struct sock *sk,
+ struct inet_request_sock *ireq = inet_rsk(req);
+ unsigned int remaining = MAX_TCP_OPTION_SPACE;
+
+-#ifdef CONFIG_TCP_MD5SIG
+- if (md5) {
++ if (tcp_key_is_md5(key)) {
+ opts->options |= OPTION_MD5;
+ remaining -= TCPOLEN_MD5SIG_ALIGNED;
+
+@@ net/ipv4/tcp_output.c: static unsigned int tcp_synack_options(const struct sock *sk,
+ */
+ if (synack_type != TCP_SYNACK_COOKIE)
ireq->tstamp_ok &= !ireq->sack_ok;
- }
- #endif
-+#ifdef CONFIG_TCP_AO
-+ if (ao) {
++ } else if (tcp_key_is_ao(key)) {
+ opts->options |= OPTION_AO;
-+ remaining -= tcp_ao_len(ao);
++ remaining -= tcp_ao_len(key->ao_key);
+ ireq->tstamp_ok &= !ireq->sack_ok;
-+ }
-+#endif
-+ WARN_ON_ONCE(md5 && ao);
+ }
+-#endif
/* We always send an MSS option. */
opts->mss = mss;
@@ net/ipv4/tcp_output.c: struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
+ {
struct inet_request_sock *ireq = inet_rsk(req);
const struct tcp_sock *tp = tcp_sk(sk);
- struct tcp_md5sig_key *md5 = NULL;
-+ struct tcp_ao_key *ao_key = NULL;
+- struct tcp_md5sig_key *md5 = NULL;
struct tcp_out_options opts;
+ struct tcp_key key = {};
struct sk_buff *skb;
- int tcp_header_size;
@@ net/ipv4/tcp_output.c: struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
tcp_rsk(req)->snt_synack = tcp_skb_timestamp_us(skb);
}
@@ net/ipv4/tcp_output.c: struct sk_buff *tcp_make_synack(const struct sock *sk, st
+#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
rcu_read_lock();
- md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
+- if (md5)
+- key.type = TCP_KEY_MD5;
#endif
+ if (tcp_rsk_used_ao(req)) {
+#ifdef CONFIG_TCP_AO
++ struct tcp_ao_key *ao_key = NULL;
+ u8 maclen = tcp_rsk(req)->maclen;
+ u8 keyid = tcp_rsk(req)->ao_keyid;
+
@@ net/ipv4/tcp_output.c: struct sk_buff *tcp_make_synack(const struct sock *sk, st
+ * ao_keyid (RFC5925 RNextKeyID), so let's keep it simple here.
+ */
+ if (unlikely(!ao_key || tcp_ao_maclen(ao_key) != maclen)) {
++ u8 key_maclen = ao_key ? tcp_ao_maclen(ao_key) : 0;
++
+ rcu_read_unlock();
-+ skb_dst_drop(skb);
+ kfree_skb(skb);
+ net_warn_ratelimited("TCP-AO: the keyid %u with maclen %u|%u from SYN packet is not present - not sending SYNACK\n",
-+ keyid, maclen,
-+ ao_key ? tcp_ao_maclen(ao_key) : 0);
++ keyid, maclen, key_maclen);
+ return NULL;
+ }
++ key.ao_key = ao_key;
++ key.type = TCP_KEY_AO;
+#endif
+ } else {
+#ifdef CONFIG_TCP_MD5SIG
-+ md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk,
-+ req_to_sk(req));
++ key.md5_key = tcp_rsk(req)->af_specific->req_md5_lookup(sk,
++ req_to_sk(req));
++ if (key.md5_key)
++ key.type = TCP_KEY_MD5;
+#endif
+ }
skb_set_hash(skb, READ_ONCE(tcp_rsk(req)->txhash), PKT_HASH_TYPE_L4);
/* bpf program will be interested in the tcp_flags */
TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK;
- tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
+- tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
- foc, synack_type,
- syn_skb) + sizeof(*th);
-+ ao_key, foc, synack_type, syn_skb)
++ tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts,
++ &key, foc, synack_type, syn_skb)
+ + sizeof(*th);
skb_push(skb, tcp_header_size);
@@ net/ipv4/tcp_output.c: struct sk_buff *tcp_make_synack(const struct sock *sk, st
/* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
th->window = htons(min(req->rsk_rcv_wnd, 65535U));
-- tcp_options_write(th, NULL, NULL, &opts, NULL);
-+ tcp_options_write(th, NULL, tcp_rsk(req), &opts, ao_key);
+- tcp_options_write(th, NULL, NULL, &opts, &key);
++ tcp_options_write(th, NULL, tcp_rsk(req), &opts, &key);
th->doff = (tcp_header_size >> 2);
TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
-@@ net/ipv4/tcp_output.c: struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
+-#ifdef CONFIG_TCP_MD5SIG
/* Okay, we have all we need - do the md5 hash if needed */
- if (md5)
+- if (md5)
++ if (tcp_key_is_md5(&key)) {
++#ifdef CONFIG_TCP_MD5SIG
tcp_rsk(req)->af_specific->calc_md5_hash(opts.hash_location,
- md5, req_to_sk(req), skb);
-+ md5, req_to_sk(req), skb);
++ key.md5_key, req_to_sk(req), skb);
+#endif
++ } else if (tcp_key_is_ao(&key)) {
+#ifdef CONFIG_TCP_AO
-+ if (ao_key)
+ tcp_rsk(req)->af_specific->ao_synack_hash(opts.hash_location,
-+ ao_key, req, skb,
++ key.ao_key, req, skb,
+ opts.hash_location - (u8 *)th, 0);
+#endif
++ }
+#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
rcu_read_unlock();
#endif
12: 19f1c1aa3289 = 12: c9973a4d3d49 net/tcp: Verify inbound TCP-AO signed segments
13: 174362ea65f4 = 13: 8853e3aa8936 net/tcp: Add TCP-AO segments counters
14: e1d3c735cb25 ! 14: 6c22dfd9e18d net/tcp: Add TCP-AO SNE support
@@ net/ipv4/tcp_ipv4.c: static bool tcp_v4_ao_sign_reset(const struct sock *sk, str
@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
struct tcp_ao_key *rnext_key;
- traffic_key = snd_other_key(ao_key);
-+ ao_sne = READ_ONCE(ao_info->snd_sne);
+ key.traffic_key = snd_other_key(key.ao_key);
++ key.sne = READ_ONCE(ao_info->snd_sne);
rnext_key = READ_ONCE(ao_info->rnext_key);
- rcv_next = rnext_key->rcvid;
- }
+ key.rcv_next = rnext_key->rcvid;
+ key.type = TCP_KEY_AO;
## net/ipv4/tcp_minisocks.c ##
@@ net/ipv4/tcp_minisocks.c: tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
@@ net/ipv4/tcp_minisocks.c: tcp_timewait_state_process(struct inet_timewait_sock *
## net/ipv4/tcp_output.c ##
@@ net/ipv4/tcp_output.c: static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
- u8 *traffic_key;
void *tkey_buf = NULL;
+ u8 *traffic_key;
__be32 disn;
+ u32 sne;
- sk_gso_disable(sk);
- if (unlikely(tcb->tcp_flags & TCPHDR_SYN)) {
+ ao = rcu_dereference_protected(tcp_sk(sk)->ao_info,
+ lockdep_sock_is_held(sk));
@@ net/ipv4/tcp_output.c: static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
} else {
- traffic_key = snd_other_key(ao_key);
+ traffic_key = snd_other_key(key.ao_key);
}
+ sne = tcp_ao_compute_sne(READ_ONCE(ao->snd_sne),
+ READ_ONCE(tp->snd_una),
+ ntohl(th->seq));
- tp->af_specific->calc_ao_hash(opts.hash_location, ao_key, sk, skb,
- traffic_key,
+ tp->af_specific->calc_ao_hash(opts.hash_location, key.ao_key,
+ sk, skb, traffic_key,
- opts.hash_location - (u8 *)th, 0);
+ opts.hash_location - (u8 *)th, sne);
kfree(tkey_buf);
- }
#endif
+ }
## net/ipv6/tcp_ipv6.c ##
@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_send_reset(const struct sock *sk, struct
l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
- if (tcp_ao_prepare_reset(sk, skb, aoh, l3index,
+ if (tcp_ao_prepare_reset(sk, skb, aoh, l3index, seq,
- &ao_key, &traffic_key,
+ &key.ao_key, &key.traffic_key,
&allocated_traffic_key,
- &rcv_next, &ao_sne))
+ &key.rcv_next, &key.sne))
@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
/* rcv_next switches to our rcv_next */
rnext_key = READ_ONCE(ao_info->rnext_key);
- rcv_next = rnext_key->rcvid;
-+ ao_sne = READ_ONCE(ao_info->snd_sne);
- }
- #endif
-
+ key.rcv_next = rnext_key->rcvid;
++ key.sne = READ_ONCE(ao_info->snd_sne);
+ key.type = TCP_KEY_AO;
+ #else
+ if (0) {
15: 8bc1d37e268b = 15: 8795f6f10f5a net/tcp: Add tcp_hash_fail() ratelimited logs
16: fd4dda1d3a30 = 16: f9eef5a78069 net/tcp: Ignore specific ICMPs for TCP-AO connections
17: 596a6b4d4343 = 17: 2bcdc665d2b9 net/tcp: Add option for TCP-AO to (not) hash header
18: ad9c69645d58 = 18: 8c8e5721acd7 net/tcp: Add TCP-AO getsockopt()s
19: af85fe6a711a = 19: 871084c908ed net/tcp: Allow asynchronous delete for TCP-AO keys (MKTs)
20: 03ae1b6b2dda ! 20: 63f2e24048a1 net/tcp: Add static_key for TCP-AO
@@ Commit message
Acked-by: David Ahern <dsahern@kernel.org>
## include/net/tcp.h ##
+@@ include/net/tcp.h: static inline void tcp_get_current_key(const struct sock *sk,
+ #if defined(CONFIG_TCP_AO) || defined(CONFIG_TCP_MD5SIG)
+ const struct tcp_sock *tp = tcp_sk(sk);
+ #endif
+-#ifdef CONFIG_TCP_AO
+- struct tcp_ao_info *ao;
+
+- ao = rcu_dereference_protected(tp->ao_info, lockdep_sock_is_held(sk));
+- if (ao) {
+- out->ao_key = READ_ONCE(ao->current_key);
+- out->type = TCP_KEY_AO;
+- return;
++#ifdef CONFIG_TCP_AO
++ if (static_branch_unlikely(&tcp_ao_needed.key)) {
++ struct tcp_ao_info *ao;
++
++ ao = rcu_dereference_protected(tp->ao_info,
++ lockdep_sock_is_held(sk));
++ if (ao) {
++ out->ao_key = READ_ONCE(ao->current_key);
++ out->type = TCP_KEY_AO;
++ return;
++ }
+ }
+ #endif
+ #ifdef CONFIG_TCP_MD5SIG
+@@ include/net/tcp.h: static inline bool tcp_key_is_md5(const struct tcp_key *key)
+ static inline bool tcp_key_is_ao(const struct tcp_key *key)
+ {
+ #ifdef CONFIG_TCP_AO
+- if (key->type == TCP_KEY_AO)
++ if (static_branch_unlikely(&tcp_ao_needed.key) &&
++ key->type == TCP_KEY_AO)
+ return true;
+ #endif
+ return false;
@@ include/net/tcp.h: static inline bool tcp_ao_required(struct sock *sk, const void *saddr,
struct tcp_ao_info *ao_info;
struct tcp_ao_key *ao_key;
@@ net/ipv4/tcp_input.c: static inline bool tcp_may_update_window(const struct tcp_
WRITE_ONCE(tp->rcv_nxt, seq);
}
+
+ ## net/ipv4/tcp_ipv4.c ##
+@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
+ #ifdef CONFIG_TCP_AO
+ struct tcp_ao_info *ao_info;
+
+- /* FIXME: the segment to-be-acked is not verified yet */
+- ao_info = rcu_dereference(tcptw->ao_info);
+- if (ao_info) {
+- const struct tcp_ao_hdr *aoh;
++ if (static_branch_unlikely(&tcp_ao_needed.key)) {
++ /* FIXME: the segment to-be-acked is not verified yet */
++ ao_info = rcu_dereference(tcptw->ao_info);
++ if (ao_info) {
++ const struct tcp_ao_hdr *aoh;
+
+- if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh)) {
+- inet_twsk_put(tw);
+- return;
++ if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh)) {
++ inet_twsk_put(tw);
++ return;
++ }
++
++ if (aoh)
++ key.ao_key = tcp_ao_established_key(ao_info, aoh->rnext_keyid, -1);
+ }
+-
+- if (aoh)
+- key.ao_key = tcp_ao_established_key(ao_info, aoh->rnext_keyid, -1);
+ }
+ if (key.ao_key) {
+ struct tcp_ao_key *rnext_key;
+@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+ tcp_sk(sk)->snd_nxt;
+
+ #ifdef CONFIG_TCP_AO
+- if (tcp_rsk_used_ao(req)) {
++ if (static_branch_unlikely(&tcp_ao_needed.key) &&
++ tcp_rsk_used_ao(req)) {
+ const union tcp_md5_addr *addr;
+ const struct tcp_ao_hdr *aoh;
+
+
+ ## net/ipv6/tcp_ipv6.c ##
+@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
+ #ifdef CONFIG_TCP_AO
+ struct tcp_ao_info *ao_info;
+
+- /* FIXME: the segment to-be-acked is not verified yet */
+- ao_info = rcu_dereference(tcptw->ao_info);
+- if (ao_info) {
+- const struct tcp_ao_hdr *aoh;
++ if (static_branch_unlikely(&tcp_ao_needed.key)) {
+
+- /* Invalid TCP option size or twice included auth */
+- if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
+- goto out;
+- if (aoh) {
+- key.ao_key = tcp_ao_established_key(ao_info,
+- aoh->rnext_keyid, -1);
++ /* FIXME: the segment to-be-acked is not verified yet */
++ ao_info = rcu_dereference(tcptw->ao_info);
++ if (ao_info) {
++ const struct tcp_ao_hdr *aoh;
++
++ /* Invalid TCP option size or twice included auth */
++ if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
++ goto out;
++ if (aoh)
++ key.ao_key = tcp_ao_established_key(ao_info,
++ aoh->rnext_keyid, -1);
+ }
+ }
+ if (key.ao_key) {
+@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
+ struct tcp_key key = {};
+
+ #ifdef CONFIG_TCP_AO
+- if (tcp_rsk_used_ao(req)) {
++ if (static_branch_unlikely(&tcp_ao_needed.key) &&
++ tcp_rsk_used_ao(req)) {
+ const struct in6_addr *addr = &ipv6_hdr(skb)->saddr;
+ const struct tcp_ao_hdr *aoh;
+ int l3index;
21: bdafd6aad825 ! 21: d44ac752d214 net/tcp: Wire up l3index to TCP-AO
@@ net/ipv4/tcp_ao.c: static int tcp_ao_copy_mkts_to_user(struct tcp_ao_info *ao_in
## net/ipv4/tcp_ipv4.c ##
@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
- const union tcp_md5_addr *addr;
- u8 *traffic_key = NULL;
- u8 keyid = 0;
-+ int l3index;
- #ifdef CONFIG_TCP_AO
- const struct tcp_ao_hdr *aoh;
- #endif
-@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
- u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
- tcp_sk(sk)->snd_nxt;
- addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
-+ l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
+ tcp_rsk_used_ao(req)) {
+ const union tcp_md5_addr *addr;
+ const struct tcp_ao_hdr *aoh;
++ int l3index;
- if (tcp_rsk_used_ao(req)) {
- #ifdef CONFIG_TCP_AO
+ /* Invalid TCP option size or twice included auth */
+ if (tcp_parse_auth_options(tcp_hdr(skb), NULL, &aoh))
@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
- if (!aoh)
return;
-- ao_key = tcp_ao_do_lookup(sk, addr, AF_INET, aoh->rnext_keyid, -1);
-+ ao_key = tcp_ao_do_lookup(sk, l3index, addr, AF_INET,
-+ aoh->rnext_keyid, -1);
- if (unlikely(!ao_key)) {
+ addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
+- key.ao_key = tcp_ao_do_lookup(sk, addr, AF_INET,
++ l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
++ key.ao_key = tcp_ao_do_lookup(sk, l3index, addr, AF_INET,
+ aoh->rnext_keyid, -1);
+ if (unlikely(!key.ao_key)) {
/* Send ACK with any matching MKT for the peer */
-- ao_key = tcp_ao_do_lookup(sk, addr, AF_INET, -1, -1);
-+ ao_key = tcp_ao_do_lookup(sk, l3index, addr, AF_INET, -1, -1);
+- key.ao_key = tcp_ao_do_lookup(sk, addr, AF_INET, -1, -1);
++ key.ao_key = tcp_ao_do_lookup(sk, l3index, addr, AF_INET, -1, -1);
/* Matching key disappeared (user removed the key?)
* let the handshake timeout.
*/
-@@ net/ipv4/tcp_ipv4.c: static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
- tcp_v4_ao_calc_key_rsk(ao_key, traffic_key, req);
- #endif
- } else {
-- int l3index;
--
-- l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
- md5_key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
- }
- /* RFC 7323 2.3
@@ net/ipv4/tcp_ipv4.c: static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
const union tcp_md5_addr *addr;
u8 prefixlen = 32;
@@ net/ipv6/tcp_ipv6.c: static void tcp_v6_reqsk_send_ack(const struct sock *sk, st
return;
if (!aoh)
return;
-- ao_key = tcp_v6_ao_do_lookup(sk, addr, aoh->rnext_keyid, -1);
-+ ao_key = tcp_ao_do_lookup(sk, l3index,
-+ (union tcp_ao_addr *)addr, AF_INET6,
-+ aoh->rnext_keyid, -1);
- if (unlikely(!ao_key)) {
+- key.ao_key = tcp_v6_ao_do_lookup(sk, addr, aoh->rnext_keyid, -1);
++ key.ao_key = tcp_ao_do_lookup(sk, l3index,
++ (union tcp_ao_addr *)addr,
++ AF_INET6, aoh->rnext_keyid, -1);
+ if (unlikely(!key.ao_key)) {
/* Send ACK with any matching MKT for the peer */
-- ao_key = tcp_v6_ao_do_lookup(sk, addr, -1, -1);
-+ ao_key = tcp_ao_do_lookup(sk, l3index,
-+ (union tcp_ao_addr *)addr,
-+ AF_INET6, -1, -1);
+- key.ao_key = tcp_v6_ao_do_lookup(sk, addr, -1, -1);
++ key.ao_key = tcp_ao_do_lookup(sk, l3index,
++ (union tcp_ao_addr *)addr,
++ AF_INET6, -1, -1);
/* Matching key disappeared (user removed the key?)
* let the handshake timeout.
*/
22: 9378801aed68 = 22: 7e164ae9d552 net/tcp: Add TCP_AO_REPAIR
23: 93327d04220e = 23: fd1fd07d4fa0 Documentation/tcp: Add TCP-AO documentation
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment