/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_minisocks.c,v 1.5 2000/11/28 17:04:10 davem Exp $
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <net/tcp.h>
#include <net/inet_common.h>

#ifdef CONFIG_SYSCTL
#define SYNC_INIT 0 /* let the user enable it */
#else
#define SYNC_INIT 1
#endif

int sysctl_tcp_tw_recycle = 0;
int sysctl_tcp_max_tw_buckets = NR_FILE*2;
int sysctl_tcp_syncookies = SYNC_INIT;
int sysctl_tcp_abort_on_overflow = 0;

static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return 1;
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return (seq == e_win && seq == end_seq);
}

/* New-style handling of TIME_WAIT sockets. */

int tcp_tw_count = 0;

/* Must be called with locally disabled BHs. */
void tcp_timewait_kill(struct tcp_tw_bucket *tw)
{
	struct tcp_ehash_bucket *ehead;
	struct tcp_bind_hashbucket *bhead;
	struct tcp_bind_bucket *tb;

	/* Unlink from established hashes. */
	ehead = &tcp_ehash[tw->hashent];
	write_lock(&ehead->lock);
	if (!tw->pprev) {
		write_unlock(&ehead->lock);
		return;
	}
	if (tw->next)
		tw->next->pprev = tw->pprev;
	*(tw->pprev) = tw->next;
	tw->pprev = NULL;
	write_unlock(&ehead->lock);

	/* Disassociate with bind bucket. */
	bhead = &tcp_bhash[tcp_bhashfn(tw->num)];
	spin_lock(&bhead->lock);
	if ((tb = tw->tb) != NULL) {
		if (tw->bind_next)
			tw->bind_next->bind_pprev = tw->bind_pprev;
		*(tw->bind_pprev) = tw->bind_next;
		tw->tb = NULL;
		if (tb->owners == NULL) {
			if (tb->next)
				tb->next->pprev = tb->pprev;
			*(tb->pprev) = tb->next;
			kmem_cache_free(tcp_bucket_cachep, tb);
		}
	}
	spin_unlock(&bhead->lock);

#ifdef INET_REFCNT_DEBUG
	if (atomic_read(&tw->refcnt) != 1) {
		printk(KERN_DEBUG "tw_bucket %p refcnt=%d\n",
		       tw, atomic_read(&tw->refcnt));
	}
#endif
	tcp_tw_put(tw);
}
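/* Editor's illustrative sketch, not part of the original file: the same
 * window check as tcp_in_window() above, in self-contained userspace form
 * with worked values. after() and before() are restated here exactly as
 * the mod-2^32 sequence comparisons from <net/tcp.h>; the block is
 * compiled out with #if 0.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define before(seq1, seq2)	((int32_t)((seq1) - (seq2)) < 0)
#define after(seq2, seq1)	before(seq1, seq2)

static int in_window(uint32_t seq, uint32_t end_seq, uint32_t s_win, uint32_t e_win)
{
	if (seq == s_win)
		return 1;				/* starts exactly at rcv_nxt */
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;				/* overlaps [s_win, e_win) */
	return (seq == e_win && seq == end_seq);	/* empty probe at right edge */
}

int main(void)
{
	uint32_t s = 1000, e = 1500;	/* rcv_nxt = 1000, rcv_wnd = 500 */

	printf("%d\n", in_window(1000, 1000, s, e));	/* 1: bare ACK at left edge */
	printf("%d\n", in_window(900, 950, s, e));	/* 0: entirely old data */
	printf("%d\n", in_window(950, 1050, s, e));	/* 1: overlaps the window */
	printf("%d\n", in_window(1500, 1500, s, e));	/* 1: empty segment at right edge */
	return 0;
}
#endif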
/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting its FIN (and, probably, a tail of data) while one or
 *   more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering out of
 *   their path. That is not quite correct. This timeout is calculated so
 *   that it exceeds the maximal retransmission timeout by enough to allow
 *   one (or more) segments sent by the peer, and our ACKs, to be lost.
 *   This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. This means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.	--ANK
 */
enum tcp_tw_status
tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb,
			   struct tcphdr *th, unsigned len)
{
	struct tcp_opt tp;
	int paws_reject = 0;

	tp.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2) && tw->ts_recent_stamp) {
		tcp_parse_options(skb, &tp, 0);

		if (tp.saw_tstamp) {
			tp.ts_recent = tw->ts_recent;
			tp.ts_recent_stamp = tw->ts_recent_stamp;
			paws_reject = tcp_paws_check(&tp, th->rst);
		}
	}

	if (tw->substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tw->rcv_nxt, tw->rcv_nxt + tw->rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && TCP_SKB_CB(skb)->seq != tw->syn_seq)
			goto kill_with_rst;

		/* Dup ACK? */
		if (!after(TCP_SKB_CB(skb)->end_seq, tw->rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			tcp_tw_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex
		 * close, reset.
		 */
		if (!th->fin || TCP_SKB_CB(skb)->end_seq != tw->rcv_nxt+1) {
kill_with_rst:
			tcp_tw_deschedule(tw);
			tcp_timewait_kill(tw);
			tcp_tw_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->substate = TCP_TIME_WAIT;
		tw->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tp.saw_tstamp) {
			tw->ts_recent_stamp = xtime.tv_sec;
			tw->ts_recent = tp.rcv_tsval;
		}

		/* I am ashamed, but I failed to make it more elegant.
		 * Yes, it is a direct reference to IP, which is impossible
		 * to generalize to IPv6. Taking into account that IPv6
		 * does not understand recycling in any case, it is not
		 * a big problem in practice. --ANK
		 */
		if (tw->family == AF_INET &&
		    sysctl_tcp_tw_recycle && tw->ts_recent_stamp &&
		    tcp_v4_tw_remember_stamp(tw))
			tcp_tw_schedule(tw, tw->timeout);
		else
			tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}
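	/* Editor's illustrative sketch, not part of the original file: the
	 * heart of the PAWS test that drives paws_reject above is a signed
	 * difference of 32-bit timestamps, which stays correct across
	 * timestamp wraparound. Compiled out with #if 0.
	 */
#if 0
#include <stdio.h>
#include <stdint.h>

/* Nonzero when tsval is older than ts_recent, modulo 2^32 (PAWS reject). */
static int tsval_older(uint32_t tsval, uint32_t ts_recent)
{
	return (int32_t)(tsval - ts_recent) < 0;
}

int main(void)
{
	printf("%d\n", tsval_older(100, 200));			/* 1: plainly old */
	printf("%d\n", tsval_older(200, 100));			/* 0: newer */
	/* Near wraparound: 0x00000010 is newer than 0xfffffff0. */
	printf("%d\n", tsval_older(0x00000010, 0xfffffff0));	/* 0 */
	return 0;
}
#endif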
	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 * connection to be larger than the largest sequence
	 * number it used on the previous connection incarnation,
	 * and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 * to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tw->rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				tcp_tw_deschedule(tw);
				tcp_timewait_kill(tw);
				tcp_tw_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);

		if (tp.saw_tstamp) {
			tw->ts_recent = tp.rcv_tsval;
			tw->ts_recent_stamp = xtime.tv_sec;
		}

		tcp_tw_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check, that the SYN carries
	   a newer sequence number, works only at rates <40Mbit/sec.
	   However, if PAWS works, it is reliable AND, even more,
	   we may even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement here: if this SYN
	   turns out to be an old duplicate (i.e. we receive an RST in reply
	   to our SYN-ACK), we must return the socket to time-wait state.
	   It is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tw->rcv_nxt) ||
	     (tp.saw_tstamp && (s32)(tw->ts_recent - tp.rcv_tsval) < 0))) {
		u32 isn = tw->snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(PAWSEstabRejected);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN, it may be both an old duplicate
		 * and a new good SYN with a random sequence number <rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			tcp_tw_schedule(tw, TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	tcp_tw_put(tw);
	return TCP_TW_SUCCESS;
}
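/* Editor's illustrative sketch, not part of the original file: why the
 * TCP_TW_SYN path above picks tw->snd_nxt + 65535 + 2 as the ISN when a
 * new SYN reopens a TIME-WAIT connection. Jumping past the maximal
 * unscaled window guarantees, per RFC 1122 4.2.2.13, that the new
 * incarnation starts above anything the old one could still have in
 * flight; the isn == 0 guard exists because (our reading of the callers)
 * 0 means "no ISN override". Compiled out with #if 0.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint32_t reopen_isn(uint32_t old_snd_nxt)
{
	uint32_t isn = old_snd_nxt + 65535 + 2;

	if (isn == 0)	/* reserved value, bump past it */
		isn++;
	return isn;
}

int main(void)
{
	printf("%u\n", reopen_isn(1000));		/* 66537 */
	printf("%u\n", reopen_isn(4294901759u));	/* wraps to 0, returns 1 */
	return 0;
}
#endif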
/* Enter the time-wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the
 * relevant info into it from the SK, and mess with hash chains
 * and list linkage.
 */
static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw)
{
	struct tcp_ehash_bucket *ehead = &tcp_ehash[sk->hashent];
	struct tcp_bind_hashbucket *bhead;
	struct sock **head, *sktw;

	write_lock(&ehead->lock);

	/* Step 1: Remove SK from established hash. */
	if (sk->pprev) {
		if (sk->next)
			sk->next->pprev = sk->pprev;
		*sk->pprev = sk->next;
		sk->pprev = NULL;
		sock_prot_dec_use(sk->prot);
	}

	/* Step 2: Hash TW into TIMEWAIT half of established hash table. */
	head = &(ehead + tcp_ehash_size)->chain;
	sktw = (struct sock *)tw;
	if ((sktw->next = *head) != NULL)
		(*head)->pprev = &sktw->next;
	*head = sktw;
	sktw->pprev = head;
	atomic_inc(&tw->refcnt);

	write_unlock(&ehead->lock);

	/* Step 3: Put TW into bind hash. The original socket stays there,
	   too. Note that any socket with sk->num != 0 MUST be bound in
	   the binding cache, even if it is closed.
	 */
	bhead = &tcp_bhash[tcp_bhashfn(sk->num)];
	spin_lock(&bhead->lock);
	tw->tb = (struct tcp_bind_bucket *)sk->prev;
	BUG_TRAP(sk->prev != NULL);
	if ((tw->bind_next = tw->tb->owners) != NULL)
		tw->tb->owners->bind_pprev = &tw->bind_next;
	tw->tb->owners = (struct sock *)tw;
	tw->bind_pprev = &tw->tb->owners;
	spin_unlock(&bhead->lock);
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct tcp_tw_bucket *tw = NULL;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int recycle_ok = 0;

	if (sysctl_tcp_tw_recycle && tp->ts_recent_stamp)
		recycle_ok = tp->af_specific->remember_stamp(sk);

	if (tcp_tw_count < sysctl_tcp_max_tw_buckets)
		tw = kmem_cache_alloc(tcp_timewait_cachep, SLAB_ATOMIC);

	if (tw != NULL) {
		int rto = (tp->rto<<2) - (tp->rto>>1);

		/* Give us an identity. */
		tw->daddr	= sk->daddr;
		tw->rcv_saddr	= sk->rcv_saddr;
		tw->bound_dev_if= sk->bound_dev_if;
		tw->num		= sk->num;
		tw->state	= TCP_TIME_WAIT;
		tw->substate	= state;
		tw->sport	= sk->sport;
		tw->dport	= sk->dport;
		tw->family	= sk->family;
		tw->reuse	= sk->reuse;
		tw->rcv_wscale	= tp->rcv_wscale;
		atomic_set(&tw->refcnt, 0);

		tw->hashent	= sk->hashent;
		tw->rcv_nxt	= tp->rcv_nxt;
		tw->snd_nxt	= tp->snd_nxt;
		tw->rcv_wnd	= tcp_receive_window(tp);
		tw->syn_seq	= tp->syn_seq;
		tw->ts_recent	= tp->ts_recent;
		tw->ts_recent_stamp = tp->ts_recent_stamp;
		tw->pprev_death = NULL;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->family == PF_INET6) {
			memcpy(&tw->v6_daddr,
			       &sk->net_pinfo.af_inet6.daddr,
			       sizeof(struct in6_addr));
			memcpy(&tw->v6_rcv_saddr,
			       &sk->net_pinfo.af_inet6.rcv_saddr,
			       sizeof(struct in6_addr));
		}
#endif
		/* Linkage updates. */
		__tcp_tw_hashdance(sk, tw);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->timeout = rto;
		} else {
			tw->timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		tcp_tw_schedule(tw, timeo);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		if (net_ratelimit())
			printk(KERN_INFO "TCP: time wait bucket table overflow\n");
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

/* Kill off TIME_WAIT sockets once their lifetime has expired. */
static int tcp_tw_death_row_slot = 0;

static void tcp_twkill(unsigned long);

static struct tcp_tw_bucket *tcp_tw_death_row[TCP_TWKILL_SLOTS];
static spinlock_t tw_death_lock = SPIN_LOCK_UNLOCKED;
static struct timer_list tcp_tw_timer = { function: tcp_twkill };

static void SMP_TIMER_NAME(tcp_twkill)(unsigned long dummy)
{
	struct tcp_tw_bucket *tw;
	int killed = 0;

	/* NOTE: compare this to the previous version where the lock was
	 * released after detaching the chain. It was racy, because tw
	 * buckets are scheduled in a non-serialized context in 2.3 (with
	 * netfilter), and with softnet it is common, because softirqs
	 * are not sequenced.
	 */
	spin_lock(&tw_death_lock);

	if (tcp_tw_count == 0)
		goto out;

	while ((tw = tcp_tw_death_row[tcp_tw_death_row_slot]) != NULL) {
		tcp_tw_death_row[tcp_tw_death_row_slot] = tw->next_death;
		tw->pprev_death = NULL;
		spin_unlock(&tw_death_lock);

		tcp_timewait_kill(tw);
		tcp_tw_put(tw);

		killed++;

		spin_lock(&tw_death_lock);
	}
	tcp_tw_death_row_slot =
		((tcp_tw_death_row_slot + 1) & (TCP_TWKILL_SLOTS - 1));

	if ((tcp_tw_count -= killed) != 0)
		mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
	net_statistics[smp_processor_id()*2].TimeWaited += killed;
out:
	spin_unlock(&tw_death_lock);
}

SMP_TIMER_DEFINE(tcp_twkill, tcp_twkill_task);
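/* Editor's illustrative sketch, not part of the original file: the shift
 * expression in tcp_time_wait() above computes 3.5*RTO in jiffies, the
 * recycled TIME-WAIT lifetime. HZ=100 is an assumption for the worked
 * numbers. Compiled out with #if 0.
 */
#if 0
#include <stdio.h>

#define HZ			100		/* assumed 2.4-era default */
#define TCP_TIMEWAIT_LEN	(60 * HZ)	/* classic 60 second TIME-WAIT */

int main(void)
{
	int rto = 20;				/* 200 ms RTO at HZ=100 */
	int tmo = (rto << 2) - (rto >> 1);	/* 4*rto - rto/2 = 3.5*rto */

	printf("recycle timeout: %d jiffies\n", tmo);		/* 70, i.e. 700 ms */
	printf("classic timeout: %d jiffies\n", TCP_TIMEWAIT_LEN); /* 6000 */
	return 0;
}
#endif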
/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void tcp_tw_deschedule(struct tcp_tw_bucket *tw)
{
	spin_lock(&tw_death_lock);
	if (tw->pprev_death) {
		if (tw->next_death)
			tw->next_death->pprev_death = tw->pprev_death;
		*tw->pprev_death = tw->next_death;
		tw->pprev_death = NULL;
		tcp_tw_put(tw);
		if (--tcp_tw_count == 0)
			del_timer(&tcp_tw_timer);
	}
	spin_unlock(&tw_death_lock);
}

/* Short-time timewait calendar */

static int tcp_twcal_hand = -1;
static int tcp_twcal_jiffie;
static void tcp_twcal_tick(unsigned long);
static struct timer_list tcp_twcal_timer = { function: tcp_twcal_tick };
static struct tcp_tw_bucket *tcp_twcal_row[TCP_TW_RECYCLE_SLOTS];
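/* Editor's illustrative sketch, not part of the original file: how
 * tcp_tw_schedule() below maps a timeout to a calendar slot. The round-up
 * shift guarantees a bucket never fires early; slots that do not fit in
 * the calendar fall back to the slow death-row timer. TCP_TW_RECYCLE_TICK
 * of 4 (16-jiffy granularity) is an assumed HZ=100 configuration.
 * Compiled out with #if 0.
 */
#if 0
#include <stdio.h>

#define TCP_TW_RECYCLE_TICK	4	/* assumed: 16-jiffy ticks at HZ=100 */
#define TCP_TW_RECYCLE_SLOTS	32

int main(void)
{
	int timeo = 70;	/* e.g. 3.5*RTO from the recycle path */
	int slot = (timeo + (1 << TCP_TW_RECYCLE_TICK) - 1) >> TCP_TW_RECYCLE_TICK;

	/* slot 5: the bucket fires 80 jiffies out, never before 70. */
	printf("slot=%d, fires in %d jiffies\n", slot, slot << TCP_TW_RECYCLE_TICK);
	printf("calendar overflow at slot >= %d\n", TCP_TW_RECYCLE_SLOTS);
	return 0;
}
#endif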
void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo)
{
	struct tcp_tw_bucket **tpp;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost (the probability of such an
	 * event is p^(N+1), where p is the probability of losing a single
	 * packet, and the time to detect the loss is about RTO*(2^N - 1)
	 * with exponential backoff). The normal timewait length is
	 * calculated so that we wait at least for one retransmitted FIN
	 * (the maximal RTO is 120sec). [ BTW Linux, following BSD, violates
	 * this requirement by waiting only 60sec; we should wait at least
	 * 240 secs. Well, 240 consumes too many resources 8) ]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to the bounds required
	 * by RTO, rather than MSL. So, if the peer understands PAWS, we
	 * kill the tw bucket after 3.5*RTO (it is important that this number
	 * is greater than the TS tick!) and detect old duplicates with the
	 * help of PAWS.
	 */
	slot = (timeo + (1<<TCP_TW_RECYCLE_TICK) - 1) >> TCP_TW_RECYCLE_TICK;

	spin_lock(&tw_death_lock);

	/* Unlink it, if it was scheduled */
	if (tw->pprev_death) {
		if (tw->next_death)
			tw->next_death->pprev_death = tw->pprev_death;
		*tw->pprev_death = tw->next_death;
		tw->pprev_death = NULL;
		tcp_tw_count--;
	} else
		atomic_inc(&tw->refcnt);

	if (slot >= TCP_TW_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= TCP_TIMEWAIT_LEN) {
			slot = TCP_TWKILL_SLOTS-1;
		} else {
			slot = (timeo + TCP_TWKILL_PERIOD-1) / TCP_TWKILL_PERIOD;
			if (slot >= TCP_TWKILL_SLOTS)
				slot = TCP_TWKILL_SLOTS-1;
		}
		tw->ttd = jiffies + timeo;
		slot = (tcp_tw_death_row_slot + slot) & (TCP_TWKILL_SLOTS - 1);
		tpp = &tcp_tw_death_row[slot];
	} else {
		tw->ttd = jiffies + (slot << TCP_TW_RECYCLE_TICK);

		if (tcp_twcal_hand < 0) {
			tcp_twcal_hand = 0;
			tcp_twcal_jiffie = jiffies;
			tcp_twcal_timer.expires = tcp_twcal_jiffie +
					(slot << TCP_TW_RECYCLE_TICK);
			add_timer(&tcp_twcal_timer);
		} else {
			if ((long)(tcp_twcal_timer.expires - jiffies) >
			    (slot << TCP_TW_RECYCLE_TICK))
				mod_timer(&tcp_twcal_timer,
					  jiffies + (slot << TCP_TW_RECYCLE_TICK));
			slot = (tcp_twcal_hand + slot) & (TCP_TW_RECYCLE_SLOTS - 1);
		}
		tpp = &tcp_twcal_row[slot];
	}

	if ((tw->next_death = *tpp) != NULL)
		(*tpp)->pprev_death = &tw->next_death;
	*tpp = tw;
	tw->pprev_death = tpp;

	if (tcp_tw_count++ == 0)
		mod_timer(&tcp_tw_timer, jiffies+TCP_TWKILL_PERIOD);
	spin_unlock(&tw_death_lock);
}

void SMP_TIMER_NAME(tcp_twcal_tick)(unsigned long dummy)
{
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	spin_lock(&tw_death_lock);
	if (tcp_twcal_hand < 0)
		goto out;

	slot = tcp_twcal_hand;
	j = tcp_twcal_jiffie;

	for (n=0; n<TCP_TW_RECYCLE_SLOTS; n++) {
		if ((long)(j - now) <= 0) {
			struct tcp_tw_bucket *tw;

			while ((tw = tcp_twcal_row[slot]) != NULL) {
				tcp_twcal_row[slot] = tw->next_death;
				tw->pprev_death = NULL;

				tcp_timewait_kill(tw);
				tcp_tw_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				tcp_twcal_jiffie = j;
				tcp_twcal_hand = slot;
			}

			if (tcp_twcal_row[slot] != NULL) {
				mod_timer(&tcp_twcal_timer, j);
				goto out;
			}
		}
		j += (1 << TCP_TW_RECYCLE_TICK);
		slot = (slot+1) & (TCP_TW_RECYCLE_SLOTS-1);
	}
	tcp_twcal_hand = -1;

out:
	if ((tcp_tw_count -= killed) == 0)
		del_timer(&tcp_tw_timer);
	net_statistics[smp_processor_id()*2].TimeWaitKilled += killed;
	spin_unlock(&tw_death_lock);
}

SMP_TIMER_DEFINE(tcp_twcal_tick, tcp_twcal_tasklet);
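/* Editor's illustrative sketch, not part of the original file: the
 * (long)(j - now) <= 0 test in tcp_twcal_tick() above is the standard
 * wraparound-safe jiffies comparison, restated here over uint32_t so the
 * worked values hold on any host. Compiled out with #if 0.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

/* Nonzero when time a is due (before or equal to b), despite wraparound. */
static int time_due(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) <= 0;
}

int main(void)
{
	uint32_t now = 0xfffffff0u;	/* jiffies just before wrapping */

	printf("%d\n", time_due(0xffffffe0u, now));	/* 1: 0x10 in the past */
	printf("%d\n", time_due(0x00000010u, now));	/* 0: 0x20 in the future */
	return 0;
}
#endif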
/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could save lots of memory writes here. The tp of the
 * listening socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, struct sk_buff *skb)
{
	struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, 0);

	if (newsk != NULL) {
		struct tcp_opt *newtp;
#ifdef CONFIG_FILTER
		struct sk_filter *filter;
#endif

		memcpy(newsk, sk, sizeof(*newsk));
		newsk->state = TCP_SYN_RECV;

		/* SANITY */
		newsk->pprev = NULL;
		newsk->prev = NULL;

		/* Clone the TCP header template */
		newsk->dport = req->rmt_port;

		sock_lock_init(newsk);
		bh_lock_sock(newsk);

		newsk->dst_lock = RW_LOCK_UNLOCKED;
		atomic_set(&newsk->rmem_alloc, 0);
		skb_queue_head_init(&newsk->receive_queue);
		atomic_set(&newsk->wmem_alloc, 0);
		skb_queue_head_init(&newsk->write_queue);
		atomic_set(&newsk->omem_alloc, 0);
		newsk->wmem_queued = 0;
		newsk->forward_alloc = 0;

		newsk->done = 0;
		newsk->userlocks = sk->userlocks & ~SOCK_BINDPORT_LOCK;
		newsk->proc = 0;
		newsk->backlog.head = newsk->backlog.tail = NULL;
		newsk->callback_lock = RW_LOCK_UNLOCKED;
		skb_queue_head_init(&newsk->error_queue);
		newsk->write_space = tcp_write_space;

#ifdef CONFIG_FILTER
		if ((filter = newsk->filter) != NULL)
			sk_filter_charge(newsk, filter);
#endif

		/* Now setup tcp_opt */
		newtp = &(newsk->tp_pinfo.af_tcp);
		newtp->pred_flags = 0;
		newtp->rcv_nxt = req->rcv_isn + 1;
		newtp->snd_nxt = req->snt_isn + 1;
		newtp->snd_una = req->snt_isn + 1;
		newtp->snd_sml = req->snt_isn + 1;

		tcp_delack_init(newtp);

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, req->snt_isn, req->rcv_isn);

		newtp->retransmits = 0;
		newtp->backoff = 0;
		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newtp->rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->left_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = 0x7fffffff;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = 2;
		newtp->snd_cwnd_cnt = 0;

		newtp->ca_state = TCP_CA_Open;
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->send_head = NULL;
		newtp->rcv_wup = req->rcv_isn + 1;
		newtp->write_seq = req->snt_isn + 1;
		newtp->pushed_seq = newtp->write_seq;
		newtp->copied_seq = req->rcv_isn + 1;

		newtp->saw_tstamp = 0;

		newtp->dsack = 0;
		newtp->eff_sacks = 0;

		newtp->probes_out = 0;
		newtp->num_sacks = 0;
		newtp->syn_seq = req->rcv_isn;
		newtp->fin_seq = req->rcv_isn;
		newtp->urg_data = 0;
		newtp->listen_opt = NULL;
		newtp->accept_queue = newtp->accept_queue_tail = NULL;
		/* Deinitialize syn_wait_lock to trap illegal accesses. */
		memset(&newtp->syn_wait_lock, 0, sizeof(newtp->syn_wait_lock));

		/* Back to base struct sock members. */
		newsk->err = 0;
		newsk->priority = 0;
		atomic_set(&newsk->refcnt, 2);
#ifdef INET_REFCNT_DEBUG
		atomic_inc(&inet_sock_nr);
#endif
		atomic_inc(&tcp_sockets_allocated);

		if (newsk->keepopen)
			tcp_reset_keepalive_timer(newsk,
						  keepalive_time_when(newtp));
		newsk->socket = NULL;
		newsk->sleep = NULL;

		newtp->tstamp_ok = req->tstamp_ok;
		if ((newtp->sack_ok = req->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				newtp->sack_ok |= 2;
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->wscale_ok = req->wscale_ok;
		if (newtp->wscale_ok) {
			newtp->snd_wscale = req->snd_wscale;
			newtp->rcv_wscale = req->rcv_wscale;
		} else {
			newtp->snd_wscale = newtp->rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535);
		}
		newtp->snd_wnd = ntohs(skb->h.th->window) << newtp->snd_wscale;
		newtp->max_window = newtp->snd_wnd;

		if (newtp->tstamp_ok) {
			newtp->ts_recent = req->ts_recent;
			newtp->ts_recent_stamp = xtime.tv_sec;
			newtp->tcp_header_len = sizeof(struct tcphdr) +
						TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
		if (skb->len >= TCP_MIN_RCVMSS + newtp->tcp_header_len)
			newtp->ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);
	}
	return newsk;
}
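/* Editor's illustrative sketch, not part of the original file: the send
 * window arithmetic from the child setup above. With window scaling the
 * 16-bit window field is shifted by the peer's scale; without it the
 * window (and window_clamp) can never exceed 65535. Compiled out with
 * #if 0.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t raw = 16384;	/* window field as carried in the TCP header */
	int snd_wscale = 2;	/* peer's shift, if wscale was negotiated */

	printf("scaled snd_wnd: %u\n", (unsigned)raw << snd_wscale);	/* 65536 */
	printf("unscaled clamp: %u\n", 65535u);	/* the wscale_ok == 0 case */
	return 0;
}
#endif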
/*
 * Process an incoming packet for SYN_RECV sockets represented
 * as an open_request.
 */
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct open_request *req,
			   struct open_request **prev)
{
	struct tcphdr *th = skb->h.th;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	int paws_reject = 0;
	struct tcp_opt ttp;
	struct sock *child;

	ttp.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &ttp, 0);

		if (ttp.saw_tstamp) {
			ttp.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required: it can be estimated (approximately)
			 * from other data.
			 */
			ttp.ts_recent_stamp = xtime.tv_sec -
				((TCP_TIMEOUT_INIT/HZ) << req->retrans);
			paws_reject = tcp_paws_check(&ttp, th->rst);
		}
	}

	/* Check for a pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == req->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 * describe the SYN-RECV state. All the description is
		 * wrong; we cannot trust it and should rely only on
		 * common sense and implementation experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->class->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}
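	/* Editor's illustrative sketch, not part of the original file: the
	 * ts_recent_stamp estimate above reconstructs the age of the stored
	 * timestamp from the SYN-ACK retransmit count and the exponentially
	 * backed-off retransmit timer. TCP_TIMEOUT_INIT/HZ = 3 seconds is
	 * the assumed 2.4-era initial RTO. Compiled out with #if 0.
	 */
#if 0
#include <stdio.h>

#define TCP_TIMEOUT_INIT_SEC 3	/* assumed initial RTO, in seconds */

int main(void)
{
	long now = 1000000;	/* stand-in for xtime.tv_sec */
	int retrans;

	for (retrans = 0; retrans <= 3; retrans++)
		printf("retrans=%d -> estimated stamp %ld (age %d s)\n",
		       retrans, now - (TCP_TIMEOUT_INIT_SEC << retrans),
		       TCP_TIMEOUT_INIT_SEC << retrans);
	return 0;
}
#endif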
	/* Further reproduces the section "SEGMENT ARRIVES"
	   for the SYN-RECEIVED state of RFC793.
	   It is broken, however: it fails only when SYNs are crossed,
	   which is impossible in our case.

	   But generally, we should (the RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence we do not either.

	   Note that the case is absolutely generic: we cannot optimize
	   anything here without violating the protocol. All the checks
	   must be made before an attempt to create a socket.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject ||
	    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
			   req->rcv_isn+1, req->rcv_isn+1+req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->class->send_ack(skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(PAWSEstabRejected);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (ttp.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, req->rcv_isn+1))
		req->ts_recent = ttp.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == req->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at req->rcv_isn+1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN))
		goto embryonic_reset;

	/* RFC793: "fifth check the ACK field" */

	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* Invalid ACK: reset will be sent by the listening socket */
	if (TCP_SKB_CB(skb)->ack_seq != req->snt_isn+1)
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* If TCP_DEFER_ACCEPT is set, drop a bare ACK. */
	if (tp->defer_accept && TCP_SKB_CB(skb)->end_seq == req->rcv_isn+1) {
		req->acked = 1;
		return NULL;
	}

	/* OK, the ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it is dropped after the
	 * socket is created, expect trouble.
	 */
	child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	tcp_synq_unlink(tp, req, prev);
	tcp_synq_removed(sk, req);

	tcp_acceptq_queue(sk, req, child);
	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		req->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(EmbryonicRsts);
	if (!(flg & TCP_FLAG_RST))
		req->class->send_reset(skb);

	tcp_synq_drop(sk, req, prev);
	return NULL;
}

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 */
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->state;

	if (child->lock.users == 0) {
		ret = tcp_rcv_state_process(child, skb, skb->h.th, skb->len);

		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->state != state)
			parent->data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
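/* Editor's illustrative toy model, not part of the original file: the
 * dispatch decision in tcp_child_process() above. A segment for the
 * freshly created child is processed immediately only when no user
 * context holds the child's lock; otherwise it is parked on the backlog
 * and replayed at release time. Compiled out with #if 0.
 */
#if 0
#include <stdio.h>

struct toy_sock {
	int lock_users;	/* models child->lock.users */
};

static const char *dispatch(const struct toy_sock *child)
{
	return child->lock_users == 0 ? "process now" : "queue to backlog";
}

int main(void)
{
	struct toy_sock unlocked = { 0 }, locked = { 1 };

	printf("%s\n", dispatch(&unlocked));	/* process now */
	printf("%s\n", dispatch(&locked));	/* queue to backlog */
	return 0;
}
#endif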