author    | David S. Miller <davem@nuts.davemloft.net> | 2005-01-05 23:13:51 -0800
committer | David S. Miller <davem@nuts.davemloft.net> | 2005-01-05 23:13:51 -0800
commit    | e093407dfb2d957d0055528d6d198e191296eb71 (patch)
tree      | 6db577d5ea852928ec651dd8a6d261cd1649c4ba /net
parent    | c327acdc68eea4ac7871ec7313cc896e9a74a57d (diff)
parent    | ef4a8922c78eecaad68d507d084de81c82db1d2b (diff)
download  | history-e093407dfb2d957d0055528d6d198e191296eb71.tar.gz
Merge bk://kernel.bkbits.net/acme/connection_sock-2.6
into nuts.davemloft.net:/disk1/BK/net-2.6
Diffstat (limited to 'net')
-rw-r--r-- | net/ipv4/ip_sockglue.c   |   2
-rw-r--r-- | net/ipv4/syncookies.c    |   6
-rw-r--r-- | net/ipv4/tcp.c           |  48
-rw-r--r-- | net/ipv4/tcp_diag.c      |   4
-rw-r--r-- | net/ipv4/tcp_input.c     | 240
-rw-r--r-- | net/ipv4/tcp_ipv4.c      |  30
-rw-r--r-- | net/ipv4/tcp_minisocks.c |  12
-rw-r--r-- | net/ipv4/tcp_output.c    |  63
-rw-r--r-- | net/ipv4/tcp_timer.c     |  20
-rw-r--r-- | net/ipv6/ipv6_sockglue.c |   4
-rw-r--r-- | net/ipv6/tcp_ipv6.c      |  28
-rw-r--r-- | net/sunrpc/svcsock.c     |   2
-rw-r--r-- | net/sunrpc/xprt.c        |   3
13 files changed, 231 insertions(+), 231 deletions(-)
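Nearly every hunk below applies the same mechanical rename: per-connection TCP state formerly typed as struct tcp_opt becomes struct tcp_sock, retrieved through the unchanged tcp_sk() accessor (the matching insertion/deletion counts reflect this one-for-one substitution). A minimal sketch of the pattern as it recurs throughout the patch; the accessor body shown is an assumption for illustration, not necessarily the kernel's exact definition:

    /* Hypothetical caller showing the rename this merge applies everywhere. */
    static void example(struct sock *sk)
    {
            /* Before this merge the declaration read:
             *         struct tcp_opt *tp = tcp_sk(sk);
             * Only the structure name changes; all field accesses stay the same. */
            struct tcp_sock *tp = tcp_sk(sk);

            (void)tp;       /* tp->snd_una, tp->rcv_nxt, ... used as before */
    }

    /* Plausible (assumed) post-rename accessor: struct tcp_sock embeds
     * struct sock at its start, so the cast is valid. */
    static inline struct tcp_sock *tcp_sk(const struct sock *sk)
    {
            return (struct tcp_sock *)sk;
    }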
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index ee4e0362846573..5b242e1f60457b 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -429,7 +429,7 @@ int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, if (err) break; if (sk->sk_type == SOCK_STREAM) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) if (sk->sk_family == PF_INET || (!((1 << sk->sk_state) & diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 473813b3cbae00..5d6d2138ac91a7 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -47,7 +47,7 @@ static __u16 const msstab[] = { */ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int mssind; const __u16 mss = *mssp; @@ -98,7 +98,7 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, struct open_request *req, struct dst_entry *dst) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct sock *child; child = tp->af_specific->syn_recv_sock(sk, skb, req, dst); @@ -114,7 +114,7 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, struct ip_options *opt) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); __u32 cookie = ntohl(skb->h.th->ack_seq) - 1; struct sock *ret = sk; struct open_request *req; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 2469aba903ee6a..dd6b9aff0280d2 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -331,7 +331,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) { unsigned int mask; struct sock *sk = sock->sk; - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); poll_wait(file, sk->sk_sleep, wait); if (sk->sk_state == TCP_LISTEN) @@ -414,7 +414,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait) int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int answ; switch (cmd) { @@ -462,7 +462,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) int tcp_listen_start(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct tcp_listen_opt *lopt; sk->sk_max_ack_backlog = 0; @@ -515,7 +515,7 @@ int tcp_listen_start(struct sock *sk) static void tcp_listen_stop (struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct tcp_listen_opt *lopt = tp->listen_opt; struct open_request *acc_req = tp->accept_queue; struct open_request *req; @@ -579,18 +579,18 @@ static void tcp_listen_stop (struct sock *sk) BUG_TRAP(!sk->sk_ack_backlog); } -static inline void tcp_mark_push(struct tcp_opt *tp, struct sk_buff *skb) +static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb) { TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; tp->pushed_seq = tp->write_seq; } -static inline int forced_push(struct tcp_opt *tp) +static inline int forced_push(struct tcp_sock *tp) { return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); } -static inline void skb_entail(struct sock *sk, struct tcp_opt *tp, +static inline void skb_entail(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) { skb->csum = 0; @@ -606,7 +606,7 @@ static inline void 
skb_entail(struct sock *sk, struct tcp_opt *tp, tp->nonagle &= ~TCP_NAGLE_PUSH; } -static inline void tcp_mark_urg(struct tcp_opt *tp, int flags, +static inline void tcp_mark_urg(struct tcp_sock *tp, int flags, struct sk_buff *skb) { if (flags & MSG_OOB) { @@ -616,7 +616,7 @@ static inline void tcp_mark_urg(struct tcp_opt *tp, int flags, } } -static inline void tcp_push(struct sock *sk, struct tcp_opt *tp, int flags, +static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags, int mss_now, int nonagle) { if (sk->sk_send_head) { @@ -632,7 +632,7 @@ static inline void tcp_push(struct sock *sk, struct tcp_opt *tp, int flags, static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset, size_t psize, int flags) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int mss_now; int err; ssize_t copied; @@ -761,7 +761,7 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, #define TCP_PAGE(sk) (sk->sk_sndmsg_page) #define TCP_OFF(sk) (sk->sk_sndmsg_off) -static inline int select_size(struct sock *sk, struct tcp_opt *tp) +static inline int select_size(struct sock *sk, struct tcp_sock *tp) { int tmp = tp->mss_cache_std; @@ -779,7 +779,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t size) { struct iovec *iov; - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; int iovlen, flags; int mss_now; @@ -1003,7 +1003,7 @@ static int tcp_recv_urg(struct sock *sk, long timeo, struct msghdr *msg, int len, int flags, int *addr_len) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); /* No URG data to read. */ if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data || @@ -1053,7 +1053,7 @@ static int tcp_recv_urg(struct sock *sk, long timeo, */ static void cleanup_rbuf(struct sock *sk, int copied) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int time_to_ack = 0; #if TCP_DEBUG @@ -1108,7 +1108,7 @@ static void cleanup_rbuf(struct sock *sk, int copied) static void tcp_prequeue_process(struct sock *sk) { struct sk_buff *skb; - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue)); @@ -1155,7 +1155,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, sk_read_actor_t recv_actor) { struct sk_buff *skb; - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); u32 seq = tp->copied_seq; u32 offset; int copied = 0; @@ -1214,7 +1214,7 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len, int nonblock, int flags, int *addr_len) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int copied = 0; u32 peek_seq; u32 *seq; @@ -1720,7 +1720,7 @@ adjudge_to_death: */ if (sk->sk_state == TCP_FIN_WAIT2) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); if (tp->linger2 < 0) { tcp_set_state(sk, TCP_CLOSE); tcp_send_active_reset(sk, GFP_ATOMIC); @@ -1774,7 +1774,7 @@ static inline int tcp_need_reset(int state) int tcp_disconnect(struct sock *sk, int flags) { struct inet_sock *inet = inet_sk(sk); - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int err = 0; int old_state = sk->sk_state; @@ -1836,7 +1836,7 @@ int tcp_disconnect(struct sock *sk, int flags) */ static int wait_for_connect(struct sock *sk, long timeo) { - struct tcp_opt *tp = tcp_sk(sk); + struct 
tcp_sock *tp = tcp_sk(sk); DEFINE_WAIT(wait); int err; @@ -1884,7 +1884,7 @@ static int wait_for_connect(struct sock *sk, long timeo) struct sock *tcp_accept(struct sock *sk, int flags, int *err) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct open_request *req; struct sock *newsk; int error; @@ -1935,7 +1935,7 @@ out: int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, int optlen) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int val; int err = 0; @@ -2099,7 +2099,7 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, /* Return information about state of tcp endpoint in API format. */ void tcp_get_info(struct sock *sk, struct tcp_info *info) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); u32 now = tcp_time_stamp; memset(info, 0, sizeof(*info)); @@ -2158,7 +2158,7 @@ EXPORT_SYMBOL_GPL(tcp_get_info); int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int val, len; if (level != SOL_TCP) diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c index cbaa439ee83c03..313c1408da33ac 100644 --- a/net/ipv4/tcp_diag.c +++ b/net/ipv4/tcp_diag.c @@ -56,7 +56,7 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk, int ext, u32 pid, u32 seq, u16 nlmsg_flags) { struct inet_sock *inet = inet_sk(sk); - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct tcpdiagmsg *r; struct nlmsghdr *nlh; struct tcp_info *info = NULL; @@ -512,7 +512,7 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk, { struct tcpdiag_entry entry; struct tcpdiagreq *r = NLMSG_DATA(cb->nlh); - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct tcp_listen_opt *lopt; struct rtattr *bc = NULL; struct inet_sock *inet = inet_sk(sk); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 15c90606726653..5a8085e923025d 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -127,7 +127,8 @@ int sysctl_tcp_bic_low_window = 14; /* Adapt the MSS value used to make delayed ack decision to the * real world. */ -static __inline__ void tcp_measure_rcv_mss(struct tcp_opt *tp, struct sk_buff *skb) +static inline void tcp_measure_rcv_mss(struct tcp_sock *tp, + struct sk_buff *skb) { unsigned int len, lss; @@ -170,7 +171,7 @@ static __inline__ void tcp_measure_rcv_mss(struct tcp_opt *tp, struct sk_buff *s } } -static void tcp_incr_quickack(struct tcp_opt *tp) +static void tcp_incr_quickack(struct tcp_sock *tp) { unsigned quickacks = tp->rcv_wnd/(2*tp->ack.rcv_mss); @@ -180,7 +181,7 @@ static void tcp_incr_quickack(struct tcp_opt *tp) tp->ack.quick = min(quickacks, TCP_MAX_QUICKACKS); } -void tcp_enter_quickack_mode(struct tcp_opt *tp) +void tcp_enter_quickack_mode(struct tcp_sock *tp) { tcp_incr_quickack(tp); tp->ack.pingpong = 0; @@ -191,7 +192,7 @@ void tcp_enter_quickack_mode(struct tcp_opt *tp) * and the session is not interactive. */ -static __inline__ int tcp_in_quickack_mode(struct tcp_opt *tp) +static __inline__ int tcp_in_quickack_mode(struct tcp_sock *tp) { return (tp->ack.quick && !tp->ack.pingpong); } @@ -236,8 +237,8 @@ static void tcp_fixup_sndbuf(struct sock *sk) */ /* Slow part of check#2. 
*/ -static int -__tcp_grow_window(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb) +static int __tcp_grow_window(struct sock *sk, struct tcp_sock *tp, + struct sk_buff *skb) { /* Optimize this! */ int truesize = tcp_win_from_space(skb->truesize)/2; @@ -253,8 +254,8 @@ __tcp_grow_window(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb) return 0; } -static __inline__ void -tcp_grow_window(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb) +static inline void tcp_grow_window(struct sock *sk, struct tcp_sock *tp, + struct sk_buff *skb) { /* Check #1 */ if (tp->rcv_ssthresh < tp->window_clamp && @@ -281,7 +282,7 @@ tcp_grow_window(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb) static void tcp_fixup_rcvbuf(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff); /* Try to select rcvbuf so that 4 mss-sized segments @@ -299,7 +300,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk) */ static void tcp_init_buffer_space(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int maxwin; if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) @@ -330,7 +331,7 @@ static void tcp_init_buffer_space(struct sock *sk) tp->snd_cwnd_stamp = tcp_time_stamp; } -static void init_bictcp(struct tcp_opt *tp) +static void init_bictcp(struct tcp_sock *tp) { tp->bictcp.cnt = 0; @@ -340,7 +341,7 @@ static void init_bictcp(struct tcp_opt *tp) } /* 5. Recalculate window clamp after socket hit its memory bounds. */ -static void tcp_clamp_window(struct sock *sk, struct tcp_opt *tp) +static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp) { struct sk_buff *skb; unsigned int app_win = tp->rcv_nxt - tp->copied_seq; @@ -388,7 +389,7 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_opt *tp) * though this reference is out of date. A new paper * is pending. */ -static void tcp_rcv_rtt_update(struct tcp_opt *tp, u32 sample, int win_dep) +static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) { u32 new_sample = tp->rcv_rtt_est.rtt; long m = sample; @@ -421,7 +422,7 @@ static void tcp_rcv_rtt_update(struct tcp_opt *tp, u32 sample, int win_dep) tp->rcv_rtt_est.rtt = new_sample; } -static inline void tcp_rcv_rtt_measure(struct tcp_opt *tp) +static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) { if (tp->rcv_rtt_est.time == 0) goto new_measure; @@ -436,7 +437,7 @@ new_measure: tp->rcv_rtt_est.time = tcp_time_stamp; } -static inline void tcp_rcv_rtt_measure_ts(struct tcp_opt *tp, struct sk_buff *skb) +static inline void tcp_rcv_rtt_measure_ts(struct tcp_sock *tp, struct sk_buff *skb) { if (tp->rcv_tsecr && (TCP_SKB_CB(skb)->end_seq - @@ -450,7 +451,7 @@ static inline void tcp_rcv_rtt_measure_ts(struct tcp_opt *tp, struct sk_buff *sk */ void tcp_rcv_space_adjust(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int time; int space; @@ -511,7 +512,7 @@ new_measure: * each ACK we send, he increments snd_cwnd and transmits more of his * queue. -DaveM */ -static void tcp_event_data_recv(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb) +static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) { u32 now; @@ -558,7 +559,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_opt *tp, struct sk_b /* When starting a new connection, pin down the current choice of * congestion algorithm. 
*/ -void tcp_ca_init(struct tcp_opt *tp) +void tcp_ca_init(struct tcp_sock *tp) { if (sysctl_tcp_westwood) tp->adv_cong = TCP_WESTWOOD; @@ -579,7 +580,7 @@ void tcp_ca_init(struct tcp_opt *tp) * o min-filter RTT samples from a much longer window (forever for now) * to find the propagation delay (baseRTT) */ -static inline void vegas_rtt_calc(struct tcp_opt *tp, __u32 rtt) +static inline void vegas_rtt_calc(struct tcp_sock *tp, __u32 rtt) { __u32 vrtt = rtt + 1; /* Never allow zero rtt or baseRTT */ @@ -603,7 +604,7 @@ static inline void vegas_rtt_calc(struct tcp_opt *tp, __u32 rtt) * To save cycles in the RFC 1323 implementation it was better to break * it up into three procedures. -- erics */ -static void tcp_rtt_estimator(struct tcp_opt *tp, __u32 mrtt) +static void tcp_rtt_estimator(struct tcp_sock *tp, __u32 mrtt) { long m = mrtt; /* RTT */ @@ -673,7 +674,7 @@ static void tcp_rtt_estimator(struct tcp_opt *tp, __u32 mrtt) /* Calculate rto without backoff. This is the second half of Van Jacobson's * routine referred to above. */ -static __inline__ void tcp_set_rto(struct tcp_opt *tp) +static inline void tcp_set_rto(struct tcp_sock *tp) { /* Old crap is replaced with new one. 8) * @@ -697,7 +698,7 @@ static __inline__ void tcp_set_rto(struct tcp_opt *tp) /* NOTE: clamping at TCP_RTO_MIN is not required, current algo * guarantees that rto is higher. */ -static __inline__ void tcp_bound_rto(struct tcp_opt *tp) +static inline void tcp_bound_rto(struct tcp_sock *tp) { if (tp->rto > TCP_RTO_MAX) tp->rto = TCP_RTO_MAX; @@ -709,7 +710,7 @@ static __inline__ void tcp_bound_rto(struct tcp_opt *tp) */ void tcp_update_metrics(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct dst_entry *dst = __sk_dst_get(sk); if (sysctl_tcp_nometrics_save) @@ -797,7 +798,7 @@ void tcp_update_metrics(struct sock *sk) } /* Numbers are taken from RFC2414. */ -__u32 tcp_init_cwnd(struct tcp_opt *tp, struct dst_entry *dst) +__u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst) { __u32 cwnd = (dst ? 
dst_metric(dst, RTAX_INITCWND) : 0); @@ -814,7 +815,7 @@ __u32 tcp_init_cwnd(struct tcp_opt *tp, struct dst_entry *dst) static void tcp_init_metrics(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct dst_entry *dst = __sk_dst_get(sk); if (dst == NULL) @@ -883,7 +884,7 @@ reset: } } -static void tcp_update_reordering(struct tcp_opt *tp, int metric, int ts) +static void tcp_update_reordering(struct tcp_sock *tp, int metric, int ts) { if (metric > tp->reordering) { tp->reordering = min(TCP_MAX_REORDERING, metric); @@ -961,7 +962,7 @@ static void tcp_update_reordering(struct tcp_opt *tp, int metric, int ts) static int tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked; struct tcp_sack_block *sp = (struct tcp_sack_block *)(ptr+2); int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3; @@ -1178,7 +1179,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_ */ void tcp_enter_frto(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; tp->frto_counter = 1; @@ -1215,7 +1216,7 @@ void tcp_enter_frto(struct sock *sk) */ static void tcp_enter_frto_loss(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; int cnt = 0; @@ -1258,7 +1259,7 @@ static void tcp_enter_frto_loss(struct sock *sk) init_bictcp(tp); } -void tcp_clear_retrans(struct tcp_opt *tp) +void tcp_clear_retrans(struct tcp_sock *tp) { tcp_set_pcount(&tp->left_out, 0); tcp_set_pcount(&tp->retrans_out, 0); @@ -1277,7 +1278,7 @@ void tcp_clear_retrans(struct tcp_opt *tp) */ void tcp_enter_loss(struct sock *sk, int how) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; int cnt = 0; @@ -1321,7 +1322,7 @@ void tcp_enter_loss(struct sock *sk, int how) TCP_ECN_queue_cwr(tp); } -static int tcp_check_sack_reneging(struct sock *sk, struct tcp_opt *tp) +static int tcp_check_sack_reneging(struct sock *sk, struct tcp_sock *tp) { struct sk_buff *skb; @@ -1344,18 +1345,18 @@ static int tcp_check_sack_reneging(struct sock *sk, struct tcp_opt *tp) return 0; } -static inline int tcp_fackets_out(struct tcp_opt *tp) +static inline int tcp_fackets_out(struct tcp_sock *tp) { return IsReno(tp) ? tcp_get_pcount(&tp->sacked_out)+1 : tcp_get_pcount(&tp->fackets_out); } -static inline int tcp_skb_timedout(struct tcp_opt *tp, struct sk_buff *skb) +static inline int tcp_skb_timedout(struct tcp_sock *tp, struct sk_buff *skb) { return (tcp_time_stamp - TCP_SKB_CB(skb)->when > tp->rto); } -static inline int tcp_head_timedout(struct sock *sk, struct tcp_opt *tp) +static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp) { return tcp_get_pcount(&tp->packets_out) && tcp_skb_timedout(tp, skb_peek(&sk->sk_write_queue)); @@ -1454,8 +1455,7 @@ static inline int tcp_head_timedout(struct sock *sk, struct tcp_opt *tp) * Main question: may we further continue forward transmission * with the same cwnd? */ -static int -tcp_time_to_recover(struct sock *sk, struct tcp_opt *tp) +static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp) { __u32 packets_out; @@ -1493,7 +1493,7 @@ tcp_time_to_recover(struct sock *sk, struct tcp_opt *tp) * in assumption of absent reordering, interpret this as reordering. * The only another reason could be bug in receiver TCP. 
*/ -static void tcp_check_reno_reordering(struct tcp_opt *tp, int addend) +static void tcp_check_reno_reordering(struct tcp_sock *tp, int addend) { u32 holes; @@ -1512,7 +1512,7 @@ static void tcp_check_reno_reordering(struct tcp_opt *tp, int addend) /* Emulate SACKs for SACKless connection: account for a new dupack. */ -static void tcp_add_reno_sack(struct tcp_opt *tp) +static void tcp_add_reno_sack(struct tcp_sock *tp) { tcp_inc_pcount_explicit(&tp->sacked_out, 1); tcp_check_reno_reordering(tp, 0); @@ -1521,7 +1521,7 @@ static void tcp_add_reno_sack(struct tcp_opt *tp) /* Account for ACK, ACKing some data in Reno Recovery phase. */ -static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_opt *tp, int acked) +static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_sock *tp, int acked) { if (acked > 0) { /* One ACK acked hole. The rest eat duplicate ACKs. */ @@ -1534,15 +1534,15 @@ static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_opt *tp, int acked tcp_sync_left_out(tp); } -static inline void tcp_reset_reno_sack(struct tcp_opt *tp) +static inline void tcp_reset_reno_sack(struct tcp_sock *tp) { tcp_set_pcount(&tp->sacked_out, 0); tcp_set_pcount(&tp->left_out, tcp_get_pcount(&tp->lost_out)); } /* Mark head of queue up as lost. */ -static void -tcp_mark_head_lost(struct sock *sk, struct tcp_opt *tp, int packets, u32 high_seq) +static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp, + int packets, u32 high_seq) { struct sk_buff *skb; int cnt = packets; @@ -1563,7 +1563,7 @@ tcp_mark_head_lost(struct sock *sk, struct tcp_opt *tp, int packets, u32 high_se /* Account newly detected lost packet(s) */ -static void tcp_update_scoreboard(struct sock *sk, struct tcp_opt *tp) +static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp) { if (IsFack(tp)) { int lost = tcp_get_pcount(&tp->fackets_out) - tp->reordering; @@ -1596,7 +1596,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_opt *tp) /* CWND moderation, preventing bursts due to too big ACKs * in dubious situations. */ -static __inline__ void tcp_moderate_cwnd(struct tcp_opt *tp) +static inline void tcp_moderate_cwnd(struct tcp_sock *tp) { tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+tcp_max_burst(tp)); @@ -1605,7 +1605,7 @@ static __inline__ void tcp_moderate_cwnd(struct tcp_opt *tp) /* Decrease cwnd each second ack. */ -static void tcp_cwnd_down(struct tcp_opt *tp) +static void tcp_cwnd_down(struct tcp_sock *tp) { int decr = tp->snd_cwnd_cnt + 1; __u32 limit; @@ -1635,7 +1635,7 @@ static void tcp_cwnd_down(struct tcp_opt *tp) /* Nothing was retransmitted or returned timestamp is less * than timestamp of the first retransmission. */ -static __inline__ int tcp_packet_delayed(struct tcp_opt *tp) +static inline int tcp_packet_delayed(struct tcp_sock *tp) { return !tp->retrans_stamp || (tp->saw_tstamp && tp->rcv_tsecr && @@ -1645,7 +1645,7 @@ static __inline__ int tcp_packet_delayed(struct tcp_opt *tp) /* Undo procedures. */ #if FASTRETRANS_DEBUG > 1 -static void DBGUNDO(struct sock *sk, struct tcp_opt *tp, const char *msg) +static void DBGUNDO(struct sock *sk, struct tcp_sock *tp, const char *msg) { struct inet_sock *inet = inet_sk(sk); printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n", @@ -1659,7 +1659,7 @@ static void DBGUNDO(struct sock *sk, struct tcp_opt *tp, const char *msg) #define DBGUNDO(x...) 
do { } while (0) #endif -static void tcp_undo_cwr(struct tcp_opt *tp, int undo) +static void tcp_undo_cwr(struct tcp_sock *tp, int undo) { if (tp->prior_ssthresh) { tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh<<1); @@ -1675,14 +1675,14 @@ static void tcp_undo_cwr(struct tcp_opt *tp, int undo) tp->snd_cwnd_stamp = tcp_time_stamp; } -static inline int tcp_may_undo(struct tcp_opt *tp) +static inline int tcp_may_undo(struct tcp_sock *tp) { return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); } /* People celebrate: "We love our President!" */ -static int tcp_try_undo_recovery(struct sock *sk, struct tcp_opt *tp) +static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp) { if (tcp_may_undo(tp)) { /* Happy end! We did not retransmit anything @@ -1708,7 +1708,7 @@ static int tcp_try_undo_recovery(struct sock *sk, struct tcp_opt *tp) } /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */ -static void tcp_try_undo_dsack(struct sock *sk, struct tcp_opt *tp) +static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp) { if (tp->undo_marker && !tp->undo_retrans) { DBGUNDO(sk, tp, "D-SACK"); @@ -1720,7 +1720,8 @@ static void tcp_try_undo_dsack(struct sock *sk, struct tcp_opt *tp) /* Undo during fast recovery after partial ACK. */ -static int tcp_try_undo_partial(struct sock *sk, struct tcp_opt *tp, int acked) +static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp, + int acked) { /* Partial ACK arrived. Force Hoe's retransmit. */ int failed = IsReno(tp) || tcp_get_pcount(&tp->fackets_out)>tp->reordering; @@ -1748,7 +1749,7 @@ static int tcp_try_undo_partial(struct sock *sk, struct tcp_opt *tp, int acked) } /* Undo during loss recovery after partial ACK. */ -static int tcp_try_undo_loss(struct sock *sk, struct tcp_opt *tp) +static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp) { if (tcp_may_undo(tp)) { struct sk_buff *skb; @@ -1769,7 +1770,7 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_opt *tp) return 0; } -static __inline__ void tcp_complete_cwr(struct tcp_opt *tp) +static inline void tcp_complete_cwr(struct tcp_sock *tp) { if (tcp_westwood_cwnd(tp)) tp->snd_ssthresh = tp->snd_cwnd; @@ -1778,7 +1779,7 @@ static __inline__ void tcp_complete_cwr(struct tcp_opt *tp) tp->snd_cwnd_stamp = tcp_time_stamp; } -static void tcp_try_to_open(struct sock *sk, struct tcp_opt *tp, int flag) +static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag) { tcp_set_pcount(&tp->left_out, tcp_get_pcount(&tp->sacked_out)); @@ -1821,7 +1822,7 @@ static void tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, int prior_packets, int flag) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int is_dupack = (tp->snd_una == prior_snd_una && !(flag&FLAG_NOT_DUP)); /* Some technical things: @@ -1970,7 +1971,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una, /* Read draft-ietf-tcplw-high-performance before mucking * with this code. (Superceeds RFC1323) */ -static void tcp_ack_saw_tstamp(struct tcp_opt *tp, int flag) +static void tcp_ack_saw_tstamp(struct tcp_sock *tp, int flag) { __u32 seq_rtt; @@ -1996,7 +1997,7 @@ static void tcp_ack_saw_tstamp(struct tcp_opt *tp, int flag) tcp_bound_rto(tp); } -static void tcp_ack_no_tstamp(struct tcp_opt *tp, u32 seq_rtt, int flag) +static void tcp_ack_no_tstamp(struct tcp_sock *tp, u32 seq_rtt, int flag) { /* We don't have a timestamp. 
Can only use * packets that are not retransmitted to determine @@ -2016,8 +2017,8 @@ static void tcp_ack_no_tstamp(struct tcp_opt *tp, u32 seq_rtt, int flag) tcp_bound_rto(tp); } -static __inline__ void -tcp_ack_update_rtt(struct tcp_opt *tp, int flag, s32 seq_rtt) +static inline void tcp_ack_update_rtt(struct tcp_sock *tp, + int flag, s32 seq_rtt) { /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */ if (tp->saw_tstamp && tp->rcv_tsecr) @@ -2039,7 +2040,7 @@ tcp_ack_update_rtt(struct tcp_opt *tp, int flag, s32 seq_rtt) * Unless BIC is enabled and congestion window is large * this behaves the same as the original Reno. */ -static inline __u32 bictcp_cwnd(struct tcp_opt *tp) +static inline __u32 bictcp_cwnd(struct tcp_sock *tp) { /* orignal Reno behaviour */ if (!tcp_is_bic(tp)) @@ -2092,7 +2093,7 @@ static inline __u32 bictcp_cwnd(struct tcp_opt *tp) /* This is Jacobson's slow start and congestion avoidance. * SIGCOMM '88, p. 328. */ -static __inline__ void reno_cong_avoid(struct tcp_opt *tp) +static inline void reno_cong_avoid(struct tcp_sock *tp) { if (tp->snd_cwnd <= tp->snd_ssthresh) { /* In "safe" area, increase. */ @@ -2141,7 +2142,7 @@ static __inline__ void reno_cong_avoid(struct tcp_opt *tp) * a cwnd adjustment decision. The original Vegas implementation * assumed senders never went idle. */ -static void vegas_cong_avoid(struct tcp_opt *tp, u32 ack, u32 seq_rtt) +static void vegas_cong_avoid(struct tcp_sock *tp, u32 ack, u32 seq_rtt) { /* The key players are v_beg_snd_una and v_beg_snd_nxt. * @@ -2334,7 +2335,7 @@ static void vegas_cong_avoid(struct tcp_opt *tp, u32 ack, u32 seq_rtt) tp->snd_cwnd_stamp = tcp_time_stamp; } -static inline void tcp_cong_avoid(struct tcp_opt *tp, u32 ack, u32 seq_rtt) +static inline void tcp_cong_avoid(struct tcp_sock *tp, u32 ack, u32 seq_rtt) { if (tcp_vegas_enabled(tp)) vegas_cong_avoid(tp, ack, seq_rtt); @@ -2346,7 +2347,7 @@ static inline void tcp_cong_avoid(struct tcp_opt *tp, u32 ack, u32 seq_rtt) * RFC2988 recommends to restart timer to now+rto. */ -static __inline__ void tcp_ack_packets_out(struct sock *sk, struct tcp_opt *tp) +static inline void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp) { if (!tcp_get_pcount(&tp->packets_out)) { tcp_clear_xmit_timer(sk, TCP_TIME_RETRANS); @@ -2367,7 +2368,7 @@ static __inline__ void tcp_ack_packets_out(struct sock *sk, struct tcp_opt *tp) static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb, __u32 now, __s32 *seq_rtt) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct tcp_skb_cb *scb = TCP_SKB_CB(skb); __u32 seq = tp->snd_una; __u32 packets_acked; @@ -2428,7 +2429,7 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb, /* Remove acknowledged frames from the retransmission queue. */ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; __u32 now = tcp_time_stamp; int acked = 0; @@ -2525,7 +2526,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) static void tcp_ack_probe(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); /* Was it a usable window open? 
*/ @@ -2542,13 +2543,13 @@ static void tcp_ack_probe(struct sock *sk) } } -static __inline__ int tcp_ack_is_dubious(struct tcp_opt *tp, int flag) +static inline int tcp_ack_is_dubious(struct tcp_sock *tp, int flag) { return (!(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) || tp->ca_state != TCP_CA_Open); } -static __inline__ int tcp_may_raise_cwnd(struct tcp_opt *tp, int flag) +static inline int tcp_may_raise_cwnd(struct tcp_sock *tp, int flag) { return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) && !((1<<tp->ca_state)&(TCPF_CA_Recovery|TCPF_CA_CWR)); @@ -2557,8 +2558,8 @@ static __inline__ int tcp_may_raise_cwnd(struct tcp_opt *tp, int flag) /* Check that window update is acceptable. * The function assumes that snd_una<=ack<=snd_next. */ -static __inline__ int -tcp_may_update_window(struct tcp_opt *tp, u32 ack, u32 ack_seq, u32 nwin) +static inline int tcp_may_update_window(struct tcp_sock *tp, u32 ack, + u32 ack_seq, u32 nwin) { return (after(ack, tp->snd_una) || after(ack_seq, tp->snd_wl1) || @@ -2570,7 +2571,7 @@ tcp_may_update_window(struct tcp_opt *tp, u32 ack, u32 ack_seq, u32 nwin) * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2 * and in FreeBSD. NetBSD's one is even worse.) is wrong. */ -static int tcp_ack_update_window(struct sock *sk, struct tcp_opt *tp, +static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb, u32 ack, u32 ack_seq) { int flag = 0; @@ -2605,7 +2606,7 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_opt *tp, static void tcp_process_frto(struct sock *sk, u32 prior_snd_una) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); tcp_sync_left_out(tp); @@ -2654,7 +2655,7 @@ static void tcp_process_frto(struct sock *sk, u32 prior_snd_una) static void init_westwood(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); tp->westwood.bw_ns_est = 0; tp->westwood.bw_est = 0; @@ -2678,7 +2679,7 @@ static inline __u32 westwood_do_filter(__u32 a, __u32 b) static void westwood_filter(struct sock *sk, __u32 delta) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); tp->westwood.bw_ns_est = westwood_do_filter(tp->westwood.bw_ns_est, @@ -2696,7 +2697,7 @@ static void westwood_filter(struct sock *sk, __u32 delta) static inline __u32 westwood_update_rttmin(const struct sock *sk) { - const struct tcp_opt *tp = tcp_sk(sk); + const struct tcp_sock *tp = tcp_sk(sk); __u32 rttmin = tp->westwood.rtt_min; if (tp->westwood.rtt != 0 && @@ -2713,7 +2714,7 @@ static inline __u32 westwood_update_rttmin(const struct sock *sk) static inline __u32 westwood_acked(const struct sock *sk) { - const struct tcp_opt *tp = tcp_sk(sk); + const struct tcp_sock *tp = tcp_sk(sk); return tp->snd_una - tp->westwood.snd_una; } @@ -2729,7 +2730,7 @@ static inline __u32 westwood_acked(const struct sock *sk) static int westwood_new_window(const struct sock *sk) { - const struct tcp_opt *tp = tcp_sk(sk); + const struct tcp_sock *tp = tcp_sk(sk); __u32 left_bound; __u32 rtt; int ret = 0; @@ -2760,7 +2761,7 @@ static int westwood_new_window(const struct sock *sk) static void __westwood_update_window(struct sock *sk, __u32 now) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); __u32 delta = now - tp->westwood.rtt_win_sx; if (delta) { @@ -2788,7 +2789,7 @@ static void westwood_update_window(struct sock *sk, __u32 now) static void __tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb) { - struct tcp_opt *tp = tcp_sk(sk); + struct 
tcp_sock *tp = tcp_sk(sk); westwood_update_window(sk, tcp_time_stamp); @@ -2811,24 +2812,24 @@ static inline void tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb) static void westwood_dupack_update(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); tp->westwood.accounted += tp->mss_cache_std; tp->westwood.cumul_ack = tp->mss_cache_std; } -static inline int westwood_may_change_cumul(struct tcp_opt *tp) +static inline int westwood_may_change_cumul(struct tcp_sock *tp) { return (tp->westwood.cumul_ack > tp->mss_cache_std); } -static inline void westwood_partial_update(struct tcp_opt *tp) +static inline void westwood_partial_update(struct tcp_sock *tp) { tp->westwood.accounted -= tp->westwood.cumul_ack; tp->westwood.cumul_ack = tp->mss_cache_std; } -static inline void westwood_complete_update(struct tcp_opt *tp) +static inline void westwood_complete_update(struct tcp_sock *tp) { tp->westwood.cumul_ack -= tp->westwood.accounted; tp->westwood.accounted = 0; @@ -2842,7 +2843,7 @@ static inline void westwood_complete_update(struct tcp_opt *tp) static inline __u32 westwood_acked_count(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); tp->westwood.cumul_ack = westwood_acked(sk); @@ -2875,7 +2876,7 @@ static inline __u32 westwood_acked_count(struct sock *sk) static void __tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); westwood_update_window(sk, tcp_time_stamp); @@ -2892,7 +2893,7 @@ static inline void tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb) /* This routine deals with incoming acks, but not outgoing ones. */ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); u32 prior_snd_una = tp->snd_una; u32 ack_seq = TCP_SKB_CB(skb)->seq; u32 ack = TCP_SKB_CB(skb)->ack_seq; @@ -2997,7 +2998,7 @@ uninteresting_ack: * But, this can also be called on packets in the established flow when * the fast version below fails. */ -void tcp_parse_options(struct sk_buff *skb, struct tcp_opt *tp, int estab) +void tcp_parse_options(struct sk_buff *skb, struct tcp_sock *tp, int estab) { unsigned char *ptr; struct tcphdr *th = skb->h.th; @@ -3082,7 +3083,8 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_opt *tp, int estab) /* Fast parse options. This hopes to only see timestamps. * If it is wrong it falls back on tcp_parse_options(). */ -static __inline__ int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, struct tcp_opt *tp) +static inline int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th, + struct tcp_sock *tp) { if (th->doff == sizeof(struct tcphdr)>>2) { tp->saw_tstamp = 0; @@ -3104,15 +3106,13 @@ static __inline__ int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr return 1; } -static __inline__ void -tcp_store_ts_recent(struct tcp_opt *tp) +static inline void tcp_store_ts_recent(struct tcp_sock *tp) { tp->ts_recent = tp->rcv_tsval; tp->ts_recent_stamp = xtime.tv_sec; } -static __inline__ void -tcp_replace_ts_recent(struct tcp_opt *tp, u32 seq) +static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) { if (tp->saw_tstamp && !after(seq, tp->rcv_wup)) { /* PAWS bug workaround wrt. ACK frames, the PAWS discard @@ -3151,7 +3151,7 @@ tcp_replace_ts_recent(struct tcp_opt *tp, u32 seq) * up to bandwidth of 18Gigabit/sec. 
8) ] */ -static int tcp_disordered_ack(struct tcp_opt *tp, struct sk_buff *skb) +static int tcp_disordered_ack(struct tcp_sock *tp, struct sk_buff *skb) { struct tcphdr *th = skb->h.th; u32 seq = TCP_SKB_CB(skb)->seq; @@ -3170,7 +3170,7 @@ static int tcp_disordered_ack(struct tcp_opt *tp, struct sk_buff *skb) (s32)(tp->ts_recent - tp->rcv_tsval) <= (tp->rto*1024)/HZ); } -static __inline__ int tcp_paws_discard(struct tcp_opt *tp, struct sk_buff *skb) +static inline int tcp_paws_discard(struct tcp_sock *tp, struct sk_buff *skb) { return ((s32)(tp->ts_recent - tp->rcv_tsval) > TCP_PAWS_WINDOW && xtime.tv_sec < tp->ts_recent_stamp + TCP_PAWS_24DAYS && @@ -3190,7 +3190,7 @@ static __inline__ int tcp_paws_discard(struct tcp_opt *tp, struct sk_buff *skb) * (borrowed from freebsd) */ -static inline int tcp_sequence(struct tcp_opt *tp, u32 seq, u32 end_seq) +static inline int tcp_sequence(struct tcp_sock *tp, u32 seq, u32 end_seq) { return !before(end_seq, tp->rcv_wup) && !after(seq, tp->rcv_nxt + tcp_receive_window(tp)); @@ -3235,7 +3235,7 @@ static void tcp_reset(struct sock *sk) */ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); tcp_schedule_ack(tp); @@ -3315,7 +3315,7 @@ tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_seq) return 0; } -static __inline__ void tcp_dsack_set(struct tcp_opt *tp, u32 seq, u32 end_seq) +static inline void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq) { if (tp->sack_ok && sysctl_tcp_dsack) { if (before(seq, tp->rcv_nxt)) @@ -3330,7 +3330,7 @@ static __inline__ void tcp_dsack_set(struct tcp_opt *tp, u32 seq, u32 end_seq) } } -static __inline__ void tcp_dsack_extend(struct tcp_opt *tp, u32 seq, u32 end_seq) +static inline void tcp_dsack_extend(struct tcp_sock *tp, u32 seq, u32 end_seq) { if (!tp->dsack) tcp_dsack_set(tp, seq, end_seq); @@ -3340,7 +3340,7 @@ static __inline__ void tcp_dsack_extend(struct tcp_opt *tp, u32 seq, u32 end_seq static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { @@ -3362,7 +3362,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb) /* These routines update the SACK block as out-of-order packets arrive or * in-order packets close up the sequence space. */ -static void tcp_sack_maybe_coalesce(struct tcp_opt *tp) +static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) { int this_sack; struct tcp_sack_block *sp = &tp->selective_acks[0]; @@ -3403,7 +3403,7 @@ static __inline__ void tcp_sack_swap(struct tcp_sack_block *sack1, struct tcp_sa static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct tcp_sack_block *sp = &tp->selective_acks[0]; int cur_sacks = tp->num_sacks; int this_sack; @@ -3446,7 +3446,7 @@ new_sack: /* RCV.NXT advances, some SACKs should be eaten. 
*/ -static void tcp_sack_remove(struct tcp_opt *tp) +static void tcp_sack_remove(struct tcp_sock *tp) { struct tcp_sack_block *sp = &tp->selective_acks[0]; int num_sacks = tp->num_sacks; @@ -3487,7 +3487,7 @@ static void tcp_sack_remove(struct tcp_opt *tp) */ static void tcp_ofo_queue(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); __u32 dsack_high = tp->rcv_nxt; struct sk_buff *skb; @@ -3525,7 +3525,7 @@ static int tcp_prune_queue(struct sock *sk); static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) { struct tcphdr *th = skb->h.th; - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int eaten = -1; if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) @@ -3833,7 +3833,7 @@ tcp_collapse(struct sock *sk, struct sk_buff *head, */ static void tcp_collapse_ofo_queue(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb = skb_peek(&tp->out_of_order_queue); struct sk_buff *head; u32 start, end; @@ -3878,7 +3878,7 @@ static void tcp_collapse_ofo_queue(struct sock *sk) */ static int tcp_prune_queue(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq); @@ -3938,7 +3938,7 @@ static int tcp_prune_queue(struct sock *sk) */ void tcp_cwnd_application_limited(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); if (tp->ca_state == TCP_CA_Open && sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { @@ -3962,7 +3962,7 @@ void tcp_cwnd_application_limited(struct sock *sk) */ static void tcp_new_space(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); if (tcp_get_pcount(&tp->packets_out) < tp->snd_cwnd && !(sk->sk_userlocks & SOCK_SNDBUF_LOCK) && @@ -3993,7 +3993,7 @@ static inline void tcp_check_space(struct sock *sk) static void __tcp_data_snd_check(struct sock *sk, struct sk_buff *skb) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); if (after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd) || tcp_packets_in_flight(tp) >= tp->snd_cwnd || @@ -4015,7 +4015,7 @@ static __inline__ void tcp_data_snd_check(struct sock *sk) */ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); /* More than one full frame received... */ if (((tp->rcv_nxt - tp->rcv_wup) > tp->ack.rcv_mss @@ -4038,7 +4038,7 @@ static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible) static __inline__ void tcp_ack_snd_check(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); if (!tcp_ack_scheduled(tp)) { /* We sent a data segment already. */ return; @@ -4058,7 +4058,7 @@ static __inline__ void tcp_ack_snd_check(struct sock *sk) static void tcp_check_urg(struct sock * sk, struct tcphdr * th) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); u32 ptr = ntohs(th->urg_ptr); if (ptr && !sysctl_tcp_stdurg) @@ -4125,7 +4125,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th) /* This is the 'fast' part of urgent handling. */ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); /* Check if we get a new urgent pointer - normally not. 
*/ if (th->urg) @@ -4150,7 +4150,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th) static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int chunk = skb->len - hlen; int err; @@ -4218,7 +4218,7 @@ tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, struct tcphdr *th, unsigned len) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); /* * Header prediction. @@ -4468,7 +4468,7 @@ discard: static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, struct tcphdr *th, unsigned len) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int saved_clamp = tp->mss_clamp; tcp_parse_options(skb, tp, 0); @@ -4713,7 +4713,7 @@ reset_and_undo: int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, struct tcphdr *th, unsigned len) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int queued = 0; tp->saw_tstamp = 0; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 6c5b44af65b27c..aeed1e6ce6eabb 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -568,7 +568,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport, tw = (struct tcp_tw_bucket *)sk2; if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); /* With PAWS, it is safe from the viewpoint of data integrity. Even without PAWS it @@ -744,7 +744,7 @@ out: int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct sockaddr_in *usin = (struct sockaddr_in *)uaddr; struct rtable *rt; u32 daddr, nexthop; @@ -867,7 +867,7 @@ static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd) return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1)); } -static struct open_request *tcp_v4_search_req(struct tcp_opt *tp, +static struct open_request *tcp_v4_search_req(struct tcp_sock *tp, struct open_request ***prevp, __u16 rport, __u32 raddr, __u32 laddr) @@ -893,7 +893,7 @@ static struct open_request *tcp_v4_search_req(struct tcp_opt *tp, static void tcp_v4_synq_add(struct sock *sk, struct open_request *req) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct tcp_listen_opt *lopt = tp->listen_opt; u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd); @@ -918,7 +918,7 @@ static inline void do_pmtu_discovery(struct sock *sk, struct iphdr *iph, { struct dst_entry *dst; struct inet_sock *inet = inet_sk(sk); - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); /* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs * send out by Linux are always <576bytes so they should go through @@ -979,7 +979,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info) { struct iphdr *iph = (struct iphdr *)skb->data; struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); - struct tcp_opt *tp; + struct tcp_sock *tp; struct inet_sock *inet; int type = skb->h.icmph->type; int code = skb->h.icmph->code; @@ -1393,7 +1393,7 @@ struct or_calltable or_ipv4 = { int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) { - struct tcp_opt tp; + struct tcp_sock tp; struct open_request *req; __u32 saddr = skb->nh.iph->saddr; __u32 daddr = 
skb->nh.iph->daddr; @@ -1550,7 +1550,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, struct dst_entry *dst) { struct inet_sock *newinet; - struct tcp_opt *newtp; + struct tcp_sock *newtp; struct sock *newsk; if (sk_acceptq_is_full(sk)) @@ -1602,7 +1602,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) { struct tcphdr *th = skb->h.th; struct iphdr *iph = skb->nh.iph; - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct sock *nsk; struct open_request **prev; /* Find possible connection requests. */ @@ -1972,7 +1972,7 @@ static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr) int tcp_v4_remember_stamp(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct rtable *rt = (struct rtable *)__sk_dst_get(sk); struct inet_peer *peer = NULL; int release_it = 0; @@ -2040,7 +2040,7 @@ struct tcp_func ipv4_specific = { */ static int tcp_v4_init_sock(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); skb_queue_head_init(&tp->out_of_order_queue); tcp_init_xmit_timers(sk); @@ -2082,7 +2082,7 @@ static int tcp_v4_init_sock(struct sock *sk) int tcp_v4_destroy_sock(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); tcp_clear_xmit_timers(sk); @@ -2131,7 +2131,7 @@ static inline struct tcp_tw_bucket *tw_next(struct tcp_tw_bucket *tw) static void *listening_get_next(struct seq_file *seq, void *cur) { - struct tcp_opt *tp; + struct tcp_sock *tp; struct hlist_node *node; struct sock *sk = cur; struct tcp_iter_state* st = seq->private; @@ -2368,7 +2368,7 @@ static void tcp_seq_stop(struct seq_file *seq, void *v) switch (st->state) { case TCP_SEQ_STATE_OPENREQ: if (v) { - struct tcp_opt *tp = tcp_sk(st->syn_wait_sk); + struct tcp_sock *tp = tcp_sk(st->syn_wait_sk); read_unlock_bh(&tp->syn_wait_lock); } case TCP_SEQ_STATE_LISTENING: @@ -2473,7 +2473,7 @@ static void get_tcp4_sock(struct sock *sp, char *tmpbuf, int i) { int timer_active; unsigned long timer_expires; - struct tcp_opt *tp = tcp_sk(sp); + struct tcp_sock *tp = tcp_sk(sp); struct inet_sock *inet = inet_sk(sp); unsigned int dest = inet->daddr; unsigned int src = inet->rcv_saddr; diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 1e0970332a2140..b2f553ed76154d 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -125,7 +125,7 @@ enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw, struct sk_buff *skb, struct tcphdr *th, unsigned len) { - struct tcp_opt tp; + struct tcp_sock tp; int paws_reject = 0; tp.saw_tstamp = 0; @@ -329,7 +329,7 @@ static void __tcp_tw_hashdance(struct sock *sk, struct tcp_tw_bucket *tw) void tcp_time_wait(struct sock *sk, int state, int timeo) { struct tcp_tw_bucket *tw = NULL; - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); int recycle_ok = 0; if (sysctl_tcp_tw_recycle && tp->ts_recent_stamp) @@ -692,7 +692,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, struct sock *newsk = sk_alloc(PF_INET, GFP_ATOMIC, 0, sk->sk_prot->slab); if(newsk != NULL) { - struct tcp_opt *newtp; + struct tcp_sock *newtp; struct sk_filter *filter; memcpy(newsk, sk, sizeof(struct tcp_sock)); @@ -736,7 +736,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req, return NULL; } - /* Now setup tcp_opt */ + /* Now setup tcp_sock */ newtp = tcp_sk(newsk); 
newtp->pred_flags = 0; newtp->rcv_nxt = req->rcv_isn + 1; @@ -860,10 +860,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, struct open_request **prev) { struct tcphdr *th = skb->h.th; - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); int paws_reject = 0; - struct tcp_opt ttp; + struct tcp_sock ttp; struct sock *child; ttp.saw_tstamp = 0; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 6c6fcf22bf791d..7443293b862d90 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -51,8 +51,8 @@ int sysctl_tcp_retrans_collapse = 1; */ int sysctl_tcp_tso_win_divisor = 8; -static __inline__ -void update_send_head(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb) +static inline void update_send_head(struct sock *sk, struct tcp_sock *tp, + struct sk_buff *skb) { sk->sk_send_head = skb->next; if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue) @@ -67,7 +67,7 @@ void update_send_head(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb) * Anything in between SND.UNA...SND.UNA+SND.WND also can be already * invalid. OK, let's make this for now: */ -static __inline__ __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_opt *tp) +static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp) { if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt)) return tp->snd_nxt; @@ -91,7 +91,7 @@ static __inline__ __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_opt *tp) */ static __u16 tcp_advertise_mss(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct dst_entry *dst = __sk_dst_get(sk); int mss = tp->advmss; @@ -105,7 +105,7 @@ static __u16 tcp_advertise_mss(struct sock *sk) /* RFC2861. Reset CWND after idle period longer RTO to "restart window". * This is the first part of cwnd validation mechanism. */ -static void tcp_cwnd_restart(struct tcp_opt *tp, struct dst_entry *dst) +static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst) { s32 delta = tcp_time_stamp - tp->lsndtime; u32 restart_cwnd = tcp_init_cwnd(tp, dst); @@ -124,7 +124,8 @@ static void tcp_cwnd_restart(struct tcp_opt *tp, struct dst_entry *dst) tp->snd_cwnd_used = 0; } -static __inline__ void tcp_event_data_sent(struct tcp_opt *tp, struct sk_buff *skb, struct sock *sk) +static inline void tcp_event_data_sent(struct tcp_sock *tp, + struct sk_buff *skb, struct sock *sk) { u32 now = tcp_time_stamp; @@ -143,7 +144,7 @@ static __inline__ void tcp_event_data_sent(struct tcp_opt *tp, struct sk_buff *s static __inline__ void tcp_event_ack_sent(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); tcp_dec_quickack_mode(tp); tcp_clear_xmit_timer(sk, TCP_TIME_DACK); @@ -208,14 +209,14 @@ void tcp_select_initial_window(int __space, __u32 mss, (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp); } -/* Chose a new window to advertise, update state in tcp_opt for the +/* Chose a new window to advertise, update state in tcp_sock for the * socket, and return result with RFC1323 scaling applied. The return * value can be stuffed directly into th->window for an outgoing * frame. 
*/ static __inline__ u16 tcp_select_window(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); u32 cur_win = tcp_receive_window(tp); u32 new_win = __tcp_select_window(sk); @@ -267,7 +268,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb) { if (skb != NULL) { struct inet_sock *inet = inet_sk(sk); - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); int tcp_header_size = tp->tcp_header_len; struct tcphdr *th; @@ -396,7 +397,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb) */ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); /* Advance write_seq and place onto the write_queue. */ tp->write_seq = TCP_SKB_CB(skb)->end_seq; @@ -413,7 +414,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) */ void tcp_push_one(struct sock *sk, unsigned cur_mss) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb = sk->sk_send_head; if (tcp_snd_test(tp, skb, cur_mss, TCP_NAGLE_PUSH)) { @@ -453,7 +454,7 @@ void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_std) */ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *buff; int nsize; u16 flags; @@ -619,7 +620,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct dst_entry *dst = __sk_dst_get(sk); int mss_now; @@ -666,7 +667,7 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu) unsigned int tcp_current_mss(struct sock *sk, int large) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); struct dst_entry *dst = __sk_dst_get(sk); unsigned int do_large, mss_now; @@ -727,7 +728,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large) */ int tcp_write_xmit(struct sock *sk, int nonagle) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); unsigned int mss_now; /* If we are closed, the bytes will have to remain here. @@ -831,7 +832,7 @@ int tcp_write_xmit(struct sock *sk, int nonagle) */ u32 __tcp_select_window(struct sock *sk) { - struct tcp_opt *tp = tcp_sk(sk); + struct tcp_sock *tp = tcp_sk(sk); /* MSS for the peer's data. Previous verions used mss_clamp * here. I don't know if the value based on our guesses * of peer's MSS is better for the performance. It's more correct @@ -892,7 +893,7 @@ u32 __tcp_select_window(struct sock *sk) /* Attempt to collapse two adjacent SKB's during retransmission. 
@@ -970,7 +971,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
  */
 void tcp_simple_retransmit(struct sock *sk)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 	unsigned int mss = tcp_current_mss(sk, 0);
 	int lost = 0;
@@ -1016,7 +1017,7 @@ void tcp_simple_retransmit(struct sock *sk)
  */
 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int cur_mss = tcp_current_mss(sk, 0);
 	int err;
@@ -1140,7 +1141,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
  */
 void tcp_xmit_retransmit_queue(struct sock *sk)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 	int packet_cnt = tcp_get_pcount(&tp->lost_out);
@@ -1235,7 +1236,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
  */
 void tcp_send_fin(struct sock *sk)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue);
 	int mss_now;
@@ -1281,7 +1282,7 @@ void tcp_send_fin(struct sock *sk)
  */
 void tcp_send_active_reset(struct sock *sk, int priority)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;

 	/* NOTE: No TCP options attached and we never retransmit this. */
@@ -1346,7 +1347,7 @@ int tcp_send_synack(struct sock *sk)
 struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 				 struct open_request *req)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcphdr *th;
 	int tcp_header_size;
 	struct sk_buff *skb;
@@ -1417,7 +1418,7 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 static inline void tcp_connect_init(struct sock *sk)
 {
 	struct dst_entry *dst = __sk_dst_get(sk);
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);

 	/* We'll fix this up when we get a response from the other end.
 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
@@ -1466,7 +1467,7 @@ static inline void tcp_connect_init(struct sock *sk)
  */
 int tcp_connect(struct sock *sk)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *buff;

 	tcp_connect_init(sk);
@@ -1510,7 +1511,7 @@ int tcp_connect(struct sock *sk)
  */
 void tcp_send_delayed_ack(struct sock *sk)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	int ato = tp->ack.ato;
 	unsigned long timeout;
@@ -1562,7 +1563,7 @@ void tcp_send_ack(struct sock *sk)
 {
 	/* If we have been reset, we may not send again. */
 	if (sk->sk_state != TCP_CLOSE) {
-		struct tcp_opt *tp = tcp_sk(sk);
+		struct tcp_sock *tp = tcp_sk(sk);
 		struct sk_buff *buff;

 		/* We are not putting this on the write queue, so
@@ -1605,7 +1606,7 @@ void tcp_send_ack(struct sock *sk)
  */
 static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;

 	/* We don't queue it, tcp_transmit_skb() sets ownership.
 	 */
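tcp_send_delayed_ack() above derives its timer from the adaptive quick-ack interval ato, bounds it, and arms an absolute expiry relative to jiffies. A standalone sketch of just that arithmetic, under simplifying assumptions (millisecond units and the DELACK_MAX_MS constant are stand-ins, not the kernel's jiffies-based values):

#include <stdio.h>

#define DELACK_MAX_MS 200	/* illustrative stand-in for TCP_DELACK_MAX */

/* Clamp the adaptive ACK interval and convert it to an absolute
 * expiry, the same shape as the delayed-ACK path's jiffies math. */
static unsigned long delack_expiry(unsigned long now_ms, unsigned int ato_ms)
{
	if (ato_ms > DELACK_MAX_MS)
		ato_ms = DELACK_MAX_MS;
	return now_ms + ato_ms;
}

int main(void)
{
	printf("%lu\n", delack_expiry(1000, 500));	/* clamped: prints 1200 */
	return 0;
}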
@@ -1634,7 +1635,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
 int tcp_write_wakeup(struct sock *sk)
 {
 	if (sk->sk_state != TCP_CLOSE) {
-		struct tcp_opt *tp = tcp_sk(sk);
+		struct tcp_sock *tp = tcp_sk(sk);
 		struct sk_buff *skb;

 		if ((skb = sk->sk_send_head) != NULL &&
@@ -1688,7 +1689,7 @@ int tcp_write_wakeup(struct sock *sk)
  */
 void tcp_send_probe0(struct sock *sk)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	int err;

 	err = tcp_write_wakeup(sk);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 48fca593c24e45..a2799d1678afaf 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -48,7 +48,7 @@ const char tcp_timer_bug_msg[] = KERN_DEBUG "tcpbug: unknown timer value\n";

 void tcp_init_xmit_timers(struct sock *sk)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);

 	init_timer(&tp->retransmit_timer);
 	tp->retransmit_timer.function=&tcp_write_timer;
@@ -67,7 +67,7 @@ void tcp_init_xmit_timers(struct sock *sk)

 void tcp_clear_xmit_timers(struct sock *sk)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);

 	tp->pending = 0;
 	sk_stop_timer(sk, &tp->retransmit_timer);
@@ -101,7 +101,7 @@ static void tcp_write_err(struct sock *sk)
  */
 static int tcp_out_of_resources(struct sock *sk, int do_reset)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	int orphans = atomic_read(&tcp_orphan_count);

 	/* If peer does not open window for long time, or did not transmit
@@ -154,7 +154,7 @@ static int tcp_orphan_retries(struct sock *sk, int alive)
 /* A write timeout has occurred. Process the after effects. */
 static int tcp_write_timeout(struct sock *sk)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	int retry_until;

 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
@@ -208,7 +208,7 @@ static int tcp_write_timeout(struct sock *sk)
 static void tcp_delack_timer(unsigned long data)
 {
 	struct sock *sk = (struct sock*)data;
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);

 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk)) {
@@ -268,7 +268,7 @@ out_unlock:

 static void tcp_probe_timer(struct sock *sk)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	int max_probes;

 	if (tcp_get_pcount(&tp->packets_out) || !sk->sk_send_head) {
@@ -316,7 +316,7 @@ static void tcp_probe_timer(struct sock *sk)

 static void tcp_retransmit_timer(struct sock *sk)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);

 	if (!tcp_get_pcount(&tp->packets_out))
 		goto out;
@@ -418,7 +418,7 @@ out:;
 static void tcp_write_timer(unsigned long data)
 {
 	struct sock *sk = (struct sock*)data;
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	int event;

 	bh_lock_sock(sk);
@@ -462,7 +462,7 @@ out_unlock:

 static void tcp_synack_timer(struct sock *sk)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_listen_opt *lopt = tp->listen_opt;
 	int max_retries = tp->syn_retries ? : sysctl_tcp_synack_retries;
 	int thresh = max_retries;
@@ -573,7 +573,7 @@ void tcp_set_keepalive(struct sock *sk, int val)
 static void tcp_keepalive_timer (unsigned long data)
 {
 	struct sock *sk = (struct sock *) data;
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 elapsed;

 	/* Only process if socket is not in use.
 	 */
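The retransmit and probe timers touched in tcp_timer.c above share the classic exponential backoff: each expiry doubles the retransmission timeout up to a ceiling (TCP_RTO_MAX, 120 seconds in this era). A standalone sketch of the doubling rule; the millisecond units and constant name are illustrative:

#include <stdio.h>

#define RTO_MAX_MS (120 * 1000)	/* stand-in for TCP_RTO_MAX */

/* One timer expiry: double the RTO, capped at the ceiling — the same
 * shape as the kernel's min(tp->rto << 1, TCP_RTO_MAX). */
static unsigned int backoff_rto(unsigned int rto_ms)
{
	unsigned int next = rto_ms << 1;
	return next < RTO_MAX_MS ? next : RTO_MAX_MS;
}

int main(void)
{
	unsigned int rto = 200;	/* initial RTO, ms */
	for (int i = 0; i < 12; i++)
		rto = backoff_rto(rto);
	printf("%u\n", rto);	/* saturates at 120000 */
	return 0;
}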
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 73cc1dd09d0803..c41d5a5b7bd473 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -164,7 +164,7 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname,
 			ipv6_sock_mc_close(sk);

 			if (sk->sk_protocol == IPPROTO_TCP) {
-				struct tcp_opt *tp = tcp_sk(sk);
+				struct tcp_sock *tp = tcp_sk(sk);

 				local_bh_disable();
 				sock_prot_dec_use(sk->sk_prot);
@@ -281,7 +281,7 @@ update:
 		retv = 0;
 		if (sk->sk_type == SOCK_STREAM) {
 			if (opt) {
-				struct tcp_opt *tp = tcp_sk(sk);
+				struct tcp_sock *tp = tcp_sk(sk);
 				if (!((1 << sk->sk_state) &
 				      (TCPF_LISTEN | TCPF_CLOSE))
 				    && inet_sk(sk)->daddr != LOOPBACK4_IPV6) {
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 97fe88e10597d7..3bc7ca44a60bfc 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -235,7 +235,7 @@ static __inline__ void __tcp_v6_hash(struct sock *sk)
 static void tcp_v6_hash(struct sock *sk)
 {
 	if (sk->sk_state != TCP_CLOSE) {
-		struct tcp_opt *tp = tcp_sk(sk);
+		struct tcp_sock *tp = tcp_sk(sk);

 		if (tp->af_specific == &ipv6_mapped) {
 			tcp_prot.hash(sk);
@@ -391,7 +391,7 @@ static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd)
 	return c & (TCP_SYNQ_HSIZE - 1);
 }

-static struct open_request *tcp_v6_search_req(struct tcp_opt *tp,
+static struct open_request *tcp_v6_search_req(struct tcp_sock *tp,
 					      struct open_request ***prevp,
 					      __u16 rport,
 					      struct in6_addr *raddr,
@@ -466,7 +466,7 @@ static int tcp_v6_check_established(struct sock *sk)
 		    ipv6_addr_equal(&tw->tw_v6_daddr, saddr)	&&
 		    ipv6_addr_equal(&tw->tw_v6_rcv_saddr, daddr)	&&
 		    sk2->sk_bound_dev_if == sk->sk_bound_dev_if) {
-			struct tcp_opt *tp = tcp_sk(sk);
+			struct tcp_sock *tp = tcp_sk(sk);

 			if (tw->tw_ts_recent_stamp) {
 				/* See comment in tcp_ipv4.c */
@@ -551,7 +551,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
 	struct inet_sock *inet = inet_sk(sk);
 	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct in6_addr *saddr = NULL, *final_p = NULL, final;
 	struct flowi fl;
 	struct dst_entry *dst;
@@ -741,7 +741,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	struct ipv6_pinfo *np;
 	struct sock *sk;
 	int err;
-	struct tcp_opt *tp;
+	struct tcp_sock *tp;
 	__u32 seq;

 	sk = tcp_v6_lookup(&hdr->daddr, th->dest, &hdr->saddr, th->source, skb->dev->ifindex);
@@ -1146,7 +1146,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
 {
 	struct open_request *req, **prev;
 	struct tcphdr *th = skb->h.th;
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct sock *nsk;

 	/* Find possible connection requests.
 	 */
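tcp_v6_hash() above shows why tp->af_specific exists: an AF_INET6 socket carrying a v4-mapped connection must delegate to the IPv4 machinery (the ipv6_mapped ops table routes hashing to tcp_prot.hash). A standalone sketch of that ops-table dispatch; every name here besides af_specific/ipv6_mapped is illustrative:

#include <stdio.h>

/* Minimal stand-in for struct tcp_func: one function pointer per
 * address-family-specific operation. */
struct af_ops {
	void (*hash)(const char *who);
};

static void v4_hash(const char *who) { printf("%s -> tcp_prot.hash (v4 path)\n", who); }
static void v6_hash(const char *who) { printf("%s -> __tcp_v6_hash (v6 path)\n", who); }

static const struct af_ops mapped_ops = { v4_hash };	/* like ipv6_mapped */
static const struct af_ops native_ops = { v6_hash };	/* like the native v6 table */

int main(void)
{
	const struct af_ops *af_specific = &mapped_ops;	/* v4-mapped socket */
	af_specific->hash("sk1");
	af_specific = &native_ops;			/* native v6 socket */
	af_specific->hash("sk2");
	return 0;
}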
@@ -1179,7 +1179,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)

 static void tcp_v6_synq_add(struct sock *sk, struct open_request *req)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_listen_opt *lopt = tp->listen_opt;
 	u32 h = tcp_v6_synq_hash(&req->af.v6_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
@@ -1202,7 +1202,7 @@ static void tcp_v6_synq_add(struct sock *sk, struct open_request *req)
 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct tcp_opt tmptp, *tp = tcp_sk(sk);
+	struct tcp_sock tmptp, *tp = tcp_sk(sk);
 	struct open_request *req = NULL;
 	__u32 isn = TCP_SKB_CB(skb)->when;
@@ -1282,7 +1282,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
 	struct tcp6_sock *newtcp6sk;
 	struct inet_sock *newinet;
-	struct tcp_opt *newtp;
+	struct tcp_sock *newtp;
 	struct sock *newsk;
 	struct ipv6_txoptions *opt;
@@ -1297,7 +1297,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 			return NULL;

 		newtcp6sk = (struct tcp6_sock *)newsk;
-		newtcp6sk->inet.pinet6 = &newtcp6sk->inet6;
+		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

 		newinet = inet_sk(newsk);
 		newnp = inet6_sk(newsk);
@@ -1390,7 +1390,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 			~(NETIF_F_IP_CSUM | NETIF_F_TSO);

 	newtcp6sk = (struct tcp6_sock *)newsk;
-	newtcp6sk->inet.pinet6 = &newtcp6sk->inet6;
+	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

 	newtp = tcp_sk(newsk);
 	newinet = inet_sk(newsk);
@@ -1497,7 +1497,7 @@ static int tcp_v6_checksum_init(struct sk_buff *skb)
 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct tcp_opt *tp;
+	struct tcp_sock *tp;
 	struct sk_buff *opt_skb = NULL;

 	/* Imagine: socket is IPv6. IPv4 packet arrives,
@@ -1919,7 +1919,7 @@ static struct tcp_func ipv6_mapped = {
  */
 static int tcp_v6_init_sock(struct sock *sk)
 {
-	struct tcp_opt *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);

 	skb_queue_head_init(&tp->out_of_order_queue);
 	tcp_init_xmit_timers(sk);
@@ -2007,7 +2007,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 	int timer_active;
 	unsigned long timer_expires;
 	struct inet_sock *inet = inet_sk(sp);
-	struct tcp_opt *tp = tcp_sk(sp);
+	struct tcp_sock *tp = tcp_sk(sp);
 	struct ipv6_pinfo *np = inet6_sk(sp);

 	dest = &np->daddr;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index d2d8e9b006c892..8d7e159822127d 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1077,7 +1077,7 @@ static void
 svc_tcp_init(struct svc_sock *svsk)
 {
 	struct sock	*sk = svsk->sk_sk;
-	struct tcp_opt  *tp = tcp_sk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);

 	svsk->sk_recvfrom = svc_tcp_recvfrom;
 	svsk->sk_sendto = svc_tcp_sendto;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 1adcf2769e332a..ed2a19c762e021 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1548,8 +1548,7 @@ xprt_bind_socket(struct rpc_xprt *xprt, struct socket *sock)
 		sk->sk_no_check = UDP_CSUM_NORCV;
 		xprt_set_connected(xprt);
 	} else {
-		struct tcp_opt *tp = tcp_sk(sk);
-		tp->nonagle = 1;	/* disable Nagle's algorithm */
+		tcp_sk(sk)->nonagle = 1;	/* disable Nagle's algorithm */
 		sk->sk_data_ready = tcp_data_ready;
 		sk->sk_state_change = tcp_state_change;
 		xprt_clear_connected(xprt);
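The final xprt.c hunk sets tcp_sk(sk)->nonagle = 1 on the RPC transport socket, the in-kernel counterpart of what userspace achieves with setsockopt(TCP_NODELAY): RPC messages are small and latency-sensitive, so Nagle batching is unwanted. A userspace sketch for comparison (the helper name is ours):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Disable Nagle's algorithm so small writes go out immediately
 * instead of being coalesced while ACKs are outstanding. */
static int disable_nagle(int fd)
{
	int one = 1;
	return setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
}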