tag | line | file | source code |
sk | 114 | drivers/net/de600.c | static unsigned long de600_rspace(struct sock *sk); |
sk | 476 | drivers/net/de600.c | if (skb->sk && (skb->sk->protocol == IPPROTO_TCP) && |
sk | 477 | drivers/net/de600.c | (skb->sk->prot->rspace != &de600_rspace)) |
sk | 478 | drivers/net/de600.c | skb->sk->prot->rspace = de600_rspace; /* Ugh! */ |
sk | 807 | drivers/net/de600.c | de600_rspace(struct sock *sk) |
sk | 811 | drivers/net/de600.c | if (sk != NULL) { |
sk | 818 | drivers/net/de600.c | if (sk->rmem_alloc >= sk->rcvbuf-2*DE600_MIN_WINDOW) return(0); |
sk | 819 | drivers/net/de600.c | amt = min((sk->rcvbuf-sk->rmem_alloc)/2/*-DE600_MIN_WINDOW*/, DE600_MAX_WINDOW); |
sk | 78 | drivers/net/loopback.c | else if(skb->sk) |
sk | 84 | drivers/net/loopback.c | atomic_sub(skb->truesize, &skb->sk->wmem_alloc); |
sk | 85 | drivers/net/loopback.c | skb->sk->write_space(skb->sk); |
sk | 73 | fs/ncpfs/sock.c | ncp_wdog_data_ready(struct sock *sk, int len) |
sk | 75 | fs/ncpfs/sock.c | struct socket *sock = sk->socket; |
sk | 77 | fs/ncpfs/sock.c | if (!sk->dead) |
sk | 127 | fs/ncpfs/sock.c | struct sock *sk; |
sk | 148 | fs/ncpfs/sock.c | sk = (struct sock *)(sock->data); |
sk | 150 | fs/ncpfs/sock.c | if (sk == NULL) |
sk | 158 | fs/ncpfs/sock.c | (unsigned int)(sk->data_ready), |
sk | 161 | fs/ncpfs/sock.c | if (sk->data_ready == ncp_wdog_data_ready) |
sk | 167 | fs/ncpfs/sock.c | server->data_ready = sk->data_ready; |
sk | 168 | fs/ncpfs/sock.c | sk->data_ready = ncp_wdog_data_ready; |
sk | 169 | fs/ncpfs/sock.c | sk->allocation = GFP_ATOMIC; |
sk | 179 | fs/ncpfs/sock.c | struct sock *sk; |
sk | 199 | fs/ncpfs/sock.c | sk = (struct sock *)(sock->data); |
sk | 201 | fs/ncpfs/sock.c | if (sk == NULL) |
sk | 214 | fs/ncpfs/sock.c | if (sk->data_ready != ncp_wdog_data_ready) |
sk | 222 | fs/ncpfs/sock.c | (unsigned int)(sk->data_ready), |
sk | 225 | fs/ncpfs/sock.c | sk->data_ready = server->data_ready; |
sk | 226 | fs/ncpfs/sock.c | sk->allocation = GFP_KERNEL; |
sk | 232 | fs/ncpfs/sock.c | ncp_msg_data_ready(struct sock *sk, int len) |
sk | 234 | fs/ncpfs/sock.c | struct socket *sock = sk->socket; |
sk | 236 | fs/ncpfs/sock.c | if (!sk->dead) |
sk | 260 | fs/ncpfs/sock.c | ncp_trigger_message(sk->protinfo.af_ipx.ncp_server); |
sk | 272 | fs/ncpfs/sock.c | struct sock *sk; |
sk | 291 | fs/ncpfs/sock.c | sk = (struct sock *)(sock->data); |
sk | 293 | fs/ncpfs/sock.c | if (sk == NULL) |
sk | 300 | fs/ncpfs/sock.c | (unsigned int)(sk->data_ready)); |
sk | 302 | fs/ncpfs/sock.c | if (sk->data_ready == ncp_msg_data_ready) |
sk | 308 | fs/ncpfs/sock.c | sk->data_ready = ncp_msg_data_ready; |
sk | 309 | fs/ncpfs/sock.c | sk->protinfo.af_ipx.ncp_server = server; |
sk | 64 | fs/smbfs/sock.c | smb_data_callback(struct sock *sk,int len) |
sk | 66 | fs/smbfs/sock.c | struct socket *sock = sk->socket; |
sk | 68 | fs/smbfs/sock.c | if(!sk->dead) |
sk | 101 | fs/smbfs/sock.c | wake_up_interruptible(sk->sleep); |
sk | 112 | fs/smbfs/sock.c | struct sock *sk; |
sk | 132 | fs/smbfs/sock.c | sk = (struct sock *)(sock->data); |
sk | 134 | fs/smbfs/sock.c | if (sk == NULL) { |
sk | 141 | fs/smbfs/sock.c | (unsigned int)(sk->data_ready), |
sk | 144 | fs/smbfs/sock.c | if (sk->data_ready == smb_data_callback) { |
sk | 149 | fs/smbfs/sock.c | server->data_ready = sk->data_ready; |
sk | 150 | fs/smbfs/sock.c | sk->data_ready = smb_data_callback; |
sk | 160 | fs/smbfs/sock.c | struct sock *sk; |
sk | 179 | fs/smbfs/sock.c | sk = (struct sock *)(sock->data); |
sk | 181 | fs/smbfs/sock.c | if (sk == NULL) { |
sk | 192 | fs/smbfs/sock.c | if (sk->data_ready != smb_data_callback) { |
sk | 199 | fs/smbfs/sock.c | (unsigned int)(sk->data_ready), |
sk | 202 | fs/smbfs/sock.c | sk->data_ready = server->data_ready; |
sk | 112 | include/linux/igmp.h | extern int ip_mc_join_group(struct sock *sk, struct device *dev, unsigned long addr); |
sk | 113 | include/linux/igmp.h | extern int ip_mc_leave_group(struct sock *sk, struct device *dev,unsigned long addr); |
sk | 114 | include/linux/igmp.h | extern void ip_mc_drop_socket(struct sock *sk); |
sk | 122 | include/linux/mroute.h | extern int ipmr_ioctl(struct sock *sk, int cmd, unsigned long arg); |
sk | 123 | include/linux/mroute.h | extern void mroute_close(struct sock *sk); |
sk | 55 | include/linux/skbuff.h | struct sock *sk; /* Socket we are owned by */ |
sk | 451 | include/linux/skbuff.h | extern struct sk_buff * skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err); |
sk | 452 | include/linux/skbuff.h | extern int datagram_select(struct sock *sk, int sel_type, select_table *wait); |
sk | 455 | include/linux/skbuff.h | extern void skb_free_datagram(struct sock * sk, struct sk_buff *skb); |
sk | 164 | include/net/ax25.h | struct sock *sk; /* Backlink to socket */ |
sk | 36 | include/net/icmp.h | extern int icmp_ioctl(struct sock *sk, int cmd, |
sk | 87 | include/net/ip.h | extern int ip_ioctl(struct sock *sk, int cmd, unsigned long arg); |
sk | 104 | include/net/ip.h | extern void ip_queue_xmit(struct sock *sk, |
sk | 108 | include/net/ip.h | extern int ip_build_xmit(struct sock *sk, |
sk | 130 | include/net/ip.h | void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag); |
sk | 151 | include/net/ip.h | extern int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen); |
sk | 152 | include/net/ip.h | extern int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen); |
sk | 63 | include/net/netrom.h | struct sock *sk; /* Backlink to socket */ |
sk | 26 | include/net/raw.h | extern int raw_recvfrom(struct sock *sk, unsigned char *to, |
sk | 29 | include/net/raw.h | extern int raw_read(struct sock *sk, unsigned char *buff, |
sk | 322 | include/net/sock.h | void (*state_change)(struct sock *sk); |
sk | 323 | include/net/sock.h | void (*data_ready)(struct sock *sk,int bytes); |
sk | 324 | include/net/sock.h | void (*write_space)(struct sock *sk); |
sk | 325 | include/net/sock.h | void (*error_report)(struct sock *sk); |
sk | 335 | include/net/sock.h | void (*close)(struct sock *sk, unsigned long timeout); |
sk | 342 | include/net/sock.h | int (*connect)(struct sock *sk, |
sk | 344 | include/net/sock.h | struct sock * (*accept) (struct sock *sk, int flags); |
sk | 345 | include/net/sock.h | void (*queue_xmit)(struct sock *sk, |
sk | 348 | include/net/sock.h | void (*retransmit)(struct sock *sk, int all); |
sk | 349 | include/net/sock.h | void (*write_wakeup)(struct sock *sk); |
sk | 350 | include/net/sock.h | void (*read_wakeup)(struct sock *sk); |
sk | 355 | include/net/sock.h | int (*select)(struct sock *sk, int which, |
sk | 357 | include/net/sock.h | int (*ioctl)(struct sock *sk, int cmd, |
sk | 359 | include/net/sock.h | int (*init)(struct sock *sk); |
sk | 360 | include/net/sock.h | void (*shutdown)(struct sock *sk, int how); |
sk | 361 | include/net/sock.h | int (*setsockopt)(struct sock *sk, int level, int optname, |
sk | 363 | include/net/sock.h | int (*getsockopt)(struct sock *sk, int level, int optname, |
sk | 365 | include/net/sock.h | int (*sendmsg)(struct sock *sk, struct msghdr *msg, int len, |
sk | 367 | include/net/sock.h | int (*recvmsg)(struct sock *sk, struct msghdr *msg, int len, |
sk | 369 | include/net/sock.h | int (*bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len); |
sk | 410 | include/net/sock.h | extern void __release_sock(struct sock *sk); |
sk | 412 | include/net/sock.h | static inline void lock_sock(struct sock *sk) |
sk | 417 | include/net/sock.h | if (sk->users) { |
sk | 423 | include/net/sock.h | sk->users++; |
sk | 427 | include/net/sock.h | static inline void release_sock(struct sock *sk) |
sk | 432 | include/net/sock.h | if (sk->users == 0) { |
sk | 434 | include/net/sock.h | sk->users = 1; |
sk | 439 | include/net/sock.h | if (!--sk->users) |
sk | 440 | include/net/sock.h | __release_sock(sk); |
sk | 444 | include/net/sock.h | extern void destroy_sock(struct sock *sk); |
sk | 457 | include/net/sock.h | extern struct sk_buff *sock_wmalloc(struct sock *sk, |
sk | 460 | include/net/sock.h | extern struct sk_buff *sock_rmalloc(struct sock *sk, |
sk | 463 | include/net/sock.h | extern void sock_wfree(struct sock *sk, |
sk | 465 | include/net/sock.h | extern void sock_rfree(struct sock *sk, |
sk | 467 | include/net/sock.h | extern unsigned long sock_rspace(struct sock *sk); |
sk | 468 | include/net/sock.h | extern unsigned long sock_wspace(struct sock *sk); |
sk | 470 | include/net/sock.h | extern int sock_setsockopt(struct sock *sk, int level, |
sk | 474 | include/net/sock.h | extern int sock_getsockopt(struct sock *sk, int level, |
sk | 492 | include/net/sock.h | extern __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
sk | 494 | include/net/sock.h | if (sk->rmem_alloc + skb->truesize >= sk->rcvbuf) |
sk | 496 | include/net/sock.h | atomic_add(skb->truesize, &sk->rmem_alloc); |
sk | 497 | include/net/sock.h | skb->sk=sk; |
sk | 498 | include/net/sock.h | skb_queue_tail(&sk->receive_queue,skb); |
sk | 499 | include/net/sock.h | if(!sk->dead) |
sk | 500 | include/net/sock.h | sk->data_ready(sk,skb->len); |
sk | 508 | include/net/sock.h | extern __inline__ int sock_error(struct sock *sk) |
sk | 510 | include/net/sock.h | int err=xchg(&sk->err,0); |
sk | 128 | include/net/tcp.h | extern void tcp_shutdown (struct sock *sk, int how); |
sk | 134 | include/net/tcp.h | extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg); |
sk | 149 | include/net/tcp.h | extern void tcp_send_fin(struct sock *sk); |
sk | 152 | include/net/tcp.h | extern void tcp_send_ack(u32, u32, struct sock *sk, struct tcphdr *th, u32); |
sk | 193 | include/net/tcp.h | static __inline__ unsigned short tcp_raise_window(struct sock *sk) |
sk | 206 | include/net/tcp.h | window = sk->window - (sk->acked_seq - sk->lastwin_seq); |
sk | 210 | include/net/tcp.h | free_space = sock_rspace(sk); |
sk | 214 | include/net/tcp.h | if(sk->window_clamp) |
sk | 215 | include/net/tcp.h | free_space = min(sk->window_clamp, free_space); |
sk | 217 | include/net/tcp.h | if (sk->mss == 0) |
sk | 218 | include/net/tcp.h | sk->mss = sk->mtu; |
sk | 223 | include/net/tcp.h | sk->window, sk->acked_seq, sk->lastwin_seq); |
sk | 226 | include/net/tcp.h | if ( (free_space - window) >= min(sk->mss, MAX_WINDOW/2) ) |
sk | 227 | include/net/tcp.h | return ((free_space - window) / sk->mss) * sk->mss; |
sk | 232 | include/net/tcp.h | static __inline__ unsigned short tcp_select_window(struct sock *sk) |
sk | 234 | include/net/tcp.h | long free_space = sock_rspace(sk); |
sk | 240 | include/net/tcp.h | if (sk->window_clamp) |
sk | 241 | include/net/tcp.h | free_space = min(sk->window_clamp, free_space); |
sk | 248 | include/net/tcp.h | if (sk->mss == 0) |
sk | 249 | include/net/tcp.h | sk->mss = sk->mtu; |
sk | 251 | include/net/tcp.h | window = sk->window - (sk->acked_seq - sk->lastwin_seq); |
sk | 256 | include/net/tcp.h | sk->window, sk->acked_seq, sk->lastwin_seq); |
sk | 269 | include/net/tcp.h | if ( (free_space - window) >= min(sk->mss, MAX_WINDOW/2) ) |
sk | 270 | include/net/tcp.h | window += ((free_space - window) / sk->mss) * sk->mss; |
sk | 272 | include/net/tcp.h | sk->window = window; |
sk | 273 | include/net/tcp.h | sk->lastwin_seq = sk->acked_seq; |
sk | 275 | include/net/tcp.h | return sk->window; |
sk | 311 | include/net/tcp.h | static __inline__ void tcp_set_state(struct sock *sk, int state) |
sk | 313 | include/net/tcp.h | int oldstate = sk->state; |
sk | 315 | include/net/tcp.h | sk->state = state; |
sk | 318 | include/net/tcp.h | if(sk->debug) |
sk | 319 | include/net/tcp.h | printk("TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]); |
sk | 336 | include/net/tcp.h | reset_timer(sk, TIME_DONE, min(sk->rtt * 2, TCP_DONE_TIME)); |
sk | 37 | include/net/udp.h | __u32 daddr, int len, struct sock *sk); |
sk | 38 | include/net/udp.h | extern int udp_recvfrom(struct sock *sk, unsigned char *to, |
sk | 41 | include/net/udp.h | extern int udp_read(struct sock *sk, unsigned char *buff, |
sk | 43 | include/net/udp.h | extern int udp_connect(struct sock *sk, |
sk | 49 | include/net/udp.h | extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); |
sk | 6 | net/802/llc.c | int llc_rx_adm(struct sock *sk,struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
sk | 11 | net/802/llc.c | send_response(sk,DM|pf); |
sk | 14 | net/802/llc.c | if(sk->state!=TCP_LISTEN) |
sk | 15 | net/802/llc.c | send_response(sk, DM|pf); |
sk | 18 | net/802/llc.c | sk=ll_rx_accept(sk); |
sk | 19 | net/802/llc.c | if(sk!=NULL) |
sk | 21 | net/802/llc.c | send_response(sk, UA|pf); |
sk | 22 | net/802/llc.c | sk->llc.vs=0; |
sk | 23 | net/802/llc.c | sk->llc.vr=0; |
sk | 24 | net/802/llc.c | sk->llc.p_flag=0; |
sk | 25 | net/802/llc.c | sk->llc.remote_busy=0; |
sk | 26 | net/802/llc.c | llc_state(sk,LLC_NORMAL); |
sk | 31 | net/802/llc.c | send_response(sk, DM|PF); |
sk | 36 | net/802/llc.c | int llc_rx_setup(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
sk | 42 | net/802/llc.c | sk->llc.vs=0; |
sk | 43 | net/802/llc.c | sk->llc.vr=0; |
sk | 44 | net/802/llc.c | send_response(sk, UA|pf); |
sk | 48 | net/802/llc.c | send_response(sk, DM|pf); |
sk | 49 | net/802/llc.c | llc_error(sk,ECONNRESET); |
sk | 50 | net/802/llc.c | llc_state(sk, LLC_ADM); |
sk | 55 | net/802/llc.c | if(cmd==UA && pf==sk->llc.p_flag) |
sk | 57 | net/802/llc.c | del_timer(&sk->llc.t1); |
sk | 58 | net/802/llc.c | sk->llc.vs=0; |
sk | 59 | net/802/llc.c | llc_update_p_flag(sk,pf); |
sk | 60 | net/802/llc.c | llc_state(sk,LLC_NORMAL); |
sk | 64 | net/802/llc.c | llc_error(sk, ECONNRESET); |
sk | 65 | net/802/llc.c | llc_state(sk, LLC_ADM); |
sk | 70 | net/802/llc.c | int llc_rx_reset(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
sk | 76 | net/802/llc.c | sk->llc.vr=0; |
sk | 77 | net/802/llc.c | sk->llc.vs=0; |
sk | 78 | net/802/llc.c | send_response(sk, UA|pf); |
sk | 82 | net/802/llc.c | if(sk->llc.cause_flag==1) |
sk | 83 | net/802/llc.c | llc_shutdown(sk,SHUTDOWN_MASK); |
sk | 85 | net/802/llc.c | llc_error(sk, ECONNREFUSED); |
sk | 86 | net/802/llc.c | send_response(sk, DM|pf); |
sk | 87 | net/802/llc.c | llc_state(sk, LLC_ADM); |
sk | 94 | net/802/llc.c | if(sk->llc.p_flag==pf) |
sk | 96 | net/802/llc.c | del_timer(&sk->llc.t1); |
sk | 97 | net/802/llc.c | sk->llc.vs=0; |
sk | 98 | net/802/llc.c | sk->llc.vr=0; |
sk | 99 | net/802/llc.c | llc_update_p_flag(sk,pf); |
sk | 100 | net/802/llc.c | llc_confirm_reset(sk, sk->llc.cause_flag); |
sk | 101 | net/802/llc.c | sk->llc.remote_busy=0; |
sk | 102 | net/802/llc.c | llc_state(sk, LLC_NORMAL); |
sk | 107 | net/802/llc.c | llc_shutdown(sk, SHUTDOWN_MASK); |
sk | 108 | net/802/llc.c | llc_state(sk, LLC_ADM); |
sk | 114 | net/802/llc.c | int llc_rx_d_conn(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
sk | 120 | net/802/llc.c | llc_error(sk, ECONNRESET); |
sk | 121 | net/802/llc.c | llc_state(sk, LLC_ADM); |
sk | 126 | net/802/llc.c | llc_state(sk, LLC_D_CONN); |
sk | 129 | net/802/llc.c | send_response(sk, DM|PF); |
sk | 133 | net/802/llc.c | if(cmd==UA && pf==sk->llc.p_flag) |
sk | 135 | net/802/llc.c | del_timer(&sk->llc.t1); |
sk | 136 | net/802/llc.c | llc_state(sk, LLC_ADM); |
sk | 137 | net/802/llc.c | llc_confirm_reset(sk, sk->llc.cause_flag); |
sk | 141 | net/802/llc.c | del_timer(&sk->llc.t1); |
sk | 143 | net/802/llc.c | llc_shutdown(sk, SHUTDOWN_MASK); |
sk | 150 | net/802/llc.c | int llc_rx_error(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
sk | 156 | net/802/llc.c | sk->llc.vs=0; |
sk | 157 | net/802/llc.c | sk->llc.vr=0; |
sk | 158 | net/802/llc.c | send_response(sk, UA|pf); |
sk | 159 | net/802/llc.c | llc_error(sk,ECONNRESET); |
sk | 160 | net/802/llc.c | sk->llc.p_flag=0; |
sk | 161 | net/802/llc.c | sk->llc.remote_busy=0; |
sk | 162 | net/802/llc.c | llc_state(sk, LLC_NORMAL); |
sk | 166 | net/802/llc.c | send_response(sk, UA|pf); |
sk | 167 | net/802/llc.c | llc_shutdown(sk, SHUTDOWN_MASK); |
sk | 168 | net/802/llc.c | llc_state(sk, LLC_ADM); |
sk | 171 | net/802/llc.c | llc_resend_frmr_rsp(sk,pf); |
sk | 177 | net/802/llc.c | llc_error(sk, ECONNRESET); |
sk | 178 | net/802/llc.c | del_timer(&sk->llc.t1); |
sk | 179 | net/802/llc.c | llc_state(sk, LLC_ADM); |
sk | 183 | net/802/llc.c | send_command(sk, SABM); |
sk | 184 | net/802/llc.c | sk->llc.p_flag=pf; |
sk | 186 | net/802/llc.c | sk->llc.retry_count=0; |
sk | 187 | net/802/llc.c | sk->llc.cause_flag=0; |
sk | 188 | net/802/llc.c | llc_error(sk, EPROTO); |
sk | 189 | net/802/llc.c | llc_state(sk, LLC_RESET); |
sk | 199 | net/802/llc.c | int llc_rx_nr_shared(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
sk | 208 | net/802/llc.c | send_response(sk,DM|pf); |
sk | 209 | net/802/llc.c | llc_error(sk, ECONNRESET); |
sk | 210 | net/802/llc.c | llc_state(sk, LLC_ADM); |
sk | 214 | net/802/llc.c | send_response(sk,UA|pf); |
sk | 215 | net/802/llc.c | llc_state(sk, LLC_ADM); |
sk | 216 | net/802/llc.c | llc_shutdown(sk, SHUTDOWN_MASK); |
sk | 224 | net/802/llc.c | llc_send_frmr_response(sk, ILLEGAL_TYPE,pf); |
sk | 225 | net/802/llc.c | llc_state(sk, LLC_ERROR); |
sk | 226 | net/802/llc.c | llc_error(sk, EPROTO); |
sk | 241 | net/802/llc.c | send_command(sk, DM|pf); |
sk | 242 | net/802/llc.c | sk->llc.p_flag=pf; |
sk | 243 | net/802/llc.c | llc_start_t1(sk); |
sk | 244 | net/802/llc.c | llc_error(sk, EPROTO); |
sk | 245 | net/802/llc.c | sk->llc.cause_flag=0; |
sk | 246 | net/802/llc.c | llc_state(sk, LLC_D_CONN); |
sk | 250 | net/802/llc.c | llc_state(sk, LLC_ADM); |
sk | 251 | net/802/llc.c | llc_error(sk, ECONNREFUSED); |
sk | 260 | net/802/llc.c | llc_send_frmr_response(sk, UNEXPECTED_CONTROL, pf); |
sk | 261 | net/802/llc.c | llc_state(sk, LLC_ERROR); |
sk | 262 | net/802/llc.c | llc_error(sk, EPROTO); |
sk | 264 | net/802/llc.c | else if(pf==1 && sk->llc.p_flag==0) |
sk | 266 | net/802/llc.c | llc_send_frmr_response(sk, UNEXPECTED_RESPONSE, pf); |
sk | 267 | net/802/llc.c | llc_state(sk, LLC_ERROR); |
sk | 268 | net/802/llc.c | llc_error(sk, EPROTO); |
sk | 272 | net/802/llc.c | llc_send_frmr_response(sk, ILLEGAL_TYPE,pf); |
sk | 273 | net/802/llc.c | llc_state(sk, LLC_ERROR); |
sk | 274 | net/802/llc.c | llc_error(sk, EPROTO); |
sk | 288 | net/802/llc.c | int llc_rx_normal(struct sock *sk, struct sk_buff *skb, int type, int cmd, int pf, int nr, int ns) |
sk | 290 | net/802/llc.c | if(llc_rx_nr_shared(sk, skb, type, cmd, pf, nr, ns)) |
sk | 294 | net/802/llc.c | if(llc_invalid_ns(sk,ns)) |
sk | 296 | net/802/llc.c | if((type==RESP && sk->llc.p_flag==pf)||(type==CMD && pf==0 && sk->llc.p_flag==0)) |
sk | 298 | net/802/llc.c | llc_command(sk, REJ|PF); |
sk | 299 | net/802/llc.c | llc_ack_frames(sk,nr); /* Ack frames and update N(R) */ |
sk | 300 | net/802/llc.c | sk->llc.p_flag=PF; |
sk | 301 | net/802/llc.c | llc_state(sk, LLC_REJECT); |
sk | 302 | net/802/llc.c | sk->llc.retry_count=0; |
sk | 303 | net/802/llc.c | llc_start_t1(sk); |
sk | 304 | net/802/llc.c | sk->llc.remote_busy=0; |
sk | 306 | net/802/llc.c | else if((type==CMD && !pf && sk->llc.p_flag==1) || (type==RESP && !pf && sk->llc.p_flag==1)) |
sk | 309 | net/802/llc.c | llc_response(sk, REJ); |
sk | 311 | net/802/llc.c | llc_command(sk, REJ); |
sk | 312 | net/802/llc.c | llc_ack_frames(sk,nr); |
sk | 313 | net/802/llc.c | sk->llc.retry_count=0; |
sk | 314 | net/802/llc.c | llc_state(sk, LLC_REJECT); |
sk | 315 | net/802/llc.c | llc_start_t1(sk); |
sk | 319 | net/802/llc.c | llc_response(sk, REJ|PF); |
sk | 320 | net/802/llc.c | llc_ack_frames(sk,nr); |
sk | 321 | net/802/llc.c | sk->llc.retry_count=0; |
sk | 322 | net/802/llc.c | llc_start_t1(sk); |
sk | 331 | net/802/llc.c | if(sk->llc.p_flag==pf && !(type==CMD && pf)) |
sk | 333 | net/802/llc.c | sk->llc.vr=(sk->llc.vr+1)&7; |
sk | 334 | net/802/llc.c | llc_queue_rr_cmd(sk, PF); |
sk | 335 | net/802/llc.c | sk->llc.retry_count=0; |
sk | 336 | net/802/llc.c | llc_start_t1(sk); |
sk | 337 | net/802/llc.c | sk->llc.p_flag=1; |
sk | 338 | net/802/llc.c | llc_ack_frames(sk,nr); |
sk | 339 | net/802/llc.c | sk->llc.remote_busy=0; |
sk | 341 | net/802/llc.c | else if(sk->llc.p_flag!=pf) |
sk | 343 | net/802/llc.c | sk->llc.vr=(sk->llc.vr+1)&7; |
sk | 345 | net/802/llc.c | llc_queue_rr_resp(sk, 0); |
sk | 347 | net/802/llc.c | llc_queue_rr_cmd(sk, 0); |
sk | 348 | net/802/llc.c | if(sk->llc.nr!=nr) |
sk | 350 | net/802/llc.c | llc_ack_frames(sk,nr); |
sk | 351 | net/802/llc.c | llc_reset_t1(sk); |
sk | 356 | net/802/llc.c | sk->llc.vr=(sk->llc.vr+1)&7; |
sk | 357 | net/802/llc.c | llc_queue_rr_resp(sk,PF); |
sk | 358 | net/802/llc.c | if(sk->llc.nr!=nr) |
sk | 360 | net/802/llc.c | llc_ack_frames(sk,nr); |
sk | 361 | net/802/llc.c | llc_reset_t1(sk); |
sk | 364 | net/802/llc.c | llc_queue_data(sk,skb); |
sk | 370 | net/802/llc.c | if(type==CMD || (type==RESP && (!pf || pf==1 && sk->llc.p_flag==1))) |
sk | 372 | net/802/llc.c | llc_update_p_flag(sk,pf); |
sk | 373 | net/802/llc.c | if(sk->llc.nr!=nr) |
sk | 375 | net/802/llc.c | llc_ack_frames(sk,nr); |
sk | 376 | net/802/llc.c | llc_reset_t1(sk); |
sk | 379 | net/802/llc.c | sk->llc.remote_busy=0; |
sk | 381 | net/802/llc.c | { sk->llc.remote_busy=1; |
sk | 382 | net/802/llc.c | if(!llc_t1_running(sk)) |
sk | 383 | net/802/llc.c | llc_start_t1(sk); |
sk | 389 | net/802/llc.c | llc_queue_rr_resp(sk,PF); |
sk | 392 | net/802/llc.c | send_response(sk, RR|PF); |
sk | 393 | net/802/llc.c | if(!llc_t1_running(sk)) |
sk | 394 | net/802/llc.c | llc_start_t1(sk); |
sk | 396 | net/802/llc.c | if(sk->llc.nr!=nr) |
sk | 398 | net/802/llc.c | llc_ack_frames(sk,nr); |
sk | 399 | net/802/llc.c | llc_reset_t1(sk); |
sk | 402 | net/802/llc.c | sk->llc.remote_busy=0; |
sk | 404 | net/802/llc.c | sk->llc.remote_busy=1; |
sk | 43 | net/802/p8022.c | skb->sk = NULL; |
sk | 67 | net/802/psnap.c | skb->sk = NULL; |
sk | 469 | net/appletalk/aarp.c | if(skb->sk==NULL) |
sk | 472 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, skb->sk->priority); |
sk | 499 | net/appletalk/aarp.c | if(skb->sk==NULL) |
sk | 502 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, skb->sk->priority); |
sk | 515 | net/appletalk/aarp.c | if(skb->sk==NULL) |
sk | 518 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, skb->sk->priority); |
sk | 623 | net/appletalk/aarp.c | if(skb->sk==NULL) |
sk | 626 | net/appletalk/aarp.c | dev_queue_xmit(skb, skb->dev, skb->sk->priority); |
sk | 97 | net/appletalk/ddp.c | static void atalk_remove_socket(atalk_socket *sk) |
sk | 106 | net/appletalk/ddp.c | if(s==sk) |
sk | 114 | net/appletalk/ddp.c | if(s->next==sk) |
sk | 116 | net/appletalk/ddp.c | s->next=sk->next; |
sk | 125 | net/appletalk/ddp.c | static void atalk_insert_socket(atalk_socket *sk) |
sk | 130 | net/appletalk/ddp.c | sk->next=atalk_socket_list; |
sk | 131 | net/appletalk/ddp.c | atalk_socket_list=sk; |
sk | 198 | net/appletalk/ddp.c | static void atalk_destroy_socket(atalk_socket *sk); |
sk | 209 | net/appletalk/ddp.c | static void atalk_destroy_socket(atalk_socket *sk) |
sk | 212 | net/appletalk/ddp.c | atalk_remove_socket(sk); |
sk | 214 | net/appletalk/ddp.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
sk | 219 | net/appletalk/ddp.c | if(sk->wmem_alloc == 0 && sk->rmem_alloc == 0 && sk->dead) |
sk | 221 | net/appletalk/ddp.c | kfree_s(sk,sizeof(*sk)); |
sk | 229 | net/appletalk/ddp.c | init_timer(&sk->timer); |
sk | 230 | net/appletalk/ddp.c | sk->timer.expires=jiffies+10*HZ; |
sk | 231 | net/appletalk/ddp.c | sk->timer.function=atalk_destroy_timer; |
sk | 232 | net/appletalk/ddp.c | sk->timer.data = (unsigned long)sk; |
sk | 233 | net/appletalk/ddp.c | add_timer(&sk->timer); |
sk | 1018 | net/appletalk/ddp.c | atalk_socket *sk; |
sk | 1021 | net/appletalk/ddp.c | sk=(atalk_socket *)sock->data; |
sk | 1042 | net/appletalk/ddp.c | return sock_setsockopt(sk,level,optname,optval,optlen); |
sk | 1057 | net/appletalk/ddp.c | atalk_socket *sk; |
sk | 1061 | net/appletalk/ddp.c | sk=(atalk_socket *)sock->data; |
sk | 1075 | net/appletalk/ddp.c | return sock_getsockopt(sk,level,optname,optval,optlen); |
sk | 1104 | net/appletalk/ddp.c | static void def_callback1(struct sock *sk) |
sk | 1106 | net/appletalk/ddp.c | if(!sk->dead) |
sk | 1107 | net/appletalk/ddp.c | wake_up_interruptible(sk->sleep); |
sk | 1110 | net/appletalk/ddp.c | static void def_callback2(struct sock *sk, int len) |
sk | 1112 | net/appletalk/ddp.c | if(!sk->dead) |
sk | 1114 | net/appletalk/ddp.c | wake_up_interruptible(sk->sleep); |
sk | 1115 | net/appletalk/ddp.c | sock_wake_async(sk->socket,0); |
sk | 1126 | net/appletalk/ddp.c | atalk_socket *sk; |
sk | 1127 | net/appletalk/ddp.c | sk=(atalk_socket *)kmalloc(sizeof(*sk),GFP_KERNEL); |
sk | 1128 | net/appletalk/ddp.c | if(sk==NULL) |
sk | 1139 | net/appletalk/ddp.c | kfree_s((void *)sk,sizeof(*sk)); |
sk | 1145 | net/appletalk/ddp.c | sk->dead=0; |
sk | 1146 | net/appletalk/ddp.c | sk->next=NULL; |
sk | 1147 | net/appletalk/ddp.c | sk->broadcast=0; |
sk | 1148 | net/appletalk/ddp.c | sk->no_check=0; /* Checksums on by default */ |
sk | 1149 | net/appletalk/ddp.c | sk->allocation=GFP_KERNEL; |
sk | 1150 | net/appletalk/ddp.c | sk->rcvbuf=SK_RMEM_MAX; |
sk | 1151 | net/appletalk/ddp.c | sk->sndbuf=SK_WMEM_MAX; |
sk | 1152 | net/appletalk/ddp.c | sk->pair=NULL; |
sk | 1153 | net/appletalk/ddp.c | sk->wmem_alloc=0; |
sk | 1154 | net/appletalk/ddp.c | sk->rmem_alloc=0; |
sk | 1155 | net/appletalk/ddp.c | sk->users=0; |
sk | 1156 | net/appletalk/ddp.c | sk->proc=0; |
sk | 1157 | net/appletalk/ddp.c | sk->priority=1; |
sk | 1158 | net/appletalk/ddp.c | sk->shutdown=0; |
sk | 1159 | net/appletalk/ddp.c | sk->prot=NULL; /* So we use default free mechanisms */ |
sk | 1160 | net/appletalk/ddp.c | sk->broadcast=0; |
sk | 1161 | net/appletalk/ddp.c | sk->err=0; |
sk | 1162 | net/appletalk/ddp.c | skb_queue_head_init(&sk->receive_queue); |
sk | 1163 | net/appletalk/ddp.c | skb_queue_head_init(&sk->write_queue); |
sk | 1164 | net/appletalk/ddp.c | sk->send_head=NULL; |
sk | 1165 | net/appletalk/ddp.c | skb_queue_head_init(&sk->back_log); |
sk | 1166 | net/appletalk/ddp.c | sk->state=TCP_CLOSE; |
sk | 1167 | net/appletalk/ddp.c | sk->socket=sock; |
sk | 1168 | net/appletalk/ddp.c | sk->type=sock->type; |
sk | 1169 | net/appletalk/ddp.c | sk->debug=0; |
sk | 1171 | net/appletalk/ddp.c | sk->protinfo.af_at.src_net=0; |
sk | 1172 | net/appletalk/ddp.c | sk->protinfo.af_at.src_node=0; |
sk | 1173 | net/appletalk/ddp.c | sk->protinfo.af_at.src_port=0; |
sk | 1175 | net/appletalk/ddp.c | sk->protinfo.af_at.dest_net=0; |
sk | 1176 | net/appletalk/ddp.c | sk->protinfo.af_at.dest_node=0; |
sk | 1177 | net/appletalk/ddp.c | sk->protinfo.af_at.dest_port=0; |
sk | 1179 | net/appletalk/ddp.c | sk->mtu=DDP_MAXSZ; |
sk | 1183 | net/appletalk/ddp.c | sock->data=(void *)sk; |
sk | 1184 | net/appletalk/ddp.c | sk->sleep=sock->wait; |
sk | 1187 | net/appletalk/ddp.c | sk->state_change=def_callback1; |
sk | 1188 | net/appletalk/ddp.c | sk->data_ready=def_callback2; |
sk | 1189 | net/appletalk/ddp.c | sk->write_space=def_callback1; |
sk | 1190 | net/appletalk/ddp.c | sk->error_report=def_callback1; |
sk | 1192 | net/appletalk/ddp.c | sk->zapped=1; |
sk | 1211 | net/appletalk/ddp.c | atalk_socket *sk=(atalk_socket *)sock->data; |
sk | 1212 | net/appletalk/ddp.c | if(sk==NULL) |
sk | 1214 | net/appletalk/ddp.c | if(!sk->dead) |
sk | 1215 | net/appletalk/ddp.c | sk->state_change(sk); |
sk | 1216 | net/appletalk/ddp.c | sk->dead=1; |
sk | 1218 | net/appletalk/ddp.c | atalk_destroy_socket(sk); |
sk | 1237 | net/appletalk/ddp.c | static int atalk_autobind(atalk_socket *sk) |
sk | 1245 | net/appletalk/ddp.c | sk->protinfo.af_at.src_net = sat.sat_addr.s_net = ap->s_net; |
sk | 1246 | net/appletalk/ddp.c | sk->protinfo.af_at.src_node = sat.sat_addr.s_node = ap->s_node; |
sk | 1250 | net/appletalk/ddp.c | sk->protinfo.af_at.src_port=n; |
sk | 1251 | net/appletalk/ddp.c | atalk_insert_socket(sk); |
sk | 1252 | net/appletalk/ddp.c | sk->zapped=0; |
sk | 1262 | net/appletalk/ddp.c | atalk_socket *sk; |
sk | 1265 | net/appletalk/ddp.c | sk=(atalk_socket *)sock->data; |
sk | 1267 | net/appletalk/ddp.c | if(sk->zapped==0) |
sk | 1281 | net/appletalk/ddp.c | sk->protinfo.af_at.src_net=addr->sat_addr.s_net=ap->s_net; |
sk | 1282 | net/appletalk/ddp.c | sk->protinfo.af_at.src_node=addr->sat_addr.s_node=ap->s_node; |
sk | 1288 | net/appletalk/ddp.c | sk->protinfo.af_at.src_net=addr->sat_addr.s_net; |
sk | 1289 | net/appletalk/ddp.c | sk->protinfo.af_at.src_node=addr->sat_addr.s_node; |
sk | 1297 | net/appletalk/ddp.c | sk->protinfo.af_at.src_port=addr->sat_port=n; |
sk | 1300 | net/appletalk/ddp.c | sk->protinfo.af_at.src_port=addr->sat_port; |
sk | 1305 | net/appletalk/ddp.c | atalk_insert_socket(sk); |
sk | 1306 | net/appletalk/ddp.c | sk->zapped=0; |
sk | 1317 | net/appletalk/ddp.c | atalk_socket *sk=(atalk_socket *)sock->data; |
sk | 1320 | net/appletalk/ddp.c | sk->state = TCP_CLOSE; |
sk | 1330 | net/appletalk/ddp.c | if(addr->sat_addr.s_node==ATADDR_BCAST && !sk->broadcast) |
sk | 1333 | net/appletalk/ddp.c | if(sk->zapped) |
sk | 1335 | net/appletalk/ddp.c | if(atalk_autobind(sk)<0) |
sk | 1342 | net/appletalk/ddp.c | sk->protinfo.af_at.dest_port=addr->sat_port; |
sk | 1343 | net/appletalk/ddp.c | sk->protinfo.af_at.dest_net=addr->sat_addr.s_net; |
sk | 1344 | net/appletalk/ddp.c | sk->protinfo.af_at.dest_node=addr->sat_addr.s_node; |
sk | 1346 | net/appletalk/ddp.c | sk->state=TCP_ESTABLISHED; |
sk | 1379 | net/appletalk/ddp.c | atalk_socket *sk; |
sk | 1381 | net/appletalk/ddp.c | sk=(atalk_socket *)sock->data; |
sk | 1382 | net/appletalk/ddp.c | if(sk->zapped) |
sk | 1384 | net/appletalk/ddp.c | if(atalk_autobind(sk)<0) |
sk | 1392 | net/appletalk/ddp.c | if(sk->state!=TCP_ESTABLISHED) |
sk | 1394 | net/appletalk/ddp.c | sat.sat_addr.s_net=sk->protinfo.af_at.dest_net; |
sk | 1395 | net/appletalk/ddp.c | sat.sat_addr.s_node=sk->protinfo.af_at.dest_node; |
sk | 1396 | net/appletalk/ddp.c | sat.sat_port=sk->protinfo.af_at.dest_port; |
sk | 1400 | net/appletalk/ddp.c | sat.sat_addr.s_net=sk->protinfo.af_at.src_net; |
sk | 1401 | net/appletalk/ddp.c | sat.sat_addr.s_node=sk->protinfo.af_at.src_node; |
sk | 1402 | net/appletalk/ddp.c | sat.sat_port=sk->protinfo.af_at.src_port; |
sk | 1572 | net/appletalk/ddp.c | skb->sk = sock; |
sk | 1576 | net/appletalk/ddp.c | skb->sk=NULL; |
sk | 1648 | net/appletalk/ddp.c | atalk_socket *sk=(atalk_socket *)sock->data; |
sk | 1667 | net/appletalk/ddp.c | if(sk->zapped) |
sk | 1670 | net/appletalk/ddp.c | if(atalk_autobind(sk)<0) |
sk | 1679 | net/appletalk/ddp.c | if(usat->sat_addr.s_node==ATADDR_BCAST && !sk->broadcast) |
sk | 1685 | net/appletalk/ddp.c | if(sk->state!=TCP_ESTABLISHED) |
sk | 1689 | net/appletalk/ddp.c | usat->sat_port=sk->protinfo.af_at.dest_port; |
sk | 1690 | net/appletalk/ddp.c | usat->sat_addr.s_node=sk->protinfo.af_at.dest_node; |
sk | 1691 | net/appletalk/ddp.c | usat->sat_addr.s_net=sk->protinfo.af_at.dest_net; |
sk | 1696 | net/appletalk/ddp.c | if(sk->debug) |
sk | 1697 | net/appletalk/ddp.c | printk("SK %p: Got address.\n",sk); |
sk | 1712 | net/appletalk/ddp.c | at_hint.s_net=sk->protinfo.af_at.src_net; |
sk | 1719 | net/appletalk/ddp.c | if(sk->debug) |
sk | 1720 | net/appletalk/ddp.c | printk("SK %p: Size needed %d, device %s\n", sk, size, dev->name); |
sk | 1724 | net/appletalk/ddp.c | skb = sock_alloc_send_skb(sk, size, 0, 0 , &err); |
sk | 1728 | net/appletalk/ddp.c | skb->sk=sk; |
sk | 1736 | net/appletalk/ddp.c | if(sk->debug) |
sk | 1737 | net/appletalk/ddp.c | printk("SK %p: Begin build.\n", sk); |
sk | 1751 | net/appletalk/ddp.c | ddp->deh_snet=sk->protinfo.af_at.src_net; |
sk | 1753 | net/appletalk/ddp.c | ddp->deh_snode=sk->protinfo.af_at.src_node; |
sk | 1755 | net/appletalk/ddp.c | ddp->deh_sport=sk->protinfo.af_at.src_port; |
sk | 1757 | net/appletalk/ddp.c | if(sk->debug) |
sk | 1758 | net/appletalk/ddp.c | printk("SK %p: Copy user data (%d bytes).\n", sk, len); |
sk | 1762 | net/appletalk/ddp.c | if(sk->no_check==1) |
sk | 1790 | net/appletalk/ddp.c | if(sk->debug) |
sk | 1791 | net/appletalk/ddp.c | printk("SK %p: send out(copy).\n", sk); |
sk | 1801 | net/appletalk/ddp.c | if(sk->debug) |
sk | 1802 | net/appletalk/ddp.c | printk("SK %p: Loop back.\n", sk); |
sk | 1804 | net/appletalk/ddp.c | atomic_sub(skb->truesize, &sk->wmem_alloc); |
sk | 1806 | net/appletalk/ddp.c | skb->sk = NULL; |
sk | 1815 | net/appletalk/ddp.c | if(sk->debug) |
sk | 1816 | net/appletalk/ddp.c | printk("SK %p: send out.\n", sk); |
sk | 1827 | net/appletalk/ddp.c | if(sk->debug) |
sk | 1828 | net/appletalk/ddp.c | printk("SK %p: Done write (%d).\n", sk, len); |
sk | 1835 | net/appletalk/ddp.c | atalk_socket *sk=(atalk_socket *)sock->data; |
sk | 1842 | net/appletalk/ddp.c | if(sk->err) |
sk | 1843 | net/appletalk/ddp.c | return sock_error(sk); |
sk | 1848 | net/appletalk/ddp.c | skb=skb_recv_datagram(sk,flags,noblock,&er); |
sk | 1853 | net/appletalk/ddp.c | if(sk->type==SOCK_RAW) |
sk | 1874 | net/appletalk/ddp.c | skb_free_datagram(sk, skb); |
sk | 1879 | net/appletalk/ddp.c | static int atalk_shutdown(struct socket *sk,int how) |
sk | 1886 | net/appletalk/ddp.c | atalk_socket *sk=(atalk_socket *)sock->data; |
sk | 1888 | net/appletalk/ddp.c | return datagram_select(sk,sel_type,wait); |
sk | 1899 | net/appletalk/ddp.c | atalk_socket *sk=(atalk_socket *)sock->data; |
sk | 1907 | net/appletalk/ddp.c | amount=sk->sndbuf-sk->wmem_alloc; |
sk | 1915 | net/appletalk/ddp.c | if((skb=skb_peek(&sk->receive_queue))!=NULL) |
sk | 1920 | net/appletalk/ddp.c | if (sk) |
sk | 1922 | net/appletalk/ddp.c | if(sk->stamp.tv_sec==0) |
sk | 1927 | net/appletalk/ddp.c | memcpy_tofs((void *)arg,&sk->stamp,sizeof(struct timeval)); |
sk | 222 | net/ax25/af_ax25.c | if (s->sk != NULL) { |
sk | 223 | net/ax25/af_ax25.c | s->sk->state = TCP_CLOSE; |
sk | 224 | net/ax25/af_ax25.c | s->sk->err = ENETUNREACH; |
sk | 225 | net/ax25/af_ax25.c | if (!s->sk->dead) |
sk | 226 | net/ax25/af_ax25.c | s->sk->state_change(s->sk); |
sk | 227 | net/ax25/af_ax25.c | s->sk->dead = 1; |
sk | 285 | net/ax25/af_ax25.c | if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 && s->sk->type == type && s->sk->state == TCP_LISTEN) { |
sk | 289 | net/ax25/af_ax25.c | return s->sk; |
sk | 310 | net/ax25/af_ax25.c | if (s->sk != NULL && ax25cmp(&s->source_addr, my_addr) == 0 && ax25cmp(&s->dest_addr, dest_addr) == 0 && s->sk->type == type) { |
sk | 312 | net/ax25/af_ax25.c | return s->sk; |
sk | 334 | net/ax25/af_ax25.c | if (s->sk != NULL && s->sk->type != SOCK_SEQPACKET) |
sk | 359 | net/ax25/af_ax25.c | if (s->sk != NULL && ax25cmp(&s->source_addr, addr) == 0 && s->sk->type == SOCK_RAW) { |
sk | 361 | net/ax25/af_ax25.c | return s->sk; |
sk | 370 | net/ax25/af_ax25.c | static void ax25_send_to_raw(struct sock *sk, struct sk_buff *skb, int proto) |
sk | 374 | net/ax25/af_ax25.c | while (sk != NULL) { |
sk | 375 | net/ax25/af_ax25.c | if (sk->type == SOCK_RAW && sk->protocol == proto && sk->rmem_alloc <= sk->rcvbuf) { |
sk | 379 | net/ax25/af_ax25.c | copy->sk = sk; |
sk | 380 | net/ax25/af_ax25.c | atomic_add(copy->truesize, &sk->rmem_alloc); |
sk | 381 | net/ax25/af_ax25.c | skb_queue_tail(&sk->receive_queue, copy); |
sk | 382 | net/ax25/af_ax25.c | if (!sk->dead) |
sk | 383 | net/ax25/af_ax25.c | sk->data_ready(sk, skb->len); |
sk | 386 | net/ax25/af_ax25.c | sk = sk->next; |
sk | 422 | net/ax25/af_ax25.c | if (ax25->sk != NULL) { |
sk | 423 | net/ax25/af_ax25.c | while ((skb = skb_dequeue(&ax25->sk->receive_queue)) != NULL) { |
sk | 424 | net/ax25/af_ax25.c | if (skb->sk != ax25->sk) { /* A pending connection */ |
sk | 425 | net/ax25/af_ax25.c | skb->sk->dead = 1; /* Queue the unaccepted socket for death */ |
sk | 426 | net/ax25/af_ax25.c | ax25_set_timer(skb->sk->ax25); |
sk | 427 | net/ax25/af_ax25.c | skb->sk->ax25->state = AX25_STATE_0; |
sk | 434 | net/ax25/af_ax25.c | if (ax25->sk != NULL) { |
sk | 435 | net/ax25/af_ax25.c | if (ax25->sk->wmem_alloc || ax25->sk->rmem_alloc) { /* Defer: outstanding buffers */ |
sk | 447 | net/ax25/af_ax25.c | kfree_s(ax25->sk, sizeof(*ax25->sk)); |
sk | 563 | net/ax25/af_ax25.c | if (ax25->sk != NULL) { |
sk | 564 | net/ax25/af_ax25.c | ax25->sk->state = TCP_CLOSE; |
sk | 565 | net/ax25/af_ax25.c | ax25->sk->err = ENETRESET; |
sk | 566 | net/ax25/af_ax25.c | if (!ax25->sk->dead) |
sk | 567 | net/ax25/af_ax25.c | ax25->sk->state_change(ax25->sk); |
sk | 568 | net/ax25/af_ax25.c | ax25->sk->dead = 1; |
sk | 695 | net/ax25/af_ax25.c | ax25->sk = NULL; |
sk | 767 | net/ax25/af_ax25.c | if (ax25->sk != NULL && ax25->sk->type != SOCK_SEQPACKET) |
sk | 861 | net/ax25/af_ax25.c | struct sock *sk; |
sk | 864 | net/ax25/af_ax25.c | sk = (struct sock *)sock->data; |
sk | 867 | net/ax25/af_ax25.c | return sock_setsockopt(sk, level, optname, optval, optlen); |
sk | 882 | net/ax25/af_ax25.c | if (sk->ax25->modulus == MODULUS) { |
sk | 889 | net/ax25/af_ax25.c | sk->ax25->window = opt; |
sk | 895 | net/ax25/af_ax25.c | sk->ax25->rtt = (opt * PR_SLOWHZ) / 2; |
sk | 901 | net/ax25/af_ax25.c | sk->ax25->t2 = opt * PR_SLOWHZ; |
sk | 907 | net/ax25/af_ax25.c | sk->ax25->n2 = opt; |
sk | 913 | net/ax25/af_ax25.c | sk->ax25->t3 = opt * PR_SLOWHZ; |
sk | 919 | net/ax25/af_ax25.c | sk->ax25->idle = opt * PR_SLOWHZ * 60; |
sk | 923 | net/ax25/af_ax25.c | sk->ax25->backoff = opt ? 1 : 0; |
sk | 927 | net/ax25/af_ax25.c | sk->ax25->modulus = opt ? EMODULUS : MODULUS; |
sk | 931 | net/ax25/af_ax25.c | sk->ax25->hdrincl = opt ? 1 : 0; |
sk | 937 | net/ax25/af_ax25.c | sk->ax25->paclen = opt; |
sk | 948 | net/ax25/af_ax25.c | struct sock *sk; |
sk | 952 | net/ax25/af_ax25.c | sk = (struct sock *)sock->data; |
sk | 955 | net/ax25/af_ax25.c | return sock_getsockopt(sk, level, optname, optval, optlen); |
sk | 962 | net/ax25/af_ax25.c | val = sk->ax25->window; |
sk | 966 | net/ax25/af_ax25.c | val = (sk->ax25->t1 * 2) / PR_SLOWHZ; |
sk | 970 | net/ax25/af_ax25.c | val = sk->ax25->t2 / PR_SLOWHZ; |
sk | 974 | net/ax25/af_ax25.c | val = sk->ax25->n2; |
sk | 978 | net/ax25/af_ax25.c | val = sk->ax25->t3 / PR_SLOWHZ; |
sk | 982 | net/ax25/af_ax25.c | val = sk->ax25->idle / (PR_SLOWHZ * 60); |
sk | 986 | net/ax25/af_ax25.c | val = sk->ax25->backoff; |
sk | 990 | net/ax25/af_ax25.c | val = (sk->ax25->modulus == EMODULUS); |
sk | 994 | net/ax25/af_ax25.c | val = sk->ax25->hdrincl; |
sk | 998 | net/ax25/af_ax25.c | val = sk->ax25->paclen; |
sk | 1020 | net/ax25/af_ax25.c | struct sock *sk = (struct sock *)sock->data; |
sk | 1022 | net/ax25/af_ax25.c | if (sk->type == SOCK_SEQPACKET && sk->state != TCP_LISTEN) { |
sk | 1023 | net/ax25/af_ax25.c | sk->max_ack_backlog = backlog; |
sk | 1024 | net/ax25/af_ax25.c | sk->state = TCP_LISTEN; |
sk | 1031 | net/ax25/af_ax25.c | static void def_callback1(struct sock *sk) |
sk | 1033 | net/ax25/af_ax25.c | if (!sk->dead) |
sk | 1034 | net/ax25/af_ax25.c | wake_up_interruptible(sk->sleep); |
sk | 1037 | net/ax25/af_ax25.c | static void def_callback2(struct sock *sk, int len) |
sk | 1039 | net/ax25/af_ax25.c | if (!sk->dead) |
sk | 1040 | net/ax25/af_ax25.c | wake_up_interruptible(sk->sleep); |
sk | 1045 | net/ax25/af_ax25.c | struct sock *sk; |
sk | 1078 | net/ax25/af_ax25.c | if ((sk = (struct sock *)kmalloc(sizeof(*sk), GFP_ATOMIC)) == NULL) |
sk | 1082 | net/ax25/af_ax25.c | kfree_s(sk, sizeof(*sk)); |
sk | 1086 | net/ax25/af_ax25.c | skb_queue_head_init(&sk->receive_queue); |
sk | 1087 | net/ax25/af_ax25.c | skb_queue_head_init(&sk->write_queue); |
sk | 1088 | net/ax25/af_ax25.c | skb_queue_head_init(&sk->back_log); |
sk | 1090 | net/ax25/af_ax25.c | sk->socket = sock; |
sk | 1091 | net/ax25/af_ax25.c | sk->type = sock->type; |
sk | 1092 | net/ax25/af_ax25.c | sk->protocol = protocol; |
sk | 1093 | net/ax25/af_ax25.c | sk->dead = 0; |
sk | 1094 | net/ax25/af_ax25.c | sk->next = NULL; |
sk | 1095 | net/ax25/af_ax25.c | sk->broadcast = 0; |
sk | 1096 | net/ax25/af_ax25.c | sk->allocation = GFP_KERNEL; |
sk | 1097 | net/ax25/af_ax25.c | sk->rcvbuf = SK_RMEM_MAX; |
sk | 1098 | net/ax25/af_ax25.c | sk->sndbuf = SK_WMEM_MAX; |
sk | 1099 | net/ax25/af_ax25.c | sk->wmem_alloc = 0; |
sk | 1100 | net/ax25/af_ax25.c | sk->rmem_alloc = 0; |
sk | 1101 | net/ax25/af_ax25.c | sk->users = 0; |
sk | 1102 | net/ax25/af_ax25.c | sk->debug = 0; |
sk | 1103 | net/ax25/af_ax25.c | sk->destroy = 0; |
sk | 1104 | net/ax25/af_ax25.c | sk->prot = NULL; /* So we use default free mechanisms */ |
sk | 1105 | net/ax25/af_ax25.c | sk->err = 0; |
sk | 1106 | net/ax25/af_ax25.c | sk->localroute = 0; |
sk | 1107 | net/ax25/af_ax25.c | sk->send_head = NULL; |
sk | 1108 | net/ax25/af_ax25.c | sk->state = TCP_CLOSE; |
sk | 1109 | net/ax25/af_ax25.c | sk->shutdown = 0; |
sk | 1110 | net/ax25/af_ax25.c | sk->priority = SOPRI_NORMAL; |
sk | 1111 | net/ax25/af_ax25.c | sk->ack_backlog = 0; |
sk | 1112 | net/ax25/af_ax25.c | sk->mtu = AX25_MTU; /* 256 */ |
sk | 1113 | net/ax25/af_ax25.c | sk->zapped = 1; |
sk | 1115 | net/ax25/af_ax25.c | sk->state_change = def_callback1; |
sk | 1116 | net/ax25/af_ax25.c | sk->data_ready = def_callback2; |
sk | 1117 | net/ax25/af_ax25.c | sk->write_space = def_callback1; |
sk | 1118 | net/ax25/af_ax25.c | sk->error_report = def_callback1; |
sk | 1121 | net/ax25/af_ax25.c | sock->data = (void *)sk; |
sk | 1122 | net/ax25/af_ax25.c | sk->sleep = sock->wait; |
sk | 1125 | net/ax25/af_ax25.c | ax25->sk = sk; |
sk | 1126 | net/ax25/af_ax25.c | sk->ax25 = ax25; |
sk | 1133 | net/ax25/af_ax25.c | struct sock *sk; |
sk | 1136 | net/ax25/af_ax25.c | if ((sk = (struct sock *)kmalloc(sizeof(*sk), GFP_ATOMIC)) == NULL) |
sk | 1140 | net/ax25/af_ax25.c | kfree_s(sk, sizeof(*sk)); |
sk | 1146 | net/ax25/af_ax25.c | sk->type = osk->type; |
sk | 1147 | net/ax25/af_ax25.c | sk->socket = osk->socket; |
sk | 1155 | net/ax25/af_ax25.c | kfree_s((void *)sk, sizeof(*sk)); |
sk | 1160 | net/ax25/af_ax25.c | skb_queue_head_init(&sk->receive_queue); |
sk | 1161 | net/ax25/af_ax25.c | skb_queue_head_init(&sk->write_queue); |
sk | 1162 | net/ax25/af_ax25.c | skb_queue_head_init(&sk->back_log); |
sk | 1164 | net/ax25/af_ax25.c | sk->dead = 0; |
sk | 1165 | net/ax25/af_ax25.c | sk->next = NULL; |
sk | 1166 | net/ax25/af_ax25.c | sk->priority = osk->priority; |
sk | 1167 | net/ax25/af_ax25.c | sk->broadcast = 0; |
sk | 1168 | net/ax25/af_ax25.c | sk->protocol = osk->protocol; |
sk | 1169 | net/ax25/af_ax25.c | sk->rcvbuf = osk->rcvbuf; |
sk | 1170 | net/ax25/af_ax25.c | sk->sndbuf = osk->sndbuf; |
sk | 1171 | net/ax25/af_ax25.c | sk->wmem_alloc = 0; |
sk | 1172 | net/ax25/af_ax25.c | sk->rmem_alloc = 0; |
sk | 1173 | net/ax25/af_ax25.c | sk->users = 0; |
sk | 1174 | net/ax25/af_ax25.c | sk->ack_backlog = 0; |
sk | 1175 | net/ax25/af_ax25.c | sk->destroy = 0; |
sk | 1176 | net/ax25/af_ax25.c | sk->prot = NULL; /* So we use default free mechanisms */ |
sk | 1177 | net/ax25/af_ax25.c | sk->err = 0; |
sk | 1178 | net/ax25/af_ax25.c | sk->localroute = 0; |
sk | 1179 | net/ax25/af_ax25.c | sk->send_head = NULL; |
sk | 1180 | net/ax25/af_ax25.c | sk->debug = osk->debug; |
sk | 1181 | net/ax25/af_ax25.c | sk->state = TCP_ESTABLISHED; |
sk | 1182 | net/ax25/af_ax25.c | sk->window = osk->window; |
sk | 1183 | net/ax25/af_ax25.c | sk->shutdown = 0; |
sk | 1184 | net/ax25/af_ax25.c | sk->mtu = osk->mtu; |
sk | 1185 | net/ax25/af_ax25.c | sk->sleep = osk->sleep; |
sk | 1186 | net/ax25/af_ax25.c | sk->zapped = osk->zapped; |
sk | 1188 | net/ax25/af_ax25.c | sk->state_change = def_callback1; |
sk | 1189 | net/ax25/af_ax25.c | sk->data_ready = def_callback2; |
sk | 1190 | net/ax25/af_ax25.c | sk->write_space = def_callback1; |
sk | 1191 | net/ax25/af_ax25.c | sk->error_report = def_callback1; |
sk | 1209 | net/ax25/af_ax25.c | kfree_s(sk, sizeof(*sk)); |
sk | 1218 | net/ax25/af_ax25.c | sk->ax25 = ax25; |
sk | 1219 | net/ax25/af_ax25.c | ax25->sk = sk; |
sk | 1221 | net/ax25/af_ax25.c | return sk; |
sk | 1226 | net/ax25/af_ax25.c | struct sock *sk = (struct sock *)oldsock->data; |
sk | 1228 | net/ax25/af_ax25.c | return ax25_create(newsock, sk->protocol); |
sk | 1233 | net/ax25/af_ax25.c | struct sock *sk = (struct sock *)sock->data; |
sk | 1235 | net/ax25/af_ax25.c | if (sk == NULL) return 0; |
sk | 1237 | net/ax25/af_ax25.c | if (sk->type == SOCK_SEQPACKET) { |
sk | 1238 | net/ax25/af_ax25.c | switch (sk->ax25->state) { |
sk | 1240 | net/ax25/af_ax25.c | sk->state = TCP_CLOSE; |
sk | 1241 | net/ax25/af_ax25.c | sk->state_change(sk); |
sk | 1242 | net/ax25/af_ax25.c | sk->dead = 1; |
sk | 1243 | net/ax25/af_ax25.c | ax25_destroy_socket(sk->ax25); |
sk | 1247 | net/ax25/af_ax25.c | ax25_send_control(sk->ax25, DISC, POLLON, C_COMMAND); |
sk | 1248 | net/ax25/af_ax25.c | sk->ax25->state = AX25_STATE_0; |
sk | 1249 | net/ax25/af_ax25.c | sk->state = TCP_CLOSE; |
sk | 1250 | net/ax25/af_ax25.c | sk->state_change(sk); |
sk | 1251 | net/ax25/af_ax25.c | sk->dead = 1; |
sk | 1252 | net/ax25/af_ax25.c | ax25_destroy_socket(sk->ax25); |
sk | 1256 | net/ax25/af_ax25.c | if (sk->ax25->dama_slave) |
sk | 1257 | net/ax25/af_ax25.c | ax25_send_control(sk->ax25, DISC, POLLON, C_COMMAND); |
sk | 1259 | net/ax25/af_ax25.c | ax25_send_control(sk->ax25, DM, POLLON, C_RESPONSE); |
sk | 1260 | net/ax25/af_ax25.c | sk->ax25->state = AX25_STATE_0; |
sk | 1261 | net/ax25/af_ax25.c | sk->state = TCP_CLOSE; |
sk | 1262 | net/ax25/af_ax25.c | sk->state_change(sk); |
sk | 1263 | net/ax25/af_ax25.c | sk->dead = 1; |
sk | 1264 | net/ax25/af_ax25.c | ax25_destroy_socket(sk->ax25); |
sk | 1269 | net/ax25/af_ax25.c | ax25_clear_queues(sk->ax25); |
sk | 1270 | net/ax25/af_ax25.c | sk->ax25->n2count = 0; |
sk | 1271 | net/ax25/af_ax25.c | if (!sk->ax25->dama_slave) |
sk | 1272 | net/ax25/af_ax25.c | ax25_send_control(sk->ax25, DISC, POLLON, C_COMMAND); |
sk | 1273 | net/ax25/af_ax25.c | sk->ax25->t3timer = 0; |
sk | 1274 | net/ax25/af_ax25.c | sk->ax25->t1timer = sk->ax25->t1 = ax25_calculate_t1(sk->ax25); |
sk | 1275 | net/ax25/af_ax25.c | sk->ax25->state = AX25_STATE_2; |
sk | 1276 | net/ax25/af_ax25.c | sk->state = TCP_CLOSE; |
sk | 1277 | net/ax25/af_ax25.c | sk->state_change(sk); |
sk | 1278 | net/ax25/af_ax25.c | sk->dead = 1; |
sk | 1279 | net/ax25/af_ax25.c | sk->destroy = 1; |
sk | 1286 | net/ax25/af_ax25.c | sk->state = TCP_CLOSE; |
sk | 1287 | net/ax25/af_ax25.c | sk->state_change(sk); |
sk | 1288 | net/ax25/af_ax25.c | sk->dead = 1; |
sk | 1289 | net/ax25/af_ax25.c | ax25_destroy_socket(sk->ax25); |
sk | 1293 | net/ax25/af_ax25.c | sk->socket = NULL; /* Not used, but we should do this. **/ |
sk | 1306 | net/ax25/af_ax25.c | struct sock *sk; |
sk | 1311 | net/ax25/af_ax25.c | sk = (struct sock *)sock->data; |
sk | 1313 | net/ax25/af_ax25.c | if (sk->zapped == 0) |
sk | 1324 | net/ax25/af_ax25.c | sk->ax25->source_addr = addr->fsa_ax25.sax25_call; |
sk | 1326 | net/ax25/af_ax25.c | sk->ax25->source_addr = *call; |
sk | 1328 | net/ax25/af_ax25.c | if (sk->debug) |
sk | 1329 | net/ax25/af_ax25.c | printk("AX25: source address set to %s\n", ax2asc(&sk->ax25->source_addr)); |
sk | 1334 | net/ax25/af_ax25.c | if (sk->debug) |
sk | 1338 | net/ax25/af_ax25.c | if (sk->debug) |
sk | 1342 | net/ax25/af_ax25.c | if (sk->debug) |
sk | 1347 | net/ax25/af_ax25.c | if (sk->debug) |
sk | 1351 | net/ax25/af_ax25.c | if (sk->debug) |
sk | 1355 | net/ax25/af_ax25.c | ax25_fillin_cb(sk->ax25, dev); |
sk | 1356 | net/ax25/af_ax25.c | ax25_insert_socket(sk->ax25); |
sk | 1358 | net/ax25/af_ax25.c | sk->zapped = 0; |
sk | 1360 | net/ax25/af_ax25.c | if (sk->debug) |
sk | 1369 | net/ax25/af_ax25.c | struct sock *sk = (struct sock *)sock->data; |
sk | 1373 | net/ax25/af_ax25.c | if (sk->state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { |
sk | 1378 | net/ax25/af_ax25.c | if (sk->state == TCP_CLOSE && sock->state == SS_CONNECTING) { |
sk | 1383 | net/ax25/af_ax25.c | if (sk->state == TCP_ESTABLISHED && sk->type == SOCK_SEQPACKET) |
sk | 1386 | net/ax25/af_ax25.c | sk->state = TCP_CLOSE; |
sk | 1403 | net/ax25/af_ax25.c | if (sk->ax25->digipeat == NULL) { |
sk | 1404 | net/ax25/af_ax25.c | if ((sk->ax25->digipeat = (ax25_digi *)kmalloc(sizeof(ax25_digi), GFP_KERNEL)) == NULL) |
sk | 1408 | net/ax25/af_ax25.c | sk->ax25->digipeat->ndigi = addr->sax25_ndigis; |
sk | 1411 | net/ax25/af_ax25.c | sk->ax25->digipeat->repeated[ct] = 0; |
sk | 1412 | net/ax25/af_ax25.c | sk->ax25->digipeat->calls[ct] = fsa->fsa_digipeater[ct]; |
sk | 1416 | net/ax25/af_ax25.c | sk->ax25->digipeat->lastrepeat = 0; |
sk | 1418 | net/ax25/af_ax25.c | if (sk->debug) |
sk | 1420 | net/ax25/af_ax25.c | ax25_rt_build_path(sk->ax25, &addr->sax25_call); |
sk | 1428 | net/ax25/af_ax25.c | if (sk->zapped) { |
sk | 1429 | net/ax25/af_ax25.c | if ((err = ax25_rt_autobind(sk->ax25, &addr->sax25_call)) < 0) |
sk | 1431 | net/ax25/af_ax25.c | ax25_fillin_cb(sk->ax25, sk->ax25->device); |
sk | 1432 | net/ax25/af_ax25.c | ax25_insert_socket(sk->ax25); |
sk | 1434 | net/ax25/af_ax25.c | if (sk->ax25->device == NULL) |
sk | 1438 | net/ax25/af_ax25.c | if (sk->type == SOCK_SEQPACKET && ax25_find_cb(&sk->ax25->source_addr, &addr->sax25_call, sk->ax25->device) != NULL) |
sk | 1441 | net/ax25/af_ax25.c | sk->ax25->dest_addr = addr->sax25_call; |
sk | 1444 | net/ax25/af_ax25.c | if (sk->type != SOCK_SEQPACKET) { |
sk | 1446 | net/ax25/af_ax25.c | sk->state = TCP_ESTABLISHED; |
sk | 1452 | net/ax25/af_ax25.c | sk->state = TCP_SYN_SENT; |
sk | 1454 | net/ax25/af_ax25.c | if (ax25_dev_is_dama_slave(sk->ax25->device)) |
sk | 1455 | net/ax25/af_ax25.c | dama_establish_data_link(sk->ax25); |
sk | 1457 | net/ax25/af_ax25.c | ax25_establish_data_link(sk->ax25); |
sk | 1459 | net/ax25/af_ax25.c | sk->ax25->state = AX25_STATE_1; |
sk | 1460 | net/ax25/af_ax25.c | ax25_set_timer(sk->ax25); /* Start going SABM SABM until a UA or a give up and DM */ |
sk | 1463 | net/ax25/af_ax25.c | if (sk->state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) |
sk | 1469 | net/ax25/af_ax25.c | while (sk->state == TCP_SYN_SENT) { |
sk | 1470 | net/ax25/af_ax25.c | interruptible_sleep_on(sk->sleep); |
sk | 1477 | net/ax25/af_ax25.c | if (sk->state != TCP_ESTABLISHED) |
sk | 1482 | net/ax25/af_ax25.c | return sock_error(sk); /* Always set at this point */ |
sk | 1499 | net/ax25/af_ax25.c | struct sock *sk; |
sk | 1508 | net/ax25/af_ax25.c | sk = (struct sock *)sock->data; |
sk | 1510 | net/ax25/af_ax25.c | if (sk->type != SOCK_SEQPACKET) |
sk | 1513 | net/ax25/af_ax25.c | if (sk->state != TCP_LISTEN) |
sk | 1522 | net/ax25/af_ax25.c | if ((skb = skb_dequeue(&sk->receive_queue)) == NULL) { |
sk | 1527 | net/ax25/af_ax25.c | interruptible_sleep_on(sk->sleep); |
sk | 1535 | net/ax25/af_ax25.c | newsk = skb->sk; |
sk | 1540 | net/ax25/af_ax25.c | skb->sk = NULL; |
sk | 1542 | net/ax25/af_ax25.c | sk->ack_backlog--; |
sk | 1553 | net/ax25/af_ax25.c | struct sock *sk; |
sk | 1556 | net/ax25/af_ax25.c | sk = (struct sock *)sock->data; |
sk | 1559 | net/ax25/af_ax25.c | if (sk->state != TCP_ESTABLISHED) |
sk | 1561 | net/ax25/af_ax25.c | addr = &sk->ax25->dest_addr; |
sk | 1563 | net/ax25/af_ax25.c | addr = &sk->ax25->source_addr; |
sk | 1572 | net/ax25/af_ax25.c | if (sk->ax25->digipeat != NULL) { |
sk | 1573 | net/ax25/af_ax25.c | ndigi = sk->ax25->digipeat->ndigi; |
sk | 1577 | net/ax25/af_ax25.c | sax->fsa_digipeater[i] = sk->ax25->digipeat->calls[i]; |
sk | 1586 | net/ax25/af_ax25.c | struct sock *sk; |
sk | 1719 | net/ax25/af_ax25.c | if ((sk = ax25_find_socket(&dest, &src, SOCK_DGRAM)) != NULL) { |
sk | 1720 | net/ax25/af_ax25.c | if (sk->rmem_alloc >= sk->rcvbuf) { |
sk | 1728 | net/ax25/af_ax25.c | skb_queue_tail(&sk->receive_queue, skb); |
sk | 1729 | net/ax25/af_ax25.c | skb->sk = sk; |
sk | 1730 | net/ax25/af_ax25.c | atomic_add(skb->truesize, &sk->rmem_alloc); |
sk | 1731 | net/ax25/af_ax25.c | if (!sk->dead) |
sk | 1732 | net/ax25/af_ax25.c | sk->data_ready(sk, skb->len); |
sk | 1792 | net/ax25/af_ax25.c | if ((sk = ax25_find_listener(&dest, dev, SOCK_SEQPACKET)) != NULL) { |
sk | 1794 | net/ax25/af_ax25.c | if (sk->ack_backlog == sk->max_ack_backlog || (make = ax25_make_new(sk, dev)) == NULL) { |
sk | 1804 | net/ax25/af_ax25.c | skb_queue_head(&sk->receive_queue, skb); |
sk | 1806 | net/ax25/af_ax25.c | skb->sk = make; |
sk | 1808 | net/ax25/af_ax25.c | make->pair = sk; |
sk | 1810 | net/ax25/af_ax25.c | sk->ack_backlog++; |
sk | 1883 | net/ax25/af_ax25.c | if (sk != NULL) { |
sk | 1884 | net/ax25/af_ax25.c | if (!sk->dead) |
sk | 1885 | net/ax25/af_ax25.c | sk->data_ready(sk, skb->len ); |
sk | 1898 | net/ax25/af_ax25.c | skb->sk = NULL; /* Initially we don't know who its for */ |
sk | 1919 | net/ax25/af_ax25.c | skb->sk = NULL; /* Initially we don't know who its for */ |
sk | 1937 | net/ax25/af_ax25.c | struct sock *sk = (struct sock *)sock->data; |
sk | 1949 | net/ax25/af_ax25.c | if (sk->err) { |
sk | 1950 | net/ax25/af_ax25.c | return sock_error(sk); |
sk | 1956 | net/ax25/af_ax25.c | if (sk->zapped) |
sk | 1959 | net/ax25/af_ax25.c | if (sk->ax25->device == NULL) |
sk | 1987 | net/ax25/af_ax25.c | if (sk->type == SOCK_SEQPACKET && ax25cmp(&sk->ax25->dest_addr, &sax.sax25_call) != 0) |
sk | 1994 | net/ax25/af_ax25.c | if (sk->state != TCP_ESTABLISHED) |
sk | 1997 | net/ax25/af_ax25.c | sax.sax25_call = sk->ax25->dest_addr; |
sk | 1998 | net/ax25/af_ax25.c | dp = sk->ax25->digipeat; |
sk | 2001 | net/ax25/af_ax25.c | if (sk->debug) |
sk | 2005 | net/ax25/af_ax25.c | if (sk->debug) |
sk | 2011 | net/ax25/af_ax25.c | if ((skb = sock_alloc_send_skb(sk, size, 0, 0, &err)) == NULL) |
sk | 2014 | net/ax25/af_ax25.c | skb->sk = sk; |
sk | 2020 | net/ax25/af_ax25.c | if (sk->debug) |
sk | 2028 | net/ax25/af_ax25.c | *asmptr = sk->protocol; |
sk | 2030 | net/ax25/af_ax25.c | if (sk->debug) |
sk | 2033 | net/ax25/af_ax25.c | if (sk->type == SOCK_SEQPACKET) { |
sk | 2035 | net/ax25/af_ax25.c | if (sk->state != TCP_ESTABLISHED) { |
sk | 2040 | net/ax25/af_ax25.c | ax25_output(sk->ax25, skb); /* Shove it onto the queue and kick */ |
sk | 2046 | net/ax25/af_ax25.c | if (sk->debug) { |
sk | 2053 | net/ax25/af_ax25.c | asmptr += (lv = build_ax25_addr(asmptr, &sk->ax25->source_addr, &sax.sax25_call, dp, C_COMMAND, MODULUS)); |
sk | 2055 | net/ax25/af_ax25.c | if (sk->debug) |
sk | 2060 | net/ax25/af_ax25.c | if (sk->debug) |
sk | 2066 | net/ax25/af_ax25.c | ax25_queue_xmit(skb, sk->ax25->device, SOPRI_NORMAL); |
sk | 2075 | net/ax25/af_ax25.c | struct sock *sk = (struct sock *)sock->data; |
sk | 2082 | net/ax25/af_ax25.c | if (sk->err) { |
sk | 2083 | net/ax25/af_ax25.c | return sock_error(sk); |
sk | 2093 | net/ax25/af_ax25.c | if (sk->type == SOCK_SEQPACKET && sk->state != TCP_ESTABLISHED) |
sk | 2097 | net/ax25/af_ax25.c | if ((skb = skb_recv_datagram(sk, flags, noblock, &er)) == NULL) |
sk | 2100 | net/ax25/af_ax25.c | if (sk->ax25->hdrincl) { |
sk | 2103 | net/ax25/af_ax25.c | if (sk->type == SOCK_SEQPACKET) |
sk | 2145 | net/ax25/af_ax25.c | skb_free_datagram(sk, skb); |
sk | 2150 | net/ax25/af_ax25.c | static int ax25_shutdown(struct socket *sk, int how) |
sk | 2158 | net/ax25/af_ax25.c | struct sock *sk = (struct sock *)sock->data; |
sk | 2160 | net/ax25/af_ax25.c | return datagram_select(sk, sel_type, wait); |
sk | 2165 | net/ax25/af_ax25.c | struct sock *sk = (struct sock *)sock->data; |
sk | 2173 | net/ax25/af_ax25.c | amount = sk->sndbuf - sk->wmem_alloc; |
sk | 2183 | net/ax25/af_ax25.c | if ((skb = skb_peek(&sk->receive_queue)) != NULL) |
sk | 2192 | net/ax25/af_ax25.c | if (sk != NULL) { |
sk | 2193 | net/ax25/af_ax25.c | if (sk->stamp.tv_sec==0) |
sk | 2197 | net/ax25/af_ax25.c | memcpy_tofs((void *)arg, &sk->stamp, sizeof(struct timeval)); |
sk | 2312 | net/ax25/af_ax25.c | if (ax25->sk != NULL) { |
sk | 2314 | net/ax25/af_ax25.c | ax25->sk->wmem_alloc, |
sk | 2315 | net/ax25/af_ax25.c | ax25->sk->rmem_alloc); |
sk | 96 | net/ax25/ax25_in.c | if (ax25->sk != NULL) { |
sk | 97 | net/ax25/ax25_in.c | skbn->sk = ax25->sk; |
sk | 98 | net/ax25/ax25_in.c | atomic_add(skbn->truesize, &ax25->sk->rmem_alloc); |
sk | 198 | net/ax25/ax25_in.c | if (ax25->sk != NULL && ax25_dev_get_value(ax25->device, AX25_VALUES_TEXT) && ax25->sk->protocol == pid) { |
sk | 199 | net/ax25/ax25_in.c | if (sock_queue_rcv_skb(ax25->sk, skb) == 0) { |
sk | 250 | net/ax25/ax25_in.c | if (ax25->sk != NULL) { |
sk | 251 | net/ax25/ax25_in.c | ax25->sk->state = TCP_ESTABLISHED; |
sk | 253 | net/ax25/ax25_in.c | if (!ax25->sk->dead) |
sk | 254 | net/ax25/ax25_in.c | ax25->sk->state_change(ax25->sk); |
sk | 264 | net/ax25/ax25_in.c | if (ax25->sk != NULL) { |
sk | 265 | net/ax25/ax25_in.c | ax25->sk->state = TCP_CLOSE; |
sk | 266 | net/ax25/ax25_in.c | ax25->sk->err = ECONNREFUSED; |
sk | 267 | net/ax25/ax25_in.c | if (!ax25->sk->dead) |
sk | 268 | net/ax25/ax25_in.c | ax25->sk->state_change(ax25->sk); |
sk | 269 | net/ax25/ax25_in.c | ax25->sk->dead = 1; |
sk | 308 | net/ax25/ax25_in.c | if (ax25->sk != NULL) { |
sk | 309 | net/ax25/ax25_in.c | ax25->sk->state = TCP_CLOSE; |
sk | 310 | net/ax25/ax25_in.c | ax25->sk->err = 0; |
sk | 311 | net/ax25/ax25_in.c | if (!ax25->sk->dead) |
sk | 312 | net/ax25/ax25_in.c | ax25->sk->state_change(ax25->sk); |
sk | 313 | net/ax25/ax25_in.c | ax25->sk->dead = 1; |
sk | 323 | net/ax25/ax25_in.c | if (ax25->sk != NULL) { |
sk | 324 | net/ax25/ax25_in.c | ax25->sk->state = TCP_CLOSE; |
sk | 325 | net/ax25/ax25_in.c | ax25->sk->err = 0; |
sk | 326 | net/ax25/ax25_in.c | if (!ax25->sk->dead) |
sk | 327 | net/ax25/ax25_in.c | ax25->sk->state_change(ax25->sk); |
sk | 328 | net/ax25/ax25_in.c | ax25->sk->dead = 1; |
sk | 338 | net/ax25/ax25_in.c | if (ax25->sk != NULL) { |
sk | 339 | net/ax25/ax25_in.c | ax25->sk->state = TCP_CLOSE; |
sk | 340 | net/ax25/ax25_in.c | ax25->sk->err = 0; |
sk | 341 | net/ax25/ax25_in.c | if (!ax25->sk->dead) |
sk | 342 | net/ax25/ax25_in.c | ax25->sk->state_change(ax25->sk); |
sk | 343 | net/ax25/ax25_in.c | ax25->sk->dead = 1; |
sk | 416 | net/ax25/ax25_in.c | if (ax25->sk != NULL) { |
sk | 417 | net/ax25/ax25_in.c | ax25->sk->state = TCP_CLOSE; |
sk | 418 | net/ax25/ax25_in.c | ax25->sk->err = 0; |
sk | 419 | net/ax25/ax25_in.c | if (!ax25->sk->dead) |
sk | 420 | net/ax25/ax25_in.c | ax25->sk->state_change(ax25->sk); |
sk | 421 | net/ax25/ax25_in.c | ax25->sk->dead = 1; |
sk | 430 | net/ax25/ax25_in.c | if (ax25->sk) { |
sk | 431 | net/ax25/ax25_in.c | ax25->sk->state = TCP_CLOSE; |
sk | 432 | net/ax25/ax25_in.c | ax25->sk->err = ECONNRESET; |
sk | 433 | net/ax25/ax25_in.c | if (!ax25->sk->dead) |
sk | 434 | net/ax25/ax25_in.c | ax25->sk->state_change(ax25->sk); |
sk | 435 | net/ax25/ax25_in.c | ax25->sk->dead = 1; |
sk | 612 | net/ax25/ax25_in.c | if (ax25->sk != NULL) { |
sk | 613 | net/ax25/ax25_in.c | ax25->sk->state = TCP_CLOSE; |
sk | 614 | net/ax25/ax25_in.c | ax25->sk->err = 0; |
sk | 615 | net/ax25/ax25_in.c | if (!ax25->sk->dead) |
sk | 616 | net/ax25/ax25_in.c | ax25->sk->state_change(ax25->sk); |
sk | 617 | net/ax25/ax25_in.c | ax25->sk->dead = 1; |
sk | 627 | net/ax25/ax25_in.c | if (ax25->sk != NULL) { |
sk | 628 | net/ax25/ax25_in.c | ax25->sk->state = TCP_CLOSE; |
sk | 629 | net/ax25/ax25_in.c | ax25->sk->err = ECONNRESET; |
sk | 630 | net/ax25/ax25_in.c | if (!ax25->sk->dead) |
sk | 631 | net/ax25/ax25_in.c | ax25->sk->state_change(ax25->sk); |
sk | 632 | net/ax25/ax25_in.c | ax25->sk->dead = 1; |
sk | 812 | net/ax25/ax25_in.c | if (ax25->sk != NULL && ax25->state == AX25_STATE_0 && ax25->sk->dead) |
sk | 100 | net/ax25/ax25_out.c | if (skb->sk != NULL) { |
sk | 101 | net/ax25/ax25_out.c | if ((skbn = sock_alloc_send_skb(skb->sk, mtu + 2 + frontlen, 0, 0, &err)) == NULL) |
sk | 108 | net/ax25/ax25_out.c | skbn->sk = skb->sk; |
sk | 254 | net/ax25/ax25_out.c | if (ax25->sk != NULL) { |
sk | 255 | net/ax25/ax25_out.c | ax25->sk->state = TCP_CLOSE; |
sk | 256 | net/ax25/ax25_out.c | ax25->sk->err = ENETUNREACH; |
sk | 257 | net/ax25/ax25_out.c | if (!ax25->sk->dead) |
sk | 258 | net/ax25/ax25_out.c | ax25->sk->state_change(ax25->sk); |
sk | 259 | net/ax25/ax25_out.c | ax25->sk->dead = 1; |
sk | 553 | net/ax25/ax25_route.c | if (ax25->sk != NULL) |
sk | 554 | net/ax25/ax25_route.c | ax25->sk->zapped = 0; |
sk | 216 | net/ax25/ax25_subr.c | if (ax25->sk != NULL) { |
sk | 217 | net/ax25/ax25_subr.c | skb->sk = ax25->sk; |
sk | 218 | net/ax25/ax25_subr.c | atomic_add(skb->truesize, &ax25->sk->wmem_alloc); |
sk | 268 | net/ax25/ax25_subr.c | skb->sk = NULL; |
sk | 499 | net/ax25/ax25_subr.c | if (ax25->sk != NULL) { |
sk | 500 | net/ax25/ax25_subr.c | skb->sk = ax25->sk; |
sk | 501 | net/ax25/ax25_subr.c | atomic_add(skb->truesize, &ax25->sk->wmem_alloc); |
sk | 517 | net/ax25/ax25_subr.c | if (ax25->sk != NULL && ax25->sk->debug) |
sk | 530 | net/ax25/ax25_subr.c | if (ax25->sk != NULL && ax25->sk->debug) |
sk | 100 | net/ax25/ax25_timer.c | if (ax25->sk == NULL || ax25->sk->destroy || (ax25->sk->state == TCP_LISTEN && ax25->sk->dead)) { |
sk | 112 | net/ax25/ax25_timer.c | if (ax25->sk != NULL) { |
sk | 113 | net/ax25/ax25_timer.c | if (ax25->sk->rmem_alloc < (ax25->sk->rcvbuf / 2) && (ax25->condition & OWN_RX_BUSY_CONDITION)) { |
sk | 153 | net/ax25/ax25_timer.c | if (ax25->sk != NULL) { |
sk | 154 | net/ax25/ax25_timer.c | if (ax25->sk->debug) |
sk | 156 | net/ax25/ax25_timer.c | ax25->sk->state = TCP_CLOSE; |
sk | 157 | net/ax25/ax25_timer.c | ax25->sk->err = ETIMEDOUT; |
sk | 158 | net/ax25/ax25_timer.c | if (!ax25->sk->dead) |
sk | 159 | net/ax25/ax25_timer.c | ax25->sk->state_change(ax25->sk); |
sk | 160 | net/ax25/ax25_timer.c | ax25->sk->dead = 1; |
sk | 196 | net/ax25/ax25_timer.c | if (ax25->sk != NULL) |
sk | 198 | net/ax25/ax25_timer.c | ax25->sk->state = TCP_CLOSE; |
sk | 199 | net/ax25/ax25_timer.c | ax25->sk->err = 0; |
sk | 200 | net/ax25/ax25_timer.c | if (!ax25->sk->dead) |
sk | 201 | net/ax25/ax25_timer.c | ax25->sk->state_change(ax25->sk); |
sk | 202 | net/ax25/ax25_timer.c | ax25->sk->dead = 1; |
sk | 203 | net/ax25/ax25_timer.c | ax25->sk->destroy = 1; |
sk | 243 | net/ax25/ax25_timer.c | if (ax25->sk != NULL) { |
sk | 244 | net/ax25/ax25_timer.c | ax25->sk->state = TCP_CLOSE; |
sk | 245 | net/ax25/ax25_timer.c | ax25->sk->err = ETIMEDOUT; |
sk | 246 | net/ax25/ax25_timer.c | if (!ax25->sk->dead) |
sk | 247 | net/ax25/ax25_timer.c | ax25->sk->state_change(ax25->sk); |
sk | 248 | net/ax25/ax25_timer.c | ax25->sk->dead = 1; |
sk | 275 | net/ax25/ax25_timer.c | if (ax25->sk != NULL) { |
sk | 276 | net/ax25/ax25_timer.c | ax25->sk->state = TCP_CLOSE; |
sk | 277 | net/ax25/ax25_timer.c | ax25->sk->err = ETIMEDOUT; |
sk | 278 | net/ax25/ax25_timer.c | if (!ax25->sk->dead) |
sk | 279 | net/ax25/ax25_timer.c | ax25->sk->state_change(ax25->sk); |
sk | 280 | net/ax25/ax25_timer.c | ax25->sk->dead = 1; |
sk | 304 | net/ax25/ax25_timer.c | if (ax25->sk != NULL) { |
sk | 305 | net/ax25/ax25_timer.c | if (ax25->sk->debug) |
sk | 307 | net/ax25/ax25_timer.c | ax25->sk->state = TCP_CLOSE; |
sk | 308 | net/ax25/ax25_timer.c | ax25->sk->err = ETIMEDOUT; |
sk | 309 | net/ax25/ax25_timer.c | if (!ax25->sk->dead) |
sk | 310 | net/ax25/ax25_timer.c | ax25->sk->state_change(ax25->sk); |
sk | 311 | net/ax25/ax25_timer.c | ax25->sk->dead = 1; |
sk | 50 | net/core/datagram.c | static inline void wait_for_packet(struct sock * sk) |
sk | 54 | net/core/datagram.c | release_sock(sk); |
sk | 57 | net/core/datagram.c | if (skb_peek(&sk->receive_queue) == NULL) |
sk | 58 | net/core/datagram.c | interruptible_sleep_on(sk->sleep); |
sk | 60 | net/core/datagram.c | lock_sock(sk); |
sk | 75 | net/core/datagram.c | struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, int *err) |
sk | 80 | net/core/datagram.c | lock_sock(sk); |
sk | 82 | net/core/datagram.c | while(skb_queue_empty(&sk->receive_queue)) /* No data */ |
sk | 85 | net/core/datagram.c | error = sock_error(sk); |
sk | 90 | net/core/datagram.c | if (sk->shutdown & RCV_SHUTDOWN) |
sk | 95 | net/core/datagram.c | if(sk->type==SOCK_SEQPACKET && sk->state!=TCP_ESTABLISHED) |
sk | 108 | net/core/datagram.c | wait_for_packet(sk); |
sk | 118 | net/core/datagram.c | skb=skb_peek(&sk->receive_queue); |
sk | 126 | net/core/datagram.c | skb = skb_dequeue(&sk->receive_queue); |
sk | 133 | net/core/datagram.c | release_sock(sk); |
sk | 138 | net/core/datagram.c | void skb_free_datagram(struct sock * sk, struct sk_buff *skb) |
sk | 152 | net/core/datagram.c | release_sock(sk); |
sk | 179 | net/core/datagram.c | int datagram_select(struct sock *sk, int sel_type, select_table *wait) |
sk | 181 | net/core/datagram.c | select_wait(sk->sleep, wait); |
sk | 185 | net/core/datagram.c | if (sk->err) |
sk | 187 | net/core/datagram.c | if (sk->shutdown & RCV_SHUTDOWN) |
sk | 189 | net/core/datagram.c | if (sk->type==SOCK_SEQPACKET && sk->state==TCP_CLOSE) |
sk | 194 | net/core/datagram.c | if (skb_peek(&sk->receive_queue) != NULL) |
sk | 202 | net/core/datagram.c | if (sk->err) |
sk | 204 | net/core/datagram.c | if (sk->shutdown & SEND_SHUTDOWN) |
sk | 206 | net/core/datagram.c | if (sk->type==SOCK_SEQPACKET && sk->state==TCP_SYN_SENT) |
sk | 211 | net/core/datagram.c | if (sk->prot && sock_wspace(sk) >= MIN_WRITE_SPACE) |
sk | 215 | net/core/datagram.c | if (sk->prot==NULL && sk->sndbuf-sk->wmem_alloc >= MIN_WRITE_SPACE) |
sk | 222 | net/core/datagram.c | if (sk->err) |
sk | 423 | net/core/dev.c | ((struct sock *)ptype->data != skb->sk)) |
sk | 471 | net/core/dev.c | skb->sk = NULL; |
sk | 615 | net/core/skbuff.c | if (skb->sk) |
sk | 617 | net/core/skbuff.c | struct sock * sk = skb->sk; |
sk | 618 | net/core/skbuff.c | if(sk->prot!=NULL) |
sk | 621 | net/core/skbuff.c | sock_rfree(sk, skb); |
sk | 623 | net/core/skbuff.c | sock_wfree(sk, skb); |
sk | 629 | net/core/skbuff.c | atomic_sub(skb->truesize, &sk->rmem_alloc); |
sk | 631 | net/core/skbuff.c | atomic_sub(skb->truesize, &sk->wmem_alloc); |
sk | 632 | net/core/skbuff.c | if(!sk->dead) |
sk | 633 | net/core/skbuff.c | sk->write_space(sk); |
sk | 698 | net/core/skbuff.c | skb->sk = NULL; |
sk | 776 | net/core/skbuff.c | n->sk = NULL; |
sk | 817 | net/core/skbuff.c | n->sk=NULL; |
sk | 121 | net/core/sock.c | int sock_setsockopt(struct sock *sk, int level, int optname, |
sk | 137 | net/core/sock.c | sk->linger=0; |
sk | 157 | net/core/sock.c | sk->debug=valbool; |
sk | 160 | net/core/sock.c | sk->reuse = valbool; |
sk | 166 | net/core/sock.c | sk->localroute=valbool; |
sk | 169 | net/core/sock.c | sk->broadcast=valbool; |
sk | 176 | net/core/sock.c | sk->sndbuf = val; |
sk | 184 | net/core/sock.c | sk->rcvbuf = val; |
sk | 188 | net/core/sock.c | sk->keepopen = valbool; |
sk | 192 | net/core/sock.c | sk->urginline = valbool; |
sk | 196 | net/core/sock.c | sk->no_check = valbool; |
sk | 202 | net/core/sock.c | sk->priority = val; |
sk | 217 | net/core/sock.c | sk->linger=0; |
sk | 220 | net/core/sock.c | sk->lingertime=ling.l_linger; |
sk | 221 | net/core/sock.c | sk->linger=1; |
sk | 226 | net/core/sock.c | sk->bsdism = valbool; |
sk | 235 | net/core/sock.c | int sock_getsockopt(struct sock *sk, int level, int optname, |
sk | 245 | net/core/sock.c | val = sk->debug; |
sk | 249 | net/core/sock.c | val = sk->localroute; |
sk | 253 | net/core/sock.c | val= sk->broadcast; |
sk | 257 | net/core/sock.c | val=sk->sndbuf; |
sk | 261 | net/core/sock.c | val =sk->rcvbuf; |
sk | 265 | net/core/sock.c | val = sk->reuse; |
sk | 269 | net/core/sock.c | val = sk->keepopen; |
sk | 273 | net/core/sock.c | val = sk->type; |
sk | 277 | net/core/sock.c | val = sock_error(sk); |
sk | 279 | net/core/sock.c | val=xchg(&sk->err_soft,0); |
sk | 283 | net/core/sock.c | val = sk->urginline; |
sk | 287 | net/core/sock.c | val = sk->no_check; |
sk | 291 | net/core/sock.c | val = sk->priority; |
sk | 302 | net/core/sock.c | ling.l_onoff=sk->linger; |
sk | 303 | net/core/sock.c | ling.l_linger=sk->lingertime; |
sk | 308 | net/core/sock.c | val = sk->bsdism; |
sk | 327 | net/core/sock.c | struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority) |
sk | 329 | net/core/sock.c | if (sk) { |
sk | 330 | net/core/sock.c | if (force || sk->wmem_alloc + size < sk->sndbuf) { |
sk | 333 | net/core/sock.c | atomic_add(skb->truesize, &sk->wmem_alloc); |
sk | 341 | net/core/sock.c | struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority) |
sk | 343 | net/core/sock.c | if (sk) { |
sk | 344 | net/core/sock.c | if (force || sk->rmem_alloc + size < sk->rcvbuf) { |
sk | 347 | net/core/sock.c | atomic_add(skb->truesize, &sk->rmem_alloc); |
sk | 356 | net/core/sock.c | unsigned long sock_rspace(struct sock *sk) |
sk | 360 | net/core/sock.c | if (sk != NULL) |
sk | 362 | net/core/sock.c | if (sk->rmem_alloc >= sk->rcvbuf-2*MIN_WINDOW) |
sk | 364 | net/core/sock.c | amt = min((sk->rcvbuf-sk->rmem_alloc)/2-MIN_WINDOW, MAX_WINDOW); |
sk | 373 | net/core/sock.c | unsigned long sock_wspace(struct sock *sk) |
sk | 375 | net/core/sock.c | if (sk != NULL) |
sk | 377 | net/core/sock.c | if (sk->shutdown & SEND_SHUTDOWN) |
sk | 379 | net/core/sock.c | if (sk->wmem_alloc >= sk->sndbuf) |
sk | 381 | net/core/sock.c | return sk->sndbuf - sk->wmem_alloc; |
sk | 387 | net/core/sock.c | void sock_wfree(struct sock *sk, struct sk_buff *skb) |
sk | 394 | net/core/sock.c | if (sk) |
sk | 396 | net/core/sock.c | atomic_sub(s, &sk->wmem_alloc); |
sk | 398 | net/core/sock.c | sk->write_space(sk); |
sk | 403 | net/core/sock.c | void sock_rfree(struct sock *sk, struct sk_buff *skb) |
sk | 410 | net/core/sock.c | if (sk) |
sk | 412 | net/core/sock.c | atomic_sub(s, &sk->rmem_alloc); |
sk | 420 | net/core/sock.c | struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, unsigned long fallback, int noblock, int *errcode) |
sk | 427 | net/core/sock.c | if(sk->err!=0) |
sk | 430 | net/core/sock.c | err= -sk->err; |
sk | 431 | net/core/sock.c | sk->err=0; |
sk | 437 | net/core/sock.c | if(sk->shutdown&SEND_SHUTDOWN) |
sk | 444 | net/core/sock.c | skb = sock_wmalloc(sk, size, 0, sk->allocation); |
sk | 449 | net/core/sock.c | skb = sock_wmalloc(sk, size, 0 , GFP_BUFFER); |
sk | 451 | net/core/sock.c | skb=sock_wmalloc(sk, fallback, 0, GFP_KERNEL); |
sk | 462 | net/core/sock.c | sk->socket->flags |= SO_NOSPACE; |
sk | 468 | net/core/sock.c | if(sk->shutdown&SEND_SHUTDOWN) |
sk | 473 | net/core/sock.c | tmp = sk->wmem_alloc; |
sk | 475 | net/core/sock.c | if(sk->shutdown&SEND_SHUTDOWN) |
sk | 483 | net/core/sock.c | if( tmp <= sk->wmem_alloc) |
sk | 493 | net/core/sock.c | if (sk->wmem_alloc + size >= sk->sndbuf) |
sk | 496 | net/core/sock.c | sk->socket->flags &= ~SO_NOSPACE; |
sk | 497 | net/core/sock.c | interruptible_sleep_on(sk->sleep); |
sk | 514 | net/core/sock.c | void __release_sock(struct sock *sk) |
sk | 517 | net/core/sock.c | if (!sk->prot || !sk->prot->rcv) |
sk | 522 | net/core/sock.c | while (!skb_queue_empty(&sk->back_log)) { |
sk | 523 | net/core/sock.c | struct sk_buff * skb = sk->back_log.next; |
sk | 524 | net/core/sock.c | __skb_unlink(skb, &sk->back_log); |
sk | 525 | net/core/sock.c | sk->prot->rcv(skb, skb->dev, (struct options*)skb->proto_priv, |
sk | 528 | net/core/sock.c | (struct inet_protocol *)sk->pair); |
sk | 117 | net/ipv4/af_inet.c | struct sock *sk; |
sk | 119 | net/ipv4/af_inet.c | for(sk = prot->sock_array[num & (SOCK_ARRAY_SIZE -1 )]; |
sk | 120 | net/ipv4/af_inet.c | sk != NULL; sk=sk->next) |
sk | 122 | net/ipv4/af_inet.c | if (sk->num == num) |
sk | 145 | net/ipv4/af_inet.c | struct sock *sk; |
sk | 161 | net/ipv4/af_inet.c | sk = prot->sock_array[(i+base+1) &(SOCK_ARRAY_SIZE -1)]; |
sk | 162 | net/ipv4/af_inet.c | while(sk != NULL) |
sk | 164 | net/ipv4/af_inet.c | sk = sk->next; |
sk | 192 | net/ipv4/af_inet.c | void put_sock(unsigned short num, struct sock *sk) |
sk | 198 | net/ipv4/af_inet.c | if(sk->type==SOCK_PACKET) |
sk | 201 | net/ipv4/af_inet.c | sk->num = num; |
sk | 202 | net/ipv4/af_inet.c | sk->next = NULL; |
sk | 212 | net/ipv4/af_inet.c | sk->prot->inuse += 1; |
sk | 213 | net/ipv4/af_inet.c | if (sk->prot->highestinuse < sk->prot->inuse) |
sk | 214 | net/ipv4/af_inet.c | sk->prot->highestinuse = sk->prot->inuse; |
sk | 216 | net/ipv4/af_inet.c | if (sk->prot->sock_array[num] == NULL) |
sk | 218 | net/ipv4/af_inet.c | sk->prot->sock_array[num] = sk; |
sk | 226 | net/ipv4/af_inet.c | if ((mask & sk->rcv_saddr) && |
sk | 227 | net/ipv4/af_inet.c | (mask & sk->rcv_saddr) != (mask & 0xffffffff)) |
sk | 237 | net/ipv4/af_inet.c | skp = sk->prot->sock_array + num; |
sk | 244 | net/ipv4/af_inet.c | sk->next = tmp; |
sk | 245 | net/ipv4/af_inet.c | *skp = sk; |
sk | 290 | net/ipv4/af_inet.c | void destroy_sock(struct sock *sk) |
sk | 294 | net/ipv4/af_inet.c | lock_sock(sk); /* just to be safe. */ |
sk | 296 | net/ipv4/af_inet.c | remove_sock(sk); |
sk | 303 | net/ipv4/af_inet.c | delete_timer(sk); |
sk | 304 | net/ipv4/af_inet.c | del_timer(&sk->retransmit_timer); |
sk | 310 | net/ipv4/af_inet.c | while ((skb = tcp_dequeue_partial(sk)) != NULL) |
sk | 320 | net/ipv4/af_inet.c | while((skb = skb_dequeue(&sk->write_queue)) != NULL) { |
sk | 329 | net/ipv4/af_inet.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
sk | 335 | net/ipv4/af_inet.c | if (skb->sk != NULL && skb->sk != sk) |
sk | 338 | net/ipv4/af_inet.c | skb->sk->prot->close(skb->sk, 0); |
sk | 349 | net/ipv4/af_inet.c | for(skb = sk->send_head; skb != NULL; ) |
sk | 367 | net/ipv4/af_inet.c | sk->send_head = NULL; |
sk | 374 | net/ipv4/af_inet.c | while((skb=skb_dequeue(&sk->back_log))!=NULL) |
sk | 377 | net/ipv4/af_inet.c | skb->sk = NULL; |
sk | 385 | net/ipv4/af_inet.c | if (sk->pair) |
sk | 387 | net/ipv4/af_inet.c | sk->pair->prot->close(sk->pair, 0); |
sk | 388 | net/ipv4/af_inet.c | sk->pair = NULL; |
sk | 397 | net/ipv4/af_inet.c | if (sk->rmem_alloc == 0 && sk->wmem_alloc == 0) |
sk | 399 | net/ipv4/af_inet.c | if(sk->opt) |
sk | 400 | net/ipv4/af_inet.c | kfree(sk->opt); |
sk | 401 | net/ipv4/af_inet.c | ip_rt_put(sk->ip_route_cache); |
sk | 407 | net/ipv4/af_inet.c | kfree_s((void *)sk,sizeof(*sk)); |
sk | 414 | net/ipv4/af_inet.c | sk->rmem_alloc, sk->wmem_alloc); |
sk | 415 | net/ipv4/af_inet.c | sk->destroy = 1; |
sk | 416 | net/ipv4/af_inet.c | sk->ack_backlog = 0; |
sk | 417 | net/ipv4/af_inet.c | release_sock(sk); |
sk | 418 | net/ipv4/af_inet.c | reset_timer(sk, TIME_DESTROY, SOCK_DESTROY_TIME); |
sk | 430 | net/ipv4/af_inet.c | struct sock *sk; |
sk | 432 | net/ipv4/af_inet.c | sk = (struct sock *) sock->data; |
sk | 444 | net/ipv4/af_inet.c | sk->proc = arg; |
sk | 447 | net/ipv4/af_inet.c | return(sk->proc); |
sk | 460 | net/ipv4/af_inet.c | struct sock *sk = (struct sock *) sock->data; |
sk | 462 | net/ipv4/af_inet.c | return sock_setsockopt(sk,level,optname,optval,optlen); |
sk | 463 | net/ipv4/af_inet.c | if (sk->prot->setsockopt==NULL) |
sk | 466 | net/ipv4/af_inet.c | return sk->prot->setsockopt(sk,level,optname,optval,optlen); |
sk | 476 | net/ipv4/af_inet.c | struct sock *sk = (struct sock *) sock->data; |
sk | 478 | net/ipv4/af_inet.c | return sock_getsockopt(sk,level,optname,optval,optlen); |
sk | 479 | net/ipv4/af_inet.c | if(sk->prot->getsockopt==NULL) |
sk | 482 | net/ipv4/af_inet.c | return sk->prot->getsockopt(sk,level,optname,optval,optlen); |
sk | 489 | net/ipv4/af_inet.c | static int inet_autobind(struct sock *sk) |
sk | 492 | net/ipv4/af_inet.c | if (sk->num == 0) |
sk | 494 | net/ipv4/af_inet.c | sk->num = get_new_socknum(sk->prot, 0); |
sk | 495 | net/ipv4/af_inet.c | if (sk->num == 0) |
sk | 499 | net/ipv4/af_inet.c | put_sock(sk->num, sk); |
sk | 500 | net/ipv4/af_inet.c | sk->dummy_th.source = ntohs(sk->num); |
sk | 511 | net/ipv4/af_inet.c | struct sock *sk = (struct sock *) sock->data; |
sk | 513 | net/ipv4/af_inet.c | if(inet_autobind(sk)!=0) |
sk | 527 | net/ipv4/af_inet.c | sk->max_ack_backlog = backlog; |
sk | 528 | net/ipv4/af_inet.c | if (sk->state != TCP_LISTEN) |
sk | 530 | net/ipv4/af_inet.c | sk->ack_backlog = 0; |
sk | 531 | net/ipv4/af_inet.c | sk->state = TCP_LISTEN; |
sk | 541 | net/ipv4/af_inet.c | static void def_callback1(struct sock *sk) |
sk | 543 | net/ipv4/af_inet.c | if(!sk->dead) |
sk | 544 | net/ipv4/af_inet.c | wake_up_interruptible(sk->sleep); |
sk | 547 | net/ipv4/af_inet.c | static void def_callback2(struct sock *sk,int len) |
sk | 549 | net/ipv4/af_inet.c | if(!sk->dead) |
sk | 551 | net/ipv4/af_inet.c | wake_up_interruptible(sk->sleep); |
sk | 552 | net/ipv4/af_inet.c | sock_wake_async(sk->socket, 1); |
sk | 556 | net/ipv4/af_inet.c | static void def_callback3(struct sock *sk) |
sk | 558 | net/ipv4/af_inet.c | if(!sk->dead && sk->wmem_alloc*2 <= sk->sndbuf) |
sk | 560 | net/ipv4/af_inet.c | wake_up_interruptible(sk->sleep); |
sk | 561 | net/ipv4/af_inet.c | sock_wake_async(sk->socket, 2); |
sk | 574 | net/ipv4/af_inet.c | struct sock *sk; |
sk | 578 | net/ipv4/af_inet.c | sk = (struct sock *) kmalloc(sizeof(*sk), GFP_KERNEL); |
sk | 579 | net/ipv4/af_inet.c | if (sk == NULL) |
sk | 581 | net/ipv4/af_inet.c | memset(sk,0,sizeof(*sk)); /* Efficient way to set most fields to zero */ |
sk | 591 | net/ipv4/af_inet.c | kfree_s((void *)sk, sizeof(*sk)); |
sk | 595 | net/ipv4/af_inet.c | sk->no_check = TCP_NO_CHECK; |
sk | 602 | net/ipv4/af_inet.c | kfree_s((void *)sk, sizeof(*sk)); |
sk | 606 | net/ipv4/af_inet.c | sk->no_check = UDP_NO_CHECK; |
sk | 613 | net/ipv4/af_inet.c | kfree_s((void *)sk, sizeof(*sk)); |
sk | 618 | net/ipv4/af_inet.c | kfree_s((void *)sk, sizeof(*sk)); |
sk | 622 | net/ipv4/af_inet.c | sk->reuse = 1; |
sk | 623 | net/ipv4/af_inet.c | sk->num = protocol; |
sk | 629 | net/ipv4/af_inet.c | kfree_s((void *)sk, sizeof(*sk)); |
sk | 634 | net/ipv4/af_inet.c | kfree_s((void *)sk, sizeof(*sk)); |
sk | 638 | net/ipv4/af_inet.c | sk->reuse = 1; |
sk | 639 | net/ipv4/af_inet.c | sk->num = protocol; |
sk | 643 | net/ipv4/af_inet.c | kfree_s((void *)sk, sizeof(*sk)); |
sk | 646 | net/ipv4/af_inet.c | sk->socket = sock; |
sk | 648 | net/ipv4/af_inet.c | sk->nonagle = 1; |
sk | 650 | net/ipv4/af_inet.c | sk->type = sock->type; |
sk | 651 | net/ipv4/af_inet.c | sk->protocol = protocol; |
sk | 652 | net/ipv4/af_inet.c | sk->allocation = GFP_KERNEL; |
sk | 653 | net/ipv4/af_inet.c | sk->sndbuf = SK_WMEM_MAX; |
sk | 654 | net/ipv4/af_inet.c | sk->rcvbuf = SK_RMEM_MAX; |
sk | 655 | net/ipv4/af_inet.c | sk->ato = HZ/3; |
sk | 656 | net/ipv4/af_inet.c | sk->rto = TCP_TIMEOUT_INIT; /*TCP_WRITE_TIME*/ |
sk | 657 | net/ipv4/af_inet.c | sk->cong_window = 1; /* start with only sending one packet at a time. */ |
sk | 658 | net/ipv4/af_inet.c | sk->priority = 1; |
sk | 659 | net/ipv4/af_inet.c | sk->state = TCP_CLOSE; |
sk | 662 | net/ipv4/af_inet.c | sk->max_unacked = 2048; /* needs to be at most 2 full packets. */ |
sk | 663 | net/ipv4/af_inet.c | sk->delay_acks = 1; |
sk | 664 | net/ipv4/af_inet.c | sk->max_ack_backlog = SOMAXCONN; |
sk | 665 | net/ipv4/af_inet.c | skb_queue_head_init(&sk->write_queue); |
sk | 666 | net/ipv4/af_inet.c | skb_queue_head_init(&sk->receive_queue); |
sk | 667 | net/ipv4/af_inet.c | sk->mtu = 576; |
sk | 668 | net/ipv4/af_inet.c | sk->prot = prot; |
sk | 669 | net/ipv4/af_inet.c | sk->sleep = sock->wait; |
sk | 670 | net/ipv4/af_inet.c | init_timer(&sk->timer); |
sk | 671 | net/ipv4/af_inet.c | init_timer(&sk->retransmit_timer); |
sk | 672 | net/ipv4/af_inet.c | sk->timer.data = (unsigned long)sk; |
sk | 673 | net/ipv4/af_inet.c | sk->timer.function = &net_timer; |
sk | 674 | net/ipv4/af_inet.c | skb_queue_head_init(&sk->back_log); |
sk | 675 | net/ipv4/af_inet.c | sock->data =(void *) sk; |
sk | 676 | net/ipv4/af_inet.c | sk->dummy_th.doff = sizeof(sk->dummy_th)/4; |
sk | 677 | net/ipv4/af_inet.c | sk->ip_ttl=ip_statistics.IpDefaultTTL; |
sk | 678 | net/ipv4/af_inet.c | if(sk->type==SOCK_RAW && protocol==IPPROTO_RAW) |
sk | 679 | net/ipv4/af_inet.c | sk->ip_hdrincl=1; |
sk | 681 | net/ipv4/af_inet.c | sk->ip_hdrincl=0; |
sk | 683 | net/ipv4/af_inet.c | sk->ip_mc_loop=1; |
sk | 684 | net/ipv4/af_inet.c | sk->ip_mc_ttl=1; |
sk | 685 | net/ipv4/af_inet.c | *sk->ip_mc_name=0; |
sk | 686 | net/ipv4/af_inet.c | sk->ip_mc_list=NULL; |
sk | 693 | net/ipv4/af_inet.c | sk->dummy_th.ack=1; |
sk | 694 | net/ipv4/af_inet.c | sk->dummy_th.doff=sizeof(struct tcphdr)>>2; |
sk | 696 | net/ipv4/af_inet.c | sk->state_change = def_callback1; |
sk | 697 | net/ipv4/af_inet.c | sk->data_ready = def_callback2; |
sk | 698 | net/ipv4/af_inet.c | sk->write_space = def_callback3; |
sk | 699 | net/ipv4/af_inet.c | sk->error_report = def_callback1; |
sk | 701 | net/ipv4/af_inet.c | if (sk->num) |
sk | 709 | net/ipv4/af_inet.c | put_sock(sk->num, sk); |
sk | 710 | net/ipv4/af_inet.c | sk->dummy_th.source = ntohs(sk->num); |
sk | 713 | net/ipv4/af_inet.c | if (sk->prot->init) |
sk | 715 | net/ipv4/af_inet.c | err = sk->prot->init(sk); |
sk | 718 | net/ipv4/af_inet.c | destroy_sock(sk); |
sk | 744 | net/ipv4/af_inet.c | struct sock *sk = (struct sock *) sock->data; |
sk | 746 | net/ipv4/af_inet.c | if (sk == NULL) |
sk | 749 | net/ipv4/af_inet.c | sk->state_change(sk); |
sk | 755 | net/ipv4/af_inet.c | ip_mc_drop_socket(sk); |
sk | 766 | net/ipv4/af_inet.c | if (sk->linger) { |
sk | 768 | net/ipv4/af_inet.c | if (!sk->lingertime) |
sk | 769 | net/ipv4/af_inet.c | timeout = jiffies + HZ*sk->lingertime; |
sk | 775 | net/ipv4/af_inet.c | sk->socket = NULL; |
sk | 777 | net/ipv4/af_inet.c | sk->prot->close(sk, timeout); |
sk | 786 | net/ipv4/af_inet.c | struct sock *sk=(struct sock *)sock->data, *sk2; |
sk | 794 | net/ipv4/af_inet.c | if(sk->prot->bind) |
sk | 795 | net/ipv4/af_inet.c | return sk->prot->bind(sk,uaddr, addr_len); |
sk | 798 | net/ipv4/af_inet.c | if (sk->state != TCP_CLOSE) |
sk | 805 | net/ipv4/af_inet.c | if (sk->num != 0) |
sk | 819 | net/ipv4/af_inet.c | snum = get_new_socknum(sk->prot, 0); |
sk | 838 | net/ipv4/af_inet.c | sk->rcv_saddr = addr->sin_addr.s_addr; |
sk | 840 | net/ipv4/af_inet.c | sk->saddr = 0; /* Use device */ |
sk | 842 | net/ipv4/af_inet.c | sk->saddr = addr->sin_addr.s_addr; |
sk | 848 | net/ipv4/af_inet.c | for(sk2 = sk->prot->sock_array[snum & (SOCK_ARRAY_SIZE -1)]; |
sk | 863 | net/ipv4/af_inet.c | if (!sk2->rcv_saddr || !sk->rcv_saddr) |
sk | 868 | net/ipv4/af_inet.c | if(sk2->reuse && sk->reuse && sk2->state!=TCP_LISTEN) |
sk | 878 | net/ipv4/af_inet.c | if (sk2->rcv_saddr != sk->rcv_saddr) |
sk | 884 | net/ipv4/af_inet.c | if (!sk->reuse) |
sk | 902 | net/ipv4/af_inet.c | remove_sock(sk); |
sk | 907 | net/ipv4/af_inet.c | put_sock(snum, sk); |
sk | 908 | net/ipv4/af_inet.c | sk->dummy_th.source = ntohs(sk->num); |
sk | 909 | net/ipv4/af_inet.c | sk->daddr = 0; |
sk | 910 | net/ipv4/af_inet.c | sk->dummy_th.dest = 0; |
sk | 912 | net/ipv4/af_inet.c | ip_rt_put(sk->ip_route_cache); |
sk | 913 | net/ipv4/af_inet.c | sk->ip_route_cache=NULL; |
sk | 925 | net/ipv4/af_inet.c | struct sock *sk=(struct sock *)sock->data; |
sk | 929 | net/ipv4/af_inet.c | if (sock->state == SS_CONNECTING && tcp_connected(sk->state)) |
sk | 936 | net/ipv4/af_inet.c | if (sock->state == SS_CONNECTING && sk->protocol == IPPROTO_TCP && (flags & O_NONBLOCK)) |
sk | 938 | net/ipv4/af_inet.c | if(sk->err!=0) |
sk | 939 | net/ipv4/af_inet.c | return sock_error(sk); |
sk | 945 | net/ipv4/af_inet.c | if(inet_autobind(sk)!=0) |
sk | 947 | net/ipv4/af_inet.c | if (sk->prot->connect == NULL) |
sk | 949 | net/ipv4/af_inet.c | err = sk->prot->connect(sk, (struct sockaddr_in *)uaddr, addr_len); |
sk | 955 | net/ipv4/af_inet.c | if (sk->state > TCP_FIN_WAIT2 && sock->state==SS_CONNECTING) |
sk | 958 | net/ipv4/af_inet.c | return sock_error(sk); |
sk | 961 | net/ipv4/af_inet.c | if (sk->state != TCP_ESTABLISHED &&(flags & O_NONBLOCK)) |
sk | 965 | net/ipv4/af_inet.c | while(sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV) |
sk | 967 | net/ipv4/af_inet.c | interruptible_sleep_on(sk->sleep); |
sk | 975 | net/ipv4/af_inet.c | if(sk->err && sk->protocol == IPPROTO_TCP) |
sk | 979 | net/ipv4/af_inet.c | return sock_error(sk); /* set by tcp_err() */ |
sk | 985 | net/ipv4/af_inet.c | if (sk->state != TCP_ESTABLISHED && sk->err) |
sk | 988 | net/ipv4/af_inet.c | return sock_error(sk); |
sk | 1019 | net/ipv4/af_inet.c | struct sock *sk=(struct sock *)newsock->data; |
sk | 1021 | net/ipv4/af_inet.c | destroy_sock(sk); |
sk | 1087 | net/ipv4/af_inet.c | struct sock *sk; |
sk | 1090 | net/ipv4/af_inet.c | sk = (struct sock *) sock->data; |
sk | 1093 | net/ipv4/af_inet.c | if (!tcp_connected(sk->state)) |
sk | 1095 | net/ipv4/af_inet.c | sin->sin_port = sk->dummy_th.dest; |
sk | 1096 | net/ipv4/af_inet.c | sin->sin_addr.s_addr = sk->daddr; |
sk | 1100 | net/ipv4/af_inet.c | __u32 addr = sk->rcv_saddr; |
sk | 1102 | net/ipv4/af_inet.c | addr = sk->saddr; |
sk | 1106 | net/ipv4/af_inet.c | sin->sin_port = sk->dummy_th.source; |
sk | 1118 | net/ipv4/af_inet.c | struct sock *sk = (struct sock *) sock->data; |
sk | 1120 | net/ipv4/af_inet.c | if (sk->prot->recvmsg == NULL) |
sk | 1122 | net/ipv4/af_inet.c | if(sk->err) |
sk | 1123 | net/ipv4/af_inet.c | return sock_error(sk); |
sk | 1125 | net/ipv4/af_inet.c | if(inet_autobind(sk)!=0) |
sk | 1127 | net/ipv4/af_inet.c | return(sk->prot->recvmsg(sk, ubuf, size, noblock, flags,addr_len)); |
sk | 1134 | net/ipv4/af_inet.c | struct sock *sk = (struct sock *) sock->data; |
sk | 1135 | net/ipv4/af_inet.c | if (sk->shutdown & SEND_SHUTDOWN) |
sk | 1140 | net/ipv4/af_inet.c | if (sk->prot->sendmsg == NULL) |
sk | 1142 | net/ipv4/af_inet.c | if(sk->err) |
sk | 1143 | net/ipv4/af_inet.c | return sock_error(sk); |
sk | 1145 | net/ipv4/af_inet.c | if(inet_autobind(sk)!=0) |
sk | 1147 | net/ipv4/af_inet.c | return(sk->prot->sendmsg(sk, msg, size, noblock, flags)); |
sk | 1154 | net/ipv4/af_inet.c | struct sock *sk=(struct sock*)sock->data; |
sk | 1165 | net/ipv4/af_inet.c | if (sock->state == SS_CONNECTING && sk->state == TCP_ESTABLISHED) |
sk | 1167 | net/ipv4/af_inet.c | if (!sk || !tcp_connected(sk->state)) |
sk | 1169 | net/ipv4/af_inet.c | sk->shutdown |= how; |
sk | 1170 | net/ipv4/af_inet.c | if (sk->prot->shutdown) |
sk | 1171 | net/ipv4/af_inet.c | sk->prot->shutdown(sk, how); |
sk | 1178 | net/ipv4/af_inet.c | struct sock *sk=(struct sock *) sock->data; |
sk | 1179 | net/ipv4/af_inet.c | if (sk->prot->select == NULL) |
sk | 1183 | net/ipv4/af_inet.c | return(sk->prot->select(sk, sel_type, wait)); |
sk | 1198 | net/ipv4/af_inet.c | struct sock *sk=(struct sock *)sock->data; |
sk | 1213 | net/ipv4/af_inet.c | sk->proc = pid; |
sk | 1220 | net/ipv4/af_inet.c | put_fs_long(sk->proc,(int *)arg); |
sk | 1223 | net/ipv4/af_inet.c | if(sk->stamp.tv_sec==0) |
sk | 1228 | net/ipv4/af_inet.c | memcpy_tofs((void *)arg,&sk->stamp,sizeof(struct timeval)); |
sk | 1282 | net/ipv4/af_inet.c | if (sk->prot->ioctl==NULL) |
sk | 1284 | net/ipv4/af_inet.c | return(sk->prot->ioctl(sk, cmd, arg)); |
sk | 1363 | net/ipv4/af_inet.c | struct sock *get_sock_raw(struct sock *sk, |
sk | 1370 | net/ipv4/af_inet.c | s=sk; |
sk | 1392 | net/ipv4/af_inet.c | struct sock *get_sock_mcast(struct sock *sk, |
sk | 1411 | net/ipv4/af_inet.c | s=sk; |
sk | 920 | net/ipv4/arp.c | if(skb->sk==NULL) |
sk | 923 | net/ipv4/arp.c | dev_queue_xmit(skb,skb->dev,skb->sk->priority); |
sk | 1445 | net/ipv4/arp.c | if (skb->sk) |
sk | 1447 | net/ipv4/arp.c | skb->sk->err = EHOSTDOWN; |
sk | 1448 | net/ipv4/arp.c | skb->sk->error_report(skb->sk); |
sk | 234 | net/ipv4/icmp.c | struct sock *sk=icmp_socket.data; |
sk | 238 | net/ipv4/icmp.c | ip_build_xmit(sk, icmp_glue_bits, icmp_param, |
sk | 754 | net/ipv4/icmp.c | struct sock *sk; |
sk | 760 | net/ipv4/icmp.c | sk=icmp_socket.data; |
sk | 761 | net/ipv4/icmp.c | sk->allocation=GFP_ATOMIC; |
sk | 762 | net/ipv4/icmp.c | sk->num = 256; /* Don't receive any data */ |
sk | 576 | net/ipv4/igmp.c | int ip_mc_join_group(struct sock *sk , struct device *dev, unsigned long addr) |
sk | 584 | net/ipv4/igmp.c | if(sk->ip_mc_list==NULL) |
sk | 586 | net/ipv4/igmp.c | if((sk->ip_mc_list=(struct ip_mc_socklist *)kmalloc(sizeof(*sk->ip_mc_list), GFP_KERNEL))==NULL) |
sk | 588 | net/ipv4/igmp.c | memset(sk->ip_mc_list,'\0',sizeof(*sk->ip_mc_list)); |
sk | 592 | net/ipv4/igmp.c | if(sk->ip_mc_list->multiaddr[i]==addr && sk->ip_mc_list->multidev[i]==dev) |
sk | 594 | net/ipv4/igmp.c | if(sk->ip_mc_list->multidev[i]==NULL) |
sk | 600 | net/ipv4/igmp.c | sk->ip_mc_list->multiaddr[unused]=addr; |
sk | 601 | net/ipv4/igmp.c | sk->ip_mc_list->multidev[unused]=dev; |
sk | 610 | net/ipv4/igmp.c | int ip_mc_leave_group(struct sock *sk, struct device *dev, unsigned long addr) |
sk | 617 | net/ipv4/igmp.c | if(sk->ip_mc_list==NULL) |
sk | 622 | net/ipv4/igmp.c | if(sk->ip_mc_list->multiaddr[i]==addr && sk->ip_mc_list->multidev[i]==dev) |
sk | 624 | net/ipv4/igmp.c | sk->ip_mc_list->multidev[i]=NULL; |
sk | 636 | net/ipv4/igmp.c | void ip_mc_drop_socket(struct sock *sk) |
sk | 640 | net/ipv4/igmp.c | if(sk->ip_mc_list==NULL) |
sk | 645 | net/ipv4/igmp.c | if(sk->ip_mc_list->multidev[i]) |
sk | 647 | net/ipv4/igmp.c | ip_mc_dec_group(sk->ip_mc_list->multidev[i], sk->ip_mc_list->multiaddr[i]); |
sk | 648 | net/ipv4/igmp.c | sk->ip_mc_list->multidev[i]=NULL; |
sk | 651 | net/ipv4/igmp.c | kfree_s(sk->ip_mc_list,sizeof(*sk->ip_mc_list)); |
sk | 652 | net/ipv4/igmp.c | sk->ip_mc_list=NULL; |
sk | 464 | net/ipv4/ip_fragment.c | skb->sk = NULL; |
sk | 565 | net/ipv4/ip_fragment.c | skb->sk = NULL; |
sk | 606 | net/ipv4/ip_fragment.c | void ip_fragment(struct sock *sk, struct sk_buff *skb, struct device *dev, int is_frag) |
sk | 720 | net/ipv4/ip_fragment.c | if (sk) |
sk | 722 | net/ipv4/ip_fragment.c | atomic_add(skb2->truesize, &sk->wmem_alloc); |
sk | 723 | net/ipv4/ip_fragment.c | skb2->sk=sk; |
sk | 772 | net/ipv4/ip_fragment.c | ip_queue_xmit(sk, dev, skb2, 2); |
sk | 160 | net/ipv4/ip_input.c | extern void sort_send(struct sock *sk); |
sk | 180 | net/ipv4/ip_input.c | int ip_ioctl(struct sock *sk, int cmd, unsigned long arg) |
sk | 77 | net/ipv4/ip_output.c | newskb->sk=NULL; |
sk | 211 | net/ipv4/ip_output.c | if(MULTICAST(daddr) && *dev==NULL && skb->sk && *skb->sk->ip_mc_name) |
sk | 212 | net/ipv4/ip_output.c | *dev=dev_get(skb->sk->ip_mc_name); |
sk | 334 | net/ipv4/ip_output.c | void ip_queue_xmit(struct sock *sk, struct device *dev, |
sk | 384 | net/ipv4/ip_output.c | if (sk == NULL) |
sk | 397 | net/ipv4/ip_output.c | ip_fragment(sk,skb,dev,0); |
sk | 435 | net/ipv4/ip_output.c | sk->packets_out++; |
sk | 446 | net/ipv4/ip_output.c | if (sk->send_head == NULL) |
sk | 448 | net/ipv4/ip_output.c | sk->send_tail = skb; |
sk | 449 | net/ipv4/ip_output.c | sk->send_head = skb; |
sk | 453 | net/ipv4/ip_output.c | sk->send_tail->link3 = skb; |
sk | 454 | net/ipv4/ip_output.c | sk->send_tail = skb; |
sk | 463 | net/ipv4/ip_output.c | skb->sk = sk; |
sk | 482 | net/ipv4/ip_output.c | if(sk==NULL || sk->ip_mc_loop) |
sk | 521 | net/ipv4/ip_output.c | if (sk != NULL) |
sk | 523 | net/ipv4/ip_output.c | dev_queue_xmit(skb, dev, sk->priority); |
sk | 532 | net/ipv4/ip_output.c | if(sk) |
sk | 533 | net/ipv4/ip_output.c | sk->err = ENETDOWN; |
sk | 561 | net/ipv4/ip_output.c | int ip_build_xmit(struct sock *sk, |
sk | 588 | net/ipv4/ip_output.c | if (opt && opt->srr && !sk->ip_hdrincl) |
sk | 594 | net/ipv4/ip_output.c | if(sk && MULTICAST(daddr) && *sk->ip_mc_name) |
sk | 596 | net/ipv4/ip_output.c | dev=dev_get(sk->ip_mc_name); |
sk | 600 | net/ipv4/ip_output.c | if (sk->saddr && (!LOOPBACK(sk->saddr) || LOOPBACK(daddr))) |
sk | 601 | net/ipv4/ip_output.c | saddr = sk->saddr; |
sk | 608 | net/ipv4/ip_output.c | rt = ip_check_route(&sk->ip_route_cache, daddr, |
sk | 609 | net/ipv4/ip_output.c | sk->localroute || (flags&MSG_DONTROUTE) || |
sk | 620 | net/ipv4/ip_output.c | if (sk->saddr && (!LOOPBACK(sk->saddr) || LOOPBACK(daddr))) |
sk | 621 | net/ipv4/ip_output.c | saddr = sk->saddr; |
sk | 643 | net/ipv4/ip_output.c | if (!sk->ip_hdrincl && opt) |
sk | 649 | net/ipv4/ip_output.c | struct sk_buff *skb=sock_alloc_send_skb(sk, length+15+dev->hard_header_len,0, noblock, &error); |
sk | 659 | net/ipv4/ip_output.c | skb->sk=sk; |
sk | 685 | net/ipv4/ip_output.c | if(!sk->ip_hdrincl) |
sk | 689 | net/ipv4/ip_output.c | iph->tos=sk->ip_tos; |
sk | 693 | net/ipv4/ip_output.c | iph->ttl=sk->ip_ttl; |
sk | 721 | net/ipv4/ip_output.c | dev_queue_xmit(skb,dev,sk->priority); |
sk | 730 | net/ipv4/ip_output.c | if (sk && !sk->ip_hdrincl && opt) |
sk | 739 | net/ipv4/ip_output.c | if(!sk->ip_hdrincl) |
sk | 779 | net/ipv4/ip_output.c | if (sk->ip_hdrincl && offset > 0) |
sk | 808 | net/ipv4/ip_output.c | skb = sock_alloc_send_skb(sk, fraglen+15, 0, noblock, &error); |
sk | 826 | net/ipv4/ip_output.c | skb->sk = sk; |
sk | 869 | net/ipv4/ip_output.c | if(!sk->ip_hdrincl) |
sk | 879 | net/ipv4/ip_output.c | iph->tos = sk->ip_tos; |
sk | 886 | net/ipv4/ip_output.c | iph->ttl = sk->ip_mc_ttl; |
sk | 889 | net/ipv4/ip_output.c | iph->ttl = sk->ip_ttl; |
sk | 944 | net/ipv4/ip_output.c | if(sk==NULL || sk->ip_mc_loop) |
sk | 988 | net/ipv4/ip_output.c | dev_queue_xmit(skb, dev, sk->priority); |
sk | 1004 | net/ipv4/ip_output.c | if(sk!=NULL) |
sk | 1005 | net/ipv4/ip_output.c | sk->err=ENETDOWN; |
sk | 111 | net/ipv4/ip_sockglue.c | int ip_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen) |
sk | 137 | net/ipv4/ip_sockglue.c | return ip_mroute_setsockopt(sk,optname,optval,optlen); |
sk | 173 | net/ipv4/ip_sockglue.c | old_opt = sk->opt; |
sk | 174 | net/ipv4/ip_sockglue.c | sk->opt = opt; |
sk | 183 | net/ipv4/ip_sockglue.c | sk->ip_tos=val; |
sk | 185 | net/ipv4/ip_sockglue.c | sk->priority=SOPRI_INTERACTIVE; |
sk | 187 | net/ipv4/ip_sockglue.c | sk->priority=SOPRI_BACKGROUND; |
sk | 192 | net/ipv4/ip_sockglue.c | sk->ip_ttl=val; |
sk | 195 | net/ipv4/ip_sockglue.c | if(sk->type!=SOCK_RAW) |
sk | 197 | net/ipv4/ip_sockglue.c | sk->ip_hdrincl=val?1:0; |
sk | 202 | net/ipv4/ip_sockglue.c | sk->ip_mc_ttl=(int)ucval; |
sk | 209 | net/ipv4/ip_sockglue.c | sk->ip_mc_loop=(int)ucval; |
sk | 234 | net/ipv4/ip_sockglue.c | sk->ip_mc_name[0]=0; |
sk | 250 | net/ipv4/ip_sockglue.c | strcpy(sk->ip_mc_name,dev->name); |
sk | 314 | net/ipv4/ip_sockglue.c | return ip_mc_join_group(sk,dev,mreq.imr_multiaddr.s_addr); |
sk | 365 | net/ipv4/ip_sockglue.c | return ip_mc_leave_group(sk,dev,mreq.imr_multiaddr.s_addr); |
sk | 430 | net/ipv4/ip_sockglue.c | int ip_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen) |
sk | 443 | net/ipv4/ip_sockglue.c | return ip_mroute_getsockopt(sk,optname,optval,optlen); |
sk | 458 | net/ipv4/ip_sockglue.c | if (sk->opt) |
sk | 459 | net/ipv4/ip_sockglue.c | memcpy(optbuf, sk->opt, sizeof(struct options)+sk->opt->optlen); |
sk | 503 | net/ipv4/ip_sockglue.c | val=sk->ip_tos; |
sk | 506 | net/ipv4/ip_sockglue.c | val=sk->ip_ttl; |
sk | 509 | net/ipv4/ip_sockglue.c | val=sk->ip_hdrincl; |
sk | 513 | net/ipv4/ip_sockglue.c | val=sk->ip_mc_ttl; |
sk | 516 | net/ipv4/ip_sockglue.c | val=sk->ip_mc_loop; |
sk | 522 | net/ipv4/ip_sockglue.c | len=strlen(sk->ip_mc_name); |
sk | 527 | net/ipv4/ip_sockglue.c | memcpy_tofs((void *)optval,sk->ip_mc_name, len); |
sk | 286 | net/ipv4/ipmr.c | skb->sk=NULL; |
sk | 427 | net/ipv4/ipmr.c | int ip_mroute_setsockopt(struct sock *sk,int optname,char *optval,int optlen) |
sk | 435 | net/ipv4/ipmr.c | if(sk!=mroute_socket) |
sk | 442 | net/ipv4/ipmr.c | if(sk->type!=SOCK_RAW || sk->num!=IPPROTO_IGMP) |
sk | 452 | net/ipv4/ipmr.c | mroute_socket=sk; |
sk | 456 | net/ipv4/ipmr.c | mroute_close(sk); |
sk | 568 | net/ipv4/ipmr.c | int ip_mroute_getsockopt(struct sock *sk,int optname,char *optval,int *optlen) |
sk | 573 | net/ipv4/ipmr.c | if(sk!=mroute_socket) |
sk | 596 | net/ipv4/ipmr.c | int ipmr_ioctl(struct sock *sk, int cmd, unsigned long arg) |
sk | 639 | net/ipv4/ipmr.c | void mroute_close(struct sock *sk) |
sk | 81 | net/ipv4/packet.c | struct sock *sk; |
sk | 88 | net/ipv4/packet.c | sk = (struct sock *) pt->data; |
sk | 108 | net/ipv4/packet.c | if(sock_queue_rcv_skb(sk,skb)<0) |
sk | 110 | net/ipv4/packet.c | skb->sk = NULL; |
sk | 127 | net/ipv4/packet.c | static int packet_sendmsg(struct sock *sk, struct msghdr *msg, int len, |
sk | 175 | net/ipv4/packet.c | skb = sock_wmalloc(sk, len, 0, GFP_KERNEL); |
sk | 192 | net/ipv4/packet.c | skb->sk = sk; |
sk | 203 | net/ipv4/packet.c | dev_queue_xmit(skb, dev, sk->priority); |
sk | 216 | net/ipv4/packet.c | static void packet_close(struct sock *sk, unsigned long timeout) |
sk | 222 | net/ipv4/packet.c | lock_sock(sk); |
sk | 223 | net/ipv4/packet.c | sk->state = TCP_CLOSE; |
sk | 229 | net/ipv4/packet.c | unregister_netdevice_notifier(&sk->protinfo.af_packet.notifier); |
sk | 231 | net/ipv4/packet.c | if(sk->protinfo.af_packet.prot_hook) |
sk | 237 | net/ipv4/packet.c | dev_remove_pack((struct packet_type *)sk->protinfo.af_packet.prot_hook); |
sk | 243 | net/ipv4/packet.c | kfree_s((void *)sk->protinfo.af_packet.prot_hook, sizeof(struct packet_type)); |
sk | 244 | net/ipv4/packet.c | sk->protinfo.af_packet.prot_hook = NULL; |
sk | 247 | net/ipv4/packet.c | release_sock(sk); |
sk | 248 | net/ipv4/packet.c | destroy_sock(sk); |
sk | 255 | net/ipv4/packet.c | int packet_attach(struct sock *sk, struct device *dev) |
sk | 262 | net/ipv4/packet.c | p->type = sk->num; |
sk | 263 | net/ipv4/packet.c | p->data = (void *)sk; |
sk | 271 | net/ipv4/packet.c | sk->protinfo.af_packet.prot_hook = p; |
sk | 272 | net/ipv4/packet.c | sk->protinfo.af_packet.bound_dev = dev; |
sk | 280 | net/ipv4/packet.c | static int packet_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
sk | 317 | net/ipv4/packet.c | memcpy(sk->protinfo.af_packet.device_name,name,15); |
sk | 323 | net/ipv4/packet.c | if(sk->protinfo.af_packet.prot_hook) |
sk | 325 | net/ipv4/packet.c | dev_remove_pack(sk->protinfo.af_packet.prot_hook); |
sk | 326 | net/ipv4/packet.c | sk->protinfo.af_packet.prot_hook->dev=dev; |
sk | 327 | net/ipv4/packet.c | sk->protinfo.af_packet.bound_dev=dev; |
sk | 328 | net/ipv4/packet.c | dev_add_pack(sk->protinfo.af_packet.prot_hook); |
sk | 332 | net/ipv4/packet.c | int err=packet_attach(sk, dev); |
sk | 372 | net/ipv4/packet.c | static int packet_init(struct sock *sk) |
sk | 378 | net/ipv4/packet.c | int err=packet_attach(sk, NULL); |
sk | 386 | net/ipv4/packet.c | sk->protinfo.af_packet.notifier.notifier_call=packet_unbind; |
sk | 387 | net/ipv4/packet.c | sk->protinfo.af_packet.notifier.priority=0; |
sk | 389 | net/ipv4/packet.c | register_netdevice_notifier(&sk->protinfo.af_packet.notifier); |
sk | 400 | net/ipv4/packet.c | int packet_recvmsg(struct sock *sk, struct msghdr *msg, int len, |
sk | 408 | net/ipv4/packet.c | if (sk->shutdown & RCV_SHUTDOWN) |
sk | 415 | net/ipv4/packet.c | if(sk->protinfo.af_packet.prot_hook==NULL) |
sk | 432 | net/ipv4/packet.c | skb=skb_recv_datagram(sk,flags,noblock,&err); |
sk | 451 | net/ipv4/packet.c | sk->stamp=skb->stamp; |
sk | 469 | net/ipv4/packet.c | skb_free_datagram(sk, skb); |
sk | 82 | net/ipv4/raw.c | struct sock *sk; |
sk | 86 | net/ipv4/raw.c | sk = (struct sock *) protocol->data; |
sk | 87 | net/ipv4/raw.c | if (sk == NULL) |
sk | 93 | net/ipv4/raw.c | if (sk->cong_window > 1) sk->cong_window = sk->cong_window/2; |
sk | 99 | net/ipv4/raw.c | sk->err = EPROTO; |
sk | 100 | net/ipv4/raw.c | sk->error_report(sk); |
sk | 105 | net/ipv4/raw.c | sk->err = icmp_err_convert[code & 0xff].errno; |
sk | 106 | net/ipv4/raw.c | sk->error_report(sk); |
sk | 119 | net/ipv4/raw.c | int raw_rcv(struct sock *sk, struct sk_buff *skb, struct device *dev, __u32 saddr, __u32 daddr) |
sk | 122 | net/ipv4/raw.c | skb->sk = sk; |
sk | 136 | net/ipv4/raw.c | if(sk->bsdism) |
sk | 142 | net/ipv4/raw.c | if(sock_queue_rcv_skb(sk,skb)<0) |
sk | 145 | net/ipv4/raw.c | skb->sk=NULL; |
sk | 191 | net/ipv4/raw.c | static int raw_sendto(struct sock *sk, const unsigned char *from, |
sk | 220 | net/ipv4/raw.c | if (sk->state != TCP_ESTABLISHED) |
sk | 223 | net/ipv4/raw.c | sin.sin_port = sk->num; |
sk | 224 | net/ipv4/raw.c | sin.sin_addr.s_addr = sk->daddr; |
sk | 227 | net/ipv4/raw.c | sin.sin_port = sk->num; |
sk | 236 | net/ipv4/raw.c | if (!sk->bsdism && sk->broadcast == 0 && ip_chk_addr(sin.sin_addr.s_addr)==IS_BROADCAST) |
sk | 239 | net/ipv4/raw.c | if(sk->ip_hdrincl) |
sk | 243 | net/ipv4/raw.c | err=ip_build_xmit(sk, raw_getrawfrag, from, len, sin.sin_addr.s_addr, 0, sk->opt, flags, sin.sin_port, noblock); |
sk | 249 | net/ipv4/raw.c | err=ip_build_xmit(sk, raw_getfrag, from, len, sin.sin_addr.s_addr, 0, sk->opt, flags, sin.sin_port, noblock); |
sk | 258 | net/ipv4/raw.c | static int raw_sendmsg(struct sock *sk, struct msghdr *msg, int len, int noblock, |
sk | 262 | net/ipv4/raw.c | return raw_sendto(sk,msg->msg_iov[0].iov_base,len, noblock, flags, msg->msg_name, msg->msg_namelen); |
sk | 282 | net/ipv4/raw.c | err=raw_sendto(sk,buf,len, noblock, flags, msg->msg_name, msg->msg_namelen); |
sk | 289 | net/ipv4/raw.c | static void raw_close(struct sock *sk, unsigned long timeout) |
sk | 291 | net/ipv4/raw.c | sk->state = TCP_CLOSE; |
sk | 293 | net/ipv4/raw.c | if(sk==mroute_socket) |
sk | 295 | net/ipv4/raw.c | mroute_close(sk); |
sk | 299 | net/ipv4/raw.c | destroy_sock(sk); |
sk | 303 | net/ipv4/raw.c | static int raw_init(struct sock *sk) |
sk | 314 | net/ipv4/raw.c | int raw_recvmsg(struct sock *sk, struct msghdr *msg, int len, |
sk | 325 | net/ipv4/raw.c | if (sk->shutdown & RCV_SHUTDOWN) |
sk | 331 | net/ipv4/raw.c | skb=skb_recv_datagram(sk,flags,noblock,&err); |
sk | 338 | net/ipv4/raw.c | sk->stamp=skb->stamp; |
sk | 346 | net/ipv4/raw.c | skb_free_datagram(sk, skb); |
sk | 425 | net/ipv4/tcp.c | static void tcp_close(struct sock *sk, unsigned long timeout); |
sk | 445 | net/ipv4/tcp.c | if(p->sk->state == TCP_ESTABLISHED || p->sk->state >= TCP_FIN_WAIT1) |
sk | 477 | net/ipv4/tcp.c | static void tcp_close_pending (struct sock *sk) |
sk | 481 | net/ipv4/tcp.c | while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) |
sk | 483 | net/ipv4/tcp.c | tcp_close(skb->sk, 0); |
sk | 493 | net/ipv4/tcp.c | void tcp_time_wait(struct sock *sk) |
sk | 495 | net/ipv4/tcp.c | tcp_set_state(sk,TCP_TIME_WAIT); |
sk | 496 | net/ipv4/tcp.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 497 | net/ipv4/tcp.c | if (!sk->dead) |
sk | 498 | net/ipv4/tcp.c | sk->state_change(sk); |
sk | 499 | net/ipv4/tcp.c | tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN); |
sk | 516 | net/ipv4/tcp.c | struct sock *sk; |
sk | 525 | net/ipv4/tcp.c | sk = get_sock(&tcp_prot, th->source, daddr, th->dest, saddr); |
sk | 527 | net/ipv4/tcp.c | if (sk == NULL) |
sk | 537 | net/ipv4/tcp.c | if (sk->cong_window > 4) |
sk | 538 | net/ipv4/tcp.c | sk->cong_window--; |
sk | 544 | net/ipv4/tcp.c | sk->err=EPROTO; |
sk | 545 | net/ipv4/tcp.c | sk->error_report(sk); |
sk | 558 | net/ipv4/tcp.c | if ((rt = sk->ip_route_cache) != NULL) |
sk | 562 | net/ipv4/tcp.c | if (sk->mtu > new_mtu - sizeof(struct iphdr) - sizeof(struct tcphdr) |
sk | 564 | net/ipv4/tcp.c | sk->mtu = new_mtu - sizeof(struct iphdr) - sizeof(struct tcphdr); |
sk | 577 | net/ipv4/tcp.c | if(icmp_err_convert[code].fatal || sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV) |
sk | 579 | net/ipv4/tcp.c | sk->err = icmp_err_convert[code].errno; |
sk | 580 | net/ipv4/tcp.c | if (sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV) |
sk | 583 | net/ipv4/tcp.c | tcp_set_state(sk,TCP_CLOSE); |
sk | 584 | net/ipv4/tcp.c | sk->error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ |
sk | 588 | net/ipv4/tcp.c | sk->err_soft = icmp_err_convert[code].errno; |
sk | 599 | net/ipv4/tcp.c | static int tcp_readable(struct sock *sk) |
sk | 607 | net/ipv4/tcp.c | if(sk && sk->debug) |
sk | 608 | net/ipv4/tcp.c | printk("tcp_readable: %p - ",sk); |
sk | 612 | net/ipv4/tcp.c | if (sk == NULL || (skb = skb_peek(&sk->receive_queue)) == NULL) |
sk | 615 | net/ipv4/tcp.c | if(sk && sk->debug) |
sk | 620 | net/ipv4/tcp.c | counted = sk->copied_seq; /* Where we are at the moment */ |
sk | 662 | net/ipv4/tcp.c | while(skb != (struct sk_buff *)&sk->receive_queue); |
sk | 665 | net/ipv4/tcp.c | if(sk->debug) |
sk | 673 | net/ipv4/tcp.c | static int tcp_listen_select(struct sock *sk, int sel_type, select_table *wait) |
sk | 678 | net/ipv4/tcp.c | lock_sock(sk); |
sk | 679 | net/ipv4/tcp.c | retval = (tcp_find_established(sk) != NULL); |
sk | 680 | net/ipv4/tcp.c | release_sock(sk); |
sk | 696 | net/ipv4/tcp.c | static int tcp_select(struct sock *sk, int sel_type, select_table *wait) |
sk | 698 | net/ipv4/tcp.c | if (sk->state == TCP_LISTEN) |
sk | 699 | net/ipv4/tcp.c | return tcp_listen_select(sk, sel_type, wait); |
sk | 703 | net/ipv4/tcp.c | if (sk->err) |
sk | 705 | net/ipv4/tcp.c | if (sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV) |
sk | 708 | net/ipv4/tcp.c | if (sk->shutdown & RCV_SHUTDOWN) |
sk | 711 | net/ipv4/tcp.c | if (sk->acked_seq == sk->copied_seq) |
sk | 714 | net/ipv4/tcp.c | if (sk->urg_seq != sk->copied_seq || |
sk | 715 | net/ipv4/tcp.c | sk->acked_seq != sk->copied_seq+1 || |
sk | 716 | net/ipv4/tcp.c | sk->urginline || !sk->urg_data) |
sk | 721 | net/ipv4/tcp.c | if (sk->err) |
sk | 723 | net/ipv4/tcp.c | if (sk->shutdown & SEND_SHUTDOWN) |
sk | 725 | net/ipv4/tcp.c | if (sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV) |
sk | 732 | net/ipv4/tcp.c | if (sock_wspace(sk) < sk->mtu+128+sk->prot->max_header) |
sk | 737 | net/ipv4/tcp.c | if (sk->urg_data) |
sk | 741 | net/ipv4/tcp.c | select_wait(sk->sleep, wait); |
sk | 745 | net/ipv4/tcp.c | int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) |
sk | 758 | net/ipv4/tcp.c | if (sk->state == TCP_LISTEN) |
sk | 761 | net/ipv4/tcp.c | lock_sock(sk); |
sk | 762 | net/ipv4/tcp.c | amount = tcp_readable(sk); |
sk | 763 | net/ipv4/tcp.c | release_sock(sk); |
sk | 772 | net/ipv4/tcp.c | int answ = sk->urg_data && sk->urg_seq == sk->copied_seq; |
sk | 784 | net/ipv4/tcp.c | if (sk->state == TCP_LISTEN) return(-EINVAL); |
sk | 785 | net/ipv4/tcp.c | amount = sock_wspace(sk); |
sk | 836 | net/ipv4/tcp.c | extern __inline int tcp_build_header(struct tcphdr *th, struct sock *sk, int push) |
sk | 839 | net/ipv4/tcp.c | memcpy(th,(void *) &(sk->dummy_th), sizeof(*th)); |
sk | 840 | net/ipv4/tcp.c | th->seq = htonl(sk->write_seq); |
sk | 842 | net/ipv4/tcp.c | sk->ack_backlog = 0; |
sk | 843 | net/ipv4/tcp.c | sk->bytes_rcv = 0; |
sk | 844 | net/ipv4/tcp.c | sk->ack_timed = 0; |
sk | 845 | net/ipv4/tcp.c | th->ack_seq = htonl(sk->acked_seq); |
sk | 846 | net/ipv4/tcp.c | sk->window = tcp_select_window(sk); |
sk | 847 | net/ipv4/tcp.c | th->window = htons(sk->window); |
sk | 855 | net/ipv4/tcp.c | static void wait_for_tcp_connect(struct sock * sk) |
sk | 857 | net/ipv4/tcp.c | release_sock(sk); |
sk | 859 | net/ipv4/tcp.c | if (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT && sk->err == 0) |
sk | 861 | net/ipv4/tcp.c | interruptible_sleep_on(sk->sleep); |
sk | 864 | net/ipv4/tcp.c | lock_sock(sk); |
sk | 870 | net/ipv4/tcp.c | static void wait_for_tcp_memory(struct sock * sk) |
sk | 872 | net/ipv4/tcp.c | release_sock(sk); |
sk | 874 | net/ipv4/tcp.c | if (sk->wmem_alloc*2 > sk->sndbuf && |
sk | 875 | net/ipv4/tcp.c | (sk->state == TCP_ESTABLISHED||sk->state == TCP_CLOSE_WAIT) |
sk | 876 | net/ipv4/tcp.c | && sk->err == 0) |
sk | 878 | net/ipv4/tcp.c | sk->socket->flags &= ~SO_NOSPACE; |
sk | 879 | net/ipv4/tcp.c | interruptible_sleep_on(sk->sleep); |
sk | 882 | net/ipv4/tcp.c | lock_sock(sk); |
sk | 891 | net/ipv4/tcp.c | static int do_tcp_sendmsg(struct sock *sk, struct msghdr *msg, |
sk | 913 | net/ipv4/tcp.c | prot = sk->prot; |
sk | 919 | net/ipv4/tcp.c | if (sk->err) |
sk | 923 | net/ipv4/tcp.c | return sock_error(sk); |
sk | 929 | net/ipv4/tcp.c | if (sk->shutdown & SEND_SHUTDOWN) |
sk | 939 | net/ipv4/tcp.c | while (sk->state != TCP_ESTABLISHED && sk->state != TCP_CLOSE_WAIT) |
sk | 944 | net/ipv4/tcp.c | if (sk->err) |
sk | 945 | net/ipv4/tcp.c | return sock_error(sk); |
sk | 947 | net/ipv4/tcp.c | if (sk->state != TCP_SYN_SENT && sk->state != TCP_SYN_RECV) |
sk | 949 | net/ipv4/tcp.c | if (sk->keepopen) |
sk | 960 | net/ipv4/tcp.c | wait_for_tcp_connect(sk); |
sk | 988 | net/ipv4/tcp.c | int new_mss = min(sk->mtu, sk->max_window); |
sk | 989 | net/ipv4/tcp.c | if (new_mss < sk->mss) |
sk | 991 | net/ipv4/tcp.c | tcp_send_partial(sk); |
sk | 992 | net/ipv4/tcp.c | sk->mss = new_mss; |
sk | 997 | net/ipv4/tcp.c | if ((skb = tcp_dequeue_partial(sk)) != NULL) |
sk | 1006 | net/ipv4/tcp.c | copy = min(sk->mss - tcp_size, seglen); |
sk | 1018 | net/ipv4/tcp.c | sk->write_seq += copy; |
sk | 1021 | net/ipv4/tcp.c | if (tcp_size >= sk->mss || (flags & MSG_OOB) || !sk->packets_out) |
sk | 1022 | net/ipv4/tcp.c | tcp_send_skb(sk, skb); |
sk | 1024 | net/ipv4/tcp.c | tcp_enqueue_partial(skb, sk); |
sk | 1040 | net/ipv4/tcp.c | copy = sk->window_seq - sk->write_seq; |
sk | 1041 | net/ipv4/tcp.c | if (copy <= 0 || copy < (sk->max_window >> 1) || copy > sk->mss) |
sk | 1042 | net/ipv4/tcp.c | copy = sk->mss; |
sk | 1047 | net/ipv4/tcp.c | printk("TCP: **bug**: copy=%d, sk->mss=%d\n", copy, sk->mss); |
sk | 1056 | net/ipv4/tcp.c | if (copy < sk->mss && !(flags & MSG_OOB) && sk->packets_out) |
sk | 1058 | net/ipv4/tcp.c | skb = sock_wmalloc(sk, sk->mtu + 128 + prot->max_header + 15, 0, GFP_KERNEL); |
sk | 1063 | net/ipv4/tcp.c | skb = sock_wmalloc(sk, copy + prot->max_header + 15 , 0, GFP_KERNEL); |
sk | 1072 | net/ipv4/tcp.c | sk->socket->flags |= SO_NOSPACE; |
sk | 1087 | net/ipv4/tcp.c | wait_for_tcp_memory(sk); |
sk | 1091 | net/ipv4/tcp.c | skb->sk = sk; |
sk | 1093 | net/ipv4/tcp.c | skb->localroute = sk->localroute|(flags&MSG_DONTROUTE); |
sk | 1100 | net/ipv4/tcp.c | tmp = prot->build_header(skb, sk->saddr, sk->daddr, &dev, |
sk | 1101 | net/ipv4/tcp.c | IPPROTO_TCP, sk->opt, skb->truesize,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache); |
sk | 1104 | net/ipv4/tcp.c | sock_wfree(sk, skb); |
sk | 1114 | net/ipv4/tcp.c | tmp = tcp_build_header(skb->h.th, sk, seglen-copy); |
sk | 1117 | net/ipv4/tcp.c | sock_wfree(sk, skb); |
sk | 1137 | net/ipv4/tcp.c | sk->write_seq += copy; |
sk | 1141 | net/ipv4/tcp.c | tcp_enqueue_partial(send_tmp, sk); |
sk | 1144 | net/ipv4/tcp.c | tcp_send_skb(sk, skb); |
sk | 1147 | net/ipv4/tcp.c | sk->err = 0; |
sk | 1153 | net/ipv4/tcp.c | static int tcp_sendmsg(struct sock *sk, struct msghdr *msg, |
sk | 1172 | net/ipv4/tcp.c | if(sk->state == TCP_CLOSE) |
sk | 1175 | net/ipv4/tcp.c | if (addr->sin_port != sk->dummy_th.dest) |
sk | 1177 | net/ipv4/tcp.c | if (addr->sin_addr.s_addr != sk->daddr) |
sk | 1181 | net/ipv4/tcp.c | lock_sock(sk); |
sk | 1182 | net/ipv4/tcp.c | retval = do_tcp_sendmsg(sk, msg, len, nonblock, flags); |
sk | 1193 | net/ipv4/tcp.c | if (sk->partial) { |
sk | 1194 | net/ipv4/tcp.c | if (!sk->packets_out || |
sk | 1195 | net/ipv4/tcp.c | (sk->nonagle && before(sk->write_seq , sk->window_seq))) { |
sk | 1196 | net/ipv4/tcp.c | tcp_send_partial(sk); |
sk | 1200 | net/ipv4/tcp.c | release_sock(sk); |
sk | 1213 | net/ipv4/tcp.c | void tcp_read_wakeup(struct sock *sk) |
sk | 1220 | net/ipv4/tcp.c | if (!sk->ack_backlog) |
sk | 1227 | net/ipv4/tcp.c | if ((sk->state == TCP_CLOSE) || (sk->state == TCP_TIME_WAIT)) |
sk | 1241 | net/ipv4/tcp.c | buff = sock_wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC); |
sk | 1245 | net/ipv4/tcp.c | tcp_reset_xmit_timer(sk, TIME_WRITE, HZ); |
sk | 1249 | net/ipv4/tcp.c | buff->sk = sk; |
sk | 1250 | net/ipv4/tcp.c | buff->localroute = sk->localroute; |
sk | 1257 | net/ipv4/tcp.c | tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev, |
sk | 1258 | net/ipv4/tcp.c | IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache); |
sk | 1262 | net/ipv4/tcp.c | sock_wfree(sk, buff); |
sk | 1268 | net/ipv4/tcp.c | memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1)); |
sk | 1269 | net/ipv4/tcp.c | t1->seq = htonl(sk->sent_seq); |
sk | 1271 | net/ipv4/tcp.c | sk->ack_backlog = 0; |
sk | 1272 | net/ipv4/tcp.c | sk->bytes_rcv = 0; |
sk | 1274 | net/ipv4/tcp.c | sk->window = tcp_select_window(sk); |
sk | 1275 | net/ipv4/tcp.c | t1->window = htons(sk->window); |
sk | 1276 | net/ipv4/tcp.c | t1->ack_seq = htonl(sk->acked_seq); |
sk | 1278 | net/ipv4/tcp.c | tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff); |
sk | 1279 | net/ipv4/tcp.c | sk->prot->queue_xmit(sk, dev, buff, 1); |
sk | 1289 | net/ipv4/tcp.c | static int tcp_recv_urg(struct sock * sk, int nonblock, |
sk | 1295 | net/ipv4/tcp.c | if (sk->urginline || !sk->urg_data || sk->urg_data == URG_READ) |
sk | 1298 | net/ipv4/tcp.c | if (sk->err) |
sk | 1299 | net/ipv4/tcp.c | return sock_error(sk); |
sk | 1301 | net/ipv4/tcp.c | if (sk->state == TCP_CLOSE || sk->done) |
sk | 1303 | net/ipv4/tcp.c | if (!sk->done) |
sk | 1305 | net/ipv4/tcp.c | sk->done = 1; |
sk | 1311 | net/ipv4/tcp.c | if (sk->shutdown & RCV_SHUTDOWN) |
sk | 1313 | net/ipv4/tcp.c | sk->done = 1; |
sk | 1316 | net/ipv4/tcp.c | lock_sock(sk); |
sk | 1317 | net/ipv4/tcp.c | if (sk->urg_data & URG_VALID) |
sk | 1319 | net/ipv4/tcp.c | char c = sk->urg_data; |
sk | 1321 | net/ipv4/tcp.c | sk->urg_data = URG_READ; |
sk | 1327 | net/ipv4/tcp.c | sin->sin_addr.s_addr=sk->daddr; |
sk | 1328 | net/ipv4/tcp.c | sin->sin_port=sk->dummy_th.dest; |
sk | 1332 | net/ipv4/tcp.c | release_sock(sk); |
sk | 1335 | net/ipv4/tcp.c | release_sock(sk); |
sk | 1353 | net/ipv4/tcp.c | static inline void tcp_eat_skb(struct sock *sk, struct sk_buff * skb) |
sk | 1355 | net/ipv4/tcp.c | skb->sk = sk; |
sk | 1356 | net/ipv4/tcp.c | __skb_unlink(skb, &sk->receive_queue); |
sk | 1367 | net/ipv4/tcp.c | static void cleanup_rbuf(struct sock *sk) |
sk | 1375 | net/ipv4/tcp.c | while ((skb=skb_peek(&sk->receive_queue)) != NULL) { |
sk | 1378 | net/ipv4/tcp.c | tcp_eat_skb(sk, skb); |
sk | 1382 | net/ipv4/tcp.c | if (tcp_raise_window(sk)) { |
sk | 1383 | net/ipv4/tcp.c | sk->ack_backlog++; |
sk | 1384 | net/ipv4/tcp.c | tcp_read_wakeup(sk); |
sk | 1393 | net/ipv4/tcp.c | static int tcp_recvmsg(struct sock *sk, struct msghdr *msg, |
sk | 1406 | net/ipv4/tcp.c | if (sk->state == TCP_LISTEN) |
sk | 1414 | net/ipv4/tcp.c | return tcp_recv_urg(sk, nonblock, msg, len, flags, addr_len); |
sk | 1422 | net/ipv4/tcp.c | peek_seq = sk->copied_seq; |
sk | 1423 | net/ipv4/tcp.c | seq = &sk->copied_seq; |
sk | 1427 | net/ipv4/tcp.c | add_wait_queue(sk->sleep, &wait); |
sk | 1428 | net/ipv4/tcp.c | lock_sock(sk); |
sk | 1438 | net/ipv4/tcp.c | if (copied && sk->urg_data && sk->urg_seq == *seq) |
sk | 1447 | net/ipv4/tcp.c | skb = skb_peek(&sk->receive_queue); |
sk | 1465 | net/ipv4/tcp.c | while (skb != (struct sk_buff *)&sk->receive_queue); |
sk | 1470 | net/ipv4/tcp.c | if (sk->err) |
sk | 1472 | net/ipv4/tcp.c | copied = sock_error(sk); |
sk | 1476 | net/ipv4/tcp.c | if (sk->state == TCP_CLOSE) |
sk | 1478 | net/ipv4/tcp.c | if (!sk->done) |
sk | 1480 | net/ipv4/tcp.c | sk->done = 1; |
sk | 1487 | net/ipv4/tcp.c | if (sk->shutdown & RCV_SHUTDOWN) |
sk | 1489 | net/ipv4/tcp.c | sk->done = 1; |
sk | 1499 | net/ipv4/tcp.c | cleanup_rbuf(sk); |
sk | 1500 | net/ipv4/tcp.c | release_sock(sk); |
sk | 1501 | net/ipv4/tcp.c | sk->socket->flags |= SO_WAITDATA; |
sk | 1503 | net/ipv4/tcp.c | sk->socket->flags &= ~SO_WAITDATA; |
sk | 1504 | net/ipv4/tcp.c | lock_sock(sk); |
sk | 1534 | net/ipv4/tcp.c | if (sk->urg_data) |
sk | 1536 | net/ipv4/tcp.c | u32 urg_offset = sk->urg_seq - *seq; |
sk | 1541 | net/ipv4/tcp.c | if (!sk->urginline) |
sk | 1579 | net/ipv4/tcp.c | if (after(sk->copied_seq,sk->urg_seq)) |
sk | 1580 | net/ipv4/tcp.c | sk->urg_data = 0; |
sk | 1594 | net/ipv4/tcp.c | tcp_eat_skb(sk, skb); |
sk | 1607 | net/ipv4/tcp.c | sk->shutdown |= RCV_SHUTDOWN; |
sk | 1616 | net/ipv4/tcp.c | sin->sin_addr.s_addr=sk->daddr; |
sk | 1617 | net/ipv4/tcp.c | sin->sin_port=sk->dummy_th.dest; |
sk | 1622 | net/ipv4/tcp.c | remove_wait_queue(sk->sleep, &wait); |
sk | 1626 | net/ipv4/tcp.c | cleanup_rbuf(sk); |
sk | 1627 | net/ipv4/tcp.c | release_sock(sk); |
sk | 1640 | net/ipv4/tcp.c | static int tcp_close_state(struct sock *sk, int dead) |
sk | 1644 | net/ipv4/tcp.c | switch(sk->state) |
sk | 1656 | net/ipv4/tcp.c | ns=sk->state; |
sk | 1667 | net/ipv4/tcp.c | tcp_set_state(sk,ns); |
sk | 1680 | net/ipv4/tcp.c | int timer_active=del_timer(&sk->timer); |
sk | 1682 | net/ipv4/tcp.c | add_timer(&sk->timer); |
sk | 1684 | net/ipv4/tcp.c | tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_FIN_TIMEOUT); |
sk | 1695 | net/ipv4/tcp.c | void tcp_shutdown(struct sock *sk, int how) |
sk | 1710 | net/ipv4/tcp.c | if (sk->state == TCP_FIN_WAIT1 || |
sk | 1711 | net/ipv4/tcp.c | sk->state == TCP_FIN_WAIT2 || |
sk | 1712 | net/ipv4/tcp.c | sk->state == TCP_CLOSING || |
sk | 1713 | net/ipv4/tcp.c | sk->state == TCP_LAST_ACK || |
sk | 1714 | net/ipv4/tcp.c | sk->state == TCP_TIME_WAIT || |
sk | 1715 | net/ipv4/tcp.c | sk->state == TCP_CLOSE || |
sk | 1716 | net/ipv4/tcp.c | sk->state == TCP_LISTEN |
sk | 1721 | net/ipv4/tcp.c | lock_sock(sk); |
sk | 1727 | net/ipv4/tcp.c | sk->shutdown |= SEND_SHUTDOWN; |
sk | 1733 | net/ipv4/tcp.c | if (sk->partial) |
sk | 1734 | net/ipv4/tcp.c | tcp_send_partial(sk); |
sk | 1740 | net/ipv4/tcp.c | if (tcp_close_state(sk,0)) |
sk | 1741 | net/ipv4/tcp.c | tcp_send_fin(sk); |
sk | 1743 | net/ipv4/tcp.c | release_sock(sk); |
sk | 1751 | net/ipv4/tcp.c | static inline int closing(struct sock * sk) |
sk | 1753 | net/ipv4/tcp.c | switch (sk->state) { |
sk | 1763 | net/ipv4/tcp.c | static void tcp_close(struct sock *sk, unsigned long timeout) |
sk | 1772 | net/ipv4/tcp.c | lock_sock(sk); |
sk | 1775 | net/ipv4/tcp.c | if(sk->state == TCP_LISTEN) |
sk | 1778 | net/ipv4/tcp.c | tcp_set_state(sk, TCP_CLOSE); |
sk | 1779 | net/ipv4/tcp.c | tcp_close_pending(sk); |
sk | 1780 | net/ipv4/tcp.c | release_sock(sk); |
sk | 1781 | net/ipv4/tcp.c | sk->dead = 1; |
sk | 1785 | net/ipv4/tcp.c | sk->keepopen = 1; |
sk | 1786 | net/ipv4/tcp.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 1788 | net/ipv4/tcp.c | if (!sk->dead) |
sk | 1789 | net/ipv4/tcp.c | sk->state_change(sk); |
sk | 1797 | net/ipv4/tcp.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
sk | 1804 | net/ipv4/tcp.c | if (sk->partial) |
sk | 1805 | net/ipv4/tcp.c | tcp_send_partial(sk); |
sk | 1812 | net/ipv4/tcp.c | if (tcp_close_state(sk,1)==1) |
sk | 1814 | net/ipv4/tcp.c | tcp_send_fin(sk); |
sk | 1819 | net/ipv4/tcp.c | release_sock(sk); |
sk | 1821 | net/ipv4/tcp.c | while(closing(sk) && current->timeout) |
sk | 1823 | net/ipv4/tcp.c | interruptible_sleep_on(sk->sleep); |
sk | 1830 | net/ipv4/tcp.c | lock_sock(sk); |
sk | 1839 | net/ipv4/tcp.c | release_sock(sk); |
sk | 1840 | net/ipv4/tcp.c | sk->dead = 1; |
sk | 1848 | net/ipv4/tcp.c | static struct sock *tcp_accept(struct sock *sk, int flags) |
sk | 1858 | net/ipv4/tcp.c | if (sk->state != TCP_LISTEN) |
sk | 1860 | net/ipv4/tcp.c | sk->err = EINVAL; |
sk | 1866 | net/ipv4/tcp.c | lock_sock(sk); |
sk | 1868 | net/ipv4/tcp.c | while((skb = tcp_dequeue_established(sk)) == NULL) |
sk | 1873 | net/ipv4/tcp.c | release_sock(sk); |
sk | 1874 | net/ipv4/tcp.c | sk->err = EAGAIN; |
sk | 1878 | net/ipv4/tcp.c | release_sock(sk); |
sk | 1879 | net/ipv4/tcp.c | interruptible_sleep_on(sk->sleep); |
sk | 1883 | net/ipv4/tcp.c | sk->err = ERESTARTSYS; |
sk | 1886 | net/ipv4/tcp.c | lock_sock(sk); |
sk | 1894 | net/ipv4/tcp.c | newsk = skb->sk; |
sk | 1897 | net/ipv4/tcp.c | sk->ack_backlog--; |
sk | 1898 | net/ipv4/tcp.c | release_sock(sk); |
sk | 1906 | net/ipv4/tcp.c | static int tcp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len) |
sk | 1916 | net/ipv4/tcp.c | if (sk->state != TCP_CLOSE) |
sk | 1923 | net/ipv4/tcp.c | if(sk->daddr) |
sk | 1946 | net/ipv4/tcp.c | lock_sock(sk); |
sk | 1947 | net/ipv4/tcp.c | sk->daddr = usin->sin_addr.s_addr; |
sk | 1948 | net/ipv4/tcp.c | sk->write_seq = tcp_init_seq(); |
sk | 1949 | net/ipv4/tcp.c | sk->window_seq = sk->write_seq; |
sk | 1950 | net/ipv4/tcp.c | sk->rcv_ack_seq = sk->write_seq -1; |
sk | 1951 | net/ipv4/tcp.c | sk->err = 0; |
sk | 1952 | net/ipv4/tcp.c | sk->dummy_th.dest = usin->sin_port; |
sk | 1953 | net/ipv4/tcp.c | release_sock(sk); |
sk | 1955 | net/ipv4/tcp.c | buff = sock_wmalloc(sk,MAX_SYN_SIZE,0, GFP_KERNEL); |
sk | 1960 | net/ipv4/tcp.c | lock_sock(sk); |
sk | 1961 | net/ipv4/tcp.c | buff->sk = sk; |
sk | 1963 | net/ipv4/tcp.c | buff->localroute = sk->localroute; |
sk | 1970 | net/ipv4/tcp.c | tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev, |
sk | 1971 | net/ipv4/tcp.c | IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache); |
sk | 1974 | net/ipv4/tcp.c | sock_wfree(sk, buff); |
sk | 1975 | net/ipv4/tcp.c | release_sock(sk); |
sk | 1978 | net/ipv4/tcp.c | if ((rt = sk->ip_route_cache) != NULL && !sk->saddr) |
sk | 1979 | net/ipv4/tcp.c | sk->saddr = rt->rt_src; |
sk | 1980 | net/ipv4/tcp.c | sk->rcv_saddr = sk->saddr; |
sk | 1984 | net/ipv4/tcp.c | memcpy(t1,(void *)&(sk->dummy_th), sizeof(*t1)); |
sk | 1985 | net/ipv4/tcp.c | buff->seq = sk->write_seq++; |
sk | 1987 | net/ipv4/tcp.c | sk->sent_seq = sk->write_seq; |
sk | 1988 | net/ipv4/tcp.c | buff->end_seq = sk->write_seq; |
sk | 1996 | net/ipv4/tcp.c | sk->window_clamp=rt->rt_window; |
sk | 1998 | net/ipv4/tcp.c | sk->window_clamp=0; |
sk | 2000 | net/ipv4/tcp.c | if (sk->user_mss) |
sk | 2001 | net/ipv4/tcp.c | sk->mtu = sk->user_mss; |
sk | 2003 | net/ipv4/tcp.c | sk->mtu = rt->rt_mtu - sizeof(struct iphdr) - sizeof(struct tcphdr); |
sk | 2005 | net/ipv4/tcp.c | sk->mtu = 576 - sizeof(struct iphdr) - sizeof(struct tcphdr); |
sk | 2011 | net/ipv4/tcp.c | if(sk->mtu <32) |
sk | 2012 | net/ipv4/tcp.c | sk->mtu = 32; /* Sanity limit */ |
sk | 2014 | net/ipv4/tcp.c | sk->mtu = min(sk->mtu, dev->mtu - sizeof(struct iphdr) - sizeof(struct tcphdr)); |
sk | 2028 | net/ipv4/tcp.c | sk->mtu=skip_pick_mtu(sk->mtu,dev); |
sk | 2038 | net/ipv4/tcp.c | ptr[2] = (sk->mtu) >> 8; |
sk | 2039 | net/ipv4/tcp.c | ptr[3] = (sk->mtu) & 0xff; |
sk | 2041 | net/ipv4/tcp.c | tcp_send_check(t1, sk->saddr, sk->daddr, |
sk | 2049 | net/ipv4/tcp.c | tcp_set_state(sk,TCP_SYN_SENT); |
sk | 2051 | net/ipv4/tcp.c | sk->rto = rt->rt_irtt; |
sk | 2053 | net/ipv4/tcp.c | sk->rto = TCP_TIMEOUT_INIT; |
sk | 2054 | net/ipv4/tcp.c | sk->retransmit_timer.function=&tcp_retransmit_timer; |
sk | 2055 | net/ipv4/tcp.c | sk->retransmit_timer.data = (unsigned long)sk; |
sk | 2056 | net/ipv4/tcp.c | tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto); /* Timer for repeating the SYN until an answer */ |
sk | 2057 | net/ipv4/tcp.c | sk->retransmits = 0; /* Now works the right way instead of a hacked |
sk | 2060 | net/ipv4/tcp.c | sk->prot->queue_xmit(sk, dev, buff, 0); |
sk | 2061 | net/ipv4/tcp.c | tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto); |
sk | 2065 | net/ipv4/tcp.c | release_sock(sk); |
sk | 2073 | net/ipv4/tcp.c | int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval, int optlen) |
sk | 2078 | net/ipv4/tcp.c | return ip_setsockopt(sk,level,optname,optval,optlen); |
sk | 2099 | net/ipv4/tcp.c | sk->user_mss=val; |
sk | 2102 | net/ipv4/tcp.c | sk->nonagle=(val==0)?0:1; |
sk | 2109 | net/ipv4/tcp.c | int tcp_getsockopt(struct sock *sk, int level, int optname, char *optval, int *optlen) |
sk | 2114 | net/ipv4/tcp.c | return ip_getsockopt(sk,level,optname,optval,optlen); |
sk | 2119 | net/ipv4/tcp.c | val=sk->user_mss; |
sk | 2122 | net/ipv4/tcp.c | val=sk->nonagle; |
sk | 40 | net/ipv4/tcp_input.c | extern __inline__ void tcp_delack_estimator(struct sock *sk) |
sk | 46 | net/ipv4/tcp_input.c | if (sk->lrcvtime == 0) |
sk | 48 | net/ipv4/tcp_input.c | sk->lrcvtime = jiffies; |
sk | 49 | net/ipv4/tcp_input.c | sk->ato = HZ/3; |
sk | 55 | net/ipv4/tcp_input.c | m = jiffies - sk->lrcvtime; |
sk | 57 | net/ipv4/tcp_input.c | sk->lrcvtime = jiffies; |
sk | 62 | net/ipv4/tcp_input.c | if (m > (sk->rtt >> 3)) |
sk | 64 | net/ipv4/tcp_input.c | sk->ato = sk->rtt >> 3; |
sk | 71 | net/ipv4/tcp_input.c | sk->ato = (sk->ato >> 1) + m; |
sk | 85 | net/ipv4/tcp_input.c | extern __inline__ void tcp_rtt_estimator(struct sock *sk, struct sk_buff *oskb) |
sk | 99 | net/ipv4/tcp_input.c | m -= (sk->rtt >> 3); /* m is now error in rtt est */ |
sk | 100 | net/ipv4/tcp_input.c | sk->rtt += m; /* rtt = 7/8 rtt + 1/8 new */ |
sk | 103 | net/ipv4/tcp_input.c | m -= (sk->mdev >> 2); /* similar update on mdev */ |
sk | 104 | net/ipv4/tcp_input.c | sk->mdev += m; /* mdev = 3/4 mdev + 1/4 new */ |
sk | 110 | net/ipv4/tcp_input.c | sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1; |
sk | 111 | net/ipv4/tcp_input.c | if (sk->rto > 120*HZ) |
sk | 112 | net/ipv4/tcp_input.c | sk->rto = 120*HZ; |
sk | 113 | net/ipv4/tcp_input.c | if (sk->rto < HZ/5) /* Was 1*HZ - keep .2 as minimum cos of the BSD delayed acks */ |
sk | 114 | net/ipv4/tcp_input.c | sk->rto = HZ/5; |
sk | 115 | net/ipv4/tcp_input.c | sk->backoff = 0; |
sk | 138 | net/ipv4/tcp_input.c | struct sock * sk; |
sk | 140 | net/ipv4/tcp_input.c | sk = (struct sock *) th_cache_sk; |
sk | 141 | net/ipv4/tcp_input.c | if (!sk || saddr != th_cache_saddr || daddr != th_cache_daddr || |
sk | 143 | net/ipv4/tcp_input.c | sk = get_sock(&tcp_prot, dport, saddr, sport, daddr); |
sk | 144 | net/ipv4/tcp_input.c | if (sk) { |
sk | 149 | net/ipv4/tcp_input.c | th_cache_sk=sk; |
sk | 152 | net/ipv4/tcp_input.c | return sk; |
sk | 159 | net/ipv4/tcp_input.c | static void bad_tcp_sequence(struct sock *sk, struct tcphdr *th, short len, |
sk | 172 | net/ipv4/tcp_input.c | if (sk->state==TCP_SYN_SENT || sk->state==TCP_SYN_RECV) |
sk | 174 | net/ipv4/tcp_input.c | tcp_send_reset(sk->saddr,sk->daddr,th,sk->prot,NULL,dev, sk->ip_tos,sk->ip_ttl); |
sk | 187 | net/ipv4/tcp_input.c | tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr); |
sk | 195 | net/ipv4/tcp_input.c | extern __inline__ int tcp_sequence(struct sock *sk, u32 seq, u32 end_seq) |
sk | 197 | net/ipv4/tcp_input.c | u32 end_window = sk->acked_seq + sk->window; |
sk | 201 | net/ipv4/tcp_input.c | (before(seq, end_window) && !before(end_seq, sk->acked_seq)); |
sk | 209 | net/ipv4/tcp_input.c | static int tcp_reset(struct sock *sk, struct sk_buff *skb) |
sk | 211 | net/ipv4/tcp_input.c | sk->zapped = 1; |
sk | 215 | net/ipv4/tcp_input.c | sk->err = ECONNRESET; |
sk | 216 | net/ipv4/tcp_input.c | if (sk->state == TCP_SYN_SENT) |
sk | 217 | net/ipv4/tcp_input.c | sk->err = ECONNREFUSED; |
sk | 218 | net/ipv4/tcp_input.c | if (sk->state == TCP_CLOSE_WAIT) |
sk | 219 | net/ipv4/tcp_input.c | sk->err = EPIPE; |
sk | 229 | net/ipv4/tcp_input.c | if(sk->state!=TCP_TIME_WAIT) |
sk | 231 | net/ipv4/tcp_input.c | tcp_set_state(sk,TCP_CLOSE); |
sk | 232 | net/ipv4/tcp_input.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 235 | net/ipv4/tcp_input.c | tcp_set_state(sk,TCP_CLOSE); |
sk | 236 | net/ipv4/tcp_input.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 238 | net/ipv4/tcp_input.c | if (!sk->dead) |
sk | 239 | net/ipv4/tcp_input.c | sk->state_change(sk); |
sk | 256 | net/ipv4/tcp_input.c | static void tcp_options(struct sock *sk, struct tcphdr *th) |
sk | 285 | net/ipv4/tcp_input.c | sk->mtu=min(sk->mtu,ntohs(*(unsigned short *)ptr)); |
sk | 298 | net/ipv4/tcp_input.c | sk->mtu=min(sk->mtu, 536); /* default MSS if none sent */ |
sk | 301 | net/ipv4/tcp_input.c | sk->mss = min(sk->max_window >> 1, sk->mtu); |
sk | 303 | net/ipv4/tcp_input.c | sk->mss = min(sk->max_window, sk->mtu); |
sk | 304 | net/ipv4/tcp_input.c | sk->max_unacked = 2 * sk->mss; |
sk | 317 | net/ipv4/tcp_input.c | static void tcp_conn_request(struct sock *sk, struct sk_buff *skb, |
sk | 327 | net/ipv4/tcp_input.c | if (!sk->dead) |
sk | 329 | net/ipv4/tcp_input.c | sk->data_ready(sk,0); |
sk | 333 | net/ipv4/tcp_input.c | if(sk->debug) |
sk | 334 | net/ipv4/tcp_input.c | printk("Reset on %p: Connect on dead socket.\n",sk); |
sk | 335 | net/ipv4/tcp_input.c | tcp_send_reset(daddr, saddr, th, sk->prot, opt, dev, sk->ip_tos,sk->ip_ttl); |
sk | 349 | net/ipv4/tcp_input.c | if (sk->ack_backlog >= sk->max_ack_backlog) |
sk | 373 | net/ipv4/tcp_input.c | memcpy(newsk, sk, sizeof(*newsk)); |
sk | 378 | net/ipv4/tcp_input.c | sk->opt = (struct options*)kmalloc(sizeof(struct options)+opt->optlen, GFP_ATOMIC); |
sk | 379 | net/ipv4/tcp_input.c | if (!sk->opt) |
sk | 386 | net/ipv4/tcp_input.c | if (ip_options_echo(sk->opt, opt, daddr, saddr, skb)) |
sk | 388 | net/ipv4/tcp_input.c | kfree_s(sk->opt, sizeof(struct options)+opt->optlen); |
sk | 417 | net/ipv4/tcp_input.c | newsk->localroute = sk->localroute; |
sk | 465 | net/ipv4/tcp_input.c | newsk->ip_ttl=sk->ip_ttl; |
sk | 484 | net/ipv4/tcp_input.c | if (sk->user_mss) |
sk | 485 | net/ipv4/tcp_input.c | newsk->mtu = sk->user_mss; |
sk | 509 | net/ipv4/tcp_input.c | sk->mtu=skip_pick_mtu(sk->mtu,dev); |
sk | 518 | net/ipv4/tcp_input.c | tcp_send_synack(newsk, sk, skb); |
sk | 532 | net/ipv4/tcp_input.c | void tcp_window_shrunk(struct sock * sk, u32 window_seq) |
sk | 538 | net/ipv4/tcp_input.c | skb2 = sk->send_head; |
sk | 539 | net/ipv4/tcp_input.c | sk->send_head = NULL; |
sk | 540 | net/ipv4/tcp_input.c | sk->send_tail = NULL; |
sk | 554 | net/ipv4/tcp_input.c | if (sk->packets_out > 0) |
sk | 555 | net/ipv4/tcp_input.c | sk->packets_out--; |
sk | 563 | net/ipv4/tcp_input.c | skb_queue_head(&sk->write_queue,skb); |
sk | 570 | net/ipv4/tcp_input.c | if (sk->send_head == NULL) |
sk | 572 | net/ipv4/tcp_input.c | sk->send_head = skb; |
sk | 573 | net/ipv4/tcp_input.c | sk->send_tail = skb; |
sk | 577 | net/ipv4/tcp_input.c | sk->send_tail->link3 = skb; |
sk | 578 | net/ipv4/tcp_input.c | sk->send_tail = skb; |
sk | 594 | net/ipv4/tcp_input.c | static int tcp_ack(struct sock *sk, struct tcphdr *th, u32 ack, int len) |
sk | 606 | net/ipv4/tcp_input.c | if(sk->zapped) |
sk | 614 | net/ipv4/tcp_input.c | if (sk->ip_xmit_timeout == TIME_KEEPOPEN) |
sk | 615 | net/ipv4/tcp_input.c | sk->retransmits = 0; |
sk | 622 | net/ipv4/tcp_input.c | if (after(ack, sk->sent_seq) || before(ack, sk->rcv_ack_seq)) |
sk | 636 | net/ipv4/tcp_input.c | if (window_seq > sk->max_window) |
sk | 638 | net/ipv4/tcp_input.c | sk->max_window = window_seq; |
sk | 642 | net/ipv4/tcp_input.c | sk->mss = min(window_seq>>1, sk->mtu); |
sk | 644 | net/ipv4/tcp_input.c | sk->mss = min(window_seq, sk->mtu); |
sk | 652 | net/ipv4/tcp_input.c | if (after(sk->window_seq, window_seq)) { |
sk | 654 | net/ipv4/tcp_input.c | tcp_window_shrunk(sk, window_seq); |
sk | 660 | net/ipv4/tcp_input.c | sk->window_seq = window_seq; |
sk | 665 | net/ipv4/tcp_input.c | if (sk->send_tail == NULL || sk->send_head == NULL) |
sk | 667 | net/ipv4/tcp_input.c | sk->send_head = NULL; |
sk | 668 | net/ipv4/tcp_input.c | sk->send_tail = NULL; |
sk | 669 | net/ipv4/tcp_input.c | sk->packets_out= 0; |
sk | 676 | net/ipv4/tcp_input.c | if (sk->ip_xmit_timeout == TIME_WRITE && |
sk | 677 | net/ipv4/tcp_input.c | sk->cong_window < 2048 && after(ack, sk->rcv_ack_seq)) |
sk | 689 | net/ipv4/tcp_input.c | if (sk->cong_window < sk->ssthresh) |
sk | 693 | net/ipv4/tcp_input.c | sk->cong_window++; |
sk | 700 | net/ipv4/tcp_input.c | if (sk->cong_count >= sk->cong_window) |
sk | 702 | net/ipv4/tcp_input.c | sk->cong_window++; |
sk | 703 | net/ipv4/tcp_input.c | sk->cong_count = 0; |
sk | 706 | net/ipv4/tcp_input.c | sk->cong_count++; |
sk | 714 | net/ipv4/tcp_input.c | sk->rcv_ack_seq = ack; |
sk | 721 | net/ipv4/tcp_input.c | sk->err_soft = 0; |
sk | 729 | net/ipv4/tcp_input.c | if (sk->ip_xmit_timeout == TIME_PROBE0) |
sk | 731 | net/ipv4/tcp_input.c | sk->retransmits = 0; /* Our probe was answered */ |
sk | 737 | net/ipv4/tcp_input.c | if (skb_peek(&sk->write_queue) != NULL && /* should always be non-null */ |
sk | 738 | net/ipv4/tcp_input.c | ! before (sk->window_seq, sk->write_queue.next->end_seq)) |
sk | 740 | net/ipv4/tcp_input.c | sk->backoff = 0; |
sk | 746 | net/ipv4/tcp_input.c | sk->rto = ((sk->rtt >> 2) + sk->mdev) >> 1; |
sk | 747 | net/ipv4/tcp_input.c | if (sk->rto > 120*HZ) |
sk | 748 | net/ipv4/tcp_input.c | sk->rto = 120*HZ; |
sk | 749 | net/ipv4/tcp_input.c | if (sk->rto < HZ/5) /* Was 1*HZ, then 1 - turns out we must allow about |
sk | 752 | net/ipv4/tcp_input.c | sk->rto = HZ/5; |
sk | 760 | net/ipv4/tcp_input.c | while(sk->send_head != NULL) |
sk | 763 | net/ipv4/tcp_input.c | if (sk->send_head->link3 && |
sk | 764 | net/ipv4/tcp_input.c | after(sk->send_head->end_seq, sk->send_head->link3->end_seq)) |
sk | 772 | net/ipv4/tcp_input.c | if (before(sk->send_head->end_seq, ack+1)) |
sk | 775 | net/ipv4/tcp_input.c | if (sk->retransmits) |
sk | 790 | net/ipv4/tcp_input.c | if (sk->send_head->link3) /* Any more queued retransmits? */ |
sk | 791 | net/ipv4/tcp_input.c | sk->retransmits = 1; |
sk | 793 | net/ipv4/tcp_input.c | sk->retransmits = 0; |
sk | 811 | net/ipv4/tcp_input.c | if (sk->packets_out > 0) |
sk | 812 | net/ipv4/tcp_input.c | sk->packets_out --; |
sk | 814 | net/ipv4/tcp_input.c | oskb = sk->send_head; |
sk | 817 | net/ipv4/tcp_input.c | tcp_rtt_estimator(sk,oskb); |
sk | 821 | net/ipv4/tcp_input.c | oskb = sk->send_head; |
sk | 823 | net/ipv4/tcp_input.c | sk->send_head = oskb->link3; |
sk | 824 | net/ipv4/tcp_input.c | if (sk->send_head == NULL) |
sk | 826 | net/ipv4/tcp_input.c | sk->send_tail = NULL; |
sk | 837 | net/ipv4/tcp_input.c | if (!sk->dead) |
sk | 838 | net/ipv4/tcp_input.c | sk->write_space(sk); |
sk | 859 | net/ipv4/tcp_input.c | if (skb_peek(&sk->write_queue) != NULL) |
sk | 861 | net/ipv4/tcp_input.c | if (!before(sk->window_seq, sk->write_queue.next->end_seq) && |
sk | 862 | net/ipv4/tcp_input.c | (sk->retransmits == 0 || |
sk | 863 | net/ipv4/tcp_input.c | sk->ip_xmit_timeout != TIME_WRITE || |
sk | 864 | net/ipv4/tcp_input.c | !after(sk->write_queue.next->end_seq, sk->rcv_ack_seq)) |
sk | 865 | net/ipv4/tcp_input.c | && sk->packets_out < sk->cong_window) |
sk | 871 | net/ipv4/tcp_input.c | tcp_write_xmit(sk); |
sk | 873 | net/ipv4/tcp_input.c | else if (before(sk->window_seq, sk->write_queue.next->end_seq) && |
sk | 874 | net/ipv4/tcp_input.c | sk->send_head == NULL && |
sk | 875 | net/ipv4/tcp_input.c | sk->ack_backlog == 0 && |
sk | 876 | net/ipv4/tcp_input.c | sk->state != TCP_TIME_WAIT) |
sk | 881 | net/ipv4/tcp_input.c | tcp_reset_xmit_timer(sk, TIME_PROBE0, sk->rto); |
sk | 899 | net/ipv4/tcp_input.c | switch(sk->state) { |
sk | 905 | net/ipv4/tcp_input.c | tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN); |
sk | 917 | net/ipv4/tcp_input.c | if (sk->send_head || skb_peek(&sk->write_queue) != NULL || sk->ack_backlog) { |
sk | 918 | net/ipv4/tcp_input.c | tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto); |
sk | 919 | net/ipv4/tcp_input.c | } else if (sk->keepopen) { |
sk | 920 | net/ipv4/tcp_input.c | tcp_reset_xmit_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN); |
sk | 922 | net/ipv4/tcp_input.c | del_timer(&sk->retransmit_timer); |
sk | 923 | net/ipv4/tcp_input.c | sk->ip_xmit_timeout = 0; |
sk | 934 | net/ipv4/tcp_input.c | if (sk->packets_out == 0 && sk->partial != NULL && |
sk | 935 | net/ipv4/tcp_input.c | skb_peek(&sk->write_queue) == NULL && sk->send_head == NULL) |
sk | 938 | net/ipv4/tcp_input.c | tcp_send_partial(sk); |
sk | 949 | net/ipv4/tcp_input.c | if (sk->state == TCP_LAST_ACK) |
sk | 951 | net/ipv4/tcp_input.c | if (!sk->dead) |
sk | 952 | net/ipv4/tcp_input.c | sk->state_change(sk); |
sk | 953 | net/ipv4/tcp_input.c | if(sk->debug) |
sk | 955 | net/ipv4/tcp_input.c | sk->rcv_ack_seq,sk->write_seq,sk->acked_seq,sk->fin_seq); |
sk | 956 | net/ipv4/tcp_input.c | if (sk->rcv_ack_seq == sk->write_seq /*&& sk->acked_seq == sk->fin_seq*/) |
sk | 959 | net/ipv4/tcp_input.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 960 | net/ipv4/tcp_input.c | tcp_set_state(sk,TCP_CLOSE); |
sk | 972 | net/ipv4/tcp_input.c | if (sk->state == TCP_FIN_WAIT1) |
sk | 975 | net/ipv4/tcp_input.c | if (!sk->dead) |
sk | 976 | net/ipv4/tcp_input.c | sk->state_change(sk); |
sk | 977 | net/ipv4/tcp_input.c | if (sk->rcv_ack_seq == sk->write_seq) |
sk | 980 | net/ipv4/tcp_input.c | sk->shutdown |= SEND_SHUTDOWN; |
sk | 981 | net/ipv4/tcp_input.c | tcp_set_state(sk, TCP_FIN_WAIT2); |
sk | 991 | net/ipv4/tcp_input.c | if (sk->state == TCP_CLOSING) |
sk | 994 | net/ipv4/tcp_input.c | if (!sk->dead) |
sk | 995 | net/ipv4/tcp_input.c | sk->state_change(sk); |
sk | 996 | net/ipv4/tcp_input.c | if (sk->rcv_ack_seq == sk->write_seq) |
sk | 999 | net/ipv4/tcp_input.c | tcp_time_wait(sk); |
sk | 1007 | net/ipv4/tcp_input.c | if(sk->state==TCP_SYN_RECV) |
sk | 1009 | net/ipv4/tcp_input.c | tcp_set_state(sk, TCP_ESTABLISHED); |
sk | 1010 | net/ipv4/tcp_input.c | tcp_options(sk,th); |
sk | 1011 | net/ipv4/tcp_input.c | sk->dummy_th.dest=th->source; |
sk | 1012 | net/ipv4/tcp_input.c | sk->copied_seq = sk->acked_seq; |
sk | 1013 | net/ipv4/tcp_input.c | if(!sk->dead) |
sk | 1014 | net/ipv4/tcp_input.c | sk->state_change(sk); |
sk | 1015 | net/ipv4/tcp_input.c | if(sk->max_window==0) |
sk | 1017 | net/ipv4/tcp_input.c | sk->max_window=32; /* Sanity check */ |
sk | 1018 | net/ipv4/tcp_input.c | sk->mss=min(sk->max_window,sk->mtu); |
sk | 1051 | net/ipv4/tcp_input.c | if (((!flag) || (flag&4)) && sk->send_head != NULL && |
sk | 1052 | net/ipv4/tcp_input.c | (((flag&2) && sk->retransmits) || |
sk | 1053 | net/ipv4/tcp_input.c | (sk->send_head->when + sk->rto < jiffies))) |
sk | 1055 | net/ipv4/tcp_input.c | if(sk->send_head->when + sk->rto < jiffies) |
sk | 1056 | net/ipv4/tcp_input.c | tcp_retransmit(sk,0); |
sk | 1059 | net/ipv4/tcp_input.c | tcp_do_retransmit(sk, 1); |
sk | 1060 | net/ipv4/tcp_input.c | tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto); |
sk | 1067 | net/ipv4/tcp_input.c | if(sk->debug) |
sk | 1068 | net/ipv4/tcp_input.c | printk("Ack ignored %u %u\n",ack,sk->sent_seq); |
sk | 1074 | net/ipv4/tcp_input.c | if (after(ack, sk->sent_seq)) |
sk | 1083 | net/ipv4/tcp_input.c | if (sk->keepopen) |
sk | 1085 | net/ipv4/tcp_input.c | if(sk->ip_xmit_timeout==TIME_KEEPOPEN) |
sk | 1086 | net/ipv4/tcp_input.c | tcp_reset_xmit_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN); |
sk | 1108 | net/ipv4/tcp_input.c | static int tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th) |
sk | 1110 | net/ipv4/tcp_input.c | sk->fin_seq = skb->end_seq; |
sk | 1112 | net/ipv4/tcp_input.c | if (!sk->dead) |
sk | 1114 | net/ipv4/tcp_input.c | sk->state_change(sk); |
sk | 1115 | net/ipv4/tcp_input.c | sock_wake_async(sk->socket, 1); |
sk | 1118 | net/ipv4/tcp_input.c | switch(sk->state) |
sk | 1127 | net/ipv4/tcp_input.c | tcp_set_state(sk,TCP_CLOSE_WAIT); |
sk | 1129 | net/ipv4/tcp_input.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 1144 | net/ipv4/tcp_input.c | tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN); |
sk | 1159 | net/ipv4/tcp_input.c | if(sk->ip_xmit_timeout != TIME_WRITE) |
sk | 1160 | net/ipv4/tcp_input.c | tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto); |
sk | 1161 | net/ipv4/tcp_input.c | tcp_set_state(sk,TCP_CLOSING); |
sk | 1167 | net/ipv4/tcp_input.c | tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN); |
sk | 1168 | net/ipv4/tcp_input.c | sk->shutdown|=SHUTDOWN_MASK; |
sk | 1169 | net/ipv4/tcp_input.c | tcp_set_state(sk,TCP_TIME_WAIT); |
sk | 1177 | net/ipv4/tcp_input.c | tcp_set_state(sk,TCP_LAST_ACK); |
sk | 1180 | net/ipv4/tcp_input.c | tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN); |
sk | 1190 | net/ipv4/tcp_input.c | static inline u32 tcp_queue_ack(struct sk_buff * skb, struct sock * sk) |
sk | 1198 | net/ipv4/tcp_input.c | tcp_fin(skb,sk,skb->h.th); |
sk | 1207 | net/ipv4/tcp_input.c | static void tcp_queue(struct sk_buff * skb, struct sock * sk, |
sk | 1210 | net/ipv4/tcp_input.c | struct sk_buff_head * list = &sk->receive_queue; |
sk | 1234 | net/ipv4/tcp_input.c | ack_seq = sk->acked_seq; |
sk | 1236 | net/ipv4/tcp_input.c | ack_seq = tcp_queue_ack(skb, sk); |
sk | 1246 | net/ipv4/tcp_input.c | ack_seq = tcp_queue_ack(next, sk); |
sk | 1255 | net/ipv4/tcp_input.c | sk->acked_seq = ack_seq; |
sk | 1266 | net/ipv4/tcp_input.c | if (!sk->delay_acks || th->fin) { |
sk | 1267 | net/ipv4/tcp_input.c | tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr); |
sk | 1271 | net/ipv4/tcp_input.c | int timeout = sk->ato; |
sk | 1274 | net/ipv4/tcp_input.c | if (sk->bytes_rcv > sk->max_unacked) { |
sk | 1278 | net/ipv4/tcp_input.c | sk->ack_backlog++; |
sk | 1279 | net/ipv4/tcp_input.c | if(sk->debug) |
sk | 1281 | net/ipv4/tcp_input.c | tcp_reset_xmit_timer(sk, TIME_WRITE, timeout); |
sk | 1293 | net/ipv4/tcp_input.c | static int tcp_data(struct sk_buff *skb, struct sock *sk, |
sk | 1308 | net/ipv4/tcp_input.c | sk->bytes_rcv += skb->len; |
sk | 1317 | net/ipv4/tcp_input.c | tcp_send_ack(sk->sent_seq, sk->acked_seq,sk, th, saddr); |
sk | 1328 | net/ipv4/tcp_input.c | if(sk->shutdown & RCV_SHUTDOWN) |
sk | 1353 | net/ipv4/tcp_input.c | shut_seq = sk->acked_seq+1; /* Last byte */ |
sk | 1357 | net/ipv4/tcp_input.c | if(sk->debug) |
sk | 1359 | net/ipv4/tcp_input.c | sk, new_seq, shut_seq, sk->blog); |
sk | 1360 | net/ipv4/tcp_input.c | if(sk->dead) |
sk | 1362 | net/ipv4/tcp_input.c | sk->acked_seq = new_seq + th->fin; |
sk | 1363 | net/ipv4/tcp_input.c | tcp_send_reset(sk->saddr, sk->daddr, skb->h.th, |
sk | 1364 | net/ipv4/tcp_input.c | sk->prot, NULL, skb->dev, sk->ip_tos, sk->ip_ttl); |
sk | 1366 | net/ipv4/tcp_input.c | sk->err = EPIPE; |
sk | 1367 | net/ipv4/tcp_input.c | sk->error_report(sk); |
sk | 1368 | net/ipv4/tcp_input.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 1369 | net/ipv4/tcp_input.c | tcp_set_state(sk,TCP_CLOSE); |
sk | 1379 | net/ipv4/tcp_input.c | tcp_queue(skb, sk, th, saddr); |
sk | 1396 | net/ipv4/tcp_input.c | while (sock_rspace(sk) < sk->mtu) |
sk | 1398 | net/ipv4/tcp_input.c | struct sk_buff * skb1 = skb_peek(&sk->receive_queue); |
sk | 1417 | net/ipv4/tcp_input.c | tcp_send_ack(sk->sent_seq, sk->acked_seq, sk, th, saddr); |
sk | 1418 | net/ipv4/tcp_input.c | sk->ack_backlog++; |
sk | 1419 | net/ipv4/tcp_input.c | tcp_reset_xmit_timer(sk, TIME_WRITE, min(sk->ato, HZ/2)); |
sk | 1426 | net/ipv4/tcp_input.c | if (!sk->dead) |
sk | 1428 | net/ipv4/tcp_input.c | if(sk->debug) |
sk | 1430 | net/ipv4/tcp_input.c | sk->data_ready(sk,0); |
sk | 1444 | net/ipv4/tcp_input.c | static void tcp_check_urg(struct sock * sk, struct tcphdr * th) |
sk | 1453 | net/ipv4/tcp_input.c | if (after(sk->copied_seq, ptr)) |
sk | 1457 | net/ipv4/tcp_input.c | if (sk->urg_data && !after(ptr, sk->urg_seq)) |
sk | 1461 | net/ipv4/tcp_input.c | if (sk->proc != 0) { |
sk | 1462 | net/ipv4/tcp_input.c | if (sk->proc > 0) { |
sk | 1463 | net/ipv4/tcp_input.c | kill_proc(sk->proc, SIGURG, 1); |
sk | 1465 | net/ipv4/tcp_input.c | kill_pg(-sk->proc, SIGURG, 1); |
sk | 1468 | net/ipv4/tcp_input.c | sk->urg_data = URG_NOTYET; |
sk | 1469 | net/ipv4/tcp_input.c | sk->urg_seq = ptr; |
sk | 1476 | net/ipv4/tcp_input.c | static inline void tcp_urg(struct sock *sk, struct tcphdr *th, unsigned long len) |
sk | 1483 | net/ipv4/tcp_input.c | tcp_check_urg(sk,th); |
sk | 1489 | net/ipv4/tcp_input.c | if (sk->urg_data == URG_NOTYET) { |
sk | 1495 | net/ipv4/tcp_input.c | ptr = sk->urg_seq - ntohl(th->seq) + th->doff*4; |
sk | 1497 | net/ipv4/tcp_input.c | sk->urg_data = URG_VALID | *(ptr + (unsigned char *) th); |
sk | 1498 | net/ipv4/tcp_input.c | if (!sk->dead) |
sk | 1499 | net/ipv4/tcp_input.c | sk->data_ready(sk,0); |
sk | 1515 | net/ipv4/tcp_input.c | struct sock *sk; |
sk | 1525 | net/ipv4/tcp_input.c | sk = skb->sk; |
sk | 1550 | net/ipv4/tcp_input.c | sk = get_tcp_sock(saddr, th->source, daddr, th->dest); |
sk | 1551 | net/ipv4/tcp_input.c | if (!sk) |
sk | 1553 | net/ipv4/tcp_input.c | skb->sk = sk; |
sk | 1567 | net/ipv4/tcp_input.c | if (sk->users) |
sk | 1569 | net/ipv4/tcp_input.c | __skb_queue_tail(&sk->back_log, skb); |
sk | 1583 | net/ipv4/tcp_input.c | if (sk->zapped || sk->state==TCP_CLOSE) |
sk | 1586 | net/ipv4/tcp_input.c | if (!sk->prot) |
sk | 1597 | net/ipv4/tcp_input.c | skb->sk=sk; |
sk | 1598 | net/ipv4/tcp_input.c | atomic_add(skb->truesize, &sk->rmem_alloc); |
sk | 1611 | net/ipv4/tcp_input.c | if(sk->state!=TCP_ESTABLISHED) /* Skip this lot for normal flow */ |
sk | 1618 | net/ipv4/tcp_input.c | if(sk->state==TCP_LISTEN) |
sk | 1621 | net/ipv4/tcp_input.c | tcp_send_reset(daddr,saddr,th,sk->prot,opt,dev,sk->ip_tos, sk->ip_ttl); |
sk | 1640 | net/ipv4/tcp_input.c | tcp_conn_request(sk, skb, daddr, saddr, opt, dev, tcp_init_seq()); |
sk | 1661 | net/ipv4/tcp_input.c | if (sk->state == TCP_SYN_RECV && th->syn && skb->seq+1 == sk->acked_seq) |
sk | 1673 | net/ipv4/tcp_input.c | if(sk->state==TCP_SYN_SENT) |
sk | 1679 | net/ipv4/tcp_input.c | if(!tcp_ack(sk,th,skb->ack_seq,len)) |
sk | 1685 | net/ipv4/tcp_input.c | sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl); |
sk | 1690 | net/ipv4/tcp_input.c | return tcp_reset(sk,skb); |
sk | 1697 | net/ipv4/tcp_input.c | sk->prot, opt,dev,sk->ip_tos,sk->ip_ttl); |
sk | 1706 | net/ipv4/tcp_input.c | sk->acked_seq = skb->seq+1; |
sk | 1707 | net/ipv4/tcp_input.c | sk->lastwin_seq = skb->seq+1; |
sk | 1708 | net/ipv4/tcp_input.c | sk->fin_seq = skb->seq; |
sk | 1709 | net/ipv4/tcp_input.c | tcp_send_ack(sk->sent_seq,sk->acked_seq,sk,th,sk->daddr); |
sk | 1710 | net/ipv4/tcp_input.c | tcp_set_state(sk, TCP_ESTABLISHED); |
sk | 1711 | net/ipv4/tcp_input.c | tcp_options(sk,th); |
sk | 1712 | net/ipv4/tcp_input.c | sk->dummy_th.dest=th->source; |
sk | 1713 | net/ipv4/tcp_input.c | sk->copied_seq = sk->acked_seq; |
sk | 1714 | net/ipv4/tcp_input.c | if(!sk->dead) |
sk | 1716 | net/ipv4/tcp_input.c | sk->state_change(sk); |
sk | 1717 | net/ipv4/tcp_input.c | sock_wake_async(sk->socket, 0); |
sk | 1719 | net/ipv4/tcp_input.c | if(sk->max_window==0) |
sk | 1721 | net/ipv4/tcp_input.c | sk->max_window = 32; |
sk | 1722 | net/ipv4/tcp_input.c | sk->mss = min(sk->max_window, sk->mtu); |
sk | 1732 | net/ipv4/tcp_input.c | if(sk->saddr==saddr && sk->daddr==daddr && |
sk | 1733 | net/ipv4/tcp_input.c | sk->dummy_th.source==th->source && |
sk | 1734 | net/ipv4/tcp_input.c | sk->dummy_th.dest==th->dest) |
sk | 1737 | net/ipv4/tcp_input.c | return tcp_reset(sk,skb); |
sk | 1739 | net/ipv4/tcp_input.c | tcp_set_state(sk,TCP_SYN_RECV); |
sk | 1769 | net/ipv4/tcp_input.c | if (sk->state == TCP_TIME_WAIT && th->syn && sk->dead && |
sk | 1770 | net/ipv4/tcp_input.c | after(skb->seq, sk->acked_seq) && !th->rst) |
sk | 1772 | net/ipv4/tcp_input.c | u32 seq = sk->write_seq; |
sk | 1773 | net/ipv4/tcp_input.c | if(sk->debug) |
sk | 1776 | net/ipv4/tcp_input.c | atomic_sub(skb->truesize, &sk->rmem_alloc); |
sk | 1777 | net/ipv4/tcp_input.c | skb->sk = NULL; |
sk | 1778 | net/ipv4/tcp_input.c | sk->err=ECONNRESET; |
sk | 1779 | net/ipv4/tcp_input.c | tcp_set_state(sk, TCP_CLOSE); |
sk | 1780 | net/ipv4/tcp_input.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 1781 | net/ipv4/tcp_input.c | sk=get_sock(&tcp_prot, th->dest, saddr, th->source, daddr); |
sk | 1783 | net/ipv4/tcp_input.c | if (sk && sk->state==TCP_LISTEN) |
sk | 1785 | net/ipv4/tcp_input.c | skb->sk = sk; |
sk | 1786 | net/ipv4/tcp_input.c | atomic_add(skb->truesize, &sk->rmem_alloc); |
sk | 1787 | net/ipv4/tcp_input.c | tcp_conn_request(sk, skb, daddr, saddr,opt, dev,seq+128000); |
sk | 1802 | net/ipv4/tcp_input.c | if (!tcp_sequence(sk, skb->seq, skb->end_seq-th->syn)) |
sk | 1804 | net/ipv4/tcp_input.c | bad_tcp_sequence(sk, th, len, opt, saddr, dev); |
sk | 1810 | net/ipv4/tcp_input.c | return tcp_reset(sk,skb); |
sk | 1819 | net/ipv4/tcp_input.c | return tcp_reset(sk,skb); |
sk | 1822 | net/ipv4/tcp_input.c | tcp_delack_estimator(sk); |
sk | 1829 | net/ipv4/tcp_input.c | if(th->ack && !tcp_ack(sk,th,skb->ack_seq,len)) |
sk | 1835 | net/ipv4/tcp_input.c | if(sk->state==TCP_SYN_RECV) |
sk | 1837 | net/ipv4/tcp_input.c | tcp_send_reset(daddr, saddr, th,sk->prot, opt, dev,sk->ip_tos,sk->ip_ttl); |
sk | 1856 | net/ipv4/tcp_input.c | if (sk->rmem_alloc >= sk->rcvbuf) |
sk | 1868 | net/ipv4/tcp_input.c | tcp_urg(sk, th, len); |
sk | 1874 | net/ipv4/tcp_input.c | if(tcp_data(skb,sk, saddr, len)) |
sk | 1893 | net/ipv4/tcp_input.c | skb->sk = NULL; |
sk | 31 | net/ipv4/tcp_output.c | void tcp_send_skb(struct sock *sk, struct sk_buff *skb) |
sk | 86 | net/ipv4/tcp_output.c | if (after(skb->end_seq, sk->window_seq) || |
sk | 87 | net/ipv4/tcp_output.c | (sk->retransmits && sk->ip_xmit_timeout == TIME_WRITE) || |
sk | 88 | net/ipv4/tcp_output.c | sk->packets_out >= sk->cong_window) |
sk | 98 | net/ipv4/tcp_output.c | skb_queue_tail(&sk->write_queue, skb); |
sk | 100 | net/ipv4/tcp_output.c | if (before(sk->window_seq, sk->write_queue.next->end_seq) && |
sk | 101 | net/ipv4/tcp_output.c | sk->send_head == NULL && sk->ack_backlog == 0) |
sk | 102 | net/ipv4/tcp_output.c | tcp_reset_xmit_timer(sk, TIME_PROBE0, sk->rto); |
sk | 110 | net/ipv4/tcp_output.c | th->ack_seq = htonl(sk->acked_seq); |
sk | 111 | net/ipv4/tcp_output.c | th->window = htons(tcp_select_window(sk)); |
sk | 113 | net/ipv4/tcp_output.c | tcp_send_check(th, sk->saddr, sk->daddr, size, skb); |
sk | 115 | net/ipv4/tcp_output.c | sk->sent_seq = sk->write_seq; |
sk | 123 | net/ipv4/tcp_output.c | sk->prot->queue_xmit(sk, skb->dev, skb, 0); |
sk | 126 | net/ipv4/tcp_output.c | sk->ack_backlog = 0; |
sk | 127 | net/ipv4/tcp_output.c | sk->bytes_rcv = 0; |
sk | 135 | net/ipv4/tcp_output.c | tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto); |
sk | 148 | net/ipv4/tcp_output.c | struct sk_buff * tcp_dequeue_partial(struct sock * sk) |
sk | 155 | net/ipv4/tcp_output.c | skb = sk->partial; |
sk | 157 | net/ipv4/tcp_output.c | sk->partial = NULL; |
sk | 158 | net/ipv4/tcp_output.c | del_timer(&sk->partial_timer); |
sk | 168 | net/ipv4/tcp_output.c | void tcp_send_partial(struct sock *sk) |
sk | 172 | net/ipv4/tcp_output.c | if (sk == NULL) |
sk | 174 | net/ipv4/tcp_output.c | while ((skb = tcp_dequeue_partial(sk)) != NULL) |
sk | 175 | net/ipv4/tcp_output.c | tcp_send_skb(sk, skb); |
sk | 182 | net/ipv4/tcp_output.c | void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk) |
sk | 189 | net/ipv4/tcp_output.c | tmp = sk->partial; |
sk | 191 | net/ipv4/tcp_output.c | del_timer(&sk->partial_timer); |
sk | 192 | net/ipv4/tcp_output.c | sk->partial = skb; |
sk | 193 | net/ipv4/tcp_output.c | init_timer(&sk->partial_timer); |
sk | 197 | net/ipv4/tcp_output.c | sk->partial_timer.expires = jiffies+HZ; |
sk | 198 | net/ipv4/tcp_output.c | sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial; |
sk | 199 | net/ipv4/tcp_output.c | sk->partial_timer.data = (unsigned long) sk; |
sk | 200 | net/ipv4/tcp_output.c | add_timer(&sk->partial_timer); |
sk | 203 | net/ipv4/tcp_output.c | tcp_send_skb(sk, tmp); |
sk | 212 | net/ipv4/tcp_output.c | void tcp_write_xmit(struct sock *sk) |
sk | 221 | net/ipv4/tcp_output.c | if(sk->zapped) |
sk | 232 | net/ipv4/tcp_output.c | while((skb = skb_peek(&sk->write_queue)) != NULL && |
sk | 233 | net/ipv4/tcp_output.c | !after(skb->end_seq, sk->window_seq) && |
sk | 234 | net/ipv4/tcp_output.c | (sk->retransmits == 0 || |
sk | 235 | net/ipv4/tcp_output.c | sk->ip_xmit_timeout != TIME_WRITE || |
sk | 236 | net/ipv4/tcp_output.c | !after(skb->end_seq, sk->rcv_ack_seq)) |
sk | 237 | net/ipv4/tcp_output.c | && sk->packets_out < sk->cong_window) |
sk | 246 | net/ipv4/tcp_output.c | if (before(skb->end_seq, sk->rcv_ack_seq +1)) |
sk | 253 | net/ipv4/tcp_output.c | sk->retransmits = 0; |
sk | 255 | net/ipv4/tcp_output.c | if (!sk->dead) |
sk | 256 | net/ipv4/tcp_output.c | sk->write_space(sk); |
sk | 274 | net/ipv4/tcp_output.c | if (size > sk->mtu - sizeof(struct iphdr)) |
sk | 281 | net/ipv4/tcp_output.c | th->ack_seq = htonl(sk->acked_seq); |
sk | 282 | net/ipv4/tcp_output.c | th->window = htons(tcp_select_window(sk)); |
sk | 284 | net/ipv4/tcp_output.c | tcp_send_check(th, sk->saddr, sk->daddr, size, skb); |
sk | 286 | net/ipv4/tcp_output.c | sk->sent_seq = skb->end_seq; |
sk | 292 | net/ipv4/tcp_output.c | sk->prot->queue_xmit(sk, skb->dev, skb, skb->free); |
sk | 295 | net/ipv4/tcp_output.c | sk->ack_backlog = 0; |
sk | 296 | net/ipv4/tcp_output.c | sk->bytes_rcv = 0; |
sk | 302 | net/ipv4/tcp_output.c | tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto); |
sk | 313 | net/ipv4/tcp_output.c | void tcp_do_retransmit(struct sock *sk, int all) |
sk | 321 | net/ipv4/tcp_output.c | prot = sk->prot; |
sk | 322 | net/ipv4/tcp_output.c | skb = sk->send_head; |
sk | 379 | net/ipv4/tcp_output.c | rt = ip_check_route(&sk->ip_route_cache, opt->srr?opt->faddr:iph->daddr, skb->localroute); |
sk | 391 | net/ipv4/tcp_output.c | if(skb->sk) |
sk | 393 | net/ipv4/tcp_output.c | skb->sk->err_soft=ENETUNREACH; |
sk | 394 | net/ipv4/tcp_output.c | skb->sk->error_report(skb->sk); |
sk | 431 | net/ipv4/tcp_output.c | th->ack_seq = htonl(sk->acked_seq); |
sk | 432 | net/ipv4/tcp_output.c | sk->ack_backlog = 0; |
sk | 433 | net/ipv4/tcp_output.c | sk->bytes_rcv = 0; |
sk | 434 | net/ipv4/tcp_output.c | th->window = ntohs(tcp_select_window(sk)); |
sk | 435 | net/ipv4/tcp_output.c | tcp_send_check(th, sk->saddr, sk->daddr, size, skb); |
sk | 451 | net/ipv4/tcp_output.c | if (sk && !skb_device_locked(skb)) |
sk | 457 | net/ipv4/tcp_output.c | dev_queue_xmit(skb, dev, sk->priority); |
sk | 467 | net/ipv4/tcp_output.c | sk->retransmits++; |
sk | 468 | net/ipv4/tcp_output.c | sk->prot->retransmits++; |
sk | 483 | net/ipv4/tcp_output.c | if (ct >= sk->cong_window) |
sk | 517 | net/ipv4/tcp_output.c | buff->sk = NULL; |
sk | 569 | net/ipv4/tcp_output.c | void tcp_send_fin(struct sock *sk) |
sk | 571 | net/ipv4/tcp_output.c | struct proto *prot =(struct proto *)sk->prot; |
sk | 572 | net/ipv4/tcp_output.c | struct tcphdr *th =(struct tcphdr *)&sk->dummy_th; |
sk | 578 | net/ipv4/tcp_output.c | buff = sock_wmalloc(sk, MAX_RESET_SIZE,1 , GFP_KERNEL); |
sk | 591 | net/ipv4/tcp_output.c | buff->sk = sk; |
sk | 592 | net/ipv4/tcp_output.c | buff->localroute = sk->localroute; |
sk | 599 | net/ipv4/tcp_output.c | tmp = prot->build_header(buff,sk->saddr, sk->daddr, &dev, |
sk | 600 | net/ipv4/tcp_output.c | IPPROTO_TCP, sk->opt, |
sk | 601 | net/ipv4/tcp_output.c | sizeof(struct tcphdr),sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache); |
sk | 611 | net/ipv4/tcp_output.c | sock_wfree(sk,buff); |
sk | 612 | net/ipv4/tcp_output.c | sk->write_seq++; |
sk | 613 | net/ipv4/tcp_output.c | t=del_timer(&sk->timer); |
sk | 615 | net/ipv4/tcp_output.c | add_timer(&sk->timer); |
sk | 617 | net/ipv4/tcp_output.c | tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN); |
sk | 629 | net/ipv4/tcp_output.c | buff->seq = sk->write_seq; |
sk | 630 | net/ipv4/tcp_output.c | sk->write_seq++; |
sk | 631 | net/ipv4/tcp_output.c | buff->end_seq = sk->write_seq; |
sk | 633 | net/ipv4/tcp_output.c | t1->ack_seq = htonl(sk->acked_seq); |
sk | 634 | net/ipv4/tcp_output.c | t1->window = htons(sk->window=tcp_select_window(sk)); |
sk | 636 | net/ipv4/tcp_output.c | tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff); |
sk | 643 | net/ipv4/tcp_output.c | if (skb_peek(&sk->write_queue) != NULL) |
sk | 651 | net/ipv4/tcp_output.c | skb_queue_tail(&sk->write_queue, buff); |
sk | 655 | net/ipv4/tcp_output.c | sk->sent_seq = sk->write_seq; |
sk | 656 | net/ipv4/tcp_output.c | sk->prot->queue_xmit(sk, dev, buff, 0); |
sk | 657 | net/ipv4/tcp_output.c | tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto); |
sk | 662 | net/ipv4/tcp_output.c | void tcp_send_synack(struct sock * newsk, struct sock * sk, struct sk_buff * skb) |
sk | 673 | net/ipv4/tcp_output.c | sk->err = ENOMEM; |
sk | 680 | net/ipv4/tcp_output.c | buff->sk = newsk; |
sk | 687 | net/ipv4/tcp_output.c | tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev, |
sk | 688 | net/ipv4/tcp_output.c | IPPROTO_TCP, NULL, MAX_SYN_SIZE,sk->ip_tos,sk->ip_ttl,&newsk->ip_route_cache); |
sk | 696 | net/ipv4/tcp_output.c | sk->err = tmp; |
sk | 700 | net/ipv4/tcp_output.c | skb->sk = sk; |
sk | 735 | net/ipv4/tcp_output.c | skb->sk = newsk; |
sk | 741 | net/ipv4/tcp_output.c | atomic_sub(skb->truesize, &sk->rmem_alloc); |
sk | 744 | net/ipv4/tcp_output.c | skb_queue_tail(&sk->receive_queue,skb); |
sk | 745 | net/ipv4/tcp_output.c | sk->ack_backlog++; |
sk | 754 | net/ipv4/tcp_output.c | struct sock *sk, |
sk | 762 | net/ipv4/tcp_output.c | if(sk->zapped) |
sk | 770 | net/ipv4/tcp_output.c | buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC); |
sk | 780 | net/ipv4/tcp_output.c | sk->ack_backlog++; |
sk | 781 | net/ipv4/tcp_output.c | if (sk->ip_xmit_timeout != TIME_WRITE && tcp_connected(sk->state)) |
sk | 783 | net/ipv4/tcp_output.c | tcp_reset_xmit_timer(sk, TIME_WRITE, HZ); |
sk | 792 | net/ipv4/tcp_output.c | buff->sk = sk; |
sk | 793 | net/ipv4/tcp_output.c | buff->localroute = sk->localroute; |
sk | 800 | net/ipv4/tcp_output.c | tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev, |
sk | 801 | net/ipv4/tcp_output.c | IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache); |
sk | 805 | net/ipv4/tcp_output.c | sock_wfree(sk, buff); |
sk | 810 | net/ipv4/tcp_output.c | memcpy(t1, &sk->dummy_th, sizeof(*t1)); |
sk | 819 | net/ipv4/tcp_output.c | sk->window = tcp_select_window(sk); |
sk | 820 | net/ipv4/tcp_output.c | t1->window = ntohs(sk->window); |
sk | 828 | net/ipv4/tcp_output.c | if (ack == sk->acked_seq) { |
sk | 829 | net/ipv4/tcp_output.c | sk->ack_backlog = 0; |
sk | 830 | net/ipv4/tcp_output.c | sk->bytes_rcv = 0; |
sk | 831 | net/ipv4/tcp_output.c | sk->ack_timed = 0; |
sk | 833 | net/ipv4/tcp_output.c | if (sk->send_head == NULL && skb_peek(&sk->write_queue) == NULL |
sk | 834 | net/ipv4/tcp_output.c | && sk->ip_xmit_timeout == TIME_WRITE) |
sk | 835 | net/ipv4/tcp_output.c | if(sk->keepopen) |
sk | 836 | net/ipv4/tcp_output.c | tcp_reset_xmit_timer(sk,TIME_KEEPOPEN,TCP_TIMEOUT_LEN); |
sk | 838 | net/ipv4/tcp_output.c | delete_timer(sk); |
sk | 846 | net/ipv4/tcp_output.c | tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), buff); |
sk | 847 | net/ipv4/tcp_output.c | if (sk->debug) |
sk | 849 | net/ipv4/tcp_output.c | sk->prot->queue_xmit(sk, dev, buff, 1); |
sk | 858 | net/ipv4/tcp_output.c | void tcp_write_wakeup(struct sock *sk) |
sk | 865 | net/ipv4/tcp_output.c | if (sk->zapped) |
sk | 874 | net/ipv4/tcp_output.c | if (sk->state != TCP_ESTABLISHED && |
sk | 875 | net/ipv4/tcp_output.c | sk->state != TCP_CLOSE_WAIT && |
sk | 876 | net/ipv4/tcp_output.c | sk->state != TCP_FIN_WAIT1 && |
sk | 877 | net/ipv4/tcp_output.c | sk->state != TCP_LAST_ACK && |
sk | 878 | net/ipv4/tcp_output.c | sk->state != TCP_CLOSING |
sk | 883 | net/ipv4/tcp_output.c | if ( before(sk->sent_seq, sk->window_seq) && |
sk | 884 | net/ipv4/tcp_output.c | (skb=skb_peek(&sk->write_queue))) |
sk | 904 | net/ipv4/tcp_output.c | win_size = sk->window_seq - sk->sent_seq; |
sk | 917 | net/ipv4/tcp_output.c | buff = sock_wmalloc(sk, win_size + th->doff * 4 + |
sk | 919 | net/ipv4/tcp_output.c | sk->prot->max_header + 15, |
sk | 931 | net/ipv4/tcp_output.c | buff->sk = sk; |
sk | 932 | net/ipv4/tcp_output.c | buff->localroute = sk->localroute; |
sk | 938 | net/ipv4/tcp_output.c | tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev, |
sk | 939 | net/ipv4/tcp_output.c | IPPROTO_TCP, sk->opt, buff->truesize, |
sk | 940 | net/ipv4/tcp_output.c | sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache); |
sk | 943 | net/ipv4/tcp_output.c | sock_wfree(sk, buff); |
sk | 962 | net/ipv4/tcp_output.c | nth->ack_seq = htonl(sk->acked_seq); |
sk | 963 | net/ipv4/tcp_output.c | nth->window = htons(tcp_select_window(sk)); |
sk | 977 | net/ipv4/tcp_output.c | buff->end_seq = sk->sent_seq + win_size; |
sk | 978 | net/ipv4/tcp_output.c | sk->sent_seq = buff->end_seq; /* Hack */ |
sk | 986 | net/ipv4/tcp_output.c | tcp_send_check(nth, sk->saddr, sk->daddr, |
sk | 991 | net/ipv4/tcp_output.c | buff = sock_wmalloc(sk,MAX_ACK_SIZE,1, GFP_ATOMIC); |
sk | 996 | net/ipv4/tcp_output.c | buff->sk = sk; |
sk | 997 | net/ipv4/tcp_output.c | buff->localroute = sk->localroute; |
sk | 1004 | net/ipv4/tcp_output.c | tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev, |
sk | 1005 | net/ipv4/tcp_output.c | IPPROTO_TCP, sk->opt, MAX_ACK_SIZE,sk->ip_tos,sk->ip_ttl,&sk->ip_route_cache); |
sk | 1008 | net/ipv4/tcp_output.c | sock_wfree(sk, buff); |
sk | 1013 | net/ipv4/tcp_output.c | memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1)); |
sk | 1020 | net/ipv4/tcp_output.c | t1->seq = htonl(sk->sent_seq-1); |
sk | 1022 | net/ipv4/tcp_output.c | t1->ack_seq = htonl(sk->acked_seq); |
sk | 1023 | net/ipv4/tcp_output.c | t1->window = htons(tcp_select_window(sk)); |
sk | 1024 | net/ipv4/tcp_output.c | tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff); |
sk | 1032 | net/ipv4/tcp_output.c | sk->prot->queue_xmit(sk, dev, buff, 1); |
sk | 1040 | net/ipv4/tcp_output.c | void tcp_send_probe0(struct sock *sk) |
sk | 1042 | net/ipv4/tcp_output.c | if (sk->zapped) |
sk | 1045 | net/ipv4/tcp_output.c | tcp_write_wakeup(sk); |
sk | 1047 | net/ipv4/tcp_output.c | sk->backoff++; |
sk | 1048 | net/ipv4/tcp_output.c | sk->rto = min(sk->rto << 1, 120*HZ); |
sk | 1049 | net/ipv4/tcp_output.c | sk->retransmits++; |
sk | 1050 | net/ipv4/tcp_output.c | sk->prot->retransmits ++; |
sk | 1051 | net/ipv4/tcp_output.c | tcp_reset_xmit_timer (sk, TIME_PROBE0, sk->rto); |
sk | 29 | net/ipv4/tcp_timer.c | void tcp_reset_xmit_timer(struct sock *sk, int why, unsigned long when) |
sk | 31 | net/ipv4/tcp_timer.c | del_timer(&sk->retransmit_timer); |
sk | 32 | net/ipv4/tcp_timer.c | sk->ip_xmit_timeout = why; |
sk | 38 | net/ipv4/tcp_timer.c | sk->retransmit_timer.expires=jiffies+when; |
sk | 39 | net/ipv4/tcp_timer.c | add_timer(&sk->retransmit_timer); |
sk | 52 | net/ipv4/tcp_timer.c | static void tcp_retransmit_time(struct sock *sk, int all) |
sk | 54 | net/ipv4/tcp_timer.c | tcp_do_retransmit(sk, all); |
sk | 73 | net/ipv4/tcp_timer.c | sk->backoff++; |
sk | 74 | net/ipv4/tcp_timer.c | sk->rto = min(sk->rto << 1, 120*HZ); |
sk | 75 | net/ipv4/tcp_timer.c | tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto); |
sk | 88 | net/ipv4/tcp_timer.c | void tcp_retransmit(struct sock *sk, int all) |
sk | 92 | net/ipv4/tcp_timer.c | tcp_retransmit_time(sk, all); |
sk | 96 | net/ipv4/tcp_timer.c | sk->ssthresh = sk->cong_window >> 1; /* remember window where we lost */ |
sk | 98 | net/ipv4/tcp_timer.c | sk->cong_count = 0; |
sk | 100 | net/ipv4/tcp_timer.c | sk->cong_window = 1; |
sk | 103 | net/ipv4/tcp_timer.c | tcp_retransmit_time(sk, all); |
sk | 110 | net/ipv4/tcp_timer.c | static int tcp_write_timeout(struct sock *sk) |
sk | 115 | net/ipv4/tcp_timer.c | if ((sk->state == TCP_ESTABLISHED && sk->retransmits && !(sk->retransmits & 7)) |
sk | 116 | net/ipv4/tcp_timer.c | || (sk->state != TCP_ESTABLISHED && sk->retransmits > TCP_RETR1)) |
sk | 122 | net/ipv4/tcp_timer.c | ip_rt_advice(&sk->ip_route_cache, 0); |
sk | 129 | net/ipv4/tcp_timer.c | if(sk->retransmits > TCP_SYN_RETRIES && sk->state==TCP_SYN_SENT) |
sk | 131 | net/ipv4/tcp_timer.c | if(sk->err_soft) |
sk | 132 | net/ipv4/tcp_timer.c | sk->err=sk->err_soft; |
sk | 134 | net/ipv4/tcp_timer.c | sk->err=ETIMEDOUT; |
sk | 135 | net/ipv4/tcp_timer.c | sk->error_report(sk); |
sk | 136 | net/ipv4/tcp_timer.c | del_timer(&sk->retransmit_timer); |
sk | 138 | net/ipv4/tcp_timer.c | tcp_set_state(sk,TCP_CLOSE); |
sk | 145 | net/ipv4/tcp_timer.c | if (sk->retransmits > TCP_RETR2) |
sk | 147 | net/ipv4/tcp_timer.c | if(sk->err_soft) |
sk | 148 | net/ipv4/tcp_timer.c | sk->err = sk->err_soft; |
sk | 150 | net/ipv4/tcp_timer.c | sk->err = ETIMEDOUT; |
sk | 151 | net/ipv4/tcp_timer.c | sk->error_report(sk); |
sk | 152 | net/ipv4/tcp_timer.c | del_timer(&sk->retransmit_timer); |
sk | 156 | net/ipv4/tcp_timer.c | if (sk->state == TCP_FIN_WAIT1 || sk->state == TCP_FIN_WAIT2 || sk->state == TCP_CLOSING ) |
sk | 158 | net/ipv4/tcp_timer.c | tcp_set_state(sk,TCP_TIME_WAIT); |
sk | 159 | net/ipv4/tcp_timer.c | tcp_reset_msl_timer (sk, TIME_CLOSE, TCP_TIMEWAIT_LEN); |
sk | 166 | net/ipv4/tcp_timer.c | tcp_set_state(sk, TCP_CLOSE); |
sk | 177 | net/ipv4/tcp_timer.c | static void tcp_time_write_timeout(struct sock * sk) |
sk | 184 | net/ipv4/tcp_timer.c | skb = sk->send_head; |
sk | 186 | net/ipv4/tcp_timer.c | if (sk->ack_backlog) |
sk | 187 | net/ipv4/tcp_timer.c | tcp_read_wakeup(sk); |
sk | 196 | net/ipv4/tcp_timer.c | if (jiffies < skb->when + sk->rto) |
sk | 198 | net/ipv4/tcp_timer.c | if (sk->ack_backlog) |
sk | 199 | net/ipv4/tcp_timer.c | tcp_read_wakeup(sk); |
sk | 200 | net/ipv4/tcp_timer.c | tcp_reset_xmit_timer (sk, TIME_WRITE, skb->when + sk->rto - jiffies); |
sk | 209 | net/ipv4/tcp_timer.c | sk->prot->retransmit (sk, 0); |
sk | 210 | net/ipv4/tcp_timer.c | tcp_write_timeout(sk); |
sk | 226 | net/ipv4/tcp_timer.c | struct sock *sk = (struct sock*)data; |
sk | 227 | net/ipv4/tcp_timer.c | int why = sk->ip_xmit_timeout; |
sk | 233 | net/ipv4/tcp_timer.c | if(sk->zapped) |
sk | 240 | net/ipv4/tcp_timer.c | if (sk->users) |
sk | 243 | net/ipv4/tcp_timer.c | sk->retransmit_timer.expires = jiffies+HZ; |
sk | 244 | net/ipv4/tcp_timer.c | add_timer(&sk->retransmit_timer); |
sk | 248 | net/ipv4/tcp_timer.c | if (sk->ack_backlog && !sk->dead) |
sk | 249 | net/ipv4/tcp_timer.c | sk->data_ready(sk,0); |
sk | 257 | net/ipv4/tcp_timer.c | tcp_send_probe0(sk); |
sk | 258 | net/ipv4/tcp_timer.c | tcp_write_timeout(sk); |
sk | 263 | net/ipv4/tcp_timer.c | tcp_time_write_timeout(sk); |
sk | 272 | net/ipv4/tcp_timer.c | tcp_reset_xmit_timer (sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN); |
sk | 274 | net/ipv4/tcp_timer.c | if (sk->prot->write_wakeup) |
sk | 275 | net/ipv4/tcp_timer.c | sk->prot->write_wakeup (sk); |
sk | 276 | net/ipv4/tcp_timer.c | sk->retransmits++; |
sk | 277 | net/ipv4/tcp_timer.c | sk->prot->retransmits++; |
sk | 278 | net/ipv4/tcp_timer.c | tcp_write_timeout(sk); |
sk | 88 | net/ipv4/timer.c | struct sock *sk = (struct sock*)data; |
sk | 89 | net/ipv4/timer.c | int why = sk->timeout; |
sk | 95 | net/ipv4/timer.c | if (sk->users) |
sk | 97 | net/ipv4/timer.c | sk->timer.expires = jiffies+HZ; |
sk | 98 | net/ipv4/timer.c | add_timer(&sk->timer); |
sk | 105 | net/ipv4/timer.c | if (sk->ack_backlog && !sk->zapped) |
sk | 107 | net/ipv4/timer.c | sk->prot->read_wakeup (sk); |
sk | 108 | net/ipv4/timer.c | if (! sk->dead) |
sk | 109 | net/ipv4/timer.c | sk->data_ready(sk,0); |
sk | 118 | net/ipv4/timer.c | if (!sk->dead) { |
sk | 119 | net/ipv4/timer.c | reset_timer(sk, TIME_DONE, TCP_DONE_TIME); |
sk | 123 | net/ipv4/timer.c | if (sk->state != TCP_CLOSE) |
sk | 128 | net/ipv4/timer.c | destroy_sock (sk); |
sk | 137 | net/ipv4/timer.c | destroy_sock(sk); |
sk | 142 | net/ipv4/timer.c | sk->state = TCP_CLOSE; |
sk | 143 | net/ipv4/timer.c | delete_timer (sk); |
sk | 144 | net/ipv4/timer.c | if (!sk->dead) |
sk | 145 | net/ipv4/timer.c | sk->state_change(sk); |
sk | 146 | net/ipv4/timer.c | sk->shutdown = SHUTDOWN_MASK; |
sk | 147 | net/ipv4/timer.c | reset_timer (sk, TIME_DONE, TCP_DONE_TIME); |
sk | 138 | net/ipv4/udp.c | static int udp_deliver(struct sock *sk, struct udphdr *uh, struct sk_buff *skb, struct device *dev, long saddr, long daddr, int len); |
sk | 158 | net/ipv4/udp.c | struct sock *sk; |
sk | 166 | net/ipv4/udp.c | sk = get_sock(&udp_prot, uh->source, daddr, uh->dest, saddr); |
sk | 168 | net/ipv4/udp.c | if (sk == NULL) |
sk | 173 | net/ipv4/udp.c | if (sk->cong_window > 1) |
sk | 174 | net/ipv4/udp.c | sk->cong_window = sk->cong_window/2; |
sk | 180 | net/ipv4/udp.c | sk->err = EPROTO; |
sk | 181 | net/ipv4/udp.c | sk->error_report(sk); |
sk | 203 | net/ipv4/udp.c | if(sk->bsdism && sk->state!=TCP_ESTABLISHED) |
sk | 205 | net/ipv4/udp.c | sk->err = icmp_err_convert[code].errno; |
sk | 206 | net/ipv4/udp.c | sk->error_report(sk); |
sk | 300 | net/ipv4/udp.c | static int udp_send(struct sock *sk, struct sockaddr_in *sin, |
sk | 311 | net/ipv4/udp.c | ufh.uh.source = sk->dummy_th.source; |
sk | 325 | net/ipv4/udp.c | if(sk->no_check) |
sk | 326 | net/ipv4/udp.c | a = ip_build_xmit(sk, udp_getfrag_nosum, &ufh, ulen, |
sk | 327 | net/ipv4/udp.c | sin->sin_addr.s_addr, saddr, sk->opt, rt, IPPROTO_UDP, noblock); |
sk | 329 | net/ipv4/udp.c | a = ip_build_xmit(sk, udp_getfrag, &ufh, ulen, |
sk | 330 | net/ipv4/udp.c | sin->sin_addr.s_addr, saddr, sk->opt, rt, IPPROTO_UDP, noblock); |
sk | 338 | net/ipv4/udp.c | static int udp_sendto(struct sock *sk, const unsigned char *from, int len, int noblock, |
sk | 366 | net/ipv4/udp.c | if (sk->state != TCP_ESTABLISHED) |
sk | 369 | net/ipv4/udp.c | sin.sin_port = sk->dummy_th.dest; |
sk | 370 | net/ipv4/udp.c | sin.sin_addr.s_addr = sk->daddr; |
sk | 392 | net/ipv4/udp.c | if(!sk->broadcast && ip_chk_addr(usin->sin_addr.s_addr)==IS_BROADCAST) |
sk | 395 | net/ipv4/udp.c | lock_sock(sk); |
sk | 398 | net/ipv4/udp.c | tmp = udp_send(sk, usin, from, len, flags, saddr, noblock); |
sk | 401 | net/ipv4/udp.c | release_sock(sk); |
sk | 409 | net/ipv4/udp.c | static int udp_sendmsg(struct sock *sk, struct msghdr *msg, int len, int noblock, |
sk | 413 | net/ipv4/udp.c | return udp_sendto(sk,msg->msg_iov[0].iov_base,len, noblock, flags, msg->msg_name, msg->msg_namelen); |
sk | 433 | net/ipv4/udp.c | err=udp_sendto(sk,buf,len, noblock, flags, msg->msg_name, msg->msg_namelen); |
sk | 444 | net/ipv4/udp.c | int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) |
sk | 453 | net/ipv4/udp.c | if (sk->state == TCP_LISTEN) return(-EINVAL); |
sk | 454 | net/ipv4/udp.c | amount = sock_wspace(sk); |
sk | 468 | net/ipv4/udp.c | if (sk->state == TCP_LISTEN) return(-EINVAL); |
sk | 470 | net/ipv4/udp.c | skb = skb_peek(&sk->receive_queue); |
sk | 499 | net/ipv4/udp.c | int udp_recvmsg(struct sock *sk, struct msghdr *msg, int len, |
sk | 520 | net/ipv4/udp.c | skb=skb_recv_datagram(sk,flags,noblock,&er); |
sk | 532 | net/ipv4/udp.c | sk->stamp=skb->stamp; |
sk | 542 | net/ipv4/udp.c | skb_free_datagram(sk, skb); |
sk | 546 | net/ipv4/udp.c | int udp_connect(struct sock *sk, struct sockaddr_in *usin, int addr_len) |
sk | 557 | net/ipv4/udp.c | if(!sk->broadcast && ip_chk_addr(usin->sin_addr.s_addr)==IS_BROADCAST) |
sk | 560 | net/ipv4/udp.c | rt=ip_rt_route((__u32)usin->sin_addr.s_addr, sk->localroute); |
sk | 563 | net/ipv4/udp.c | if(!sk->saddr) |
sk | 564 | net/ipv4/udp.c | sk->saddr = rt->rt_src; /* Update source address */ |
sk | 565 | net/ipv4/udp.c | if(!sk->rcv_saddr) |
sk | 566 | net/ipv4/udp.c | sk->rcv_saddr = rt->rt_src; |
sk | 567 | net/ipv4/udp.c | sk->daddr = usin->sin_addr.s_addr; |
sk | 568 | net/ipv4/udp.c | sk->dummy_th.dest = usin->sin_port; |
sk | 569 | net/ipv4/udp.c | sk->state = TCP_ESTABLISHED; |
sk | 571 | net/ipv4/udp.c | sk->ip_route_cache = rt; |
sk | 576 | net/ipv4/udp.c | static void udp_close(struct sock *sk, unsigned long timeout) |
sk | 578 | net/ipv4/udp.c | lock_sock(sk); |
sk | 579 | net/ipv4/udp.c | sk->state = TCP_CLOSE; |
sk | 580 | net/ipv4/udp.c | if(uh_cache_sk==sk) |
sk | 582 | net/ipv4/udp.c | release_sock(sk); |
sk | 583 | net/ipv4/udp.c | destroy_sock(sk); |
sk | 595 | net/ipv4/udp.c | struct sock *sk; |
sk | 664 | net/ipv4/udp.c | sk=get_sock_mcast(udp_prot.sock_array[ntohs(uh->dest)&(SOCK_ARRAY_SIZE-1)], uh->dest, |
sk | 666 | net/ipv4/udp.c | if(sk) |
sk | 672 | net/ipv4/udp.c | sknext=get_sock_mcast(sk->next, uh->dest, saddr, uh->source, daddr); |
sk | 678 | net/ipv4/udp.c | udp_deliver(sk, uh, skb1, dev,saddr,daddr,len); |
sk | 679 | net/ipv4/udp.c | sk=sknext; |
sk | 689 | net/ipv4/udp.c | sk=(struct sock *)uh_cache_sk; |
sk | 692 | net/ipv4/udp.c | sk = get_sock(&udp_prot, uh->dest, saddr, uh->source, daddr); |
sk | 697 | net/ipv4/udp.c | uh_cache_sk=sk; |
sk | 700 | net/ipv4/udp.c | if (sk == NULL) |
sk | 711 | net/ipv4/udp.c | skb->sk = NULL; |
sk | 715 | net/ipv4/udp.c | return udp_deliver(sk,uh,skb,dev, saddr, daddr, len); |
sk | 718 | net/ipv4/udp.c | static int udp_deliver(struct sock *sk, struct udphdr *uh, struct sk_buff *skb, struct device *dev, long saddr, long daddr, int len) |
sk | 720 | net/ipv4/udp.c | skb->sk = sk; |
sk | 739 | net/ipv4/udp.c | if (sock_queue_rcv_skb(sk,skb)<0) |
sk | 744 | net/ipv4/udp.c | skb->sk = NULL; |
sk | 143 | net/ipx/af_ipx.c | ipx_remove_socket(ipx_socket *sk) |
sk | 153 | net/ipx/af_ipx.c | intrfc = sk->protinfo.af_ipx.intrfc; |
sk | 160 | net/ipx/af_ipx.c | if(s==sk) { |
sk | 167 | net/ipx/af_ipx.c | if(s->next==sk) { |
sk | 168 | net/ipx/af_ipx.c | s->next=sk->next; |
sk | 185 | net/ipx/af_ipx.c | ipx_destroy_socket(ipx_socket *sk) |
sk | 189 | net/ipx/af_ipx.c | ipx_remove_socket(sk); |
sk | 190 | net/ipx/af_ipx.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) { |
sk | 194 | net/ipx/af_ipx.c | kfree_s(sk,sizeof(*sk)); |
sk | 241 | net/ipx/af_ipx.c | ipxitf_insert_socket(ipx_interface *intrfc, ipx_socket *sk) |
sk | 245 | net/ipx/af_ipx.c | sk->protinfo.af_ipx.intrfc = intrfc; |
sk | 246 | net/ipx/af_ipx.c | sk->next = NULL; |
sk | 248 | net/ipx/af_ipx.c | intrfc->if_sklist = sk; |
sk | 252 | net/ipx/af_ipx.c | s->next = sk; |
sk | 604 | net/ipx/af_ipx.c | if(skb->sk) |
sk | 606 | net/ipx/af_ipx.c | atomic_sub(skb->truesize, &skb->sk->wmem_alloc); |
sk | 607 | net/ipx/af_ipx.c | skb->sk=NULL; |
sk | 619 | net/ipx/af_ipx.c | if (!send_to_wire && skb->sk) |
sk | 621 | net/ipx/af_ipx.c | atomic_sub(skb->truesize, &skb->sk->wmem_alloc); |
sk | 622 | net/ipx/af_ipx.c | skb->sk=NULL; |
sk | 1170 | net/ipx/af_ipx.c | static int ipxrtr_route_packet(ipx_socket *sk, struct sockaddr_ipx *usipx, struct iovec *iov, int len) |
sk | 1199 | net/ipx/af_ipx.c | skb=sock_alloc_send_skb(sk, size, 0, 0, &err); |
sk | 1206 | net/ipx/af_ipx.c | skb->sk=sk; |
sk | 1216 | net/ipx/af_ipx.c | ipx->ipx_source.net = sk->protinfo.af_ipx.intrfc->if_netnum; |
sk | 1218 | net/ipx/af_ipx.c | memcpy(ipx->ipx_source.node, sk->protinfo.af_ipx.node, IPX_NODE_LEN); |
sk | 1220 | net/ipx/af_ipx.c | if ((err = ntohs(sk->protinfo.af_ipx.port)) == 0x453 || err == 0x452) |
sk | 1228 | net/ipx/af_ipx.c | ipx->ipx_source.net = sk->protinfo.af_ipx.intrfc->if_netnum; |
sk | 1229 | net/ipx/af_ipx.c | memcpy(ipx->ipx_source.node, sk->protinfo.af_ipx.intrfc->if_node, IPX_NODE_LEN); |
sk | 1232 | net/ipx/af_ipx.c | ipx->ipx_source.sock = sk->protinfo.af_ipx.port; |
sk | 1509 | net/ipx/af_ipx.c | ipx_socket *sk; |
sk | 1512 | net/ipx/af_ipx.c | sk=(ipx_socket *)sock->data; |
sk | 1528 | net/ipx/af_ipx.c | sk->protinfo.af_ipx.type=opt; |
sk | 1536 | net/ipx/af_ipx.c | return sock_setsockopt(sk,level,optname,optval,optlen); |
sk | 1546 | net/ipx/af_ipx.c | ipx_socket *sk; |
sk | 1550 | net/ipx/af_ipx.c | sk=(ipx_socket *)sock->data; |
sk | 1559 | net/ipx/af_ipx.c | val=sk->protinfo.af_ipx.type; |
sk | 1567 | net/ipx/af_ipx.c | return sock_getsockopt(sk,level,optname,optval,optlen); |
sk | 1587 | net/ipx/af_ipx.c | static void def_callback1(struct sock *sk) |
sk | 1589 | net/ipx/af_ipx.c | if(!sk->dead) |
sk | 1590 | net/ipx/af_ipx.c | wake_up_interruptible(sk->sleep); |
sk | 1593 | net/ipx/af_ipx.c | static void def_callback2(struct sock *sk, int len) |
sk | 1595 | net/ipx/af_ipx.c | if(!sk->dead) |
sk | 1597 | net/ipx/af_ipx.c | wake_up_interruptible(sk->sleep); |
sk | 1598 | net/ipx/af_ipx.c | sock_wake_async(sk->socket, 1); |
sk | 1605 | net/ipx/af_ipx.c | ipx_socket *sk; |
sk | 1606 | net/ipx/af_ipx.c | sk=(ipx_socket *)kmalloc(sizeof(*sk),GFP_KERNEL); |
sk | 1607 | net/ipx/af_ipx.c | if(sk==NULL) |
sk | 1614 | net/ipx/af_ipx.c | kfree_s((void *)sk,sizeof(*sk)); |
sk | 1617 | net/ipx/af_ipx.c | sk->dead=0; |
sk | 1618 | net/ipx/af_ipx.c | sk->next=NULL; |
sk | 1619 | net/ipx/af_ipx.c | sk->broadcast=0; |
sk | 1620 | net/ipx/af_ipx.c | sk->rcvbuf=SK_RMEM_MAX; |
sk | 1621 | net/ipx/af_ipx.c | sk->sndbuf=SK_WMEM_MAX; |
sk | 1622 | net/ipx/af_ipx.c | sk->wmem_alloc=0; |
sk | 1623 | net/ipx/af_ipx.c | sk->rmem_alloc=0; |
sk | 1624 | net/ipx/af_ipx.c | sk->users=0; |
sk | 1625 | net/ipx/af_ipx.c | sk->shutdown=0; |
sk | 1626 | net/ipx/af_ipx.c | sk->prot=NULL; /* So we use default free mechanisms */ |
sk | 1627 | net/ipx/af_ipx.c | sk->err=0; |
sk | 1628 | net/ipx/af_ipx.c | skb_queue_head_init(&sk->receive_queue); |
sk | 1629 | net/ipx/af_ipx.c | skb_queue_head_init(&sk->write_queue); |
sk | 1630 | net/ipx/af_ipx.c | sk->send_head=NULL; |
sk | 1631 | net/ipx/af_ipx.c | skb_queue_head_init(&sk->back_log); |
sk | 1632 | net/ipx/af_ipx.c | sk->state=TCP_CLOSE; |
sk | 1633 | net/ipx/af_ipx.c | sk->socket=sock; |
sk | 1634 | net/ipx/af_ipx.c | sk->type=sock->type; |
sk | 1635 | net/ipx/af_ipx.c | sk->protinfo.af_ipx.type=0; /* General user level IPX */ |
sk | 1636 | net/ipx/af_ipx.c | sk->debug=0; |
sk | 1637 | net/ipx/af_ipx.c | sk->protinfo.af_ipx.intrfc = NULL; |
sk | 1638 | net/ipx/af_ipx.c | memset(&sk->protinfo.af_ipx.dest_addr,'\0', |
sk | 1639 | net/ipx/af_ipx.c | sizeof(sk->protinfo.af_ipx.dest_addr)); |
sk | 1640 | net/ipx/af_ipx.c | sk->protinfo.af_ipx.port = 0; |
sk | 1641 | net/ipx/af_ipx.c | sk->protinfo.af_ipx.ncp_server = 0; |
sk | 1642 | net/ipx/af_ipx.c | sk->mtu=IPX_MTU; |
sk | 1646 | net/ipx/af_ipx.c | sock->data=(void *)sk; |
sk | 1647 | net/ipx/af_ipx.c | sk->sleep=sock->wait; |
sk | 1650 | net/ipx/af_ipx.c | sk->state_change=def_callback1; |
sk | 1651 | net/ipx/af_ipx.c | sk->data_ready=def_callback2; |
sk | 1652 | net/ipx/af_ipx.c | sk->write_space=def_callback1; |
sk | 1653 | net/ipx/af_ipx.c | sk->error_report=def_callback1; |
sk | 1655 | net/ipx/af_ipx.c | sk->zapped=1; |
sk | 1662 | net/ipx/af_ipx.c | ipx_socket *sk=(ipx_socket *)sock->data; |
sk | 1663 | net/ipx/af_ipx.c | if(sk==NULL) |
sk | 1665 | net/ipx/af_ipx.c | if(!sk->dead) |
sk | 1666 | net/ipx/af_ipx.c | sk->state_change(sk); |
sk | 1667 | net/ipx/af_ipx.c | sk->dead=1; |
sk | 1669 | net/ipx/af_ipx.c | ipx_destroy_socket(sk); |
sk | 1698 | net/ipx/af_ipx.c | ipx_socket *sk; |
sk | 1702 | net/ipx/af_ipx.c | sk=(ipx_socket *)sock->data; |
sk | 1704 | net/ipx/af_ipx.c | if(sk->zapped==0) |
sk | 1723 | net/ipx/af_ipx.c | sk->protinfo.af_ipx.port=addr->sipx_port; |
sk | 1740 | net/ipx/af_ipx.c | memcpy(sk->protinfo.af_ipx.node, intrfc->if_node, |
sk | 1745 | net/ipx/af_ipx.c | memcpy(sk->protinfo.af_ipx.node, addr->sipx_node, IPX_NODE_LEN); |
sk | 1748 | net/ipx/af_ipx.c | sk->protinfo.af_ipx.node, |
sk | 1749 | net/ipx/af_ipx.c | sk->protinfo.af_ipx.port) != NULL) |
sk | 1751 | net/ipx/af_ipx.c | if(sk->debug) |
sk | 1764 | net/ipx/af_ipx.c | memcpy(sk->protinfo.af_ipx.node, intrfc->if_node, |
sk | 1768 | net/ipx/af_ipx.c | if(sk->debug) |
sk | 1781 | net/ipx/af_ipx.c | if(sk->debug) |
sk | 1789 | net/ipx/af_ipx.c | ipxitf_insert_socket(intrfc, sk); |
sk | 1790 | net/ipx/af_ipx.c | sk->zapped=0; |
sk | 1791 | net/ipx/af_ipx.c | if(sk->debug) |
sk | 1799 | net/ipx/af_ipx.c | ipx_socket *sk=(ipx_socket *)sock->data; |
sk | 1802 | net/ipx/af_ipx.c | sk->state = TCP_CLOSE; |
sk | 1809 | net/ipx/af_ipx.c | if(sk->protinfo.af_ipx.port==0) |
sk | 1818 | net/ipx/af_ipx.c | memcpy(uaddr.sipx_node, sk->protinfo.af_ipx.intrfc->if_node, |
sk | 1828 | net/ipx/af_ipx.c | sk->protinfo.af_ipx.dest_addr.net=addr->sipx_network; |
sk | 1829 | net/ipx/af_ipx.c | sk->protinfo.af_ipx.dest_addr.sock=addr->sipx_port; |
sk | 1830 | net/ipx/af_ipx.c | memcpy(sk->protinfo.af_ipx.dest_addr.node, |
sk | 1832 | net/ipx/af_ipx.c | sk->protinfo.af_ipx.type=addr->sipx_type; |
sk | 1834 | net/ipx/af_ipx.c | sk->state=TCP_ESTABLISHED; |
sk | 1857 | net/ipx/af_ipx.c | ipx_socket *sk; |
sk | 1859 | net/ipx/af_ipx.c | sk=(ipx_socket *)sock->data; |
sk | 1864 | net/ipx/af_ipx.c | if(sk->state!=TCP_ESTABLISHED) |
sk | 1866 | net/ipx/af_ipx.c | addr=&sk->protinfo.af_ipx.dest_addr; |
sk | 1871 | net/ipx/af_ipx.c | if (sk->protinfo.af_ipx.intrfc != NULL) { |
sk | 1872 | net/ipx/af_ipx.c | sipx.sipx_network = sk->protinfo.af_ipx.intrfc->if_netnum; |
sk | 1874 | net/ipx/af_ipx.c | memcpy(sipx.sipx_node, sk->protinfo.af_ipx.node, IPX_NODE_LEN); |
sk | 1877 | net/ipx/af_ipx.c | sk->protinfo.af_ipx.intrfc->if_node, IPX_NODE_LEN); |
sk | 1884 | net/ipx/af_ipx.c | sipx.sipx_port = sk->protinfo.af_ipx.port; |
sk | 1888 | net/ipx/af_ipx.c | sipx.sipx_type = sk->protinfo.af_ipx.type; |
sk | 1993 | net/ipx/af_ipx.c | ipx_socket *sk=(ipx_socket *)sock->data; |
sk | 1998 | net/ipx/af_ipx.c | if (sk->zapped) return -EIO; /* Socket not bound */ |
sk | 2003 | net/ipx/af_ipx.c | if(sk->protinfo.af_ipx.port == 0) |
sk | 2011 | net/ipx/af_ipx.c | memcpy(uaddr.sipx_node, sk->protinfo.af_ipx.intrfc |
sk | 2026 | net/ipx/af_ipx.c | if(sk->state!=TCP_ESTABLISHED) |
sk | 2030 | net/ipx/af_ipx.c | usipx->sipx_type=sk->protinfo.af_ipx.type; |
sk | 2031 | net/ipx/af_ipx.c | usipx->sipx_port=sk->protinfo.af_ipx.dest_addr.sock; |
sk | 2032 | net/ipx/af_ipx.c | usipx->sipx_network=sk->protinfo.af_ipx.dest_addr.net; |
sk | 2033 | net/ipx/af_ipx.c | memcpy(usipx->sipx_node,sk->protinfo.af_ipx.dest_addr.node,IPX_NODE_LEN); |
sk | 2036 | net/ipx/af_ipx.c | retval = ipxrtr_route_packet(sk, usipx, msg->msg_iov, len); |
sk | 2046 | net/ipx/af_ipx.c | ipx_socket *sk=(ipx_socket *)sock->data; |
sk | 2054 | net/ipx/af_ipx.c | if(sk->err) |
sk | 2055 | net/ipx/af_ipx.c | return sock_error(sk); |
sk | 2057 | net/ipx/af_ipx.c | if (sk->zapped) |
sk | 2061 | net/ipx/af_ipx.c | skb=skb_recv_datagram(sk,flags,noblock,&er); |
sk | 2081 | net/ipx/af_ipx.c | skb_free_datagram(sk, skb); |
sk | 2085 | net/ipx/af_ipx.c | static int ipx_shutdown(struct socket *sk,int how) |
sk | 2092 | net/ipx/af_ipx.c | ipx_socket *sk=(ipx_socket *)sock->data; |
sk | 2094 | net/ipx/af_ipx.c | return datagram_select(sk,sel_type,wait); |
sk | 2101 | net/ipx/af_ipx.c | ipx_socket *sk=(ipx_socket *)sock->data; |
sk | 2109 | net/ipx/af_ipx.c | amount=sk->sndbuf-sk->wmem_alloc; |
sk | 2118 | net/ipx/af_ipx.c | if((skb=skb_peek(&sk->receive_queue))!=NULL) |
sk | 2146 | net/ipx/af_ipx.c | if (sk) |
sk | 2148 | net/ipx/af_ipx.c | if(sk->stamp.tv_sec==0) |
sk | 2153 | net/ipx/af_ipx.c | memcpy_tofs((void *)arg,&sk->stamp,sizeof(struct timeval)); |
sk | 72 | net/netrom/af_netrom.c | static void nr_remove_socket(struct sock *sk) |
sk | 80 | net/netrom/af_netrom.c | if ((s = nr_list) == sk) { |
sk | 87 | net/netrom/af_netrom.c | if (s->next == sk) { |
sk | 88 | net/netrom/af_netrom.c | s->next = sk->next; |
sk | 137 | net/netrom/af_netrom.c | static void nr_insert_socket(struct sock *sk) |
sk | 144 | net/netrom/af_netrom.c | sk->next = nr_list; |
sk | 145 | net/netrom/af_netrom.c | nr_list = sk; |
sk | 238 | net/netrom/af_netrom.c | void nr_destroy_socket(struct sock *sk) /* Not static as its used by the timer */ |
sk | 246 | net/netrom/af_netrom.c | del_timer(&sk->timer); |
sk | 248 | net/netrom/af_netrom.c | nr_remove_socket(sk); |
sk | 249 | net/netrom/af_netrom.c | nr_clear_queues(sk); /* Flush the queues */ |
sk | 251 | net/netrom/af_netrom.c | while ((skb = skb_dequeue(&sk->receive_queue)) != NULL) { |
sk | 252 | net/netrom/af_netrom.c | if (skb->sk != sk) { /* A pending connection */ |
sk | 253 | net/netrom/af_netrom.c | skb->sk->dead = 1; /* Queue the unaccepted socket for death */ |
sk | 254 | net/netrom/af_netrom.c | nr_set_timer(skb->sk); |
sk | 255 | net/netrom/af_netrom.c | skb->sk->nr->state = NR_STATE_0; |
sk | 261 | net/netrom/af_netrom.c | if (sk->wmem_alloc || sk->rmem_alloc) { /* Defer: outstanding buffers */ |
sk | 262 | net/netrom/af_netrom.c | init_timer(&sk->timer); |
sk | 263 | net/netrom/af_netrom.c | sk->timer.expires = jiffies + 10 * HZ; |
sk | 264 | net/netrom/af_netrom.c | sk->timer.function = nr_destroy_timer; |
sk | 265 | net/netrom/af_netrom.c | sk->timer.data = (unsigned long)sk; |
sk | 266 | net/netrom/af_netrom.c | add_timer(&sk->timer); |
sk | 268 | net/netrom/af_netrom.c | kfree_s(sk->nr, sizeof(*sk->nr)); |
sk | 269 | net/netrom/af_netrom.c | kfree_s(sk, sizeof(*sk)); |
sk | 288 | net/netrom/af_netrom.c | struct sock *sk; |
sk | 291 | net/netrom/af_netrom.c | sk = (struct sock *)sock->data; |
sk | 294 | net/netrom/af_netrom.c | return sock_setsockopt(sk, level, optname, optval, optlen); |
sk | 311 | net/netrom/af_netrom.c | sk->nr->rtt = (opt * PR_SLOWHZ) / 2; |
sk | 317 | net/netrom/af_netrom.c | sk->nr->t2 = opt * PR_SLOWHZ; |
sk | 323 | net/netrom/af_netrom.c | sk->nr->n2 = opt; |
sk | 327 | net/netrom/af_netrom.c | sk->nr->hdrincl = opt ? 1 : 0; |
sk | 338 | net/netrom/af_netrom.c | struct sock *sk; |
sk | 342 | net/netrom/af_netrom.c | sk = (struct sock *)sock->data; |
sk | 345 | net/netrom/af_netrom.c | return sock_getsockopt(sk, level, optname, optval, optlen); |
sk | 352 | net/netrom/af_netrom.c | val = (sk->nr->t1 * 2) / PR_SLOWHZ; |
sk | 356 | net/netrom/af_netrom.c | val = sk->nr->t2 / PR_SLOWHZ; |
sk | 360 | net/netrom/af_netrom.c | val = sk->nr->n2; |
sk | 364 | net/netrom/af_netrom.c | val = sk->nr->hdrincl; |
sk | 386 | net/netrom/af_netrom.c | struct sock *sk = (struct sock *)sock->data; |
sk | 388 | net/netrom/af_netrom.c | if (sk->state != TCP_LISTEN) { |
sk | 389 | net/netrom/af_netrom.c | memset(&sk->nr->user_addr, '\0', AX25_ADDR_LEN); |
sk | 390 | net/netrom/af_netrom.c | sk->max_ack_backlog = backlog; |
sk | 391 | net/netrom/af_netrom.c | sk->state = TCP_LISTEN; |
sk | 398 | net/netrom/af_netrom.c | static void def_callback1(struct sock *sk) |
sk | 400 | net/netrom/af_netrom.c | if (!sk->dead) |
sk | 401 | net/netrom/af_netrom.c | wake_up_interruptible(sk->sleep); |
sk | 404 | net/netrom/af_netrom.c | static void def_callback2(struct sock *sk, int len) |
sk | 406 | net/netrom/af_netrom.c | if (!sk->dead) |
sk | 407 | net/netrom/af_netrom.c | wake_up_interruptible(sk->sleep); |
sk | 412 | net/netrom/af_netrom.c | struct sock *sk; |
sk | 418 | net/netrom/af_netrom.c | if ((sk = (struct sock *)kmalloc(sizeof(*sk), GFP_ATOMIC)) == NULL) |
sk | 422 | net/netrom/af_netrom.c | kfree_s(sk, sizeof(*sk)); |
sk | 426 | net/netrom/af_netrom.c | skb_queue_head_init(&sk->receive_queue); |
sk | 427 | net/netrom/af_netrom.c | skb_queue_head_init(&sk->write_queue); |
sk | 428 | net/netrom/af_netrom.c | skb_queue_head_init(&sk->back_log); |
sk | 430 | net/netrom/af_netrom.c | init_timer(&sk->timer); |
sk | 432 | net/netrom/af_netrom.c | sk->socket = sock; |
sk | 433 | net/netrom/af_netrom.c | sk->type = sock->type; |
sk | 434 | net/netrom/af_netrom.c | sk->protocol = protocol; |
sk | 435 | net/netrom/af_netrom.c | sk->dead = 0; |
sk | 436 | net/netrom/af_netrom.c | sk->next = NULL; |
sk | 437 | net/netrom/af_netrom.c | sk->broadcast = 0; |
sk | 438 | net/netrom/af_netrom.c | sk->allocation = GFP_KERNEL; |
sk | 439 | net/netrom/af_netrom.c | sk->rcvbuf = SK_RMEM_MAX; |
sk | 440 | net/netrom/af_netrom.c | sk->sndbuf = SK_WMEM_MAX; |
sk | 441 | net/netrom/af_netrom.c | sk->wmem_alloc = 0; |
sk | 442 | net/netrom/af_netrom.c | sk->rmem_alloc = 0; |
sk | 443 | net/netrom/af_netrom.c | sk->users = 0; |
sk | 444 | net/netrom/af_netrom.c | sk->debug = 0; |
sk | 445 | net/netrom/af_netrom.c | sk->destroy = 0; |
sk | 446 | net/netrom/af_netrom.c | sk->prot = NULL; /* So we use default free mechanisms */ |
sk | 447 | net/netrom/af_netrom.c | sk->err = 0; |
sk | 448 | net/netrom/af_netrom.c | sk->localroute = 0; |
sk | 449 | net/netrom/af_netrom.c | sk->send_head = NULL; |
sk | 450 | net/netrom/af_netrom.c | sk->state = TCP_CLOSE; |
sk | 451 | net/netrom/af_netrom.c | sk->shutdown = 0; |
sk | 452 | net/netrom/af_netrom.c | sk->priority = SOPRI_NORMAL; |
sk | 453 | net/netrom/af_netrom.c | sk->ack_backlog = 0; |
sk | 454 | net/netrom/af_netrom.c | sk->mtu = NETROM_MTU; /* 236 */ |
sk | 455 | net/netrom/af_netrom.c | sk->zapped = 1; |
sk | 456 | net/netrom/af_netrom.c | sk->window = nr_default.window; |
sk | 458 | net/netrom/af_netrom.c | sk->state_change = def_callback1; |
sk | 459 | net/netrom/af_netrom.c | sk->data_ready = def_callback2; |
sk | 460 | net/netrom/af_netrom.c | sk->write_space = def_callback1; |
sk | 461 | net/netrom/af_netrom.c | sk->error_report = def_callback1; |
sk | 464 | net/netrom/af_netrom.c | sock->data = (void *)sk; |
sk | 465 | net/netrom/af_netrom.c | sk->sleep = sock->wait; |
sk | 505 | net/netrom/af_netrom.c | nr->sk = sk; |
sk | 506 | net/netrom/af_netrom.c | sk->nr = nr; |
sk | 513 | net/netrom/af_netrom.c | struct sock *sk; |
sk | 519 | net/netrom/af_netrom.c | if ((sk = (struct sock *)kmalloc(sizeof(*sk), GFP_ATOMIC)) == NULL) |
sk | 523 | net/netrom/af_netrom.c | kfree_s(sk, sizeof(*sk)); |
sk | 527 | net/netrom/af_netrom.c | skb_queue_head_init(&sk->receive_queue); |
sk | 528 | net/netrom/af_netrom.c | skb_queue_head_init(&sk->write_queue); |
sk | 529 | net/netrom/af_netrom.c | skb_queue_head_init(&sk->back_log); |
sk | 531 | net/netrom/af_netrom.c | init_timer(&sk->timer); |
sk | 533 | net/netrom/af_netrom.c | sk->type = osk->type; |
sk | 534 | net/netrom/af_netrom.c | sk->socket = osk->socket; |
sk | 535 | net/netrom/af_netrom.c | sk->dead = 0; |
sk | 536 | net/netrom/af_netrom.c | sk->next = NULL; |
sk | 537 | net/netrom/af_netrom.c | sk->priority = osk->priority; |
sk | 538 | net/netrom/af_netrom.c | sk->broadcast = 0; |
sk | 539 | net/netrom/af_netrom.c | sk->protocol = osk->protocol; |
sk | 540 | net/netrom/af_netrom.c | sk->rcvbuf = osk->rcvbuf; |
sk | 541 | net/netrom/af_netrom.c | sk->sndbuf = osk->sndbuf; |
sk | 542 | net/netrom/af_netrom.c | sk->wmem_alloc = 0; |
sk | 543 | net/netrom/af_netrom.c | sk->rmem_alloc = 0; |
sk | 544 | net/netrom/af_netrom.c | sk->users = 0; |
sk | 545 | net/netrom/af_netrom.c | sk->ack_backlog = 0; |
sk | 546 | net/netrom/af_netrom.c | sk->destroy = 0; |
sk | 547 | net/netrom/af_netrom.c | sk->prot = NULL; /* So we use default free mechanisms */ |
sk | 548 | net/netrom/af_netrom.c | sk->err = 0; |
sk | 549 | net/netrom/af_netrom.c | sk->localroute = 0; |
sk | 550 | net/netrom/af_netrom.c | sk->send_head = NULL; |
sk | 551 | net/netrom/af_netrom.c | sk->debug = osk->debug; |
sk | 552 | net/netrom/af_netrom.c | sk->state = TCP_ESTABLISHED; |
sk | 553 | net/netrom/af_netrom.c | sk->window = osk->window; |
sk | 554 | net/netrom/af_netrom.c | sk->shutdown = 0; |
sk | 555 | net/netrom/af_netrom.c | sk->mtu = osk->mtu; |
sk | 556 | net/netrom/af_netrom.c | sk->sleep = osk->sleep; |
sk | 557 | net/netrom/af_netrom.c | sk->zapped = osk->zapped; |
sk | 559 | net/netrom/af_netrom.c | sk->state_change = def_callback1; |
sk | 560 | net/netrom/af_netrom.c | sk->data_ready = def_callback2; |
sk | 561 | net/netrom/af_netrom.c | sk->write_space = def_callback1; |
sk | 562 | net/netrom/af_netrom.c | sk->error_report = def_callback1; |
sk | 588 | net/netrom/af_netrom.c | sk->nr = nr; |
sk | 589 | net/netrom/af_netrom.c | nr->sk = sk; |
sk | 591 | net/netrom/af_netrom.c | return sk; |
sk | 596 | net/netrom/af_netrom.c | struct sock *sk = (struct sock *)oldsock->data; |
sk | 598 | net/netrom/af_netrom.c | return nr_create(newsock, sk->protocol); |
sk | 603 | net/netrom/af_netrom.c | struct sock *sk = (struct sock *)sock->data; |
sk | 605 | net/netrom/af_netrom.c | if (sk == NULL) return 0; |
sk | 607 | net/netrom/af_netrom.c | switch (sk->nr->state) { |
sk | 610 | net/netrom/af_netrom.c | sk->state = TCP_CLOSE; |
sk | 611 | net/netrom/af_netrom.c | sk->state_change(sk); |
sk | 612 | net/netrom/af_netrom.c | sk->dead = 1; |
sk | 613 | net/netrom/af_netrom.c | nr_destroy_socket(sk); |
sk | 617 | net/netrom/af_netrom.c | sk->nr->state = NR_STATE_0; |
sk | 618 | net/netrom/af_netrom.c | sk->state = TCP_CLOSE; |
sk | 619 | net/netrom/af_netrom.c | sk->state_change(sk); |
sk | 620 | net/netrom/af_netrom.c | sk->dead = 1; |
sk | 621 | net/netrom/af_netrom.c | nr_destroy_socket(sk); |
sk | 625 | net/netrom/af_netrom.c | nr_write_internal(sk, NR_DISCACK); |
sk | 626 | net/netrom/af_netrom.c | sk->nr->state = NR_STATE_0; |
sk | 627 | net/netrom/af_netrom.c | sk->state = TCP_CLOSE; |
sk | 628 | net/netrom/af_netrom.c | sk->state_change(sk); |
sk | 629 | net/netrom/af_netrom.c | sk->dead = 1; |
sk | 630 | net/netrom/af_netrom.c | nr_destroy_socket(sk); |
sk | 634 | net/netrom/af_netrom.c | nr_clear_queues(sk); |
sk | 635 | net/netrom/af_netrom.c | sk->nr->n2count = 0; |
sk | 636 | net/netrom/af_netrom.c | nr_write_internal(sk, NR_DISCREQ); |
sk | 637 | net/netrom/af_netrom.c | sk->nr->t1timer = sk->nr->t1 = nr_calculate_t1(sk); |
sk | 638 | net/netrom/af_netrom.c | sk->nr->t2timer = 0; |
sk | 639 | net/netrom/af_netrom.c | sk->nr->t4timer = 0; |
sk | 640 | net/netrom/af_netrom.c | sk->nr->state = NR_STATE_2; |
sk | 641 | net/netrom/af_netrom.c | sk->state = TCP_CLOSE; |
sk | 642 | net/netrom/af_netrom.c | sk->state_change(sk); |
sk | 643 | net/netrom/af_netrom.c | sk->dead = 1; |
sk | 644 | net/netrom/af_netrom.c | sk->destroy = 1; |
sk | 652 | net/netrom/af_netrom.c | sk->socket = NULL; /* Not used, but we should do this. **/ |
sk | 659 | net/netrom/af_netrom.c | struct sock *sk; |
sk | 664 | net/netrom/af_netrom.c | sk = (struct sock *)sock->data; |
sk | 666 | net/netrom/af_netrom.c | if (sk->zapped == 0) |
sk | 673 | net/netrom/af_netrom.c | if (sk->debug) |
sk | 684 | net/netrom/af_netrom.c | sk->nr->user_addr = addr->fsa_digipeater[0]; |
sk | 685 | net/netrom/af_netrom.c | sk->nr->source_addr = addr->fsa_ax25.sax25_call; |
sk | 695 | net/netrom/af_netrom.c | sk->nr->user_addr = *user; |
sk | 696 | net/netrom/af_netrom.c | sk->nr->source_addr = *source; |
sk | 699 | net/netrom/af_netrom.c | sk->nr->device = dev; |
sk | 700 | net/netrom/af_netrom.c | nr_insert_socket(sk); |
sk | 702 | net/netrom/af_netrom.c | sk->zapped = 0; |
sk | 704 | net/netrom/af_netrom.c | if (sk->debug) |
sk | 713 | net/netrom/af_netrom.c | struct sock *sk = (struct sock *)sock->data; |
sk | 718 | net/netrom/af_netrom.c | if (sk->state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { |
sk | 723 | net/netrom/af_netrom.c | if (sk->state == TCP_CLOSE && sock->state == SS_CONNECTING) { |
sk | 728 | net/netrom/af_netrom.c | if (sk->state == TCP_ESTABLISHED) |
sk | 731 | net/netrom/af_netrom.c | sk->state = TCP_CLOSE; |
sk | 737 | net/netrom/af_netrom.c | if (sk->zapped) { /* Must bind first - autobinding in this may or may not work */ |
sk | 738 | net/netrom/af_netrom.c | sk->zapped = 0; |
sk | 751 | net/netrom/af_netrom.c | sk->nr->user_addr = *user; |
sk | 752 | net/netrom/af_netrom.c | sk->nr->source_addr = *source; |
sk | 753 | net/netrom/af_netrom.c | sk->nr->device = dev; |
sk | 755 | net/netrom/af_netrom.c | nr_insert_socket(sk); /* Finish the bind */ |
sk | 758 | net/netrom/af_netrom.c | sk->nr->dest_addr = addr->sax25_call; |
sk | 763 | net/netrom/af_netrom.c | sk->nr->my_index = circuit / 256; |
sk | 764 | net/netrom/af_netrom.c | sk->nr->my_id = circuit % 256; |
sk | 770 | net/netrom/af_netrom.c | sk->state = TCP_SYN_SENT; |
sk | 771 | net/netrom/af_netrom.c | nr_establish_data_link(sk); |
sk | 772 | net/netrom/af_netrom.c | sk->nr->state = NR_STATE_1; |
sk | 773 | net/netrom/af_netrom.c | nr_set_timer(sk); |
sk | 776 | net/netrom/af_netrom.c | if (sk->state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) |
sk | 784 | net/netrom/af_netrom.c | while (sk->state == TCP_SYN_SENT) { |
sk | 785 | net/netrom/af_netrom.c | interruptible_sleep_on(sk->sleep); |
sk | 792 | net/netrom/af_netrom.c | if (sk->state != TCP_ESTABLISHED) { |
sk | 795 | net/netrom/af_netrom.c | return sock_error(sk); /* Always set at this point */ |
sk | 812 | net/netrom/af_netrom.c | struct sock *sk; |
sk | 821 | net/netrom/af_netrom.c | sk = (struct sock *)sock->data; |
sk | 823 | net/netrom/af_netrom.c | if (sk->type != SOCK_SEQPACKET) |
sk | 826 | net/netrom/af_netrom.c | if (sk->state != TCP_LISTEN) |
sk | 835 | net/netrom/af_netrom.c | if ((skb = skb_dequeue(&sk->receive_queue)) == NULL) { |
sk | 840 | net/netrom/af_netrom.c | interruptible_sleep_on(sk->sleep); |
sk | 848 | net/netrom/af_netrom.c | newsk = skb->sk; |
sk | 853 | net/netrom/af_netrom.c | skb->sk = NULL; |
sk | 855 | net/netrom/af_netrom.c | sk->ack_backlog--; |
sk | 865 | net/netrom/af_netrom.c | struct sock *sk; |
sk | 867 | net/netrom/af_netrom.c | sk = (struct sock *)sock->data; |
sk | 870 | net/netrom/af_netrom.c | if (sk->state != TCP_ESTABLISHED) |
sk | 874 | net/netrom/af_netrom.c | sax->fsa_ax25.sax25_call = sk->nr->user_addr; |
sk | 875 | net/netrom/af_netrom.c | sax->fsa_digipeater[0] = sk->nr->dest_addr; |
sk | 880 | net/netrom/af_netrom.c | sax->fsa_ax25.sax25_call = sk->nr->source_addr; |
sk | 889 | net/netrom/af_netrom.c | struct sock *sk; |
sk | 896 | net/netrom/af_netrom.c | skb->sk = NULL; /* Initially we don't know who its for */ |
sk | 925 | net/netrom/af_netrom.c | if (((frametype & 0x0F) != NR_CONNREQ && (sk = nr_find_socket(circuit_index, circuit_id)) != NULL) || |
sk | 926 | net/netrom/af_netrom.c | ((frametype & 0x0F) == NR_CONNREQ && (sk = nr_find_peer(circuit_index, circuit_id)) != NULL)) { |
sk | 930 | net/netrom/af_netrom.c | sk->nr->bpqext = 1; |
sk | 932 | net/netrom/af_netrom.c | sk->nr->bpqext = 0; |
sk | 934 | net/netrom/af_netrom.c | return nr_process_rx_frame(sk, skb); |
sk | 940 | net/netrom/af_netrom.c | sk = nr_find_listener(dest); |
sk | 944 | net/netrom/af_netrom.c | if (sk == NULL || sk->ack_backlog == sk->max_ack_backlog || (make = nr_make_new(sk)) == NULL) { |
sk | 951 | net/netrom/af_netrom.c | skb->sk = make; |
sk | 989 | net/netrom/af_netrom.c | sk->ack_backlog++; |
sk | 990 | net/netrom/af_netrom.c | make->pair = sk; |
sk | 994 | net/netrom/af_netrom.c | skb_queue_head(&sk->receive_queue, skb); |
sk | 998 | net/netrom/af_netrom.c | if (!sk->dead) |
sk | 999 | net/netrom/af_netrom.c | sk->data_ready(sk, skb->len); |
sk | 1006 | net/netrom/af_netrom.c | struct sock *sk = (struct sock *)sock->data; |
sk | 1014 | net/netrom/af_netrom.c | if (sk->err) |
sk | 1015 | net/netrom/af_netrom.c | return sock_error(sk); |
sk | 1020 | net/netrom/af_netrom.c | if (sk->zapped) |
sk | 1023 | net/netrom/af_netrom.c | if (sk->nr->device == NULL) |
sk | 1030 | net/netrom/af_netrom.c | if (ax25cmp(&sk->nr->dest_addr, &sax.sax25_call) != 0) |
sk | 1035 | net/netrom/af_netrom.c | if (sk->state != TCP_ESTABLISHED) |
sk | 1038 | net/netrom/af_netrom.c | sax.sax25_call = sk->nr->dest_addr; |
sk | 1041 | net/netrom/af_netrom.c | if (sk->debug) |
sk | 1045 | net/netrom/af_netrom.c | if (sk->debug) |
sk | 1050 | net/netrom/af_netrom.c | if ((skb = sock_alloc_send_skb(sk, size, 0, 0, &err)) == NULL) |
sk | 1053 | net/netrom/af_netrom.c | skb->sk = sk; |
sk | 1065 | net/netrom/af_netrom.c | if (sk->debug) |
sk | 1070 | net/netrom/af_netrom.c | *asmptr++ = sk->nr->your_index; |
sk | 1071 | net/netrom/af_netrom.c | *asmptr++ = sk->nr->your_id; |
sk | 1076 | net/netrom/af_netrom.c | if (sk->debug) |
sk | 1087 | net/netrom/af_netrom.c | if (sk->debug) |
sk | 1093 | net/netrom/af_netrom.c | if (sk->debug) |
sk | 1096 | net/netrom/af_netrom.c | if (sk->state != TCP_ESTABLISHED) { |
sk | 1101 | net/netrom/af_netrom.c | nr_output(sk, skb); /* Shove it onto the queue */ |
sk | 1110 | net/netrom/af_netrom.c | struct sock *sk = (struct sock *)sock->data; |
sk | 1116 | net/netrom/af_netrom.c | if (sk->err) |
sk | 1117 | net/netrom/af_netrom.c | return sock_error(sk); |
sk | 1126 | net/netrom/af_netrom.c | if (sk->state != TCP_ESTABLISHED) |
sk | 1130 | net/netrom/af_netrom.c | if ((skb = skb_recv_datagram(sk, flags, noblock, &er)) == NULL) |
sk | 1133 | net/netrom/af_netrom.c | if (!sk->nr->hdrincl) { |
sk | 1152 | net/netrom/af_netrom.c | skb_free_datagram(sk, skb); |
sk | 1157 | net/netrom/af_netrom.c | static int nr_shutdown(struct socket *sk, int how) |
sk | 1164 | net/netrom/af_netrom.c | struct sock *sk = (struct sock *)sock->data; |
sk | 1166 | net/netrom/af_netrom.c | return datagram_select(sk, sel_type, wait); |
sk | 1171 | net/netrom/af_netrom.c | struct sock *sk = (struct sock *)sock->data; |
sk | 1179 | net/netrom/af_netrom.c | amount = sk->sndbuf - sk->wmem_alloc; |
sk | 1188 | net/netrom/af_netrom.c | if ((skb = skb_peek(&sk->receive_queue)) != NULL) |
sk | 1197 | net/netrom/af_netrom.c | if (sk != NULL) { |
sk | 1198 | net/netrom/af_netrom.c | if (sk->stamp.tv_sec==0) |
sk | 1202 | net/netrom/af_netrom.c | memcpy_tofs((void *)arg, &sk->stamp, sizeof(struct timeval)); |
sk | 53 | net/netrom/nr_in.c | static int nr_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more) |
sk | 58 | net/netrom/nr_in.c | sk->nr->fraglen += skb->len; |
sk | 59 | net/netrom/nr_in.c | skb_queue_tail(&sk->nr->frag_queue, skb); |
sk | 63 | net/netrom/nr_in.c | if (!more && sk->nr->fraglen > 0) { /* End of fragment */ |
sk | 64 | net/netrom/nr_in.c | sk->nr->fraglen += skb->len; |
sk | 65 | net/netrom/nr_in.c | skb_queue_tail(&sk->nr->frag_queue, skb); |
sk | 67 | net/netrom/nr_in.c | if ((skbn = alloc_skb(sk->nr->fraglen, GFP_ATOMIC)) == NULL) |
sk | 72 | net/netrom/nr_in.c | skbn->sk = sk; |
sk | 73 | net/netrom/nr_in.c | sk->rmem_alloc += skbn->truesize; |
sk | 76 | net/netrom/nr_in.c | skbo = skb_dequeue(&sk->nr->frag_queue); |
sk | 80 | net/netrom/nr_in.c | while ((skbo = skb_dequeue(&sk->nr->frag_queue)) != NULL) { |
sk | 86 | net/netrom/nr_in.c | sk->nr->fraglen = 0; |
sk | 89 | net/netrom/nr_in.c | return sock_queue_rcv_skb(sk, skbn); |
sk | 97 | net/netrom/nr_in.c | static int nr_state1_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
sk | 102 | net/netrom/nr_in.c | nr_calculate_rtt(sk); |
sk | 103 | net/netrom/nr_in.c | sk->window = skb->data[20]; |
sk | 104 | net/netrom/nr_in.c | sk->nr->your_index = skb->data[17]; |
sk | 105 | net/netrom/nr_in.c | sk->nr->your_id = skb->data[18]; |
sk | 106 | net/netrom/nr_in.c | sk->nr->t1timer = 0; |
sk | 107 | net/netrom/nr_in.c | sk->nr->t2timer = 0; |
sk | 108 | net/netrom/nr_in.c | sk->nr->t4timer = 0; |
sk | 109 | net/netrom/nr_in.c | sk->nr->vs = 0; |
sk | 110 | net/netrom/nr_in.c | sk->nr->va = 0; |
sk | 111 | net/netrom/nr_in.c | sk->nr->vr = 0; |
sk | 112 | net/netrom/nr_in.c | sk->nr->vl = 0; |
sk | 113 | net/netrom/nr_in.c | sk->nr->state = NR_STATE_3; |
sk | 114 | net/netrom/nr_in.c | sk->state = TCP_ESTABLISHED; |
sk | 115 | net/netrom/nr_in.c | sk->nr->n2count = 0; |
sk | 117 | net/netrom/nr_in.c | if (!sk->dead) |
sk | 118 | net/netrom/nr_in.c | sk->state_change(sk); |
sk | 122 | net/netrom/nr_in.c | nr_clear_queues(sk); |
sk | 123 | net/netrom/nr_in.c | sk->nr->state = NR_STATE_0; |
sk | 124 | net/netrom/nr_in.c | sk->state = TCP_CLOSE; |
sk | 125 | net/netrom/nr_in.c | sk->err = ECONNREFUSED; |
sk | 126 | net/netrom/nr_in.c | if (!sk->dead) |
sk | 127 | net/netrom/nr_in.c | sk->state_change(sk); |
sk | 128 | net/netrom/nr_in.c | sk->dead = 1; |
sk | 143 | net/netrom/nr_in.c | static int nr_state2_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
sk | 148 | net/netrom/nr_in.c | nr_write_internal(sk, NR_DISCACK); |
sk | 151 | net/netrom/nr_in.c | sk->nr->state = NR_STATE_0; |
sk | 152 | net/netrom/nr_in.c | sk->state = TCP_CLOSE; |
sk | 153 | net/netrom/nr_in.c | sk->err = 0; |
sk | 154 | net/netrom/nr_in.c | if (!sk->dead) |
sk | 155 | net/netrom/nr_in.c | sk->state_change(sk); |
sk | 156 | net/netrom/nr_in.c | sk->dead = 1; |
sk | 171 | net/netrom/nr_in.c | static int nr_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
sk | 185 | net/netrom/nr_in.c | nr_write_internal(sk, NR_CONNACK); |
sk | 189 | net/netrom/nr_in.c | nr_clear_queues(sk); |
sk | 190 | net/netrom/nr_in.c | nr_write_internal(sk, NR_DISCACK); |
sk | 191 | net/netrom/nr_in.c | sk->nr->state = NR_STATE_0; |
sk | 192 | net/netrom/nr_in.c | sk->state = TCP_CLOSE; |
sk | 193 | net/netrom/nr_in.c | sk->err = 0; |
sk | 194 | net/netrom/nr_in.c | if (!sk->dead) |
sk | 195 | net/netrom/nr_in.c | sk->state_change(sk); |
sk | 196 | net/netrom/nr_in.c | sk->dead = 1; |
sk | 200 | net/netrom/nr_in.c | nr_clear_queues(sk); |
sk | 201 | net/netrom/nr_in.c | sk->nr->state = NR_STATE_0; |
sk | 202 | net/netrom/nr_in.c | sk->state = TCP_CLOSE; |
sk | 203 | net/netrom/nr_in.c | sk->err = ECONNRESET; |
sk | 204 | net/netrom/nr_in.c | if (!sk->dead) |
sk | 205 | net/netrom/nr_in.c | sk->state_change(sk); |
sk | 206 | net/netrom/nr_in.c | sk->dead = 1; |
sk | 214 | net/netrom/nr_in.c | sk->nr->condition |= PEER_RX_BUSY_CONDITION; |
sk | 215 | net/netrom/nr_in.c | sk->nr->t4timer = nr_default.busy_delay; |
sk | 217 | net/netrom/nr_in.c | sk->nr->condition &= ~PEER_RX_BUSY_CONDITION; |
sk | 218 | net/netrom/nr_in.c | sk->nr->t4timer = 0; |
sk | 220 | net/netrom/nr_in.c | if (!nr_validate_nr(sk, nr)) { |
sk | 224 | net/netrom/nr_in.c | nr_frames_acked(sk, nr); |
sk | 225 | net/netrom/nr_in.c | nr_send_nak_frame(sk); |
sk | 227 | net/netrom/nr_in.c | if (sk->nr->condition & PEER_RX_BUSY_CONDITION) { |
sk | 228 | net/netrom/nr_in.c | nr_frames_acked(sk, nr); |
sk | 230 | net/netrom/nr_in.c | nr_check_iframes_acked(sk, nr); |
sk | 244 | net/netrom/nr_in.c | sk->nr->condition |= PEER_RX_BUSY_CONDITION; |
sk | 245 | net/netrom/nr_in.c | sk->nr->t4timer = nr_default.busy_delay; |
sk | 247 | net/netrom/nr_in.c | sk->nr->condition &= ~PEER_RX_BUSY_CONDITION; |
sk | 248 | net/netrom/nr_in.c | sk->nr->t4timer = 0; |
sk | 250 | net/netrom/nr_in.c | if (nr_validate_nr(sk, nr)) { |
sk | 252 | net/netrom/nr_in.c | nr_frames_acked(sk, nr); |
sk | 253 | net/netrom/nr_in.c | nr_send_nak_frame(sk); |
sk | 255 | net/netrom/nr_in.c | if (sk->nr->condition & PEER_RX_BUSY_CONDITION) { |
sk | 256 | net/netrom/nr_in.c | nr_frames_acked(sk, nr); |
sk | 258 | net/netrom/nr_in.c | nr_check_iframes_acked(sk, nr); |
sk | 263 | net/netrom/nr_in.c | skb_queue_head(&sk->nr->reseq_queue, skb); |
sk | 264 | net/netrom/nr_in.c | if (sk->nr->condition & OWN_RX_BUSY_CONDITION) |
sk | 268 | net/netrom/nr_in.c | save_vr = sk->nr->vr; |
sk | 269 | net/netrom/nr_in.c | while ((skbn = skb_dequeue(&sk->nr->reseq_queue)) != NULL) { |
sk | 271 | net/netrom/nr_in.c | if (ns == sk->nr->vr) { |
sk | 272 | net/netrom/nr_in.c | if (nr_queue_rx_frame(sk, skbn, frametype & NR_MORE_FLAG) == 0) { |
sk | 273 | net/netrom/nr_in.c | sk->nr->vr = (sk->nr->vr + 1) % NR_MODULUS; |
sk | 275 | net/netrom/nr_in.c | sk->nr->condition |= OWN_RX_BUSY_CONDITION; |
sk | 278 | net/netrom/nr_in.c | } else if (nr_in_rx_window(sk, ns)) { |
sk | 286 | net/netrom/nr_in.c | skb_queue_tail(&sk->nr->reseq_queue, skbn); |
sk | 288 | net/netrom/nr_in.c | } while (save_vr != sk->nr->vr); |
sk | 292 | net/netrom/nr_in.c | if (((sk->nr->vl + sk->window) % NR_MODULUS) == sk->nr->vr) { |
sk | 293 | net/netrom/nr_in.c | nr_enquiry_response(sk); |
sk | 295 | net/netrom/nr_in.c | if (!(sk->nr->condition & ACK_PENDING_CONDITION)) { |
sk | 296 | net/netrom/nr_in.c | sk->nr->t2timer = sk->nr->t2; |
sk | 297 | net/netrom/nr_in.c | sk->nr->condition |= ACK_PENDING_CONDITION; |
sk | 310 | net/netrom/nr_in.c | int nr_process_rx_frame(struct sock *sk, struct sk_buff *skb) |
sk | 314 | net/netrom/nr_in.c | if (sk->nr->state == NR_STATE_0 && sk->dead) |
sk | 317 | net/netrom/nr_in.c | if (sk->nr->state != NR_STATE_1 && sk->nr->state != NR_STATE_2 && |
sk | 318 | net/netrom/nr_in.c | sk->nr->state != NR_STATE_3) { |
sk | 319 | net/netrom/nr_in.c | printk("nr_process_rx_frame: frame received - state: %d\n", sk->nr->state); |
sk | 323 | net/netrom/nr_in.c | del_timer(&sk->timer); |
sk | 327 | net/netrom/nr_in.c | switch (sk->nr->state) |
sk | 330 | net/netrom/nr_in.c | queued = nr_state1_machine(sk, skb, frametype); |
sk | 333 | net/netrom/nr_in.c | queued = nr_state2_machine(sk, skb, frametype); |
sk | 336 | net/netrom/nr_in.c | queued = nr_state3_machine(sk, skb, frametype); |
sk | 340 | net/netrom/nr_in.c | nr_set_timer(sk); |
sk | 49 | net/netrom/nr_out.c | void nr_output(struct sock *sk, struct sk_buff *skb) |
sk | 55 | net/netrom/nr_out.c | mtu = sk->nr->device->mtu; |
sk | 65 | net/netrom/nr_out.c | if ((skbn = sock_alloc_send_skb(sk, frontlen + mtu, 0, 0, &err)) == NULL) |
sk | 68 | net/netrom/nr_out.c | skbn->sk = sk; |
sk | 87 | net/netrom/nr_out.c | skb_queue_tail(&sk->write_queue, skbn); /* Throw it on the queue */ |
sk | 93 | net/netrom/nr_out.c | skb_queue_tail(&sk->write_queue, skb); /* Throw it on the queue */ |
sk | 96 | net/netrom/nr_out.c | if (sk->nr->state == NR_STATE_3) |
sk | 97 | net/netrom/nr_out.c | nr_kick(sk); |
sk | 104 | net/netrom/nr_out.c | static void nr_send_iframe(struct sock *sk, struct sk_buff *skb) |
sk | 109 | net/netrom/nr_out.c | skb->data[2] = sk->nr->vs; |
sk | 110 | net/netrom/nr_out.c | skb->data[3] = sk->nr->vr; |
sk | 112 | net/netrom/nr_out.c | if (sk->nr->condition & OWN_RX_BUSY_CONDITION) |
sk | 115 | net/netrom/nr_out.c | nr_transmit_buffer(sk, skb); |
sk | 118 | net/netrom/nr_out.c | void nr_send_nak_frame(struct sock *sk) |
sk | 122 | net/netrom/nr_out.c | if ((skb = skb_peek(&sk->nr->ack_queue)) == NULL) |
sk | 128 | net/netrom/nr_out.c | skbn->data[2] = sk->nr->va; |
sk | 129 | net/netrom/nr_out.c | skbn->data[3] = sk->nr->vr; |
sk | 131 | net/netrom/nr_out.c | if (sk->nr->condition & OWN_RX_BUSY_CONDITION) |
sk | 134 | net/netrom/nr_out.c | nr_transmit_buffer(sk, skbn); |
sk | 136 | net/netrom/nr_out.c | sk->nr->condition &= ~ACK_PENDING_CONDITION; |
sk | 137 | net/netrom/nr_out.c | sk->nr->vl = sk->nr->vr; |
sk | 138 | net/netrom/nr_out.c | sk->nr->t1timer = 0; |
sk | 141 | net/netrom/nr_out.c | void nr_kick(struct sock *sk) |
sk | 147 | net/netrom/nr_out.c | del_timer(&sk->timer); |
sk | 149 | net/netrom/nr_out.c | start = (skb_peek(&sk->nr->ack_queue) == NULL) ? sk->nr->va : sk->nr->vs; |
sk | 150 | net/netrom/nr_out.c | end = (sk->nr->va + sk->window) % NR_MODULUS; |
sk | 152 | net/netrom/nr_out.c | if (!(sk->nr->condition & PEER_RX_BUSY_CONDITION) && |
sk | 154 | net/netrom/nr_out.c | skb_peek(&sk->write_queue) != NULL) { |
sk | 156 | net/netrom/nr_out.c | sk->nr->vs = start; |
sk | 166 | net/netrom/nr_out.c | skb = skb_dequeue(&sk->write_queue); |
sk | 170 | net/netrom/nr_out.c | skb_queue_head(&sk->write_queue, skb); |
sk | 174 | net/netrom/nr_out.c | next = (sk->nr->vs + 1) % NR_MODULUS; |
sk | 180 | net/netrom/nr_out.c | nr_send_iframe(sk, skbn); |
sk | 182 | net/netrom/nr_out.c | sk->nr->vs = next; |
sk | 187 | net/netrom/nr_out.c | skb_queue_tail(&sk->nr->ack_queue, skb); |
sk | 189 | net/netrom/nr_out.c | } while (!last && (skb = skb_dequeue(&sk->write_queue)) != NULL); |
sk | 191 | net/netrom/nr_out.c | sk->nr->vl = sk->nr->vr; |
sk | 192 | net/netrom/nr_out.c | sk->nr->condition &= ~ACK_PENDING_CONDITION; |
sk | 194 | net/netrom/nr_out.c | if (sk->nr->t1timer == 0) { |
sk | 195 | net/netrom/nr_out.c | sk->nr->t1timer = sk->nr->t1 = nr_calculate_t1(sk); |
sk | 199 | net/netrom/nr_out.c | nr_set_timer(sk); |
sk | 202 | net/netrom/nr_out.c | void nr_transmit_buffer(struct sock *sk, struct sk_buff *skb) |
sk | 211 | net/netrom/nr_out.c | memcpy(dptr, &sk->nr->source_addr, AX25_ADDR_LEN); |
sk | 217 | net/netrom/nr_out.c | memcpy(dptr, &sk->nr->dest_addr, AX25_ADDR_LEN); |
sk | 230 | net/netrom/nr_out.c | sk->state = TCP_CLOSE; |
sk | 231 | net/netrom/nr_out.c | sk->err = ENETUNREACH; |
sk | 232 | net/netrom/nr_out.c | if (!sk->dead) |
sk | 233 | net/netrom/nr_out.c | sk->state_change(sk); |
sk | 234 | net/netrom/nr_out.c | sk->dead = 1; |
sk | 243 | net/netrom/nr_out.c | void nr_establish_data_link(struct sock *sk) |
sk | 245 | net/netrom/nr_out.c | sk->nr->condition = 0x00; |
sk | 246 | net/netrom/nr_out.c | sk->nr->n2count = 0; |
sk | 248 | net/netrom/nr_out.c | nr_write_internal(sk, NR_CONNREQ); |
sk | 250 | net/netrom/nr_out.c | sk->nr->t2timer = 0; |
sk | 251 | net/netrom/nr_out.c | sk->nr->t1timer = sk->nr->t1 = nr_calculate_t1(sk); |
sk | 257 | net/netrom/nr_out.c | void nr_enquiry_response(struct sock *sk) |
sk | 261 | net/netrom/nr_out.c | if (sk->nr->condition & OWN_RX_BUSY_CONDITION) { |
sk | 264 | net/netrom/nr_out.c | if (skb_peek(&sk->nr->reseq_queue) != NULL) { |
sk | 269 | net/netrom/nr_out.c | nr_write_internal(sk, frametype); |
sk | 271 | net/netrom/nr_out.c | sk->nr->vl = sk->nr->vr; |
sk | 272 | net/netrom/nr_out.c | sk->nr->condition &= ~ACK_PENDING_CONDITION; |
sk | 275 | net/netrom/nr_out.c | void nr_check_iframes_acked(struct sock *sk, unsigned short nr) |
sk | 277 | net/netrom/nr_out.c | if (sk->nr->vs == nr) { |
sk | 278 | net/netrom/nr_out.c | nr_frames_acked(sk, nr); |
sk | 279 | net/netrom/nr_out.c | nr_calculate_rtt(sk); |
sk | 280 | net/netrom/nr_out.c | sk->nr->t1timer = 0; |
sk | 281 | net/netrom/nr_out.c | sk->nr->n2count = 0; |
sk | 283 | net/netrom/nr_out.c | if (sk->nr->va != nr) { |
sk | 284 | net/netrom/nr_out.c | nr_frames_acked(sk, nr); |
sk | 285 | net/netrom/nr_out.c | sk->nr->t1timer = sk->nr->t1 = nr_calculate_t1(sk); |
sk | 47 | net/netrom/nr_subr.c | void nr_clear_queues(struct sock *sk) |
sk | 51 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->write_queue)) != NULL) { |
sk | 52 | net/netrom/nr_subr.c | skb->sk = sk; |
sk | 57 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->ack_queue)) != NULL) { |
sk | 58 | net/netrom/nr_subr.c | skb->sk = sk; |
sk | 63 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->reseq_queue)) != NULL) { |
sk | 67 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->frag_queue)) != NULL) { |
sk | 77 | net/netrom/nr_subr.c | void nr_frames_acked(struct sock *sk, unsigned short nr) |
sk | 84 | net/netrom/nr_subr.c | if (sk->nr->va != nr) { |
sk | 85 | net/netrom/nr_subr.c | while (skb_peek(&sk->nr->ack_queue) != NULL && sk->nr->va != nr) { |
sk | 86 | net/netrom/nr_subr.c | skb = skb_dequeue(&sk->nr->ack_queue); |
sk | 87 | net/netrom/nr_subr.c | skb->sk = sk; |
sk | 90 | net/netrom/nr_subr.c | sk->nr->va = (sk->nr->va + 1) % NR_MODULUS; |
sk | 100 | net/netrom/nr_subr.c | void nr_requeue_frames(struct sock *sk) |
sk | 104 | net/netrom/nr_subr.c | while ((skb = skb_dequeue(&sk->nr->ack_queue)) != NULL) { |
sk | 106 | net/netrom/nr_subr.c | skb_queue_head(&sk->write_queue, skb); |
sk | 117 | net/netrom/nr_subr.c | int nr_validate_nr(struct sock *sk, unsigned short nr) |
sk | 119 | net/netrom/nr_subr.c | unsigned short vc = sk->nr->va; |
sk | 121 | net/netrom/nr_subr.c | while (vc != sk->nr->vs) { |
sk | 126 | net/netrom/nr_subr.c | if (nr == sk->nr->vs) return 1; |
sk | 134 | net/netrom/nr_subr.c | int nr_in_rx_window(struct sock *sk, unsigned short ns) |
sk | 136 | net/netrom/nr_subr.c | unsigned short vc = sk->nr->vr; |
sk | 137 | net/netrom/nr_subr.c | unsigned short vt = (sk->nr->vl + sk->window) % NR_MODULUS; |
sk | 151 | net/netrom/nr_subr.c | void nr_write_internal(struct sock *sk, int frametype) |
sk | 164 | net/netrom/nr_subr.c | len += (sk->nr->bpqext) ? 2 : 1; |
sk | 188 | net/netrom/nr_subr.c | timeout = (sk->nr->rtt / PR_SLOWHZ) * 2; |
sk | 189 | net/netrom/nr_subr.c | *dptr++ = sk->nr->my_index; |
sk | 190 | net/netrom/nr_subr.c | *dptr++ = sk->nr->my_id; |
sk | 194 | net/netrom/nr_subr.c | *dptr++ = sk->window; |
sk | 195 | net/netrom/nr_subr.c | memcpy(dptr, &sk->nr->user_addr, AX25_ADDR_LEN); |
sk | 200 | net/netrom/nr_subr.c | memcpy(dptr, &sk->nr->source_addr, AX25_ADDR_LEN); |
sk | 210 | net/netrom/nr_subr.c | *dptr++ = sk->nr->your_index; |
sk | 211 | net/netrom/nr_subr.c | *dptr++ = sk->nr->your_id; |
sk | 212 | net/netrom/nr_subr.c | *dptr++ = sk->nr->my_index; |
sk | 213 | net/netrom/nr_subr.c | *dptr++ = sk->nr->my_id; |
sk | 215 | net/netrom/nr_subr.c | *dptr++ = sk->window; |
sk | 216 | net/netrom/nr_subr.c | if (sk->nr->bpqext) *dptr++ = nr_default.ttl; |
sk | 221 | net/netrom/nr_subr.c | *dptr++ = sk->nr->your_index; |
sk | 222 | net/netrom/nr_subr.c | *dptr++ = sk->nr->your_id; |
sk | 229 | net/netrom/nr_subr.c | *dptr++ = sk->nr->your_index; |
sk | 230 | net/netrom/nr_subr.c | *dptr++ = sk->nr->your_id; |
sk | 232 | net/netrom/nr_subr.c | *dptr++ = sk->nr->vr; |
sk | 239 | net/netrom/nr_subr.c | nr_transmit_buffer(sk, skb); |
sk | 283 | net/netrom/nr_subr.c | skbn->sk = NULL; |
sk | 292 | net/netrom/nr_subr.c | unsigned short nr_calculate_t1(struct sock *sk) |
sk | 296 | net/netrom/nr_subr.c | for (t = 2, n = 0; n < sk->nr->n2count; n++) |
sk | 301 | net/netrom/nr_subr.c | return t * sk->nr->rtt; |
sk | 307 | net/netrom/nr_subr.c | void nr_calculate_rtt(struct sock *sk) |
sk | 309 | net/netrom/nr_subr.c | if (sk->nr->t1timer > 0 && sk->nr->n2count == 0) |
sk | 310 | net/netrom/nr_subr.c | sk->nr->rtt = (9 * sk->nr->rtt + sk->nr->t1 - sk->nr->t1timer) / 10; |
sk | 314 | net/netrom/nr_subr.c | if (sk->nr->rtt < (NR_T1CLAMPLO)) |
sk | 315 | net/netrom/nr_subr.c | sk->nr->rtt = (NR_T1CLAMPLO); |
sk | 317 | net/netrom/nr_subr.c | if (sk->nr->rtt == 0) |
sk | 318 | net/netrom/nr_subr.c | sk->nr->rtt = PR_SLOWHZ; |
sk | 322 | net/netrom/nr_subr.c | if (sk->nr->rtt > (NR_T1CLAMPHI)) |
sk | 323 | net/netrom/nr_subr.c | sk->nr->rtt = (NR_T1CLAMPHI); |
sk | 48 | net/netrom/nr_timer.c | void nr_set_timer(struct sock *sk) |
sk | 54 | net/netrom/nr_timer.c | del_timer(&sk->timer); |
sk | 57 | net/netrom/nr_timer.c | sk->timer.next = sk->timer.prev = NULL; |
sk | 58 | net/netrom/nr_timer.c | sk->timer.data = (unsigned long)sk; |
sk | 59 | net/netrom/nr_timer.c | sk->timer.function = &nr_timer; |
sk | 61 | net/netrom/nr_timer.c | sk->timer.expires = jiffies+10; |
sk | 62 | net/netrom/nr_timer.c | add_timer(&sk->timer); |
sk | 65 | net/netrom/nr_timer.c | static void nr_reset_timer(struct sock *sk) |
sk | 71 | net/netrom/nr_timer.c | del_timer(&sk->timer); |
sk | 74 | net/netrom/nr_timer.c | sk->timer.data = (unsigned long)sk; |
sk | 75 | net/netrom/nr_timer.c | sk->timer.function = &nr_timer; |
sk | 76 | net/netrom/nr_timer.c | sk->timer.expires = jiffies+10; |
sk | 77 | net/netrom/nr_timer.c | add_timer(&sk->timer); |
sk | 88 | net/netrom/nr_timer.c | struct sock *sk = (struct sock *)param; |
sk | 90 | net/netrom/nr_timer.c | switch (sk->nr->state) { |
sk | 94 | net/netrom/nr_timer.c | if (sk->destroy || (sk->state == TCP_LISTEN && sk->dead)) { |
sk | 95 | net/netrom/nr_timer.c | del_timer(&sk->timer); |
sk | 96 | net/netrom/nr_timer.c | nr_destroy_socket(sk); |
sk | 105 | net/netrom/nr_timer.c | if (sk->rmem_alloc < (sk->rcvbuf / 2) && (sk->nr->condition & OWN_RX_BUSY_CONDITION)) { |
sk | 106 | net/netrom/nr_timer.c | sk->nr->condition &= ~OWN_RX_BUSY_CONDITION; |
sk | 107 | net/netrom/nr_timer.c | nr_write_internal(sk, NR_INFOACK); |
sk | 108 | net/netrom/nr_timer.c | sk->nr->condition &= ~ACK_PENDING_CONDITION; |
sk | 109 | net/netrom/nr_timer.c | sk->nr->vl = sk->nr->vr; |
sk | 115 | net/netrom/nr_timer.c | nr_kick(sk); |
sk | 122 | net/netrom/nr_timer.c | if (sk->nr->t2timer > 0 && --sk->nr->t2timer == 0) { |
sk | 123 | net/netrom/nr_timer.c | if (sk->nr->state == NR_STATE_3) { |
sk | 124 | net/netrom/nr_timer.c | if (sk->nr->condition & ACK_PENDING_CONDITION) { |
sk | 125 | net/netrom/nr_timer.c | sk->nr->condition &= ~ACK_PENDING_CONDITION; |
sk | 126 | net/netrom/nr_timer.c | nr_enquiry_response(sk); |
sk | 131 | net/netrom/nr_timer.c | if (sk->nr->t4timer > 0 && --sk->nr->t4timer == 0) { |
sk | 132 | net/netrom/nr_timer.c | sk->nr->condition &= ~PEER_RX_BUSY_CONDITION; |
sk | 135 | net/netrom/nr_timer.c | if (sk->nr->t1timer == 0 || --sk->nr->t1timer > 0) { |
sk | 136 | net/netrom/nr_timer.c | nr_reset_timer(sk); |
sk | 140 | net/netrom/nr_timer.c | switch (sk->nr->state) { |
sk | 142 | net/netrom/nr_timer.c | if (sk->nr->n2count == sk->nr->n2) { |
sk | 143 | net/netrom/nr_timer.c | nr_clear_queues(sk); |
sk | 144 | net/netrom/nr_timer.c | sk->nr->state = NR_STATE_0; |
sk | 145 | net/netrom/nr_timer.c | sk->state = TCP_CLOSE; |
sk | 146 | net/netrom/nr_timer.c | sk->err = ETIMEDOUT; |
sk | 147 | net/netrom/nr_timer.c | if (!sk->dead) |
sk | 148 | net/netrom/nr_timer.c | sk->state_change(sk); |
sk | 149 | net/netrom/nr_timer.c | sk->dead = 1; |
sk | 151 | net/netrom/nr_timer.c | sk->nr->n2count++; |
sk | 152 | net/netrom/nr_timer.c | nr_write_internal(sk, NR_CONNREQ); |
sk | 157 | net/netrom/nr_timer.c | if (sk->nr->n2count == sk->nr->n2) { |
sk | 158 | net/netrom/nr_timer.c | nr_clear_queues(sk); |
sk | 159 | net/netrom/nr_timer.c | sk->nr->state = NR_STATE_0; |
sk | 160 | net/netrom/nr_timer.c | sk->state = TCP_CLOSE; |
sk | 161 | net/netrom/nr_timer.c | sk->err = ETIMEDOUT; |
sk | 162 | net/netrom/nr_timer.c | if (!sk->dead) |
sk | 163 | net/netrom/nr_timer.c | sk->state_change(sk); |
sk | 164 | net/netrom/nr_timer.c | sk->dead = 1; |
sk | 166 | net/netrom/nr_timer.c | sk->nr->n2count++; |
sk | 167 | net/netrom/nr_timer.c | nr_write_internal(sk, NR_DISCREQ); |
sk | 172 | net/netrom/nr_timer.c | if (sk->nr->n2count == sk->nr->n2) { |
sk | 173 | net/netrom/nr_timer.c | nr_clear_queues(sk); |
sk | 174 | net/netrom/nr_timer.c | sk->nr->state = NR_STATE_0; |
sk | 175 | net/netrom/nr_timer.c | sk->state = TCP_CLOSE; |
sk | 176 | net/netrom/nr_timer.c | sk->err = ETIMEDOUT; |
sk | 177 | net/netrom/nr_timer.c | if (!sk->dead) |
sk | 178 | net/netrom/nr_timer.c | sk->state_change(sk); |
sk | 179 | net/netrom/nr_timer.c | sk->dead = 1; |
sk | 181 | net/netrom/nr_timer.c | sk->nr->n2count++; |
sk | 182 | net/netrom/nr_timer.c | nr_requeue_frames(sk); |
sk | 187 | net/netrom/nr_timer.c | sk->nr->t1timer = sk->nr->t1 = nr_calculate_t1(sk); |
sk | 189 | net/netrom/nr_timer.c | nr_set_timer(sk); |
sk | 94 | net/unix/af_unix.c | static void unix_remove_socket(unix_socket *sk) |
sk | 103 | net/unix/af_unix.c | if(*s==sk) |
sk | 105 | net/unix/af_unix.c | *s=sk->next; |
sk | 114 | net/unix/af_unix.c | static void unix_insert_socket(unix_socket *sk) |
sk | 117 | net/unix/af_unix.c | sk->next=unix_socket_list; |
sk | 118 | net/unix/af_unix.c | unix_socket_list=sk; |
sk | 146 | net/unix/af_unix.c | unix_socket *sk=(unix_socket *)data; |
sk | 147 | net/unix/af_unix.c | if(sk->protinfo.af_unix.locks==0 && sk->wmem_alloc==0) |
sk | 149 | net/unix/af_unix.c | if(sk->protinfo.af_unix.name) |
sk | 150 | net/unix/af_unix.c | kfree(sk->protinfo.af_unix.name); |
sk | 151 | net/unix/af_unix.c | kfree_s(sk,sizeof(*sk)); |
sk | 159 | net/unix/af_unix.c | sk->timer.expires=jiffies+10*HZ; /* No real hurry try it every 10 seconds or so */ |
sk | 160 | net/unix/af_unix.c | add_timer(&sk->timer); |
sk | 164 | net/unix/af_unix.c | static void unix_delayed_delete(unix_socket *sk) |
sk | 166 | net/unix/af_unix.c | sk->timer.data=(unsigned long)sk; |
sk | 167 | net/unix/af_unix.c | sk->timer.expires=jiffies+HZ; /* Normally 1 second after will clean up. After that we try every 10 */ |
sk | 168 | net/unix/af_unix.c | sk->timer.function=unix_destroy_timer; |
sk | 169 | net/unix/af_unix.c | add_timer(&sk->timer); |
sk | 172 | net/unix/af_unix.c | static void unix_destroy_socket(unix_socket *sk) |
sk | 176 | net/unix/af_unix.c | unix_remove_socket(sk); |
sk | 178 | net/unix/af_unix.c | while((skb=skb_dequeue(&sk->receive_queue))!=NULL) |
sk | 180 | net/unix/af_unix.c | if(sk->state==TCP_LISTEN) |
sk | 182 | net/unix/af_unix.c | unix_socket *osk=skb->sk; |
sk | 195 | net/unix/af_unix.c | if(sk->protinfo.af_unix.inode!=NULL) |
sk | 197 | net/unix/af_unix.c | iput(sk->protinfo.af_unix.inode); |
sk | 198 | net/unix/af_unix.c | sk->protinfo.af_unix.inode=NULL; |
sk | 201 | net/unix/af_unix.c | if(--sk->protinfo.af_unix.locks==0 && sk->wmem_alloc==0) |
sk | 203 | net/unix/af_unix.c | if(sk->protinfo.af_unix.name) |
sk | 204 | net/unix/af_unix.c | kfree(sk->protinfo.af_unix.name); |
sk | 205 | net/unix/af_unix.c | kfree_s(sk,sizeof(*sk)); |
sk | 209 | net/unix/af_unix.c | sk->dead=1; |
sk | 210 | net/unix/af_unix.c | unix_delayed_delete(sk); /* Try every so often until buffers are all freed */ |
sk | 229 | net/unix/af_unix.c | unix_socket *sk=sock->data; |
sk | 232 | net/unix/af_unix.c | return sock_setsockopt(sk,level,optname,optval,optlen); |
sk | 237 | net/unix/af_unix.c | unix_socket *sk=sock->data; |
sk | 240 | net/unix/af_unix.c | return sock_getsockopt(sk,level,optname,optval,optlen); |
sk | 245 | net/unix/af_unix.c | unix_socket *sk=sock->data; |
sk | 246 | net/unix/af_unix.c | if(sk->type!=SOCK_STREAM) |
sk | 248 | net/unix/af_unix.c | if(sk->protinfo.af_unix.name==NULL) |
sk | 250 | net/unix/af_unix.c | sk->max_ack_backlog=backlog; |
sk | 251 | net/unix/af_unix.c | sk->state=TCP_LISTEN; |
sk | 255 | net/unix/af_unix.c | static void def_callback1(struct sock *sk) |
sk | 257 | net/unix/af_unix.c | if(!sk->dead) |
sk | 258 | net/unix/af_unix.c | wake_up_interruptible(sk->sleep); |
sk | 261 | net/unix/af_unix.c | static void def_callback2(struct sock *sk, int len) |
sk | 263 | net/unix/af_unix.c | if(!sk->dead) |
sk | 265 | net/unix/af_unix.c | wake_up_interruptible(sk->sleep); |
sk | 266 | net/unix/af_unix.c | sock_wake_async(sk->socket, 1); |
sk | 270 | net/unix/af_unix.c | static void def_callback3(struct sock *sk) |
sk | 272 | net/unix/af_unix.c | if(!sk->dead) |
sk | 274 | net/unix/af_unix.c | wake_up_interruptible(sk->sleep); |
sk | 275 | net/unix/af_unix.c | sock_wake_async(sk->socket, 2); |
sk | 281 | net/unix/af_unix.c | unix_socket *sk; |
sk | 284 | net/unix/af_unix.c | sk=(unix_socket *)kmalloc(sizeof(*sk),GFP_KERNEL); |
sk | 285 | net/unix/af_unix.c | if(sk==NULL) |
sk | 300 | net/unix/af_unix.c | kfree_s(sk,sizeof(*sk)); |
sk | 303 | net/unix/af_unix.c | sk->type=sock->type; |
sk | 304 | net/unix/af_unix.c | init_timer(&sk->timer); |
sk | 305 | net/unix/af_unix.c | skb_queue_head_init(&sk->write_queue); |
sk | 306 | net/unix/af_unix.c | skb_queue_head_init(&sk->receive_queue); |
sk | 307 | net/unix/af_unix.c | skb_queue_head_init(&sk->back_log); |
sk | 308 | net/unix/af_unix.c | sk->protinfo.af_unix.family=AF_UNIX; |
sk | 309 | net/unix/af_unix.c | sk->protinfo.af_unix.inode=NULL; |
sk | 310 | net/unix/af_unix.c | sk->protinfo.af_unix.locks=1; /* Us */ |
sk | 311 | net/unix/af_unix.c | sk->protinfo.af_unix.readsem=MUTEX; /* single task reading lock */ |
sk | 312 | net/unix/af_unix.c | sk->protinfo.af_unix.name=NULL; |
sk | 313 | net/unix/af_unix.c | sk->protinfo.af_unix.other=NULL; |
sk | 314 | net/unix/af_unix.c | sk->protocol=0; |
sk | 315 | net/unix/af_unix.c | sk->rmem_alloc=0; |
sk | 316 | net/unix/af_unix.c | sk->wmem_alloc=0; |
sk | 317 | net/unix/af_unix.c | sk->dead=0; |
sk | 318 | net/unix/af_unix.c | sk->next=NULL; |
sk | 319 | net/unix/af_unix.c | sk->broadcast=0; |
sk | 320 | net/unix/af_unix.c | sk->rcvbuf=SK_RMEM_MAX; |
sk | 321 | net/unix/af_unix.c | sk->sndbuf=SK_WMEM_MAX; |
sk | 322 | net/unix/af_unix.c | sk->allocation=GFP_KERNEL; |
sk | 323 | net/unix/af_unix.c | sk->users=0; |
sk | 324 | net/unix/af_unix.c | sk->bsdism=0; |
sk | 325 | net/unix/af_unix.c | sk->debug=0; |
sk | 326 | net/unix/af_unix.c | sk->prot=NULL; |
sk | 327 | net/unix/af_unix.c | sk->err=0; |
sk | 328 | net/unix/af_unix.c | sk->localroute=0; |
sk | 329 | net/unix/af_unix.c | sk->send_head=NULL; |
sk | 330 | net/unix/af_unix.c | sk->state=TCP_CLOSE; |
sk | 331 | net/unix/af_unix.c | sk->priority=SOPRI_NORMAL; |
sk | 332 | net/unix/af_unix.c | sk->ack_backlog=0; |
sk | 333 | net/unix/af_unix.c | sk->shutdown=0; |
sk | 334 | net/unix/af_unix.c | sk->state_change=def_callback1; |
sk | 335 | net/unix/af_unix.c | sk->data_ready=def_callback2; |
sk | 336 | net/unix/af_unix.c | sk->write_space=def_callback3; |
sk | 337 | net/unix/af_unix.c | sk->error_report=def_callback1; |
sk | 338 | net/unix/af_unix.c | sk->mtu=4096; |
sk | 339 | net/unix/af_unix.c | sk->socket=sock; |
sk | 340 | net/unix/af_unix.c | sock->data=(void *)sk; |
sk | 341 | net/unix/af_unix.c | sk->sleep=sock->wait; |
sk | 342 | net/unix/af_unix.c | sk->zapped=0; |
sk | 343 | net/unix/af_unix.c | unix_insert_socket(sk); |
sk | 354 | net/unix/af_unix.c | unix_socket *sk=sock->data; |
sk | 359 | net/unix/af_unix.c | if(sk==NULL) |
sk | 362 | net/unix/af_unix.c | sk->state_change(sk); |
sk | 363 | net/unix/af_unix.c | sk->dead=1; |
sk | 364 | net/unix/af_unix.c | skpair=(unix_socket *)sk->protinfo.af_unix.other; /* Person we send to (default) */ |
sk | 365 | net/unix/af_unix.c | if(sk->type==SOCK_STREAM && skpair!=NULL && skpair->state!=TCP_LISTEN) |
sk | 372 | net/unix/af_unix.c | sk->protinfo.af_unix.other=NULL; /* No pair */ |
sk | 373 | net/unix/af_unix.c | unix_destroy_socket(sk); /* Try and flush out this socket. Throw our buffers at least */ |
sk | 414 | net/unix/af_unix.c | unix_socket *sk=sock->data; |
sk | 418 | net/unix/af_unix.c | if(sk->protinfo.af_unix.name) |
sk | 427 | net/unix/af_unix.c | if(sk->protinfo.af_unix.inode!=NULL) |
sk | 430 | net/unix/af_unix.c | sk->protinfo.af_unix.name=kmalloc(addr_len+1, GFP_KERNEL); |
sk | 431 | net/unix/af_unix.c | if(sk->protinfo.af_unix.name==NULL) |
sk | 433 | net/unix/af_unix.c | memcpy(sk->protinfo.af_unix.name, sunaddr->sun_path, addr_len+1); |
sk | 438 | net/unix/af_unix.c | err=do_mknod(sk->protinfo.af_unix.name,S_IFSOCK|S_IRWXUGO,0); |
sk | 440 | net/unix/af_unix.c | err=open_namei(sk->protinfo.af_unix.name, 2, S_IFSOCK, &sk->protinfo.af_unix.inode, NULL); |
sk | 446 | net/unix/af_unix.c | kfree_s(sk->protinfo.af_unix.name,addr_len+1); |
sk | 447 | net/unix/af_unix.c | sk->protinfo.af_unix.name=NULL; |
sk | 460 | net/unix/af_unix.c | unix_socket *sk=sock->data; |
sk | 466 | net/unix/af_unix.c | if(sk->type==SOCK_STREAM && sk->protinfo.af_unix.other) |
sk | 468 | net/unix/af_unix.c | if(sock->state==SS_CONNECTING && sk->state==TCP_ESTABLISHED) |
sk | 473 | net/unix/af_unix.c | if(sock->state==SS_CONNECTING && sk->state == TCP_CLOSE) |
sk | 492 | net/unix/af_unix.c | if(sk->type==SOCK_DGRAM) |
sk | 494 | net/unix/af_unix.c | if(sk->protinfo.af_unix.other) |
sk | 496 | net/unix/af_unix.c | sk->protinfo.af_unix.other->protinfo.af_unix.locks--; |
sk | 497 | net/unix/af_unix.c | sk->protinfo.af_unix.other=NULL; |
sk | 503 | net/unix/af_unix.c | if(other->type!=sk->type) |
sk | 506 | net/unix/af_unix.c | sk->protinfo.af_unix.other=other; |
sk | 508 | net/unix/af_unix.c | sk->state=TCP_ESTABLISHED; |
sk | 519 | net/unix/af_unix.c | skb=sock_alloc_send_skb(sk, 0, 0, 0, &err); /* Marker object */ |
sk | 522 | net/unix/af_unix.c | skb->sk=sk; /* So they know it is us */ |
sk | 524 | net/unix/af_unix.c | sk->state=TCP_CLOSE; |
sk | 532 | net/unix/af_unix.c | if(other->type!=sk->type) |
sk | 539 | net/unix/af_unix.c | sk->protinfo.af_unix.other=other; |
sk | 541 | net/unix/af_unix.c | sk->state=TCP_SYN_SENT; |
sk | 551 | net/unix/af_unix.c | while(sk->state==TCP_SYN_SENT) |
sk | 558 | net/unix/af_unix.c | interruptible_sleep_on(sk->sleep); |
sk | 570 | net/unix/af_unix.c | if(sk->state==TCP_CLOSE) |
sk | 572 | net/unix/af_unix.c | sk->protinfo.af_unix.other->protinfo.af_unix.locks--; |
sk | 573 | net/unix/af_unix.c | sk->protinfo.af_unix.other=NULL; |
sk | 608 | net/unix/af_unix.c | unix_socket *sk=sock->data; |
sk | 612 | net/unix/af_unix.c | if(sk->type!=SOCK_STREAM) |
sk | 616 | net/unix/af_unix.c | if(sk->state!=TCP_LISTEN) |
sk | 622 | net/unix/af_unix.c | if(sk->protinfo.af_unix.name!=NULL) |
sk | 624 | net/unix/af_unix.c | newsk->protinfo.af_unix.name=kmalloc(strlen(sk->protinfo.af_unix.name)+1, GFP_KERNEL); |
sk | 627 | net/unix/af_unix.c | strcpy(newsk->protinfo.af_unix.name, sk->protinfo.af_unix.name); |
sk | 633 | net/unix/af_unix.c | skb=skb_dequeue(&sk->receive_queue); |
sk | 641 | net/unix/af_unix.c | interruptible_sleep_on(sk->sleep); |
sk | 651 | net/unix/af_unix.c | tsk=skb->sk; |
sk | 653 | net/unix/af_unix.c | sk->ack_backlog--; |
sk | 659 | net/unix/af_unix.c | sk->protinfo.af_unix.locks--; /* Locked to child socket not master */ |
sk | 669 | net/unix/af_unix.c | unix_socket *sk=sock->data; |
sk | 674 | net/unix/af_unix.c | if(sk->protinfo.af_unix.other==NULL) |
sk | 676 | net/unix/af_unix.c | sk=sk->protinfo.af_unix.other; |
sk | 679 | net/unix/af_unix.c | if(sk->protinfo.af_unix.name==NULL) |
sk | 685 | net/unix/af_unix.c | *uaddr_len=sizeof(sunaddr->sun_family)+strlen(sk->protinfo.af_unix.name)+1; |
sk | 686 | net/unix/af_unix.c | strcpy(sunaddr->sun_path,sk->protinfo.af_unix.name); /* 108 byte limited */ |
sk | 720 | net/unix/af_unix.c | static int unix_fd_copy(struct sock *sk, struct cmsghdr *cmsg, struct file **fp) |
sk | 764 | net/unix/af_unix.c | static void unix_fd_free(struct sock *sk, struct file **fp, int num) |
sk | 891 | net/unix/af_unix.c | unix_socket *sk=sock->data; |
sk | 904 | net/unix/af_unix.c | if(sk->err) |
sk | 905 | net/unix/af_unix.c | return sock_error(sk); |
sk | 918 | net/unix/af_unix.c | if(sk->state==TCP_ESTABLISHED) |
sk | 927 | net/unix/af_unix.c | if(sk->protinfo.af_unix.other==NULL) |
sk | 946 | net/unix/af_unix.c | fpnum=unix_fd_copy(sk,cm,fp); |
sk | 962 | net/unix/af_unix.c | if(size>(sk->sndbuf-sizeof(struct sk_buff))/2) /* Keep two messages in the pipe so it schedules better */ |
sk | 966 | net/unix/af_unix.c | unix_fd_free(sk,fp,fpnum); |
sk | 969 | net/unix/af_unix.c | size=(sk->sndbuf-sizeof(struct sk_buff))/2; |
sk | 986 | net/unix/af_unix.c | skb=sock_alloc_send_skb(sk,size,limit,nonblock, &err); |
sk | 990 | net/unix/af_unix.c | unix_fd_free(sk,fp,fpnum); |
sk | 993 | net/unix/af_unix.c | sk->err=-err; |
sk | 1000 | net/unix/af_unix.c | skb->sk=sk; |
sk | 1016 | net/unix/af_unix.c | other=sk->protinfo.af_unix.other; |
sk | 1020 | net/unix/af_unix.c | sk->protinfo.af_unix.other=NULL; |
sk | 1057 | net/unix/af_unix.c | static void unix_data_wait(unix_socket * sk) |
sk | 1060 | net/unix/af_unix.c | if (!skb_peek(&sk->receive_queue)) { |
sk | 1061 | net/unix/af_unix.c | sk->socket->flags |= SO_WAITDATA; |
sk | 1062 | net/unix/af_unix.c | interruptible_sleep_on(sk->sleep); |
sk | 1063 | net/unix/af_unix.c | sk->socket->flags &= ~SO_WAITDATA; |
sk | 1070 | net/unix/af_unix.c | unix_socket *sk=sock->data; |
sk | 1087 | net/unix/af_unix.c | if(sk->err) |
sk | 1088 | net/unix/af_unix.c | return sock_error(sk); |
sk | 1110 | net/unix/af_unix.c | down(&sk->protinfo.af_unix.readsem); /* Lock the socket */ |
sk | 1124 | net/unix/af_unix.c | skb=skb_dequeue(&sk->receive_queue); |
sk | 1127 | net/unix/af_unix.c | up(&sk->protinfo.af_unix.readsem); |
sk | 1128 | net/unix/af_unix.c | if(sk->shutdown & RCV_SHUTDOWN) |
sk | 1136 | net/unix/af_unix.c | unix_data_wait(sk); |
sk | 1137 | net/unix/af_unix.c | down(&sk->protinfo.af_unix.readsem); |
sk | 1143 | net/unix/af_unix.c | if(skb->sk->protinfo.af_unix.name) |
sk | 1145 | net/unix/af_unix.c | memcpy(sunaddr->sun_path, skb->sk->protinfo.af_unix.name, 108); |
sk | 1167 | net/unix/af_unix.c | skb_queue_head(&sk->receive_queue, skb); |
sk | 1176 | net/unix/af_unix.c | up(&sk->protinfo.af_unix.readsem); |
sk | 1184 | net/unix/af_unix.c | unix_socket *sk=(unix_socket *)sock->data; |
sk | 1185 | net/unix/af_unix.c | unix_socket *other=sk->protinfo.af_unix.other; |
sk | 1188 | net/unix/af_unix.c | sk->shutdown|=SEND_SHUTDOWN; |
sk | 1189 | net/unix/af_unix.c | sk->state_change(sk); |
sk | 1196 | net/unix/af_unix.c | other=sk->protinfo.af_unix.other; |
sk | 1199 | net/unix/af_unix.c | sk->shutdown|=RCV_SHUTDOWN; |
sk | 1200 | net/unix/af_unix.c | sk->state_change(sk); |
sk | 1218 | net/unix/af_unix.c | unix_socket *sk=sock->data; |
sk | 1229 | net/unix/af_unix.c | amount=sk->sndbuf-sk->wmem_alloc; |
sk | 1237 | net/unix/af_unix.c | if(sk->state==TCP_LISTEN) |
sk | 1240 | net/unix/af_unix.c | if((skb=skb_peek(&sk->receive_queue))!=NULL) |
sk | 195 | net/unix/garbage.c | unix_socket *f=NULL,*sk; |
sk | 222 | net/unix/garbage.c | if((sk=unix_get_socket(*fp++))!=NULL) |
sk | 229 | net/unix/garbage.c | f=sk; |
sk | 231 | net/unix/garbage.c | maybe_mark_and_push(sk); |