Diffstat (limited to 'include/net/sock.h')
-rw-r--r--	include/net/sock.h	118
1 file changed, 95 insertions(+), 23 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index a1042d08becd53..312cb25cbd18bf 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -88,6 +88,7 @@ do { spin_lock_init(&((__sk)->sk_lock.slock)); \
} while(0)
struct sock;
+struct proto;
/**
* struct sock_common - minimal network layer representation of sockets
@@ -98,10 +99,11 @@ struct sock;
* @skc_node: main hash linkage for various protocol lookup tables
* @skc_bind_node: bind hash linkage for various protocol lookup tables
* @skc_refcnt: reference count
+ * @skc_prot: protocol handlers inside a network family
*
* This is the minimal network layer representation of sockets, the header
- * for struct sock and struct tcp_tw_bucket.
- */
+ * for struct sock and struct inet_timewait_sock.
+ */
struct sock_common {
unsigned short skc_family;
volatile unsigned char skc_state;
@@ -110,11 +112,12 @@ struct sock_common {
struct hlist_node skc_node;
struct hlist_node skc_bind_node;
atomic_t skc_refcnt;
+ struct proto *skc_prot;
};
/**
* struct sock - network layer representation of sockets
- * @__sk_common: shared layout with tcp_tw_bucket
+ * @__sk_common: shared layout with inet_timewait_sock
* @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
* @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
* @sk_lock: synchronizer
@@ -136,11 +139,10 @@ struct sock_common {
 * @sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
* @sk_lingertime: %SO_LINGER l_linger setting
- * @sk_hashent: hash entry in several tables (e.g. tcp_ehash)
+ * @sk_hashent: hash entry in several tables (e.g. inet_hashinfo.ehash)
* @sk_backlog: always used with the per-socket spinlock held
* @sk_callback_lock: used with the callbacks in the end of this struct
* @sk_error_queue: rarely used
- * @sk_prot: protocol handlers inside a network family
* @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt, IPV6_ADDRFORM for instance)
* @sk_err: last error
* @sk_err_soft: errors that don't cause failure but are the cause of a persistent failure not just 'timed out'
@@ -173,7 +175,7 @@ struct sock_common {
*/
struct sock {
/*
- * Now struct tcp_tw_bucket also uses sock_common, so please just
+ * Now struct inet_timewait_sock also uses sock_common, so please just
 * don't add anything before this first member (__sk_common) --acme
*/
struct sock_common __sk_common;
@@ -184,6 +186,7 @@ struct sock {
#define sk_node __sk_common.skc_node
#define sk_bind_node __sk_common.skc_bind_node
#define sk_refcnt __sk_common.skc_refcnt
+#define sk_prot __sk_common.skc_prot
unsigned char sk_shutdown : 2,
sk_no_check : 2,
sk_userlocks : 4;
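With sk_prot moved into struct sock_common as skc_prot, the #define above keeps every existing sk->sk_prot reference compiling unchanged while making the protocol pointer reachable from anything that embeds the common header. A minimal sketch of how the timewait side is expected to mirror this (the layout below is an assumption based on the patch, not part of this diff):

	struct inet_timewait_sock {
		/* must stay first, same rule as in struct sock */
		struct sock_common	__tw_common;
	#define tw_prot			__tw_common.skc_prot
		/* ... timewait-only state ... */
	};

Code that only holds a timewait socket can then reach the protocol methods through tw_prot without knowing about the full struct sock.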
@@ -218,7 +221,6 @@ struct sock {
struct sk_buff *tail;
} sk_backlog;
struct sk_buff_head sk_error_queue;
- struct proto *sk_prot;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
int sk_err,
@@ -253,28 +255,28 @@ struct sock {
/*
* Hashed lists helper routines
*/
-static inline struct sock *__sk_head(struct hlist_head *head)
+static inline struct sock *__sk_head(const struct hlist_head *head)
{
return hlist_entry(head->first, struct sock, sk_node);
}
-static inline struct sock *sk_head(struct hlist_head *head)
+static inline struct sock *sk_head(const struct hlist_head *head)
{
return hlist_empty(head) ? NULL : __sk_head(head);
}
-static inline struct sock *sk_next(struct sock *sk)
+static inline struct sock *sk_next(const struct sock *sk)
{
return sk->sk_node.next ?
hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}
-static inline int sk_unhashed(struct sock *sk)
+static inline int sk_unhashed(const struct sock *sk)
{
return hlist_unhashed(&sk->sk_node);
}
-static inline int sk_hashed(struct sock *sk)
+static inline int sk_hashed(const struct sock *sk)
{
return sk->sk_node.pprev != NULL;
}
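The added const qualifiers let read-only walkers take const pointers all the way down. A minimal sketch of a chain walk using these helpers (count_chain is hypothetical):

	static int count_chain(const struct hlist_head *head)
	{
		const struct sock *sk;
		int n = 0;

		/* sk_head() handles the empty-list case, sk_next()
		 * returns NULL at the end of the chain */
		for (sk = sk_head(head); sk != NULL; sk = sk_next(sk))
			n++;
		return n;
	}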
@@ -384,6 +386,11 @@ enum sock_flags {
SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
};
+static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
+{
+ nsk->sk_flags = osk->sk_flags;
+}
+
static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
__set_bit(flag, &sk->sk_flags);
@@ -549,6 +556,10 @@ struct proto {
kmem_cache_t *slab;
unsigned int obj_size;
+ kmem_cache_t *twsk_slab;
+ unsigned int twsk_obj_size;
+ atomic_t *orphan_count;
+
struct request_sock_ops *rsk_prot;
struct module *owner;
@@ -556,7 +567,9 @@ struct proto {
char name[32];
struct list_head node;
-
+#ifdef SOCK_REFCNT_DEBUG
+ atomic_t socks;
+#endif
struct {
int inuse;
u8 __pad[SMP_CACHE_BYTES - sizeof(int)];
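The new twsk_slab/twsk_obj_size pair lets the registration code carve out a per-protocol cache for timewait sockets, and orphan_count points at the protocol's orphan counter so generic code can read it. A sketch of how a protocol might fill these fields (my_proto, struct my_sock and my_orphan_count are placeholders, not names from this patch):

	static atomic_t my_orphan_count = ATOMIC_INIT(0);

	static struct proto my_proto = {
		.name		= "MYPROTO",
		.owner		= THIS_MODULE,
		.obj_size	= sizeof(struct my_sock),
		.twsk_obj_size	= sizeof(struct inet_timewait_sock),
		.orphan_count	= &my_orphan_count,
		/* ... handler methods ... */
	};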
@@ -566,6 +579,31 @@ struct proto {
extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);
+#ifdef SOCK_REFCNT_DEBUG
+static inline void sk_refcnt_debug_inc(struct sock *sk)
+{
+ atomic_inc(&sk->sk_prot->socks);
+}
+
+static inline void sk_refcnt_debug_dec(struct sock *sk)
+{
+ atomic_dec(&sk->sk_prot->socks);
+ printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
+ sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
+}
+
+static inline void sk_refcnt_debug_release(const struct sock *sk)
+{
+ if (atomic_read(&sk->sk_refcnt) != 1)
+ printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
+ sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
+}
+#else /* SOCK_REFCNT_DEBUG */
+#define sk_refcnt_debug_inc(sk) do { } while (0)
+#define sk_refcnt_debug_dec(sk) do { } while (0)
+#define sk_refcnt_debug_release(sk) do { } while (0)
+#endif /* SOCK_REFCNT_DEBUG */
+
/* Called with local bh disabled */
static __inline__ void sock_prot_inc_use(struct proto *prot)
{
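When SOCK_REFCNT_DEBUG is defined, a protocol is expected to bracket socket lifetimes with these helpers: sk_refcnt_debug_inc() when a socket is created, sk_refcnt_debug_dec() from its destructor, and sk_refcnt_debug_release() just before dropping the last expected reference. A sketch of the intended call sites (the my_* functions are placeholders):

	static void my_sock_destruct(struct sock *sk)
	{
		/* ... free protocol-private state ... */
		sk_refcnt_debug_dec(sk);
	}

	static void my_destroy_sock(struct sock *sk)
	{
		sk->sk_prot->destroy(sk);
		sk_refcnt_debug_release(sk);	/* warns if refcnt != 1 */
		sock_put(sk);
	}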
@@ -577,6 +615,15 @@ static __inline__ void sock_prot_dec_use(struct proto *prot)
prot->stats[smp_processor_id()].inuse--;
}
+/* With per-bucket locks this operation is not atomic, so the
+ * unlocked version below is no worse.
+ */
+static inline void __sk_prot_rehash(struct sock *sk)
+{
+ sk->sk_prot->unhash(sk);
+ sk->sk_prot->hash(sk);
+}
+
/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)
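__sk_prot_rehash() is for the rare case where a hashed socket's lookup key changes. As the __ prefix suggests, the caller owns the locking; the hash/unhash methods are generally called with BHs disabled. A hypothetical caller (my_rehash_on_rebind is illustrative):

	static void my_rehash_on_rebind(struct sock *sk)
	{
		local_bh_disable();
		/* key fields updated by caller; drop the old hash
		 * entry and re-insert under the new key */
		__sk_prot_rehash(sk);
		local_bh_enable();
	}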
@@ -688,6 +735,8 @@ extern struct sock *sk_alloc(int family,
unsigned int __nocast priority,
struct proto *prot, int zero_it);
extern void sk_free(struct sock *sk);
+extern struct sock *sk_clone(const struct sock *sk,
+ const unsigned int __nocast priority);
extern struct sk_buff *sock_wmalloc(struct sock *sk,
unsigned long size, int force,
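sk_clone() gives passive opens a generic way to create a child socket as a copy of the listening parent, instead of each protocol duplicating fields by hand. A sketch of a caller (softirq context assumed, hence GFP_ATOMIC):

	struct sock *child = sk_clone(parent, GFP_ATOMIC);

	if (child == NULL)
		return NULL;	/* drop the connection request */
	/* fix up whatever must differ from the parent:
	 * addresses, ports, state, ... */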
@@ -981,6 +1030,16 @@ sk_dst_check(struct sock *sk, u32 cookie)
return dst;
}
+static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
+{
+ __sk_dst_set(sk, dst);
+ sk->sk_route_caps = dst->dev->features;
+ if (sk->sk_route_caps & NETIF_F_TSO) {
+ if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
+ sk->sk_route_caps &= ~NETIF_F_TSO;
+ }
+}
+
static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
{
sk->sk_wmem_queued += skb->truesize;
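sk_setup_caps() centralizes what connect()-style paths used to open-code after a route lookup: install the dst and derive sk_route_caps from the output device, clearing TSO when the user disabled it (%SOCK_NO_LARGESEND) or when the dst needs extra header space (e.g. IPsec). A sketch of a caller, assuming the struct rtable layout of this era with the dst embedded at rt->u.dst:

	struct rtable *rt;
	/* ... ip_route_connect(&rt, ...) succeeded ... */
	sk_setup_caps(sk, &rt->u.dst);	/* hands the dst reference to sk */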
@@ -1141,7 +1200,7 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
int hdr_len;
hdr_len = SKB_DATA_ALIGN(sk->sk_prot->max_header);
- skb = alloc_skb(size + hdr_len, gfp);
+ skb = alloc_skb_fclone(size + hdr_len, gfp);
if (skb) {
skb->truesize += mem;
if (sk->sk_forward_alloc >= (int)skb->truesize ||
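The switch to alloc_skb_fclone() reflects how stream sockets use these buffers: almost every queued skb is cloned at least once for (re)transmission, so allocating from the fast-clone cache, where the skb and its future clone come from a single slab object, saves an allocation on the hot path.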
@@ -1223,16 +1282,19 @@ static inline int sock_intr_errno(long timeo)
static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
- struct timeval *stamp = &skb->stamp;
+ struct timeval stamp;
+
+ skb_get_timestamp(skb, &stamp);
if (sock_flag(sk, SOCK_RCVTSTAMP)) {
/* Race occurred between timestamp enabling and packet
receiving. Fill in the current time for now. */
- if (stamp->tv_sec == 0)
- do_gettimeofday(stamp);
+ if (stamp.tv_sec == 0)
+ do_gettimeofday(&stamp);
+ skb_set_timestamp(skb, &stamp);
put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP, sizeof(struct timeval),
- stamp);
+ &stamp);
} else
- sk->sk_stamp = *stamp;
+ sk->sk_stamp = stamp;
}
/**
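Going through skb_get_timestamp()/skb_set_timestamp() instead of poking skb->stamp directly decouples callers from how the timestamp is stored inside struct sk_buff, which is what allows that representation to change later; the local struct timeval also keeps the fixup for the "timestamping enabled mid-receive" race in one place before the value is either attached as a cmsg or saved in sk->sk_stamp.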
@@ -1257,11 +1319,11 @@ extern int sock_get_timestamp(struct sock *, struct timeval __user *);
*/
#if 0
-#define NETDEBUG(x) do { } while (0)
-#define LIMIT_NETDEBUG(x) do {} while(0)
+#define NETDEBUG(fmt, args...) do { } while (0)
+#define LIMIT_NETDEBUG(fmt, args...) do { } while(0)
#else
-#define NETDEBUG(x) do { x; } while (0)
-#define LIMIT_NETDEBUG(x) do { if (net_ratelimit()) { x; } } while(0)
+#define NETDEBUG(fmt, args...) printk(fmt,##args)
+#define LIMIT_NETDEBUG(fmt, args...) do { if (net_ratelimit()) printk(fmt,##args); } while(0)
#endif
/*
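With this change the macros take printf-style arguments directly instead of wrapping a whole statement, so call sites shrink to a single format string plus arguments. A typical (illustrative) call:

	LIMIT_NETDEBUG(KERN_DEBUG "UDP: short packet: %d/%d\n",
		       len, ulen);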
@@ -1308,4 +1370,14 @@ static inline int siocdevprivate_ioctl(unsigned int fd, unsigned int cmd, unsign
}
#endif
+extern void sk_init(void);
+
+#ifdef CONFIG_SYSCTL
+extern struct ctl_table core_table[];
+extern int sysctl_optmem_max;
+#endif
+
+extern __u32 sysctl_wmem_default;
+extern __u32 sysctl_rmem_default;
+
#endif /* _SOCK_H */