[T106][ZXW-22]7520V3SCV2.01.01.02P42U09_VEC_V0.8_AP_VEC origin source commit

Change-Id: Ic6e05d89ecd62fc34f82b23dcf306c93764aec4b
diff --git a/ap/os/linux/linux-3.4.x/include/net/sock.h b/ap/os/linux/linux-3.4.x/include/net/sock.h
new file mode 100755
index 0000000..efab120
--- /dev/null
+++ b/ap/os/linux/linux-3.4.x/include/net/sock.h
@@ -0,0 +1,2183 @@
+/*
+ * INET		An implementation of the TCP/IP protocol suite for the LINUX
+ *		operating system.  INET is implemented using the  BSD Socket
+ *		interface as the means of communication with the user level.
+ *
+ *		Definitions for the AF_INET socket handler.
+ *
+ * Version:	@(#)sock.h	1.0.4	05/13/93
+ *
+ * Authors:	Ross Biro
+ *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *		Corey Minyard <wf-rch!minyard@relay.EU.net>
+ *		Florian La Roche <flla@stud.uni-sb.de>
+ *
+ * Fixes:
+ *		Alan Cox	:	Volatiles in skbuff pointers. See
+ *					skbuff comments. May be overdone,
+ *					better to prove they can be removed
+ *					than the reverse.
+ *		Alan Cox	:	Added a zapped field for tcp to note
+ *					a socket is reset and must stay shut up
+ *		Alan Cox	:	New fields for options
+ *	Pauline Middelink	:	identd support
+ *		Alan Cox	:	Eliminate low level recv/recvfrom
+ *		David S. Miller	:	New socket lookup architecture.
+ *              Steve Whitehouse:       Default routines for sock_ops
+ *              Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
+ *              			protinfo be just a void pointer, as the
+ *              			protocol specific parts were moved to
+ *              			respective headers and ipv4/v6, etc now
+ *              			use private slabcaches for its socks
+ *              Pedro Hortas	:	New flags field for socket options
+ *
+ *
+ *		This program is free software; you can redistribute it and/or
+ *		modify it under the terms of the GNU General Public License
+ *		as published by the Free Software Foundation; either version
+ *		2 of the License, or (at your option) any later version.
+ */
+#ifndef _SOCK_H
+#define _SOCK_H
+
+#include <linux/hardirq.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/list_nulls.h>
+#include <linux/timer.h>
+#include <linux/cache.h>
+#include <linux/lockdep.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>	/* struct sk_buff */
+#include <linux/mm.h>
+#include <linux/security.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/memcontrol.h>
+#include <linux/res_counter.h>
+#include <linux/static_key.h>
+#include <linux/aio.h>
+#include <linux/sched.h>
+
+#include <linux/filter.h>
+#include <linux/rculist_nulls.h>
+#include <linux/poll.h>
+
+#include <linux/atomic.h>
+#include <net/dst.h>
+#include <net/checksum.h>
+
+#include <net/SI/sock_track.h>
+
+struct cgroup;
+struct cgroup_subsys;
+#ifdef CONFIG_NET
+int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss);
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp);
+#else
+static inline
+int mem_cgroup_sockets_init(struct cgroup *cgrp, struct cgroup_subsys *ss)
+{
+	return 0;
+}
+static inline
+void mem_cgroup_sockets_destroy(struct cgroup *cgrp)
+{
+}
+#endif
+/*
+ * This structure really needs to be cleaned up.
+ * Most of it is for TCP, and not used by any of
+ * the other protocols.
+ */
+
+/* Define this to get the SOCK_DBG debugging facility. */
+#define SOCK_DEBUGGING
+#ifdef SOCK_DEBUGGING
+#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
+					printk(KERN_DEBUG msg); } while (0)
+#else
+/* Validate arguments and do nothing */
+static inline __printf(2, 3)
+void SOCK_DEBUG(struct sock *sk, const char *msg, ...)
+{
+}
+#endif
+
+/* This is the per-socket lock.  The spinlock provides a synchronization
+ * between user contexts and software interrupt processing, whereas the
+ * mini-semaphore synchronizes multiple users amongst themselves.
+ */
+typedef struct {
+	spinlock_t		slock;
+	int			owned;
+	wait_queue_head_t	wq;
+	/*
+	 * We express the mutex-alike socket_lock semantics
+	 * to the lock validator by explicitly managing
+	 * the slock as a lock variant (in addition to
+	 * the slock itself):
+	 */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
+} socket_lock_t;
+
+struct sock;
+struct proto;
+struct net;
+
+/**
+ *	struct sock_common - minimal network layer representation of sockets
+ *	@skc_daddr: Foreign IPv4 addr
+ *	@skc_rcv_saddr: Bound local IPv4 addr
+ *	@skc_hash: hash value used with various protocol lookup tables
+ *	@skc_u16hashes: two u16 hash values used by UDP lookup tables
+ *	@skc_family: network address family
+ *	@skc_state: Connection state
+ *	@skc_reuse: %SO_REUSEADDR setting
+ *	@skc_bound_dev_if: bound device index if != 0
+ *	@skc_bind_node: bind hash linkage for various protocol lookup tables
+ *	@skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
+ *	@skc_prot: protocol handlers inside a network family
+ *	@skc_net: reference to the network namespace of this socket
+ *	@skc_node: main hash linkage for various protocol lookup tables
+ *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
+ *	@skc_tx_queue_mapping: tx queue number for this connection
+ *	@skc_refcnt: reference count
+ *
+ *	This is the minimal network layer representation of sockets, the header
+ *	for struct sock and struct inet_timewait_sock.
+ */
+struct sock_common {
+	/* skc_daddr and skc_rcv_saddr must be grouped :
+	 * cf INET_MATCH() and INET_TW_MATCH()
+	 */
+	__be32			skc_daddr;
+	__be32			skc_rcv_saddr;
+
+	union  {
+		unsigned int	skc_hash;
+		__u16		skc_u16hashes[2];
+	};
+	unsigned short		skc_family;
+	volatile unsigned char	skc_state;
+	unsigned char		skc_reuse;
+	int			skc_bound_dev_if;
+	union {
+		struct hlist_node	skc_bind_node;
+		struct hlist_nulls_node skc_portaddr_node;
+	};
+	struct proto		*skc_prot;
+#ifdef CONFIG_NET_NS
+	struct net	 	*skc_net;
+#endif
+	/*
+	 * fields between dontcopy_begin/dontcopy_end
+	 * are not copied in sock_copy()
+	 */
+	/* private: */
+	int			skc_dontcopy_begin[0];
+	/* public: */
+	union {
+		struct hlist_node	skc_node;
+		struct hlist_nulls_node skc_nulls_node;
+	};
+	int			skc_tx_queue_mapping;
+	atomic_t		skc_refcnt;
+	/* private: */
+	int                     skc_dontcopy_end[0];
+	/* public: */
+};
+
+struct cg_proto;
+/**
+  *	struct sock - network layer representation of sockets
+  *	@__sk_common: shared layout with inet_timewait_sock
+  *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
+  *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
+  *	@sk_lock:	synchronizer
+  *	@sk_rcvbuf: size of receive buffer in bytes
+  *	@sk_wq: sock wait queue and async head
+  *	@sk_dst_cache: destination cache
+  *	@sk_dst_lock: destination cache lock
+  *	@sk_policy: flow policy
+  *	@sk_receive_queue: incoming packets
+  *	@sk_wmem_alloc: transmit queue bytes committed
+  *	@sk_write_queue: Packet sending queue
+  *	@sk_async_wait_queue: DMA copied packets
+  *	@sk_omem_alloc: "o" is "option" or "other"
+  *	@sk_wmem_queued: persistent queue size
+  *	@sk_forward_alloc: space allocated forward
+  *	@sk_allocation: allocation mode
+  *	@sk_sndbuf: size of send buffer in bytes
+  *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
+  *		   %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
+  *	@sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
+  *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
+  *	@sk_route_nocaps: forbidden route capabilities (e.g. %NETIF_F_GSO_MASK)
+  *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
+  *	@sk_gso_max_size: Maximum GSO segment size to build
+  *	@sk_gso_max_segs: Maximum number of GSO segments
+  *	@sk_lingertime: %SO_LINGER l_linger setting
+  *	@sk_backlog: always used with the per-socket spinlock held
+  *	@sk_callback_lock: used with the callbacks in the end of this struct
+  *	@sk_error_queue: rarely used
+  *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
+  *			  IPV6_ADDRFORM for instance)
+  *	@sk_err: last error
+  *	@sk_err_soft: errors that don't cause failure but are the cause of a
+  *		      persistent failure not just 'timed out'
+  *	@sk_drops: raw/udp drops counter
+  *	@sk_ack_backlog: current listen backlog
+  *	@sk_max_ack_backlog: listen backlog set in listen()
+  *	@sk_priority: %SO_PRIORITY setting
+  *	@sk_cgrp_prioidx: socket group's priority map index
+  *	@sk_type: socket type (%SOCK_STREAM, etc)
+  *	@sk_protocol: which protocol this socket belongs to in this network family
+  *	@sk_peer_pid: &struct pid for this socket's peer
+  *	@sk_peer_cred: %SO_PEERCRED setting
+  *	@sk_rcvlowat: %SO_RCVLOWAT setting
+  *	@sk_rcvtimeo: %SO_RCVTIMEO setting
+  *	@sk_sndtimeo: %SO_SNDTIMEO setting
+  *	@sk_rxhash: flow hash received from netif layer
+  *	@sk_filter: socket filtering instructions
+  *	@sk_protinfo: private area, net family specific, when not using slab
+  *	@sk_timer: sock cleanup timer
+  *	@sk_stamp: time stamp of last packet received
+  *	@sk_socket: Identd and reporting IO signals
+  *	@sk_user_data: RPC layer private data
+  *	@sk_sndmsg_page: cached page for sendmsg
+  *	@sk_sndmsg_off: cached offset for sendmsg
+  *	@sk_peek_off: current peek_offset value
+  *	@sk_send_head: front of stuff to transmit
+  *	@sk_security: used by security modules
+  *	@sk_mark: generic packet mark
+  *	@sk_classid: this socket's cgroup classid
+  *	@sk_cgrp: this socket's cgroup-specific proto data
+  *	@sk_write_pending: a write to stream socket waits to start
+  *	@sk_state_change: callback to indicate change in the state of the sock
+  *	@sk_data_ready: callback to indicate there is data to be processed
+  *	@sk_write_space: callback to indicate there is buffer space available for sending
+  *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
+  *	@sk_backlog_rcv: callback to process the backlog
+  *	@sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
+ */
+struct sock {
+	/*
+	 * Now struct inet_timewait_sock also uses sock_common, so please just
+	 * don't add anything before this first member (__sk_common) --acme
+	 */
+	struct sock_common	__sk_common;
+#define sk_node			__sk_common.skc_node
+#define sk_nulls_node		__sk_common.skc_nulls_node
+#define sk_refcnt		__sk_common.skc_refcnt
+#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping
+
+#define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
+#define sk_dontcopy_end		__sk_common.skc_dontcopy_end
+#define sk_hash			__sk_common.skc_hash
+#define sk_family		__sk_common.skc_family
+#define sk_state		__sk_common.skc_state
+#define sk_reuse		__sk_common.skc_reuse
+#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
+#define sk_bind_node		__sk_common.skc_bind_node
+#define sk_prot			__sk_common.skc_prot
+#define sk_net			__sk_common.skc_net
+	socket_lock_t		sk_lock;
+	struct sk_buff_head	sk_receive_queue;
+	/*
+	 * The backlog queue is special, it is always used with
+	 * the per-socket spinlock held and requires low latency
+	 * access. Therefore we special-case its implementation.
+	 * Note : rmem_alloc is in this structure to fill a hole
+	 * on 64bit arches, not because it's logically part of
+	 * backlog.
+	 */
+	struct {
+		atomic_t	rmem_alloc;
+		int		len;
+		struct sk_buff	*head;
+		struct sk_buff	*tail;
+	} sk_backlog;
+#define sk_rmem_alloc sk_backlog.rmem_alloc
+	int			sk_forward_alloc;
+#ifdef CONFIG_RPS
+	__u32			sk_rxhash;
+#endif
+	atomic_t		sk_drops;
+	int			sk_rcvbuf;
+	struct sk_filter __rcu	*sk_filter;
+	struct socket_wq __rcu	*sk_wq;
+
+#ifdef CONFIG_NET_DMA
+	struct sk_buff_head	sk_async_wait_queue;
+#endif
+
+#ifdef CONFIG_XFRM
+	struct xfrm_policy	*sk_policy[2];
+#endif
+	unsigned long 		sk_flags;
+	struct dst_entry	*sk_dst_cache;
+	spinlock_t		sk_dst_lock;
+	atomic_t		sk_wmem_alloc;
+	atomic_t		sk_omem_alloc;
+	int			sk_sndbuf;
+	struct sk_buff_head	sk_write_queue;
+	kmemcheck_bitfield_begin(flags);
+	unsigned int		sk_shutdown  : 2,
+				sk_no_check  : 2,
+				sk_userlocks : 4,
+				sk_protocol  : 8,
+#define SK_PROTOCOL_MAX U8_MAX               //CVE-2015-8543
+				sk_type      : 16;
+	kmemcheck_bitfield_end(flags);
+	int			sk_wmem_queued;
+	gfp_t			sk_allocation;
+	netdev_features_t	sk_route_caps;
+	netdev_features_t	sk_route_nocaps;
+	int			sk_gso_type;
+	unsigned int		sk_gso_max_size;
+	u16			sk_gso_max_segs;
+	int			sk_rcvlowat;
+	unsigned long	        sk_lingertime;
+	struct sk_buff_head	sk_error_queue;
+	struct proto		*sk_prot_creator;
+	rwlock_t		sk_callback_lock;
+	int			sk_err,
+				sk_err_soft;
+	unsigned short		sk_ack_backlog;
+	unsigned short		sk_max_ack_backlog;
+	__u32			sk_priority;
+#ifdef CONFIG_CGROUPS
+	__u32			sk_cgrp_prioidx;
+#endif
+	struct pid		*sk_peer_pid;
+	const struct cred	*sk_peer_cred;
+	long			sk_rcvtimeo;
+	long			sk_sndtimeo;
+	void			*sk_protinfo;
+	struct timer_list	sk_timer;
+	ktime_t			sk_stamp;
+	struct socket		*sk_socket;
+	void			*sk_user_data;
+	struct page		*sk_sndmsg_page;
+	struct sk_buff		*sk_send_head;
+	__u32			sk_sndmsg_off;
+	__s32			sk_peek_off;
+	int			sk_write_pending;
+#ifdef CONFIG_SECURITY
+	void			*sk_security;
+#endif
+	__u32			sk_mark;
+	u32			sk_classid;
+	struct cg_proto		*sk_cgrp;
+	unsigned long		sk_send_sum;	/* total number of packets sent so far */
+	int			conn_list_init;	/* whether the conn list has been initialized */
+	struct list_head	conn_head;	/* conn list */
+	void			(*sk_state_change)(struct sock *sk);
+	void			(*sk_data_ready)(struct sock *sk, int bytes);
+	void			(*sk_write_space)(struct sock *sk);
+	void			(*sk_error_report)(struct sock *sk);
+	int			(*sk_backlog_rcv)(struct sock *sk,
+						  struct sk_buff *skb);
+	void                    (*sk_destruct)(struct sock *sk);
+};
+
+static inline int sk_peek_offset(struct sock *sk, int flags)
+{
+	if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0))
+		return sk->sk_peek_off;
+	else
+		return 0;
+}
+
+static inline void sk_peek_offset_bwd(struct sock *sk, int val)
+{
+	if (sk->sk_peek_off >= 0) {
+		if (sk->sk_peek_off >= val)
+			sk->sk_peek_off -= val;
+		else
+			sk->sk_peek_off = 0;
+	}
+}
+
+static inline void sk_peek_offset_fwd(struct sock *sk, int val)
+{
+	if (sk->sk_peek_off >= 0)
+		sk->sk_peek_off += val;
+}
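+
+/*
+ * Usage sketch (illustrative, not part of the original header): a datagram
+ * receive path that honours SO_PEEK_OFF consults the current offset, then
+ * advances it on a peek and rewinds it on a real read:
+ *
+ *	int off = sk_peek_offset(sk, flags);
+ *	... skip 'off' bytes, copy 'len' bytes to user space ...
+ *	if (flags & MSG_PEEK)
+ *		sk_peek_offset_fwd(sk, len);
+ *	else
+ *		sk_peek_offset_bwd(sk, len);
+ */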
+
+/*
+ * Hashed lists helper routines
+ */
+static inline struct sock *sk_entry(const struct hlist_node *node)
+{
+	return hlist_entry(node, struct sock, sk_node);
+}
+
+static inline struct sock *__sk_head(const struct hlist_head *head)
+{
+	return hlist_entry(head->first, struct sock, sk_node);
+}
+
+static inline struct sock *sk_head(const struct hlist_head *head)
+{
+	return hlist_empty(head) ? NULL : __sk_head(head);
+}
+
+static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
+{
+	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
+}
+
+static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
+{
+	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
+}
+
+static inline struct sock *sk_next(const struct sock *sk)
+{
+	return sk->sk_node.next ?
+		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
+}
+
+static inline struct sock *sk_nulls_next(const struct sock *sk)
+{
+	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
+		hlist_nulls_entry(sk->sk_nulls_node.next,
+				  struct sock, sk_nulls_node) :
+		NULL;
+}
+
+static inline int sk_unhashed(const struct sock *sk)
+{
+	return hlist_unhashed(&sk->sk_node);
+}
+
+static inline int sk_hashed(const struct sock *sk)
+{
+	return !sk_unhashed(sk);
+}
+
+static __inline__ void sk_node_init(struct hlist_node *node)
+{
+	node->pprev = NULL;
+}
+
+static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
+{
+	node->pprev = NULL;
+}
+
+static __inline__ void __sk_del_node(struct sock *sk)
+{
+	__hlist_del(&sk->sk_node);
+}
+
+/* NB: equivalent to hlist_del_init_rcu */
+static __inline__ int __sk_del_node_init(struct sock *sk)
+{
+	if (sk_hashed(sk)) {
+		__sk_del_node(sk);
+		sk_node_init(&sk->sk_node);
+		return 1;
+	}
+	return 0;
+}
+
+/* Grab socket reference count. This operation is valid only
+   when sk is ALREADY grabbed f.e. it is found in hash table
+   or a list and the lookup is made under lock preventing hash table
+   modifications.
+ */
+
+static inline void sock_hold(struct sock *sk)
+{
+	atomic_inc(&sk->sk_refcnt);
+}
+
+/* Ungrab socket in the context, which assumes that socket refcnt
+   cannot hit zero, f.e. it is true in context of any socketcall.
+ */
+static inline void __sock_put(struct sock *sk)
+{
+	atomic_dec(&sk->sk_refcnt);
+}
+
+static __inline__ int sk_del_node_init(struct sock *sk)
+{
+	int rc = __sk_del_node_init(sk);
+
+	if (rc) {
+		/* paranoid for a while -acme */
+		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
+		__sock_put(sk);
+	}
+	return rc;
+}
+#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)
+
+static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
+{
+	if (sk_hashed(sk)) {
+		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
+		return 1;
+	}
+	return 0;
+}
+
+static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
+{
+	int rc = __sk_nulls_del_node_init_rcu(sk);
+
+	if (rc) {
+		/* paranoid for a while -acme */
+		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
+		__sock_put(sk);
+	}
+	return rc;
+}
+
+static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
+{
+	hlist_add_head(&sk->sk_node, list);
+}
+
+static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
+{
+	sock_hold(sk);
+	__sk_add_node(sk, list);
+}
+
+static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
+{
+	sock_hold(sk);
+	hlist_add_head_rcu(&sk->sk_node, list);
+}
+
+static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+{
+	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+}
+
+static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+{
+	sock_hold(sk);
+	__sk_nulls_add_node_rcu(sk, list);
+}
+
+static __inline__ void __sk_del_bind_node(struct sock *sk)
+{
+	__hlist_del(&sk->sk_bind_node);
+}
+
+static __inline__ void sk_add_bind_node(struct sock *sk,
+					struct hlist_head *list)
+{
+	hlist_add_head(&sk->sk_bind_node, list);
+}
+
+#define sk_for_each(__sk, node, list) \
+	hlist_for_each_entry(__sk, node, list, sk_node)
+#define sk_for_each_rcu(__sk, node, list) \
+	hlist_for_each_entry_rcu(__sk, node, list, sk_node)
+#define sk_nulls_for_each(__sk, node, list) \
+	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
+#define sk_nulls_for_each_rcu(__sk, node, list) \
+	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
+#define sk_for_each_from(__sk, node) \
+	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
+		hlist_for_each_entry_from(__sk, node, sk_node)
+#define sk_nulls_for_each_from(__sk, node) \
+	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
+		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
+#define sk_for_each_safe(__sk, node, tmp, list) \
+	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
+#define sk_for_each_bound(__sk, node, list) \
+	hlist_for_each_entry(__sk, node, list, sk_bind_node)
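+
+/*
+ * Usage sketch (illustrative only): walking one hash chain with sk_for_each()
+ * while holding the lock that protects the table; 'head', 'hash' and the
+ * match condition here are hypothetical:
+ *
+ *	struct sock *s;
+ *	struct hlist_node *node;
+ *
+ *	sk_for_each(s, node, head)
+ *		if (s->sk_hash == hash && s->sk_family == family)
+ *			return s;
+ */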
+
+/* Sock flags */
+enum sock_flags {
+	SOCK_DEAD,
+	SOCK_DONE,
+	SOCK_URGINLINE,
+	SOCK_KEEPOPEN,
+	SOCK_LINGER,
+	SOCK_DESTROY,
+	SOCK_BROADCAST,
+	SOCK_TIMESTAMP,
+	SOCK_ZAPPED,
+	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
+	SOCK_DBG, /* %SO_DEBUG setting */
+	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
+	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
+	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
+	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
+	SOCK_TIMESTAMPING_TX_HARDWARE,  /* %SOF_TIMESTAMPING_TX_HARDWARE */
+	SOCK_TIMESTAMPING_TX_SOFTWARE,  /* %SOF_TIMESTAMPING_TX_SOFTWARE */
+	SOCK_TIMESTAMPING_RX_HARDWARE,  /* %SOF_TIMESTAMPING_RX_HARDWARE */
+	SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
+	SOCK_TIMESTAMPING_SOFTWARE,     /* %SOF_TIMESTAMPING_SOFTWARE */
+	SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
+	SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
+	SOCK_FASYNC, /* fasync() active */
+	SOCK_RXQ_OVFL,
+	SOCK_ZEROCOPY, /* buffers from userspace */
+	SOCK_WIFI_STATUS, /* push wifi status to userspace */
+	SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
+		     * Will use last 4 bytes of packet sent from
+		     * user-space instead.
+		     */
+};
+
+static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
+{
+	nsk->sk_flags = osk->sk_flags;
+}
+
+static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
+{
+	__set_bit(flag, &sk->sk_flags);
+}
+
+static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
+{
+	__clear_bit(flag, &sk->sk_flags);
+}
+
+static inline int sock_flag(struct sock *sk, enum sock_flags flag)
+{
+	return test_bit(flag, &sk->sk_flags);
+}
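+
+/*
+ * Usage sketch (illustrative only): the flag helpers are plain bit operations
+ * on sk->sk_flags; e.g. toggling keepalive from a boolean option value boils
+ * down to:
+ *
+ *	if (valbool)
+ *		sock_set_flag(sk, SOCK_KEEPOPEN);
+ *	else
+ *		sock_reset_flag(sk, SOCK_KEEPOPEN);
+ */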
+
+static inline void sk_acceptq_removed(struct sock *sk)
+{
+	sk->sk_ack_backlog--;
+}
+
+static inline void sk_acceptq_added(struct sock *sk)
+{
+	sk->sk_ack_backlog++;
+}
+
+static inline int sk_acceptq_is_full(struct sock *sk)
+{
+	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
+}
+
+/*
+ * Compute minimal free write space needed to queue new packets.
+ */
+static inline int sk_stream_min_wspace(struct sock *sk)
+{
+	return sk->sk_wmem_queued >> 1;
+}
+
+static inline int sk_stream_wspace(struct sock *sk)
+{
+	return sk->sk_sndbuf - sk->sk_wmem_queued;
+}
+
+extern void sk_stream_write_space(struct sock *sk);
+
+static inline int sk_stream_memory_free(struct sock *sk)
+{
+	return sk->sk_wmem_queued < sk->sk_sndbuf;
+}
+
+/* OOB backlog add */
+static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+	/* don't leave skb dst unrefcounted, we are going to leave the rcu lock */
+	skb_dst_force(skb);
+
+	if (!sk->sk_backlog.tail)
+		sk->sk_backlog.head = skb;
+	else
+		sk->sk_backlog.tail->next = skb;
+
+	sk->sk_backlog.tail = skb;
+	skb->next = NULL;
+}
+
+/*
+ * Take into account the size of the receive queue and backlog queue.
+ * Do not take this skb's truesize into account,
+ * to allow even a single big packet to come in.
+ */
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+{
+	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
+
+	return qsize > sk->sk_rcvbuf;
+}
+
+/* The per-socket spinlock must be held here. */
+static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+	if (sk_rcvqueues_full(sk, skb))
+	{
+        TCP_SOCK_TRACK(sk, TCP_RECV_BUFF_FULL);
+		return -ENOBUFS;
+    }
+
+	__sk_add_backlog(sk, skb);
+	sk->sk_backlog.len += skb->truesize;
+	return 0;
+}
+
+static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+	return sk->sk_backlog_rcv(sk, skb);
+}
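+
+/*
+ * Usage sketch (illustrative only): a typical softirq receive path processes
+ * the skb directly when no user context owns the socket, and otherwise queues
+ * it on the backlog (roughly the pattern used by tcp_v4_rcv()):
+ *
+ *	bh_lock_sock(sk);
+ *	if (!sock_owned_by_user(sk))
+ *		rc = sk_backlog_rcv(sk, skb);
+ *	else if (sk_add_backlog(sk, skb))
+ *		rc = -ENOBUFS;
+ *	bh_unlock_sock(sk);
+ */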
+
+static inline void sock_rps_record_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+	struct rps_sock_flow_table *sock_flow_table;
+
+	rcu_read_lock();
+	sock_flow_table = rcu_dereference(rps_sock_flow_table);
+	rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
+	rcu_read_unlock();
+#endif
+}
+
+static inline void sock_rps_reset_flow(const struct sock *sk)
+{
+#ifdef CONFIG_RPS
+	struct rps_sock_flow_table *sock_flow_table;
+
+	rcu_read_lock();
+	sock_flow_table = rcu_dereference(rps_sock_flow_table);
+	rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
+	rcu_read_unlock();
+#endif
+}
+
+static inline void sock_rps_save_rxhash(struct sock *sk,
+					const struct sk_buff *skb)
+{
+#ifdef CONFIG_RPS
+	if (unlikely(sk->sk_rxhash != skb->rxhash)) {
+		sock_rps_reset_flow(sk);
+		sk->sk_rxhash = skb->rxhash;
+	}
+#endif
+}
+
+static inline void sock_rps_reset_rxhash(struct sock *sk)
+{
+#ifdef CONFIG_RPS
+	sock_rps_reset_flow(sk);
+	sk->sk_rxhash = 0;
+#endif
+}
+
+#define sk_wait_event(__sk, __timeo, __condition)			\
+	({	int __rc;						\
+		release_sock(__sk);					\
+		__rc = __condition;					\
+		if (!__rc) {						\
+			*(__timeo) = schedule_timeout(*(__timeo));	\
+		}							\
+		lock_sock(__sk);					\
+		__rc = __condition;					\
+		__rc;							\
+	})
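+
+/*
+ * Usage sketch (illustrative only): callers wrap sk_wait_event() in a
+ * prepare_to_wait()/finish_wait() pair, as sk_wait_data() does; note the
+ * macro takes a pointer to the timeout:
+ *
+ *	DEFINE_WAIT(wait);
+ *
+ *	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ *	rc = sk_wait_event(sk, timeo,
+ *			   !skb_queue_empty(&sk->sk_receive_queue));
+ *	finish_wait(sk_sleep(sk), &wait);
+ */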
+
+extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
+extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
+extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
+extern int sk_stream_error(struct sock *sk, int flags, int err);
+extern void sk_stream_kill_queues(struct sock *sk);
+
+extern int sk_wait_data(struct sock *sk, long *timeo);
+
+struct request_sock_ops;
+struct timewait_sock_ops;
+struct inet_hashinfo;
+struct raw_hashinfo;
+struct module;
+
+/*
+ * caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls
+ * nodes unmodified. Special care is taken when initializing the object to zero.
+ */
+static inline void sk_prot_clear_nulls(struct sock *sk, int size)
+{
+	if (offsetof(struct sock, sk_node.next) != 0)
+		memset(sk, 0, offsetof(struct sock, sk_node.next));
+	memset(&sk->sk_node.pprev, 0,
+	       size - offsetof(struct sock, sk_node.pprev));
+}
+
+/* Networking protocol blocks we attach to sockets.
+ * socket layer -> transport layer interface
+ * transport -> network interface is defined by struct inet_proto
+ */
+struct proto {
+	void			(*close)(struct sock *sk, 
+					long timeout);
+	int			(*connect)(struct sock *sk,
+				        struct sockaddr *uaddr, 
+					int addr_len);
+	int			(*disconnect)(struct sock *sk, int flags);
+
+	struct sock *		(*accept) (struct sock *sk, int flags, int *err);
+
+	int			(*ioctl)(struct sock *sk, int cmd,
+					 unsigned long arg);
+	int			(*init)(struct sock *sk);
+	void			(*destroy)(struct sock *sk);
+	void			(*shutdown)(struct sock *sk, int how);
+	int			(*setsockopt)(struct sock *sk, int level, 
+					int optname, char __user *optval,
+					unsigned int optlen);
+	int			(*getsockopt)(struct sock *sk, int level, 
+					int optname, char __user *optval, 
+					int __user *option);  	 
+#ifdef CONFIG_COMPAT
+	int			(*compat_setsockopt)(struct sock *sk,
+					int level,
+					int optname, char __user *optval,
+					unsigned int optlen);
+	int			(*compat_getsockopt)(struct sock *sk,
+					int level,
+					int optname, char __user *optval,
+					int __user *option);
+	int			(*compat_ioctl)(struct sock *sk,
+					unsigned int cmd, unsigned long arg);
+#endif
+	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
+					   struct msghdr *msg, size_t len);
+	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
+					   struct msghdr *msg,
+					size_t len, int noblock, int flags, 
+					int *addr_len);
+	int			(*sendpage)(struct sock *sk, struct page *page,
+					int offset, size_t size, int flags);
+	int			(*bind)(struct sock *sk, 
+					struct sockaddr *uaddr, int addr_len);
+
+	int			(*backlog_rcv) (struct sock *sk, 
+						struct sk_buff *skb);
+
+	/* Keeping track of sk's, looking them up, and port selection methods. */
+	void			(*hash)(struct sock *sk);
+	void			(*unhash)(struct sock *sk);
+	void			(*rehash)(struct sock *sk);
+	int			(*get_port)(struct sock *sk, unsigned short snum);
+	void			(*clear_sk)(struct sock *sk, int size);
+
+	/* Keeping track of sockets in use */
+#ifdef CONFIG_PROC_FS
+	unsigned int		inuse_idx;
+#endif
+
+	/* Memory pressure */
+	void			(*enter_memory_pressure)(struct sock *sk);
+	atomic_long_t		*memory_allocated;	/* Current allocated memory. */
+	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
+	/*
+	 * Pressure flag: try to collapse.
+	 * Technical note: it is used by multiple contexts non atomically.
+	 * All the __sk_mem_schedule() is of this nature: accounting
+	 * is strict, actions are advisory and have some latency.
+	 */
+	int			*memory_pressure;
+	long			*sysctl_mem;
+	int			*sysctl_wmem;
+	int			*sysctl_rmem;
+	int			max_header;
+	bool			no_autobind;
+
+	struct kmem_cache	*slab;
+	unsigned int		obj_size;
+	int			slab_flags;
+
+	struct percpu_counter	*orphan_count;
+
+	struct request_sock_ops	*rsk_prot;
+	struct timewait_sock_ops *twsk_prot;
+
+	union {
+		struct inet_hashinfo	*hashinfo;
+		struct udp_table	*udp_table;
+		struct raw_hashinfo	*raw_hash;
+	} h;
+
+	struct module		*owner;
+
+	char			name[32];
+
+	struct list_head	node;
+#ifdef SOCK_REFCNT_DEBUG
+	atomic_t		socks;
+#endif
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+	/*
+	 * cgroup specific init/deinit functions. Called once for all
+	 * protocols that implement it, from cgroups populate function.
+	 * This function has to setup any files the protocol want to
+	 * appear in the kmem cgroup filesystem.
+	 */
+	int			(*init_cgroup)(struct cgroup *cgrp,
+					       struct cgroup_subsys *ss);
+	void			(*destroy_cgroup)(struct cgroup *cgrp);
+	struct cg_proto		*(*proto_cgroup)(struct mem_cgroup *memcg);
+#endif
+};
+
+struct cg_proto {
+	void			(*enter_memory_pressure)(struct sock *sk);
+	struct res_counter	*memory_allocated;	/* Current allocated memory. */
+	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
+	int			*memory_pressure;
+	long			*sysctl_mem;
+	/*
+	 * The memcg field is used to find which memcg we belong to directly.
+	 * Each memcg struct can hold more than one cg_proto, so container_of
+	 * won't really cut it.
+	 *
+	 * The elegant solution would be having an inverse function to
+	 * proto_cgroup in struct proto, but that means polluting the structure
+	 * for everybody, instead of just for memcg users.
+	 */
+	struct mem_cgroup	*memcg;
+};
+
+extern int proto_register(struct proto *prot, int alloc_slab);
+extern void proto_unregister(struct proto *prot);
+
+#ifdef SOCK_REFCNT_DEBUG
+static inline void sk_refcnt_debug_inc(struct sock *sk)
+{
+	atomic_inc(&sk->sk_prot->socks);
+}
+
+static inline void sk_refcnt_debug_dec(struct sock *sk)
+{
+	atomic_dec(&sk->sk_prot->socks);
+	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
+	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
+}
+
+static inline void sk_refcnt_debug_release(const struct sock *sk)
+{
+	if (atomic_read(&sk->sk_refcnt) != 1)
+		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
+		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
+}
+#else /* SOCK_REFCNT_DEBUG */
+#define sk_refcnt_debug_inc(sk) do { } while (0)
+#define sk_refcnt_debug_dec(sk) do { } while (0)
+#define sk_refcnt_debug_release(sk) do { } while (0)
+#endif /* SOCK_REFCNT_DEBUG */
+
+#if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && defined(CONFIG_NET)
+extern struct static_key memcg_socket_limit_enabled;
+static inline struct cg_proto *parent_cg_proto(struct proto *proto,
+					       struct cg_proto *cg_proto)
+{
+	return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
+}
+#define mem_cgroup_sockets_enabled static_key_false(&memcg_socket_limit_enabled)
+#else
+#define mem_cgroup_sockets_enabled 0
+static inline struct cg_proto *parent_cg_proto(struct proto *proto,
+					       struct cg_proto *cg_proto)
+{
+	return NULL;
+}
+#endif
+
+
+static inline bool sk_has_memory_pressure(const struct sock *sk)
+{
+	return sk->sk_prot->memory_pressure != NULL;
+}
+
+static inline bool sk_under_memory_pressure(const struct sock *sk)
+{
+	if (!sk->sk_prot->memory_pressure)
+		return false;
+
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+		return !!*sk->sk_cgrp->memory_pressure;
+
+	return !!*sk->sk_prot->memory_pressure;
+}
+
+static inline void sk_leave_memory_pressure(struct sock *sk)
+{
+	int *memory_pressure = sk->sk_prot->memory_pressure;
+
+	if (!memory_pressure)
+		return;
+
+	if (*memory_pressure)
+		*memory_pressure = 0;
+
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+		struct cg_proto *cg_proto = sk->sk_cgrp;
+		struct proto *prot = sk->sk_prot;
+
+		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+			if (*cg_proto->memory_pressure)
+				*cg_proto->memory_pressure = 0;
+	}
+
+}
+
+static inline void sk_enter_memory_pressure(struct sock *sk)
+{
+	if (!sk->sk_prot->enter_memory_pressure)
+		return;
+
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+		struct cg_proto *cg_proto = sk->sk_cgrp;
+		struct proto *prot = sk->sk_prot;
+
+		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+			cg_proto->enter_memory_pressure(sk);
+	}
+
+	sk->sk_prot->enter_memory_pressure(sk);
+}
+
+static inline long sk_prot_mem_limits(const struct sock *sk, int index)
+{
+	long *prot = sk->sk_prot->sysctl_mem;
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+		prot = sk->sk_cgrp->sysctl_mem;
+	return prot[index];
+}
+
+static inline void memcg_memory_allocated_add(struct cg_proto *prot,
+					      unsigned long amt,
+					      int *parent_status)
+{
+	struct res_counter *fail;
+	int ret;
+
+	ret = res_counter_charge_nofail(prot->memory_allocated,
+					amt << PAGE_SHIFT, &fail);
+	if (ret < 0)
+		*parent_status = OVER_LIMIT;
+}
+
+static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
+					      unsigned long amt)
+{
+	res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
+}
+
+static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
+{
+	u64 ret;
+	ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
+	return ret >> PAGE_SHIFT;
+}
+
+static inline long
+sk_memory_allocated(const struct sock *sk)
+{
+	struct proto *prot = sk->sk_prot;
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+		return memcg_memory_allocated_read(sk->sk_cgrp);
+
+	return atomic_long_read(prot->memory_allocated);
+}
+
+static inline long
+sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
+{
+	struct proto *prot = sk->sk_prot;
+
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+		memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
+		/* update the root cgroup regardless */
+		atomic_long_add_return(amt, prot->memory_allocated);
+		return memcg_memory_allocated_read(sk->sk_cgrp);
+	}
+
+	return atomic_long_add_return(amt, prot->memory_allocated);
+}
+
+static inline void
+sk_memory_allocated_sub(struct sock *sk, int amt)
+{
+	struct proto *prot = sk->sk_prot;
+
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+		memcg_memory_allocated_sub(sk->sk_cgrp, amt);
+
+	atomic_long_sub(amt, prot->memory_allocated);
+}
+
+static inline void sk_sockets_allocated_dec(struct sock *sk)
+{
+	struct proto *prot = sk->sk_prot;
+
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+		struct cg_proto *cg_proto = sk->sk_cgrp;
+
+		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+			percpu_counter_dec(cg_proto->sockets_allocated);
+	}
+
+	percpu_counter_dec(prot->sockets_allocated);
+}
+
+static inline void sk_sockets_allocated_inc(struct sock *sk)
+{
+	struct proto *prot = sk->sk_prot;
+
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
+		struct cg_proto *cg_proto = sk->sk_cgrp;
+
+		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
+			percpu_counter_inc(cg_proto->sockets_allocated);
+	}
+
+	percpu_counter_inc(prot->sockets_allocated);
+}
+
+static inline int
+sk_sockets_allocated_read_positive(struct sock *sk)
+{
+	struct proto *prot = sk->sk_prot;
+
+	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
+		return percpu_counter_read_positive(sk->sk_cgrp->sockets_allocated);
+
+	return percpu_counter_read_positive(prot->sockets_allocated);
+}
+
+static inline int
+proto_sockets_allocated_sum_positive(struct proto *prot)
+{
+	return percpu_counter_sum_positive(prot->sockets_allocated);
+}
+
+static inline long
+proto_memory_allocated(struct proto *prot)
+{
+	return atomic_long_read(prot->memory_allocated);
+}
+
+static inline bool
+proto_memory_pressure(struct proto *prot)
+{
+	if (!prot->memory_pressure)
+		return false;
+	return !!*prot->memory_pressure;
+}
+
+
+#ifdef CONFIG_PROC_FS
+/* Called with local bh disabled */
+extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
+extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
+#else
+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
+		int inc)
+{
+}
+#endif
+
+
+/* With per-bucket locks this operation is non-atomic, so this
+ * version is no worse.
+ */
+static inline void __sk_prot_rehash(struct sock *sk)
+{
+	sk->sk_prot->unhash(sk);
+	sk->sk_prot->hash(sk);
+}
+
+void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);
+
+/* About 10 seconds */
+#define SOCK_DESTROY_TIME (10*HZ)
+
+/* Sockets 0-1023 can't be bound to unless you are superuser */
+#define PROT_SOCK	1024
+
+#define SHUTDOWN_MASK	3
+#define RCV_SHUTDOWN	1
+#define SEND_SHUTDOWN	2
+
+#define SOCK_SNDBUF_LOCK	1
+#define SOCK_RCVBUF_LOCK	2
+#define SOCK_BINDADDR_LOCK	4
+#define SOCK_BINDPORT_LOCK	8
+
+/* sock_iocb: used to kick off async processing of socket ios */
+struct sock_iocb {
+	struct list_head	list;
+
+	int			flags;
+	int			size;
+	struct socket		*sock;
+	struct sock		*sk;
+	struct scm_cookie	*scm;
+	struct msghdr		*msg, async_msg;
+	struct kiocb		*kiocb;
+};
+
+static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
+{
+	return (struct sock_iocb *)iocb->private;
+}
+
+static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
+{
+	return si->kiocb;
+}
+
+struct socket_alloc {
+	struct socket socket;
+	struct inode vfs_inode;
+};
+
+static inline struct socket *SOCKET_I(struct inode *inode)
+{
+	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
+}
+
+static inline struct inode *SOCK_INODE(struct socket *socket)
+{
+	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
+}
+
+/*
+ * Functions for memory accounting
+ */
+extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
+extern void __sk_mem_reclaim(struct sock *sk);
+
+#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
+#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
+#define SK_MEM_SEND	0
+#define SK_MEM_RECV	1
+
+static inline int sk_mem_pages(int amt)
+{
+	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
+}
+
+static inline int sk_has_account(struct sock *sk)
+{
+	/* return true if protocol supports memory accounting */
+	return !!sk->sk_prot->memory_allocated;
+}
+
+static inline int sk_wmem_schedule(struct sock *sk, int size)
+{
+	if (!sk_has_account(sk))
+		return 1;
+	return size <= sk->sk_forward_alloc ||
+		__sk_mem_schedule(sk, size, SK_MEM_SEND);
+}
+
+static inline int sk_rmem_schedule(struct sock *sk, int size)
+{
+	if (!sk_has_account(sk))
+		return 1;
+	return size <= sk->sk_forward_alloc ||
+		__sk_mem_schedule(sk, size, SK_MEM_RECV);
+}
+
+static inline void sk_mem_reclaim(struct sock *sk)
+{
+	if (!sk_has_account(sk))
+		return;
+	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
+		__sk_mem_reclaim(sk);
+}
+
+static inline void sk_mem_reclaim_partial(struct sock *sk)
+{
+	if (!sk_has_account(sk))
+		return;
+	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
+		__sk_mem_reclaim(sk);
+}
+
+static inline void sk_mem_charge(struct sock *sk, int size)
+{
+	if (!sk_has_account(sk))
+		return;
+	sk->sk_forward_alloc -= size;
+}
+
+static inline void sk_mem_uncharge(struct sock *sk, int size)
+{
+	if (!sk_has_account(sk))
+		return;
+	sk->sk_forward_alloc += size;
+}
+
+static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
+{
+	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+	sk->sk_wmem_queued -= skb->truesize;
+	sk_mem_uncharge(sk, skb->truesize);
+	__kfree_skb(skb);
+}
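+
+/*
+ * Usage sketch (illustrative only): receive-side accounting pairs an
+ * sk_rmem_schedule() check with a charge when the skb is queued, and a
+ * reclaim once buffers have been freed:
+ *
+ *	if (!sk_rmem_schedule(sk, skb->truesize))
+ *		return -ENOBUFS;
+ *	skb_set_owner_r(skb, sk);	-- calls sk_mem_charge()
+ *	... later, after the queue drains ...
+ *	sk_mem_reclaim(sk);		-- gives whole quanta back
+ */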
+
+/* Used by processes to "lock" a socket state, so that
+ * interrupts and bottom half handlers won't change it
+ * from under us. It essentially blocks any incoming
+ * packets, so that we won't get any new data or any
+ * packets that change the state of the socket.
+ *
+ * While locked, BH processing will add new packets to
+ * the backlog queue.  This queue is processed by the
+ * owner of the socket lock right before it is released.
+ *
+ * Since ~2.3.5 it is also exclusive sleep lock serializing
+ * accesses from user process context.
+ */
+#define sock_owned_by_user(sk)	((sk)->sk_lock.owned)
+
+/*
+ * Macro so as to not evaluate some arguments when
+ * lockdep is not enabled.
+ *
+ * Mark both the sk_lock and the sk_lock.slock as a
+ * per-address-family lock class.
+ */
+#define sock_lock_init_class_and_name(sk, sname, skey, name, key) 	\
+do {									\
+	sk->sk_lock.owned = 0;						\
+	init_waitqueue_head(&sk->sk_lock.wq);				\
+	spin_lock_init(&(sk)->sk_lock.slock);				\
+	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
+			sizeof((sk)->sk_lock));				\
+	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
+		       	(skey), (sname));				\
+	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
+} while (0)
+
+extern void lock_sock_nested(struct sock *sk, int subclass);
+
+static inline void lock_sock(struct sock *sk)
+{
+	lock_sock_nested(sk, 0);
+}
+
+extern void release_sock(struct sock *sk);
+
+/* BH context may only use the following locking interface. */
+#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
+#define bh_lock_sock_nested(__sk) \
+				spin_lock_nested(&((__sk)->sk_lock.slock), \
+				SINGLE_DEPTH_NESTING)
+#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
+
+extern bool lock_sock_fast(struct sock *sk);
+/**
+ * unlock_sock_fast - complement of lock_sock_fast
+ * @sk: socket
+ * @slow: slow mode
+ *
+ * fast unlock socket for user context.
+ * If slow mode is on, we call regular release_sock()
+ */
+static inline void unlock_sock_fast(struct sock *sk, bool slow)
+{
+	if (slow)
+		release_sock(sk);
+	else
+		spin_unlock_bh(&sk->sk_lock.slock);
+}
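+
+/*
+ * Usage sketch (illustrative only): lock_sock_fast() takes only the slock
+ * when the socket is not owned by a user context; it must always be paired
+ * with unlock_sock_fast() using the returned slow-path flag:
+ *
+ *	bool slow = lock_sock_fast(sk);
+ *	... short critical section ...
+ *	unlock_sock_fast(sk, slow);
+ */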
+
+
+extern struct sock		*sk_alloc(struct net *net, int family,
+					  gfp_t priority,
+					  struct proto *prot);
+extern void			sk_free(struct sock *sk);
+extern void			sk_release_kernel(struct sock *sk);
+extern struct sock		*sk_clone_lock(const struct sock *sk,
+					       const gfp_t priority);
+
+extern struct sk_buff		*sock_wmalloc(struct sock *sk,
+					      unsigned long size, int force,
+					      gfp_t priority);
+extern struct sk_buff		*sock_rmalloc(struct sock *sk,
+					      unsigned long size, int force,
+					      gfp_t priority);
+extern void			sock_wfree(struct sk_buff *skb);
+extern void			sock_rfree(struct sk_buff *skb);
+
+extern int			sock_setsockopt(struct socket *sock, int level,
+						int op, char __user *optval,
+						unsigned int optlen);
+
+extern int			sock_getsockopt(struct socket *sock, int level,
+						int op, char __user *optval, 
+						int __user *optlen);
+extern struct sk_buff 		*sock_alloc_send_skb(struct sock *sk,
+						     unsigned long size,
+						     int noblock,
+						     int *errcode);
+extern struct sk_buff 		*sock_alloc_send_pskb(struct sock *sk,
+						      unsigned long header_len,
+						      unsigned long data_len,
+						      int noblock,
+						      int *errcode);
+extern void *sock_kmalloc(struct sock *sk, int size,
+			  gfp_t priority);
+extern void sock_kfree_s(struct sock *sk, void *mem, int size);
+extern void sk_send_sigurg(struct sock *sk);
+
+#ifdef CONFIG_CGROUPS
+extern void sock_update_classid(struct sock *sk);
+#else
+static inline void sock_update_classid(struct sock *sk)
+{
+}
+#endif
+
+/*
+ * Functions to fill in entries in struct proto_ops when a protocol
+ * does not implement a particular function.
+ */
+extern int                      sock_no_bind(struct socket *, 
+					     struct sockaddr *, int);
+extern int                      sock_no_connect(struct socket *,
+						struct sockaddr *, int, int);
+extern int                      sock_no_socketpair(struct socket *,
+						   struct socket *);
+extern int                      sock_no_accept(struct socket *,
+					       struct socket *, int);
+extern int                      sock_no_getname(struct socket *,
+						struct sockaddr *, int *, int);
+extern unsigned int             sock_no_poll(struct file *, struct socket *,
+					     struct poll_table_struct *);
+extern int                      sock_no_ioctl(struct socket *, unsigned int,
+					      unsigned long);
+extern int			sock_no_listen(struct socket *, int);
+extern int                      sock_no_shutdown(struct socket *, int);
+extern int			sock_no_getsockopt(struct socket *, int , int,
+						   char __user *, int __user *);
+extern int			sock_no_setsockopt(struct socket *, int, int,
+						   char __user *, unsigned int);
+extern int                      sock_no_sendmsg(struct kiocb *, struct socket *,
+						struct msghdr *, size_t);
+extern int                      sock_no_recvmsg(struct kiocb *, struct socket *,
+						struct msghdr *, size_t, int);
+extern int			sock_no_mmap(struct file *file,
+					     struct socket *sock,
+					     struct vm_area_struct *vma);
+extern ssize_t			sock_no_sendpage(struct socket *sock,
+						struct page *page,
+						int offset, size_t size, 
+						int flags);
+
+/*
+ * Functions to fill in entries in struct proto_ops when a protocol
+ * uses the inet style.
+ */
+extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
+				  char __user *optval, int __user *optlen);
+extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
+			       struct msghdr *msg, size_t size, int flags);
+extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
+				  char __user *optval, unsigned int optlen);
+extern int compat_sock_common_getsockopt(struct socket *sock, int level,
+		int optname, char __user *optval, int __user *optlen);
+extern int compat_sock_common_setsockopt(struct socket *sock, int level,
+		int optname, char __user *optval, unsigned int optlen);
+
+extern void sk_common_release(struct sock *sk);
+
+/*
+ *	Default socket callbacks and setup code
+ */
+ 
+/* Initialise core socket variables */
+extern void sock_init_data(struct socket *sock, struct sock *sk);
+
+extern void sk_filter_release_rcu(struct rcu_head *rcu);
+
+/**
+ *	sk_filter_release - release a socket filter
+ *	@fp: filter to remove
+ *
+ *	Remove a filter from a socket and release its resources.
+ */
+
+static inline void sk_filter_release(struct sk_filter *fp)
+{
+	if (atomic_dec_and_test(&fp->refcnt))
+		call_rcu(&fp->rcu, sk_filter_release_rcu);
+}
+
+static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
+{
+	unsigned int size = sk_filter_len(fp);
+
+	atomic_sub(size, &sk->sk_omem_alloc);
+	sk_filter_release(fp);
+}
+
+static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
+{
+	atomic_inc(&fp->refcnt);
+	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
+}
+
+/*
+ * Socket reference counting postulates.
+ *
+ * * Each user of socket SHOULD hold a reference count.
+ * * Each access point to a socket (a hash table bucket, a reference from a
+ *   list, a running timer, an skb in flight) MUST hold a reference count.
+ * * When reference count hits 0, it means it will never increase back.
+ * * When reference count hits 0, it means that no references from
+ *   outside exist to this socket and current process on current CPU
+ *   is last user and may/should destroy this socket.
+ * * sk_free is called from any context: process, BH, IRQ. When
+ *   it is called, socket has no references from outside -> sk_free
+ *   may release descendant resources allocated by the socket, but
+ *   by the time it is called, the socket is NOT referenced by any
+ *   hash tables, lists etc.
+ * * Packets, delivered from outside (from network or from another process)
+ *   and enqueued on receive/error queues SHOULD NOT grab a reference count
+ *   while they sit in a queue. Otherwise, packets will leak into a hole when
+ *   the socket is looked up by one cpu and unhashing is done by another cpu.
+ *   This is true for udp/raw, netlink (leak to receive and error queues), tcp
+ *   (leak to backlog). Packet socket does all the processing inside
+ *   BR_NETPROTO_LOCK, so it does not have this race condition. UNIX sockets
+ *   use a separate SMP lock, so they are not subject to it either.
+ */
+
+/* Ungrab socket and destroy it, if it was the last reference. */
+//implementation moved to sock.c so that in-house code can be added
+void sock_put(struct sock *sk);
+#if 0
+static inline void sock_put(struct sock *sk)
+{
+	if (atomic_dec_and_test(&sk->sk_refcnt))
+		sk_free(sk);
+}
+#endif
+extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
+			  const int nested);
+
+static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
+{
+	sk->sk_tx_queue_mapping = tx_queue;
+}
+
+static inline void sk_tx_queue_clear(struct sock *sk)
+{
+	sk->sk_tx_queue_mapping = -1;
+}
+
+static inline int sk_tx_queue_get(const struct sock *sk)
+{
+	return sk ? sk->sk_tx_queue_mapping : -1;
+}
+
+static inline void sk_set_socket(struct sock *sk, struct socket *sock)
+{
+	sk_tx_queue_clear(sk);
+	sk->sk_socket = sock;
+}
+
+static inline wait_queue_head_t *sk_sleep(struct sock *sk)
+{
+	BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
+	return &rcu_dereference_raw(sk->sk_wq)->wait;
+}
+/* Detach socket from process context.
+ * Announce socket dead, detach it from wait queue and inode.
+ * Note that parent inode held reference count on this struct sock,
+ * we do not release it in this function, because protocol
+ * probably wants some additional cleanups or even continuing
+ * to work with this socket (TCP).
+ */
+static inline void sock_orphan(struct sock *sk)
+{
+	write_lock_bh(&sk->sk_callback_lock);
+	sock_set_flag(sk, SOCK_DEAD);
+	sk_set_socket(sk, NULL);
+	sk->sk_wq  = NULL;
+	write_unlock_bh(&sk->sk_callback_lock);
+}
+
+static inline void sock_graft(struct sock *sk, struct socket *parent)
+{
+	write_lock_bh(&sk->sk_callback_lock);
+	sk->sk_wq = parent->wq;
+	parent->sk = sk;
+	sk_set_socket(sk, parent);
+	security_sock_graft(sk, parent);
+	write_unlock_bh(&sk->sk_callback_lock);
+}
+
+extern int sock_i_uid(struct sock *sk);
+extern unsigned long sock_i_ino(struct sock *sk);
+
+static inline struct dst_entry *
+__sk_dst_get(struct sock *sk)
+{
+	return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) ||
+						       lockdep_is_held(&sk->sk_lock.slock));
+}
+
+static inline struct dst_entry *
+sk_dst_get(struct sock *sk)
+{
+	struct dst_entry *dst;
+
+	rcu_read_lock();
+	dst = rcu_dereference(sk->sk_dst_cache);
+	if (dst)
+		dst_hold(dst);
+	rcu_read_unlock();
+	return dst;
+}
+
+extern void sk_reset_txq(struct sock *sk);
+
+static inline void dst_negative_advice(struct sock *sk)
+{
+	struct dst_entry *ndst, *dst = __sk_dst_get(sk);
+
+	if (dst && dst->ops->negative_advice) {
+		ndst = dst->ops->negative_advice(dst);
+
+		if (ndst != dst) {
+			rcu_assign_pointer(sk->sk_dst_cache, ndst);
+			sk_reset_txq(sk);
+		}
+	}
+}
+
+static inline void
+__sk_dst_set(struct sock *sk, struct dst_entry *dst)
+{
+	struct dst_entry *old_dst;
+
+	sk_tx_queue_clear(sk);
+	/*
+	 * This can be called while sk is owned by the caller only,
+	 * with no state that can be checked in a rcu_dereference_check() cond
+	 */
+	old_dst = rcu_dereference_raw(sk->sk_dst_cache);
+	rcu_assign_pointer(sk->sk_dst_cache, dst);
+	dst_release(old_dst);
+}
+
+static inline void
+sk_dst_set(struct sock *sk, struct dst_entry *dst)
+{
+	spin_lock(&sk->sk_dst_lock);
+	__sk_dst_set(sk, dst);
+	spin_unlock(&sk->sk_dst_lock);
+}
+
+static inline void
+__sk_dst_reset(struct sock *sk)
+{
+	__sk_dst_set(sk, NULL);
+}
+
+static inline void
+sk_dst_reset(struct sock *sk)
+{
+	spin_lock(&sk->sk_dst_lock);
+	__sk_dst_reset(sk);
+	spin_unlock(&sk->sk_dst_lock);
+}
+
+extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
+
+extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
+
+static inline int sk_can_gso(const struct sock *sk)
+{
+	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
+}
+
+extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
+
+static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
+{
+	sk->sk_route_nocaps |= flags;
+	sk->sk_route_caps &= ~flags;
+}
+
+static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
+					   char __user *from, char *to,
+					   int copy, int offset)
+{
+	if (skb->ip_summed == CHECKSUM_NONE) {
+		int err = 0;
+		__wsum csum = csum_and_copy_from_user(from, to, copy, 0, &err);
+		if (err)
+			return err;
+		skb->csum = csum_block_add(skb->csum, csum, offset);
+	} else if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY) {
+		if (!access_ok(VERIFY_READ, from, copy) ||
+		    __copy_from_user_nocache(to, from, copy))
+			return -EFAULT;
+	} else if (copy_from_user(to, from, copy))
+		return -EFAULT;
+
+	return 0;
+}
+
+static inline int skb_add_data_nocache(struct sock *sk, struct sk_buff *skb,
+				       char __user *from, int copy)
+{
+	int err, offset = skb->len;
+
+	err = skb_do_copy_data_nocache(sk, skb, from, skb_put(skb, copy),
+				       copy, offset);
+	if (err)
+		__skb_trim(skb, offset);
+
+	return err;
+}
+
+static inline int skb_copy_to_page_nocache(struct sock *sk, char __user *from,
+					   struct sk_buff *skb,
+					   struct page *page,
+					   int off, int copy)
+{
+	int err;
+
+	err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off,
+				       copy, skb->len);
+	if (err)
+		return err;
+
+	skb->len	     += copy;
+	skb->data_len	     += copy;
+	skb->truesize	     += copy;
+	sk->sk_wmem_queued   += copy;
+	sk_mem_charge(sk, copy);
+	return 0;
+}
+
+static inline int skb_copy_to_page(struct sock *sk, char __user *from,
+				   struct sk_buff *skb, struct page *page,
+				   int off, int copy)
+{
+	if (skb->ip_summed == CHECKSUM_NONE) {
+		int err = 0;
+		__wsum csum = csum_and_copy_from_user(from,
+						     page_address(page) + off,
+							    copy, 0, &err);
+		if (err)
+			return err;
+		skb->csum = csum_block_add(skb->csum, csum, skb->len);
+	} else if (copy_from_user(page_address(page) + off, from, copy))
+		return -EFAULT;
+
+	skb->len	     += copy;
+	skb->data_len	     += copy;
+	skb->truesize	     += copy;
+	sk->sk_wmem_queued   += copy;
+	sk_mem_charge(sk, copy);
+	return 0;
+}
+
+/**
+ * sk_wmem_alloc_get - returns write allocations
+ * @sk: socket
+ *
+ * Returns sk_wmem_alloc minus initial offset of one
+ */
+static inline int sk_wmem_alloc_get(const struct sock *sk)
+{
+	return atomic_read(&sk->sk_wmem_alloc) - 1;
+}
+
+/**
+ * sk_rmem_alloc_get - returns read allocations
+ * @sk: socket
+ *
+ * Returns sk_rmem_alloc
+ */
+static inline int sk_rmem_alloc_get(const struct sock *sk)
+{
+	return atomic_read(&sk->sk_rmem_alloc);
+}
+
+/**
+ * sk_has_allocations - check if allocations are outstanding
+ * @sk: socket
+ *
+ * Returns true if socket has write or read allocations
+ */
+static inline int sk_has_allocations(const struct sock *sk)
+{
+	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
+}
+
+/**
+ * wq_has_sleeper - check if there are any waiting processes
+ * @wq: struct socket_wq
+ *
+ * Returns true if socket_wq has waiting processes
+ *
+ * The purpose of the wq_has_sleeper and sock_poll_wait is to wrap the memory
+ * barrier call. They were added due to the race found within the tcp code.
+ *
+ * Consider following tcp code paths:
+ *
+ * CPU1                  CPU2
+ *
+ * sys_select            receive packet
+ *   ...                 ...
+ *   __add_wait_queue    update tp->rcv_nxt
+ *   ...                 ...
+ *   tp->rcv_nxt check   sock_def_readable
+ *   ...                 {
+ *   schedule               rcu_read_lock();
+ *                          wq = rcu_dereference(sk->sk_wq);
+ *                          if (wq && waitqueue_active(&wq->wait))
+ *                              wake_up_interruptible(&wq->wait)
+ *                          ...
+ *                       }
+ *
+ * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
+ * in its cache, and so does the tp->rcv_nxt update on the CPU2 side. CPU1
+ * could then end up calling schedule and sleeping forever if there is no
+ * more data on the socket.
+ *
+ */
+static inline bool wq_has_sleeper(struct socket_wq *wq)
+{
+
+	/*
+	 * We need to be sure we are in sync with the
+	 * add_wait_queue modifications to the wait queue.
+	 *
+	 * This memory barrier is paired in the sock_poll_wait.
+	 */
+	smp_mb();
+	return wq && waitqueue_active(&wq->wait);
+}
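+
+/*
+ * Usage sketch (illustrative only): wakeup callbacks pair wq_has_sleeper()
+ * with an rcu read-side section, in the style of sock_def_readable():
+ *
+ *	struct socket_wq *wq;
+ *
+ *	rcu_read_lock();
+ *	wq = rcu_dereference(sk->sk_wq);
+ *	if (wq_has_sleeper(wq))
+ *		wake_up_interruptible_sync_poll(&wq->wait, POLLIN);
+ *	rcu_read_unlock();
+ */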
+
+/**
+ * sock_poll_wait - place memory barrier behind the poll_wait call.
+ * @filp:           file
+ * @wait_address:   socket wait queue
+ * @p:              poll_table
+ *
+ * See the comments in the wq_has_sleeper function.
+ */
+static inline void sock_poll_wait(struct file *filp,
+		wait_queue_head_t *wait_address, poll_table *p)
+{
+	if (!poll_does_not_wait(p) && wait_address) {
+		poll_wait(filp, wait_address, p);
+		/*
+		 * We need to be sure we are in sync with the
+		 * socket flags modification.
+		 *
+		 * This memory barrier is paired in the wq_has_sleeper.
+		*/
+		smp_mb();
+	}
+}
+
+/*
+ * 	Queue a received datagram if it will fit. Stream and sequenced
+ *	protocols can't normally use this as they need to fit buffers in
+ *	and play with them.
+ *
+ * 	Inlined as it's very short and called for pretty much every
+ *	packet ever received.
+ */
+
+static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
+{
+	skb_orphan(skb);
+	skb->sk = sk;
+	skb->destructor = sock_wfree;
+	/*
+	 * We used to take a refcount on sk, but the following operation
+	 * is enough to guarantee sk_free() won't free this sock until
+	 * all in-flight packets are completed.
+	 */
+	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
+}
+
+static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
+{
+	skb_orphan(skb);
+	skb->sk = sk;
+	skb->destructor = sock_rfree;
+	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+	sk_mem_charge(sk, skb->truesize);
+}
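+
+/*
+ * A minimal receive-path sketch (loosely following sock_queue_rcv_skb,
+ * illustrative): charge the skb to the socket, queue it, then notify
+ * any reader.
+ *
+ *	skb_set_owner_r(skb, sk);
+ *	__skb_queue_tail(&sk->sk_receive_queue, skb);
+ *	if (!sock_flag(sk, SOCK_DEAD))
+ *		sk->sk_data_ready(sk, skb->len);
+ */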
+
+extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
+			   unsigned long expires);
+
+extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);
+
+extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
+
+extern int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb);
+
+/*
+ *	Recover an error report and clear atomically
+ */
+
+static inline int sock_error(struct sock *sk)
+{
+	int err;
+	if (likely(!sk->sk_err))
+		return 0;
+	err = xchg(&sk->sk_err, 0);
+	return -err;
+}
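+
+/*
+ * A minimal usage sketch (illustrative): transmit and wait loops
+ * typically bail out early on a pending, not-yet-reported error.
+ *
+ *	err = sock_error(sk);
+ *	if (err)
+ *		goto out_err;
+ */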
+
+static inline unsigned long sock_wspace(struct sock *sk)
+{
+	int amt = 0;
+
+	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
+		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
+		if (amt < 0)
+			amt = 0;
+	}
+	return amt;
+}
+
+static inline void sk_wake_async(struct sock *sk, int how, int band)
+{
+	if (sock_flag(sk, SOCK_FASYNC))
+		sock_wake_async(sk->sk_socket, how, band);
+}
+
+#define SOCK_MIN_SNDBUF 2048
+/*
+ * Since sk_rmem_alloc sums skb->truesize, even a small frame might need
+ * sizeof(sk_buff) + MTU + padding, unless the net driver performs copybreak.
+ */
+#define SOCK_MIN_RCVBUF (2048 + sizeof(struct sk_buff))
+
+static inline void sk_stream_moderate_sndbuf(struct sock *sk)
+{
+	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
+		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
+	}
+}
+
+extern struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size,
+					   gfp_t gfp);
+
+static inline struct page *sk_stream_alloc_page(struct sock *sk)
+{
+	struct page *page;
+
+	page = alloc_pages(sk->sk_allocation, 0);
+	if (!page) {
+		sk_enter_memory_pressure(sk);
+		sk_stream_moderate_sndbuf(sk);
+		return NULL;
+	}
+	netslab_inc(SOCK_ALLOC_PAGES);
+	return page;
+}
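+
+/*
+ * A minimal send-path sketch (loosely following tcp_sendmsg, labels
+ * illustrative): on allocation failure the helper has already flagged
+ * memory pressure and moderated sk_sndbuf, so the caller only waits.
+ *
+ *	page = sk_stream_alloc_page(sk);
+ *	if (!page)
+ *		goto wait_for_memory;
+ *	err = skb_copy_to_page(sk, from, skb, page, 0, copy);
+ */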
+
+/*
+ *	Default write policy as shown to user space via poll/select/SIGIO
+ */
+static inline int sock_writeable(const struct sock *sk)
+{
+	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
+}
+
+static inline gfp_t gfp_any(void)
+{
+	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
+}
+
+static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
+{
+	return noblock ? 0 : sk->sk_rcvtimeo;
+}
+
+static inline long sock_sndtimeo(const struct sock *sk, int noblock)
+{
+	return noblock ? 0 : sk->sk_sndtimeo;
+}
+
+static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
+{
+	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
+}
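+
+/*
+ * A minimal recvmsg prologue sketch (in the style of tcp_recvmsg):
+ * derive the blocking timeout and the low-water target from the
+ * caller's flags.
+ *
+ *	timeo  = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ *	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
+ */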
+
+/* Alas, socket operations with a timeout are not restartable.
+ * Compare this to poll().
+ */
+static inline int sock_intr_errno(long timeo)
+{
+	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
+}
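+
+/*
+ * A minimal wait-loop sketch (illustrative): an untimed wait reports
+ * -ERESTARTSYS so the syscall can be transparently restarted, while a
+ * timed wait reports -EINTR because the remaining timeout cannot be
+ * reconstructed on restart.
+ *
+ *	if (signal_pending(current))
+ *		return sock_intr_errno(timeo);
+ */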
+
+extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+	struct sk_buff *skb);
+extern void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
+	struct sk_buff *skb);
+
+static __inline__ void
+sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
+{
+	ktime_t kt = skb->tstamp;
+	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+
+	/*
+	 * generate control messages if
+	 * - receive time stamping in software requested (SOCK_RCVTSTAMP
+	 *   or SOCK_TIMESTAMPING_RX_SOFTWARE)
+	 * - software time stamp available and wanted
+	 *   (SOCK_TIMESTAMPING_SOFTWARE)
+	 * - hardware time stamps available and wanted
+	 *   (SOCK_TIMESTAMPING_SYS_HARDWARE or
+	 *   SOCK_TIMESTAMPING_RAW_HARDWARE)
+	 */
+	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
+	    sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE) ||
+	    (kt.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) ||
+	    (hwtstamps->hwtstamp.tv64 &&
+	     sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) ||
+	    (hwtstamps->syststamp.tv64 &&
+	     sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE)))
+		__sock_recv_timestamp(msg, sk, skb);
+	else
+		sk->sk_stamp = kt;
+
+	if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
+		__sock_recv_wifi_status(msg, sk, skb);
+}
+
+extern void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
+				     struct sk_buff *skb);
+
+static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
+					  struct sk_buff *skb)
+{
+#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL)			| \
+			   (1UL << SOCK_RCVTSTAMP)			| \
+			   (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)	| \
+			   (1UL << SOCK_TIMESTAMPING_SOFTWARE)		| \
+			   (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE)	| \
+			   (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))
+
+	if (sk->sk_flags & FLAGS_TS_OR_DROPS)
+		__sock_recv_ts_and_drops(msg, sk, skb);
+	else
+		sk->sk_stamp = skb->tstamp;
+}
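+
+/*
+ * A minimal datagram recvmsg sketch (loosely following udp_recvmsg,
+ * illustrative): after copying the payload, emit any requested
+ * timestamp and drop-count control messages in one call.
+ *
+ *	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ *	if (!err)
+ *		sock_recv_ts_and_drops(msg, sk, skb);
+ */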
+
+/**
+ * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
+ * @sk:		socket sending this packet
+ * @tx_flags:	filled with instructions for time stamping
+ *
+ * Currently only depends on SOCK_TIMESTAMPING* flags. Returns error code if
+ * parameters are invalid.
+ */
+extern int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags);
+
+/**
+ * sk_eat_skb - Release a skb if it is no longer needed
+ * @sk: socket to eat this skb from
+ * @skb: socket buffer to eat
+ * @copied_early: flag indicating whether DMA operations copied this data early
+ *
+ * This routine must be called with interrupts disabled or with the socket
+ * locked so that the sk_buff queue operation is safe.
+ */
+#ifdef CONFIG_NET_DMA
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
+{
+	__skb_unlink(skb, &sk->sk_receive_queue);
+	if (!copied_early)
+		__kfree_skb(skb);
+	else
+		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
+}
+#else
+static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
+{
+	__skb_unlink(skb, &sk->sk_receive_queue);
+	__kfree_skb(skb);
+}
+#endif
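+
+/*
+ * A minimal stream recvmsg sketch (in the style of tcp_recvmsg,
+ * illustrative; caller holds the socket lock): once an skb has been
+ * fully consumed, unlink and free it.
+ *
+ *	if (used == skb->len)
+ *		sk_eat_skb(sk, skb, 0);
+ */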
+
+static inline
+struct net *sock_net(const struct sock *sk)
+{
+	return read_pnet(&sk->sk_net);
+}
+
+static inline
+void sock_net_set(struct sock *sk, struct net *net)
+{
+	write_pnet(&sk->sk_net, net);
+}
+
+/*
+ * Kernel sockets, e.g. rtnl or icmp_socket, are part of a namespace.
+ * They should not hold a reference to the namespace, so that the
+ * namespace can be stopped.
+ * Sockets moved with sk_change_net should be released using
+ * sk_release_kernel.
+ */
+static inline void sk_change_net(struct sock *sk, struct net *net)
+{
+	put_net(sock_net(sk));
+	sock_net_set(sk, hold_net(net));
+}
+
+static inline struct sock *skb_steal_sock(struct sk_buff *skb)
+{
+	if (unlikely(skb->sk)) {
+		struct sock *sk = skb->sk;
+
+		skb->destructor = NULL;
+		skb->sk = NULL;
+		return sk;
+	}
+	return NULL;
+}
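+
+/*
+ * A minimal early-demux sketch (example_lookup is hypothetical): a
+ * receive path first takes any socket already attached to the skb and
+ * falls back to a full lookup only when none was stashed.
+ *
+ *	sk = skb_steal_sock(skb);
+ *	if (!sk)
+ *		sk = example_lookup(net, skb);
+ */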
+
+extern void sock_enable_timestamp(struct sock *sk, int flag);
+extern int sock_get_timestamp(struct sock *, struct timeval __user *);
+extern int sock_get_timestampns(struct sock *, struct timespec __user *);
+
+/*
+ *	Enable debug/info messages
+ */
+extern int net_msg_warn;
+#define NETDEBUG(fmt, args...) \
+	do { if (net_msg_warn) printk(fmt, ##args); } while (0)
+
+#define LIMIT_NETDEBUG(fmt, args...) \
+	do { if (net_msg_warn && net_ratelimit()) printk(fmt, ##args); } while (0)
+
+extern __u32 sysctl_wmem_max;
+extern __u32 sysctl_rmem_max;
+
+extern void sk_init(void);
+
+extern int sysctl_optmem_max;
+
+extern __u32 sysctl_wmem_default;
+extern __u32 sysctl_rmem_default;
+
+#endif	/* _SOCK_H */