/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *              Steve Whitehouse:       Default routines for sock_ops
 *              Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
 *              			protinfo be just a void pointer, as the
 *              			protocol specific parts were moved to
 *              			respective headers and ipv4/v6, etc now
 *              			use private slabcaches for its socks
 *              Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <linux/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline __printf(2, 3)
void SOCK_DEBUG(struct sock *sk, const char *msg, ...)
{
}
#endif
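
/*
 * Usage sketch (illustrative only): protocol code calls SOCK_DEBUG()
 * with the socket as the first argument, e.g.
 *
 *	SOCK_DEBUG(sk, "%s: ack %u\n", __func__, ack_seq);
 *
 * where ack_seq is a hypothetical local variable.  Output appears only
 * when the socket has SOCK_DBG set (the SO_DEBUG socket option).
 */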

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;

struct sock;
struct proto;
struct net;

/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_daddr: Foreign IPv4 addr
 *	@skc_rcv_saddr: Bound local IPv4 addr
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_u16hashes: two u16 hash values used by UDP lookup tables
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
 *	@skc_prot: protocol handlers inside a network family
 *	@skc_net: reference to the network namespace of this socket
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
 *	@skc_tx_queue_mapping: tx queue number for this connection
 *	@skc_refcnt: reference count
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	/* skc_daddr and skc_rcv_saddr must be grouped :
	 * cf INET_MATCH() and INET_TW_MATCH()
	 */
	__be32			skc_daddr;
	__be32			skc_rcv_saddr;

	union  {
		unsigned int	skc_hash;
		__u16		skc_u16hashes[2];
	};
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_bind_node;
		struct hlist_nulls_node skc_portaddr_node;
	};
	struct proto		*skc_prot;
#ifdef CONFIG_NET_NS
	struct net	 	*skc_net;
#endif
	/*
	 * fields between dontcopy_begin/dontcopy_end
	 * are not copied in sock_copy()
	 */
	/* private: */
	int			skc_dontcopy_begin[0];
	/* public: */
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node skc_nulls_node;
	};
	int			skc_tx_queue_mapping;
	atomic_t		skc_refcnt;
	/* private: */
	int                     skc_dontcopy_end[0];
	/* public: */
};

/**
  *	struct sock - network layer representation of sockets
  *	@__sk_common: shared layout with inet_timewait_sock
  *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
  *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
  *	@sk_lock:	synchronizer
  *	@sk_rcvbuf: size of receive buffer in bytes
  *	@sk_wq: sock wait queue and async head
  *	@sk_dst_cache: destination cache
  *	@sk_dst_lock: destination cache lock
  *	@sk_policy: flow policy
  *	@sk_receive_queue: incoming packets
  *	@sk_wmem_alloc: transmit queue bytes committed
  *	@sk_write_queue: Packet sending queue
  *	@sk_async_wait_queue: DMA copied packets
  *	@sk_omem_alloc: "o" is "option" or "other"
  *	@sk_wmem_queued: persistent queue size
  *	@sk_forward_alloc: space allocated forward
  *	@sk_allocation: allocation mode
  *	@sk_sndbuf: size of send buffer in bytes
  *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
  *		   %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
  *	@sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
  *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
  *	@sk_route_nocaps: forbidden route capabilities (e.g. %NETIF_F_GSO_MASK)
  *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
  *	@sk_gso_max_size: Maximum GSO segment size to build
  *	@sk_lingertime: %SO_LINGER l_linger setting
  *	@sk_backlog: always used with the per-socket spinlock held
  *	@sk_callback_lock: used with the callbacks in the end of this struct
  *	@sk_error_queue: rarely used
  *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
  *			  IPV6_ADDRFORM for instance)
  *	@sk_err: last error
  *	@sk_err_soft: errors that don't cause failure but are the cause of a
  *		      persistent failure not just 'timed out'
  *	@sk_drops: raw/udp drops counter
  *	@sk_ack_backlog: current listen backlog
  *	@sk_max_ack_backlog: listen backlog set in listen()
  *	@sk_priority: %SO_PRIORITY setting
  *	@sk_type: socket type (%SOCK_STREAM, etc)
  *	@sk_protocol: which protocol this socket belongs in this network family
  *	@sk_peer_pid: &struct pid for this socket's peer
  *	@sk_peer_cred: %SO_PEERCRED setting
  *	@sk_rcvlowat: %SO_RCVLOWAT setting
  *	@sk_rcvtimeo: %SO_RCVTIMEO setting
  *	@sk_sndtimeo: %SO_SNDTIMEO setting
  *	@sk_rxhash: flow hash received from netif layer
  *	@sk_filter: socket filtering instructions
  *	@sk_protinfo: private area, net family specific, when not using slab
  *	@sk_timer: sock cleanup timer
  *	@sk_stamp: time stamp of last packet received
  *	@sk_socket: Identd and reporting IO signals
  *	@sk_user_data: RPC layer private data
  *	@sk_sndmsg_page: cached page for sendmsg
  *	@sk_sndmsg_off: cached offset for sendmsg
  *	@sk_send_head: front of stuff to transmit
  *	@sk_security: used by security modules
  *	@sk_mark: generic packet mark
  *	@sk_classid: this socket's cgroup classid
  *	@sk_write_pending: a write to stream socket waits to start
  *	@sk_state_change: callback to indicate change in the state of the sock
  *	@sk_data_ready: callback to indicate there is data to be processed
  *	@sk_write_space: callback to indicate there is buffer sending space available
  *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
  *	@sk_backlog_rcv: callback to process the backlog
  *	@sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping

#define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
#define sk_dontcopy_end		__sk_common.skc_dontcopy_end
#define sk_hash			__sk_common.skc_hash
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
	socket_lock_t		sk_lock;
	struct sk_buff_head	sk_receive_queue;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 * Note : rmem_alloc is in this structure to fill a hole
	 * on 64bit arches, not because it's logically part of
	 * backlog.
	 */
	struct {
		atomic_t	rmem_alloc;
		int		len;
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc
	int			sk_forward_alloc;
#ifdef CONFIG_RPS
	__u32			sk_rxhash;
#endif
	atomic_t		sk_drops;
	int			sk_rcvbuf;

	struct sk_filter __rcu	*sk_filter;
	struct socket_wq __rcu	*sk_wq;

#ifdef CONFIG_NET_DMA
	struct sk_buff_head	sk_async_wait_queue;
#endif

#ifdef CONFIG_XFRM
	struct xfrm_policy	*sk_policy[2];
#endif
	unsigned long 		sk_flags;
	struct dst_entry	*sk_dst_cache;
	spinlock_t		sk_dst_lock;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;
	struct sk_buff_head	sk_write_queue;
	kmemcheck_bitfield_begin(flags);
	unsigned int		sk_shutdown  : 2,
				sk_no_check  : 2,
				sk_userlocks : 4,
				sk_protocol  : 8,
				sk_type      : 16;
	kmemcheck_bitfield_end(flags);
	int			sk_wmem_queued;
	gfp_t			sk_allocation;
	int			sk_route_caps;
	int			sk_route_nocaps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	int			sk_rcvlowat;
	unsigned long	        sk_lingertime;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct pid		*sk_peer_pid;
	const struct cred	*sk_peer_cred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	ktime_t			sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	__u32			sk_mark;
	u32			sk_classid;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void                    (*sk_destruct)(struct sock *sk);
};

/*
 * Hashed lists helper routines
 */
static inline struct sock *sk_entry(const struct hlist_node *node)
{
	return hlist_entry(node, struct sock, sk_node);
}

static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline int sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

/* NB: equivalent to hlist_del_init_rcu */
static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed, e.g. it is found in a hash table
   or a list and the lookup is made under a lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab socket in a context which assumes that the socket refcnt
   cannot hit zero, e.g. this holds in the context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)

static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return 1;
	}
	return 0;
}

static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
{
	int rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	hlist_add_head_rcu(&sk->sk_node, list);
}

static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_rcu(__sk, node, list) \
	hlist_for_each_entry_rcu(__sk, node, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)
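
/*
 * Iteration sketch: a lookup typically walks one hash chain under the
 * table lock; head, lock and hash below are hypothetical:
 *
 *	struct sock *s;
 *	struct hlist_node *n;
 *
 *	read_lock(&lock);
 *	sk_for_each(s, n, &head)
 *		if (s->sk_hash == hash)
 *			break;
 *	read_unlock(&lock);
 */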

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
	SOCK_TIMESTAMPING_TX_HARDWARE,  /* %SOF_TIMESTAMPING_TX_HARDWARE */
	SOCK_TIMESTAMPING_TX_SOFTWARE,  /* %SOF_TIMESTAMPING_TX_SOFTWARE */
	SOCK_TIMESTAMPING_RX_HARDWARE,  /* %SOF_TIMESTAMPING_RX_HARDWARE */
	SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
	SOCK_TIMESTAMPING_SOFTWARE,     /* %SOF_TIMESTAMPING_SOFTWARE */
	SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
	SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
	SOCK_FASYNC, /* fasync() active */
	SOCK_RXQ_OVFL,
	SOCK_ZEROCOPY, /* buffers from userspace */
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}
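
/*
 * Example: flags are tested and changed with the helpers above, e.g.
 *
 *	if (sock_flag(sk, SOCK_DEAD))
 *		return;
 *	sock_set_flag(sk, SOCK_DONE);
 *
 * Note that __set_bit()/__clear_bit() are not atomic, so writers are
 * expected to be serialized by the caller (typically the socket lock).
 */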

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued >> 1;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	/* make sure skb's dst is refcounted: we are going to leave the rcu lock */
	skb_dst_force(skb);

	if (!sk->sk_backlog.tail)
		sk->sk_backlog.head = skb;
	else
		sk->sk_backlog.tail->next = skb;

	sk->sk_backlog.tail = skb;
	skb->next = NULL;
}

/*
 * Take into account size of receive queue and backlog queue
 */
static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
{
	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

	return qsize + skb->truesize > sk->sk_rcvbuf;
}

/* The per-socket spinlock must be held here. */
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (sk_rcvqueues_full(sk, skb))
		return -ENOBUFS;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	return sk->sk_backlog_rcv(sk, skb);
}
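
/*
 * Receive-path sketch: softirq code takes the per-socket spinlock and
 * either processes the skb directly or defers it to the backlog when a
 * user context owns the lock (the rc handling is illustrative):
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		rc = sk_backlog_rcv(sk, skb);
 *	else if (sk_add_backlog(sk, skb))
 *		rc = -ENOBUFS;
 *	bh_unlock_sock(sk);
 *
 * Queued skbs are run through sk_backlog_rcv() when the lock owner
 * calls release_sock().
 */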

static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_reset_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_save_rxhash(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (unlikely(sk->sk_rxhash != skb->rxhash)) {
		sock_rps_reset_flow(sk);
		sk->sk_rxhash = skb->rxhash;
	}
#endif
}

static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
	sock_rps_reset_flow(sk);
	sk->sk_rxhash = 0;
#endif
}

#define sk_wait_event(__sk, __timeo, __condition)			\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = schedule_timeout(*(__timeo));	\
		}							\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})
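
/*
 * Usage sketch, mirroring sk_wait_data(): the caller holds the socket
 * lock, registers on the socket's wait queue and re-tests its condition
 * via sk_wait_event(); timeo and rc are caller-provided locals:
 *
 *	DEFINE_WAIT(wait);
 *
 *	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 *	rc = sk_wait_event(sk, &timeo,
 *			   !skb_queue_empty(&sk->sk_receive_queue));
 *	finish_wait(sk_sleep(sk), &wait);
 */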

extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;
struct module;

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk, 
					long timeout);
	int			(*connect)(struct sock *sk,
				        struct sockaddr *uaddr, 
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
#ifdef CONFIG_COMPAT
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*compat_ioctl)(struct sock *sk,
					unsigned int cmd, unsigned long arg);
#endif
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					size_t len, int noblock, int flags, 
					int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk, 
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk, 
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	void			(*rehash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);
	void			(*clear_sk)(struct sock *sk, int size);

	/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
	unsigned int		inuse_idx;
#endif

	/* Memory pressure */
	void			(*enter_memory_pressure)(struct sock *sk);
	atomic_long_t		*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All the __sk_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	long			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;
	bool			no_autobind;

	struct kmem_cache	*slab;
	unsigned int		obj_size;
	int			slab_flags;

	struct percpu_counter	*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	union {
		struct inet_hashinfo	*hashinfo;
		struct udp_table	*udp_table;
		struct raw_hashinfo	*raw_hash;
	} h;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
};

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);
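
/*
 * Registration sketch for a hypothetical protocol "foo": fill in a
 * struct proto and register it, optionally requesting a private slab
 * cache (second argument != 0) sized by .obj_size:
 *
 *	static struct proto foo_proto = {
 *		.name	  = "FOO",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct foo_sock),
 *	};
 *
 *	err = proto_register(&foo_proto, 1);
 *	...
 *	proto_unregister(&foo_proto);
 */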

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */


#ifdef CONFIG_PROC_FS
/* Called with local bh disabled */
extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
		int inc)
{
}
#endif


/* With per-bucket locks this operation is not atomic, so this
 * version is no worse.
 */
static inline void __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}

void sk_prot_clear_portaddr_nulls(struct sock *sk, int size);

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Sockets 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct kiocb		*kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

/*
 * Functions for memory accounting
 */
extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
extern void __sk_mem_reclaim(struct sock *sk);

#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
#define SK_MEM_SEND	0
#define SK_MEM_RECV	1

static inline int sk_mem_pages(int amt)
{
	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}
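
/*
 * Example: with 4 KB pages (SK_MEM_QUANTUM == 4096, shift == 12),
 * sk_mem_pages(6000) == (6000 + 4095) >> 12 == 2, i.e. two quanta are
 * charged to cover a 6000-byte allocation.
 */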

static inline int sk_has_account(struct sock *sk)
{
	/* return true if protocol supports memory accounting */
	return !!sk->sk_prot->memory_allocated;
}

static inline int sk_wmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return 1;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_SEND);
}

static inline int sk_rmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return 1;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_RECV);
}
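
/*
 * Accounting sketch (illustrative): before queueing an skb on the send
 * side, a protocol reserves forward-allocated space, then consumes it:
 *
 *	if (!sk_wmem_schedule(sk, skb->truesize))
 *		return -ENOBUFS;
 *	sk_mem_charge(sk, skb->truesize);
 *
 * The space is given back with sk_mem_uncharge() when the skb is freed
 * (see sk_wmem_free_skb() below).
 */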

static inline void sk_mem_reclaim(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);
}

static inline void sk_mem_reclaim_partial(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);
}

static inline void sk_mem_charge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc -= size;
}

static inline void sk_mem_uncharge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc += size;
}

static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued -= skb->truesize;
	sk_mem_uncharge(sk, skb->truesize);
	__kfree_skb(skb);
}

/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also exclusive sleep lock serializing
 * accesses from user process context.
 */
#define sock_owned_by_user(sk)	((sk)->sk_lock.owned)

/*
 * Macro so as to not evaluate some arguments when
 * lockdep is not enabled.
 *
 * Mark both the sk_lock and the sk_lock.slock as a
 * per-address-family lock class.
 */
#define sock_lock_init_class_and_name(sk, sname, skey, name, key) 	\
do {									\
	sk->sk_lock.owned = 0;						\
	init_waitqueue_head(&sk->sk_lock.wq);				\
	spin_lock_init(&(sk)->sk_lock.slock);				\
	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
			sizeof((sk)->sk_lock));				\
	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
		       	(skey), (sname));				\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)

extern void lock_sock_nested(struct sock *sk, int subclass);

static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}

extern void release_sock(struct sock *sk);

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
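
/*
 * Locking sketch: process context uses the mutex-like lock; softirq
 * context uses the spinlock variants above (see sk_add_backlog()):
 *
 *	lock_sock(sk);
 *	... update socket state ...
 *	release_sock(sk);
 *
 * release_sock() also runs any packets that were queued to the backlog
 * while the lock was owned.
 */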

extern bool lock_sock_fast(struct sock *sk);
/**
 * unlock_sock_fast - complement of lock_sock_fast
 * @sk: socket
 * @slow: slow mode
 *
 * fast unlock socket for user context.
 * If slow mode is on, we call regular release_sock()
 */
static inline void unlock_sock_fast(struct sock *sk, bool slow)
{
	if (slow)
		release_sock(sk);
	else
		spin_unlock_bh(&sk->sk_lock.slock);
}
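
/*
 * Usage sketch: lock_sock_fast() returns true when it had to fall back
 * to the full socket lock; hand that value back to unlock_sock_fast():
 *
 *	bool slow = lock_sock_fast(sk);
 *	... short critical section ...
 *	unlock_sock_fast(sk, slow);
 */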


extern struct sock		*sk_alloc(struct net *net, int family,
					  gfp_t priority,
					  struct proto *prot);
extern void			sk_free(struct sock *sk);
extern void			sk_release_kernel(struct sock *sk);
extern struct sock		*sk_clone(const struct sock *sk,
					  const gfp_t priority);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						unsigned int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval, 
						int __user *optlen);
extern struct sk_buff 		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern struct sk_buff 		*sock_alloc_send_pskb(struct sock *sk,
						      unsigned long header_len,
						      unsigned long data_len,
						      int noblock,
						      int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
			  gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

#ifdef CONFIG_CGROUPS
extern void sock_update_classid(struct sock *sk);
#else
static inline void sock_update_classid(struct sock *sk)
{
}
#endif

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int                      sock_no_bind(struct socket *, 
					     struct sockaddr *, int);
extern int                      sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int                      sock_no_socketpair(struct socket *,
						   struct socket *);
extern int                      sock_no_accept(struct socket *,
					       struct socket *, int);
extern int                      sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int             sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int                      sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int                      sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int , int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, unsigned int);
extern int                      sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int                      sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						struct page *page,
						int offset, size_t size, 
						int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen);
extern int compat_sock_common_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen);
extern int compat_sock_common_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, unsigned int optlen);

extern void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */
 
/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

extern void sk_filter_release_rcu(struct rcu_head *rcu);

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}

/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (a hash table bucket, reference from a list,
 *   running timer, skb in flight) MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and current process on current CPU
 *   is last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   to the time when it is called, socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab reference count,
 *   when they sit in queue. Otherwise, packets will leak into a hole when
 *   a socket is looked up by one CPU and unhashing is done by another CPU.
 *   This is true for udp/raw and netlink (which leak to the receive and
 *   error queues) and for tcp (which leaks to the backlog). Packet sockets
 *   do all the processing inside BR_NETPROTO_LOCK, so they do not have
 *   this race condition. UNIX sockets use a separate SMP lock, so they
 *   are prone to it too.
 */

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}
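
/*
 * Lookup sketch following the postulates above (the table and its lock
 * are hypothetical): take a reference while the socket is still
 * reachable, drop it when done:
 *
 *	read_lock(&table_lock);
 *	sk = find_in_table(...);
 *	if (sk)
 *		sock_hold(sk);
 *	read_unlock(&table_lock);
 *
 *	if (sk) {
 *		... use sk ...
 *		sock_put(sk);
 *	}
 *
 * sock_put() drops the reference and frees the socket if it was the
 * last one.
 */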

extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
			  const int nested);

static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
	sk->sk_tx_queue_mapping = tx_queue;
}

static inline void sk_tx_queue_clear(struct sock *sk)
{
	sk->sk_tx_queue_mapping = -1;
}

static inline int sk_tx_queue_get(const struct sock *sk)
{
	return sk ? sk->sk_tx_queue_mapping : -1;
}

static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
	sk_tx_queue_clear(sk);
	sk->sk_socket = sock;
}

static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
	BUILD_BUG_ON(offsetof(struct socket_wq, wait) != 0);
	return &rcu_dereference_raw(sk->sk_wq)->wait;
}
/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that parent inode held reference count on this struct sock,
 * we do not release it in this function, because protocol
 * probably wants some additional cleanups or even continuing
 * to work with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk_set_socket(sk, NULL);
	sk->sk_wq  = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_wq = parent->wq;
	parent->sk = sk;
	sk_set_socket(sk, parent);
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return rcu_dereference_check(sk->sk_dst_cache, sock_owned_by_user(sk) ||
						       lockdep_is_held(&sk->sk_lock.slock));
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = rcu_dereference(sk->sk_dst_cache);