/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *              Steve Whitehouse:       Default routines for sock_ops
 *              Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
 *              			protinfo be just a void pointer, as the
 *              			protocol specific parts were moved to
 *              			respective headers and ipv4/v6, etc now
 *              			use private slabcaches for its socks
 *              Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/page_counter.h>
#include <linux/memcontrol.h>
#include <linux/static_key.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/cgroup-defs.h>

#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <linux/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
#include <net/smc.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline __printf(2, 3)
void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
{
}
#endif
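/* Usage sketch (illustrative only): SOCK_DEBUG() is a printf-style trace
 * helper gated on the SOCK_DBG flag, which userspace enables with
 * setsockopt(SO_DEBUG).  A protocol might emit, for instance:
 *
 *	SOCK_DEBUG(sk, "backlog grew to %d bytes\n", sk->sk_backlog.len);
 *
 * With SOCK_DEBUGGING undefined, the empty inline above still type-checks
 * the format arguments but generates no output.
 */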

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;
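/* Usage sketch (illustrative only): process context takes the mutex-like
 * part of the lock with lock_sock()/release_sock() (declared further down
 * in this header), while softirq receive paths use the spinlock directly
 * and fall back to the backlog when the socket is owned by a user thread:
 *
 *	lock_sock(sk);			// process context, may sleep
 *	... modify protocol state ...
 *	release_sock(sk);		// also replays the backlog queue
 *
 *	bh_lock_sock(sk);		// softirq context
 *	if (!sock_owned_by_user(sk))
 *		... deliver skb directly ...
 *	else
 *		... sk_add_backlog(sk, skb, limit) ...
 *	bh_unlock_sock(sk);
 */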

struct sock;
struct proto;
struct net;

typedef __u32 __bitwise __portpair;
typedef __u64 __bitwise __addrpair;

/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_daddr: Foreign IPv4 addr
 *	@skc_rcv_saddr: Bound local IPv4 addr
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_u16hashes: two u16 hash values used by UDP lookup tables
 *	@skc_dport: placeholder for inet_dport/tw_dport
 *	@skc_num: placeholder for inet_num/tw_num
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_reuseport: %SO_REUSEPORT setting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
 *	@skc_prot: protocol handlers inside a network family
 *	@skc_net: reference to the network namespace of this socket
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
 *	@skc_tx_queue_mapping: tx queue number for this connection
 *	@skc_flags: place holder for sk_flags
 *		%SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
 *		%SO_OOBINLINE settings, %SO_TIMESTAMPING settings
 *	@skc_incoming_cpu: record/match cpu processing incoming packets
 *	@skc_refcnt: reference count
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	/* skc_daddr and skc_rcv_saddr must be grouped on a 8 bytes aligned
	 * address on 64bit arches : cf INET_MATCH()
	 */
	union {
		__addrpair	skc_addrpair;
		struct {
			__be32	skc_daddr;
			__be32	skc_rcv_saddr;
		};
	};
	union  {
		unsigned int	skc_hash;
		__u16		skc_u16hashes[2];
	};
	/* skc_dport && skc_num must be grouped as well */
	union {
		__portpair	skc_portpair;
		struct {
			__be16	skc_dport;
			__u16	skc_num;
		};
	};

	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse:4;
	unsigned char		skc_reuseport:1;
	unsigned char		skc_ipv6only:1;
	unsigned char		skc_net_refcnt:1;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_bind_node;
		struct hlist_node	skc_portaddr_node;
	};
	struct proto		*skc_prot;
	possible_net_t		skc_net;

#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr		skc_v6_daddr;
	struct in6_addr		skc_v6_rcv_saddr;
#endif

	atomic64_t		skc_cookie;

	/* following fields are padding to force
	 * offset(struct sock, sk_refcnt) == 128 on 64bit arches
	 * assuming IPV6 is enabled. We use this padding differently
	 * for different kind of 'sockets'
	 */
	union {
		unsigned long	skc_flags;
		struct sock	*skc_listener; /* request_sock */
		struct inet_timewait_death_row *skc_tw_dr; /* inet_timewait_sock */
	};
	/*
	 * fields between dontcopy_begin/dontcopy_end
	 * are not copied in sock_copy()
	 */
	/* private: */
	int			skc_dontcopy_begin[0];
	/* public: */
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node skc_nulls_node;
	};
	int			skc_tx_queue_mapping;
	union {
		int		skc_incoming_cpu;
		u32		skc_rcv_wnd;
		u32		skc_tw_rcv_nxt; /* struct tcp_timewait_sock  */
	};

	atomic_t		skc_refcnt;
	/* private: */
	int                     skc_dontcopy_end[0];
	union {
		u32		skc_rxhash;
		u32		skc_window_clamp;
		u32		skc_tw_snd_nxt; /* struct tcp_timewait_sock */
	};
	/* public: */
};

/**
  *	struct sock - network layer representation of sockets
  *	@__sk_common: shared layout with inet_timewait_sock
  *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
  *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
  *	@sk_lock:	synchronizer
  *	@sk_rcvbuf: size of receive buffer in bytes
  *	@sk_wq: sock wait queue and async head
  *	@sk_rx_dst: receive input route used by early demux
  *	@sk_dst_cache: destination cache
  *	@sk_dst_pending_confirm: need to confirm neighbour
  *	@sk_policy: flow policy
  *	@sk_receive_queue: incoming packets
  *	@sk_wmem_alloc: transmit queue bytes committed
  *	@sk_write_queue: Packet sending queue
  *	@sk_omem_alloc: "o" is "option" or "other"
  *	@sk_wmem_queued: persistent queue size
  *	@sk_forward_alloc: space allocated forward
  *	@sk_napi_id: id of the last napi context to receive data for sk
  *	@sk_ll_usec: usecs to busypoll when there is no data
  *	@sk_allocation: allocation mode
  *	@sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
  *	@sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE)
  *	@sk_sndbuf: size of send buffer in bytes
  *	@sk_padding: unused element for alignment
  *	@sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets
  *	@sk_no_check_rx: allow zero checksum in RX packets
  *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
  *	@sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
  *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
  *	@sk_gso_max_size: Maximum GSO segment size to build
  *	@sk_gso_max_segs: Maximum number of GSO segments
  *	@sk_lingertime: %SO_LINGER l_linger setting
  *	@sk_backlog: always used with the per-socket spinlock held
  *	@sk_callback_lock: used with the callbacks in the end of this struct
  *	@sk_error_queue: rarely used
  *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
  *			  IPV6_ADDRFORM for instance)
  *	@sk_err: last error
  *	@sk_err_soft: errors that don't cause failure but are the cause of a
  *		      persistent failure not just 'timed out'
  *	@sk_drops: raw/udp drops counter
  *	@sk_ack_backlog: current listen backlog
  *	@sk_max_ack_backlog: listen backlog set in listen()
  *	@sk_priority: %SO_PRIORITY setting
  *	@sk_type: socket type (%SOCK_STREAM, etc)
  *	@sk_protocol: which protocol this socket belongs in this network family
  *	@sk_peer_pid: &struct pid for this socket's peer
  *	@sk_peer_cred: %SO_PEERCRED setting
  *	@sk_rcvlowat: %SO_RCVLOWAT setting
  *	@sk_rcvtimeo: %SO_RCVTIMEO setting
  *	@sk_sndtimeo: %SO_SNDTIMEO setting
  *	@sk_txhash: computed flow hash for use on transmit
  *	@sk_filter: socket filtering instructions
  *	@sk_timer: sock cleanup timer
  *	@sk_stamp: time stamp of last packet received
  *	@sk_tsflags: SO_TIMESTAMPING socket options
  *	@sk_tskey: counter to disambiguate concurrent tstamp requests
  *	@sk_socket: Identd and reporting IO signals
  *	@sk_user_data: RPC layer private data
  *	@sk_frag: cached page frag
  *	@sk_peek_off: current peek_offset value
  *	@sk_send_head: front of stuff to transmit
  *	@sk_security: used by security modules
  *	@sk_mark: generic packet mark
  *	@sk_cgrp_data: cgroup data for this cgroup
  *	@sk_memcg: this socket's memory cgroup association
  *	@sk_write_pending: a write to stream socket waits to start
  *	@sk_state_change: callback to indicate change in the state of the sock
  *	@sk_data_ready: callback to indicate there is data to be processed
  *	@sk_write_space: callback to indicate there is buffer space available for sending
  *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
  *	@sk_backlog_rcv: callback to process the backlog
  *	@sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
  *	@sk_reuseport_cb: reuseport group container
  *	@sk_rcu: used during RCU grace period
  */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping

#define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
#define sk_dontcopy_end		__sk_common.skc_dontcopy_end
#define sk_hash			__sk_common.skc_hash
#define sk_portpair		__sk_common.skc_portpair
#define sk_num			__sk_common.skc_num
#define sk_dport		__sk_common.skc_dport
#define sk_addrpair		__sk_common.skc_addrpair
#define sk_daddr		__sk_common.skc_daddr
#define sk_rcv_saddr		__sk_common.skc_rcv_saddr
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_reuseport		__sk_common.skc_reuseport
#define sk_ipv6only		__sk_common.skc_ipv6only
#define sk_net_refcnt		__sk_common.skc_net_refcnt
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
#define sk_v6_daddr		__sk_common.skc_v6_daddr
#define sk_v6_rcv_saddr	__sk_common.skc_v6_rcv_saddr
#define sk_cookie		__sk_common.skc_cookie
#define sk_incoming_cpu		__sk_common.skc_incoming_cpu
#define sk_flags		__sk_common.skc_flags
#define sk_rxhash		__sk_common.skc_rxhash

	socket_lock_t		sk_lock;
	atomic_t		sk_drops;
	int			sk_rcvlowat;
	struct sk_buff_head	sk_error_queue;
	struct sk_buff_head	sk_receive_queue;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 * Note : rmem_alloc is in this structure to fill a hole
	 * on 64bit arches, not because it's logically part of
	 * backlog.
	 */
	struct {
		atomic_t	rmem_alloc;
		int		len;
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc

	int			sk_forward_alloc;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int		sk_ll_usec;
	/* ===== mostly read cache line ===== */
	unsigned int		sk_napi_id;
#endif
	int			sk_rcvbuf;

	struct sk_filter __rcu	*sk_filter;
	union {
		struct socket_wq __rcu	*sk_wq;
		struct socket_wq	*sk_wq_raw;
	};
#ifdef CONFIG_XFRM
	struct xfrm_policy __rcu *sk_policy[2];
#endif
	struct dst_entry	*sk_rx_dst;
	struct dst_entry __rcu	*sk_dst_cache;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;

	/* ===== cache line for TX ===== */
	int			sk_wmem_queued;
	atomic_t		sk_wmem_alloc;
	unsigned long		sk_tsq_flags;
	struct sk_buff		*sk_send_head;
	struct sk_buff_head	sk_write_queue;
	__s32			sk_peek_off;
	int			sk_write_pending;
	__u32			sk_dst_pending_confirm;
	/* Note: 32bit hole on 64bit arches */
	long			sk_sndtimeo;
	struct timer_list	sk_timer;
	__u32			sk_priority;
	__u32			sk_mark;
	u32			sk_pacing_rate; /* bytes per second */
	u32			sk_max_pacing_rate;
	struct page_frag	sk_frag;
	netdev_features_t	sk_route_caps;
	netdev_features_t	sk_route_nocaps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	gfp_t			sk_allocation;
	__u32			sk_txhash;

	/*
	 * Because of non atomicity rules, all
	 * changes are protected by socket lock.
	 */
	unsigned int		__sk_flags_offset[0];
#ifdef __BIG_ENDIAN_BITFIELD
#define SK_FL_PROTO_SHIFT  16
#define SK_FL_PROTO_MASK   0x00ff0000

#define SK_FL_TYPE_SHIFT   0
#define SK_FL_TYPE_MASK    0x0000ffff
#else
#define SK_FL_PROTO_SHIFT  8
#define SK_FL_PROTO_MASK   0x0000ff00

#define SK_FL_TYPE_SHIFT   16
#define SK_FL_TYPE_MASK    0xffff0000
#endif

	kmemcheck_bitfield_begin(flags);
	unsigned int		sk_padding : 2,
				sk_no_check_tx : 1,
				sk_no_check_rx : 1,
				sk_userlocks : 4,
				sk_protocol  : 8,
				sk_type      : 16;
#define SK_PROTOCOL_MAX U8_MAX
	kmemcheck_bitfield_end(flags);

	u16			sk_gso_max_segs;
	unsigned long	        sk_lingertime;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	u32			sk_ack_backlog;
	u32			sk_max_ack_backlog;
	kuid_t			sk_uid;
	struct pid		*sk_peer_pid;
	const struct cred	*sk_peer_cred;
	long			sk_rcvtimeo;
	ktime_t			sk_stamp;
	u16			sk_tsflags;
	u8			sk_shutdown;
	u32			sk_tskey;
	struct socket		*sk_socket;
	void			*sk_user_data;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	struct sock_cgroup_data	sk_cgrp_data;
	struct mem_cgroup	*sk_memcg;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void                    (*sk_destruct)(struct sock *sk);
	struct sock_reuseport __rcu	*sk_reuseport_cb;
	struct rcu_head		sk_rcu;
};

#define __sk_user_data(sk) ((*((void __rcu **)&(sk)->sk_user_data)))

#define rcu_dereference_sk_user_data(sk)	rcu_dereference(__sk_user_data((sk)))
#define rcu_assign_sk_user_data(sk, ptr)	rcu_assign_pointer(__sk_user_data((sk)), ptr)
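/* Usage sketch (illustrative only): a layer that parks private state in
 * sk->sk_user_data under RCU might publish and read it as follows
 * ("my_state" and "state" are hypothetical pointers used only here):
 *
 *	rcu_assign_sk_user_data(sk, my_state);
 *
 *	rcu_read_lock();
 *	state = rcu_dereference_sk_user_data(sk);
 *	if (state)
 *		... use state ...
 *	rcu_read_unlock();
 */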

/*
 * SK_CAN_REUSE and SK_NO_REUSE on a socket indicate whether or not it is
 * OK for its port to be reused by someone else. SK_FORCE_REUSE on a
 * socket means that the socket will reuse everybody else's port without
 * looking at the other's sk_reuse value.
 */

#define SK_NO_REUSE	0
#define SK_CAN_REUSE	1
#define SK_FORCE_REUSE	2

int sk_set_peek_off(struct sock *sk, int val);

static inline int sk_peek_offset(struct sock *sk, int flags)
{
	if (unlikely(flags & MSG_PEEK)) {
		s32 off = READ_ONCE(sk->sk_peek_off);
		if (off >= 0)
			return off;
	}

	return 0;
}

static inline void sk_peek_offset_bwd(struct sock *sk, int val)
{
	s32 off = READ_ONCE(sk->sk_peek_off);

	if (unlikely(off >= 0)) {
		off = max_t(s32, off - val, 0);
		WRITE_ONCE(sk->sk_peek_off, off);
	}
}

static inline void sk_peek_offset_fwd(struct sock *sk, int val)
{
	sk_peek_offset_bwd(sk, -val);
}
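/* Usage sketch (illustrative only): a datagram protocol honouring
 * SO_PEEK_OFF consults sk_peek_offset() when the caller passes MSG_PEEK,
 * advances the offset by the amount peeked, and walks it back when data
 * is actually removed from the receive queue ("copied"/"freed" are
 * hypothetical locals):
 *
 *	off = sk_peek_offset(sk, flags);	// 0 unless peeking with SO_PEEK_OFF
 *	... skip 'off' bytes of queued data before copying ...
 *	if (flags & MSG_PEEK)
 *		sk_peek_offset_fwd(sk, copied);
 *	else
 *		sk_peek_offset_bwd(sk, freed);
 */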

/*
 * Hashed lists helper routines
 */
static inline struct sock *sk_entry(const struct hlist_node *node)
{
	return hlist_entry(node, struct sock, sk_node);
}

static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline bool sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline bool sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static inline void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static inline void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

/* NB: equivalent to hlist_del_init_rcu */
static inline bool __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return true;
	}
	return false;
}

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed f.e. it is found in hash table
   or a list and the lookup is made under lock preventing hash table
   modifications.
 */

static __always_inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, f.e. it is true in context of any socketcall.
 */
static __always_inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}
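/* Usage sketch (illustrative only): a lookup takes its reference while the
 * chain is still protected, and the caller drops it with sock_put()
 * (defined further down in this header) once it is done with the socket:
 *
 *	spin_lock(&chain_lock);		// hypothetical lock protecting the hash chain
 *	sk = ... find socket on the chain ...;
 *	if (sk)
 *		sock_hold(sk);
 *	spin_unlock(&chain_lock);
 *	... use sk ...
 *	if (sk)
 *		sock_put(sk);		// may free the socket
 */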

static inline bool sk_del_node_init(struct sock *sk)
{
	bool rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)

static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return true;
	}
	return false;
}

static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
{
	bool rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
	    sk->sk_family == AF_INET6)
		hlist_add_tail_rcu(&sk->sk_node, list);
	else
		hlist_add_head_rcu(&sk->sk_node, list);
}

static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
	    sk->sk_family == AF_INET6)
		hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
	else
		hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static inline void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static inline void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_node)
#define sk_for_each_rcu(__sk, list) \
	hlist_for_each_entry_rcu(__sk, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk) \
	hlist_for_each_entry_from(__sk, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_safe(__sk, tmp, list) \
	hlist_for_each_entry_safe(__sk, tmp, list, sk_node)
#define sk_for_each_bound(__sk, list) \
	hlist_for_each_entry(__sk, list, sk_bind_node)
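/* Usage sketch (illustrative only): walking one hash chain with the
 * iterators above, under whatever lock protects it ("head", "hnum" and
 * "dif" are hypothetical locals):
 *
 *	struct sock *sk;
 *
 *	sk_for_each(sk, &head) {
 *		if (sk->sk_num == hnum && sk->sk_bound_dev_if == dif)
 *			... candidate found ...
 *	}
 *
 * The _rcu variants are meant to run under rcu_read_lock(), and the nulls
 * variants additionally require checking the nulls value when the walk
 * ends, since an entry may have moved to another chain.
 */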

/**
 * sk_for_each_entry_offset_rcu - iterate over a list at a given struct offset
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_node to use as a loop cursor.
 * @head:	the head for your list.
 * @offset:	offset of hlist_node within the struct.
 *
 */
#define sk_for_each_entry_offset_rcu(tpos, pos, head, offset)		       \
	for (pos = rcu_dereference((head)->first);			       \
	     pos != NULL &&						       \
		({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;});       \
	     pos = rcu_dereference(pos->next))

static inline struct user_namespace *sk_user_ns(struct sock *sk)
{
	/* Careful only use this in a context where these parameters
	 * can not change and must all be valid, such as recvmsg from
	 * userspace.
	 */
	return sk->sk_socket->file->f_cred->user_ns;
}

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
	SOCK_MEMALLOC, /* VM depends on this socket for swapping */
	SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
	SOCK_FASYNC, /* fasync() active */
	SOCK_RXQ_OVFL,
	SOCK_ZEROCOPY, /* buffers from userspace */
	SOCK_WIFI_STATUS, /* push wifi status to userspace */
	SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
		     * Will use last 4 bytes of packet sent from
		     * user-space instead.
		     */
	SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
	SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
	SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
};

#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}
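/* Usage sketch (illustrative only): flag bits are normally changed under
 * the socket lock (the helpers use non-atomic __set_bit/__clear_bit) and
 * tested with sock_flag(), e.g. to honour SO_LINGER with a zero timeout:
 *
 *	if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime)
 *		... close without waiting for queued data ...
 *
 *	sock_set_flag(sk, SOCK_DONE);
 *	sock_reset_flag(sk, SOCK_LINGER);
 */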

#ifdef CONFIG_NET
extern struct static_key memalloc_socks;
static inline int sk_memalloc_socks(void)
{
	return static_key_false(&memalloc_socks);
}
#else

static inline int sk_memalloc_socks(void)
{
	return 0;
}

#endif

static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
{
	return gfp_mask | (sk->sk_allocation & __GFP_MEMALLOC);
}
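/* Usage sketch (illustrative only): allocations made on behalf of a socket
 * pass their gfp flags through sk_gfp_mask(), so that SOCK_MEMALLOC
 * sockets (see sk_set_memalloc() below) may dip into the memalloc
 * reserves:
 *
 *	skb = alloc_skb(size, sk_gfp_mask(sk, GFP_ATOMIC));
 */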

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline bool sk_acceptq_is_full(const struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(const struct sock *sk)
{
	return sk->sk_wmem_queued >> 1;
}

static inline int sk_stream_wspace(const struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

void sk_stream_write_space(struct sock *sk);

/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	/* make sure the skb dst is refcounted, as we are going to leave the rcu lock */
	skb_dst_force_safe(skb);

	if (!sk->sk_backlog.tail)
		sk->sk_backlog.head = skb;
	else
		sk->sk_backlog.tail->next = skb;

	sk->sk_backlog.tail = skb;
	skb->next = NULL;
}

/*
 * Take into account size of receive queue and backlog queue
 * Do not take into account this skb truesize,
 * to allow even a single big packet to come.
 */
static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
{
	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

	return qsize > limit;
}

/* The per-socket spinlock must be held here. */
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
					      unsigned int limit)
{
	if (sk_rcvqueues_full(sk, limit))
		return -ENOBUFS;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
		return -ENOMEM;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}
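/* Usage sketch (illustrative only): a typical softirq receive path, in the
 * spirit of tcp_v4_rcv() (the limit shown is just one common choice):
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk)) {
 *		ret = sk_backlog_rcv(sk, skb);
 *	} else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf + sk->sk_sndbuf)) {
 *		bh_unlock_sock(sk);
 *		... count the drop, free the skb ...
 *		goto done;
 *	}
 *	bh_unlock_sock(sk);
 *
 * Whatever was queued here is later replayed by release_sock() through
 * sk->sk_backlog_rcv().
 */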

int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
		return __sk_backlog_rcv(sk, skb);

	return sk->sk_backlog_rcv(sk, skb);
}

static inline void sk_incoming_cpu_update(struct sock *sk)
{
	sk->sk_incoming_cpu = raw_smp_processor_id();
}

static inline void sock_rps_record_flow_hash(__u32 hash)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(sock_flow_table, hash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	if (static_key_false(&rfs_needed)) {
		/* Reading sk->sk_rxhash might incur an expensive cache line
		 * miss.
		 *
		 * TCP_ESTABLISHED does cover almost all states where RFS
		 * might be useful, and is cheaper [1] than testing :
		 *	IPv4: inet_sk(sk)->inet_daddr
		 * 	IPv6: ipv6_addr_any(&sk->sk_v6_daddr)
		 * OR	an additional socket flag
		 * [1] : sk_state and sk_prot are in the same cache line.
		 */
		if (sk->sk_state == TCP_ESTABLISHED)
			sock_rps_record_flow_hash(sk->sk_rxhash);
	}
#endif
}

static inline void sock_rps_save_rxhash(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (unlikely(sk->sk_rxhash != skb->hash))
		sk->sk_rxhash = skb->hash;
#endif
}

static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
	sk->sk_rxhash = 0;
#endif
}

#define sk_wait_event(__sk, __timeo, __condition, __wait)		\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = wait_woken(__wait,			\
						TASK_INTERRUPTIBLE,	\
						*(__timeo));		\
		}							\
		sched_annotate_sleep();					\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})
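/* Usage sketch (illustrative only): callers pair sk_wait_event() with a
 * woken-style wait queue entry on sk_sleep(sk), as sk_wait_data() does:
 *
 *	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 *
 *	add_wait_queue(sk_sleep(sk), &wait);
 *	rc = sk_wait_event(sk, &timeo, ... condition ..., &wait);
 *	remove_wait_queue(sk_sleep(sk), &wait);
 */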

int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
void sk_stream_wait_close(struct sock *sk, long timeo_p);
int sk_stream_error(struct sock *sk, int flags, int err);
void sk_stream_kill_queues(struct sock *sk);
void sk_set_memalloc(struct sock *sk);
void sk_clear_memalloc(struct sock *sk);

void __sk_flush_backlog(struct sock *sk);

static inline bool sk_flush_backlog(struct sock *sk)
{
	if (unlikely(READ_ONCE(sk->sk_backlog.tail))) {
		__sk_flush_backlog(sk);
		return true;
	}
	return false;
}

int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;
struct smc_hashinfo;
struct module;

/*
 * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
 * un-modified. Special care is taken when initializing object to zero.
 */
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
{