/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *              Steve Whitehouse:       Default routines for sock_ops
 *              Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
 *              			protinfo be just a void pointer, as the
 *              			protocol specific parts were moved to
 *              			respective headers and ipv4/v6, etc now
 *              			use private slabcaches for its socks
 *              Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memcontrol.h>
#include <linux/res_counter.h>
#include <linux/static_key.h>
#include <linux/aio.h>
#include <linux/sched.h>

#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <linux/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>

struct cgroup;
struct cgroup_subsys;
#ifdef CONFIG_NET
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg);
#else
static inline
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	return 0;
}
static inline
void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
}
#endif
/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline __printf(2, 3)
void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
{
}
#endif
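
/*
 * Typical use, shown here purely for illustration: SOCK_DEBUG() takes a
 * printf-style format and only emits anything when the socket has the
 * SOCK_DBG flag set (i.e. SO_DEBUG was enabled on it), e.g.
 *
 *	SOCK_DEBUG(sk, "%s: ack %u\n", __func__, ack);
 */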

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;
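
/*
 * Illustrative sketch of how the two halves of socket_lock_t are used
 * (lock_sock(), release_sock(), bh_lock_sock() and bh_unlock_sock() are
 * provided elsewhere in this header): process context takes the mutex-like
 * "owned" side, while softirq receive processing takes the raw spinlock:
 *
 *	lock_sock(sk);		(process context, may sleep)
 *	...update socket state...
 *	release_sock(sk);
 *
 *	bh_lock_sock(sk);	(softirq context)
 *	...queue or process an skb...
 *	bh_unlock_sock(sk);
 */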

struct sock;
struct proto;
struct net;

/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_daddr: Foreign IPv4 addr
 *	@skc_rcv_saddr: Bound local IPv4 addr
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_u16hashes: two u16 hash values used by UDP lookup tables
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
 *	@skc_prot: protocol handlers inside a network family
 *	@skc_net: reference to the network namespace of this socket
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
 *	@skc_tx_queue_mapping: tx queue number for this connection
 *	@skc_refcnt: reference count
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	/* skc_daddr and skc_rcv_saddr must be grouped :
	 * cf INET_MATCH() and INET_TW_MATCH()
	 */
	__be32			skc_daddr;
	__be32			skc_rcv_saddr;

	union  {
		unsigned int	skc_hash;
		__u16		skc_u16hashes[2];
	};
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_bind_node;
		struct hlist_nulls_node skc_portaddr_node;
	};
	struct proto		*skc_prot;
#ifdef CONFIG_NET_NS
	struct net	 	*skc_net;
#endif
	/*
	 * fields between dontcopy_begin/dontcopy_end
	 * are not copied in sock_copy()
	 */
	/* private: */
	int			skc_dontcopy_begin[0];
	/* public: */
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node skc_nulls_node;
	};
	int			skc_tx_queue_mapping;
	atomic_t		skc_refcnt;
	/* private: */
	int                     skc_dontcopy_end[0];
	/* public: */
};

struct cg_proto;
/**
  *	struct sock - network layer representation of sockets
  *	@__sk_common: shared layout with inet_timewait_sock
  *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
  *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
  *	@sk_lock:	synchronizer
  *	@sk_rcvbuf: size of receive buffer in bytes
  *	@sk_wq: sock wait queue and async head
  *	@sk_dst_cache: destination cache
  *	@sk_dst_lock: destination cache lock
  *	@sk_policy: flow policy
  *	@sk_receive_queue: incoming packets
  *	@sk_wmem_alloc: transmit queue bytes committed
  *	@sk_write_queue: Packet sending queue
  *	@sk_async_wait_queue: DMA copied packets
  *	@sk_omem_alloc: "o" is "option" or "other"
  *	@sk_wmem_queued: persistent queue size
  *	@sk_forward_alloc: space allocated forward
  *	@sk_allocation: allocation mode
  *	@sk_sndbuf: size of send buffer in bytes
  *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
  *		   %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
  *	@sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
  *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
  *	@sk_route_nocaps: forbidden route capabilities (e.g. %NETIF_F_GSO_MASK)
  *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
  *	@sk_gso_max_size: Maximum GSO segment size to build
  *	@sk_lingertime: %SO_LINGER l_linger setting
  *	@sk_backlog: always used with the per-socket spinlock held
  *	@sk_callback_lock: used with the callbacks in the end of this struct
  *	@sk_error_queue: rarely used
  *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
  *			  IPV6_ADDRFORM for instance)
  *	@sk_err: last error
  *	@sk_err_soft: errors that don't cause failure but are the cause of a
  *		      persistent failure not just 'timed out'
  *	@sk_drops: raw/udp drops counter
  *	@sk_ack_backlog: current listen backlog
  *	@sk_max_ack_backlog: listen backlog set in listen()
  *	@sk_priority: %SO_PRIORITY setting
  *	@sk_cgrp_prioidx: socket group's priority map index
  *	@sk_type: socket type (%SOCK_STREAM, etc)
  *	@sk_protocol: which protocol this socket belongs in this network family
  *	@sk_peer_pid: &struct pid for this socket's peer
  *	@sk_peer_cred: %SO_PEERCRED setting
  *	@sk_rcvlowat: %SO_RCVLOWAT setting
  *	@sk_rcvtimeo: %SO_RCVTIMEO setting
  *	@sk_sndtimeo: %SO_SNDTIMEO setting
  *	@sk_rxhash: flow hash received from netif layer
  *	@sk_filter: socket filtering instructions
  *	@sk_protinfo: private area, net family specific, when not using slab
  *	@sk_timer: sock cleanup timer
  *	@sk_stamp: time stamp of last packet received
  *	@sk_socket: Identd and reporting IO signals
  *	@sk_user_data: RPC layer private data
  *	@sk_sndmsg_page: cached page for sendmsg
  *	@sk_sndmsg_off: cached offset for sendmsg
  *	@sk_peek_off: current peek_offset value
  *	@sk_send_head: front of stuff to transmit
  *	@sk_security: used by security modules
  *	@sk_mark: generic packet mark
  *	@sk_classid: this socket's cgroup classid
  *	@sk_cgrp: this socket's cgroup-specific proto data
  *	@sk_write_pending: a write to stream socket waits to start
  *	@sk_state_change: callback to indicate change in the state of the sock
  *	@sk_data_ready: callback to indicate there is data to be processed
  *	@sk_write_space: callback to indicate there is buffer space available for sending
  *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
  *	@sk_backlog_rcv: callback to process the backlog
  *	@sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping

#define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
#define sk_dontcopy_end		__sk_common.skc_dontcopy_end
#define sk_hash			__sk_common.skc_hash
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
	socket_lock_t		sk_lock;
	struct sk_buff_head	sk_receive_queue;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 * Note : rmem_alloc is in this structure to fill a hole
	 * on 64bit arches, not because it's logically part of
	 * backlog.
	 */
	struct {
		atomic_t	rmem_alloc;
		int		len;
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc
	int			sk_forward_alloc;
#ifdef CONFIG_RPS
	__u32			sk_rxhash;
#endif
	atomic_t		sk_drops;
	int			sk_rcvbuf;

	struct sk_filter __rcu	*sk_filter;
	struct socket_wq __rcu	*sk_wq;

#ifdef CONFIG_NET_DMA
	struct sk_buff_head	sk_async_wait_queue;
#endif

#ifdef CONFIG_XFRM
	struct xfrm_policy	*sk_policy[2];
#endif
	unsigned long 		sk_flags;
	struct dst_entry	*sk_dst_cache;
	spinlock_t		sk_dst_lock;
	struct dst_entry	*sk_rx_dst;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;
	struct sk_buff_head	sk_write_queue;
	kmemcheck_bitfield_begin(flags);
	unsigned int		sk_shutdown  : 2,
				sk_no_check  : 2,
				sk_userlocks : 4,
				sk_protocol  : 8,
				sk_type      : 16;
	kmemcheck_bitfield_end(flags);
	int			sk_wmem_queued;
	gfp_t			sk_allocation;
	netdev_features_t	sk_route_caps;
	netdev_features_t	sk_route_nocaps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	int			sk_rcvlowat;
	unsigned long	        sk_lingertime;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
#ifdef CONFIG_CGROUPS
	__u32			sk_cgrp_prioidx;
#endif
	struct pid		*sk_peer_pid;
	const struct cred	*sk_peer_cred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	ktime_t			sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	__s32			sk_peek_off;
	int			sk_write_pending;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	__u32			sk_mark;
	u32			sk_classid;
	struct cg_proto		*sk_cgrp;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void                    (*sk_destruct)(struct sock *sk);
};

/*
 * SK_CAN_REUSE and SK_NO_REUSE on a socket indicate whether or not the
 * socket is OK with its port being reused by someone else. SK_FORCE_REUSE
 * on a socket means that the socket will reuse everybody else's port
 * without looking at the other's sk_reuse value.
 */

#define SK_NO_REUSE	0
#define SK_CAN_REUSE	1
#define SK_FORCE_REUSE	2

static inline int sk_peek_offset(struct sock *sk, int flags)
{
	if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0))
		return sk->sk_peek_off;
	else
		return 0;
}

static inline void sk_peek_offset_bwd(struct sock *sk, int val)
{
	if (sk->sk_peek_off >= 0) {
		if (sk->sk_peek_off >= val)
			sk->sk_peek_off -= val;
		else
			sk->sk_peek_off = 0;
	}
}

static inline void sk_peek_offset_fwd(struct sock *sk, int val)
{
	if (sk->sk_peek_off >= 0)
		sk->sk_peek_off += val;
}
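
/*
 * Usage sketch, for illustration only: a datagram recvmsg() implementation
 * honouring SO_PEEK_OFF would skip sk_peek_offset() bytes of the queued
 * data, then move the offset forward on a peek and rewind it by the amount
 * actually removed from the queue:
 *
 *	int skip = sk_peek_offset(sk, flags);
 *	...copy to user starting 'skip' bytes into the data...
 *	if (flags & MSG_PEEK)
 *		sk_peek_offset_fwd(sk, copied);
 *	else
 *		sk_peek_offset_bwd(sk, copied);
 */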

/*
 * Hashed lists helper routines
 */
static inline struct sock *sk_entry(const struct hlist_node *node)
{
	return hlist_entry(node, struct sock, sk_node);
}

static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline bool sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline bool sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static inline void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static inline void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

/* NB: equivalent to hlist_del_init_rcu */
static inline bool __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return true;
	}
	return false;
}

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed f.e. it is found in hash table
   or a list and the lookup is made under lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, f.e. it is true in context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static inline bool sk_del_node_init(struct sock *sk)
{
	bool rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)

static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return true;
	}
	return false;
}

static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
{
	bool rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	hlist_add_head_rcu(&sk->sk_node, list);
}

static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static inline void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static inline void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_rcu(__sk, node, list) \
	hlist_for_each_entry_rcu(__sk, node, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)
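
/*
 * Illustration only: a protocol lookup typically walks one hash chain with
 * these iterators while holding the chain lock (or under RCU for the _rcu
 * and _nulls variants), e.g.
 *
 *	struct sock *sk;
 *	struct hlist_node *node;
 *
 *	sk_for_each(sk, node, head)
 *		if (sk->sk_hash == hash && sk->sk_family == family)
 *			return sk;
 */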

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
	SOCK_TIMESTAMPING_TX_HARDWARE,  /* %SOF_TIMESTAMPING_TX_HARDWARE */
	SOCK_TIMESTAMPING_TX_SOFTWARE,  /* %SOF_TIMESTAMPING_TX_SOFTWARE */
	SOCK_TIMESTAMPING_RX_HARDWARE,  /* %SOF_TIMESTAMPING_RX_HARDWARE */
	SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
	SOCK_TIMESTAMPING_SOFTWARE,     /* %SOF_TIMESTAMPING_SOFTWARE */
	SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
	SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
	SOCK_FASYNC, /* fasync() active */
	SOCK_RXQ_OVFL,
	SOCK_ZEROCOPY, /* buffers from userspace */
	SOCK_WIFI_STATUS, /* push wifi status to userspace */
	SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
		     * Will use last 4 bytes of packet sent from
		     * user-space instead.
		     */
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}
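
/*
 * For illustration: the flag helpers operate on the sk_flags bitmap, e.g.
 *
 *	sock_set_flag(sk, SOCK_LINGER);
 *	if (sock_flag(sk, SOCK_DEAD))
 *		return;
 *	sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
 */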

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline bool sk_acceptq_is_full(const struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(const struct sock *sk)
{
	return sk->sk_wmem_queued >> 1;
}

static inline int sk_stream_wspace(const struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline bool sk_stream_memory_free(const struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	/* don't let the skb dst be un-refcounted; we are going to leave the rcu lock */
	skb_dst_force(skb);

	if (!sk->sk_backlog.tail)
		sk->sk_backlog.head = skb;
	else
		sk->sk_backlog.tail->next = skb;

	sk->sk_backlog.tail = skb;
	skb->next = NULL;
}

/*
 * Take into account size of receive queue and backlog queue
 * Do not take into account this skb truesize,
 * to allow even a single big packet to come.
 */
static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
				     unsigned int limit)
{
	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

	return qsize > limit;
}

/* The per-socket spinlock must be held here. */
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
					      unsigned int limit)
{
	if (sk_rcvqueues_full(sk, skb, limit))
		return -ENOBUFS;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}
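
/*
 * Illustrative sketch of the usual receive-path pattern (assuming softirq
 * context; sock_owned_by_user() is provided elsewhere in this header): if a
 * user thread currently owns the socket lock the skb goes to the backlog,
 * otherwise it is processed immediately:
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		rc = sk_backlog_rcv(sk, skb);
 *	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
 *		rc = -ENOBUFS;	(backlog full, caller drops the skb)
 *	bh_unlock_sock(sk);
 */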

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	return sk->sk_backlog_rcv(sk, skb);
}

static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_reset_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_save_rxhash(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (unlikely(sk->sk_rxhash != skb->rxhash)) {
		sock_rps_reset_flow(sk);
		sk->sk_rxhash = skb->rxhash;
	}
#endif
}

static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
	sock_rps_reset_flow(sk);
	sk->sk_rxhash = 0;
#endif
}

#define sk_wait_event(__sk, __timeo, __condition)			\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = schedule_timeout(*(__timeo));	\
		}							\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})
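
/*
 * Illustration only: sk_wait_event() is meant to be wrapped in a
 * prepare_to_wait()/finish_wait() pair on the socket's wait queue, much as
 * sk_wait_data() does for the receive queue:
 *
 *	long timeo = sock_rcvtimeo(sk, noblock);
 *	DEFINE_WAIT(wait);
 *
 *	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 *	rc = sk_wait_event(sk, &timeo,
 *			   !skb_queue_empty(&sk->sk_receive_queue));
 *	finish_wait(sk_sleep(sk), &wait);
 */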

extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;
struct module;

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept)(struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
#ifdef CONFIG_COMPAT
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*compat_ioctl)(struct sock *sk,
					unsigned int cmd, unsigned long arg);
#endif
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	void			(*rehash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);
	void			(*clear_sk)(struct sock *sk, int size);

	/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
	unsigned int		inuse_idx;
#endif

	/* Memory pressure */
	void			(*enter_memory_pressure)(struct sock *sk);
	atomic_long_t		*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All the __sk_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	long			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;
	bool			no_autobind;

	struct kmem_cache	*slab;
	unsigned int		obj_size;
	int			slab_flags;

	struct percpu_counter	*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	union {
		struct inet_hashinfo	*hashinfo;
		struct udp_table	*udp_table;
		struct raw_hashinfo	*raw_hash;
	} h;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
	/*
	 * cgroup specific init/deinit functions. Called once for all
	 * protocols that implement it, from cgroups populate function.
	 * This function has to set up any files the protocol wants to
	 * appear in the kmem cgroup filesystem.
	 */
	int			(*init_cgroup)(struct mem_cgroup *memcg,
					       struct cgroup_subsys *ss);
	void			(*destroy_cgroup)(struct mem_cgroup *memcg);
	struct cg_proto		*(*proto_cgroup)(struct mem_cgroup *memcg);
#endif
};

/*
 * Bits in struct cg_proto.flags
 */
enum cg_proto_flags {
	/* Currently active and new sockets should be assigned to cgroups */
	MEMCG_SOCK_ACTIVE,
	/* It was ever activated; we must disarm static keys on destruction */
	MEMCG_SOCK_ACTIVATED,
};

struct cg_proto {
	void			(*enter_memory_pressure)(struct sock *sk);
	struct res_counter	*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	int			*memory_pressure;
	long			*sysctl_mem;
	unsigned long		flags;
	/*
	 * memcg field is used to find which memcg we belong to directly.
	 * Each memcg struct can hold more than one cg_proto, so container_of
	 * won't really cut it.
	 *
	 * The elegant solution would be having an inverse function to
	 * proto_cgroup in struct proto, but that means polluting the structure
	 * for everybody, instead of just for memcg users.
	 */
	struct mem_cgroup	*memcg;
};

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);
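
/*
 * Registration sketch for a hypothetical protocol "foo", shown for
 * illustration only: passing alloc_slab == 1 asks the core to create a
 * kmem cache of obj_size bytes for this protocol's sockets:
 *
 *	static struct proto foo_proto = {
 *		.name	  = "FOO",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct foo_sock),
 *	};
 *
 *	err = proto_register(&foo_proto, 1);
 *	...
 *	proto_unregister(&foo_proto);
 */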

static inline bool memcg_proto_active(struct cg_proto *cg_proto)
{
	return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
}

static inline bool memcg_proto_activated(struct cg_proto *cg_proto)
{
	return test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags);
}

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */

#if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && defined(CONFIG_NET)
extern struct static_key memcg_socket_limit_enabled;
static inline struct cg_proto *parent_cg_proto(struct proto *proto,
					       struct cg_proto *cg_proto)
{
	return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
}
#define mem_cgroup_sockets_enabled static_key_false(&memcg_socket_limit_enabled)
#else
#define mem_cgroup_sockets_enabled 0
static inline struct cg_proto *parent_cg_proto(struct proto *proto,
					       struct cg_proto *cg_proto)
{
	return NULL;
}
#endif


static inline bool sk_has_memory_pressure(const struct sock *sk)
{
	return sk->sk_prot->memory_pressure != NULL;
}

static inline bool sk_under_memory_pressure(const struct sock *sk)
{
	if (!sk->sk_prot->memory_pressure)
		return false;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		return !!*sk->sk_cgrp->memory_pressure;

	return !!*sk->sk_prot->memory_pressure;
}

static inline void sk_leave_memory_pressure(struct sock *sk)
{
	int *memory_pressure = sk->sk_prot->memory_pressure;

	if (!memory_pressure)
		return;

	if (*memory_pressure)
		*memory_pressure = 0;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct cg_proto *cg_proto = sk->sk_cgrp;
		struct proto *prot = sk->sk_prot;

		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
			if (*cg_proto->memory_pressure)
				*cg_proto->memory_pressure = 0;
	}

}

static inline void sk_enter_memory_pressure(struct sock *sk)
{
	if (!sk->sk_prot->enter_memory_pressure)
		return;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct cg_proto *cg_proto = sk->sk_cgrp;
		struct proto *prot = sk->sk_prot;

		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
			cg_proto->enter_memory_pressure(sk);
	}

	sk->sk_prot->enter_memory_pressure(sk);
}

static inline long sk_prot_mem_limits(const struct sock *sk, int index)
{
	long *prot = sk->sk_prot->sysctl_mem;
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		prot = sk->sk_cgrp->sysctl_mem;

	return prot[index];
}

static inline void memcg_memory_allocated_add(struct cg_proto *prot,
					      unsigned long amt,
					      int *parent_status)
{
	struct res_counter *fail;
	int ret;

	ret = res_counter_charge_nofail(prot->memory_allocated,
					amt << PAGE_SHIFT, &fail);
	if (ret < 0)
		*parent_status = OVER_LIMIT;
}

static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
					      unsigned long amt)
{
	res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
}

static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
{
	u64 ret;
	ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
	return ret >> PAGE_SHIFT;
}

static inline long
sk_memory_allocated(const struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		return memcg_memory_allocated_read(sk->sk_cgrp);

	return atomic_long_read(prot->memory_allocated);
}

static inline long
sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
{
	struct proto *prot = sk->sk_prot;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
		/* update the root cgroup regardless */
		atomic_long_add_return(amt, prot->memory_allocated);
		return memcg_memory_allocated_read(sk->sk_cgrp);
	}

	return atomic_long_add_return(amt, prot->memory_allocated);
}

static inline void
sk_memory_allocated_sub(struct sock *sk, int amt)
{
	struct proto *prot = sk->sk_prot;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		memcg_memory_allocated_sub(sk->sk_cgrp, amt);

	atomic_long_sub(amt, prot->memory_allocated);
}

static inline void sk_sockets_allocated_dec(struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct cg_proto *cg_proto = sk->sk_cgrp;

		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
			percpu_counter_dec(cg_proto->sockets_allocated);
	}

	percpu_counter_dec(prot->sockets_allocated);
}

static inline void sk_sockets_allocated_inc(struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct cg_proto *cg_proto = sk->sk_cgrp;

		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
			percpu_counter_inc(cg_proto->sockets_allocated);
	}

	percpu_counter_inc(prot->sockets_allocated);
}

static inline int
sk_sockets_allocated_read_positive(struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		return percpu_counter_read_positive(sk->sk_cgrp->sockets_allocated);

	return percpu_counter_read_positive(prot->sockets_allocated);
}

static inline int
proto_sockets_allocated_sum_positive(struct proto *prot)
{
	return percpu_counter_sum_positive(prot->sockets_allocated);
}

static inline long
proto_memory_allocated(struct proto *prot)
{
	return atomic_long_read(prot->memory_allocated);
}

static inline bool
proto_memory_pressure(struct proto *prot)
{
	if (!prot->memory_pressure)
		return false;
	return !!*prot->memory_pressure;
}


#ifdef CONFIG_PROC_FS