/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *              Steve Whitehouse:       Default routines for sock_ops
 *              Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
 *              			protinfo be just a void pointer, as the
 *              			protocol specific parts were moved to
 *              			respective headers and ipv4/v6, etc now
 *              			use private slabcaches for its socks
 *              Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memcontrol.h>
#include <linux/res_counter.h>
#include <linux/static_key.h>
#include <linux/aio.h>
#include <linux/sched.h>

#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <linux/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>

struct cgroup;
struct cgroup_subsys;
#ifdef CONFIG_NET
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg);
#else
static inline
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	return 0;
}
static inline
void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
}
#endif
/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline __printf(2, 3)
void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
{
}
#endif

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;
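
/*
 * Illustrative usage sketch (not part of the original header): process
 * context takes the lock with lock_sock()/release_sock(), which may sleep
 * and marks the lock as owned, while softirq code takes only the spinlock
 * via bh_lock_sock()/bh_unlock_sock():
 *
 *	lock_sock(sk);		// process context, may sleep
 *	...			// socket state may be changed safely here
 *	release_sock(sk);	// also processes the backlog queue
 *
 *	bh_lock_sock(sk);	// softirq context, spinlock only
 *	...
 *	bh_unlock_sock(sk);
 */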

struct sock;
struct proto;
struct net;

/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_daddr: Foreign IPv4 addr
 *	@skc_rcv_saddr: Bound local IPv4 addr
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_u16hashes: two u16 hash values used by UDP lookup tables
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
 *	@skc_prot: protocol handlers inside a network family
 *	@skc_net: reference to the network namespace of this socket
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
 *	@skc_tx_queue_mapping: tx queue number for this connection
 *	@skc_refcnt: reference count
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	/* skc_daddr and skc_rcv_saddr must be grouped :
	 * cf INET_MATCH() and INET_TW_MATCH()
	 */
	__be32			skc_daddr;
	__be32			skc_rcv_saddr;

	union  {
		unsigned int	skc_hash;
		__u16		skc_u16hashes[2];
	};
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_bind_node;
		struct hlist_nulls_node skc_portaddr_node;
	};
	struct proto		*skc_prot;
#ifdef CONFIG_NET_NS
	struct net	 	*skc_net;
#endif
	/*
	 * fields between dontcopy_begin/dontcopy_end
	 * are not copied in sock_copy()
	 */
	/* private: */
	int			skc_dontcopy_begin[0];
	/* public: */
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node skc_nulls_node;
	};
	int			skc_tx_queue_mapping;
	atomic_t		skc_refcnt;
	/* private: */
	int                     skc_dontcopy_end[0];
	/* public: */
};

struct cg_proto;
/**
  *	struct sock - network layer representation of sockets
  *	@__sk_common: shared layout with inet_timewait_sock
  *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
  *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
  *	@sk_lock:	synchronizer
  *	@sk_rcvbuf: size of receive buffer in bytes
  *	@sk_wq: sock wait queue and async head
  *	@sk_rx_dst: receive input route used by early tcp demux
  *	@sk_dst_cache: destination cache
  *	@sk_dst_lock: destination cache lock
  *	@sk_policy: flow policy
  *	@sk_receive_queue: incoming packets
  *	@sk_wmem_alloc: transmit queue bytes committed
  *	@sk_write_queue: Packet sending queue
  *	@sk_async_wait_queue: DMA copied packets
  *	@sk_omem_alloc: "o" is "option" or "other"
  *	@sk_wmem_queued: persistent queue size
  *	@sk_forward_alloc: space allocated forward
  *	@sk_allocation: allocation mode
  *	@sk_sndbuf: size of send buffer in bytes
  *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
  *		   %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
  *	@sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
  *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
  *	@sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
  *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
  *	@sk_gso_max_size: Maximum GSO segment size to build
  *	@sk_lingertime: %SO_LINGER l_linger setting
  *	@sk_backlog: always used with the per-socket spinlock held
  *	@sk_callback_lock: used with the callbacks in the end of this struct
  *	@sk_error_queue: rarely used
  *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
  *			  IPV6_ADDRFORM for instance)
  *	@sk_err: last error
  *	@sk_err_soft: errors that don't cause failure but are the cause of a
  *		      persistent failure not just 'timed out'
  *	@sk_drops: raw/udp drops counter
  *	@sk_ack_backlog: current listen backlog
  *	@sk_max_ack_backlog: listen backlog set in listen()
  *	@sk_priority: %SO_PRIORITY setting
  *	@sk_cgrp_prioidx: socket group's priority map index
  *	@sk_type: socket type (%SOCK_STREAM, etc)
  *	@sk_protocol: which protocol this socket belongs in this network family
  *	@sk_peer_pid: &struct pid for this socket's peer
  *	@sk_peer_cred: %SO_PEERCRED setting
  *	@sk_rcvlowat: %SO_RCVLOWAT setting
  *	@sk_rcvtimeo: %SO_RCVTIMEO setting
  *	@sk_sndtimeo: %SO_SNDTIMEO setting
  *	@sk_rxhash: flow hash received from netif layer
  *	@sk_filter: socket filtering instructions
  *	@sk_protinfo: private area, net family specific, when not using slab
  *	@sk_timer: sock cleanup timer
  *	@sk_stamp: time stamp of last packet received
  *	@sk_socket: Identd and reporting IO signals
  *	@sk_user_data: RPC layer private data
  *	@sk_sndmsg_page: cached page for sendmsg
  *	@sk_sndmsg_off: cached offset for sendmsg
  *	@sk_peek_off: current peek_offset value
  *	@sk_send_head: front of stuff to transmit
  *	@sk_security: used by security modules
  *	@sk_mark: generic packet mark
  *	@sk_classid: this socket's cgroup classid
  *	@sk_cgrp: this socket's cgroup-specific proto data
  *	@sk_write_pending: a write to stream socket waits to start
  *	@sk_state_change: callback to indicate change in the state of the sock
  *	@sk_data_ready: callback to indicate there is data to be processed
  *	@sk_write_space: callback to indicate there is free send buffer space available
  *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
  *	@sk_backlog_rcv: callback to process the backlog
  *	@sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping

#define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
#define sk_dontcopy_end		__sk_common.skc_dontcopy_end
#define sk_hash			__sk_common.skc_hash
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
	socket_lock_t		sk_lock;
	struct sk_buff_head	sk_receive_queue;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 * Note : rmem_alloc is in this structure to fill a hole
	 * on 64bit arches, not because it is logically part of
	 * backlog.
	 */
	struct {
		atomic_t	rmem_alloc;
		int		len;
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc
	int			sk_forward_alloc;
#ifdef CONFIG_RPS
	__u32			sk_rxhash;
#endif
	atomic_t		sk_drops;
	int			sk_rcvbuf;

	struct sk_filter __rcu	*sk_filter;
	struct socket_wq __rcu	*sk_wq;

#ifdef CONFIG_NET_DMA
	struct sk_buff_head	sk_async_wait_queue;
#endif

#ifdef CONFIG_XFRM
	struct xfrm_policy	*sk_policy[2];
#endif
	unsigned long 		sk_flags;
	struct dst_entry	*sk_rx_dst;
	struct dst_entry	*sk_dst_cache;
	spinlock_t		sk_dst_lock;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;
	struct sk_buff_head	sk_write_queue;
	kmemcheck_bitfield_begin(flags);
	unsigned int		sk_shutdown  : 2,
				sk_no_check  : 2,
				sk_userlocks : 4,
				sk_protocol  : 8,
				sk_type      : 16;
	kmemcheck_bitfield_end(flags);
	int			sk_wmem_queued;
	gfp_t			sk_allocation;
	netdev_features_t	sk_route_caps;
	netdev_features_t	sk_route_nocaps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	int			sk_rcvlowat;
	unsigned long	        sk_lingertime;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
#ifdef CONFIG_CGROUPS
	__u32			sk_cgrp_prioidx;
#endif
	struct pid		*sk_peer_pid;
	const struct cred	*sk_peer_cred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	ktime_t			sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	__s32			sk_peek_off;
	int			sk_write_pending;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	__u32			sk_mark;
	u32			sk_classid;
	struct cg_proto		*sk_cgrp;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void                    (*sk_destruct)(struct sock *sk);
};

/*
 * SK_CAN_REUSE and SK_NO_REUSE on a socket indicate whether or not its
 * port may be reused by another socket. SK_FORCE_REUSE on a socket means
 * that the socket will reuse everybody else's port without looking at
 * their sk_reuse value.
 */

#define SK_NO_REUSE	0
#define SK_CAN_REUSE	1
#define SK_FORCE_REUSE	2

static inline int sk_peek_offset(struct sock *sk, int flags)
{
	if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0))
		return sk->sk_peek_off;
	else
		return 0;
}

static inline void sk_peek_offset_bwd(struct sock *sk, int val)
{
	if (sk->sk_peek_off >= 0) {
		if (sk->sk_peek_off >= val)
			sk->sk_peek_off -= val;
		else
			sk->sk_peek_off = 0;
	}
}

static inline void sk_peek_offset_fwd(struct sock *sk, int val)
{
	if (sk->sk_peek_off >= 0)
		sk->sk_peek_off += val;
}
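
/*
 * Illustrative sketch (not part of the original header) of how the peek
 * offset helpers above are typically used by a datagram protocol once
 * SO_PEEK_OFF is enabled (sk_peek_off >= 0):
 *
 *	off = sk_peek_offset(sk, flags);	// where a MSG_PEEK read starts
 *	...					// copy 'copied' bytes from 'off'
 *	if (flags & MSG_PEEK)
 *		sk_peek_offset_fwd(sk, copied);	// advance the peek cursor
 *	else
 *		sk_peek_offset_bwd(sk, copied);	// data consumed, pull cursor back
 */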

/*
 * Hashed lists helper routines
 */
static inline struct sock *sk_entry(const struct hlist_node *node)
{
	return hlist_entry(node, struct sock, sk_node);
}

static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline bool sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline bool sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static inline void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static inline void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

/* NB: equivalent to hlist_del_init_rcu */
static inline bool __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return true;
	}
	return false;
}

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed f.e. it is found in hash table
   or a list and the lookup is made under lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, f.e. it is true in context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}
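
/*
 * Illustrative sketch (not part of the original header): a lookup made
 * under a hash-table lock may take its own reference with sock_hold() and
 * keep using the socket after the lock is dropped, releasing it later with
 * sock_put(); the lookup helper and lock below are hypothetical:
 *
 *	spin_lock(&hash_lock);
 *	sk = my_lookup(...);
 *	if (sk)
 *		sock_hold(sk);		// valid: sk cannot vanish under the lock
 *	spin_unlock(&hash_lock);
 *	...
 *	if (sk)
 *		sock_put(sk);		// drop the reference taken above
 */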

static inline bool sk_del_node_init(struct sock *sk)
{
	bool rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)

static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return true;
	}
	return false;
}

static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
{
	bool rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	hlist_add_head_rcu(&sk->sk_node, list);
}

static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static inline void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static inline void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_rcu(__sk, node, list) \
	hlist_for_each_entry_rcu(__sk, node, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
	SOCK_TIMESTAMPING_TX_HARDWARE,  /* %SOF_TIMESTAMPING_TX_HARDWARE */
	SOCK_TIMESTAMPING_TX_SOFTWARE,  /* %SOF_TIMESTAMPING_TX_SOFTWARE */
	SOCK_TIMESTAMPING_RX_HARDWARE,  /* %SOF_TIMESTAMPING_RX_HARDWARE */
	SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
	SOCK_TIMESTAMPING_SOFTWARE,     /* %SOF_TIMESTAMPING_SOFTWARE */
	SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
	SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
	SOCK_FASYNC, /* fasync() active */
	SOCK_RXQ_OVFL,
	SOCK_ZEROCOPY, /* buffers from userspace */
	SOCK_WIFI_STATUS, /* push wifi status to userspace */
	SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
		     * Will use last 4 bytes of packet sent from
		     * user-space instead.
		     */
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

static inline gfp_t sk_gfp_atomic(struct sock *sk, gfp_t gfp_mask)
{
	return GFP_ATOMIC;
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline bool sk_acceptq_is_full(const struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(const struct sock *sk)
{
	return sk->sk_wmem_queued >> 1;
}

static inline int sk_stream_wspace(const struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline bool sk_stream_memory_free(const struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	/* make sure the skb dst is refcounted, since we are going to leave the rcu lock */
	skb_dst_force(skb);

	if (!sk->sk_backlog.tail)
		sk->sk_backlog.head = skb;
	else
		sk->sk_backlog.tail->next = skb;

	sk->sk_backlog.tail = skb;
	skb->next = NULL;
}

/*
 * Take into account size of receive queue and backlog queue
 * Do not take into account this skb truesize,
 * to allow even a single big packet to come.
 */
static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
				     unsigned int limit)
{
	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

	return qsize > limit;
}

/* The per-socket spinlock must be held here. */
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
					      unsigned int limit)
{
	if (sk_rcvqueues_full(sk, skb, limit))
		return -ENOBUFS;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	return sk->sk_backlog_rcv(sk, skb);
}
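
/*
 * Illustrative sketch (not part of the original header), modeled on a
 * protocol receive path such as tcp_v4_rcv(): packets arriving in softirq
 * context are processed directly while the socket is not owned by a user
 * context, and queued to the backlog otherwise:
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		ret = sk_backlog_rcv(sk, skb);		// process immediately
 *	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
 *		goto drop;				// backlog limit exceeded
 *	bh_unlock_sock(sk);
 */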

static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_reset_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_save_rxhash(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (unlikely(sk->sk_rxhash != skb->rxhash)) {
		sock_rps_reset_flow(sk);
		sk->sk_rxhash = skb->rxhash;
	}
#endif
}

static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
	sock_rps_reset_flow(sk);
	sk->sk_rxhash = 0;
#endif
}
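
/*
 * Illustrative sketch (not part of the original header): a protocol's
 * input path typically saves the flow hash of incoming packets, while its
 * recvmsg path records the flow so Receive Packet Steering can steer
 * future packets of this flow towards the consuming CPU:
 *
 *	sock_rps_save_rxhash(sk, skb);	// e.g. from the protocol rcv handler
 *	...
 *	sock_rps_record_flow(sk);	// e.g. from the protocol recvmsg path
 */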

#define sk_wait_event(__sk, __timeo, __condition)			\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = schedule_timeout(*(__timeo));	\
		}							\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})
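
/*
 * Illustrative sketch (not part of the original header), modeled on
 * sk_wait_data(): sk_wait_event() is used inside a prepare_to_wait()/
 * finish_wait() pair; it releases the socket lock while sleeping and
 * re-checks the condition once the lock is re-acquired:
 *
 *	DEFINE_WAIT(wait);
 *
 *	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 *	rc = sk_wait_event(sk, timeo,
 *			   !skb_queue_empty(&sk->sk_receive_queue));
 *	finish_wait(sk_sleep(sk), &wait);
 */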

extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;
struct module;

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept)(struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
#ifdef CONFIG_COMPAT
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*compat_ioctl)(struct sock *sk,
					unsigned int cmd, unsigned long arg);
#endif
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	void		(*release_cb)(struct sock *sk);
	void		(*mtu_reduced)(struct sock *sk);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	void			(*rehash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);
	void			(*clear_sk)(struct sock *sk, int size);

	/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
	unsigned int		inuse_idx;
#endif

	/* Memory pressure */
	void			(*enter_memory_pressure)(struct sock *sk);
	atomic_long_t		*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All the __sk_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	long			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;
	bool			no_autobind;

	struct kmem_cache	*slab;
	unsigned int		obj_size;
	int			slab_flags;

	struct percpu_counter	*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	union {
		struct inet_hashinfo	*hashinfo;
		struct udp_table	*udp_table;
		struct raw_hashinfo	*raw_hash;
	} h;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
#ifdef CONFIG_MEMCG_KMEM
	/*
	 * cgroup specific init/deinit functions. Called once for all
	 * protocols that implement it, from the cgroup's populate function.
	 * This function has to set up any files the protocol wants to
	 * appear in the kmem cgroup filesystem.
	 */
	int			(*init_cgroup)(struct mem_cgroup *memcg,
					       struct cgroup_subsys *ss);
	void			(*destroy_cgroup)(struct mem_cgroup *memcg);
	struct cg_proto		*(*proto_cgroup)(struct mem_cgroup *memcg);
#endif
};

/*
 * Bits in struct cg_proto.flags
 */
enum cg_proto_flags {
	/* Currently active and new sockets should be assigned to cgroups */
	MEMCG_SOCK_ACTIVE,
	/* It was ever activated; we must disarm static keys on destruction */
	MEMCG_SOCK_ACTIVATED,
};

struct cg_proto {
	void			(*enter_memory_pressure)(struct sock *sk);
	struct res_counter	*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	int			*memory_pressure;
	long			*sysctl_mem;
	unsigned long		flags;
	/*
	 * memcg field is used to find which memcg we belong to directly.
	 * Each memcg struct can hold more than one cg_proto, so container_of
	 * won't really cut it.
	 *
	 * The elegant solution would be having an inverse function to
	 * proto_cgroup in struct proto, but that means polluting the structure
	 * for everybody, instead of just for memcg users.
	 */
	struct mem_cgroup	*memcg;
};

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);
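
/*
 * Illustrative sketch (not part of the original header): a protocol module
 * fills in a struct proto and registers it, usually from its module init
 * path; the names below are hypothetical:
 *
 *	static struct proto my_proto = {
 *		.name		= "MYPROTO",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct my_sock),
 *	};
 *
 *	err = proto_register(&my_proto, 1);	// 1: create a private slab cache
 *	...
 *	proto_unregister(&my_proto);
 */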

static inline bool memcg_proto_active(struct cg_proto *cg_proto)
{
	return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
}

static inline bool memcg_proto_activated(struct cg_proto *cg_proto)
{
	return test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags);
}

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_NET)
extern struct static_key memcg_socket_limit_enabled;
static inline struct cg_proto *parent_cg_proto(struct proto *proto,
					       struct cg_proto *cg_proto)
{
	return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
}
#define mem_cgroup_sockets_enabled static_key_false(&memcg_socket_limit_enabled)
#else
#define mem_cgroup_sockets_enabled 0
static inline struct cg_proto *parent_cg_proto(struct proto *proto,
					       struct cg_proto *cg_proto)
{
	return NULL;
}
#endif


static inline bool sk_has_memory_pressure(const struct sock *sk)
{
	return sk->sk_prot->memory_pressure != NULL;
}

static inline bool sk_under_memory_pressure(const struct sock *sk)
{
	if (!sk->sk_prot->memory_pressure)
		return false;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		return !!*sk->sk_cgrp->memory_pressure;

	return !!*sk->sk_prot->memory_pressure;
}

static inline void sk_leave_memory_pressure(struct sock *sk)
{
	int *memory_pressure = sk->sk_prot->memory_pressure;

	if (!memory_pressure)
		return;

	if (*memory_pressure)
		*memory_pressure = 0;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct cg_proto *cg_proto = sk->sk_cgrp;
		struct proto *prot = sk->sk_prot;

		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
			if (*cg_proto->memory_pressure)
				*cg_proto->memory_pressure = 0;
	}

}

static inline void sk_enter_memory_pressure(struct sock *sk)
{
	if (!sk->sk_prot->enter_memory_pressure)
		return;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct cg_proto *cg_proto = sk->sk_cgrp;
		struct proto *prot = sk->sk_prot;

		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
			cg_proto->enter_memory_pressure(sk);
	}

	sk->sk_prot->enter_memory_pressure(sk);
}

static inline long sk_prot_mem_limits(const struct sock *sk, int index)
{
	long *prot = sk->sk_prot->sysctl_mem;
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		prot = sk->sk_cgrp->sysctl_mem;
	return prot[index];
}

static inline void memcg_memory_allocated_add(struct cg_proto *prot,
					      unsigned long amt,
					      int *parent_status)
{
	struct res_counter *fail;
	int ret;

	ret = res_counter_charge_nofail(prot->memory_allocated,
					amt << PAGE_SHIFT, &fail);
	if (ret < 0)
		*parent_status = OVER_LIMIT;
}

static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
					      unsigned long amt)
{
	res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
}

static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
{
	u64 ret;
	ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
	return ret >> PAGE_SHIFT;
}

static inline long
sk_memory_allocated(const struct sock *sk)
{
	struct proto *prot = sk->sk_prot;
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		return memcg_memory_allocated_read(sk->sk_cgrp);

	return atomic_long_read(prot->memory_allocated);
}

static inline long
sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
{
	struct proto *prot = sk->sk_prot;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status);
		/* update the root cgroup regardless */
		atomic_long_add_return(amt, prot->memory_allocated);
		return memcg_memory_allocated_read(sk->sk_cgrp);
	}

	return atomic_long_add_return(amt, prot->memory_allocated);
}

static inline void
sk_memory_allocated_sub(struct sock *sk, int amt)
{
	struct proto *prot = sk->sk_prot;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		memcg_memory_allocated_sub(sk->sk_cgrp, amt);

	atomic_long_sub(amt, prot->memory_allocated);
}

static inline void sk_sockets_allocated_dec(struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct cg_proto *cg_proto = sk->sk_cgrp;

		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
			percpu_counter_dec(cg_proto->sockets_allocated);
	}

	percpu_counter_dec(prot->sockets_allocated);
}

static inline void sk_sockets_allocated_inc(struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct cg_proto *cg_proto = sk->sk_cgrp;

		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
			percpu_counter_inc(cg_proto->sockets_allocated);
	}

	percpu_counter_inc(prot->sockets_allocated);
}

static inline int
sk_sockets_allocated_read_positive(struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)