/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *	Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *              Steve Whitehouse:       Default routines for sock_ops
 *              Arnaldo C. Melo :	removed net_pinfo, tp_pinfo and made
 *              			protinfo be just a void pointer, as the
 *              			protocol specific parts were moved to
 *              			respective headers and ipv4/v6, etc now
 *              			use private slabcaches for its socks
 *              Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/bitops.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/memcontrol.h>
#include <linux/res_counter.h>
#include <linux/static_key.h>
#include <linux/aio.h>
#include <linux/sched.h>

#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <linux/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>

struct cgroup;
struct cgroup_subsys;
#ifdef CONFIG_NET
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss);
void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg);
#else
static inline
int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	return 0;
}
static inline
void mem_cgroup_sockets_destroy(struct mem_cgroup *memcg)
{
}
#endif

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline __printf(2, 3)
void SOCK_DEBUG(const struct sock *sk, const char *msg, ...)
{
}
#endif
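
/* Illustrative use (a sketch; the format string and value are made up):
 *
 *	SOCK_DEBUG(sk, "%s: state %d\n", __func__, sk->sk_state);
 *
 * With SOCK_DEBUGGING defined, the message is printed only for sockets
 * that have SOCK_DBG set, i.e. sockets with %SO_DEBUG enabled.
 */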

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;
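
/* Usage sketch (illustrative): process context serializes with
 * lock_sock(sk)/release_sock(sk), which set sk_lock.owned and sleep on
 * sk_lock.wq when another user owns the socket. Softirq context uses
 * bh_lock_sock(sk)/bh_unlock_sock(sk), which take only the slock, and
 * tests sock_owned_by_user(sk) to decide whether an incoming packet can
 * be processed directly or must be queued to the socket backlog.
 */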

struct sock;
struct proto;
struct net;

/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_daddr: Foreign IPv4 addr
 *	@skc_rcv_saddr: Bound local IPv4 addr
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_u16hashes: two u16 hash values used by UDP lookup tables
 *	@skc_dport: placeholder for inet_dport/tw_dport
 *	@skc_num: placeholder for inet_num/tw_num
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
 *	@skc_prot: protocol handlers inside a network family
 *	@skc_net: reference to the network namespace of this socket
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
 *	@skc_tx_queue_mapping: tx queue number for this connection
 *	@skc_refcnt: reference count
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	/* skc_daddr and skc_rcv_saddr must be grouped on an 8-byte aligned
	 * address on 64bit arches : cf INET_MATCH() and INET_TW_MATCH()
	 */
	union {
		unsigned long	skc_addrpair;
		struct {
			__be32	skc_daddr;
			__be32	skc_rcv_saddr;
		};
	};
	union  {
		unsigned int	skc_hash;
		__u16		skc_u16hashes[2];
	};
	/* skc_dport && skc_num must be grouped as well */
	union {
		u32		skc_portpair;
		struct {
			__be16	skc_dport;
			__u16	skc_num;
		};
	};

	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_bind_node;
		struct hlist_nulls_node skc_portaddr_node;
	};
	struct proto		*skc_prot;
#ifdef CONFIG_NET_NS
	struct net	 	*skc_net;
#endif
	/*
	 * fields between dontcopy_begin/dontcopy_end
	 * are not copied in sock_copy()
	 */
	/* private: */
	int			skc_dontcopy_begin[0];
	/* public: */
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node skc_nulls_node;
	};
	int			skc_tx_queue_mapping;
	atomic_t		skc_refcnt;
	/* private: */
	int                     skc_dontcopy_end[0];
	/* public: */
};

struct cg_proto;
/**
  *	struct sock - network layer representation of sockets
  *	@__sk_common: shared layout with inet_timewait_sock
  *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
  *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
  *	@sk_lock:	synchronizer
  *	@sk_rcvbuf: size of receive buffer in bytes
  *	@sk_wq: sock wait queue and async head
  *	@sk_rx_dst: receive input route used by early tcp demux
  *	@sk_dst_cache: destination cache
  *	@sk_dst_lock: destination cache lock
  *	@sk_policy: flow policy
  *	@sk_receive_queue: incoming packets
  *	@sk_wmem_alloc: transmit queue bytes committed
  *	@sk_write_queue: Packet sending queue
  *	@sk_async_wait_queue: DMA copied packets
  *	@sk_omem_alloc: "o" is "option" or "other"
  *	@sk_wmem_queued: persistent queue size
  *	@sk_forward_alloc: space allocated forward
  *	@sk_allocation: allocation mode
  *	@sk_sndbuf: size of send buffer in bytes
  *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
  *		   %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
  *	@sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
  *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
  *	@sk_route_nocaps: forbidden route capabilities (e.g. %NETIF_F_GSO_MASK)
  *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
  *	@sk_gso_max_size: Maximum GSO segment size to build
  *	@sk_gso_max_segs: Maximum number of GSO segments
  *	@sk_lingertime: %SO_LINGER l_linger setting
  *	@sk_backlog: always used with the per-socket spinlock held
  *	@sk_callback_lock: used with the callbacks in the end of this struct
  *	@sk_error_queue: rarely used
  *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
  *			  IPV6_ADDRFORM for instance)
  *	@sk_err: last error
  *	@sk_err_soft: errors that don't cause failure but are the cause of a
  *		      persistent failure not just 'timed out'
  *	@sk_drops: raw/udp drops counter
  *	@sk_ack_backlog: current listen backlog
  *	@sk_max_ack_backlog: listen backlog set in listen()
  *	@sk_priority: %SO_PRIORITY setting
  *	@sk_cgrp_prioidx: socket group's priority map index
  *	@sk_type: socket type (%SOCK_STREAM, etc)
  *	@sk_protocol: which protocol this socket belongs in this network family
  *	@sk_peer_pid: &struct pid for this socket's peer
  *	@sk_peer_cred: %SO_PEERCRED setting
  *	@sk_rcvlowat: %SO_RCVLOWAT setting
  *	@sk_rcvtimeo: %SO_RCVTIMEO setting
  *	@sk_sndtimeo: %SO_SNDTIMEO setting
  *	@sk_rxhash: flow hash received from netif layer
  *	@sk_filter: socket filtering instructions
  *	@sk_protinfo: private area, net family specific, when not using slab
  *	@sk_timer: sock cleanup timer
  *	@sk_stamp: time stamp of last packet received
  *	@sk_socket: Identd and reporting IO signals
  *	@sk_user_data: RPC layer private data
  *	@sk_frag: cached page frag
  *	@sk_peek_off: current peek_offset value
  *	@sk_send_head: front of stuff to transmit
  *	@sk_security: used by security modules
  *	@sk_mark: generic packet mark
  *	@sk_classid: this socket's cgroup classid
  *	@sk_cgrp: this socket's cgroup-specific proto data
  *	@sk_write_pending: a write to stream socket waits to start
  *	@sk_state_change: callback to indicate change in the state of the sock
  *	@sk_data_ready: callback to indicate there is data to be processed
  *	@sk_write_space: callback to indicate there is buffer sending space available
  *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
  *	@sk_backlog_rcv: callback to process the backlog
  *	@sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping

#define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
#define sk_dontcopy_end		__sk_common.skc_dontcopy_end
#define sk_hash			__sk_common.skc_hash
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
	socket_lock_t		sk_lock;
	struct sk_buff_head	sk_receive_queue;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 * Note : rmem_alloc is in this structure to fill a hole
	 * on 64bit arches, not because it is logically part of
	 * backlog.
	 */
	struct {
		atomic_t	rmem_alloc;
		int		len;
		struct sk_buff	*head;
		struct sk_buff	*tail;
	} sk_backlog;
#define sk_rmem_alloc sk_backlog.rmem_alloc
	int			sk_forward_alloc;
#ifdef CONFIG_RPS
	__u32			sk_rxhash;
#endif
	atomic_t		sk_drops;
	int			sk_rcvbuf;

	struct sk_filter __rcu	*sk_filter;
	struct socket_wq __rcu	*sk_wq;

#ifdef CONFIG_NET_DMA
	struct sk_buff_head	sk_async_wait_queue;
#endif

#ifdef CONFIG_XFRM
	struct xfrm_policy	*sk_policy[2];
#endif
	unsigned long 		sk_flags;
	struct dst_entry	*sk_rx_dst;
	struct dst_entry	*sk_dst_cache;
	spinlock_t		sk_dst_lock;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;
	struct sk_buff_head	sk_write_queue;
	kmemcheck_bitfield_begin(flags);
	unsigned int		sk_shutdown  : 2,
				sk_no_check  : 2,
				sk_userlocks : 4,
				sk_protocol  : 8,
				sk_type      : 16;
	kmemcheck_bitfield_end(flags);
	int			sk_wmem_queued;
	gfp_t			sk_allocation;
	netdev_features_t	sk_route_caps;
	netdev_features_t	sk_route_nocaps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	u16			sk_gso_max_segs;
	int			sk_rcvlowat;
	unsigned long	        sk_lingertime;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
#ifdef CONFIG_CGROUPS
	__u32			sk_cgrp_prioidx;
#endif
	struct pid		*sk_peer_pid;
	const struct cred	*sk_peer_cred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	ktime_t			sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page_frag	sk_frag;
	struct sk_buff		*sk_send_head;
	__s32			sk_peek_off;
	int			sk_write_pending;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	__u32			sk_mark;
	u32			sk_classid;
	struct cg_proto		*sk_cgrp;
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void                    (*sk_destruct)(struct sock *sk);
};

/*
 * SK_CAN_REUSE and SK_NO_REUSE on a socket mean whether or not it is OK
 * for its port to be reused by someone else. SK_FORCE_REUSE on a socket
 * means that the socket will reuse everybody else's port without looking
 * at the other's sk_reuse value.
 */

#define SK_NO_REUSE	0
#define SK_CAN_REUSE	1
#define SK_FORCE_REUSE	2

static inline int sk_peek_offset(struct sock *sk, int flags)
{
	if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0))
		return sk->sk_peek_off;
	else
		return 0;
}

static inline void sk_peek_offset_bwd(struct sock *sk, int val)
{
	if (sk->sk_peek_off >= 0) {
		if (sk->sk_peek_off >= val)
			sk->sk_peek_off -= val;
		else
			sk->sk_peek_off = 0;
	}
}

static inline void sk_peek_offset_fwd(struct sock *sk, int val)
{
	if (sk->sk_peek_off >= 0)
		sk->sk_peek_off += val;
}
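
/* Worked example (illustrative): after a recvmsg() with MSG_PEEK that
 * consumed 100 bytes, sk_peek_offset_fwd(sk, 100) moves sk_peek_off from
 * 0 to 100, so the next MSG_PEEK resumes past the already-peeked data.
 * A normal read of 60 bytes then calls sk_peek_offset_bwd(sk, 60),
 * leaving sk_peek_off at 40. A negative sk_peek_off disables the
 * feature and sk_peek_offset() reports 0.
 */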

/*
 * Hashed lists helper routines
 */
static inline struct sock *sk_entry(const struct hlist_node *node)
{
	return hlist_entry(node, struct sock, sk_node);
}

static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline bool sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline bool sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static inline void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static inline void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static inline void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

/* NB: equivalent to hlist_del_init_rcu */
static inline bool __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return true;
	}
	return false;
}

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed f.e. it is found in hash table
   or a list and the lookup is made under lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, f.e. it is true in context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}
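
/* Sketch (illustrative): the usual pattern takes a reference while the
 * lookup guarantees the socket is alive, then drops it when done:
 *
 *	sk = some_lookup(...);
 *	sock_hold(sk);
 *	...
 *	sock_put(sk);
 *
 * Here some_lookup() is a placeholder for a hash-table lookup performed
 * under the appropriate lock or RCU. sock_put() (defined later in this
 * header) may free the socket; __sock_put() is only for contexts that
 * know the refcount cannot hit zero, as described above.
 */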

static inline bool sk_del_node_init(struct sock *sk)
{
	bool rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)

static inline bool __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return true;
	}
	return false;
}

static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
{
	bool rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static inline void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static inline void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	hlist_add_head_rcu(&sk->sk_node, list);
}

static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static inline void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static inline void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_rcu(__sk, node, list) \
	hlist_for_each_entry_rcu(__sk, node, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)

static inline struct user_namespace *sk_user_ns(struct sock *sk)
{
	/* Careful: only use this in a context where these parameters
	 * cannot change and must all be valid, such as recvmsg from
	 * userspace.
	 */
	return sk->sk_socket->file->f_cred->user_ns;
}

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
	SOCK_MEMALLOC, /* VM depends on this socket for swapping */
	SOCK_TIMESTAMPING_TX_HARDWARE,  /* %SOF_TIMESTAMPING_TX_HARDWARE */
	SOCK_TIMESTAMPING_TX_SOFTWARE,  /* %SOF_TIMESTAMPING_TX_SOFTWARE */
	SOCK_TIMESTAMPING_RX_HARDWARE,  /* %SOF_TIMESTAMPING_RX_HARDWARE */
	SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
	SOCK_TIMESTAMPING_SOFTWARE,     /* %SOF_TIMESTAMPING_SOFTWARE */
	SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
	SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
	SOCK_FASYNC, /* fasync() active */
	SOCK_RXQ_OVFL,
	SOCK_ZEROCOPY, /* buffers from userspace */
	SOCK_WIFI_STATUS, /* push wifi status to userspace */
	SOCK_NOFCS, /* Tell NIC not to do the Ethernet FCS.
		     * Will use last 4 bytes of packet sent from
		     * user-space instead.
		     */
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline bool sock_flag(const struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}
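
/* Example (illustrative): marking a socket dead and testing it later.
 *
 *	sock_set_flag(sk, SOCK_DEAD);
 *	...
 *	if (sock_flag(sk, SOCK_DEAD))
 *		return;
 *
 * Note that sock_set_flag()/sock_reset_flag() use the non-atomic
 * __set_bit()/__clear_bit(), so callers are expected to hold the socket
 * lock or otherwise own sk_flags.
 */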

#ifdef CONFIG_NET
extern struct static_key memalloc_socks;
static inline int sk_memalloc_socks(void)
{
	return static_key_false(&memalloc_socks);
}
#else

static inline int sk_memalloc_socks(void)
{
	return 0;
}

#endif

static inline gfp_t sk_gfp_atomic(struct sock *sk, gfp_t gfp_mask)
{
	return GFP_ATOMIC | (sk->sk_allocation & __GFP_MEMALLOC);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline bool sk_acceptq_is_full(const struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(const struct sock *sk)
{
	return sk->sk_wmem_queued >> 1;
}

static inline int sk_stream_wspace(const struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline bool sk_stream_memory_free(const struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}
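
/* Worked example (illustrative numbers): with sk_sndbuf = 16384 and
 * sk_wmem_queued = 12000, sk_stream_wspace() reports 4384 free bytes
 * while sk_stream_min_wspace() asks for 12000 >> 1 = 6000 bytes, so
 * writers are not woken until a meaningful fraction of the queued data
 * has drained.
 */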

/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	/* don't let the skb dst go un-refcounted, we are going to leave the rcu lock */
	skb_dst_force(skb);

	if (!sk->sk_backlog.tail)
		sk->sk_backlog.head = skb;
	else
		sk->sk_backlog.tail->next = skb;

	sk->sk_backlog.tail = skb;
	skb->next = NULL;
}

/*
 * Take into account size of receive queue and backlog queue
 * Do not take into account this skb truesize,
 * to allow even a single big packet to come.
 */
static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
				     unsigned int limit)
{
	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);

	return qsize > limit;
}

/* The per-socket spinlock must be held here. */
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
					      unsigned int limit)
{
	if (sk_rcvqueues_full(sk, skb, limit))
		return -ENOBUFS;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}
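
/* Typical use, as a sketch (mirrors what protocol rcv handlers such as
 * TCP's and UDP's do; the surrounding control flow is illustrative):
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		rc = sk_backlog_rcv(sk, skb);
 *	else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
 *		rc = -ENOBUFS;
 *	bh_unlock_sock(sk);
 *
 * Queued skbs are drained later by __release_sock() when the owning
 * process context releases the socket lock.
 */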

extern int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (sk_memalloc_socks() && skb_pfmemalloc(skb))
		return __sk_backlog_rcv(sk, skb);

	return sk->sk_backlog_rcv(sk, skb);
}

static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_reset_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_save_rxhash(struct sock *sk,
					const struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (unlikely(sk->sk_rxhash != skb->rxhash)) {
		sock_rps_reset_flow(sk);
		sk->sk_rxhash = skb->rxhash;
	}
#endif
}

static inline void sock_rps_reset_rxhash(struct sock *sk)
{
#ifdef CONFIG_RPS
	sock_rps_reset_flow(sk);
	sk->sk_rxhash = 0;
#endif
}
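
/* Sketch of how these helpers fit together (illustrative): a protocol's
 * recvmsg/sendmsg path calls sock_rps_record_flow(sk) so Receive Flow
 * Steering learns which CPU is consuming the flow; the rx path calls
 * sock_rps_save_rxhash(sk, skb) to remember the flow hash, and
 * sock_rps_reset_flow()/sock_rps_reset_rxhash() drop the association
 * when the hash changes or the socket is disconnected.
 */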

#define sk_wait_event(__sk, __timeo, __condition)			\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = schedule_timeout(*(__timeo));	\
		}							\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})
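
/* Usage sketch (illustrative, modelled on sk_wait_data()): the caller
 * holds the socket lock and waits for a condition with a timeout:
 *
 *	DEFINE_WAIT(wait);
 *
 *	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 *	rc = sk_wait_event(sk, &timeo,
 *			   !skb_queue_empty(&sk->sk_receive_queue));
 *	finish_wait(sk_sleep(sk), &wait);
 *
 * The macro releases the socket lock around schedule_timeout() so the
 * condition can be satisfied by softirq/backlog processing, then
 * re-takes the lock and re-evaluates the condition.
 */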

extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);
extern void sk_set_memalloc(struct sock *sk);
extern void sk_clear_memalloc(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;
struct module;

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept)(struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
#ifdef CONFIG_COMPAT
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
	int			(*compat_ioctl)(struct sock *sk,
					unsigned int cmd, unsigned long arg);
#endif
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	void		(*release_cb)(struct sock *sk);
	void		(*mtu_reduced)(struct sock *sk);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	void			(*rehash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);
	void			(*clear_sk)(struct sock *sk, int size);

	/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
	unsigned int		inuse_idx;
#endif

	/* Memory pressure */
	void			(*enter_memory_pressure)(struct sock *sk);
	atomic_long_t		*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All the __sk_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	long			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;
	bool			no_autobind;

	struct kmem_cache	*slab;
	unsigned int		obj_size;
	int			slab_flags;

	struct percpu_counter	*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	union {
		struct inet_hashinfo	*hashinfo;
		struct udp_table	*udp_table;
		struct raw_hashinfo	*raw_hash;
	} h;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
#ifdef CONFIG_MEMCG_KMEM
	/*
	 * cgroup specific init/deinit functions. Called once for all
	 * protocols that implement it, from the cgroup's populate function.
	 * This function has to set up any files the protocol wants to
	 * appear in the kmem cgroup filesystem.
	 */
	int			(*init_cgroup)(struct mem_cgroup *memcg,
					       struct cgroup_subsys *ss);
	void			(*destroy_cgroup)(struct mem_cgroup *memcg);
	struct cg_proto		*(*proto_cgroup)(struct mem_cgroup *memcg);
#endif
};

/*
 * Bits in struct cg_proto.flags
 */
enum cg_proto_flags {
	/* Currently active and new sockets should be assigned to cgroups */
	MEMCG_SOCK_ACTIVE,
	/* It was ever activated; we must disarm static keys on destruction */
	MEMCG_SOCK_ACTIVATED,
};

struct cg_proto {
	void			(*enter_memory_pressure)(struct sock *sk);
	struct res_counter	*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	int			*memory_pressure;
	long			*sysctl_mem;
	unsigned long		flags;
	/*
	 * The memcg field is used to find which memcg we belong to directly.
	 * Each memcg struct can hold more than one cg_proto, so container_of
	 * won't really cut it.
	 *
	 * The elegant solution would be having an inverse function to
	 * proto_cgroup in struct proto, but that means polluting the structure
	 * for everybody, instead of just for memcg users.
	 */
	struct mem_cgroup	*memcg;
};

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);
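
/* Minimal registration sketch (illustrative; the "foo" names are
 * hypothetical). A protocol fills in a struct proto and registers it,
 * typically from its module init, with alloc_slab=1 so its socks are
 * allocated from a private slab cache sized by .obj_size:
 *
 *	static struct proto foo_proto = {
 *		.name		= "FOO",
 *		.owner		= THIS_MODULE,
 *		.obj_size	= sizeof(struct foo_sock),
 *	};
 *
 *	err = proto_register(&foo_proto, 1);
 *	...
 *	proto_unregister(&foo_proto);
 */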

static inline bool memcg_proto_active(struct cg_proto *cg_proto)
{
	return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
}

static inline bool memcg_proto_activated(struct cg_proto *cg_proto)
{
	return test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags);
}

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */

#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_NET)
extern struct static_key memcg_socket_limit_enabled;
static inline struct cg_proto *parent_cg_proto(struct proto *proto,
					       struct cg_proto *cg_proto)
{
	return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
}
#define mem_cgroup_sockets_enabled static_key_false(&memcg_socket_limit_enabled)
#else
#define mem_cgroup_sockets_enabled 0
static inline struct cg_proto *parent_cg_proto(struct proto *proto,
					       struct cg_proto *cg_proto)
{
	return NULL;
}
#endif


static inline bool sk_has_memory_pressure(const struct sock *sk)
{
	return sk->sk_prot->memory_pressure != NULL;
}

static inline bool sk_under_memory_pressure(const struct sock *sk)
{
	if (!sk->sk_prot->memory_pressure)
		return false;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		return !!*sk->sk_cgrp->memory_pressure;

	return !!*sk->sk_prot->memory_pressure;
}

static inline void sk_leave_memory_pressure(struct sock *sk)
{
	int *memory_pressure = sk->sk_prot->memory_pressure;

	if (!memory_pressure)
		return;

	if (*memory_pressure)
		*memory_pressure = 0;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct cg_proto *cg_proto = sk->sk_cgrp;
		struct proto *prot = sk->sk_prot;

		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
			if (*cg_proto->memory_pressure)
				*cg_proto->memory_pressure = 0;
	}
}

static inline void sk_enter_memory_pressure(struct sock *sk)
{
	if (!sk->sk_prot->enter_memory_pressure)
		return;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
		struct cg_proto *cg_proto = sk->sk_cgrp;
		struct proto *prot = sk->sk_prot;

		for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
			cg_proto->enter_memory_pressure(sk);
	}

	sk->sk_prot->enter_memory_pressure(sk);
}

static inline long sk_prot_mem_limits(const struct sock *sk, int index)
{
	long *prot = sk->sk_prot->sysctl_mem;
	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		prot = sk->sk_cgrp->sysctl_mem;
	return prot[index];
}

static inline void memcg_memory_allocated_add(struct cg_proto *prot,
					      unsigned long amt,
					      int *parent_status)
{
	struct res_counter *fail;
	int ret;

	ret = res_counter_charge_nofail(prot->memory_allocated,
					amt << PAGE_SHIFT, &fail);
	if (ret < 0)
		*parent_status = OVER_LIMIT;
}

static inline void memcg_memory_allocated_sub(struct cg_proto *prot,
					      unsigned long amt)
{
	res_counter_uncharge(prot->memory_allocated, amt << PAGE_SHIFT);
}

static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
{
	u64 ret;
	ret = res_counter_read_u64(prot->memory_allocated, RES_USAGE);
	return ret >> PAGE_SHIFT;
}

static inline long
sk_memory_allocated(const struct sock *sk)
{
	struct proto *prot = sk->sk_prot;

	if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
		return memcg_memory_allocated_read(sk->sk_cgrp);

	return atomic_long_read(prot->memory_allocated);
}

static inline long
sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
{
	struct proto *prot = sk->sk_prot;