/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  AF_SMC protocol family socket handler keeping the AF_INET sock address type
 *  applies to SOCK_STREAM sockets only
 *  offers an alternative communication option for TCP-protocol sockets
 *  applicable with RoCE-cards only
 *
 *  Initial restrictions:
 *    - support for alternate links postponed
 *
 *  Copyright IBM Corp. 2016, 2018
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 *              based on prototype from Frank Blaschka
 */

#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/smc.h>
#include <asm/ioctls.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_pnet.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"

static DEFINE_MUTEX(smc_create_lgr_pending);	/* serialize link group
						 * creation
						 */

static void smc_tcp_listen_work(struct work_struct *);
static void smc_connect_work(struct work_struct *);

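/* forward a keepalive setting to the internal CLC/TCP socket */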
static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);

	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}

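/* hash tables of all SMC sockets, one for each IP version */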
static struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};

static struct smc_hashinfo smc_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
};

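/* register an SMC socket in its protocol hash table */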
int smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
	struct hlist_head *head;

	head = &h->ht;

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock_bh(&h->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(smc_hash_sk);

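/* remove an SMC socket from its protocol hash table */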
void smc_unhash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(smc_unhash_sk);

struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);

struct proto smc_proto6 = {
	.name		= "SMC6",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v6_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto6);

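/* release an SMC socket: terminate the SMC connection and free the sock */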
static int smc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = 0;

	if (!sk)
		goto out;

	smc = smc_sk(sk);

	/* cleanup for a dangling non-blocking connect */
	flush_work(&smc->connect_work);
	kfree(smc->connect_info);
	smc->connect_info = NULL;

	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);

	if (!smc->use_fallback) {
		rc = smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	}
	if (smc->clcsock) {
		sock_release(smc->clcsock);
		smc->clcsock = NULL;
	}
	if (smc->use_fallback) {
		if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
			sock_put(sk); /* passive closing */
		sk->sk_state = SMC_CLOSED;
		sk->sk_state_change(sk);
	}

	/* detach socket */
	sock_orphan(sk);
	sock->sk = NULL;
	if (!smc->use_fallback && sk->sk_state == SMC_CLOSED)
		smc_conn_free(&smc->conn);
	release_sock(sk);

	sk->sk_prot->unhash(sk);
	sock_put(sk); /* final sock_put */
out:
	return rc;
}

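/* sk_destruct callback, invoked when the last sock reference is dropped */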
static void smc_destruct(struct sock *sk)
{
	if (sk->sk_state != SMC_CLOSED)
		return;
	if (!sock_flag(sk, SOCK_DEAD))
		return;

	sk_refcnt_debug_dec(sk);
}

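/* allocate and initialize a new SMC sock for the given protocol version */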
static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
				   int protocol)
{
	struct smc_sock *smc;
	struct proto *prot;
	struct sock *sk;

	prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
	sk->sk_state = SMC_INIT;
	sk->sk_destruct = smc_destruct;
	sk->sk_protocol = protocol;
	smc = smc_sk(sk);
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	INIT_WORK(&smc->connect_work, smc_connect_work);
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	spin_lock_init(&smc->conn.send_lock);
	sk->sk_prot->hash(sk);
	sk_refcnt_debug_inc(sk);

	return sk;
}

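/* bind an SMC socket by binding its internal CLC/TCP socket */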
static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
		    int addr_len)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);

	/* replicate tests from inet_bind(), to be safe wrt. future changes */
	rc = -EINVAL;
	if (addr_len < sizeof(struct sockaddr_in))
		goto out;

	rc = -EAFNOSUPPORT;
	if (addr->sin_family != AF_INET &&
	    addr->sin_family != AF_INET6 &&
	    addr->sin_family != AF_UNSPEC)
		goto out;
	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
	if (addr->sin_family == AF_UNSPEC &&
	    addr->sin_addr.s_addr != htonl(INADDR_ANY))
		goto out;

	lock_sock(sk);

	/* Check if socket is already active */
	rc = -EINVAL;
	if (sk->sk_state != SMC_INIT)
		goto out_rel;

	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
	rc = kernel_bind(smc->clcsock, uaddr, addr_len);

out_rel:
	release_sock(sk);
out:
	return rc;
}

static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* options we don't get control of via setsockopt */
	nsk->sk_type = osk->sk_type;
	nsk->sk_sndbuf = osk->sk_sndbuf;
	nsk->sk_rcvbuf = osk->sk_rcvbuf;
	nsk->sk_sndtimeo = osk->sk_sndtimeo;
	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
	nsk->sk_mark = osk->sk_mark;
	nsk->sk_priority = osk->sk_priority;
	nsk->sk_rcvlowat = osk->sk_rcvlowat;
	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
	nsk->sk_err = osk->sk_err;

	nsk->sk_flags &= ~mask;
	nsk->sk_flags |= osk->sk_flags & mask;
}

#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_BROADCAST) | \
			     (1UL << SOCK_TIMESTAMP) | \
			     (1UL << SOCK_DBG) | \
			     (1UL << SOCK_RCVTSTAMP) | \
			     (1UL << SOCK_RCVTSTAMPNS) | \
			     (1UL << SOCK_LOCALROUTE) | \
			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
			     (1UL << SOCK_RXQ_OVFL) | \
			     (1UL << SOCK_WIFI_STATUS) | \
			     (1UL << SOCK_NOFCS) | \
			     (1UL << SOCK_FILTER_LOCKED))
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}

#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))
/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}

/* register a new rmb, optionally send confirm_rkey msg to register with peer */
static int smc_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc,
		       bool conf_rkey)
{
	/* register memory region for new rmb */
	if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) {
		rmb_desc->regerr = 1;
		return -EFAULT;
	}
	if (!conf_rkey)
		return 0;
	/* exchange confirm_rkey msg with peer */
	if (smc_llc_do_confirm_rkey(link, rmb_desc)) {
		rmb_desc->regerr = 1;
		return -EFAULT;
	}
	return 0;
}

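/* client side of the link confirmation for a first contact: answer the
 * server's CONFIRM LINK request and reject its ADD LINK request, since
 * only a single link is supported so far
 */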
static int smc_clnt_conf_first_link(struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];
	/* receive CONFIRM LINK request from server over RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE);
		return rc;
	}

	if (link->llc_confirm_rc)
		return SMC_CLC_DECL_RMBE_EC;

	rc = smc_ib_modify_qp_rts(link);
	if (rc)
		return SMC_CLC_DECL_INTERR;

	smc_wr_remember_qp_attr(link);

	if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
		return SMC_CLC_DECL_INTERR;

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link,
				       link->smcibdev->mac[link->ibport - 1],
				       &link->smcibdev->gid[link->ibport - 1],
				       SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TCL;

	/* receive ADD LINK request from server over RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(&link->llc_add,
							 SMC_LLC_WAIT_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE);
		return rc;
	}

	/* send add link reject message, only one link supported for now */
	rc = smc_llc_send_add_link(link,
				   link->smcibdev->mac[link->ibport - 1],
				   &link->smcibdev->gid[link->ibport - 1],
				   SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TCL;

	smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);

	return 0;
}

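/* save peer RMB parameters and the alert token from the CLC accept/confirm */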
static void smc_conn_save_peer_info(struct smc_sock *smc,
				    struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->rmbe_size);

	smc->conn.peer_rmbe_idx = clc->rmbe_idx;
	smc->conn.local_tx_ctrl.token = ntohl(clc->rmbe_alert_token);
	smc->conn.peer_rmbe_size = bufsize;
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
}

static void smc_link_save_peer_info(struct smc_link *link,
				    struct smc_clc_msg_accept_confirm *clc)
{
	link->peer_qpn = ntoh24(clc->qpn);
	memcpy(link->peer_gid, clc->lcl.gid, SMC_GID_SIZE);
	memcpy(link->peer_mac, clc->lcl.mac, sizeof(link->peer_mac));
	link->peer_psn = ntoh24(clc->psn);
	link->peer_mtu = clc->qp_mtu;
}

/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc)
{
	smc->use_fallback = true;
	smc_copy_sock_settings_to_clc(smc);
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;
	return 0;
}

/* decline and fall back during connect */
static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
{
	int rc;

	if (reason_code < 0) { /* error, fallback is not possible */
		if (smc->sk.sk_state == SMC_INIT)
			sock_put(&smc->sk); /* passive closing */
		return reason_code;
	}
	if (reason_code != SMC_CLC_DECL_REPLY) {
		rc = smc_clc_send_decline(smc, reason_code);
		if (rc < 0) {
			if (smc->sk.sk_state == SMC_INIT)
				sock_put(&smc->sk); /* passive closing */
			return rc;
		}
	}
	return smc_connect_fallback(smc);
}

/* abort connecting */
static int smc_connect_abort(struct smc_sock *smc, int reason_code,
			     int local_contact)
{
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_forget(smc->conn.lgr);
	mutex_unlock(&smc_create_lgr_pending);
	smc_conn_free(&smc->conn);
	return reason_code;
}

/* check if there is an RDMA device available for this connection. */
/* called for connect and listen */
static int smc_check_rdma(struct smc_sock *smc, struct smc_ib_device **ibdev,
			  u8 *ibport)
{
	int reason_code = 0;

	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(smc->clcsock->sk, ibdev, ibport);
	if (!(*ibdev))
		reason_code = SMC_CLC_DECL_CNFERR; /* configuration error */

	return reason_code;
}

/* CLC handshake during connect */
static int smc_connect_clc(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smc_ib_device *ibdev, u8 ibport)
{
	int rc = 0;

	/* do inband token exchange */
	rc = smc_clc_send_proposal(smc, ibdev, ibport);
	if (rc)
		return rc;
	/* receive SMC Accept CLC message */
	return smc_clc_wait_msg(smc, aclc, sizeof(*aclc), SMC_CLC_ACCEPT);
}

/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc,
			    struct smc_clc_msg_accept_confirm *aclc,
			    struct smc_ib_device *ibdev, u8 ibport)
{
	int local_contact = SMC_FIRST_CONTACT;
	struct smc_link *link;
	int reason_code = 0;

	mutex_lock(&smc_create_lgr_pending);
	local_contact = smc_conn_create(smc, ibdev, ibport, &aclc->lcl,
					aclc->hdr.flag);
	if (local_contact < 0) {
		if (local_contact == -ENOMEM)
			reason_code = SMC_CLC_DECL_MEM; /* insufficient memory */
		else if (local_contact == -ENOLINK)
			reason_code = SMC_CLC_DECL_SYNCERR; /* synchr. error */
		else
			reason_code = SMC_CLC_DECL_INTERR; /* other error */
		return smc_connect_abort(smc, reason_code, 0);
	}
	link = &smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	smc_conn_save_peer_info(smc, aclc);

	/* create send buffer and rmb */
	if (smc_buf_create(smc))
		return smc_connect_abort(smc, SMC_CLC_DECL_MEM, local_contact);

	if (local_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, aclc);

	if (smc_rmb_rtoken_handling(&smc->conn, aclc))
		return smc_connect_abort(smc, SMC_CLC_DECL_INTERR,
					 local_contact);

	smc_close_init(smc);
	smc_rx_init(smc);

	if (local_contact == SMC_FIRST_CONTACT) {
		if (smc_ib_ready_link(link))
			return smc_connect_abort(smc, SMC_CLC_DECL_INTERR,
						 local_contact);
	} else {
		if (!smc->conn.rmb_desc->reused &&
		    smc_reg_rmb(link, smc->conn.rmb_desc, true))
			return smc_connect_abort(smc, SMC_CLC_DECL_INTERR,
						 local_contact);
	}
	smc_rmb_sync_sg_for_device(&smc->conn);

	reason_code = smc_clc_send_confirm(smc);
	if (reason_code)
		return smc_connect_abort(smc, reason_code, local_contact);

	smc_tx_init(smc);

	if (local_contact == SMC_FIRST_CONTACT) {
		/* QP confirmation over RoCE fabric */
		reason_code = smc_clnt_conf_first_link(smc);
		if (reason_code)
			return smc_connect_abort(smc, reason_code,
						 local_contact);
	}
	mutex_unlock(&smc_create_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
}

/* perform steps before actually connecting */
static int __smc_connect(struct smc_sock *smc)
{
	struct smc_clc_msg_accept_confirm aclc;
	struct smc_ib_device *ibdev;
	int rc = 0;
	u8 ibport;

	sock_hold(&smc->sk); /* sock put in passive closing */

	if (smc->use_fallback)
		return smc_connect_fallback(smc);

	/* if peer has not signalled SMC-capability, fall back */
	if (!tcp_sk(smc->clcsock->sk)->syn_smc)
		return smc_connect_fallback(smc);

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(smc))
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC);

	/* check if an RDMA device is available; if not, fall back */
	if (smc_check_rdma(smc, &ibdev, &ibport))
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_CNFERR);

	/* perform CLC handshake */
	rc = smc_connect_clc(smc, &aclc, ibdev, ibport);
	if (rc)
		return smc_connect_decline_fallback(smc, rc);

	/* connect using rdma */
	rc = smc_connect_rdma(smc, &aclc, ibdev, ibport);
	if (rc)
		return smc_connect_decline_fallback(smc, rc);

	return 0;
}

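/* worker for a deferred non-blocking connect: perform the TCP connect
 * first, then run the SMC handshake
 */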
static void smc_connect_work(struct work_struct *work)
{
	struct smc_sock *smc = container_of(work, struct smc_sock,
					    connect_work);
	int rc;

	lock_sock(&smc->sk);
	rc = kernel_connect(smc->clcsock, &smc->connect_info->addr,
			    smc->connect_info->alen, smc->connect_info->flags);
	if (smc->clcsock->sk->sk_err) {
		smc->sk.sk_err = smc->clcsock->sk->sk_err;
		goto out;
	}
	if (rc < 0) {
		smc->sk.sk_err = -rc;
		goto out;
	}

	rc = __smc_connect(smc);
	if (rc < 0)
		smc->sk.sk_err = -rc;

out:
	smc->sk.sk_state_change(&smc->sk);
	kfree(smc->connect_info);
	smc->connect_info = NULL;
	release_sock(&smc->sk);
}

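/* connect an SMC socket; with O_NONBLOCK the handshake runs in a worker */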
static int smc_connect(struct socket *sock, struct sockaddr *addr,
		       int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;

	smc = smc_sk(sk);

	/* separate smc parameter checking to be safe */
	if (alen < sizeof(addr->sa_family))
		goto out_err;
	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		goto out_err;

	lock_sock(sk);
	switch (sk->sk_state) {
	default:
		goto out;
	case SMC_ACTIVE:
		rc = -EISCONN;
		goto out;
	case SMC_INIT:
		rc = 0;
		break;
	}

	smc_copy_sock_settings_to_clc(smc);
	tcp_sk(smc->clcsock->sk)->syn_smc = 1;
	if (flags & O_NONBLOCK) {
		if (smc->connect_info) {
			rc = -EALREADY;
			goto out;
		}
		smc->connect_info = kzalloc(alen + 2 * sizeof(int), GFP_KERNEL);
		if (!smc->connect_info) {
			rc = -ENOMEM;
			goto out;
		}
		smc->connect_info->alen = alen;
		smc->connect_info->flags = flags ^ O_NONBLOCK;
		memcpy(&smc->connect_info->addr, addr, alen);
		schedule_work(&smc->connect_work);
		rc = -EINPROGRESS;
	} else {
		rc = kernel_connect(smc->clcsock, addr, alen, flags);
		if (rc)
			goto out;

		rc = __smc_connect(smc);
		if (rc < 0)
			goto out;
		else
			rc = 0; /* success cases including fallback */
	}

out:
	release_sock(sk);
out_err:
	return rc;
}

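/* accept a connection on the internal CLC/TCP socket and allocate a new
 * SMC sock for it
 */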
static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
{
	struct socket *new_clcsock = NULL;
	struct sock *lsk = &lsmc->sk;
	struct sock *new_sk;
	int rc;

	release_sock(lsk);
	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
	if (!new_sk) {
		rc = -ENOMEM;
		lsk->sk_err = ENOMEM;
		*new_smc = NULL;
		lock_sock(lsk);
		goto out;
	}
	*new_smc = smc_sk(new_sk);

	rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
	lock_sock(lsk);
	if (rc < 0)
		lsk->sk_err = -rc;
	if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
		if (new_clcsock)
			sock_release(new_clcsock);
		new_sk->sk_state = SMC_CLOSED;
		sock_set_flag(new_sk, SOCK_DEAD);
		new_sk->sk_prot->unhash(new_sk);
		sock_put(new_sk); /* final */
		*new_smc = NULL;
		goto out;
	}

	(*new_smc)->clcsock = new_clcsock;
out:
	return rc;
}

/* add a just created sock to the accept queue of the listen sock as
 * candidate for a following socket accept call from user space
 */
static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
	struct smc_sock *par = smc_sk(parent);

	sock_hold(sk); /* sock_put in smc_accept_unlink() */
	spin_lock(&par->accept_q_lock);
	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_added(parent);
}

/* remove a socket from the accept queue of its parental listening socket */
static void smc_accept_unlink(struct sock *sk)
{
	struct smc_sock *par = smc_sk(sk)->listen_smc;

	spin_lock(&par->accept_q_lock);
	list_del_init(&smc_sk(sk)->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
	sock_put(sk); /* sock_hold in smc_accept_enqueue */
}

/* remove a sock from the accept queue to bind it to a new socket created
 * for a socket accept call from user space
 */
struct sock *smc_accept_dequeue(struct sock *parent,
				struct socket *new_sock)
{
	struct smc_sock *isk, *n;
	struct sock *new_sk;

	list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
		new_sk = (struct sock *)isk;

		smc_accept_unlink(new_sk);
		if (new_sk->sk_state == SMC_CLOSED) {
			if (isk->clcsock) {
				sock_release(isk->clcsock);
				isk->clcsock = NULL;
			}
			new_sk->sk_prot->unhash(new_sk);
			sock_put(new_sk); /* final */
			continue;
		}
		if (new_sock)
			sock_graft(new_sk, new_sock);
		return new_sk;
	}
	return NULL;
}

/* clean up for a created but never accepted sock */
void smc_close_non_accepted(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	lock_sock(sk);
	if (!sk->sk_lingertime)
		/* wait for peer closing */
		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
	if (!smc->use_fallback) {
		smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	}
	if (smc->clcsock) {
		struct socket *tcp;

		tcp = smc->clcsock;
		smc->clcsock = NULL;
		sock_release(tcp);
	}
	if (smc->use_fallback) {
		sock_put(sk); /* passive closing */
		sk->sk_state = SMC_CLOSED;
	} else {
		if (sk->sk_state == SMC_CLOSED)
			smc_conn_free(&smc->conn);
	}
	release_sock(sk);
	sk->sk_prot->unhash(sk);
	sock_put(sk); /* final sock_put */
}

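/* server side of the link confirmation for a first contact: send the
 * CONFIRM LINK and ADD LINK requests and wait for the client's responses
 */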
static int smc_serv_conf_first_link(struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];

	if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
		return SMC_CLC_DECL_INTERR;

	/* send CONFIRM LINK request to client over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link,
				       link->smcibdev->mac[link->ibport - 1],
				       &link->smcibdev->gid[link->ibport - 1],
				       SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TCL;

	/* receive CONFIRM LINK response from client over the RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm_resp,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE);
		return rc;
	}

	if (link->llc_confirm_resp_rc)
		return SMC_CLC_DECL_RMBE_EC;

	/* send ADD LINK request to client over the RoCE fabric */
	rc = smc_llc_send_add_link(link,
				   link->smcibdev->mac[link->ibport - 1],
				   &link->smcibdev->gid[link->ibport - 1],
				   SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TCL;

	/* receive ADD LINK response from client over the RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(&link->llc_add_resp,
							 SMC_LLC_WAIT_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE);
		return rc;
	}

	smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);

	return 0;
}

/* listen worker: finish */
static void smc_listen_out(struct smc_sock *new_smc)
{
	struct smc_sock *lsmc = new_smc->listen_smc;
	struct sock *newsmcsk = &new_smc->sk;

	lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
	if (lsmc->sk.sk_state == SMC_LISTEN) {
		smc_accept_enqueue(&lsmc->sk, newsmcsk);
	} else { /* no longer listening */
		smc_close_non_accepted(newsmcsk);
	}
	release_sock(&lsmc->sk);

	/* Wake up accept */
	lsmc->sk.sk_data_ready(&lsmc->sk);
	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
}

/* listen worker: finish in state connected */
static void smc_listen_out_connected(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	sk_refcnt_debug_inc(newsmcsk);
	if (newsmcsk->sk_state == SMC_INIT)
		newsmcsk->sk_state = SMC_ACTIVE;

	smc_listen_out(new_smc);
}

/* listen worker: finish in error state */
static void smc_listen_out_err(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	if (newsmcsk->sk_state == SMC_INIT)
		sock_put(&new_smc->sk); /* passive closing */
	newsmcsk->sk_state = SMC_CLOSED;
	smc_conn_free(&new_smc->conn);

	smc_listen_out(new_smc);
}

/* listen worker: decline and fall back if possible */
static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
			       int local_contact)
{
	/* RDMA setup failed, switch back to TCP */
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_forget(new_smc->conn.lgr);
	if (reason_code < 0) { /* error, no fallback possible */
		smc_listen_out_err(new_smc);
		return;
	}
	smc_conn_free(&new_smc->conn);
	new_smc->use_fallback = true;
	if (reason_code && reason_code != SMC_CLC_DECL_REPLY) {
		if (smc_clc_send_decline(new_smc, reason_code) < 0) {
			smc_listen_out_err(new_smc);
			return;
		}
	}
	smc_listen_out_connected(new_smc);
}

/* listen worker: check prefixes */
static int smc_listen_rdma_check(struct smc_sock *new_smc,
				 struct smc_clc_msg_proposal *pclc)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct socket *newclcsock = new_smc->clcsock;

	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
	if (smc_clc_prfx_match(newclcsock, pclc_prfx))
		return SMC_CLC_DECL_CNFERR;

	return 0;
}

/* listen worker: initialize connection and buffers */
static int smc_listen_rdma_init(struct smc_sock *new_smc,
				struct smc_clc_msg_proposal *pclc,
				struct smc_ib_device *ibdev, u8 ibport,
				int *local_contact)
{
	/* allocate connection / link group */
	*local_contact = smc_conn_create(new_smc, ibdev, ibport, &pclc->lcl, 0);
	if (*local_contact < 0) {
		if (*local_contact == -ENOMEM)
			return SMC_CLC_DECL_MEM; /* insufficient memory */
		return SMC_CLC_DECL_INTERR; /* other error */
	}

	/* create send buffer and rmb */
	if (smc_buf_create(new_smc))
		return SMC_CLC_DECL_MEM;

	return 0;
}

/* listen worker: register buffers */
static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
{
	struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	if (local_contact != SMC_FIRST_CONTACT) {
		if (!new_smc->conn.rmb_desc->reused) {
			if (smc_reg_rmb(link, new_smc->conn.rmb_desc, true))
				return SMC_CLC_DECL_INTERR;
		}
	}
	smc_rmb_sync_sg_for_device(&new_smc->conn);

	return 0;
}

/* listen worker: finish RDMA setup */
static void smc_listen_rdma_finish(struct smc_sock *new_smc,
				   struct smc_clc_msg_accept_confirm *cclc,
				   int local_contact)
{
	struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
	int reason_code = 0;

	if (local_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, cclc);

	if (smc_rmb_rtoken_handling(&new_smc->conn, cclc)) {
		reason_code = SMC_CLC_DECL_INTERR;
		goto decline;
	}

	if (local_contact == SMC_FIRST_CONTACT) {
		if (smc_ib_ready_link(link)) {
			reason_code = SMC_CLC_DECL_INTERR;
			goto decline;
		}
		/* QP confirmation over RoCE fabric */
		reason_code = smc_serv_conf_first_link(new_smc);
		if (reason_code)
			goto decline;
	}
	return;

decline:
	mutex_unlock(&smc_create_lgr_pending);
	smc_listen_decline(new_smc, reason_code, local_contact);
}

/* setup for RDMA connection of server */
static void smc_listen_work(struct work_struct *work)
{
	struct smc_sock *new_smc = container_of(work, struct smc_sock,
						smc_listen_work);
	struct socket *newclcsock = new_smc->clcsock;
	struct smc_clc_msg_accept_confirm cclc;
	struct smc_clc_msg_proposal *pclc;
	struct smc_ib_device *ibdev;
	u8 buf[SMC_CLC_MAX_LEN];
	int local_contact = 0;
	int reason_code = 0;
	int rc = 0;
	u8 ibport;

	if (new_smc->use_fallback) {
		smc_listen_out_connected(new_smc);
		return;
	}

	/* check if peer is smc capable */
	if (!tcp_sk(newclcsock->sk)->syn_smc) {
		new_smc->use_fallback = true;
		smc_listen_out_connected(new_smc);
		return;
	}

	/* do inband token exchange -
	 * wait for and receive SMC Proposal CLC message
	 */
	pclc = (struct smc_clc_msg_proposal *)&buf;
	reason_code = smc_clc_wait_msg(new_smc, pclc, SMC_CLC_MAX_LEN,
				       SMC_CLC_PROPOSAL);
	if (reason_code) {
		smc_listen_decline(new_smc, reason_code, 0);
		return;
	}

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(new_smc)) {
		smc_listen_decline(new_smc, SMC_CLC_DECL_IPSEC, 0);
		return;
	}

	mutex_lock(&smc_create_lgr_pending);
	smc_close_init(new_smc);
	smc_rx_init(new_smc);
	smc_tx_init(new_smc);

	/* check if RDMA is available */
	if (smc_check_rdma(new_smc, &ibdev, &ibport) ||
	    smc_listen_rdma_check(new_smc, pclc) ||
	    smc_listen_rdma_init(new_smc, pclc, ibdev, ibport,
				 &local_contact) ||
	    smc_listen_rdma_reg(new_smc, local_contact)) {
		/* SMC not supported, decline */
		mutex_unlock(&smc_create_lgr_pending);
		smc_listen_decline(new_smc, SMC_CLC_DECL_CNFERR, local_contact);
		return;
	}

	/* send SMC Accept CLC message */
	rc = smc_clc_send_accept(new_smc, local_contact);
	if (rc) {
		mutex_unlock(&smc_create_lgr_pending);
		smc_listen_decline(new_smc, rc, local_contact);
		return;
	}

	/* receive SMC Confirm CLC message */
	reason_code = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
				       SMC_CLC_CONFIRM);
	if (reason_code) {
		mutex_unlock(&smc_create_lgr_pending);
		smc_listen_decline(new_smc, reason_code, local_contact);
		return;
	}

	/* finish worker */
	smc_listen_rdma_finish(new_smc, &cclc, local_contact);
	smc_conn_save_peer_info(new_smc, &cclc);
	mutex_unlock(&smc_create_lgr_pending);
	smc_listen_out_connected(new_smc);
}

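/* worker draining the internal CLC/TCP listen socket: accept each incoming
 * connection and hand it over to smc_listen_work()
 */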
static void smc_tcp_listen_work(struct work_struct *work)
{
	struct smc_sock *lsmc = container_of(work, struct smc_sock,
					     tcp_listen_work);
	struct sock *lsk = &lsmc->sk;
	struct smc_sock *new_smc;
	int rc = 0;

	lock_sock(lsk);
	while (lsk->sk_state == SMC_LISTEN) {
		rc = smc_clcsock_accept(lsmc, &new_smc);
		if (rc)
			goto out;
		if (!new_smc)
			continue;

		new_smc->listen_smc = lsmc;
		new_smc->use_fallback = lsmc->use_fallback;
		sock_hold(lsk); /* sock_put in smc_listen_work */
		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
		smc_copy_sock_settings_to_smc(new_smc);
		sock_hold(&new_smc->sk); /* sock_put in passive closing */
		if (!schedule_work(&new_smc->smc_listen_work))
			sock_put(&new_smc->sk);
	}

out:
	release_sock(lsk);
	sock_put(&lsmc->sk); /* sock_hold in smc_listen */
}

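/* move an SMC socket into listen state: listen on the internal CLC/TCP
 * socket and kick off the tcp_listen worker
 */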
static int smc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);
	lock_sock(sk);

	rc = -EINVAL;
	if ((sk->sk_state != SMC_INIT) && (sk->sk_state != SMC_LISTEN))
		goto out;

	rc = 0;
	if (sk->sk_state == SMC_LISTEN) {
		sk->sk_max_ack_backlog = backlog;
		goto out;
	}
	/* some socket options are handled in core, so we could not apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
	smc_copy_sock_settings_to_clc(smc);
	if (!smc->use_fallback)
		tcp_sk(smc->clcsock->sk)->syn_smc = 1;

	rc = kernel_listen(smc->clcsock, backlog);
	if (rc)
		goto out;
	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = SMC_LISTEN;
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	sock_hold(sk); /* sock_hold in tcp_listen_worker */
	if (!schedule_work(&smc->tcp_listen_work))
		sock_put(sk);

out:
	release_sock(sk);
	return rc;
}

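/* accept a queued SMC connection; if deferred accept is configured, wait
 * until data has arrived before returning the new socket
 */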
static int smc_accept(struct socket *sock, struct socket *new_sock,
		      int flags, bool kern)
{
	struct sock *sk = sock->sk, *nsk;
	DECLARE_WAITQUEUE(wait, current);
	struct smc_sock *lsmc;
	long timeo;
	int rc = 0;

	lsmc = smc_sk(sk);
	sock_hold(sk); /* sock_put below */
	lock_sock(sk);

	if (lsmc->sk.sk_state != SMC_LISTEN) {
		rc = -EINVAL;
		release_sock(sk);
		goto out;
	}

	/* Wait for an incoming connection */
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			rc = -EAGAIN;
			break;
		}
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		/* wakeup by sk_data_ready in smc_listen_work() */
		sched_annotate_sleep();
		lock_sock(sk);
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (!rc)
		rc = sock_error(nsk);
	release_sock(sk);
	if (rc)
		goto out;

	if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
		/* wait till data arrives on the socket */
		timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
								MSEC_PER_SEC);
		if (smc_sk(nsk)->use_fallback) {
			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;

			lock_sock(clcsk);
			if (skb_queue_empty(&clcsk->sk_receive_queue))
				sk_wait_data(clcsk, &timeo, NULL);
			release_sock(clcsk);
		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
			lock_sock(nsk);
			smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
			release_sock(nsk);
		}
	}

out:
	sock_put(sk); /* sock_hold above */
	return rc;
}

static int smc_getname(struct socket *sock, struct sockaddr *addr,
		       int peer)
{
	struct smc_sock *smc;

	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
	    (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
		return -ENOTCONN;

	smc = smc_sk(sock->sk);

	return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
}

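/* send data on an SMC socket, either via the TCP fallback socket or
 * through the SMC transmit path
 */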
static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_INIT))
		goto out;

	if (msg->msg_flags & MSG_FASTOPEN) {
		if (sk->sk_state == SMC_INIT) {
			smc->use_fallback = true;
		} else {
			rc = -EINVAL;
			goto out;
		}
	}

	if (smc->use_fallback)
		rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
	else
		rc = smc_tx_sendmsg(smc, msg, len);
out:
	release_sock(sk);
	return rc;
}

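/* receive data on an SMC socket, either via the TCP fallback socket or
 * from the SMC receive buffer (RMB)
 */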
static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state == SMC_INIT) ||
	    (sk->sk_state == SMC_LISTEN) ||
	    (sk->sk_state == SMC_CLOSED))
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);