iscsi_target.c 127 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
3
4
/*******************************************************************************
 * This file contains main functions related to the iSCSI Target Core Driver.
 *
5
 * (c) Copyright 2007-2013 Datera, Inc.
6
7
8
9
10
 *
 * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 ******************************************************************************/

Herbert Xu's avatar
Herbert Xu committed
11
#include <crypto/hash.h>
12
13
14
#include <linux/string.h>
#include <linux/kthread.h>
#include <linux/completion.h>
15
#include <linux/module.h>
16
#include <linux/vmalloc.h>
Al Viro's avatar
Al Viro committed
17
#include <linux/idr.h>
18
#include <linux/delay.h>
19
#include <linux/sched/signal.h>
20
#include <asm/unaligned.h>
21
#include <linux/inet.h>
22
#include <net/ipv6.h>
23
#include <scsi/scsi_proto.h>
24
#include <scsi/iscsi_proto.h>
25
#include <scsi/scsi_tcq.h>
26
#include <target/target_core_base.h>
27
#include <target/target_core_fabric.h>
28

29
#include <target/iscsi/iscsi_target_core.h>
30
31
32
33
34
35
36
37
38
39
40
41
#include "iscsi_target_parameters.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_datain_values.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_login.h"
#include "iscsi_target_tmr.h"
#include "iscsi_target_tpg.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
#include "iscsi_target_device.h"
42
#include <target/iscsi/iscsi_target_stat.h>
43

44
45
#include <target/iscsi/iscsi_transport.h>

46
47
48
static LIST_HEAD(g_tiqn_list);
static LIST_HEAD(g_np_list);
static DEFINE_SPINLOCK(tiqn_lock);
49
static DEFINE_MUTEX(np_lock);
50
51

static struct idr tiqn_idr;
52
DEFINE_IDA(sess_ida);
53
54
55
56
57
58
59
60
61
62
struct mutex auth_id_lock;

struct iscsit_global *iscsit_global;

struct kmem_cache *lio_qr_cache;
struct kmem_cache *lio_dr_cache;
struct kmem_cache *lio_ooo_cache;
struct kmem_cache *lio_r2t_cache;

static int iscsit_handle_immediate_data(struct iscsi_cmd *,
63
			struct iscsi_scsi_req *, u32);
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116

/*
 * Look up an active target IQN by name for an incoming login.
 *
 * On a match in TIQN_STATE_ACTIVE, tiqn_access_count is bumped under
 * tiqn_state_lock before either lock is dropped, so the returned tiqn
 * cannot be freed until the caller releases it via
 * iscsit_put_tiqn_for_login().  Returns NULL when no active tiqn with
 * that name exists.
 */
struct iscsi_tiqn *iscsit_get_tiqn_for_login(unsigned char *buf)
{
	struct iscsi_tiqn *tiqn = NULL;

	spin_lock(&tiqn_lock);
	list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
		if (!strcmp(tiqn->tiqn, buf)) {

			spin_lock(&tiqn->tiqn_state_lock);
			if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
				/* Pin the tiqn before dropping the locks. */
				tiqn->tiqn_access_count++;
				spin_unlock(&tiqn->tiqn_state_lock);
				spin_unlock(&tiqn_lock);
				return tiqn;
			}
			spin_unlock(&tiqn->tiqn_state_lock);
		}
	}
	spin_unlock(&tiqn_lock);

	return NULL;
}

/*
 * Atomically transition a tiqn from ACTIVE to SHUTDOWN.
 *
 * Returns 0 when the transition was performed, or -1 when the tiqn was
 * not in TIQN_STATE_ACTIVE (shutdown already initiated by someone else).
 */
static int iscsit_set_tiqn_shutdown(struct iscsi_tiqn *tiqn)
{
	int rc = -1;

	spin_lock(&tiqn->tiqn_state_lock);
	if (tiqn->tiqn_state == TIQN_STATE_ACTIVE) {
		tiqn->tiqn_state = TIQN_STATE_SHUTDOWN;
		rc = 0;
	}
	spin_unlock(&tiqn->tiqn_state_lock);

	return rc;
}

/*
 * Drop the reference taken by iscsit_get_tiqn_for_login().  Once the
 * count reaches zero, iscsit_wait_for_tiqn() may proceed with teardown.
 */
void iscsit_put_tiqn_for_login(struct iscsi_tiqn *tiqn)
{
	spin_lock(&tiqn->tiqn_state_lock);
	tiqn->tiqn_access_count--;
	spin_unlock(&tiqn->tiqn_state_lock);
}

/*
 * Allocate, register and activate a new target IQN.
 *
 * Note that IQN formatting is expected to be done in userspace, and
 * no explicit IQN format checks are done here.
 *
 * Returns the new tiqn on success, or an ERR_PTR on length violation,
 * allocation failure, or idr allocation failure.
 */
struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
{
	struct iscsi_tiqn *tiqn = NULL;
	int ret;

	if (strlen(buf) >= ISCSI_IQN_LEN) {
		pr_err("Target IQN exceeds %d bytes\n",
				ISCSI_IQN_LEN);
		return ERR_PTR(-EINVAL);
	}

	tiqn = kzalloc(sizeof(*tiqn), GFP_KERNEL);
	if (!tiqn)
		return ERR_PTR(-ENOMEM);

	/* Length was bounds-checked above, so this cannot overflow. */
	sprintf(tiqn->tiqn, "%s", buf);
	INIT_LIST_HEAD(&tiqn->tiqn_list);
	INIT_LIST_HEAD(&tiqn->tiqn_tpg_list);
	spin_lock_init(&tiqn->tiqn_state_lock);
	spin_lock_init(&tiqn->tiqn_tpg_lock);
	spin_lock_init(&tiqn->sess_err_stats.lock);
	spin_lock_init(&tiqn->login_stats.lock);
	spin_lock_init(&tiqn->logout_stats.lock);

	tiqn->tiqn_state = TIQN_STATE_ACTIVE;

	/*
	 * idr_preload() + GFP_NOWAIT allows the allocation inside the
	 * non-sleeping tiqn_lock critical section.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&tiqn_lock);

	ret = idr_alloc(&tiqn_idr, NULL, 0, 0, GFP_NOWAIT);
	if (ret < 0) {
		pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
		spin_unlock(&tiqn_lock);
		idr_preload_end();
		kfree(tiqn);
		return ERR_PTR(ret);
	}
	tiqn->tiqn_index = ret;
	list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);

	spin_unlock(&tiqn_lock);
	idr_preload_end();

	pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);

	return tiqn;

}

/*
 * Block until all login-path references on the tiqn are dropped.
 * Polls tiqn_access_count under tiqn_state_lock, sleeping 10ms between
 * checks; by this point the tiqn is in TIQN_STATE_SHUTDOWN so no new
 * references can be taken.
 */
static void iscsit_wait_for_tiqn(struct iscsi_tiqn *tiqn)
{
	/*
	 * Wait for accesses to said struct iscsi_tiqn to end.
	 */
	spin_lock(&tiqn->tiqn_state_lock);
	while (tiqn->tiqn_access_count != 0) {
		spin_unlock(&tiqn->tiqn_state_lock);
		msleep(10);
		spin_lock(&tiqn->tiqn_state_lock);
	}
	spin_unlock(&tiqn->tiqn_state_lock);
}

/*
 * Shut down, unregister and free a target IQN.  Quiesces new lookups
 * first, then drains existing references before unlinking and freeing.
 */
void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
{
	/*
	 * iscsit_set_tiqn_shutdown sets tiqn->tiqn_state = TIQN_STATE_SHUTDOWN
	 * while holding tiqn->tiqn_state_lock.  This means that all subsequent
	 * attempts to access this struct iscsi_tiqn will fail from both transport
	 * fabric and control code paths.
	 */
	if (iscsit_set_tiqn_shutdown(tiqn) < 0) {
		pr_err("iscsit_set_tiqn_shutdown() failed\n");
		return;
	}

	/* Drain outstanding iscsit_get_tiqn_for_login() references. */
	iscsit_wait_for_tiqn(tiqn);

	spin_lock(&tiqn_lock);
	list_del(&tiqn->tiqn_list);
	idr_remove(&tiqn_idr, tiqn->tiqn_index);
	spin_unlock(&tiqn_lock);

	pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
			tiqn->tiqn);
	kfree(tiqn);
}

/*
 * Gate a login attempt on both the network portal and the portal group
 * being active, then take the per-TPG login semaphore.  Returns 0 with
 * tpg->np_login_sem held, or -1 on any failure (semaphore not held).
 * The matching release is iscsit_deaccess_np().
 */
int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
{
	int ret;
	/*
	 * Determine if the network portal is accepting storage traffic.
	 */
	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
		spin_unlock_bh(&np->np_thread_lock);
		return -1;
	}
	spin_unlock_bh(&np->np_thread_lock);
	/*
	 * Determine if the portal group is accepting storage traffic.
	 */
	spin_lock_bh(&tpg->tpg_state_lock);
	if (tpg->tpg_state != TPG_STATE_ACTIVE) {
		spin_unlock_bh(&tpg->tpg_state_lock);
		return -1;
	}
	spin_unlock_bh(&tpg->tpg_state_lock);

	/*
	 * Here we serialize access across the TIQN+TPG Tuple.
	 */
	ret = down_interruptible(&tpg->np_login_sem);
	if (ret != 0)
		return -1;

	/*
	 * Re-check the TPG state after acquiring the semaphore: it may
	 * have been deactivated while we slept.
	 */
	spin_lock_bh(&tpg->tpg_state_lock);
	if (tpg->tpg_state != TPG_STATE_ACTIVE) {
		spin_unlock_bh(&tpg->tpg_state_lock);
		up(&tpg->np_login_sem);
		return -1;
	}
	spin_unlock_bh(&tpg->tpg_state_lock);

	return 0;
}

240
241
242
243
244
245
246
247
248
249
/*
 * kref release callback for a tpg_np login reference: wake anyone
 * waiting in iscsit_reset_np_thread() on tpg_np_comp.
 */
void iscsit_login_kref_put(struct kref *kref)
{
	struct iscsi_tpg_np *tpg_np = container_of(kref,
				struct iscsi_tpg_np, tpg_np_kref);

	complete(&tpg_np->tpg_np_comp);
}

/*
 * Undo iscsit_access_np(): release the per-TPG login semaphore, drop
 * the optional tpg_np login kref, and release the tiqn login reference
 * taken during login.  Always returns 0.
 */
int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg,
		       struct iscsi_tpg_np *tpg_np)
{
	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;

	up(&tpg->np_login_sem);

	if (tpg_np)
		kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);

	if (tiqn)
		iscsit_put_tiqn_for_login(tiqn);

	return 0;
}

264
bool iscsit_check_np_match(
265
	struct sockaddr_storage *sockaddr,
266
	struct iscsi_np *np,
267
268
269
270
	int network_transport)
{
	struct sockaddr_in *sock_in, *sock_in_e;
	struct sockaddr_in6 *sock_in6, *sock_in6_e;
271
	bool ip_match = false;
272
	u16 port, port_e;
273

274
275
276
277
278
279
280
281
282
283
	if (sockaddr->ss_family == AF_INET6) {
		sock_in6 = (struct sockaddr_in6 *)sockaddr;
		sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;

		if (!memcmp(&sock_in6->sin6_addr.in6_u,
			    &sock_in6_e->sin6_addr.in6_u,
			    sizeof(struct in6_addr)))
			ip_match = true;

		port = ntohs(sock_in6->sin6_port);
284
		port_e = ntohs(sock_in6_e->sin6_port);
285
286
287
288
289
290
291
292
	} else {
		sock_in = (struct sockaddr_in *)sockaddr;
		sock_in_e = (struct sockaddr_in *)&np->np_sockaddr;

		if (sock_in->sin_addr.s_addr == sock_in_e->sin_addr.s_addr)
			ip_match = true;

		port = ntohs(sock_in->sin_port);
293
		port_e = ntohs(sock_in_e->sin_port);
294
295
	}

296
	if (ip_match && (port_e == port) &&
297
298
299
300
301
302
303
	    (np->np_network_transport == network_transport))
		return true;

	return false;
}

/*
 * Find an existing active network portal matching sockaddr + transport.
 * Caller must hold np_lock.  On success the portal is returned with its
 * np_exports count already incremented (see comment below); returns
 * NULL when no active match exists.
 */
static struct iscsi_np *iscsit_get_np(
	struct sockaddr_storage *sockaddr,
	int network_transport)
{
	struct iscsi_np *np;
	bool match;

	lockdep_assert_held(&np_lock);

	list_for_each_entry(np, &g_np_list, np_list) {
		spin_lock_bh(&np->np_thread_lock);
		if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
			spin_unlock_bh(&np->np_thread_lock);
			continue;
		}

		match = iscsit_check_np_match(sockaddr, np, network_transport);
		if (match) {
			/*
			 * Increment the np_exports reference count now to
			 * prevent iscsit_del_np() below from being called
			 * while iscsi_tpg_add_network_portal() is called.
			 */
			np->np_exports++;
			spin_unlock_bh(&np->np_thread_lock);
			return np;
		}
		spin_unlock_bh(&np->np_thread_lock);
	}

	return NULL;
}

/*
 * Return an existing matching network portal (with a new export
 * reference) or create one: allocate the np, set up its login socket,
 * and start the iscsi_np login kthread.  Returns the portal on success
 * or an ERR_PTR on failure.  Serialized against concurrent add/del via
 * np_lock.
 */
struct iscsi_np *iscsit_add_np(
	struct sockaddr_storage *sockaddr,
	int network_transport)
{
	struct iscsi_np *np;
	int ret;

	mutex_lock(&np_lock);

	/*
	 * Locate the existing struct iscsi_np if already active..
	 */
	np = iscsit_get_np(sockaddr, network_transport);
	if (np) {
		mutex_unlock(&np_lock);
		return np;
	}

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np) {
		mutex_unlock(&np_lock);
		return ERR_PTR(-ENOMEM);
	}

	np->np_flags |= NPF_IP_NETWORK;
	np->np_network_transport = network_transport;
	spin_lock_init(&np->np_thread_lock);
	init_completion(&np->np_restart_comp);
	INIT_LIST_HEAD(&np->np_list);

	timer_setup(&np->np_login_timer, iscsi_handle_login_thread_timeout, 0);

	ret = iscsi_target_setup_login_socket(np, sockaddr);
	if (ret != 0) {
		kfree(np);
		mutex_unlock(&np_lock);
		return ERR_PTR(ret);
	}

	np->np_thread = kthread_run(iscsi_target_login_thread, np, "iscsi_np");
	if (IS_ERR(np->np_thread)) {
		pr_err("Unable to create kthread: iscsi_np\n");
		ret = PTR_ERR(np->np_thread);
		/*
		 * NOTE(review): the socket created by
		 * iscsi_target_setup_login_socket() above does not appear to
		 * be released on this path before kfree(np) — verify whether
		 * the login thread or transport owns its teardown.
		 */
		kfree(np);
		mutex_unlock(&np_lock);
		return ERR_PTR(ret);
	}
	/*
	 * Increment the np_exports reference count now to prevent
	 * iscsit_del_np() below from being run while a new call to
	 * iscsi_tpg_add_network_portal() for a matching iscsi_np is
	 * active.  We don't need to hold np->np_thread_lock at this
	 * point because iscsi_np has not been added to g_np_list yet.
	 */
	np->np_exports = 1;
	np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;

	list_add_tail(&np->np_list, &g_np_list);
	mutex_unlock(&np_lock);

	pr_debug("CORE[0] - Added Network Portal: %pISpc on %s\n",
		&np->np_sockaddr, np->np_transport->name);

	return np;
}

/*
 * Ask the np login thread to reset (or observe shutdown).  Signals the
 * thread with SIGINT and waits for it to acknowledge via
 * np_restart_comp.  When shutting down a specific tpg_np, also drops
 * its login kref and waits until the last login reference completes.
 * Always returns 0.
 */
int iscsit_reset_np_thread(
	struct iscsi_np *np,
	struct iscsi_tpg_np *tpg_np,
	struct iscsi_portal_group *tpg,
	bool shutdown)
{
	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state == ISCSI_NP_THREAD_INACTIVE) {
		spin_unlock_bh(&np->np_thread_lock);
		return 0;
	}
	np->np_thread_state = ISCSI_NP_THREAD_RESET;
	/* Lets login code detect a reset that raced with an accept. */
	atomic_inc(&np->np_reset_count);

	if (np->np_thread) {
		spin_unlock_bh(&np->np_thread_lock);
		send_sig(SIGINT, np->np_thread, 1);
		wait_for_completion(&np->np_restart_comp);
		spin_lock_bh(&np->np_thread_lock);
	}
	spin_unlock_bh(&np->np_thread_lock);

	if (tpg_np && shutdown) {
		kref_put(&tpg_np->tpg_np_kref, iscsit_login_kref_put);

		wait_for_completion(&tpg_np->tpg_np_comp);
	}

	return 0;
}

433
/*
 * Transport callback (iSCSI/TCP): release the portal's listening
 * socket, if one was created.
 */
static void iscsit_free_np(struct iscsi_np *np)
{
	if (np->np_socket)
		sock_release(np->np_socket);
}

/*
 * Drop one export reference on a network portal and, when it was the
 * last one, stop the login thread, release transport resources, unlink
 * the portal from g_np_list, and free it.  Always returns 0.
 */
int iscsit_del_np(struct iscsi_np *np)
{
	spin_lock_bh(&np->np_thread_lock);
	np->np_exports--;
	if (np->np_exports) {
		/* Still exported elsewhere: keep the portal alive. */
		np->enabled = true;
		spin_unlock_bh(&np->np_thread_lock);
		return 0;
	}
	np->np_thread_state = ISCSI_NP_THREAD_SHUTDOWN;
	spin_unlock_bh(&np->np_thread_lock);

	if (np->np_thread) {
		/*
		 * We need to send the signal to wakeup Linux/Net
		 * which may be sleeping in sock_accept()..
		 */
		send_sig(SIGINT, np->np_thread, 1);
		kthread_stop(np->np_thread);
		np->np_thread = NULL;
	}

	np->np_transport->iscsit_free_np(np);

	mutex_lock(&np_lock);
	list_del(&np->np_list);
	mutex_unlock(&np_lock);

	pr_debug("CORE[0] - Removed Network Portal: %pISpc on %s\n",
		&np->np_sockaddr, np->np_transport->name);

	iscsit_put_transport(np->np_transport);
	kfree(np);
	return 0;
}

475
static void iscsit_get_rx_pdu(struct iscsi_conn *);
476

Varun Prakash's avatar
Varun Prakash committed
477
/*
 * Transport callback: queue a response PDU for the TX thread by adding
 * the command to its connection's response queue in its current state.
 */
int iscsit_queue_rsp(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	return iscsit_add_cmd_to_response_queue(cmd, cmd->conn, cmd->i_state);
}
EXPORT_SYMBOL(iscsit_queue_rsp);
482

Varun Prakash's avatar
Varun Prakash committed
483
/*
 * Transport callback invoked when a task is aborted: unlink the command
 * from its connection list (unless fabric stop is already tearing it
 * down) and release the command's resources.
 */
void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node) &&
	    !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	__iscsit_free_cmd(cmd, true);
}
EXPORT_SYMBOL(iscsit_aborted_task);
494

495
static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
496
				      u32, u32, const void *, void *);
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *);

/*
 * Transmit a non-DataIN PDU over the TCP connection: build a kvec of
 * header [+ header digest] [+ payload + pad + data digest] and send it
 * with iscsit_send_tx_data().  Returns 0 on success or a negative value
 * after waiting for the TCP connection to drain on send failure.
 */
static int
iscsit_xmit_nondatain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			  const void *data_buf, u32 data_buf_len)
{
	struct iscsi_hdr *hdr = (struct iscsi_hdr *)cmd->pdu;
	struct kvec *iov;
	u32 niov = 0, tx_size = ISCSI_HDR_LEN;
	int ret;

	iov = &cmd->iov_misc[0];
	iov[niov].iov_base	= cmd->pdu;
	iov[niov++].iov_len	= ISCSI_HDR_LEN;

	if (conn->conn_ops->HeaderDigest) {
		/* Digest is stored immediately after the BHS in cmd->pdu. */
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
					  ISCSI_HDR_LEN, 0, NULL,
					  header_digest);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;
		pr_debug("Attaching CRC32C HeaderDigest"
			 " to opcode 0x%x 0x%08x\n",
			 hdr->opcode, *header_digest);
	}

	if (data_buf_len) {
		/* Payload is padded to a 4-byte boundary per RFC 3720. */
		u32 padding = ((-data_buf_len) & 3);

		iov[niov].iov_base	= (void *)data_buf;
		iov[niov++].iov_len	= data_buf_len;
		tx_size += data_buf_len;

		if (padding != 0) {
			iov[niov].iov_base = &cmd->pad_bytes;
			iov[niov++].iov_len = padding;
			tx_size += padding;
			pr_debug("Attaching %u additional"
				 " padding bytes.\n", padding);
		}

		if (conn->conn_ops->DataDigest) {
			/* DataDigest covers payload plus pad bytes. */
			iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
						  data_buf, data_buf_len,
						  padding, &cmd->pad_bytes,
						  &cmd->data_crc);

			iov[niov].iov_base = &cmd->data_crc;
			iov[niov++].iov_len = ISCSI_CRC_LEN;
			tx_size += ISCSI_CRC_LEN;
			pr_debug("Attached DataDigest for %u"
				 " bytes opcode 0x%x, CRC 0x%08x\n",
				 data_buf_len, hdr->opcode, cmd->data_crc);
		}
	}

	cmd->iov_misc_count = niov;
	cmd->tx_size = tx_size;

	ret = iscsit_send_tx_data(cmd, conn, 1);
	if (ret < 0) {
		iscsit_tx_thread_wait_for_tcp(conn);
		return ret;
	}

	return 0;
}

568
569
static int iscsit_map_iovec(struct iscsi_cmd *cmd, struct kvec *iov, int nvec,
			    u32 data_offset, u32 data_length);
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
static void iscsit_unmap_iovec(struct iscsi_cmd *);
static u32 iscsit_do_crypto_hash_sg(struct ahash_request *, struct iscsi_cmd *,
				    u32, u32, u32, u8 *);
/*
 * Transmit a DataIN PDU: header [+ header digest], then the requested
 * slice of the command's scatterlist mapped into kvecs, plus padding
 * and an optional data digest computed over the scatterlist.  Sends via
 * iscsit_fe_sendpage_sg() and always unmaps the iovec afterwards.
 * Returns 0 on success, -1 on mapping failure, or the negative send
 * error after waiting for the TCP connection to drain.
 */
static int
iscsit_xmit_datain_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
		       const struct iscsi_datain *datain)
{
	struct kvec *iov;
	u32 iov_count = 0, tx_size = 0;
	int ret, iov_ret;

	iov = &cmd->iov_data[0];
	iov[iov_count].iov_base	= cmd->pdu;
	iov[iov_count++].iov_len = ISCSI_HDR_LEN;
	tx_size += ISCSI_HDR_LEN;

	if (conn->conn_ops->HeaderDigest) {
		u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];

		iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
					  ISCSI_HDR_LEN, 0, NULL,
					  header_digest);

		iov[0].iov_len += ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;

		pr_debug("Attaching CRC32 HeaderDigest for DataIN PDU 0x%08x\n",
			 *header_digest);
	}

	/*
	 * Reserve two trailing kvec slots (padding + data digest) out of
	 * the originally allocated count.
	 */
	iov_ret = iscsit_map_iovec(cmd, &cmd->iov_data[iov_count],
				   cmd->orig_iov_data_count - (iov_count + 2),
				   datain->offset, datain->length);
	if (iov_ret < 0)
		return -1;

	iov_count += iov_ret;
	tx_size += datain->length;

	/* Pad payload to a 4-byte boundary per RFC 3720. */
	cmd->padding = ((-datain->length) & 3);
	if (cmd->padding) {
		iov[iov_count].iov_base		= cmd->pad_bytes;
		iov[iov_count++].iov_len	= cmd->padding;
		tx_size += cmd->padding;

		pr_debug("Attaching %u padding bytes\n", cmd->padding);
	}

	if (conn->conn_ops->DataDigest) {
		cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash,
							 cmd, datain->offset,
							 datain->length,
							 cmd->padding,
							 cmd->pad_bytes);

		iov[iov_count].iov_base	= &cmd->data_crc;
		iov[iov_count++].iov_len = ISCSI_CRC_LEN;
		tx_size += ISCSI_CRC_LEN;

		pr_debug("Attached CRC32C DataDigest %d bytes, crc 0x%08x\n",
			 datain->length + cmd->padding, cmd->data_crc);
	}

	cmd->iov_data_count = iov_count;
	cmd->tx_size = tx_size;

	ret = iscsit_fe_sendpage_sg(cmd, conn);

	/* Undo the kmap()s done by iscsit_map_iovec() on every path. */
	iscsit_unmap_iovec(cmd);

	if (ret < 0) {
		iscsit_tx_thread_wait_for_tcp(conn);
		return ret;
	}

	return 0;
}

/*
 * Transport callback: dispatch PDU transmission.  A non-NULL datain_req
 * selects the DataIN (scatterlist) path; everything else goes out as a
 * non-DataIN PDU with an optional flat payload buffer.
 */
static int iscsit_xmit_pdu(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			   struct iscsi_datain_req *dr, const void *buf,
			   u32 buf_len)
{
	if (!dr)
		return iscsit_xmit_nondatain_pdu(conn, cmd, buf, buf_len);

	return iscsit_xmit_datain_pdu(conn, cmd, buf);
}

658
659
660
661
662
/*
 * Transport callback: iSCSI/TCP offers no T10-PI offload, so only
 * normal (unprotected) operation is reported.
 */
static enum target_prot_op iscsit_get_sup_prot_ops(struct iscsi_conn *conn)
{
	return TARGET_PROT_NORMAL;
}

663
664
665
/* Built-in iSCSI/TCP transport ops, registered at module init. */
static struct iscsit_transport iscsi_target_transport = {
	.name			= "iSCSI/TCP",
	.transport_type		= ISCSI_TCP,
	.rdma_shutdown		= false,
	.owner			= NULL,
	.iscsit_setup_np	= iscsit_setup_np,
	.iscsit_accept_np	= iscsit_accept_np,
	.iscsit_free_np		= iscsit_free_np,
	.iscsit_get_login_rx	= iscsit_get_login_rx,
	.iscsit_put_login_tx	= iscsit_put_login_tx,
	.iscsit_get_dataout	= iscsit_build_r2ts_for_cmd,
	.iscsit_immediate_queue	= iscsit_immediate_queue,
	.iscsit_response_queue	= iscsit_response_queue,
	.iscsit_queue_data_in	= iscsit_queue_rsp,
	.iscsit_queue_status	= iscsit_queue_rsp,
	.iscsit_aborted_task	= iscsit_aborted_task,
	.iscsit_xmit_pdu	= iscsit_xmit_pdu,
	.iscsit_get_rx_pdu	= iscsit_get_rx_pdu,
	.iscsit_get_sup_prot_ops = iscsit_get_sup_prot_ops,
};

684
685
/*
 * Module init: allocate the global state, register the fabric template,
 * create the lookaside caches, register the built-in iSCSI/TCP
 * transport and load the discovery TPG.  Unwinds in reverse order via
 * gotos on failure.
 *
 * Fixes over the previous version: kzalloc() failure now returns
 * -ENOMEM instead of a bare -1, and the common "out" label propagates
 * the actual error code (e.g. from target_register_template()) rather
 * than unconditionally returning -ENOMEM.
 */
static int __init iscsi_target_init_module(void)
{
	int ret = 0, size;

	pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");
	iscsit_global = kzalloc(sizeof(*iscsit_global), GFP_KERNEL);
	if (!iscsit_global)
		return -ENOMEM;

	spin_lock_init(&iscsit_global->ts_bitmap_lock);
	mutex_init(&auth_id_lock);
	idr_init(&tiqn_idr);

	ret = target_register_template(&iscsi_ops);
	if (ret)
		goto out;

	/* Every failure from here down is an allocation failure. */
	ret = -ENOMEM;

	size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
	iscsit_global->ts_bitmap = vzalloc(size);
	if (!iscsit_global->ts_bitmap)
		goto configfs_out;

	lio_qr_cache = kmem_cache_create("lio_qr_cache",
			sizeof(struct iscsi_queue_req),
			__alignof__(struct iscsi_queue_req), 0, NULL);
	if (!lio_qr_cache) {
		pr_err("Unable to kmem_cache_create() for"
				" lio_qr_cache\n");
		goto bitmap_out;
	}

	lio_dr_cache = kmem_cache_create("lio_dr_cache",
			sizeof(struct iscsi_datain_req),
			__alignof__(struct iscsi_datain_req), 0, NULL);
	if (!lio_dr_cache) {
		pr_err("Unable to kmem_cache_create() for"
				" lio_dr_cache\n");
		goto qr_out;
	}

	lio_ooo_cache = kmem_cache_create("lio_ooo_cache",
			sizeof(struct iscsi_ooo_cmdsn),
			__alignof__(struct iscsi_ooo_cmdsn), 0, NULL);
	if (!lio_ooo_cache) {
		pr_err("Unable to kmem_cache_create() for"
				" lio_ooo_cache\n");
		goto dr_out;
	}

	lio_r2t_cache = kmem_cache_create("lio_r2t_cache",
			sizeof(struct iscsi_r2t), __alignof__(struct iscsi_r2t),
			0, NULL);
	if (!lio_r2t_cache) {
		pr_err("Unable to kmem_cache_create() for"
				" lio_r2t_cache\n");
		goto ooo_out;
	}

	iscsit_register_transport(&iscsi_target_transport);

	if (iscsit_load_discovery_tpg() < 0)
		goto r2t_out;

	return 0;
r2t_out:
	iscsit_unregister_transport(&iscsi_target_transport);
	kmem_cache_destroy(lio_r2t_cache);
ooo_out:
	kmem_cache_destroy(lio_ooo_cache);
dr_out:
	kmem_cache_destroy(lio_dr_cache);
qr_out:
	kmem_cache_destroy(lio_qr_cache);
bitmap_out:
	vfree(iscsit_global->ts_bitmap);
configfs_out:
	/* XXX: this probably wants it to be it's own unwind step.. */
	if (iscsit_global->discovery_tpg)
		iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);
	target_unregister_template(&iscsi_ops);
out:
	kfree(iscsit_global);
	return ret;
}

/*
 * Module exit: tear down in rough reverse of init — release the
 * discovery TPG, unregister the built-in transport, destroy the
 * lookaside caches, disable the discovery portal group, unregister the
 * fabric template and free the global state.
 */
static void __exit iscsi_target_cleanup_module(void)
{
	iscsit_release_discovery_tpg();
	iscsit_unregister_transport(&iscsi_target_transport);
	kmem_cache_destroy(lio_qr_cache);
	kmem_cache_destroy(lio_dr_cache);
	kmem_cache_destroy(lio_ooo_cache);
	kmem_cache_destroy(lio_r2t_cache);

	/*
	 * Shutdown discovery sessions and disable discovery TPG
	 */
	if (iscsit_global->discovery_tpg)
		iscsit_tpg_disable_portal_group(iscsit_global->discovery_tpg, 1);

	target_unregister_template(&iscsi_ops);

	vfree(iscsit_global->ts_bitmap);
	kfree(iscsit_global);
}

Varun Prakash's avatar
Varun Prakash committed
790
/*
 * Allocate a new command carrying an iSCSI Reject PDU for the given
 * reason, copy the offending PDU header into it, link it onto the
 * connection and queue it for transmission.  Always returns -1 so
 * callers can propagate the rejection as a failure.
 */
int iscsit_add_reject(
	struct iscsi_conn *conn,
	u8 reason,
	unsigned char *buf)
{
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd)
		return -1;

	cmd->iscsi_opcode = ISCSI_OP_REJECT;
	cmd->reject_reason = reason;

	/* Reject PDUs echo the rejected PDU's header as payload. */
	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
	if (!cmd->buf_ptr) {
		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
		iscsit_free_cmd(cmd, false);
		return -1;
	}

	spin_lock_bh(&conn->cmd_lock);
	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
	spin_unlock_bh(&conn->cmd_lock);

	cmd->i_state = ISTATE_SEND_REJECT;
	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);

	return -1;
}
EXPORT_SYMBOL(iscsit_add_reject);
821

822
823
/*
 * Turn an existing command into an iSCSI Reject PDU: copy the rejected
 * header, optionally link the command onto the connection, queue the
 * reject for transmission, and drop the se_cmd reference if one was
 * already taken.  Always returns -1 so callers propagate the failure.
 */
static int iscsit_add_reject_from_cmd(
	struct iscsi_cmd *cmd,
	u8 reason,
	bool add_to_conn,
	unsigned char *buf)
{
	struct iscsi_conn *conn;
	/* Sample before queueing; the response path may free the cmd. */
	const bool do_put = cmd->se_cmd.se_tfo != NULL;

	if (!cmd->conn) {
		pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
				cmd->init_task_tag);
		return -1;
	}
	conn = cmd->conn;

	cmd->iscsi_opcode = ISCSI_OP_REJECT;
	cmd->reject_reason = reason;

	cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
	if (!cmd->buf_ptr) {
		pr_err("Unable to allocate memory for cmd->buf_ptr\n");
		iscsit_free_cmd(cmd, false);
		return -1;
	}

	if (add_to_conn) {
		spin_lock_bh(&conn->cmd_lock);
		list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
		spin_unlock_bh(&conn->cmd_lock);
	}

	cmd->i_state = ISTATE_SEND_REJECT;
	iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
	/*
	 * Perform the kref_put now if se_cmd has already been setup by
	 * scsit_setup_scsi_cmd()
	 */
	if (do_put) {
		pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
		target_put_sess_cmd(&cmd->se_cmd);
	}
	return -1;
}
866

867
868
869
870
871
872
873
874
875
/* Reject a cmd and also link it onto the connection's command list. */
static int iscsit_add_reject_cmd(struct iscsi_cmd *cmd, u8 reason,
				 unsigned char *buf)
{
	return iscsit_add_reject_from_cmd(cmd, reason, true, buf);
}

/* Reject a cmd that is already linked (or must not be linked) to the
 * connection's command list. */
int iscsit_reject_cmd(struct iscsi_cmd *cmd, u8 reason, unsigned char *buf)
{
	return iscsit_add_reject_from_cmd(cmd, reason, false, buf);
}
EXPORT_SYMBOL(iscsit_reject_cmd);
878
879
880

/*
 * Map some portion of the allocated scatterlist to an iovec, suitable for
 * kernel sockets to copy data in/out.
 *
 * Maps at most @nvec entries covering [@data_offset, @data_offset +
 * @data_length) of cmd's t_data_sg via kmap(), recording the starting
 * sg and the number of mapped entries on the cmd so that
 * iscsit_unmap_iovec() can undo them.  Returns the number of kvecs
 * filled (0 for zero length), or -1 on any out-of-bounds condition.
 */
static int iscsit_map_iovec(struct iscsi_cmd *cmd, struct kvec *iov, int nvec,
			    u32 data_offset, u32 data_length)
{
	u32 i = 0, orig_data_length = data_length;
	struct scatterlist *sg;
	unsigned int page_off;

	/*
	 * We know each entry in t_data_sg contains a page.
	 */
	u32 ent = data_offset / PAGE_SIZE;

	if (!data_length)
		return 0;

	if (ent >= cmd->se_cmd.t_data_nents) {
		pr_err("Initial page entry out-of-bounds\n");
		goto overflow;
	}

	sg = &cmd->se_cmd.t_data_sg[ent];
	page_off = (data_offset % PAGE_SIZE);

	/* Remembered so iscsit_unmap_iovec() knows where mapping began. */
	cmd->first_data_sg = sg;
	cmd->first_data_sg_off = page_off;

	while (data_length) {
		u32 cur_len;

		/* Defensive: never run past the sg list or kvec array. */
		if (WARN_ON_ONCE(!sg || i >= nvec))
			goto overflow;

		cur_len = min_t(u32, data_length, sg->length - page_off);

		iov[i].iov_base = kmap(sg_page(sg)) + sg->offset + page_off;
		iov[i].iov_len = cur_len;

		data_length -= cur_len;
		page_off = 0;
		sg = sg_next(sg);
		i++;
	}

	cmd->kmapped_nents = i;

	return i;

overflow:
	/* Dump the whole sg list to aid debugging the bad request. */
	pr_err("offset %d + length %d overflow; %d/%d; sg-list:\n",
	       data_offset, orig_data_length, i, nvec);
	for_each_sg(cmd->se_cmd.t_data_sg, sg,
		    cmd->se_cmd.t_data_nents, i) {
		pr_err("[%d] off %d len %d\n",
		       i, sg->offset, sg->length);
	}
	return -1;
}

/*
 * Release the kmap()s taken by iscsit_map_iovec(), starting at the
 * first mapped sg entry recorded on the cmd.
 */
static void iscsit_unmap_iovec(struct iscsi_cmd *cmd)
{
	struct scatterlist *sg = cmd->first_data_sg;
	u32 n;

	for (n = 0; n < cmd->kmapped_nents; n++)
		kunmap(sg_page(&sg[n]));
}

/*
 * Process an initiator's ExpStatSN acknowledgement: record it on the
 * connection, then retire every command whose status was sent with a
 * StatSN below the acknowledged value.  Matching commands are moved to
 * a private list under cmd_lock and freed outside the lock.  Skipped
 * entirely for RDMA-extension sessions.
 */
static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
{
	LIST_HEAD(ack_list);
	struct iscsi_cmd *cmd, *cmd_p;

	conn->exp_statsn = exp_statsn;

	if (conn->sess->sess_ops->RDMAExtensions)
		return;

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, cmd_p, &conn->conn_cmd_list, i_conn_node) {
		spin_lock(&cmd->istate_lock);
		if ((cmd->i_state == ISTATE_SENT_STATUS) &&
		    iscsi_sna_lt(cmd->stat_sn, exp_statsn)) {
			cmd->i_state = ISTATE_REMOVE;
			spin_unlock(&cmd->istate_lock);
			/* Collect under the lock, free afterwards. */
			list_move_tail(&cmd->i_conn_node, &ack_list);
			continue;
		}
		spin_unlock(&cmd->istate_lock);
	}
	spin_unlock_bh(&conn->cmd_lock);

	list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);
		iscsit_free_cmd(cmd, false);
	}
}

static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)
{
984
	u32 iov_count = max(1UL, DIV_ROUND_UP(cmd->se_cmd.data_length, PAGE_SIZE));
985

986
	iov_count += ISCSI_IOV_DATA_BUFFER;
987
	cmd->iov_data = kcalloc(iov_count, sizeof(*cmd->iov_data), GFP_KERNEL);
988
	if (!cmd->iov_data)
989
990
991
992
993
994
		return -ENOMEM;

	cmd->orig_iov_data_count = iov_count;
	return 0;
}

995
996
int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			  unsigned char *buf)
997
{
998
	int data_direction, payload_length;
999
	struct iscsi_scsi_req *hdr;
1000
1001
	int iscsi_task_attr;
	int sam_task_attr;
1002

1003
	atomic_long_inc(&conn->sess->cmd_pdus);
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013

	hdr			= (struct iscsi_scsi_req *) buf;
	payload_length		= ntoh24(hdr->dlength);

	/* FIXME; Add checks for AdditionalHeaderSegment */

	if (!(hdr->flags & ISCSI_FLAG_CMD_WRITE) &&
	    !(hdr->flags & ISCSI_FLAG_CMD_FINAL)) {
		pr_err("ISCSI_FLAG_CMD_WRITE & ISCSI_FLAG_CMD_FINAL"
				" not set. Bad iSCSI Initiator.\n");
1014
1015
		return iscsit_add_reject_cmd(cmd,
					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1016
1017
1018
1019
1020
	}

	if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
	     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
		/*
1021
1022
1023
1024
1025
1026
1027
1028
		 * From RFC-3720 Section 10.3.1:
		 *
		 * "Either or both of R and W MAY be 1 when either the
		 *  Expected Data Transfer Length and/or Bidirectional Read
		 *  Expected Data Transfer Length are 0"
		 *
		 * For this case, go ahead and clear the unnecssary bits
		 * to avoid any confusion with ->data_direction.
1029
		 */
1030
1031
		hdr->flags &= ~ISCSI_FLAG_CMD_READ;
		hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
1032

1033
		pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
1034
			" set when Expected Data Transfer Length is 0 for"
1035
			" CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
1036
1037
1038
1039
1040
1041
1042
	}

	if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
	    !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
		pr_err("ISCSI_FLAG_CMD_READ and/or ISCSI_FLAG_CMD_WRITE"
			" MUST be set if Expected Data Transfer Length is not 0."
			" Bad iSCSI Initiator\n");
1043
1044
		return iscsit_add_reject_cmd(cmd,
					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1045
1046
1047
1048
1049
	}

	if ((hdr->flags & ISCSI_FLAG_CMD_READ) &&
	    (hdr->flags & ISCSI_FLAG_CMD_WRITE)) {
		pr_err("Bidirectional operations not supported!\n");
1050
1051
		return iscsit_add_reject_cmd(cmd,
					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1052
1053
1054
1055
1056
	}

	if (hdr->opcode & ISCSI_OP_IMMEDIATE) {
		pr_err("Illegally set Immediate Bit in iSCSI Initiator"
				" Scsi Command PDU.\n");
1057
1058
		return iscsit_add_reject_cmd(cmd,
					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1059
1060
1061
1062
1063
	}

	if (payload_length && !conn->sess->sess_ops->ImmediateData) {
		pr_err("ImmediateData=No but DataSegmentLength=%u,"
			" protocol error.\n", payload_length);
1064
1065
		return iscsit_add_reject_cmd(cmd,
					     ISCSI_REASON_PROTOCOL_ERROR, buf);
1066
1067
	}

1068
	if ((be32_to_cpu(hdr->data_length) == payload_length) &&
1069
1070
1071
1072
	    (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))) {
		pr_err("Expected Data Transfer Length and Length of"
			" Immediate Data are the same, but ISCSI_FLAG_CMD_FINAL"
			" bit is not set protocol error\n");
1073
1074
		return iscsit_add_reject_cmd(cmd,
					     ISCSI_REASON_PROTOCOL_ERROR, buf);
1075
1076
	}

1077
	if (payload_length > be32_to_cpu(hdr->data_length)) {
1078
1079
1080
		pr_err("DataSegmentLength: %u is greater than"
			" EDTL: %u, protocol error.\n", payload_length,
				hdr->data_length);
1081
1082
		return iscsit_add_reject_cmd(cmd,
					     ISCSI_REASON_PROTOCOL_ERROR, buf);
1083
1084
	}

1085
	if (payload_length > conn->conn_ops->MaxXmitDataSegmentLength) {
1086
		pr_err("DataSegmentLength: %u is greater than"
1087
1088
			" MaxXmitDataSegmentLength: %u, protocol error.\n",
			payload_length, conn->conn_ops->MaxXmitDataSegmentLength);
1089
1090
		return iscsit_add_reject_cmd(cmd,
					     ISCSI_REASON_PROTOCOL_ERROR, buf);
1091
1092
1093
1094
1095
1096
	}

	if (payload_length > conn->sess->sess_ops->FirstBurstLength) {
		pr_err("DataSegmentLength: %u is greater than"
			" FirstBurstLength: %u, protocol error.\n",
			payload_length, conn->sess->sess_ops->FirstBurstLength);
1097
1098
		return iscsit_add_reject_cmd(cmd,
					     ISCSI_REASON_BOOKMARK_INVALID, buf);
1099
1100
1101
1102
1103
1104
	}

	data_direction = (hdr->flags & ISCSI_FLAG_CMD_WRITE) ? DMA_TO_DEVICE :
			 (hdr->flags & ISCSI_FLAG_CMD_READ) ? DMA_FROM_DEVICE :
			  DMA_NONE;

1105
1106
1107
1108
1109
1110
1111
	cmd->data_direction = data_direction;
	iscsi_task_attr = hdr->flags & ISCSI_FLAG_CMD_ATTR_MASK;
	/*
	 * Figure out the SAM Task Attribute for the incoming SCSI CDB
	 */
	if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
	    (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
1112
		sam_task_attr = TCM_SIMPLE_TAG;
1113
	else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
1114
		sam_task_attr = TCM_ORDERED_TAG;
1115
	else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
1116
		sam_task_attr = TCM_HEAD_TAG;
1117
	else if (iscsi_task_attr == ISCSI_ATTR_ACA)
1118
		sam_task_attr = TCM_ACA_TAG;
1119
1120
	else {
		pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
1121
1122
			" TCM_SIMPLE_TAG\n", iscsi_task_attr);
		sam_task_attr = TCM_SIMPLE_TAG;
1123
1124
	}

1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
	cmd->iscsi_opcode	= ISCSI_OP_SCSI_CMD;
	cmd->i_state		= ISTATE_NEW_CMD;
	cmd->immediate_cmd	= ((hdr->opcode & ISCSI_OP_IMMEDIATE) ? 1 : 0);
	cmd->immediate_data	= (payload_length) ? 1 : 0;
	cmd->unsolicited_data	= ((!(hdr->flags & ISCSI_FLAG_CMD_FINAL) &&
				     (hdr->flags & ISCSI_FLAG_CMD_WRITE)) ? 1 : 0);
	if (cmd->unsolicited_data)
		cmd->cmd_flags |= ICF_NON_IMMEDIATE_UNSOLICITED_DATA;

	conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
1135
	if (hdr->flags & ISCSI_FLAG_CMD_READ)
1136
		cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
1137
	else
1138
		cmd->targ_xfer_tag = 0xFFFFFFFF;
1139
1140
	cmd->cmd_sn		= be32_to_cpu(hdr->cmdsn);
	cmd->exp_stat_sn	= be32_to_cpu(hdr->exp_statsn);
1141
1142
	cmd->first_burst_len	= payload_length;

1143
1144
	if (!conn->sess->sess_ops->RDMAExtensions &&
	     cmd->data_direction == DMA_FROM_DEVICE) {
1145
1146
1147
1148
		struct iscsi_datain_req *dr;

		dr = iscsit_allocate_datain_req();
		if (!dr)
1149
1150
			return iscsit_add_reject_cmd(cmd,
					ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1151
1152
1153
1154

		iscsit_attach_datain_req(cmd, dr);
	}

1155
1156
1157
	/*
	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
	 */
1158
	transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
1159
1160
1161
			conn->sess->se_sess, be32_to_cpu(hdr->data_length),
			cmd->data_direction, sam_task_attr,
			cmd->sense_buffer + 2);
1162
1163
1164

	pr_debug("Got SCSI Command, ITT: 0x%08x, CmdSN: 0x%08x,"
		" ExpXferLen: %u, Length: %u, CID: %hu\n", hdr->itt,
1165
1166
1167
		hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
		conn->cid);

1168
1169
1170
	if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
1171

1172
1173
1174
1175
1176
	cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
						     scsilun_to_int(&hdr->lun));
	if (cmd->sense_reason)
		goto attach_cmd;

1177
1178
	/* only used for printks or comparing with ->ref_task_tag */
	cmd->se_cmd.tag = (__force u32)cmd->init_task_tag;
1179
1180
1181
	cmd->sense_reason = target_setup_cmd_from_cdb(&cmd->se_cmd, hdr->cdb);
	if (cmd->sense_reason) {
		if (cmd->sense_reason == TCM_OUT_OF_RESOURCES) {
1182
1183
			return iscsit_add_reject_cmd(cmd,
					ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1184
		}
1185

1186
1187
		goto attach_cmd;
	}
1188

1189
	if (iscsit_build_pdu_and_seq_lists(cmd, payload_length) < 0) {
1190
1191
		return iscsit_add_reject_cmd(cmd,
				ISCSI_REASON_BOOKMARK_NO_RESOURCES, buf);
1192
1193
1194
1195
	}

attach_cmd:
	spin_lock_bh(&conn->cmd_lock);
1196
	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
1197
1198
1199
1200
1201
1202
	spin_unlock_bh(&conn->cmd_lock);
	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(&cmd->se_cmd);
1203

1204
1205
1206
	return 0;
}
EXPORT_SYMBOL(iscsit_setup_scsi_cmd);
1207

1208
void iscsit_set_unsolicited_dataout(struct iscsi_cmd *cmd)
1209
1210
1211
1212
1213
1214
1215
{
	iscsit_set_dataout_sequence_values(cmd);

	spin_lock_bh(&cmd->dataout_timeout_lock);
	iscsit_start_dataout_timer(cmd, cmd->conn);
	spin_unlock_bh(&cmd->dataout_timeout_lock);
}
1216
EXPORT_SYMBOL(iscsit_set_unsolicited_dataout);
1217
1218
1219
1220
1221

int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
			    struct iscsi_scsi_req *hdr)
{
	int cmdsn_ret = 0;
1222
1223
1224
1225