/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "fnic_io.h"
#include "fnic.h"

const char *fnic_state_str[] = {
	[FNIC_IN_FC_MODE] =           "FNIC_IN_FC_MODE",
	[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
	[FNIC_IN_ETH_MODE] =          "FNIC_IN_ETH_MODE",
	[FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};

static const char *fnic_ioreq_state_str[] = {
	[FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
	[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
	[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
	[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
	[FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};

static const char *fcpio_status_str[] =  {
	[FCPIO_SUCCESS] = "FCPIO_SUCCESS", /*0x0*/
	[FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
	[FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
	[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
	[FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
	[FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
	[FCPIO_ABORTED] = "FCPIO_ABORTED", /*0x41*/
	[FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
	[FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
	[FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
	[FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
	[FCPIO_FW_ERR] = "FCPIO_FW_ERR",
	[FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
	[FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
	[FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
	[FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
	[FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
	[FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
	[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
};

const char *fnic_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
		return "unknown";

	return fnic_state_str[state];
}

static const char *fnic_ioreq_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
	    !fnic_ioreq_state_str[state])
		return "unknown";

	return fnic_ioreq_state_str[state];
}

static const char *fnic_fcpio_status_to_str(unsigned int status)
{
	if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
		return "unknown";

	return fcpio_status_str[status];
}

static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);

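/*
 * Per-IO state is protected by a small array of spinlocks; a command's
 * SCSI tag is hashed into the array by masking with (FNIC_IO_LOCKS - 1).
 */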
static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
					    struct scsi_cmnd *sc)
{
	u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);

	return &fnic->io_req_lock[hash];
}

static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic,
					    int tag)
{
	return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)];
}

/*
 * Unmap the data buffer and sense buffer for an io_req,
 * also unmap and free the device-private scatter/gather list.
 */
static void fnic_release_ioreq_buf(struct fnic *fnic,
				   struct fnic_io_req *io_req,
				   struct scsi_cmnd *sc)
{
	if (io_req->sgl_list_pa)
		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
				 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
				 DMA_TO_DEVICE);
	scsi_dma_unmap(sc);

	if (io_req->sgl_cnt)
		mempool_free(io_req->sgl_list_alloc,
			     fnic->io_sgl_pool[io_req->sgl_type]);
	if (io_req->sense_buf_pa)
		dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}

/* Free up Copy Wq descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
{
	/* if no Ack received from firmware, then nothing to clean */
	if (!fnic->fw_ack_recd[0])
		return 1;

	/*
	 * Update desc_available count based on number of freed descriptors
	 * Account for wraparound
	 */
	if (wq->to_clean_index <= fnic->fw_ack_index[0])
		wq->ring.desc_avail += (fnic->fw_ack_index[0]
					- wq->to_clean_index + 1);
	else
		wq->ring.desc_avail += (wq->ring.desc_count
					- wq->to_clean_index
					+ fnic->fw_ack_index[0] + 1);

	/*
	 * just bump clean index to ack_index+1 accounting for wraparound
	 * this will essentially free up all descriptors between
	 * to_clean_index and fw_ack_index, both inclusive
	 */
	wq->to_clean_index =
		(fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;

	/* we have processed the acks received so far */
	fnic->fw_ack_recd[0] = 0;
	return 0;
}


/**
 * __fnic_set_state_flags
 * Sets/Clears bits in fnic's state_flags
 **/
void
__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
			unsigned long clearbits)
{
	unsigned long flags = 0;
	unsigned long host_lock_flags = 0;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	spin_lock_irqsave(fnic->lport->host->host_lock, host_lock_flags);

	if (clearbits)
		fnic->state_flags &= ~st_flags;
	else
		fnic->state_flags |= st_flags;

	spin_unlock_irqrestore(fnic->lport->host->host_lock, host_lock_flags);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return;
}


/*
 * fnic_fw_reset_handler
 * Routine to send reset msg to fw
 */
int fnic_fw_reset_handler(struct fnic *fnic)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	int ret = 0;
	unsigned long flags;

	/* indicate fwreset to io path */
	fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);

	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	/* wait for io cmpl */
	while (atomic_read(&fnic->in_flight))
		schedule_timeout(msecs_to_jiffies(1));

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq))
		ret = -EAGAIN;
	else {
		fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
		atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
		if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
			  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
			atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
				atomic64_read(
				  &fnic->fnic_stats.fw_stats.active_fw_reqs));
	}

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

	if (!ret) {
		atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Issued fw reset\n");
	} else {
		fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Failed to issue fw reset\n");
	}

	return ret;
}


/*
 * fnic_flogi_reg_handler
 * Routine to send flogi register msg to fw
 */
int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	enum fcpio_flogi_reg_format_type format;
	struct fc_lport *lp = fnic->lport;
	u8 gw_mac[ETH_ALEN];
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		ret = -EAGAIN;
		goto flogi_reg_ioreq_end;
	}

	if (fnic->ctlr.map_dest) {
		memset(gw_mac, 0xff, ETH_ALEN);
		format = FCPIO_FLOGI_REG_DEF_DEST;
	} else {
		memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
		format = FCPIO_FLOGI_REG_GW_DEST;
	}

	if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
		fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
						fc_id, gw_mac,
						fnic->data_src_addr,
						lp->r_a_tov, lp->e_d_tov);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
			      fc_id, fnic->data_src_addr, gw_mac);
	} else {
		fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
						  format, fc_id, gw_mac);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI reg issued fcid %x map %d dest %pM\n",
			      fc_id, fnic->ctlr.map_dest, gw_mac);
	}

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

flogi_reg_ioreq_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	return ret;
}

/*
 * fnic_queue_wq_copy_desc
 * Routine to enqueue a wq copy desc
 */
static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
					  struct vnic_wq_copy *wq,
					  struct fnic_io_req *io_req,
					  struct scsi_cmnd *sc,
					  int sg_count)
{
	struct scatterlist *sg;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct host_sg_desc *desc;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned int i;
	unsigned long intr_flags;
	int flags;
	u8 exch_flags;
	struct scsi_lun fc_lun;

	if (sg_count) {
		/* For each SGE, create a device desc entry */
		desc = io_req->sgl_list;
		for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
			desc->addr = cpu_to_le64(sg_dma_address(sg));
			desc->len = cpu_to_le32(sg_dma_len(sg));
			desc->_resvd = 0;
			desc++;
		}

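		/*
		 * Map the driver-private SG descriptor list itself so the
		 * firmware can fetch it by DMA.
		 */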
		io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
				io_req->sgl_list,
				sizeof(io_req->sgl_list[0]) * sg_count,
				DMA_TO_DEVICE);
		if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
			printk(KERN_ERR "DMA mapping failed\n");
			return SCSI_MLQUEUE_HOST_BUSY;
		}
	}

	io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
					      sc->sense_buffer,
					      SCSI_SENSE_BUFFERSIZE,
					      DMA_FROM_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
		dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
				sizeof(io_req->sgl_list[0]) * sg_count,
				DMA_TO_DEVICE);
		printk(KERN_ERR "DMA mapping failed\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	int_to_scsilun(sc->device->lun, &fc_lun);

	/* Enqueue the descriptor in the Copy WQ */
	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			  "fnic_queue_wq_copy_desc failure - no descriptors\n");
		atomic64_inc(&misc_stats->io_cpwq_alloc_failures);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	flags = 0;
	if (sc->sc_data_direction == DMA_FROM_DEVICE)
		flags = FCPIO_ICMND_RDDATA;
	else if (sc->sc_data_direction == DMA_TO_DEVICE)
		flags = FCPIO_ICMND_WRDATA;

	exch_flags = 0;
	if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
	    (rp->flags & FC_RP_FLAGS_RETRY))
		exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;

	fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
					 0, exch_flags, io_req->sgl_cnt,
					 SCSI_SENSE_BUFFERSIZE,
					 io_req->sgl_list_pa,
					 io_req->sense_buf_pa,
					 0, /* scsi cmd ref, always 0 */
					 FCPIO_ICMND_PTA_SIMPLE,
					 	/* scsi pri and tag */
					 flags,	/* command flags */
					 sc->cmnd, sc->cmd_len,
					 scsi_bufflen(sc),
					 fc_lun.scsi_lun, io_req->port_id,
					 rport->maxframe_size, rp->r_a_tov,
					 rp->e_d_tov);

	atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs);
	if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) >
		  atomic64_read(&fnic->fnic_stats.fw_stats.max_fw_reqs))
		atomic64_set(&fnic->fnic_stats.fw_stats.max_fw_reqs,
		  atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs));

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	return 0;
}

/*
 * fnic_queuecommand
 * Routine to send a scsi cdb
 * Called with host_lock held and interrupts disabled.
 */
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
	struct fc_lport *lp = shost_priv(sc->device->host);
	struct fc_rport *rport;
	struct fnic_io_req *io_req = NULL;
	struct fnic *fnic = lport_priv(lp);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct vnic_wq_copy *wq;
	int ret;
	u64 cmd_trace;
	int sg_count = 0;
	unsigned long flags = 0;
	unsigned long ptr;
	spinlock_t *io_lock = NULL;
	int io_lock_acquired = 0;
	struct fc_rport_libfc_priv *rp;

	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
		return SCSI_MLQUEUE_HOST_BUSY;

	if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET)))
		return SCSI_MLQUEUE_HOST_BUSY;

	rport = starget_to_rport(scsi_target(sc->device));
	if (!rport) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"returning DID_NO_CONNECT for IO as rport is NULL\n");
		sc->result = DID_NO_CONNECT << 16;
		done(sc);
		return 0;
	}

	ret = fc_remote_port_chkready(rport);
	if (ret) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"rport is not ready\n");
		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		sc->result = ret;
		done(sc);
		return 0;
	}

	rp = rport->dd_data;
	if (!rp || rp->rp_state == RPORT_ST_DELETE) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"rport 0x%x removed, returning DID_NO_CONNECT\n",
			rport->port_id);

		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
		sc->result = DID_NO_CONNECT<<16;
		done(sc);
		return 0;
	}

	if (rp->rp_state != RPORT_ST_READY) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			"rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
			rport->port_id, rp->rp_state);

		sc->result = DID_IMM_RETRY << 16;
		done(sc);
		return 0;
	}

	if (lp->state != LPORT_ST_READY || !(lp->link_up))
		return SCSI_MLQUEUE_HOST_BUSY;

	atomic_inc(&fnic->in_flight);

	/*
	 * Release host lock, use driver resource specific locks from here.
	 * Don't re-enable interrupts in case they were disabled prior to the
	 * caller disabling them.
	 */
	spin_unlock(lp->host->host_lock);
	CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
	CMD_FLAGS(sc) = FNIC_NO_FLAGS;

	/* Get a new io_req for this SCSI IO */
	io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.alloc_failures);
		ret = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	memset(io_req, 0, sizeof(*io_req));

	/* Map the data buffer */
	sg_count = scsi_dma_map(sc);
	if (sg_count < 0) {
		FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
			  sc->request->tag, sc, 0, sc->cmnd[0],
			  sg_count, CMD_STATE(sc));
		mempool_free(io_req, fnic->io_req_pool);
		goto out;
	}

	/* Determine the type of scatter/gather list we need */
	io_req->sgl_cnt = sg_count;
	io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
	if (sg_count > FNIC_DFLT_SG_DESC_CNT)
		io_req->sgl_type = FNIC_SGL_CACHE_MAX;

	if (sg_count) {
		io_req->sgl_list =
			mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
				      GFP_ATOMIC);
		if (!io_req->sgl_list) {
			atomic64_inc(&fnic_stats->io_stats.alloc_failures);
			ret = SCSI_MLQUEUE_HOST_BUSY;
			scsi_dma_unmap(sc);
			mempool_free(io_req, fnic->io_req_pool);
			goto out;
		}

		/* Cache sgl list allocated address before alignment */
		io_req->sgl_list_alloc = io_req->sgl_list;
		ptr = (unsigned long) io_req->sgl_list;
		if (ptr % FNIC_SG_DESC_ALIGN) {
			io_req->sgl_list = (struct host_sg_desc *)
				(((unsigned long) ptr
				  + FNIC_SG_DESC_ALIGN - 1)
				 & ~(FNIC_SG_DESC_ALIGN - 1));
		}
	}

	/*
	 * Will acquire lock before setting to IO initialized.
	 */

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	/* initialize rest of io_req */
	io_lock_acquired = 1;
	io_req->port_id = rport->port_id;
	io_req->start_time = jiffies;
	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
	CMD_SP(sc) = (char *)io_req;
	CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
	sc->scsi_done = done;

	/* create copy wq desc and enqueue it */
	wq = &fnic->wq_copy[0];
	ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
	if (ret) {
		/*
		 * In case another thread cancelled the request,
		 * refetch the pointer under the lock.
		 */
		FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
			  sc->request->tag, sc, 0, 0, 0,
			  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		CMD_SP(sc) = NULL;
		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
		spin_unlock_irqrestore(io_lock, flags);
		if (io_req) {
			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
		}
		atomic_dec(&fnic->in_flight);
		/* acquire host lock before returning to SCSI */
		spin_lock(lp->host->host_lock);
		return ret;
	} else {
		atomic64_inc(&fnic_stats->io_stats.active_ios);
		atomic64_inc(&fnic_stats->io_stats.num_ios);
		if (atomic64_read(&fnic_stats->io_stats.active_ios) >
			  atomic64_read(&fnic_stats->io_stats.max_active_ios))
			atomic64_set(&fnic_stats->io_stats.max_active_ios,
			     atomic64_read(&fnic_stats->io_stats.active_ios));

		/* REVISIT: Use per IO lock in the final code */
		CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
	}
out:
	cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
			(u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
			(u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
			sc->cmnd[5]);

	FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
		  sc->request->tag, sc, io_req,
		  sg_count, cmd_trace,
		  (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));

	/* only if we issued the IO do we hold the io lock */
	if (io_lock_acquired)
		spin_unlock_irqrestore(io_lock, flags);

	atomic_dec(&fnic->in_flight);
	/* acquire host lock before returning to SCSI */
	spin_lock(lp->host->host_lock);
	return ret;
}

DEF_SCSI_QCMD(fnic_queuecommand)

/*
 * fnic_fcpio_fw_reset_cmpl_handler
 * Routine to handle fw reset completion
 */
static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
					    struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;
	struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	atomic64_inc(&reset_stats->fw_reset_completions);

	/* Clean up all outstanding io requests */
	fnic_cleanup_io(fnic, SCSI_NO_TAG);

	atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
	atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
	atomic64_set(&fnic->io_cmpl_skip, 0);

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	/* fnic should be in FC_TRANS_ETH_MODE */
	if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
		/* Check status of reset completion */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "reset cmpl success\n");
			/* Ready to send flogi out */
			fnic->state = FNIC_IN_ETH_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic fw_reset : failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));

			/*
			 * Unable to change to eth mode, cannot send out flogi
			 * Change state to fc mode, so that subsequent Flogi
			 * requests from libFC will cause more attempts to
			 * reset the firmware. Free the cached flogi
			 */
			fnic->state = FNIC_IN_FC_MODE;
			atomic64_inc(&reset_stats->fw_reset_failures);
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "Unexpected state %s while processing"
			      " reset cmpl\n", fnic_state_to_str(fnic->state));
		atomic64_inc(&reset_stats->fw_reset_failures);
		ret = -1;
	}

	/* Thread removing device blocks till firmware reset is complete */
	if (fnic->remove_wait)
		complete(fnic->remove_wait);

	/*
	 * If fnic is being removed, or fw reset failed
	 * free the flogi frame. Else, send it out
	 */
	if (fnic->remove_wait || ret) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		skb_queue_purge(&fnic->tx_queue);
		goto reset_cmpl_handler_end;
	}

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	fnic_flush_tx(fnic);

 reset_cmpl_handler_end:
	fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);

	return ret;
}

/*
 * fnic_fcpio_flogi_reg_cmpl_handler
 * Routine to handle flogi register completion
 */
static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
					     struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	/* Update fnic state based on status of flogi reg completion */
	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {

		/* Check flogi registration completion status */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "flog reg succeeded\n");
			fnic->state = FNIC_IN_FC_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic flogi reg :failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));
			fnic->state = FNIC_IN_ETH_MODE;
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Unexpected fnic state %s while"
			      " processing flogi reg completion\n",
			      fnic_state_to_str(fnic->state));
		ret = -1;
	}

	if (!ret) {
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			goto reg_cmpl_handler_end;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fnic_flush_tx(fnic);
		queue_work(fnic_event_queue, &fnic->frame_work);
	} else {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}

reg_cmpl_handler_end:
	return ret;
}

static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
					u16 request_out)
{
	if (wq->to_clean_index <= wq->to_use_index) {
		/* out of range, stale request_out index */
		if (request_out < wq->to_clean_index ||
		    request_out >= wq->to_use_index)
			return 0;
	} else {
		/* out of range, stale request_out index */
		if (request_out < wq->to_clean_index &&
		    request_out >= wq->to_use_index)
			return 0;
	}
	/* request_out index is in range */
	return 1;
}


/*
 * Mark that ack received and store the Ack index. If there are multiple
 * acks received before Tx thread cleans it up, the latest value will be
 * used which is correct behavior. This state should be in the copy Wq
 * instead of in the fnic
 */
static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
					  unsigned int cq_index,
					  struct fcpio_fw_req *desc)
{
	struct vnic_wq_copy *wq;
	u16 request_out = desc->u.ack.request_out;
	unsigned long flags;
	u64 *ox_id_tag = (u64 *)(void *)desc;

	/* mark the ack state */
	wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	fnic->fnic_stats.misc_stats.last_ack_time = jiffies;
	if (is_ack_index_in_range(wq, request_out)) {
		fnic->fw_ack_index[0] = request_out;
		fnic->fw_ack_recd[0] = 1;
	} else
		atomic64_inc(
			&fnic->fnic_stats.misc_stats.ack_index_out_of_range);

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	FNIC_TRACE(fnic_fcpio_ack_handler,
		  fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
		  ox_id_tag[4], ox_id_tag[5]);
}

/*
 * fnic_fcpio_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 */
static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
					 struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	u64 xfer_len = 0;
	struct fcpio_icmnd_cmpl *icmnd_cmpl;
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned long flags;
	spinlock_t *io_lock;
	u64 cmd_trace;
	unsigned long start_time;
	unsigned long io_duration_time;

	/* Decode the cmpl description to get the io_req id */
	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);
	icmnd_cmpl = &desc->u.icmnd_cmpl;

	if (id >= fnic->fnic_max_tag_id) {
		shost_printk(KERN_ERR, fnic->lport->host,
			"Tag out of range tag %x hdr status = %s\n",
			     id, fnic_fcpio_status_to_str(hdr_status));
		return;
	}

	sc = scsi_host_find_tag(fnic->lport->host, id);
	WARN_ON_ONCE(!sc);
	if (!sc) {
		atomic64_inc(&fnic_stats->io_stats.sc_null);
		shost_printk(KERN_ERR, fnic->lport->host,
			  "icmnd_cmpl sc is null - "
			  "hdr status = %s tag = 0x%x desc = 0x%p\n",
			  fnic_fcpio_status_to_str(hdr_status), id, desc);
		FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
			  fnic->lport->host->host_no, id,
			  ((u64)icmnd_cmpl->_resvd0[1] << 16 |
			  (u64)icmnd_cmpl->_resvd0[0]),
			  ((u64)hdr_status << 16 |
			  (u64)icmnd_cmpl->scsi_status << 8 |
			  (u64)icmnd_cmpl->flags), desc,
			  (u64)icmnd_cmpl->residual, 0);
		return;
	}

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
		spin_unlock_irqrestore(io_lock, flags);
		shost_printk(KERN_ERR, fnic->lport->host,
			  "icmnd_cmpl io_req is null - "
			  "hdr status = %s tag = 0x%x sc 0x%p\n",
			  fnic_fcpio_status_to_str(hdr_status), id, sc);
		return;
	}
	start_time = io_req->start_time;

	/* firmware completed the io */
	io_req->io_completed = 1;

	/*
	 *  if SCSI-ML has already issued abort on this command,
	 *  set completion of the IO. The abts path will clean it up
	 */
	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {

		/*
		 * set the FNIC_IO_DONE so that this doesn't get
		 * flagged as 'out of order' if it was not aborted
		 */
		CMD_FLAGS(sc) |= FNIC_IO_DONE;
		CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
		spin_unlock_irqrestore(io_lock, flags);
		if (hdr_status == FCPIO_ABORTED)
			CMD_FLAGS(sc) |= FNIC_IO_ABORTED;

		FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
			"icmnd_cmpl abts pending "
			  "hdr status = %s tag = 0x%x sc = 0x%p "
			  "scsi_status = %x residual = %d\n",
			  fnic_fcpio_status_to_str(hdr_status),
			  id, sc,
			  icmnd_cmpl->scsi_status,
			  icmnd_cmpl->residual);
		return;
	}

	/* Mark the IO as complete */
	CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;

	icmnd_cmpl = &desc->u.icmnd_cmpl;

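	/* Map the firmware completion status to a SCSI midlayer result */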
	switch (hdr_status) {
	case FCPIO_SUCCESS:
		sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
		xfer_len = scsi_bufflen(sc);
		scsi_set_resid(sc, icmnd_cmpl->residual);

		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
			xfer_len -= icmnd_cmpl->residual;

		if (icmnd_cmpl->scsi_status == SAM_STAT_CHECK_CONDITION)
			atomic64_inc(&fnic_stats->misc_stats.check_condition);

		if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL)
			atomic64_inc(&fnic_stats->misc_stats.queue_fulls);
		break;

	case FCPIO_TIMEOUT:          /* request was timed out */
		atomic64_inc(&fnic_stats->misc_stats.fcpio_timeout);
		sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_ABORTED:          /* request was aborted */
		atomic64_inc(&fnic_stats->misc_stats.fcpio_aborted);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
		atomic64_inc(&fnic_stats->misc_stats.data_count_mismatch);
		scsi_set_resid(sc, icmnd_cmpl->residual);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
		atomic64_inc(&fnic_stats->fw_stats.fw_out_of_resources);
		sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
		atomic64_inc(&fnic_stats->io_stats.io_not_found);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
		atomic64_inc(&fnic_stats->misc_stats.sgl_invalid);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_FW_ERR:           /* request was terminated due to fw error */
		atomic64_inc(&fnic_stats->fw_stats.io_fw_errs);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
		atomic64_inc(&fnic_stats->misc_stats.mss_invalid);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_INVALID_HEADER:   /* header contains invalid data */
	case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
	case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
	default:
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;
	}

	/* Break link with the SCSI command */
	CMD_SP(sc) = NULL;
	CMD_FLAGS(sc) |= FNIC_IO_DONE;

	spin_unlock_irqrestore(io_lock, flags);

	if (hdr_status != FCPIO_SUCCESS) {
		atomic64_inc(&fnic_stats->io_stats.io_failures);
		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
			     fnic_fcpio_status_to_str(hdr_status));
	}

	fnic_release_ioreq_buf(fnic, io_req, sc);

	mempool_free(io_req, fnic->io_req_pool);

	cmd_trace = ((u64)hdr_status << 56) |
		  (u64)icmnd_cmpl->scsi_status << 48 |
		  (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
		  (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
		  (u64)sc->cmnd[4] << 8 | sc->cmnd[5];

	FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
		  sc->device->host->host_no, id, sc,
		  ((u64)icmnd_cmpl->_resvd0[1] << 56 |
		  (u64)icmnd_cmpl->_resvd0[0] << 48 |
		  jiffies_to_msecs(jiffies - start_time)),
		  desc, cmd_trace,
		  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		fnic->lport->host_stats.fcp_input_requests++;
		fnic->fcp_input_bytes += xfer_len;
	} else if (sc->sc_data_direction == DMA_TO_DEVICE) {
		fnic->lport->host_stats.fcp_output_requests++;
		fnic->fcp_output_bytes += xfer_len;
	} else
		fnic->lport->host_stats.fcp_control_requests++;

	atomic64_dec(&fnic_stats->io_stats.active_ios);
	if (atomic64_read(&fnic->io_cmpl_skip))
		atomic64_dec(&fnic->io_cmpl_skip);
	else
		atomic64_inc(&fnic_stats->io_stats.io_completions);

	io_duration_time = jiffies_to_msecs(jiffies) -
						jiffies_to_msecs(start_time);

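	/* Bucket the command's total latency (in ms) into the IO-time histogram */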
	if (io_duration_time <= 10)
		atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
	else if (io_duration_time <= 100)
		atomic64_inc(&fnic_stats->io_stats.io_btw_10_to_100_msec);
	else if (io_duration_time <= 500)
		atomic64_inc(&fnic_stats->io_stats.io_btw_100_to_500_msec);
	else if (io_duration_time <= 5000)
		atomic64_inc(&fnic_stats->io_stats.io_btw_500_to_5000_msec);
	else if (io_duration_time <= 10000)
		atomic64_inc(&fnic_stats->io_stats.io_btw_5000_to_10000_msec);
	else if (io_duration_time <= 30000)
		atomic64_inc(&fnic_stats->io_stats.io_btw_10000_to_30000_msec);
	else {
		atomic64_inc(&fnic_stats->io_stats.io_greater_than_30000_msec);

		if (io_duration_time > atomic64_read(&fnic_stats->io_stats.current_max_io_time))
			atomic64_set(&fnic_stats->io_stats.current_max_io_time, io_duration_time);
	}

	/* Call SCSI completion function to complete the IO */
	if (sc->scsi_done)
		sc->scsi_done(sc);
}

/* fnic_fcpio_itmf_cmpl_handler
 * Routine to handle itmf completions
 */
static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
					struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	struct scsi_cmnd *sc;
	struct fnic_io_req *io_req;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct abort_stats *abts_stats = &fnic->fnic_stats.abts_stats;
	struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	unsigned long flags;
	spinlock_t *io_lock;
	unsigned long start_time;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);

	if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) {
		shost_printk(KERN_ERR, fnic->lport->host,
		"Tag out of range tag %x hdr status = %s\n",
		id, fnic_fcpio_status_to_str(hdr_status));
		return;
	}

	sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
	WARN_ON_ONCE(!sc);
	if (!sc) {
		atomic64_inc(&fnic_stats->io_stats.sc_null);
		shost_printk(KERN_ERR, fnic->lport->host,
			  "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
			  fnic_fcpio_status_to_str(hdr_status), id);
		return;
	}
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		atomic64_inc(&fnic_stats->io_stats.ioreq_null);
		spin_unlock_irqrestore(io_lock, flags);
		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
		shost_printk(KERN_ERR, fnic->lport->host,
			  "itmf_cmpl io_req is null - "
			  "hdr status = %s tag = 0x%x sc 0x%p\n",
			  fnic_fcpio_status_to_str(hdr_status), id, sc);
		return;
	}
	start_time = io_req->start_time;

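	/*
	 * The tag's high bits say whether this completion is for an abort,
	 * a device reset, or an abort of a device reset.
	 */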
	if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
		/* Abort and terminate completion of device reset req */
		/* REVISIT : Add asserts about various flags */
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset abts cmpl recd. id %x status %s\n",
			      id, fnic_fcpio_status_to_str(hdr_status));
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = hdr_status;
		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
		if (io_req->abts_done)
			complete(io_req->abts_done);
		spin_unlock_irqrestore(io_lock, flags);
	} else if (id & FNIC_TAG_ABORT) {
		/* Completion of abort cmd */
		switch (hdr_status) {
		case FCPIO_SUCCESS:
			break;
		case FCPIO_TIMEOUT:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_fw_timeouts);
			else
				atomic64_inc(
					&term_stats->terminate_fw_timeouts);
			break;
		case FCPIO_ITMF_REJECTED:
			FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
				"abort reject recd. id %d\n",
				(int)(id & FNIC_TAG_MASK));
			break;
		case FCPIO_IO_NOT_FOUND:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_io_not_found);
			else
				atomic64_inc(
					&term_stats->terminate_io_not_found);
			break;
		default:
			if (CMD_FLAGS(sc) & FNIC_IO_ABTS_ISSUED)
				atomic64_inc(&abts_stats->abort_failures);
			else
				atomic64_inc(
					&term_stats->terminate_failures);
			break;
		}
		if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
			/* This is a late completion. Ignore it */
			spin_unlock_irqrestore(io_lock, flags);
			return;
		}

		CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
		CMD_ABTS_STATUS(sc) = hdr_status;

		/* If the status is IO not found consider it as success */
		if (hdr_status == FCPIO_IO_NOT_FOUND)
			CMD_ABTS_STATUS(sc) = FCPIO_SUCCESS;

		if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE)))
			atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls);

		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "abts cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));

		/*
		 * If scsi_eh thread is blocked waiting for abts to complete,
		 * signal completion to it. IO will be cleaned in the thread
		 * else clean it in this context
		 */
		if (io_req->abts_done) {
			complete(io_req->abts_done);
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "abts cmpl, completing IO\n");
			CMD_SP(sc) = NULL;
			sc->result = (DID_ERROR << 16);

			spin_unlock_irqrestore(io_lock, flags);

			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
			if (sc->scsi_done) {
				FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
					sc->device->host->host_no, id,
					sc,
					jiffies_to_msecs(jiffies - start_time),
					desc,
					(((u64)hdr_status << 40) |
					(u64)sc->cmnd[0] << 32 |
					(u64)sc->cmnd[2] << 24 |
					(u64)sc->cmnd[3] << 16 |
					(u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
					(((u64)CMD_FLAGS(sc) << 32) |
					CMD_STATE(sc)));
				sc->scsi_done(sc);
				atomic64_dec(&fnic_stats->io_stats.active_ios);
				if (atomic64_read(&fnic->io_cmpl_skip))
					atomic64_dec(&fnic->io_cmpl_skip);
				else
					atomic64_inc(&fnic_stats->io_stats.io_completions);
			}
		}

	} else if (id & FNIC_TAG_DEV_RST) {
		/* Completion of device reset */
		CMD_LR_STATUS(sc) = hdr_status;
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				  sc->device->host->host_no, id, sc,
				  jiffies_to_msecs(jiffies - start_time),
				  desc, 0,
				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"Terminate pending "
				"dev reset cmpl recd. id %d status %s\n",
				(int)(id & FNIC_TAG_MASK),
				fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
			/* Need to wait for terminate completion */
			spin_unlock_irqrestore(io_lock, flags);
			FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
				  sc->device->host->host_no, id, sc,
				  jiffies_to_msecs(jiffies - start_time),
				  desc, 0,
				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				"dev reset cmpl recd after time out. "
				"id %d status %s\n",
				(int)(id & FNIC_TAG_MASK),
				fnic_fcpio_status_to_str(hdr_status));
			return;
		}
		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
		CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));
		if (io_req->dr_done)
			complete(io_req->dr_done);
		spin_unlock_irqrestore(io_lock, flags);

	} else {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unexpected itmf io state %s tag %x\n",
			     fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
		spin_unlock_irqrestore(io_lock, flags);
	}

}

/*
 * fnic_fcpio_cmpl_handler
 * Routine to service the cq for wq_copy
 */
static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
				   unsigned int cq_index,
				   struct fcpio_fw_req *desc)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

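	/*
	 * Completions of previously issued requests retire one outstanding
	 * firmware request each; ACKs are not counted here.
	 */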
	switch (desc->hdr.type) {
	case FCPIO_ICMND_CMPL: /* fw completed a command */
	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
	case FCPIO_RESET_CMPL: /* fw completed reset */
		atomic64_dec(&fnic->fnic_stats.fw_stats.active_fw_reqs);
		break;
	default:
		break;
	}

	switch (desc->hdr.type) {
	case FCPIO_ACK: /* fw copied copy wq desc to its queue */
		fnic_fcpio_ack_handler(fnic, cq_index, desc);
		break;

	case FCPIO_ICMND_CMPL: /* fw completed a command */
		fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
		break;

	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset)*/
		fnic_fcpio_itmf_cmpl_handler(fnic, desc);
		break;

	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
		fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
		break;

	case FCPIO_RESET_CMPL: /* fw completed reset */
		fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
		break;

	default:
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "firmware completion type %d\n",
			      desc->hdr.type);
		break;
	}

	return 0;
}

/*
 * fnic_wq_copy_cmpl_handler
 * Routine to process wq copy
 */
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i, cq_index;
	unsigned int cur_work_done;
	struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats;
	u64 start_jiffies = 0;
	u64 end_jiffies = 0;
	u64 delta_jiffies = 0;
	u64 delta_ms = 0;

	for (i = 0; i < fnic->wq_copy_count; i++) {
		cq_index = i + fnic->raw_wq_count + fnic->rq_count;

		start_jiffies = jiffies;
		cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
						     fnic_fcpio_cmpl_handler,
						     copy_work_to_do);
		end_jiffies = jiffies;

		wq_work_done += cur_work_done;
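
		/* Record the slowest CQ service pass in the ISR timing stats */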
		delta_jiffies = end_jiffies - start_jiffies;
		if (delta_jiffies >
			(u64) atomic64_read(&misc_stats->max_isr_jiffies)) {
			atomic64_set(&misc_stats->max_isr_jiffies,
					delta_jiffies);
			delta_ms = jiffies_to_msecs(delta_jiffies);
			atomic64_set(&misc_stats->max_isr_time_ms, delta_ms);
			atomic64_set(&misc_stats->corr_work_done,
					cur_work_done);
		}
	}
	return wq_work_done;
}

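/*
 * Walk all IO tags and complete any command the driver still owns, e.g.
 * after a firmware reset that returned no individual completions.
 */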
static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
{
	int i;
	struct fnic_io_req *io_req;
	unsigned long flags = 0;
	struct scsi_cmnd *sc;
	spinlock_t *io_lock;
	unsigned long start_time = 0;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;

	for (i = 0; i < fnic->fnic_max_tag_id; i++) {
		if (i == exclude_id)
			continue;

		io_lock = fnic_io_lock_tag(fnic, i);
		spin_lock_irqsave(io_lock, flags);
		sc = scsi_host_find_tag(fnic->lport->host, i);
		if (!sc) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
			!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
			/*
			 * We will be here only when FW completes reset
			 * without sending completions for outstanding ios.
			 */
			CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
			if (io_req && io_req->dr_done)
				complete(io_req->dr_done);
			else if (io_req && io_req->abts_done)
				complete(io_req->abts_done);
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		} else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			goto cleanup_scsi_cmd;
		}

		CMD_SP(sc) = NULL;

		spin_unlock_irqrestore(io_lock, flags);

		/*
		 * If there is a scsi_cmnd associated with this io_req, then
		 * free the corresponding state
		 */
		start_time = io_req->start_time;
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);

cleanup_scsi_cmd:
		sc->result = DID_TRANSPORT_DISRUPTED << 16;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "%s: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
			      __func__, sc->request->tag, sc,
			      (jiffies - start_time));

		if (atomic64_read(&fnic->io_cmpl_skip))
			atomic64_dec(&fnic->io_cmpl_skip);
		else
			atomic64_inc(&fnic_stats->io_stats.io_completions);

		/* Complete the command to SCSI */
		if (sc->scsi_done) {
			if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED))
				shost_printk(KERN_ERR, fnic->lport->host,
				"Calling done for IO not issued to fw: tag:0x%x sc:0x%p\n",
				 sc->request->tag, sc);

			FNIC_TRACE(fnic_cleanup_io,
				  sc->device->host->host_no, i, sc,
				  jiffies_to_msecs(jiffies - start_time),
				  0, ((u64)sc->cmnd[0] << 32 |
				  (u64)sc->cmnd[2] << 24 |
				  (u64)sc->cmnd[3] << 16 |
				  (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
				  (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));

			sc->scsi_done(sc);
		}