// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for the MMC / SD / SDIO IP found in:
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * Copyright (C) 2015-17 Renesas Electronics Corporation
 * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
 * Copyright (C) 2017 Horms Solutions, Simon Horman
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton
 *
 * This driver draws mainly on scattered spec sheets, reverse engineering
 * of the Toshiba e800 SD driver and some parts of the 2.4 ASIC3 driver (4 bit
 * support). (Further 4 bit support from a later datasheet.)
 *
 * TODO:
 *   Investigate using a workqueue for PIO transfers
 *   Eliminate FIXMEs
 *   Better power management
 *   Handle MMC errors better
 *   Double buffer support
 */

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/workqueue.h>

#include "tmio_mmc.h"

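/*
 * All DMA handling is hidden behind the optional host->dma_ops vtable so
 * that platform back ends (the Renesas SDHI glue, for instance) can supply
 * their own engines. Each wrapper below simply degrades to a no-op -- and
 * therefore to PIO -- when no ops were registered.
 */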
static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
				      struct mmc_data *data)
{
	if (host->dma_ops)
		host->dma_ops->start(host, data);
}

static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
{
	if (host->dma_ops)
		host->dma_ops->enable(host, enable);
}

static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
					struct tmio_mmc_data *pdata)
{
	if (host->dma_ops) {
		host->dma_ops->request(host, pdata);
	} else {
		host->chan_tx = NULL;
		host->chan_rx = NULL;
	}
}

static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->release(host);
}

static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->abort(host);
}

static inline void tmio_mmc_dataend_dma(struct tmio_mmc_host *host)
{
	if (host->dma_ops)
		host->dma_ops->dataend(host);
}

void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
EXPORT_SYMBOL_GPL(tmio_mmc_enable_mmc_irqs);

void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ);
	sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask);
}
EXPORT_SYMBOL_GPL(tmio_mmc_disable_mmc_irqs);

static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i)
{
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, ~i);
}

static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data)
{
	host->sg_len = data->sg_len;
	host->sg_ptr = data->sg;
	host->sg_orig = data->sg;
	host->sg_off = 0;
}

static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
{
	host->sg_ptr = sg_next(host->sg_ptr);
	host->sg_off = 0;
	return --host->sg_len;
}

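/* How long (in ms) to wait for a hardware interrupt before giving up on a request */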
#define CMDREQ_TIMEOUT	5000

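/*
 * While SDIO card interrupts are enabled, a runtime-PM reference is held so
 * the controller cannot autosuspend underneath an active card IRQ; the
 * pm_runtime_get_sync()/pm_runtime_put_autosuspend() calls below must stay
 * balanced across the enable/disable transitions.
 */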
static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (enable && !host->sdio_irq_enabled) {
		u16 sdio_status;

		/* Keep device active while SDIO irq is enabled */
		pm_runtime_get_sync(mmc_dev(mmc));

		host->sdio_irq_enabled = true;
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ;

		/* Clear obsolete interrupts before enabling */
		sdio_status = sd_ctrl_read16(host, CTL_SDIO_STATUS) & ~TMIO_SDIO_MASK_ALL;
		if (host->pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
			sdio_status |= TMIO_SDIO_SETBITS_MASK;
		sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
	} else if (!enable && host->sdio_irq_enabled) {
		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL;
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);

		host->sdio_irq_enabled = false;
		pm_runtime_mark_last_busy(mmc_dev(mmc));
		pm_runtime_put_autosuspend(mmc_dev(mmc));
	}
}

static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
{
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));

	/* HW engineers overrode docs: no sleep needed on R-Car2+ */
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		usleep_range(10000, 11000);

	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
		usleep_range(10000, 11000);
	}
}

static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
{
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
		usleep_range(10000, 11000);
	}

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));

	/* HW engineers overrode docs: no sleep needed on R-Car2+ */
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		usleep_range(10000, 11000);
}
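
/*
 * A worked example of the divider search in tmio_mmc_set_clock() below
 * (the numbers are illustrative, not from a datasheet): `clock` starts at
 * the base clock divided by 512, matching the 0x80 divider bits seeded in
 * `clk`. Each loop pass doubles `clock` and shifts the divider right.
 * With a 200 MHz base and a 25 MHz request, six passes end with divider
 * bits 0x02, i.e. 200 MHz / 8 = 25 MHz. A set bit 22 surviving in `clk`
 * marks the 1/1 case handled by TMIO_MMC_CLK_ACTUAL and set_clk_div().
 */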

static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
			       unsigned int new_clock)
{
	u32 clk = 0, clock;

	if (new_clock == 0) {
		tmio_mmc_clk_stop(host);
		return;
	}

	/*
	 * Both HS400 and HS200/SDR104 set 200MHz, but some devices need to
	 * set 400MHz to distinguish the CPG settings in HS400.
	 */
	if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 &&
	    host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400 &&
	    new_clock == 200000000)
		new_clock = 400000000;

	if (host->clk_update)
		clock = host->clk_update(host, new_clock) / 512;
	else
		clock = host->mmc->f_min;

	for (clk = 0x80000080; new_clock >= (clock << 1); clk >>= 1)
		clock <<= 1;

	/* 1/1 clock is an option */
	if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) &&
	    ((clk >> 22) & 0x1)) {
		if (!(host->mmc->ios.timing == MMC_TIMING_MMC_HS400))
			clk |= 0xff;
		else
			clk &= ~0xff;
	}

	if (host->set_clk_div)
		host->set_clk_div(host->pdev, (clk >> 22) & 1);

	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
			sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
	if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2))
		usleep_range(10000, 11000);

	tmio_mmc_clk_start(host);
}
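
/*
 * Full soft reset of the SD core and, where the high register set exists,
 * the SDIO portion as well. Note it also rearms the SDIO IRQ mask,
 * presumably because the reset clobbers it.
 */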

static void tmio_mmc_reset(struct tmio_mmc_host *host)
{
	/* FIXME - should we set stop clock reg here */
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
	usleep_range(10000, 11000);
	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001);
	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001);
	usleep_range(10000, 11000);

	if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask);
		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001);
	}
}

static void tmio_mmc_reset_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  delayed_reset_work.work);
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);
	mrq = host->mrq;

	/*
	 * Is the request already finished? Since we use a non-blocking
	 * cancel_delayed_work(), it can happen that a .set_ios() call preempts
	 * us, so we have to check for IS_ERR(host->mrq).
	 */
	if (IS_ERR_OR_NULL(mrq) ||
	    time_is_after_jiffies(host->last_req_ts +
				  msecs_to_jiffies(CMDREQ_TIMEOUT))) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	dev_warn(&host->pdev->dev,
		 "timeout waiting for hardware interrupt (CMD%u)\n",
		 mrq->cmd->opcode);

	if (host->data)
		host->data->error = -ETIMEDOUT;
	else if (host->cmd)
		host->cmd->error = -ETIMEDOUT;
	else
		mrq->cmd->error = -ETIMEDOUT;

	host->cmd = NULL;
	host->data = NULL;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_mmc_reset(host);

	/* Ready for new calls */
	host->mrq = NULL;

	tmio_mmc_abort_dma(host);
	mmc_request_done(host->mmc, mrq);
}

/* These are the bitmasks the tmio chip requires to implement the MMC response
 * types. Note that R1 and R6 are the same in this scheme. */
#define APP_CMD        0x0040
#define RESP_NONE      0x0300
#define RESP_R1        0x0400
#define RESP_R1B       0x0500
#define RESP_R2        0x0600
#define RESP_R3        0x0700
#define DATA_PRESENT   0x0800
#define TRANSFER_READ  0x1000
#define TRANSFER_MULTI 0x2000
#define SECURITY_CMD   0x4000
#define NO_CMD12_ISSUE 0x4000 /* TMIO_MMC_HAVE_CMD12_CTRL */

static int tmio_mmc_start_command(struct tmio_mmc_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = host->data;
	int c = cmd->opcode;

	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_NONE: c |= RESP_NONE; break;
	case MMC_RSP_R1:
	case MMC_RSP_R1_NO_CRC:
			   c |= RESP_R1;   break;
	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
	case MMC_RSP_R2:   c |= RESP_R2;   break;
	case MMC_RSP_R3:   c |= RESP_R3;   break;
	default:
		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd));
		return -EINVAL;
	}

	host->cmd = cmd;

/* FIXME - this seems to be OK commented out but the spec suggests this bit
 *         should be set when issuing app commands.
 *	if (cmd->flags & MMC_FLAG_ACMD)
 *		c |= APP_CMD;
 */
	if (data) {
		c |= DATA_PRESENT;
		if (data->blocks > 1) {
			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, TMIO_STOP_SEC);
			c |= TRANSFER_MULTI;

			/*
			 * Disable auto CMD12 at IO_RW_EXTENDED and
			 * SET_BLOCK_COUNT when doing multiple block transfer
			 */
			if ((host->pdata->flags & TMIO_MMC_HAVE_CMD12_CTRL) &&
			    (cmd->opcode == SD_IO_RW_EXTENDED || host->mrq->sbc))
				c |= NO_CMD12_ISSUE;
		}
		if (data->flags & MMC_DATA_READ)
			c |= TRANSFER_READ;
	}

	tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD);

	/* Fire off the command */
	sd_ctrl_write32_as_16_and_16(host, CTL_ARG_REG, cmd->arg);
	sd_ctrl_write16(host, CTL_SD_CMD, c);

	return 0;
}

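/*
 * PIO path: data is pumped manually through the 16-bit data port (or the
 * 32-bit port on TMIO_MMC_32BIT_DATA_PORT hardware). Buffers that are not
 * a multiple of the port width need the byte-wise tail fixup at the end.
 */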
static void tmio_mmc_transfer_data(struct tmio_mmc_host *host,
				   unsigned short *buf,
				   unsigned int count)
{
	int is_read = host->data->flags & MMC_DATA_READ;
	u8  *buf8;

	/*
	 * Transfer the data
	 */
	if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) {
		u32 data = 0;
		u32 *buf32 = (u32 *)buf;

		if (is_read)
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, buf32,
					   count >> 2);
		else
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, buf32,
					    count >> 2);

		/* if count was multiple of 4 */
		if (!(count & 0x3))
			return;

		buf32 += count >> 2;
		count %= 4;

		if (is_read) {
			sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, &data, 1);
			memcpy(buf32, &data, count);
		} else {
			memcpy(&data, buf32, count);
			sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, &data, 1);
		}

		return;
	}

	if (is_read)
		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);
	else
		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1);

	/* if count was even number */
	if (!(count & 0x1))
		return;

	/* if count was odd number */
	buf8 = (u8 *)(buf + (count >> 1));

	/*
	 * FIXME
	 *
	 * The driver and this function assume little-endian usage.
	 */
	if (is_read)
		*buf8 = sd_ctrl_read16(host, CTL_SD_DATA_PORT) & 0xff;
	else
		sd_ctrl_write16(host, CTL_SD_DATA_PORT, *buf8);
}

/*
 * This chip always returns (at least?) as much data as you ask for.
 * I'm unsure what happens if you ask for less than a block. This should be
 * looked into to ensure that a funny length read doesn't hose the controller.
 */
static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	void *sg_virt;
	unsigned short *buf;
	unsigned int count;
	unsigned long flags;

	if ((host->chan_tx || host->chan_rx) && !host->force_pio) {
		pr_err("PIO IRQ in DMA mode!\n");
		return;
	} else if (!data) {
		pr_debug("Spurious PIO IRQ\n");
		return;
	}

	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags);
	buf = (unsigned short *)(sg_virt + host->sg_off);

	count = host->sg_ptr->length - host->sg_off;
	if (count > data->blksz)
		count = data->blksz;

	pr_debug("count: %08x offset: %08x flags %08x\n",
		 count, host->sg_off, data->flags);

	/* Transfer the data */
	tmio_mmc_transfer_data(host, buf, count);

	host->sg_off += count;

	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt);

	if (host->sg_off == host->sg_ptr->length)
		tmio_mmc_next_sg(host);
}

static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host)
{
	if (host->sg_ptr == &host->bounce_sg) {
		unsigned long flags;
		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags);

		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length);
		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr);
	}
}

/* needs to be called with host->lock held */
void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
{
	struct mmc_data *data = host->data;
	struct mmc_command *stop;

	host->data = NULL;

	if (!data) {
		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
		return;
	}
	stop = data->stop;

	/* FIXME - return correct transfer count on errors */
	if (!data->error)
		data->bytes_xfered = data->blocks * data->blksz;
	else
		data->bytes_xfered = 0;

	pr_debug("Completed data request\n");

	/*
	 * FIXME: other drivers allow an optional stop command of any given type
	 *        which we don't do, as the chip can auto generate them.
	 *        Perhaps we can be smarter about when to use auto CMD12 and
	 *        only issue the auto request when we know this is the desired
	 *        stop command, allowing fallback to the stop command the
	 *        upper layers expect. For now, we do what works.
	 */

	if (data->flags & MMC_DATA_READ) {
		if (host->chan_rx && !host->force_pio)
			tmio_mmc_check_bounce_buffer(host);
		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
			host->mrq);
	} else {
		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
			host->mrq);
	}

	if (stop && !host->mrq->sbc) {
		if (stop->opcode != MMC_STOP_TRANSMISSION || stop->arg)
			dev_err(&host->pdev->dev, "unsupported stop: CMD%u,0x%x. We did CMD12,0\n",
				stop->opcode, stop->arg);

		/* fill in response from auto CMD12 */
		stop->resp[0] = sd_ctrl_read16_and_16_as_32(host, CTL_RESPONSE);

		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0);
	}

	schedule_work(&host->done);
}
EXPORT_SYMBOL_GPL(tmio_mmc_do_data_irq);

static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
{
	struct mmc_data *data;

	spin_lock(&host->lock);
	data = host->data;

	if (!data)
		goto out;

	if (stat & TMIO_STAT_CRCFAIL || stat & TMIO_STAT_STOPBIT_ERR ||
	    stat & TMIO_STAT_TXUNDERRUN)
		data->error = -EILSEQ;
	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) {
		u32 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
		bool done = false;

		/*
		 * Has all data been written out yet? Testing on SuperH showed
		 * that in most cases the first interrupt comes already with the
		 * BUSY status bit clear, but on some operations, like mount or
		 * in the beginning of a write / sync / umount, there is one
		 * DATAEND interrupt with the BUSY bit set; in these cases,
		 * waiting for one more interrupt fixes the problem.
		 */
		if (host->pdata->flags & TMIO_MMC_HAS_IDLE_WAIT) {
			if (status & TMIO_STAT_SCLKDIVEN)
				done = true;
		} else {
			if (!(status & TMIO_STAT_CMD_BUSY))
				done = true;
		}

		if (done) {
			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
			tmio_mmc_dataend_dma(host);
		}
	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_dataend_dma(host);
	} else {
		tmio_mmc_do_data_irq(host);
		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
	}
out:
	spin_unlock(&host->lock);
}

static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i, addr;

	spin_lock(&host->lock);

	if (!host->cmd) {
		pr_debug("Spurious CMD irq\n");
		goto out;
	}

	/* This controller is sicker than the PXA one. Not only do we need to
	 * drop the top 8 bits of the first response word, we also need to
	 * modify the order of the response for short response command types.
	 */

	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4)
		cmd->resp[i] = sd_ctrl_read16_and_16_as_32(host, addr);

	if (cmd->flags & MMC_RSP_136) {
		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24);
		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24);
		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24);
		cmd->resp[3] <<= 8;
	} else if (cmd->flags & MMC_RSP_R3) {
		cmd->resp[0] = cmd->resp[3];
	}

	if (stat & TMIO_STAT_CMDTIMEOUT)
		cmd->error = -ETIMEDOUT;
	else if ((stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) ||
		 stat & TMIO_STAT_STOPBIT_ERR ||
		 stat & TMIO_STAT_CMD_IDX_ERR)
		cmd->error = -EILSEQ;

	/* If there is data to handle we enable data IRQs here, and
	 * we will ultimately finish the request in the data_end handler.
	 * If there's no data or we encountered an error, finish now.
	 */
	if (host->data && (!cmd->error || cmd->error == -EILSEQ)) {
		if (host->data->flags & MMC_DATA_READ) {
			if (host->force_pio || !host->chan_rx) {
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP);
			} else {
				tmio_mmc_disable_mmc_irqs(host,
							  TMIO_MASK_READOP);
				tasklet_schedule(&host->dma_issue);
			}
		} else {
			if (host->force_pio || !host->chan_tx) {
				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
			} else {
				tmio_mmc_disable_mmc_irqs(host,
							  TMIO_MASK_WRITEOP);
				tasklet_schedule(&host->dma_issue);
			}
		}
	} else {
		schedule_work(&host->done);
	}

out:
	spin_unlock(&host->lock);
}

656
static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host,
657
				       int ireg, int status)
658
659
{
	struct mmc_host *mmc = host->mmc;
660

661
662
663
664
	/* Card insert / remove attempts */
	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT |
			TMIO_STAT_CARD_REMOVE);
665
666
667
		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) ||
		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) &&
		    !work_pending(&mmc->detect.work))
668
			mmc_detect_change(host->mmc, msecs_to_jiffies(100));
669
		return true;
670
671
	}

672
673
674
	return false;
}

static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
				  int status)
{
	/* Command completion */
	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CMDRESPEND |
				      TMIO_STAT_CMDTIMEOUT);
		tmio_mmc_cmd_irq(host, status);
		return true;
	}

	/* Data transfer */
	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ);
		tmio_mmc_pio_irq(host);
		return true;
	}

	/* Data transfer completion */
	if (ireg & TMIO_STAT_DATAEND) {
		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND);
		tmio_mmc_data_irq(host, status);
		return true;
	}

	return false;
}
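
/*
 * Note the TMIO_MMC_SDIO_STATUS_SETBITS quirk handled below: on those
 * variants, the bits outside TMIO_SDIO_MASK_ALL must be written back as
 * ones when acknowledging CTL_SDIO_STATUS.
 */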

static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;
	struct tmio_mmc_data *pdata = host->pdata;
	unsigned int ireg, status;
	unsigned int sdio_status;

	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
		return;

	status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;

	sdio_status = status & ~TMIO_SDIO_MASK_ALL;
	if (pdata->flags & TMIO_MMC_SDIO_STATUS_SETBITS)
		sdio_status |= TMIO_SDIO_SETBITS_MASK;

	sd_ctrl_write16(host, CTL_SDIO_STATUS, sdio_status);

	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
		mmc_signal_sdio_irq(mmc);
}
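
/*
 * Top-level interrupt handler. Events are dispatched in a fixed order:
 * card detect first, then SD card command/data/PIO events, and finally
 * SDIO card interrupts.
 */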

irqreturn_t tmio_mmc_irq(int irq, void *devid)
{
	struct tmio_mmc_host *host = devid;
	unsigned int ireg, status;

	status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
	ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;

	/* Clear the status except the interrupt status */
	sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);

	if (__tmio_mmc_card_detect_irq(host, ireg, status))
		return IRQ_HANDLED;
	if (__tmio_mmc_sdcard_irq(host, ireg, status))
		return IRQ_HANDLED;

	__tmio_mmc_sdio_irq(host);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(tmio_mmc_irq);

static int tmio_mmc_start_data(struct tmio_mmc_host *host,
			       struct mmc_data *data)
{
	struct tmio_mmc_data *pdata = host->pdata;

	pr_debug("setup data transfer: blocksize %08x  nr_blocks %d\n",
		 data->blksz, data->blocks);

	/* Some hardware cannot perform 2 byte requests in 4/8 bit mode */
	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4 ||
	    host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) {
		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES;

		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) {
			pr_err("%s: %d byte block unsupported in 4/8 bit mode\n",
			       mmc_hostname(host->mmc), data->blksz);
			return -EINVAL;
		}
	}

	tmio_mmc_init_sg(host, data);
	host->data = data;
	host->force_pio = false;

	/* Set transfer length / blocksize */
	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);

	tmio_mmc_start_dma(host, data);

	return 0;
}

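/*
 * host->hw_reset is an optional SoC-specific hook (used, for example, to
 * reset the sampling clock controller on Renesas parts); without it this
 * is a no-op.
 */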
static void tmio_mmc_hw_reset(struct mmc_host *mmc)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);

	if (host->hw_reset)
		host->hw_reset(host);
}

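/*
 * Tuning walks every sampling tap twice, issuing the tuning command
 * (CMD19 for SD, CMD21 for eMMC HS200) each time, records the taps that
 * returned clean data in the host->taps bitmap, and then lets the
 * platform's select_tuning() hook pick the best window.
 */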
static int tmio_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	int i, ret = 0;

	if (!host->init_tuning || !host->select_tuning)
		/* Tuning is not supported */
		goto out;

	host->tap_num = host->init_tuning(host);
	if (!host->tap_num)
		/* Tuning is not supported */
		goto out;

	if (host->tap_num * 2 >= sizeof(host->taps) * BITS_PER_BYTE) {
		dev_warn_once(&host->pdev->dev,
			"Too many taps, skipping tuning. Please consider updating size of taps field of tmio_mmc_host\n");
		goto out;
	}

	bitmap_zero(host->taps, host->tap_num * 2);

	/* Issue CMD19 twice for each tap */
	for (i = 0; i < 2 * host->tap_num; i++) {
		if (host->prepare_tuning)
			host->prepare_tuning(host, i % host->tap_num);

		ret = mmc_send_tuning(mmc, opcode, NULL);
		if (ret == 0)
			set_bit(i, host->taps);

		usleep_range(1000, 1200);
	}

	ret = host->select_tuning(host);

out:
	if (ret < 0) {
		dev_warn(&host->pdev->dev, "Tuning procedure failed\n");
		tmio_mmc_hw_reset(mmc);
	}

	return ret;
}

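/*
 * Requests carrying mrq->sbc (SET_BLOCK_COUNT, CMD23) are handled in two
 * passes: the first call issues only the sbc command, and once that
 * completes, tmio_mmc_finish_request() calls back in here to start the
 * actual data command.
 */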
static void tmio_process_mrq(struct tmio_mmc_host *host,
			     struct mmc_request *mrq)
{
	struct mmc_command *cmd;
	int ret;

	if (mrq->sbc && host->cmd != mrq->sbc) {
		cmd = mrq->sbc;
	} else {
		cmd = mrq->cmd;
		if (mrq->data) {
			ret = tmio_mmc_start_data(host, mrq->data);
			if (ret)
				goto fail;
		}
	}

	ret = tmio_mmc_start_command(host, cmd);
	if (ret)
		goto fail;

	schedule_delayed_work(&host->delayed_reset_work,
			      msecs_to_jiffies(CMDREQ_TIMEOUT));
	return;

fail:
	host->mrq = NULL;
	mrq->cmd->error = ret;
	mmc_request_done(host->mmc, mrq);
}

/* Process requests from the MMC layer */
static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct tmio_mmc_host *host = mmc_priv(mmc);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->mrq) {
		pr_debug("request not null\n");
		if (IS_ERR(host->mrq)) {
			spin_unlock_irqrestore(&host->lock, flags);
			mrq->cmd->error = -EAGAIN;
			mmc_request_done(mmc, mrq);
			return;
		}
	}

	host->last_req_ts = jiffies;
	wmb();
	host->mrq = mrq;

	spin_unlock_irqrestore(&host->lock, flags);

	tmio_process_mrq(host, mrq);
}

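/*
 * Runs from the host->done work item (not IRQ context). For a finished
 * SET_BLOCK_COUNT command, host->mrq is deliberately kept so the main
 * transfer can be chained from here via tmio_process_mrq().
 */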
static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
{
	struct mmc_request *mrq;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	mrq = host->mrq;
	if (IS_ERR_OR_NULL(mrq)) {
		spin_unlock_irqrestore(&host->lock, flags);
		return;
	}

	/* If not SET_BLOCK_COUNT, clear old data */
	if (host->cmd != mrq->sbc) {
		host->cmd = NULL;
		host->data = NULL;
		host->mrq = NULL;
	}

	cancel_delayed_work(&host->delayed_reset_work);

	spin_unlock_irqrestore(&host->lock, flags);

	if (mrq->cmd->error || (mrq->data && mrq->data->error))
		tmio_mmc_abort_dma(host);

	if (host->check_scc_error)
		host->check_scc_error(host);

	/* If SET_BLOCK_COUNT, continue with main command */
	if (host->mrq && !mrq->cmd->error) {
		tmio_process_mrq(host, mrq);
		return;
	}

	mmc_request_done(host->mmc, mrq);
}

static void tmio_mmc_done_work(struct work_struct *work)
{
	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host,
						  done);
	tmio_mmc_finish_request(host);
}

static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd)
{
	struct mmc_host *mmc = host->mmc;
	int ret = 0;

	/* .set_ios() returns void, so there is no way to report an error */

	if (host->set_pwr)
		host->set_pwr(host->pdev, 1);

	if (!IS_ERR(mmc->supply.vmmc)) {
		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
		/*
		 * Attention: empiric value. With a b43 WiFi SDIO card this
		 * delay proved necessary for reliable card-insertion probing.
		 * 100us were not enough. Is this the same 140us delay, as in
		 * tmio_mmc_set_ios()?
		 */
		usleep_range(200, 300);
	}
	/*
	 * It seems VccQ should be switched on after Vcc; this is also what
	 * the omap_hsmmc.c driver does.
	 */
	if (!IS_ERR(mmc->supply.vqmmc) && !ret) {
		ret = regulator_enable(mmc->supply.vqmmc);
		usleep_range(200, 300);
	}

	if (ret < 0)
		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n",
			ret);
}

static void tmio_mmc_power_off(struct tmio_mmc_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

	if (!IS_ERR(mmc->supply.vmmc))
		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);

	if (host->set_pwr)
		host->set_pwr(host->pdev, 0);
}

static void tmio_mmc_set_bus_width(struct tmio_mmc_host *host,
				   unsigned char bus_width)
{
	u16 reg = sd_ctrl_read16(host, CTL_SD_MEM_CARD_OPT)
				& ~(CARD_OPT_WIDTH | CARD_OPT_WIDTH8);

	/* reg now applies to MMC_BUS_WIDTH_4 */
	if (bus_width == MMC_BUS_WIDTH_1)
		reg |= CARD_OPT_WIDTH;
	else if (bus_width == MMC_BUS_WIDTH_8)
		reg |= CARD_OPT_WIDTH8;

	sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, reg);
}
