sdhci.c 115 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
Pierre Ossman's avatar
Pierre Ossman committed
3
 *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
4
 *
5
 *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
6
 *
7
8
9
 * Thanks to the following companies for their support:
 *
 *     - JMicron (hardware and technical support)
10
11
12
 */

#include <linux/delay.h>
13
#include <linux/ktime.h>
14
#include <linux/highmem.h>
15
#include <linux/io.h>
16
#include <linux/module.h>
17
#include <linux/dma-mapping.h>
18
#include <linux/slab.h>
19
#include <linux/scatterlist.h>
20
#include <linux/sizes.h>
21
#include <linux/swiotlb.h>
22
#include <linux/regulator/consumer.h>
23
#include <linux/pm_runtime.h>
24
#include <linux/of.h>
25

26
27
#include <linux/leds.h>

28
#include <linux/mmc/mmc.h>
29
#include <linux/mmc/host.h>
30
#include <linux/mmc/card.h>
31
#include <linux/mmc/sdio.h>
32
#include <linux/mmc/slot-gpio.h>
33
34
35
36
37
38

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
39
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
40

41
42
43
#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

44
45
#define MAX_TUNING_LOOP 40

46
static unsigned int debug_quirks = 0;
47
static unsigned int debug_quirks2;
48

49
50
static void sdhci_finish_data(struct sdhci_host *);

51
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
52

53
void sdhci_dumpregs(struct sdhci_host *host)
54
{
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
81
82
	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
83
84
85
86
87
88
89
90
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
91
92
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
93
	SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
94
95
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
96
97
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));
98

99
	if (host->flags & SDHCI_USE_ADMA) {
100
101
102
103
104
105
106
107
108
109
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
110
	}
111

112
	SDHCI_DUMP("============================================\n");
113
}
114
EXPORT_SYMBOL_GPL(sdhci_dumpregs);
115
116
117
118
119
120
121

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

122
123
124
125
static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
{
	u16 ctrl2;

126
	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
127
128
129
130
	if (ctrl2 & SDHCI_CTRL_V4_MODE)
		return;

	ctrl2 |= SDHCI_CTRL_V4_MODE;
131
	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
132
133
134
135
136
137
138
139
140
141
142
143
144
}

/*
 * This can be called before sdhci_add_host() by a vendor's host controller
 * driver to enable v4 mode if supported.  It records the choice in
 * host->v4_mode (consulted again on every re-init) and programs the
 * hardware immediately.
 */
void sdhci_enable_v4_mode(struct sdhci_host *host)
{
	host->v4_mode = true;
	sdhci_do_enable_v4_mode(host);
}
EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);

145
146
147
148
149
static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

150
151
/*
 * Enable or disable the card insert/remove interrupts.  When enabling,
 * arm only the event that can happen next given the current card state.
 * No-op for hosts with broken card detect or non-removable cards.
 */
static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
				      SDHCI_CARD_PRESENT;

		/* Card in -> watch for removal; card out -> watch for insert */
		if (present)
			host->ier |= SDHCI_INT_CARD_REMOVE;
		else
			host->ier |= SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

/* Arm the card insert/remove interrupts (honours broken-CD quirks). */
static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

/* Mask the card insert/remove interrupts. */
static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
/* Take a runtime-PM reference while SD bus power is on (idempotent). */
static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (!host->bus_on) {
		host->bus_on = true;
		pm_runtime_get_noresume(host->mmc->parent);
	}
}

/* Drop the runtime-PM reference taken by sdhci_runtime_pm_bus_on(). */
static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;

	host->bus_on = false;
	pm_runtime_put_noidle(host->mmc->parent);
}

198
/*
 * Issue a software reset for the lines selected by @mask and poll until
 * the controller clears the bits, giving up after 100 ms.  A RESET_ALL
 * also kills the clock state and, on quirky hosts, the bus-power PM ref.
 */
void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t deadline;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;

		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms; hw clears the bit when it's done */
	deadline = ktime_add_ms(ktime_get(), 100);

	for (;;) {
		bool expired = ktime_after(ktime_get(), deadline);

		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
			break;

		if (expired) {
			pr_err("%s: Reset 0x%x never completed.\n",
				mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}

		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

/*
 * Reset via the host's ->reset() op, skipping the reset entirely when the
 * NO_CARD_NO_RESET quirk applies and no card is present.  After a full
 * reset, re-enable DMA and forget any cached preset-value state.
 */
static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (!(mask & SDHCI_RESET_ALL))
		return;

	if ((host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) &&
	    host->ops->enable_dma)
		host->ops->enable_dma(host);

	/* Resetting the controller clears many */
	host->preset_enabled = false;
}

253
static void sdhci_set_default_irqs(struct sdhci_host *host)
254
{
255
256
257
258
259
260
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

261
262
263
264
	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

265
266
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
267
268
}

269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
/*
 * Program the DMA Select field of HOST_CONTROL (and, where relevant, the
 * 64-bit address enable in HOST_CONTROL2) to match host->flags.  Called
 * for every request, including PIO ones, so the selection never goes
 * stale.  No-op on pre-2.00 hosts, which have no DMA select bits.
 */
static void sdhci_config_dma(struct sdhci_host *host)
{
	u8 ctrl;
	u16 ctrl2;

	if (host->version < SDHCI_SPEC_200)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (!(host->flags & SDHCI_REQ_USE_DMA))
		goto out;

	/* Note if DMA Select is zero then SDMA is selected */
	if (host->flags & SDHCI_USE_ADMA)
		ctrl |= SDHCI_CTRL_ADMA32;

	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		/*
		 * If v4 mode, all supported DMA can be 64-bit addressing if
		 * controller supports 64-bit system address, otherwise only
		 * ADMA can support 64-bit addressing.
		 */
		if (host->v4_mode) {
			ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
			sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
		} else if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
			 * set SDHCI_CTRL_ADMA64.
			 */
			ctrl |= SDHCI_CTRL_ADMA64;
		}
	}

out:
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

315
316
317
318
319
320
321
322
323
static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

324
325
326
	if (host->v4_mode)
		sdhci_do_enable_v4_mode(host);

327
	sdhci_set_default_irqs(host);
328

Adrian Hunter's avatar
Adrian Hunter committed
329
330
	host->cqe_on = false;

331
332
333
	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
334
		mmc->ops->set_ios(mmc, &mmc->ios);
335
	}
336
}
337

338
339
/* Hard re-init (RESET_ALL) followed by re-arming card detection. */
static void sdhci_reinit(struct sdhci_host *host)
{
	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);
}

344
static void __sdhci_led_activate(struct sdhci_host *host)
345
346
347
{
	u8 ctrl;

348
349
350
	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

351
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
352
	ctrl |= SDHCI_CTRL_LED;
353
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
354
355
}

356
static void __sdhci_led_deactivate(struct sdhci_host *host)
357
358
359
{
	u8 ctrl;

360
361
362
	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

363
	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
364
	ctrl &= ~SDHCI_CTRL_LED;
365
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
366
367
}

368
#if IS_REACHABLE(CONFIG_LEDS_CLASS)
369
static void sdhci_led_control(struct led_classdev *led,
370
			      enum led_brightness brightness)
371
372
373
374
375
376
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

377
378
379
	if (host->runtime_suspended)
		goto out;

380
	if (brightness == LED_OFF)
381
		__sdhci_led_deactivate(host);
382
	else
383
		__sdhci_led_activate(host);
384
out:
385
386
	spin_unlock_irqrestore(&host->lock, flags);
}
387
388
389
390
391

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

392
393
394
	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return 0;

395
396
397
398
399
400
401
402
403
404
405
406
407
	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
408
409
410
	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
	led_classdev_unregister(&host->led);
}

/*
 * With CONFIG_LEDS_CLASS the LED is driven via the led_classdev callback
 * (sdhci_led_control), so the request-path hooks are no-ops here.
 */
static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

/*
 * Without CONFIG_LEDS_CLASS there is no LED device to register; the
 * request path drives the controller's LED bit directly instead.
 */
static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

443
444
#endif

445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
/*
 * (Re)arm the software timeout for @mrq: data-line commands use the data
 * timer, everything else the command timer.
 */
static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	struct timer_list *t = sdhci_data_line_cmd(mrq->cmd) ?
			       &host->data_timer : &host->timer;

	mod_timer(t, timeout);
}

/* Cancel whichever software timeout sdhci_mod_timer() armed for @mrq. */
static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	struct timer_list *t = sdhci_data_line_cmd(mrq->cmd) ?
			       &host->data_timer : &host->timer;

	del_timer(t);
}

/* True while a command or a data-line command is still in flight. */
static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}

467
468
469
470
471
472
/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

Pierre Ossman's avatar
Pierre Ossman committed
473
static void sdhci_read_block_pio(struct sdhci_host *host)
474
{
475
476
	unsigned long flags;
	size_t blksize, len, chunk;
477
	u32 uninitialized_var(scratch);
478
	u8 *buf;
479

Pierre Ossman's avatar
Pierre Ossman committed
480
	DBG("PIO reading\n");
481

Pierre Ossman's avatar
Pierre Ossman committed
482
	blksize = host->data->blksz;
483
	chunk = 0;
484

485
	local_irq_save(flags);
486

Pierre Ossman's avatar
Pierre Ossman committed
487
	while (blksize) {
Fabio Estevam's avatar
Fabio Estevam committed
488
		BUG_ON(!sg_miter_next(&host->sg_miter));
489

490
		len = min(host->sg_miter.length, blksize);
491

492
493
		blksize -= len;
		host->sg_miter.consumed = len;
494

495
		buf = host->sg_miter.addr;
496

497
498
		while (len) {
			if (chunk == 0) {
499
				scratch = sdhci_readl(host, SDHCI_BUFFER);
500
				chunk = 4;
Pierre Ossman's avatar
Pierre Ossman committed
501
			}
502
503
504
505
506
507
508

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
509
		}
Pierre Ossman's avatar
Pierre Ossman committed
510
	}
511
512
513
514

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
Pierre Ossman's avatar
Pierre Ossman committed
515
}
516

Pierre Ossman's avatar
Pierre Ossman committed
517
518
static void sdhci_write_block_pio(struct sdhci_host *host)
{
519
520
521
522
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;
523

Pierre Ossman's avatar
Pierre Ossman committed
524
525
526
	DBG("PIO writing\n");

	blksize = host->data->blksz;
527
528
	chunk = 0;
	scratch = 0;
529

530
	local_irq_save(flags);
531

Pierre Ossman's avatar
Pierre Ossman committed
532
	while (blksize) {
Fabio Estevam's avatar
Fabio Estevam committed
533
		BUG_ON(!sg_miter_next(&host->sg_miter));
Pierre Ossman's avatar
Pierre Ossman committed
534

535
536
537
538
539
540
		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;
541

542
543
544
545
546
547
548
549
		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
550
				sdhci_writel(host, scratch, SDHCI_BUFFER);
551
552
				chunk = 0;
				scratch = 0;
553
554
555
			}
		}
	}
556
557
558
559

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
Pierre Ossman's avatar
Pierre Ossman committed
560
561
562
563
564
565
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

566
	if (host->blocks == 0)
Pierre Ossman's avatar
Pierre Ossman committed
567
568
569
570
571
572
573
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

574
575
576
577
578
579
580
581
582
	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
		(host->data->blocks == 1))
		mask = ~0;

583
	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
584
585
586
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

Pierre Ossman's avatar
Pierre Ossman committed
587
588
589
590
		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);
591

592
593
		host->blocks--;
		if (host->blocks == 0)
Pierre Ossman's avatar
Pierre Ossman committed
594
595
			break;
	}
596

Pierre Ossman's avatar
Pierre Ossman committed
597
	DBG("PIO transfer complete.\n");
598
599
}

600
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
601
				  struct mmc_data *data, int cookie)
602
603
604
{
	int sg_count;

605
606
607
608
609
	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
610
611
		return data->sg_count;

612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			sg_copy_to_buffer(data->sg, data->sg_len,
					  host->bounce_buffer,
					  length);
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(host->mmc->parent,
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}
641
642
643
644
645

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
646
	data->host_cookie = cookie;
647
648
649
650

	return sg_count;
}

651
652
653
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
654
	return kmap_atomic(sg_page(sg)) + sg->offset;
655
656
657
658
}

/* Undo sdhci_kmap_atomic(): unmap and restore the saved IRQ state. */
static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

663
664
/*
 * Fill one ADMA2 descriptor (little-endian) at *desc and advance *desc
 * by the host's descriptor size.  The high address word is written only
 * when 64-bit DMA descriptors are in use.
 */
void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
			   dma_addr_t addr, int len, unsigned int cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = *desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));

	*desc += host->desc_sz;
}
EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);

/* Dispatch to the host's descriptor writer, or the generic one. */
static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
					   void **desc, dma_addr_t addr,
					   int len, unsigned int cmd)
{
	if (host->ops->adma_write_desc)
		host->ops->adma_write_desc(host, desc, addr, len, cmd);
	else
		sdhci_adma_write_desc(host, desc, addr, len, cmd);
}

690
691
static void sdhci_adma_mark_end(void *desc)
{
692
	struct sdhci_adma2_64_desc *dma_desc = desc;
693

694
	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
695
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
696
697
}

698
699
static void sdhci_adma_table_pre(struct sdhci_host *host,
	struct mmc_data *data, int sg_count)
700
701
702
{
	struct scatterlist *sg;
	unsigned long flags;
703
704
705
706
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;
707
708
709
710
711
712

	/*
	 * The spec does not specify endianness of descriptor table.
	 * We currently guess that it is LE.
	 */

713
	host->sg_count = sg_count;
714

715
	desc = host->adma_table;
716
717
718
719
720
721
722
723
724
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
725
726
727
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
728
729
		 * alignment.
		 */
730
731
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
732
733
734
735
736
737
738
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

739
			/* tran, valid */
740
741
			__sdhci_adma_write_desc(host, &desc, align_addr,
						offset, ADMA2_TRAN_VALID);
742
743
744

			BUG_ON(offset > 65536);

745
746
			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;
747
748
749
750
751
752
753

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

754
755
756
757
		/* tran, valid */
		if (len)
			__sdhci_adma_write_desc(host, &desc, addr, len,
						ADMA2_TRAN_VALID);
758
759
760
761
762

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
763
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
764
765
	}

766
	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
767
		/* Mark the last descriptor as the terminating descriptor */
768
		if (desc != host->adma_table) {
769
			desc -= host->desc_sz;
770
			sdhci_adma_mark_end(desc);
771
772
		}
	} else {
773
		/* Add a terminating entry - nop, end, valid */
774
		__sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
775
	}
776
777
778
779
780
781
782
}

static void sdhci_adma_table_post(struct sdhci_host *host,
	struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
783
	void *align;
784
785
786
	char *buffer;
	unsigned long flags;

787
788
	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;
789

790
791
792
793
794
795
		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}
796

797
798
		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
799
					    data->sg_len, DMA_FROM_DEVICE);
800

801
			align = host->align_buffer;
802

803
804
805
806
807
808
809
810
			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);
811

812
813
					align += SDHCI_ADMA2_ALIGN;
				}
814
815
816
817
818
			}
		}
	}
}

819
820
821
822
823
824
825
/*
 * Program the ADMA descriptor table address; the high 32 bits are only
 * written when the host uses 64-bit DMA descriptors.
 */
static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
}

826
static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
827
828
829
830
831
832
833
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}

834
835
/*
 * Program the SDMA address.  In v4 mode the ADMA address registers carry
 * the SDMA system address; otherwise the classic DMA address register is
 * used.
 */
static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	if (host->v4_mode) {
		sdhci_set_adma_addr(host, addr);
		return;
	}

	sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
}

842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
/*
 * Compute the timeout required for @cmd (busy timeout) or @data
 * (timeout_ns plus timeout_clks converted via host->clock), in
 * microseconds, rounding up.
 */
static unsigned int sdhci_target_timeout(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 struct mmc_data *data)
{
	unsigned int target_timeout;

	/* timeout in us */
	if (!data) {
		target_timeout = cmd->busy_timeout * 1000;
	} else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz.  target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz.  Round up.
			 */
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}

871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
/*
 * Compute host->data_timeout (ns) for software timeout handling when the
 * hardware timeout is disabled: the per-block target timeout scaled to
 * the whole transfer, plus twice the raw wire-transfer time per block as
 * a safety margin, plus a fixed command-overhead allowance.
 */
static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		freq = host->mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}

904
905
/*
 * Compute the TIMEOUT_CONTROL register value (0x0-0xE) needed to cover
 * @cmd's timeout.  *too_big is set when the requested timeout exceeds
 * what the hardware counter can represent (callers may then fall back to
 * a software timeout).  Fixes the "asume" typo in the original comment.
 */
static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data;
	unsigned target_timeout, current_timeout;

	*too_big = true;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE.  The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified command, assume max */
	if (cmd == NULL)
		return 0xE;

	data = cmd->data;
	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 *     =>
	 *     (1) / (2) > 2^6
	 */
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
			DBG("Too large timeout 0x%x requested for CMD%d!\n",
			    count, cmd->opcode);
		count = 0xE;
	} else {
		*too_big = false;
	}

	return count;
}

965
966
967
968
969
970
static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
971
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
972
	else
973
974
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

975
976
977
978
979
	if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
		host->ier |= SDHCI_INT_AUTO_CMD_ERR;
	else
		host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;

980
981
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
982
983
}

984
985
986
987
988
989
990
991
992
993
/* Enable/disable the data-timeout interrupt in both IRQ mask registers. */
static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
	if (enable)
		host->ier |= SDHCI_INT_DATA_TIMEOUT;
	else
		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

994
static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
995
996
{
	u8 count;
997
998
999
1000

	if (host->ops->set_timeout) {
		host->ops->set_timeout(host, cmd);
	} else {
1001
1002
1003
1004
1005
1006
		bool too_big = false;

		count = sdhci_calc_timeout(host, cmd, &too_big);

		if (too_big &&
		    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
1007
			sdhci_calc_sw_timeout(host, cmd);
1008
1009
1010
1011
1012
			sdhci_set_data_timeout_irq(host, false);
		} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
			sdhci_set_data_timeout_irq(host, true);
		}

1013
1014
1015
1016
1017
1018
		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
1019
	struct mmc_data *data = cmd->data;
1020

1021
1022
	host->data_timeout = 0;

1023
	if (sdhci_data_line_cmd(cmd))
1024
		sdhci_set_timeout(host, cmd);
1025
1026

	if (!data)
1027
1028
		return;

1029
1030
	WARN_ON(host->data);

1031
1032
1033
1034
1035
1036
1037
	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
1038
	host->data->bytes_xfered = 0;
1039

1040
	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1041
		struct scatterlist *sg;
1042
		unsigned int length_mask, offset_mask;
1043
		int i;
1044

1045
1046
1047
1048
1049
1050
1051
1052
1053
		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here being that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
1054
		length_mask = 0;
1055
		offset_mask = 0;
1056
		if (host->flags & SDHCI_USE_ADMA) {
1057
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
1058
				length_mask = 3;
1059
1060
1061
1062
1063
1064
1065
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
1066
1067
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
1068
				length_mask = 3;
1069
1070
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
1071
1072
		}

1073
		if (unlikely(length_mask | offset_mask)) {
1074
			for_each_sg(data->sg, sg, data->sg_len, i) {
1075
				if (sg->length & length_mask) {
1076
					DBG("Reverting to PIO because of transfer size (%d)\n",
1077
					    sg->length);
1078
1079
1080
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
1081
				if (sg->offset & offset_mask) {
1082
					DBG("Reverting to PIO because of bad alignment\n");
1083
1084
1085
1086
1087
1088
1089
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

1090
	if (host->flags & SDHCI_REQ_USE_DMA) {
1091
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);
1102
			sdhci_set_adma_addr(host, host->adma_addr);
1103
		} else {
1104
			WARN_ON(sg_cnt != 1);
1105
			sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
1106
1107
1108
		}
	}

1109
	sdhci_config_dma(host);
1110

1111
	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
1112
1113
1114
1115
1116
1117
1118
1119
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1120
		host->blocks = data->blocks;
1121
	}
1122

1123
1124
	sdhci_set_transfer_irqs(host);

1125
	/* Set the DMA boundary value and block size */
1126
1127
	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140

	/*
	 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
	 * can be supported, in that case 16-bit block count register must be 0.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
		if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
			sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
	} else {
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
	}
1141
1142
}

1143
1144
1145
static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
1146
1147
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
1148
1149
}

1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 u16 *mode)
{
	bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
			 (cmd->opcode != SD_IO_RW_EXTENDED);
	bool use_cmd23 = cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
	u16 ctrl2;

	/*
	 * In case of Version 4.10 or later, use of 'Auto CMD Auto
	 * Select' is recommended rather than use of 'Auto CMD12
	 * Enable' or 'Auto CMD23 Enable'.
	 */
	if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23