// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synopsys DesignWare I2C adapter driver (master only).
 *
 * Based on the TI DAVINCI I2C adapter driver.
 *
 * Copyright (C) 2006 Texas Instruments.
 * Copyright (C) 2007 MontaVista Software Inc.
 * Copyright (C) 2009 Provigent Ltd.
 */
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>

#include "i2c-designware-core.h"

#define AMD_TIMEOUT_MIN_US	25
#define AMD_TIMEOUT_MAX_US	250
#define AMD_MASTERCFG_MASK	GENMASK(15, 0)

static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
{
	/* Configure Tx/Rx FIFO threshold levels */
	regmap_write(dev->map, DW_IC_TX_TL, dev->tx_fifo_depth / 2);
	regmap_write(dev->map, DW_IC_RX_TL, 0);

	/* Configure the I2C master */
	regmap_write(dev->map, DW_IC_CON, dev->master_cfg);
}

static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
{
	u32 comp_param1;
	u32 sda_falling_time, scl_falling_time;
	struct i2c_timings *t = &dev->timings;
	const char *fp_str = "";
	u32 ic_clk;
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	ret = regmap_read(dev->map, DW_IC_COMP_PARAM_1, &comp_param1);
	i2c_dw_release_lock(dev);
	if (ret)
		return ret;

	/* Set standard and fast speed dividers for high/low periods */
	sda_falling_time = t->sda_fall_ns ?: 300; /* ns */
	scl_falling_time = t->scl_fall_ns ?: 300; /* ns */

	/* Calculate SCL timing parameters for standard mode if not set */
	if (!dev->ss_hcnt || !dev->ss_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->ss_hcnt =
			i2c_dw_scl_hcnt(ic_clk,
					4000,	/* tHD;STA = tHIGH = 4.0 us */
					sda_falling_time,
					0,	/* 0: DW default, 1: Ideal */
					0);	/* No offset */
		dev->ss_lcnt =
			i2c_dw_scl_lcnt(ic_clk,
					4700,	/* tLOW = 4.7 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Standard Mode HCNT:LCNT = %d:%d\n",
		dev->ss_hcnt, dev->ss_lcnt);

	/*
	 * Set SCL timing parameters for fast mode or fast mode plus. Only
	 * difference is the timing parameter values since the registers are
	 * the same.
	 */
	if (t->bus_freq_hz == I2C_MAX_FAST_MODE_PLUS_FREQ) {
		/*
		 * Check whether Fast Mode Plus parameters are available.
		 * Calculate the SCL timing parameters for Fast Mode Plus if
		 * they are not.
		 */
		if (dev->fp_hcnt && dev->fp_lcnt) {
			dev->fs_hcnt = dev->fp_hcnt;
			dev->fs_lcnt = dev->fp_lcnt;
		} else {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->fs_hcnt =
				i2c_dw_scl_hcnt(ic_clk,
						260,	/* tHIGH = 260 ns */
						sda_falling_time,
						0,	/* DW default */
						0);	/* No offset */
			dev->fs_lcnt =
				i2c_dw_scl_lcnt(ic_clk,
						500,	/* tLOW = 500 ns */
						scl_falling_time,
						0);	/* No offset */
		}
		fp_str = " Plus";
	}
	/*
	 * Calculate SCL timing parameters for fast mode if not set. They are
	 * needed also in high speed mode.
	 */
	if (!dev->fs_hcnt || !dev->fs_lcnt) {
		ic_clk = i2c_dw_clk_rate(dev);
		dev->fs_hcnt =
			i2c_dw_scl_hcnt(ic_clk,
					600,	/* tHD;STA = tHIGH = 0.6 us */
					sda_falling_time,
					0,	/* 0: DW default, 1: Ideal */
					0);	/* No offset */
		dev->fs_lcnt =
			i2c_dw_scl_lcnt(ic_clk,
					1300,	/* tLOW = 1.3 us */
					scl_falling_time,
					0);	/* No offset */
	}
	dev_dbg(dev->dev, "Fast Mode%s HCNT:LCNT = %d:%d\n",
		fp_str, dev->fs_hcnt, dev->fs_lcnt);

	/* Check whether high speed is possible and fall back to fast mode if not */
	if ((dev->master_cfg & DW_IC_CON_SPEED_MASK) ==
		DW_IC_CON_SPEED_HIGH) {
		if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK)
			!= DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) {
			dev_err(dev->dev, "High Speed not supported!\n");
			t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
			dev->master_cfg &= ~DW_IC_CON_SPEED_MASK;
			dev->master_cfg |= DW_IC_CON_SPEED_FAST;
			dev->hs_hcnt = 0;
			dev->hs_lcnt = 0;
		} else if (!dev->hs_hcnt || !dev->hs_lcnt) {
			ic_clk = i2c_dw_clk_rate(dev);
			dev->hs_hcnt =
				i2c_dw_scl_hcnt(ic_clk,
						160,	/* tHIGH = 160 ns */
						sda_falling_time,
						0,	/* DW default */
						0);	/* No offset */
			dev->hs_lcnt =
				i2c_dw_scl_lcnt(ic_clk,
						320,	/* tLOW = 320 ns */
						scl_falling_time,
						0);	/* No offset */
		}

		dev_dbg(dev->dev, "High Speed Mode HCNT:LCNT = %d:%d\n",
			dev->hs_hcnt, dev->hs_lcnt);
	}

	ret = i2c_dw_set_sda_hold(dev);
	if (ret)
		return ret;

	dev_dbg(dev->dev, "Bus speed: %s\n", i2c_freq_mode_string(t->bus_freq_hz));
	return 0;
}

/**
 * i2c_dw_init_master() - Initialize the designware I2C master hardware
 * @dev: device private data
 *
 * This function configures and enables the I2C master.
 * It is called during I2C initialization and, in case of a transfer timeout,
 * at run time.
 */
static int i2c_dw_init_master(struct dw_i2c_dev *dev)
{
	int ret;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		return ret;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* Write standard speed timing parameters */
	regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt);
	regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt);

	/* Write fast mode/fast mode plus timing parameters */
	regmap_write(dev->map, DW_IC_FS_SCL_HCNT, dev->fs_hcnt);
	regmap_write(dev->map, DW_IC_FS_SCL_LCNT, dev->fs_lcnt);

	/* Write high speed timing parameters if supported */
	if (dev->hs_hcnt && dev->hs_lcnt) {
		regmap_write(dev->map, DW_IC_HS_SCL_HCNT, dev->hs_hcnt);
		regmap_write(dev->map, DW_IC_HS_SCL_LCNT, dev->hs_lcnt);
	}

	/* Write SDA hold time if supported */
	if (dev->sda_hold_time)
		regmap_write(dev->map, DW_IC_SDA_HOLD, dev->sda_hold_time);

	i2c_dw_configure_fifo_master(dev);
	i2c_dw_release_lock(dev);

	return 0;
}

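/*
 * Program the target address and addressing mode, re-enable the adapter and
 * unmask the master interrupts for the transfer that is about to start.
 */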
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 ic_con = 0, ic_tar = 0;
	u32 dummy;

	/* Disable the adapter */
	__i2c_dw_disable(dev);

	/* If the slave address is ten bit address, enable 10BITADDR */
	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) {
		ic_con = DW_IC_CON_10BITADDR_MASTER;
		/*
		 * If I2C_DYNAMIC_TAR_UPDATE is set, the 10-bit addressing
		 * mode has to be enabled via bit 12 of IC_TAR register.
		 * We always set it as I2C_DYNAMIC_TAR_UPDATE can't be
		 * detected from registers.
		 */
		ic_tar = DW_IC_TAR_10BITADDR_MASTER;
	}

	regmap_update_bits(dev->map, DW_IC_CON, DW_IC_CON_10BITADDR_MASTER,
			   ic_con);

	/*
	 * Set the slave (target) address and enable 10-bit addressing mode
	 * if applicable.
	 */
	regmap_write(dev->map, DW_IC_TAR,
		     msgs[dev->msg_write_idx].addr | ic_tar);

	/* Enforce disabled interrupts (due to HW issues) */
	i2c_dw_disable_int(dev);

	/* Enable the adapter */
	__i2c_dw_enable(dev);

	/* Dummy read to avoid the register getting stuck on Bay Trail */
	regmap_read(dev->map, DW_IC_ENABLE_STATUS, &dummy);

	/* Clear and enable interrupts */
	regmap_read(dev->map, DW_IC_CLR_INTR, &dummy);
	regmap_write(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_MASTER_MASK);
}

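/*
 * Poll the interrupt status until the STOP_DET bit reads back as clear.
 * Used only by the AMD NAVI GPU polling-mode transfer quirk below.
 */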
static int i2c_dw_check_stopbit(struct dw_i2c_dev *dev)
{
	u32 val;
	int ret;

	ret = regmap_read_poll_timeout(dev->map, DW_IC_INTR_STAT, val,
				       !(val & DW_IC_INTR_STOP_DET),
					1100, 20000);
	if (ret)
		dev_err(dev->dev, "i2c timeout error %d\n", ret);

	return ret;
}

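/* Wait for the bus to go idle and for any pending STOP_DET indication to clear. */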
static int i2c_dw_status(struct dw_i2c_dev *dev)
{
	int status;

	status = i2c_dw_wait_bus_not_busy(dev);
	if (status)
		return status;

	return i2c_dw_check_stopbit(dev);
}

/*
 * Initiate and continue a master read/write transaction with a polling-based
 * transfer routine, writing the messages into the Tx buffer.
 */
static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, int num_msgs)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int msg_wrt_idx, msg_itr_lmt, buf_len, data_idx;
	int cmd = 0, status;
	u8 *tx_buf;
	u32 val;

	/*
	 * In order to enable the interrupt for UCSI i.e. AMD NAVI GPU card,
	 * it is mandatory to set the right value in specific register
	 * (offset:0x474) as per the hardware IP specification.
	 */
	regmap_write(dev->map, AMD_UCSI_INTR_REG, AMD_UCSI_INTR_EN);

	dev->msgs = msgs;
	dev->msgs_num = num_msgs;
	i2c_dw_xfer_init(dev);
	i2c_dw_disable_int(dev);

	/* Initiate messages read/write transaction */
	for (msg_wrt_idx = 0; msg_wrt_idx < num_msgs; msg_wrt_idx++) {
		tx_buf = msgs[msg_wrt_idx].buf;
		buf_len = msgs[msg_wrt_idx].len;

		if (!(msgs[msg_wrt_idx].flags & I2C_M_RD))
			regmap_write(dev->map, DW_IC_TX_TL, buf_len - 1);
		/*
		 * Initiate the i2c read/write transaction of buffer length,
		 * and poll for bus busy status. For the last message transfer,
		 * update the command with stopbit enable.
		 */
		for (msg_itr_lmt = buf_len; msg_itr_lmt > 0; msg_itr_lmt--) {
			if (msg_wrt_idx == num_msgs - 1 && msg_itr_lmt == 1)
				cmd |= BIT(9);

			if (msgs[msg_wrt_idx].flags & I2C_M_RD) {
				/* Due to hardware bug, need to write the same command twice. */
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100);
				regmap_write(dev->map, DW_IC_DATA_CMD, 0x100 | cmd);
				if (cmd) {
					regmap_write(dev->map, DW_IC_TX_TL, 2 * (buf_len - 1));
					regmap_write(dev->map, DW_IC_RX_TL, 2 * (buf_len - 1));
					/*
					 * Need to check the stop bit. However, it cannot be
					 * detected from the registers so we check it always
					 * when read/write the last byte.
					 */
					status = i2c_dw_status(dev);
					if (status)
						return status;

					for (data_idx = 0; data_idx < buf_len; data_idx++) {
						regmap_read(dev->map, DW_IC_DATA_CMD, &val);
						tx_buf[data_idx] = val;
					}
					status = i2c_dw_check_stopbit(dev);
					if (status)
						return status;
				}
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD, *tx_buf++ | cmd);
				usleep_range(AMD_TIMEOUT_MIN_US, AMD_TIMEOUT_MAX_US);
			}
		}
		status = i2c_dw_check_stopbit(dev);
		if (status)
			return status;
	}

	return 0;
}

/*
 * Initiate (and continue) low level master read/write transaction.
 * This function is only called from i2c_dw_isr, and pumps the i2c_msg
 * messages into the Tx buffer.  Even if the size of the i2c_msg data is
 * longer than the size of the Tx buffer, it handles everything.
 */
static void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 intr_mask;
	int tx_limit, rx_limit;
	u32 addr = msgs[dev->msg_write_idx].addr;
	u32 buf_len = dev->tx_buf_len;
	u8 *buf = dev->tx_buf;
	bool need_restart = false;
	unsigned int flr;

	intr_mask = DW_IC_INTR_MASTER_MASK;

	for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
		u32 flags = msgs[dev->msg_write_idx].flags;

		/*
		 * If target address has changed, we need to
		 * reprogram the target address in the I2C
		 * adapter when we are done with this transfer.
		 */
		if (msgs[dev->msg_write_idx].addr != addr) {
			dev_err(dev->dev,
				"%s: invalid target address\n", __func__);
			dev->msg_err = -EINVAL;
			break;
		}

		if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
			/* new i2c_msg */
			buf = msgs[dev->msg_write_idx].buf;
			buf_len = msgs[dev->msg_write_idx].len;

			/* If both IC_EMPTYFIFO_HOLD_MASTER_EN and
			 * IC_RESTART_EN are set, we must manually
			 * set restart bit between messages.
			 */
			if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
					(dev->msg_write_idx > 0))
				need_restart = true;
		}

		regmap_read(dev->map, DW_IC_TXFLR, &flr);
		tx_limit = dev->tx_fifo_depth - flr;

		regmap_read(dev->map, DW_IC_RXFLR, &flr);
		rx_limit = dev->rx_fifo_depth - flr;

		while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
			u32 cmd = 0;

			/*
			 * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must
			 * manually set the stop bit. However, it cannot be
			 * detected from the registers so we set it always
			 * when writing/reading the last byte.
			 */

			/*
			 * i2c-core always sets the buffer length of
			 * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will
			 * be adjusted when receiving the first byte.
			 * Thus we can't stop the transaction here.
			 */
			if (dev->msg_write_idx == dev->msgs_num - 1 &&
			    buf_len == 1 && !(flags & I2C_M_RECV_LEN))
				cmd |= BIT(9);

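			/* Bit 10 of IC_DATA_CMD requests a RESTART before this byte. */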
			if (need_restart) {
				cmd |= BIT(10);
				need_restart = false;
			}

			if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {

				/* Avoid rx buffer overrun */
				if (dev->rx_outstanding >= dev->rx_fifo_depth)
					break;

				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | 0x100);
				rx_limit--;
				dev->rx_outstanding++;
			} else {
				regmap_write(dev->map, DW_IC_DATA_CMD,
					     cmd | *buf++);
			}
			tx_limit--; buf_len--;
		}

		dev->tx_buf = buf;
		dev->tx_buf_len = buf_len;

		/*
		 * Because we don't know the buffer length in the
		 * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
		 * the transaction here.
		 */
		if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
			/* more bytes to be written */
			dev->status |= STATUS_WRITE_IN_PROGRESS;
			break;
		} else
			dev->status &= ~STATUS_WRITE_IN_PROGRESS;
	}

	/*
	 * If i2c_msg index search is completed, we don't need TX_EMPTY
	 * interrupt any more.
	 */
	if (dev->msg_write_idx == dev->msgs_num)
		intr_mask &= ~DW_IC_INTR_TX_EMPTY;

	if (dev->msg_err)
		intr_mask = 0;

	regmap_write(dev->map, DW_IC_INTR_MASK, intr_mask);
}

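/*
 * Fix up the message length once the first (length) byte of an
 * I2C_M_RECV_LEN transfer has been received; the remaining bytes are then
 * queued for reading by i2c_dw_xfer_msg().
 */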
static u8
i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 flags = msgs[dev->msg_read_idx].flags;

	/*
	 * Adjust the buffer length and mask the flag
	 * after receiving the first byte.
	 */
	len += (flags & I2C_CLIENT_PEC) ? 2 : 1;
	dev->tx_buf_len = len - min_t(u8, len, dev->rx_outstanding);
	msgs[dev->msg_read_idx].len = len;
	msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;

	return len;
}

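/* Drain the Rx FIFO into the buffer(s) of the current read message(s). */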
static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	unsigned int rx_valid;

	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
		u32 len, tmp;
		u8 *buf;

		if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
			continue;

		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
			len = msgs[dev->msg_read_idx].len;
			buf = msgs[dev->msg_read_idx].buf;
		} else {
			len = dev->rx_buf_len;
			buf = dev->rx_buf;
		}

		regmap_read(dev->map, DW_IC_RXFLR, &rx_valid);

		for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
			u32 flags = msgs[dev->msg_read_idx].flags;

			regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
			/* Ensure length byte is a valid value */
			if (flags & I2C_M_RECV_LEN &&
			    (tmp & DW_IC_DATA_CMD_DAT) <= I2C_SMBUS_BLOCK_MAX && tmp > 0) {
				len = i2c_dw_recv_len(dev, tmp);
			}
			*buf++ = tmp;
			dev->rx_outstanding--;
		}

		if (len > 0) {
			dev->status |= STATUS_READ_IN_PROGRESS;
			dev->rx_buf_len = len;
			dev->rx_buf = buf;
			return;
		} else
			dev->status &= ~STATUS_READ_IN_PROGRESS;
	}
}

/*
 * Prepare controller for a transaction and call i2c_dw_xfer_msg.
 */
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int ret;

	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);

	pm_runtime_get_sync(dev->dev);

	/*
	 * Use the polling-based transfer quirk when the AMD NAVI GPU card is
	 * present, since that mechanism does not support the interrupt-based
	 * operation of the rest of the DesignWare driver.
	 */
	if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) {
		ret = amd_i2c_dw_xfer_quirk(adap, msgs, num);
		goto done_nolock;
	}

	if (dev_WARN_ONCE(dev->dev, dev->suspended, "Transfer while suspended\n")) {
		ret = -ESHUTDOWN;
		goto done_nolock;
	}

	reinit_completion(&dev->cmd_complete);
	dev->msgs = msgs;
	dev->msgs_num = num;
	dev->cmd_err = 0;
	dev->msg_write_idx = 0;
	dev->msg_read_idx = 0;
	dev->msg_err = 0;
	dev->status = STATUS_IDLE;
	dev->abort_source = 0;
	dev->rx_outstanding = 0;

	ret = i2c_dw_acquire_lock(dev);
	if (ret)
		goto done_nolock;

	ret = i2c_dw_wait_bus_not_busy(dev);
	if (ret < 0)
		goto done;

	/* Start the transfers */
	i2c_dw_xfer_init(dev);

	/* Wait for tx to complete */
	if (!wait_for_completion_timeout(&dev->cmd_complete, adap->timeout)) {
		dev_err(dev->dev, "controller timed out\n");
		/* i2c_dw_init implicitly disables the adapter */
		i2c_recover_bus(&dev->adapter);
		i2c_dw_init_master(dev);
		ret = -ETIMEDOUT;
		goto done;
	}

	/*
	 * We must disable the adapter before returning and signaling the end
	 * of the current transfer. Otherwise the hardware might continue
	 * generating interrupts which in turn causes a race condition with
	 * the following transfer.  Needs some more investigation if the
	 * additional interrupts are a hardware bug or this driver doesn't
	 * handle them correctly yet.
	 */
	__i2c_dw_disable_nowait(dev);

	if (dev->msg_err) {
		ret = dev->msg_err;
		goto done;
	}

	/* No error */
	if (likely(!dev->cmd_err && !dev->status)) {
		ret = num;
		goto done;
	}

	/* We have an error */
	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
		ret = i2c_dw_handle_tx_abort(dev);
		goto done;
	}

	if (dev->status)
		dev_err(dev->dev,
			"transfer terminated early - interrupt latency too high?\n");

	ret = -EIO;

done:
	i2c_dw_release_lock(dev);

done_nolock:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return ret;
}

static const struct i2c_algorithm i2c_dw_algo = {
	.master_xfer = i2c_dw_xfer,
	.functionality = i2c_dw_func,
};

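/* Tell the I2C core that this controller cannot do zero-length transfers. */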
static const struct i2c_adapter_quirks i2c_dw_quirks = {
	.flags = I2C_AQ_NO_ZERO_LEN,
};

static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
	u32 stat, dummy;

	/*
	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
	 * The unmasked raw version of interrupt status bits is available
	 * in the IC_RAW_INTR_STAT register.
	 *
	 * That is,
	 *   stat = readl(IC_INTR_STAT);
	 * equals to,
	 *   stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
	 *
	 * The raw version might be useful for debugging purposes.
	 */
	regmap_read(dev->map, DW_IC_INTR_STAT, &stat);

	/*
	 * Do not use the IC_CLR_INTR register to clear interrupts, or
	 * you'll miss some interrupts, triggered during the period from
	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
	 *
	 * Instead, use the separately-prepared IC_CLR_* registers.
	 */
	if (stat & DW_IC_INTR_RX_UNDER)
		regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &dummy);
	if (stat & DW_IC_INTR_RX_OVER)
		regmap_read(dev->map, DW_IC_CLR_RX_OVER, &dummy);
	if (stat & DW_IC_INTR_TX_OVER)
		regmap_read(dev->map, DW_IC_CLR_TX_OVER, &dummy);
	if (stat & DW_IC_INTR_RD_REQ)
		regmap_read(dev->map, DW_IC_CLR_RD_REQ, &dummy);
	if (stat & DW_IC_INTR_TX_ABRT) {
		/*
		 * The IC_TX_ABRT_SOURCE register is cleared whenever
		 * the IC_CLR_TX_ABRT is read.  Preserve it beforehand.
		 */
		regmap_read(dev->map, DW_IC_TX_ABRT_SOURCE, &dev->abort_source);
		regmap_read(dev->map, DW_IC_CLR_TX_ABRT, &dummy);
	}
	if (stat & DW_IC_INTR_RX_DONE)
		regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
	if (stat & DW_IC_INTR_ACTIVITY)
		regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
	if (stat & DW_IC_INTR_STOP_DET)
		regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
	if (stat & DW_IC_INTR_START_DET)
		regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
	if (stat & DW_IC_INTR_GEN_CALL)
		regmap_read(dev->map, DW_IC_CLR_GEN_CALL, &dummy);

	return stat;
}

/*
 * Interrupt service routine. This gets called whenever an I2C master interrupt
 * occurs.
 */
static int i2c_dw_irq_handler_master(struct dw_i2c_dev *dev)
{
	u32 stat;

	stat = i2c_dw_read_clear_intrbits(dev);
	if (stat & DW_IC_INTR_TX_ABRT) {
		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
		dev->status = STATUS_IDLE;

		/*
		 * Anytime TX_ABRT is set, the contents of the tx/rx
		 * buffers are flushed. Make sure to skip them.
		 */
		regmap_write(dev->map, DW_IC_INTR_MASK, 0);
		goto tx_aborted;
	}

	if (stat & DW_IC_INTR_RX_FULL)
		i2c_dw_read(dev);

	if (stat & DW_IC_INTR_TX_EMPTY)
		i2c_dw_xfer_msg(dev);

	/*
	 * No need to modify or disable the interrupt mask here.
	 * i2c_dw_xfer_msg() will take care of it according to
	 * the current transmit status.
	 */

tx_aborted:
	if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
		complete(&dev->cmd_complete);
	else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
		/* Workaround to trigger pending interrupt */
		regmap_read(dev->map, DW_IC_INTR_MASK, &stat);
		i2c_dw_disable_int(dev);
		regmap_write(dev->map, DW_IC_INTR_MASK, stat);
	}

	return 0;
}

static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
{
	struct dw_i2c_dev *dev = dev_id;
	u32 stat, enabled;

	regmap_read(dev->map, DW_IC_ENABLE, &enabled);
	regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &stat);
	dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat);
	if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY))
		return IRQ_NONE;

	i2c_dw_irq_handler_master(dev);

	return IRQ_HANDLED;
}

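/*
 * Select the bus speed configuration: standard and high speed modes are set
 * explicitly, everything else (including fast mode plus) uses the fast mode
 * bits in IC_CON; the actual SCL counts are programmed later by
 * i2c_dw_set_timings_master().
 */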
void i2c_dw_configure_master(struct dw_i2c_dev *dev)
{
	struct i2c_timings *t = &dev->timings;

	dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;

	dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
			  DW_IC_CON_RESTART_EN;

	dev->mode = DW_IC_MASTER;

	switch (t->bus_freq_hz) {
	case I2C_MAX_STANDARD_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_STD;
		break;
	case I2C_MAX_HIGH_SPEED_MODE_FREQ:
		dev->master_cfg |= DW_IC_CON_SPEED_HIGH;
		break;
	default:
		dev->master_cfg |= DW_IC_CON_SPEED_FAST;
	}
}
EXPORT_SYMBOL_GPL(i2c_dw_configure_master);

static void i2c_dw_prepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_disable(dev);
	reset_control_assert(dev->rst);
	i2c_dw_prepare_clk(dev, false);
}

static void i2c_dw_unprepare_recovery(struct i2c_adapter *adap)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);

	i2c_dw_prepare_clk(dev, true);
	reset_control_deassert(dev->rst);
	i2c_dw_init_master(dev);
}

static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
{
	struct i2c_bus_recovery_info *rinfo = &dev->rinfo;
	struct i2c_adapter *adap = &dev->adapter;
	struct gpio_desc *gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "scl", GPIOD_OUT_HIGH);
	if (IS_ERR_OR_NULL(gpio))
		return PTR_ERR_OR_ZERO(gpio);

	rinfo->scl_gpiod = gpio;

	gpio = devm_gpiod_get_optional(dev->dev, "sda", GPIOD_IN);
	if (IS_ERR(gpio))
		return PTR_ERR(gpio);
	rinfo->sda_gpiod = gpio;

	rinfo->recover_bus = i2c_generic_scl_recovery;
	rinfo->prepare_recovery = i2c_dw_prepare_recovery;
	rinfo->unprepare_recovery = i2c_dw_unprepare_recovery;
	adap->bus_recovery_info = rinfo;

	dev_info(dev->dev, "running with gpio recovery mode! scl%s",
		 rinfo->sda_gpiod ? ",sda" : "");

	return 0;
}

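/*
 * The AMD NAVI GPU controller uses the polling-based transfer quirk, so the
 * adapter is registered without installing an interrupt handler.
 */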
static int amd_i2c_adap_quirk(struct dw_i2c_dev *dev)
{
	struct i2c_adapter *adap = &dev->adapter;
	int ret;

	pm_runtime_get_noresume(dev->dev);
	ret = i2c_add_numbered_adapter(adap);
	if (ret)
		dev_err(dev->dev, "Failed to add adapter: %d\n", ret);
	pm_runtime_put_noidle(dev->dev);

	return ret;
}

int i2c_dw_probe_master(struct dw_i2c_dev *dev)
{
	struct i2c_adapter *adap = &dev->adapter;
	unsigned long irq_flags;
	int ret;

	init_completion(&dev->cmd_complete);

	dev->init = i2c_dw_init_master;
	dev->disable = i2c_dw_disable;
	dev->disable_int = i2c_dw_disable_int;

	ret = i2c_dw_init_regmap(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_timings_master(dev);
	if (ret)
		return ret;

	ret = i2c_dw_set_fifo_size(dev);
	if (ret)
		return ret;

	ret = dev->init(dev);
	if (ret)
		return ret;

	snprintf(adap->name, sizeof(adap->name),
		 "Synopsys DesignWare I2C adapter");
	adap->retries = 3;
	adap->algo = &i2c_dw_algo;
	adap->quirks = &i2c_dw_quirks;
	adap->dev.parent = dev->dev;
	i2c_set_adapdata(adap, dev);

	if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU)
		return amd_i2c_adap_quirk(dev);

	if (dev->flags & ACCESS_NO_IRQ_SUSPEND) {
		irq_flags = IRQF_NO_SUSPEND;
	} else {
		irq_flags = IRQF_SHARED | IRQF_COND_SUSPEND;
	}

	i2c_dw_disable_int(dev);
	ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, irq_flags,
			       dev_name(dev->dev), dev);
	if (ret) {
		dev_err(dev->dev, "failure requesting irq %i: %d\n",
			dev->irq, ret);
		return ret;
	}

	ret = i2c_dw_init_recovery_info(dev);
	if (ret)
		return ret;

	/*
	 * Increment PM usage count during adapter registration in order to
	 * avoid possible spurious runtime suspend when adapter device is
	 * registered to the device core and immediate resume in case bus has
	 * registered I2C slaves that do I2C transfers in their probe.
	 */
	pm_runtime_get_noresume(dev->dev);
	ret = i2c_add_numbered_adapter(adap);
	if (ret)
		dev_err(dev->dev, "failure adding adapter: %d\n", ret);
	pm_runtime_put_noidle(dev->dev);

	return ret;
}
EXPORT_SYMBOL_GPL(i2c_dw_probe_master);

MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter");
MODULE_LICENSE("GPL");