// SPDX-License-Identifier: GPL-2.0+
/*
 * f_fs.c -- user mode file system API for USB composite function controllers
 *
 * Copyright (C) 2010 Samsung Electronics
 * Author: Michal Nazarewicz <mina86@mina86.com>
 *
 * Based on inode.c (GadgetFS) which was:
 * Copyright (C) 2003-2004 David Brownell
 * Copyright (C) 2003 Agilent Technologies
 */


/* #define DEBUG */
/* #define VERBOSE_DEBUG */

#include <linux/blkdev.h>
18
#include <linux/pagemap.h>
19
#include <linux/export.h>
20
#include <linux/hid.h>
21
#include <linux/module.h>
22
#include <linux/sched/signal.h>
23
#include <linux/uio.h>
24
25
26
27
28
#include <asm/unaligned.h>

#include <linux/usb/composite.h>
#include <linux/usb/functionfs.h>

29
30
#include <linux/aio.h>
#include <linux/mmu_context.h>
31
#include <linux/poll.h>
32
#include <linux/eventfd.h>
33

34
#include "u_fs.h"
35
#include "u_f.h"
36
#include "u_os_desc.h"
37
#include "configfs.h"
38
39
40
41
42
43
44

#define FUNCTIONFS_MAGIC	0xa647361 /* Chosen by a honest dice roll ;) */

/* Reference counter handling */
static void ffs_data_get(struct ffs_data *ffs);
static void ffs_data_put(struct ffs_data *ffs);
/* Creates new ffs_data object. */
45
46
static struct ffs_data *__must_check ffs_data_new(const char *dev_name)
	__attribute__((malloc));
47
48
49
50
51

/* Opened counter handling. */
static void ffs_data_opened(struct ffs_data *ffs);
static void ffs_data_closed(struct ffs_data *ffs);

52
/* Called with ffs->mutex held; take over ownership of data. */
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
static int __must_check
__ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len);
static int __must_check
__ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len);


/* The function structure ***************************************************/

struct ffs_ep;

struct ffs_function {
	struct usb_configuration	*conf;
	struct usb_gadget		*gadget;
	struct ffs_data			*ffs;

	struct ffs_ep			*eps;
	u8				eps_revmap[16];
	short				*interfaces_nums;

	struct usb_function		function;
};


static struct ffs_function *ffs_func_from_usb(struct usb_function *f)
{
	return container_of(f, struct ffs_function, function);
}


82
83
84
85
86
87
88
89
static inline enum ffs_setup_state
ffs_setup_state_clear_cancelled(struct ffs_data *ffs)
{
	return (enum ffs_setup_state)
		cmpxchg(&ffs->setup_state, FFS_SETUP_CANCELLED, FFS_NO_SETUP);
}


90
91
92
93
94
95
96
97
98
static void ffs_func_eps_disable(struct ffs_function *func);
static int __must_check ffs_func_eps_enable(struct ffs_function *func);

static int ffs_func_bind(struct usb_configuration *,
			 struct usb_function *);
static int ffs_func_set_alt(struct usb_function *, unsigned, unsigned);
static void ffs_func_disable(struct usb_function *);
static int ffs_func_setup(struct usb_function *,
			  const struct usb_ctrlrequest *);
99
static bool ffs_func_req_match(struct usb_function *,
100
101
			       const struct usb_ctrlrequest *,
			       bool config0);
102
103
104
105
106
107
108
109
110
111
112
113
114
115
static void ffs_func_suspend(struct usb_function *);
static void ffs_func_resume(struct usb_function *);


static int ffs_func_revmap_ep(struct ffs_function *func, u8 num);
static int ffs_func_revmap_intf(struct ffs_function *func, u8 intf);


/* The endpoints structures *************************************************/

struct ffs_ep {
	struct usb_ep			*ep;	/* P: ffs->eps_lock */
	struct usb_request		*req;	/* P: epfile->mutex */

116
117
	/* [0]: full speed, [1]: high speed, [2]: super speed */
	struct usb_endpoint_descriptor	*descs[3];
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132

	u8				num;

	int				status;	/* P: epfile->mutex */
};

struct ffs_epfile {
	/* Protects ep->ep and ep->req. */
	struct mutex			mutex;

	struct ffs_data			*ffs;
	struct ffs_ep			*ep;	/* P: ffs->eps_lock */

	struct dentry			*dentry;

133
134
135
	/*
	 * Buffer for holding data from partial reads which may happen since
	 * we’re rounding user read requests to a multiple of a max packet size.
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
	 *
	 * The pointer is initialised with NULL value and may be set by
	 * __ffs_epfile_read_data function to point to a temporary buffer.
	 *
	 * In normal operation, calls to __ffs_epfile_read_buffered will consume
	 * data from said buffer and eventually free it.  Importantly, while the
	 * function is using the buffer, it sets the pointer to NULL.  This is
	 * all right since __ffs_epfile_read_data and __ffs_epfile_read_buffered
	 * can never run concurrently (they are synchronised by epfile->mutex)
	 * so the latter will not assign a new value to the pointer.
	 *
	 * Meanwhile ffs_func_eps_disable frees the buffer (if the pointer is
	 * valid) and sets the pointer to READ_BUFFER_DROP value.  This special
	 * value is crux of the synchronisation between ffs_func_eps_disable and
	 * __ffs_epfile_read_data.
	 *
	 * Once __ffs_epfile_read_data is about to finish it will try to set the
	 * pointer back to its old value (as described above), but seeing as the
	 * pointer is not-NULL (namely READ_BUFFER_DROP) it will instead free
	 * the buffer.
	 *
	 * == State transitions ==
	 *
	 * • ptr == NULL:  (initial state)
	 *   ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP
	 *   ◦ __ffs_epfile_read_buffered:    nop
	 *   ◦ __ffs_epfile_read_data allocates temp buffer: go to ptr == buf
	 *   ◦ reading finishes:              n/a, not in ‘and reading’ state
	 * • ptr == DROP:
	 *   ◦ __ffs_epfile_read_buffer_free: nop
	 *   ◦ __ffs_epfile_read_buffered:    go to ptr == NULL
	 *   ◦ __ffs_epfile_read_data allocates temp buffer: free buf, nop
	 *   ◦ reading finishes:              n/a, not in ‘and reading’ state
	 * • ptr == buf:
	 *   ◦ __ffs_epfile_read_buffer_free: free buf, go to ptr == DROP
	 *   ◦ __ffs_epfile_read_buffered:    go to ptr == NULL and reading
	 *   ◦ __ffs_epfile_read_data:        n/a, __ffs_epfile_read_buffered
	 *                                    is always called first
	 *   ◦ reading finishes:              n/a, not in ‘and reading’ state
	 * • ptr == NULL and reading:
	 *   ◦ __ffs_epfile_read_buffer_free: go to ptr == DROP and reading
	 *   ◦ __ffs_epfile_read_buffered:    n/a, mutex is held
	 *   ◦ __ffs_epfile_read_data:        n/a, mutex is held
	 *   ◦ reading finishes and …
	 *     … all data read:               free buf, go to ptr == NULL
	 *     … otherwise:                   go to ptr == buf and reading
	 * • ptr == DROP and reading:
	 *   ◦ __ffs_epfile_read_buffer_free: nop
	 *   ◦ __ffs_epfile_read_buffered:    n/a, mutex is held
	 *   ◦ __ffs_epfile_read_data:        n/a, mutex is held
	 *   ◦ reading finishes:              free buf, go to ptr == DROP
187
	 */
188
189
	struct ffs_buffer		*read_buffer;
#define READ_BUFFER_DROP ((struct ffs_buffer *)ERR_PTR(-ESHUTDOWN))
190

191
192
193
194
195
196
197
198
	char				name[5];

	unsigned char			in;	/* P: ffs->eps_lock */
	unsigned char			isoc;	/* P: ffs->eps_lock */

	unsigned char			_pad;
};

199
200
201
202
203
204
struct ffs_buffer {
	size_t length;
	char *data;
	char storage[];
};

205
206
207
208
209
210
211
/*  ffs_io_data structure ***************************************************/

struct ffs_io_data {
	bool aio;
	bool read;

	struct kiocb *kiocb;
212
213
214
	struct iov_iter data;
	const void *to_free;
	char *buf;
215
216
217
218
219
220

	struct mm_struct *mm;
	struct work_struct work;

	struct usb_ep *ep;
	struct usb_request *req;
221
222

	struct ffs_data *ffs;
223
224
};

225
226
227
228
229
230
struct ffs_desc_helper {
	struct ffs_data *ffs;
	unsigned interfaces_count;
	unsigned eps_count;
};

231
232
233
static int  __must_check ffs_epfiles_create(struct ffs_data *ffs);
static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count);

234
static struct dentry *
235
ffs_sb_create_file(struct super_block *sb, const char *name, void *data,
236
		   const struct file_operations *fops);
237

238
239
240
/* Devices management *******************************************************/

DEFINE_MUTEX(ffs_lock);
241
EXPORT_SYMBOL_GPL(ffs_lock);
242

243
244
245
static struct ffs_dev *_ffs_find_dev(const char *name);
static struct ffs_dev *_ffs_alloc_dev(void);
static void _ffs_free_dev(struct ffs_dev *dev);
246
247
248
249
static void *ffs_acquire_dev(const char *dev_name);
static void ffs_release_dev(struct ffs_data *ffs_data);
static int ffs_ready(struct ffs_data *ffs);
static void ffs_closed(struct ffs_data *ffs);
250
251
252
253
254

/* Misc helper functions ****************************************************/

static int ffs_mutex_lock(struct mutex *mutex, unsigned nonblock)
	__attribute__((warn_unused_result, nonnull));
Al Viro's avatar
Al Viro committed
255
static char *ffs_prepare_buffer(const char __user *buf, size_t len)
256
257
258
259
260
261
262
263
264
	__attribute__((warn_unused_result, nonnull));


/* Control file aka ep0 *****************************************************/

static void ffs_ep0_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct ffs_data *ffs = req->context;

265
	complete(&ffs->ep0req_completion);
266
267
268
}

static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
269
	__releases(&ffs->ev.waitq.lock)
270
271
272
273
274
275
276
277
278
279
280
{
	struct usb_request *req = ffs->ep0req;
	int ret;

	req->zero     = len < le16_to_cpu(ffs->ev.setup.wLength);

	spin_unlock_irq(&ffs->ev.waitq.lock);

	req->buf      = data;
	req->length   = len;

281
282
283
284
285
286
287
288
	/*
	 * UDC layer requires to provide a buffer even for ZLP, but should
	 * not use it at all. Let's provide some poisoned pointer to catch
	 * possible bug in the driver.
	 */
	if (req->buf == NULL)
		req->buf = (void *)0xDEADBABE;

289
	reinit_completion(&ffs->ep0req_completion);
290
291
292
293
294
295
296
297
298
299
300
301

	ret = usb_ep_queue(ffs->gadget->ep0, req, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	ret = wait_for_completion_interruptible(&ffs->ep0req_completion);
	if (unlikely(ret)) {
		usb_ep_dequeue(ffs->gadget->ep0, req);
		return -EINTR;
	}

	ffs->setup_state = FFS_NO_SETUP;
302
	return req->status ? req->status : req->actual;
303
304
305
306
307
}

static int __ffs_ep0_stall(struct ffs_data *ffs)
{
	if (ffs->ev.can_stall) {
308
		pr_vdebug("ep0 stall\n");
309
310
311
312
		usb_ep_set_halt(ffs->gadget->ep0);
		ffs->setup_state = FFS_NO_SETUP;
		return -EL2HLT;
	} else {
313
		pr_debug("bogus ep0 stall!\n");
314
315
316
317
318
319
320
321
322
323
324
325
326
327
		return -ESRCH;
	}
}

static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
			     size_t len, loff_t *ptr)
{
	struct ffs_data *ffs = file->private_data;
	ssize_t ret;
	char *data;

	ENTER();

	/* Fast check if setup was canceled */
328
	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
		return -EIDRM;

	/* Acquire mutex */
	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
	if (unlikely(ret < 0))
		return ret;

	/* Check state */
	switch (ffs->state) {
	case FFS_READ_DESCRIPTORS:
	case FFS_READ_STRINGS:
		/* Copy data */
		if (unlikely(len < 16)) {
			ret = -EINVAL;
			break;
		}

		data = ffs_prepare_buffer(buf, len);
347
		if (IS_ERR(data)) {
348
349
350
351
352
353
			ret = PTR_ERR(data);
			break;
		}

		/* Handle data */
		if (ffs->state == FFS_READ_DESCRIPTORS) {
354
			pr_info("read descriptors\n");
355
356
357
358
359
360
361
			ret = __ffs_data_got_descs(ffs, data, len);
			if (unlikely(ret < 0))
				break;

			ffs->state = FFS_READ_STRINGS;
			ret = len;
		} else {
362
			pr_info("read strings\n");
363
364
365
366
367
368
369
370
371
372
373
374
375
			ret = __ffs_data_got_strings(ffs, data, len);
			if (unlikely(ret < 0))
				break;

			ret = ffs_epfiles_create(ffs);
			if (unlikely(ret)) {
				ffs->state = FFS_CLOSING;
				break;
			}

			ffs->state = FFS_ACTIVE;
			mutex_unlock(&ffs->mutex);

376
			ret = ffs_ready(ffs);
377
378
379
380
381
382
383
384
385
386
387
			if (unlikely(ret < 0)) {
				ffs->state = FFS_CLOSING;
				return ret;
			}

			return len;
		}
		break;

	case FFS_ACTIVE:
		data = NULL;
388
389
390
391
		/*
		 * We're called from user space, we can use _irq
		 * rather then _irqsave
		 */
392
		spin_lock_irq(&ffs->ev.waitq.lock);
393
		switch (ffs_setup_state_clear_cancelled(ffs)) {
394
		case FFS_SETUP_CANCELLED:
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
			ret = -EIDRM;
			goto done_spin;

		case FFS_NO_SETUP:
			ret = -ESRCH;
			goto done_spin;

		case FFS_SETUP_PENDING:
			break;
		}

		/* FFS_SETUP_PENDING */
		if (!(ffs->ev.setup.bRequestType & USB_DIR_IN)) {
			spin_unlock_irq(&ffs->ev.waitq.lock);
			ret = __ffs_ep0_stall(ffs);
			break;
		}

		/* FFS_SETUP_PENDING and not stall */
		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));

		spin_unlock_irq(&ffs->ev.waitq.lock);

		data = ffs_prepare_buffer(buf, len);
419
		if (IS_ERR(data)) {
420
421
422
423
424
425
			ret = PTR_ERR(data);
			break;
		}

		spin_lock_irq(&ffs->ev.waitq.lock);

426
427
		/*
		 * We are guaranteed to be still in FFS_ACTIVE state
428
		 * but the state of setup could have changed from
429
		 * FFS_SETUP_PENDING to FFS_SETUP_CANCELLED so we need
430
		 * to check for that.  If that happened we copied data
431
432
433
		 * from user space in vain but it's unlikely.
		 *
		 * For sure we are not in FFS_NO_SETUP since this is
434
435
		 * the only place FFS_SETUP_PENDING -> FFS_NO_SETUP
		 * transition can be performed and it's protected by
436
437
		 * mutex.
		 */
438
439
		if (ffs_setup_state_clear_cancelled(ffs) ==
		    FFS_SETUP_CANCELLED) {
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
			ret = -EIDRM;
done_spin:
			spin_unlock_irq(&ffs->ev.waitq.lock);
		} else {
			/* unlocks spinlock */
			ret = __ffs_ep0_queue_wait(ffs, data, len);
		}
		kfree(data);
		break;

	default:
		ret = -EBADFD;
		break;
	}

	mutex_unlock(&ffs->mutex);
	return ret;
}

459
/* Called with ffs->ev.waitq.lock and ffs->mutex held, both released on exit. */
460
461
static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
				     size_t n)
462
	__releases(&ffs->ev.waitq.lock)
463
{
464
	/*
465
466
467
	 * n cannot be bigger than ffs->ev.count, which cannot be bigger than
	 * size of ffs->ev.types array (which is four) so that's how much space
	 * we reserve.
468
	 */
469
470
	struct usb_functionfs_event events[ARRAY_SIZE(ffs->ev.types)];
	const size_t size = n * sizeof *events;
471
472
	unsigned i = 0;

473
	memset(events, 0, size);
474
475
476
477
478
479
480
481
482

	do {
		events[i].type = ffs->ev.types[i];
		if (events[i].type == FUNCTIONFS_SETUP) {
			events[i].u.setup = ffs->ev.setup;
			ffs->setup_state = FFS_SETUP_PENDING;
		}
	} while (++i < n);

483
484
	ffs->ev.count -= n;
	if (ffs->ev.count)
485
486
487
488
489
490
		memmove(ffs->ev.types, ffs->ev.types + n,
			ffs->ev.count * sizeof *ffs->ev.types);

	spin_unlock_irq(&ffs->ev.waitq.lock);
	mutex_unlock(&ffs->mutex);

491
	return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
492
493
494
495
496
497
498
499
500
501
502
503
504
}

static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
			    size_t len, loff_t *ptr)
{
	struct ffs_data *ffs = file->private_data;
	char *data = NULL;
	size_t n;
	int ret;

	ENTER();

	/* Fast check if setup was canceled */
505
	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
506
507
508
509
510
511
512
513
514
515
516
517
518
		return -EIDRM;

	/* Acquire mutex */
	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
	if (unlikely(ret < 0))
		return ret;

	/* Check state */
	if (ffs->state != FFS_ACTIVE) {
		ret = -EBADFD;
		goto done_mutex;
	}

519
520
521
522
	/*
	 * We're called from user space, we can use _irq rather then
	 * _irqsave
	 */
523
524
	spin_lock_irq(&ffs->ev.waitq.lock);

525
	switch (ffs_setup_state_clear_cancelled(ffs)) {
526
	case FFS_SETUP_CANCELLED:
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
		ret = -EIDRM;
		break;

	case FFS_NO_SETUP:
		n = len / sizeof(struct usb_functionfs_event);
		if (unlikely(!n)) {
			ret = -EINVAL;
			break;
		}

		if ((file->f_flags & O_NONBLOCK) && !ffs->ev.count) {
			ret = -EAGAIN;
			break;
		}

542
543
		if (wait_event_interruptible_exclusive_locked_irq(ffs->ev.waitq,
							ffs->ev.count)) {
544
545
546
547
			ret = -EINTR;
			break;
		}

548
		/* unlocks spinlock */
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
		return __ffs_ep0_read_events(ffs, buf,
					     min(n, (size_t)ffs->ev.count));

	case FFS_SETUP_PENDING:
		if (ffs->ev.setup.bRequestType & USB_DIR_IN) {
			spin_unlock_irq(&ffs->ev.waitq.lock);
			ret = __ffs_ep0_stall(ffs);
			goto done_mutex;
		}

		len = min(len, (size_t)le16_to_cpu(ffs->ev.setup.wLength));

		spin_unlock_irq(&ffs->ev.waitq.lock);

		if (likely(len)) {
			data = kmalloc(len, GFP_KERNEL);
			if (unlikely(!data)) {
				ret = -ENOMEM;
				goto done_mutex;
			}
		}

		spin_lock_irq(&ffs->ev.waitq.lock);

		/* See ffs_ep0_write() */
574
575
		if (ffs_setup_state_clear_cancelled(ffs) ==
		    FFS_SETUP_CANCELLED) {
576
577
578
579
580
581
			ret = -EIDRM;
			break;
		}

		/* unlocks spinlock */
		ret = __ffs_ep0_queue_wait(ffs, data, len);
582
		if (likely(ret > 0) && unlikely(copy_to_user(buf, data, len)))
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
			ret = -EFAULT;
		goto done_mutex;

	default:
		ret = -EBADFD;
		break;
	}

	spin_unlock_irq(&ffs->ev.waitq.lock);
done_mutex:
	mutex_unlock(&ffs->mutex);
	kfree(data);
	return ret;
}

static int ffs_ep0_open(struct inode *inode, struct file *file)
{
	struct ffs_data *ffs = inode->i_private;

	ENTER();

	if (unlikely(ffs->state == FFS_CLOSING))
		return -EBUSY;

	file->private_data = ffs;
	ffs_data_opened(ffs);

	return 0;
}

static int ffs_ep0_release(struct inode *inode, struct file *file)
{
	struct ffs_data *ffs = file->private_data;

	ENTER();

	ffs_data_closed(ffs);

	return 0;
}

static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
{
	struct ffs_data *ffs = file->private_data;
	struct usb_gadget *gadget = ffs->gadget;
	long ret;

	ENTER();

	if (code == FUNCTIONFS_INTERFACE_REVMAP) {
		struct ffs_function *func = ffs->func;
		ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
635
	} else if (gadget && gadget->ops->ioctl) {
636
637
638
639
640
641
642
643
		ret = gadget->ops->ioctl(gadget, code, value);
	} else {
		ret = -ENOTTY;
	}

	return ret;
}

644
static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait)
645
646
{
	struct ffs_data *ffs = file->private_data;
647
	__poll_t mask = EPOLLWRNORM;
648
649
650
651
652
653
654
655
656
657
658
	int ret;

	poll_wait(file, &ffs->ev.waitq, wait);

	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
	if (unlikely(ret < 0))
		return mask;

	switch (ffs->state) {
	case FFS_READ_DESCRIPTORS:
	case FFS_READ_STRINGS:
659
		mask |= EPOLLOUT;
660
661
662
663
664
665
		break;

	case FFS_ACTIVE:
		switch (ffs->setup_state) {
		case FFS_NO_SETUP:
			if (ffs->ev.count)
666
				mask |= EPOLLIN;
667
668
669
670
			break;

		case FFS_SETUP_PENDING:
		case FFS_SETUP_CANCELLED:
671
			mask |= (EPOLLIN | EPOLLOUT);
672
673
674
675
			break;
		}
	case FFS_CLOSING:
		break;
676
677
	case FFS_DEACTIVATED:
		break;
678
679
680
681
682
683
684
	}

	mutex_unlock(&ffs->mutex);

	return mask;
}

685
686
687
688
689
690
691
692
static const struct file_operations ffs_ep0_operations = {
	.llseek =	no_llseek,

	.open =		ffs_ep0_open,
	.write =	ffs_ep0_write,
	.read =		ffs_ep0_read,
	.release =	ffs_ep0_release,
	.unlocked_ioctl =	ffs_ep0_ioctl,
693
	.poll =		ffs_ep0_poll,
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
};


/* "Normal" endpoints operations ********************************************/

static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req)
{
	ENTER();
	if (likely(req->context)) {
		struct ffs_ep *ep = _ep->driver_data;
		ep->status = req->status ? req->status : req->actual;
		complete(req->context);
	}
}

709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter)
{
	ssize_t ret = copy_to_iter(data, data_len, iter);
	if (likely(ret == data_len))
		return ret;

	if (unlikely(iov_iter_count(iter)))
		return -EFAULT;

	/*
	 * Dear user space developer!
	 *
	 * TL;DR: To stop getting below error message in your kernel log, change
	 * user space code using functionfs to align read buffers to a max
	 * packet size.
	 *
	 * Some UDCs (e.g. dwc3) require request sizes to be a multiple of a max
	 * packet size.  When unaligned buffer is passed to functionfs, it
	 * internally uses a larger, aligned buffer so that such UDCs are happy.
	 *
	 * Unfortunately, this means that host may send more data than was
	 * requested in read(2) system call.  f_fs doesn’t know what to do with
	 * that excess data so it simply drops it.
	 *
	 * Was the buffer aligned in the first place, no such problem would
	 * happen.
	 *
736
737
738
739
740
	 * Data may be dropped only in AIO reads.  Synchronous reads are handled
	 * by splitting a request into multiple parts.  This splitting may still
	 * be a problem though so it’s likely best to align the buffer
	 * regardless of it being AIO or not..
	 *
741
742
743
744
745
746
747
748
749
750
751
	 * This only affects OUT endpoints, i.e. reading data with a read(2),
	 * aio_read(2) etc. system calls.  Writing data to an IN endpoint is not
	 * affected.
	 */
	pr_err("functionfs read size %d > requested size %zd, dropping excess data. "
	       "Align read buffer size to max packet size to avoid the problem.\n",
	       data_len, ret);

	return ret;
}

752
753
754
755
756
757
static void ffs_user_copy_worker(struct work_struct *work)
{
	struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
						   work);
	int ret = io_data->req->status ? io_data->req->status :
					 io_data->req->actual;
758
	bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
759
760

	if (io_data->read && ret > 0) {
761
762
763
		mm_segment_t oldfs = get_fs();

		set_fs(USER_DS);
764
		use_mm(io_data->mm);
765
		ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
766
		unuse_mm(io_data->mm);
767
		set_fs(oldfs);
768
769
	}

770
	io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
771

772
	if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
773
774
		eventfd_signal(io_data->ffs->ffs_eventfd, 1);

775
776
777
	usb_ep_free_request(io_data->ep, io_data->req);

	if (io_data->read)
778
		kfree(io_data->to_free);
779
780
781
782
783
784
785
786
	kfree(io_data->buf);
	kfree(io_data);
}

static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
					 struct usb_request *req)
{
	struct ffs_io_data *io_data = req->context;
787
	struct ffs_data *ffs = io_data->ffs;
788
789
790
791

	ENTER();

	INIT_WORK(&io_data->work, ffs_user_copy_worker);
792
	queue_work(ffs->io_completion_wq, &io_data->work);
793
794
}

795
796
797
798
799
800
801
802
803
804
805
static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile)
{
	/*
	 * See comment in struct ffs_epfile for full read_buffer pointer
	 * synchronisation story.
	 */
	struct ffs_buffer *buf = xchg(&epfile->read_buffer, READ_BUFFER_DROP);
	if (buf && buf != READ_BUFFER_DROP)
		kfree(buf);
}

806
807
808
809
/* Assumes epfile->mutex is held. */
static ssize_t __ffs_epfile_read_buffered(struct ffs_epfile *epfile,
					  struct iov_iter *iter)
{
810
811
812
813
814
815
	/*
	 * Null out epfile->read_buffer so ffs_func_eps_disable does not free
	 * the buffer while we are using it.  See comment in struct ffs_epfile
	 * for full read_buffer pointer synchronisation story.
	 */
	struct ffs_buffer *buf = xchg(&epfile->read_buffer, NULL);
816
	ssize_t ret;
817
	if (!buf || buf == READ_BUFFER_DROP)
818
819
820
821
822
		return 0;

	ret = copy_to_iter(buf->data, buf->length, iter);
	if (buf->length == ret) {
		kfree(buf);
823
824
825
826
		return ret;
	}

	if (unlikely(iov_iter_count(iter))) {
827
828
829
830
831
		ret = -EFAULT;
	} else {
		buf->length -= ret;
		buf->data += ret;
	}
832
833
834
835

	if (cmpxchg(&epfile->read_buffer, NULL, buf))
		kfree(buf);

836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
	return ret;
}

/* Assumes epfile->mutex is held. */
static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
				      void *data, int data_len,
				      struct iov_iter *iter)
{
	struct ffs_buffer *buf;

	ssize_t ret = copy_to_iter(data, data_len, iter);
	if (likely(data_len == ret))
		return ret;

	if (unlikely(iov_iter_count(iter)))
		return -EFAULT;

	/* See ffs_copy_to_iter for more context. */
	pr_warn("functionfs read size %d > requested size %zd, splitting request into multiple reads.",
		data_len, ret);

	data_len -= ret;
	buf = kmalloc(sizeof(*buf) + data_len, GFP_KERNEL);
859
860
	if (!buf)
		return -ENOMEM;
861
862
863
	buf->length = data_len;
	buf->data = buf->storage;
	memcpy(buf->storage, data + ret, data_len);
864
865
866
867
868
869
870
871
872

	/*
	 * At this point read_buffer is NULL or READ_BUFFER_DROP (if
	 * ffs_func_eps_disable has been called in the meanwhile).  See comment
	 * in struct ffs_epfile for full read_buffer pointer synchronisation
	 * story.
	 */
	if (unlikely(cmpxchg(&epfile->read_buffer, NULL, buf)))
		kfree(buf);
873
874
875
876

	return ret;
}

877
static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
878
879
{
	struct ffs_epfile *epfile = file->private_data;
880
	struct usb_request *req;
881
882
	struct ffs_ep *ep;
	char *data = NULL;
883
	ssize_t ret, data_len = -EINVAL;
884
885
	int halt;

886
	/* Are we still active? */
887
888
	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
		return -ENODEV;
889

890
891
892
	/* Wait for endpoint to be enabled */
	ep = epfile->ep;
	if (!ep) {
893
894
		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;
895

896
897
		ret = wait_event_interruptible(
				epfile->ffs->wait, (ep = epfile->ep));
898
899
		if (ret)
			return -EINTR;
900
	}
901

902
	/* Do we halt? */
903
	halt = (!io_data->read == !epfile->in);
904
905
	if (halt && epfile->isoc)
		return -EINVAL;
906

907
908
909
910
911
	/* We will be using request and read_buffer */
	ret = ffs_mutex_lock(&epfile->mutex, file->f_flags & O_NONBLOCK);
	if (unlikely(ret))
		goto error;

912
913
	/* Allocate & copy */
	if (!halt) {
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
		struct usb_gadget *gadget;

		/*
		 * Do we have buffered data from previous partial read?  Check
		 * that for synchronous case only because we do not have
		 * facility to ‘wake up’ a pending asynchronous read and push
		 * buffered data to it which we would need to make things behave
		 * consistently.
		 */
		if (!io_data->aio && io_data->read) {
			ret = __ffs_epfile_read_buffered(epfile, &io_data->data);
			if (ret)
				goto error_mutex;
		}

929
930
		/*
		 * if we _do_ wait above, the epfile->ffs->gadget might be NULL
931
932
		 * before the waiting completes, so do not assign to 'gadget'
		 * earlier
933
		 */
934
		gadget = epfile->ffs->gadget;
935

936
937
938
		spin_lock_irq(&epfile->ffs->eps_lock);
		/* In the meantime, endpoint got disabled or changed. */
		if (epfile->ep != ep) {
939
940
			ret = -ESHUTDOWN;
			goto error_lock;
941
		}
942
		data_len = iov_iter_count(&io_data->data);
943
944
945
946
		/*
		 * Controller may require buffer size to be aligned to
		 * maxpacketsize of an out endpoint.
		 */
947
948
		if (io_data->read)
			data_len = usb_ep_align_maybe(gadget, ep->ep, data_len);
949
		spin_unlock_irq(&epfile->ffs->eps_lock);
950
951

		data = kmalloc(data_len, GFP_KERNEL);
952
953
954
955
956
		if (unlikely(!data)) {
			ret = -ENOMEM;
			goto error_mutex;
		}
		if (!io_data->read &&
957
		    !copy_from_iter_full(data, data_len, &io_data->data)) {
958
959
			ret = -EFAULT;
			goto error_mutex;
960
961
		}
	}
962

963
	spin_lock_irq(&epfile->ffs->eps_lock);
964

965
966
967
968
	if (epfile->ep != ep) {
		/* In the meantime, endpoint got disabled or changed. */
		ret = -ESHUTDOWN;
	} else if (halt) {
969
970
971
		ret = usb_ep_set_halt(ep->ep);
		if (!ret)
			ret = -EBADMSG;
972
	} else if (unlikely(data_len == -EINVAL)) {
973
974
975
976
977
978
979
980
981
982
983
		/*
		 * Sanity Check: even though data_len can't be used
		 * uninitialized at the time I write this comment, some
		 * compilers complain about this situation.
		 * In order to keep the code clean from warnings, data_len is
		 * being initialized to -EINVAL during its declaration, which
		 * means we can't rely on compiler anymore to warn no future
		 * changes won't result in data_len being used uninitialized.
		 * For such reason, we're adding this redundant sanity check
		 * here.
		 */
984
985
986
987
		WARN(1, "%s: data_len == -EINVAL\n", __func__);
		ret = -EINVAL;
	} else if (!io_data->aio) {
		DECLARE_COMPLETION_ONSTACK(done);
988
		bool interrupted = false;
989

990
991
992
		req = ep->req;
		req->buf      = data;
		req->length   = data_len;
993

994
995
		req->context  = &done;
		req->complete = ffs_epfile_io_complete;
996

997
998
999
		ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC);
		if (unlikely(ret < 0))
			goto error_lock;
1000