#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/uaccess.h>

#include <asm/ioctls.h>

extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* we held the notification_mutex the whole time, so this is the
	 * same event we found on the queue above */
	return fsnotify_remove_notify_event(group);
}

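/*
 * Open a new read-only struct file for the path carried by @event and
 * install it in an unused fd in the current task's file table.  Returns
 * the new fd, or a negative errno if the open failed (e.g. for an
 * overflow event with no path).
 */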
static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
{
	int client_fd;
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;

	if (event->data_type != FSNOTIFY_EVENT_PATH) {
		WARN_ON(1);
		put_unused_fd(client_fd);
		return -EINVAL;
	}

	/*
	 * we need a new file handle for the userspace program so it can read
	 * even if it was originally opened O_WRONLY.
	 */
	dentry = dget(event->path.dentry);
	mnt = mntget(event->path.mnt);
	/* it's possible this event was an overflow event.  in that case dentry
	 * and mnt are NULL; that's fine, just don't call dentry_open() */
	if (dentry && mnt)
		new_file = dentry_open(dentry, mnt,
				       O_RDONLY | O_LARGEFILE | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * we still send an event even if we can't open the file.  this
		 * can happen, say, when a task is gone and we try to open its
		 * /proc files, or when we try to open a WRONLY file like in
		 * sysfs.  we just send the errno to userspace since there
		 * isn't much else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		fd_install(client_fd, new_file);
	}

	return client_fd;
}

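/*
 * Fill in the fanotify_event_metadata that will be copied to userspace for
 * @event, opening an fd for the event's path via create_fd().  Returns the
 * fd (a negative errno on failure).
 */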
static ssize_t fill_event_metadata(struct fsnotify_group *group,
				   struct fanotify_event_metadata *metadata,
				   struct fsnotify_event *event)
{
	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, event);

	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	metadata->fd = create_fd(group, event);

	return metadata->fd;
}

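/*
 * Turn one queued event into fanotify metadata and copy it to the userspace
 * buffer.  Returns the number of bytes copied or a negative errno.
 */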
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	int ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event);
	if (ret < 0)
		return ret;

	if (copy_to_user(buf, &fanotify_event_metadata, FAN_EVENT_METADATA_LEN))
		return -EFAULT;

	return FAN_EVENT_METADATA_LEN;
}

/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

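/*
 * Copy as many whole events as fit into the userspace buffer.  Unless the
 * fd is O_NONBLOCK, block until at least one event has been copied or a
 * signal is pending.
 */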
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -EINTR;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: file=%p group=%p\n", __func__, file, group);

	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_put_group(group);

	return 0;
}

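/*
 * FIONREAD reports the number of bytes a read() would return right now:
 * the number of queued events times the fixed metadata length.
 */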
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations fanotify_fops = {
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
};

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

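/*
 * Resolve dfd/filename into a struct path, *at()-style.  A NULL filename
 * means the mark is placed on dfd itself.  The caller must have read
 * permission on the resulting inode and is responsible for path_put().
 */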
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct file *file;
		int fput_needed;

		ret = -EBADF;
		file = fget_light(dfd, &fput_needed);
		if (!file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
			fput_light(file, fput_needed);
			goto out;
		}

		*path = file->f_path;
		path_get(path);
		fput_light(file, fput_needed);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}

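/*
 * Clear @mask from the mark's event mask (or from its ignored mask when
 * FAN_MARK_IGNORED_MASK is set).  If nothing remains of the mask being
 * cleared, the mark is destroyed.  Returns the bits actually cleared.
 */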
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	spin_unlock(&fsn_mark->lock);

	if (!(oldmask & ~mask))
		fsnotify_destroy_mark(fsn_mark);

	return mask & oldmask;
}

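/*
 * Remove @mask from this group's mark on @mnt, recalculating the group and
 * vfsmount masks if any of the removed bits were set there.
 */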
static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	if (removed & group->mask)
		fsnotify_recalc_group_mask(group);
	if (removed & mnt->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}

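/*
 * Remove @mask from this group's mark on @inode, recalculating the group
 * and inode masks if any of the removed bits were set there.
 */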
static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return -ENOENT;

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);

	if (removed & group->mask)
		fsnotify_recalc_group_mask(group);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}

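/*
 * OR @mask into the mark's event mask (or its ignored mask when
 * FAN_MARK_IGNORED_MASK is set).  Returns the bits that were newly set.
 */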
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask | mask));
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}
	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}

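/*
 * Find (or allocate and attach) this group's mark on @mnt and add @mask to
 * it, recalculating the group and vfsmount masks if new bits were added.
 */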
static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		int ret;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
		if (ret) {
			fanotify_free_mark(fsn_mark);
			return ret;
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	if (added) {
		if (added & ~group->mask)
			fsnotify_recalc_group_mask(group);
		if (added & ~mnt->mnt_fsnotify_mask)
			fsnotify_recalc_vfsmount_mask(mnt);
	}
	return 0;
}

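/*
 * Find (or allocate and attach) this group's mark on @inode and add @mask
 * to it, recalculating the group and inode masks if new bits were added.
 */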
static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		int ret;

		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
		if (!fsn_mark)
			return -ENOMEM;

		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
		ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
		if (ret) {
			fanotify_free_mark(fsn_mark);
			return ret;
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	fsnotify_put_mark(fsn_mark);
	if (added) {
		if (added & ~group->mask)
			fsnotify_recalc_group_mask(group);
		if (added & ~inode->i_fsnotify_mask)
			fsnotify_recalc_inode_mask(inode);
	}
	return 0;
}

/* fanotify syscalls */
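/*
 * fanotify_init() creates a new notification group and returns a file
 * descriptor, backed by an anonymous inode, from which events are read and
 * against which marks are added with fanotify_mark().
 */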
SYSCALL_DEFINE3(fanotify_init, unsigned int, flags, unsigned int, event_f_flags,
		unsigned int, priority)
{
	struct fsnotify_group *group;
	int f_flags, fd;

	pr_debug("%s: flags=%d event_f_flags=%d priority=%d\n",
		__func__, flags, event_f_flags, priority);

	if (event_f_flags)
		return -EINVAL;
	if (priority)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	f_flags = (O_RDONLY | FMODE_NONOTIFY);
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group))
		return PTR_ERR(group);

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_put_group;

	return fd;

out_put_group:
	fsnotify_put_group(group);
	return fd;
}

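/*
 * fanotify_mark() adds, removes or flushes marks on an inode or an entire
 * vfsmount for the group behind @fanotify_fd.
 */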
SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
			      __u64 mask, int dfd,
			      const char  __user * pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct file *filp;
	struct path path;
	int ret, fput_needed;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
	case FAN_MARK_REMOVE:
	case FAN_MARK_FLUSH:
		break;
	default:
		return -EINVAL;
	}
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
		return -EINVAL;

	filp = fget_light(fanotify_fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(filp->f_op != &fanotify_fops))
		goto fput_and_out;

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;
	group = filp->private_data;

	/* add, remove, or flush an inode or vfsmount mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_FLUSH:
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		fsnotify_recalc_group_mask(group);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}

#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
				  long dfd, long pathname)
{
	return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
				  mask, (int) dfd,
				  (const char  __user *) pathname);
}
SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
#endif

/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);

	return 0;
}
device_initcall(fanotify_user_setup);