/*
 *  linux/fs/namespace.c
 *
 * (C) Copyright Al Viro 2000, 2001
 *	Released under GPL v2.
 *
 * Based on code from fs/super.c, copyright Linus Torvalds and others.
 * Heavily rewritten.
 */

#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/quotaops.h>
#include <linux/acct.h>
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/sysfs.h>
#include <linux/seq_file.h>
#include <linux/mnt_namespace.h>
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"

/* spinlock for vfsmount related operations, in place of dcache_lock */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);

static int event;

static struct list_head *mount_hashtable __read_mostly;
static int hash_mask __read_mostly, hash_bits __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;

/* /sys/fs */
decl_subsys(fs, NULL, NULL);
EXPORT_SYMBOL_GPL(fs_subsys);

static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
{
	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
	tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> hash_bits);
	return tmp & hash_mask;
}

struct vfsmount *alloc_vfsmnt(const char *name)
{
	struct vfsmount *mnt = kmem_cache_alloc(mnt_cache, GFP_KERNEL);
	if (mnt) {
		memset(mnt, 0, sizeof(struct vfsmount));
		atomic_set(&mnt->mnt_count, 1);
		INIT_LIST_HEAD(&mnt->mnt_hash);
		INIT_LIST_HEAD(&mnt->mnt_child);
		INIT_LIST_HEAD(&mnt->mnt_mounts);
		INIT_LIST_HEAD(&mnt->mnt_list);
		INIT_LIST_HEAD(&mnt->mnt_expire);
		INIT_LIST_HEAD(&mnt->mnt_share);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
		INIT_LIST_HEAD(&mnt->mnt_slave);
		if (name) {
			int size = strlen(name) + 1;
			char *newname = kmalloc(size, GFP_KERNEL);
			if (newname) {
				memcpy(newname, name, size);
				mnt->mnt_devname = newname;
			}
		}
	}
	return mnt;
}

int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb)
{
	mnt->mnt_sb = sb;
	mnt->mnt_root = dget(sb->s_root);
	return 0;
}

EXPORT_SYMBOL(simple_set_mnt);
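
/*
 * Usage sketch (illustrative, not part of this file): a filesystem's
 * ->get_sb() hands its superblock back to the VFS through this helper,
 * usually indirectly via get_sb_nodev()/get_sb_single()/get_sb_bdev(),
 * which finish with simple_set_mnt(mnt, sb) once the superblock is set
 * up.  The names examplefs_get_sb/examplefs_fill_super are hypothetical:
 *
 *	static int examplefs_get_sb(struct file_system_type *fs_type,
 *			int flags, const char *dev_name, void *data,
 *			struct vfsmount *mnt)
 *	{
 *		return get_sb_nodev(fs_type, flags, data,
 *				    examplefs_fill_super, mnt);
 *	}
 */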

void free_vfsmnt(struct vfsmount *mnt)
{
	kfree(mnt->mnt_devname);
	kmem_cache_free(mnt_cache, mnt);
}

/*
 * find the first or last mount at @dentry on vfsmount @mnt depending on
 * @dir. If @dir is set, return the first mount; otherwise return the last mount.
 */
struct vfsmount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,
			      int dir)
{
	struct list_head *head = mount_hashtable + hash(mnt, dentry);
	struct list_head *tmp = head;
	struct vfsmount *p, *found = NULL;

	for (;;) {
		tmp = dir ? tmp->next : tmp->prev;
		p = NULL;
		if (tmp == head)
			break;
		p = list_entry(tmp, struct vfsmount, mnt_hash);
		if (p->mnt_parent == mnt && p->mnt_mountpoint == dentry) {
			found = p;
			break;
		}
	}
	return found;
}

/*
 * lookup_mnt increments the ref count before returning
 * the vfsmount struct.
 */
struct vfsmount *lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct vfsmount *child_mnt;
	spin_lock(&vfsmount_lock);
	if ((child_mnt = __lookup_mnt(mnt, dentry, 1)))
		mntget(child_mnt);
	spin_unlock(&vfsmount_lock);
	return child_mnt;
}
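
/*
 * Illustrative sketch (not part of this file): path-walking code such as
 * follow_mount() in fs/namei.c uses lookup_mnt() to step from a covered
 * mountpoint onto whatever is mounted on top of it, roughly like this:
 *
 *	while (d_mountpoint(*dentry)) {
 *		struct vfsmount *mounted = lookup_mnt(*mnt, *dentry);
 *		if (!mounted)
 *			break;
 *		dput(*dentry);
 *		mntput(*mnt);
 *		*mnt = mounted;
 *		*dentry = dget(mounted->mnt_root);
 *	}
 */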

static inline int check_mnt(struct vfsmount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

static void detach_mnt(struct vfsmount *mnt, struct nameidata *old_nd)
{
	old_nd->dentry = mnt->mnt_mountpoint;
	old_nd->mnt = mnt->mnt_parent;
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt_root;
	list_del_init(&mnt->mnt_child);
	list_del_init(&mnt->mnt_hash);
	old_nd->dentry->d_mounted--;
}

void mnt_set_mountpoint(struct vfsmount *mnt, struct dentry *dentry,
			struct vfsmount *child_mnt)
{
	child_mnt->mnt_parent = mntget(mnt);
	child_mnt->mnt_mountpoint = dget(dentry);
	dentry->d_mounted++;
}

static void attach_mnt(struct vfsmount *mnt, struct nameidata *nd)
{
	mnt_set_mountpoint(nd->mnt, nd->dentry, mnt);
	list_add_tail(&mnt->mnt_hash, mount_hashtable +
			hash(nd->mnt, nd->dentry));
	list_add_tail(&mnt->mnt_child, &nd->mnt->mnt_mounts);
}

/*
 * the caller must hold vfsmount_lock
 */
static void commit_tree(struct vfsmount *mnt)
{
	struct vfsmount *parent = mnt->mnt_parent;
	struct vfsmount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;
	list_splice(&head, n->list.prev);

	list_add_tail(&mnt->mnt_hash, mount_hashtable +
				hash(parent, mnt->mnt_mountpoint));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	touch_mnt_namespace(n);
}

static struct vfsmount *next_mnt(struct vfsmount *p, struct vfsmount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct vfsmount, mnt_child);
}

static struct vfsmount *skip_mnt_tree(struct vfsmount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct vfsmount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

static struct vfsmount *clone_mnt(struct vfsmount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt_sb;
	struct vfsmount *mnt = alloc_vfsmnt(old->mnt_devname);

	if (mnt) {
		mnt->mnt_flags = old->mnt_flags;
		atomic_inc(&sb->s_active);
		mnt->mnt_sb = sb;
		mnt->mnt_root = dget(root);
		mnt->mnt_mountpoint = mnt->mnt_root;
		mnt->mnt_parent = mnt;

		if (flag & CL_SLAVE) {
			list_add(&mnt->mnt_slave, &old->mnt_slave_list);
			mnt->mnt_master = old;
			CLEAR_MNT_SHARED(mnt);
		} else {
			if ((flag & CL_PROPAGATION) || IS_MNT_SHARED(old))
				list_add(&mnt->mnt_share, &old->mnt_share);
			if (IS_MNT_SLAVE(old))
				list_add(&mnt->mnt_slave, &old->mnt_slave);
			mnt->mnt_master = old->mnt_master;
		}
		if (flag & CL_MAKE_SHARED)
			set_mnt_shared(mnt);

		/* stick the duplicate mount on the same expiry list
		 * as the original if that was on one */
		if (flag & CL_EXPIRE) {
			spin_lock(&vfsmount_lock);
			if (!list_empty(&old->mnt_expire))
				list_add(&mnt->mnt_expire, &old->mnt_expire);
			spin_unlock(&vfsmount_lock);
		}
	}
	return mnt;
}

static inline void __mntput(struct vfsmount *mnt)
{
	struct super_block *sb = mnt->mnt_sb;
	dput(mnt->mnt_root);
	free_vfsmnt(mnt);
	deactivate_super(sb);
}

void mntput_no_expire(struct vfsmount *mnt)
{
repeat:
	if (atomic_dec_and_lock(&mnt->mnt_count, &vfsmount_lock)) {
		if (likely(!mnt->mnt_pinned)) {
			spin_unlock(&vfsmount_lock);
			__mntput(mnt);
			return;
		}
		atomic_add(mnt->mnt_pinned + 1, &mnt->mnt_count);
		mnt->mnt_pinned = 0;
		spin_unlock(&vfsmount_lock);
		acct_auto_close_mnt(mnt);
		security_sb_umount_close(mnt);
		goto repeat;
	}
}

EXPORT_SYMBOL(mntput_no_expire);

void mnt_pin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	mnt->mnt_pinned++;
	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL(mnt_pin);

void mnt_unpin(struct vfsmount *mnt)
{
	spin_lock(&vfsmount_lock);
	if (mnt->mnt_pinned) {
		atomic_inc(&mnt->mnt_count);
		mnt->mnt_pinned--;
	}
	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL(mnt_unpin);
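
/*
 * Usage sketch (illustrative, not part of this file): a pin is a reference
 * that does not make the mount look busy.  The intended pattern, used by
 * BSD process accounting (kernel/acct.c), is roughly:
 *
 *	mnt_pin(mnt);		// take a pin instead of a normal reference
 *	mntput(mnt);		// drop the caller's active reference
 *	...
 *	// When the last real user goes away, mntput_no_expire() above sees
 *	// mnt_pinned != 0, re-takes the references and calls
 *	// acct_auto_close_mnt(); the accounting shutdown path then does:
 *	mnt_unpin(mnt);		// convert the pin back into a reference
 *	mntput(mnt);		// ... and release it for real
 */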

/* iterator */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct mnt_namespace *n = m->private;
	struct list_head *p;
	loff_t l = *pos;

	down_read(&namespace_sem);
	list_for_each(p, &n->list)
		if (!l--)
			return list_entry(p, struct vfsmount, mnt_list);
	return NULL;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct mnt_namespace *n = m->private;
	struct list_head *p = ((struct vfsmount *)v)->mnt_list.next;
	(*pos)++;
	return p == &n->list ? NULL : list_entry(p, struct vfsmount, mnt_list);
}

static void m_stop(struct seq_file *m, void *v)
{
	up_read(&namespace_sem);
}

static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

static int show_vfsmnt(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = v;
	int err = 0;
	static struct proc_fs_info {
		int flag;
		char *str;
	} fs_info[] = {
		{ MS_SYNCHRONOUS, ",sync" },
		{ MS_DIRSYNC, ",dirsync" },
		{ MS_MANDLOCK, ",mand" },
		{ 0, NULL }
	};
	static struct proc_fs_info mnt_info[] = {
		{ MNT_NOSUID, ",nosuid" },
		{ MNT_NODEV, ",nodev" },
		{ MNT_NOEXEC, ",noexec" },
		{ MNT_NOATIME, ",noatime" },
		{ MNT_NODIRATIME, ",nodiratime" },
		{ 0, NULL }
	};
	struct proc_fs_info *fs_infop;

	mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
	seq_putc(m, ' ');
	seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
	seq_putc(m, ' ');
	mangle(m, mnt->mnt_sb->s_type->name);
	seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? " ro" : " rw");
	for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_sb->s_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
		if (mnt->mnt_flags & fs_infop->flag)
			seq_puts(m, fs_infop->str);
	}
	if (mnt->mnt_sb->s_op->show_options)
		err = mnt->mnt_sb->s_op->show_options(m, mnt);
	seq_puts(m, " 0 0\n");
	return err;
}

struct seq_operations mounts_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsmnt
};
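
/*
 * The resulting /proc/mounts lines look like (illustrative):
 *
 *	/dev/hda1 / ext3 rw 0 0
 *	none /proc proc rw,nodiratime 0 0
 *
 * i.e. "device mountpoint fstype options 0 0", with the two trailing
 * fields kept only for /etc/mtab compatibility.
 */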

static int show_vfsstat(struct seq_file *m, void *v)
{
	struct vfsmount *mnt = v;
	int err = 0;

	/* device */
	if (mnt->mnt_devname) {
		seq_puts(m, "device ");
		mangle(m, mnt->mnt_devname);
	} else
		seq_puts(m, "no device");

	/* mount point */
	seq_puts(m, " mounted on ");
	seq_path(m, mnt, mnt->mnt_root, " \t\n\\");
	seq_putc(m, ' ');

	/* file system type */
	seq_puts(m, "with fstype ");
	mangle(m, mnt->mnt_sb->s_type->name);

	/* optional statistics */
	if (mnt->mnt_sb->s_op->show_stats) {
		seq_putc(m, ' ');
		err = mnt->mnt_sb->s_op->show_stats(m, mnt);
	}

	seq_putc(m, '\n');
	return err;
}

struct seq_operations mountstats_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_vfsstat,
};
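
/*
 * The resulting /proc/<pid>/mountstats lines look like (illustrative):
 *
 *	device /dev/hda1 mounted on / with fstype ext3
 *	device nfshost:/export mounted on /mnt with fstype nfs <per-fs stats>
 *
 * where the trailing statistics come from ->show_stats(), if provided.
 */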

/**
 * may_umount_tree - check if a mount tree is busy
 * @mnt: root of mount tree
 *
 * This is called to check if a tree of mounts has any
 * open files, pwds, chroots or sub mounts that are
 * busy.
 */
int may_umount_tree(struct vfsmount *mnt)
{
	int actual_refs = 0;
	int minimum_refs = 0;
	struct vfsmount *p;

	spin_lock(&vfsmount_lock);
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		actual_refs += atomic_read(&p->mnt_count);
		minimum_refs += 2;
	}
	spin_unlock(&vfsmount_lock);

	if (actual_refs > minimum_refs)
		return 0;

	return 1;
}

EXPORT_SYMBOL(may_umount_tree);

/**
 * may_umount - check if a mount point is busy
 * @mnt: root of mount
 *
 * This is called to check if a mount point has any
 * open files, pwds, chroots or sub mounts. If the
 * mount has sub mounts this will return busy
 * regardless of whether the sub mounts are busy.
 *
 * Doesn't take quota and stuff into account. IOW, in some cases it will
 * give false negatives. The main reason why it's here is that we need
 * a non-destructive way to look for easily umountable filesystems.
 */
int may_umount(struct vfsmount *mnt)
{
	int ret = 1;
	spin_lock(&vfsmount_lock);
	if (propagate_mount_busy(mnt, 2))
		ret = 0;
	spin_unlock(&vfsmount_lock);
	return ret;
}

EXPORT_SYMBOL(may_umount);

void release_mounts(struct list_head *head)
{
	struct vfsmount *mnt;
	while (!list_empty(head)) {
		mnt = list_entry(head->next, struct vfsmount, mnt_hash);
		list_del_init(&mnt->mnt_hash);
		if (mnt->mnt_parent != mnt) {
			struct dentry *dentry;
			struct vfsmount *m;
			spin_lock(&vfsmount_lock);
			dentry = mnt->mnt_mountpoint;
			m = mnt->mnt_parent;
			mnt->mnt_mountpoint = mnt->mnt_root;
			mnt->mnt_parent = mnt;
			spin_unlock(&vfsmount_lock);
			dput(dentry);
			mntput(m);
		}
		mntput(mnt);
	}
}

void umount_tree(struct vfsmount *mnt, int propagate, struct list_head *kill)
{
	struct vfsmount *p;

	for (p = mnt; p; p = next_mnt(p, mnt))
		list_move(&p->mnt_hash, kill);

	if (propagate)
		propagate_umount(kill);

	list_for_each_entry(p, kill, mnt_hash) {
		list_del_init(&p->mnt_expire);
		list_del_init(&p->mnt_list);
		__touch_mnt_namespace(p->mnt_ns);
		p->mnt_ns = NULL;
		list_del_init(&p->mnt_child);
		if (p->mnt_parent != p)
			p->mnt_mountpoint->d_mounted--;
		change_mnt_propagation(p, MS_PRIVATE);
	}
}

static int do_umount(struct vfsmount *mnt, int flags)
{
	struct super_block *sb = mnt->mnt_sb;
	int retval;
	LIST_HEAD(umount_list);

	retval = security_sb_umount(mnt, flags);
	if (retval)
		return retval;

	/*
	 * Allow userspace to request a mountpoint be expired rather than
	 * unmounting unconditionally. Unmount only happens if:
	 *  (1) the mark is already set (the mark is cleared by mntput())
	 *  (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount]
	 */
	if (flags & MNT_EXPIRE) {
		if (mnt == current->fs->rootmnt ||
		    flags & (MNT_FORCE | MNT_DETACH))
			return -EINVAL;

		if (atomic_read(&mnt->mnt_count) != 2)
			return -EBUSY;

		if (!xchg(&mnt->mnt_expiry_mark, 1))
			return -EAGAIN;
	}

	/*
	 * If we may have to abort operations to get out of this
	 * mount, and they will themselves hold resources we must
	 * allow the fs to do things. In the Unix tradition of
 * 'Gee, that's tricky, let's do it in userspace' the umount_begin
 * might fail to complete on the first run through as other tasks
 * must return, and the like. That's for the mount program to worry
	 * about for the moment.
	 */

	lock_kernel();
	if (sb->s_op->umount_begin)
		sb->s_op->umount_begin(mnt, flags);
	unlock_kernel();

	/*
	 * No sense to grab the lock for this test, but test itself looks
	 * somewhat bogus. Suggestions for better replacement?
	 * Ho-hum... In principle, we might treat that as umount + switch
	 * to rootfs. GC would eventually take care of the old vfsmount.
	 * Actually it makes sense, especially if rootfs would contain a
	 * /reboot - static binary that would close all descriptors and
	 * call reboot(9). Then init(8) could umount root and exec /reboot.
	 */
	if (mnt == current->fs->rootmnt && !(flags & MNT_DETACH)) {
		/*
		 * Special case for "unmounting" root ...
		 * we just try to remount it readonly.
		 */
		down_write(&sb->s_umount);
		if (!(sb->s_flags & MS_RDONLY)) {
			lock_kernel();
			DQUOT_OFF(sb);
			retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
			unlock_kernel();
		}
		up_write(&sb->s_umount);
		return retval;
	}

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	event++;

	retval = -EBUSY;
	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) {
		if (!list_empty(&mnt->mnt_list))
			umount_tree(mnt, 1, &umount_list);
		retval = 0;
	}
	spin_unlock(&vfsmount_lock);
	if (retval)
		security_sb_umount_busy(mnt);
	up_write(&namespace_sem);
	release_mounts(&umount_list);
	return retval;
}

/*
 * Now umount can handle mount points as well as block devices.
 * This is important for filesystems which use unnamed block devices.
 *
 * We now support a flag for forced unmount like the other 'big iron'
 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
 */

asmlinkage long sys_umount(char __user * name, int flags)
{
	struct nameidata nd;
	int retval;

	retval = __user_walk(name, LOOKUP_FOLLOW, &nd);
	if (retval)
		goto out;
	retval = -EINVAL;
	if (nd.dentry != nd.mnt->mnt_root)
		goto dput_and_out;
	if (!check_mnt(nd.mnt))
		goto dput_and_out;

	retval = -EPERM;
	if (!capable(CAP_SYS_ADMIN))
		goto dput_and_out;

	retval = do_umount(nd.mnt, flags);
dput_and_out:
	path_release_on_umount(&nd);
out:
	return retval;
}
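
/*
 * Illustrative sketch (not part of this file): userspace reaches this via
 * umount(2)/umount2(2), for example:
 *
 *	umount("/mnt/cdrom");			// fails with EBUSY if still in use
 *	umount2("/mnt/cdrom", MNT_DETACH);	// lazy: detach now, clean up when unused
 *	umount2("/mnt/cdrom", MNT_EXPIRE);	// returns EAGAIN and marks the mount;
 *						// a second call unmounts it if it has
 *						// stayed unused in between
 */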

#ifdef __ARCH_WANT_SYS_OLDUMOUNT

/*
 *	The 2.0 compatible umount. No flags.
 */
asmlinkage long sys_oldumount(char __user * name)
{
	return sys_umount(name, 0);
}

#endif

static int mount_is_safe(struct nameidata *nd)
{
	if (capable(CAP_SYS_ADMIN))
		return 0;
	return -EPERM;
#ifdef notyet
	if (S_ISLNK(nd->dentry->d_inode->i_mode))
		return -EPERM;
	if (nd->dentry->d_inode->i_mode & S_ISVTX) {
		if (current->uid != nd->dentry->d_inode->i_uid)
			return -EPERM;
	}
	if (vfs_permission(nd, MAY_WRITE))
		return -EPERM;
	return 0;
#endif
}

static int lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
{
	while (1) {
		if (d == dentry)
			return 1;
		if (d == NULL || d == d->d_parent)
			return 0;
		d = d->d_parent;
	}
}

struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
					int flag)
{
	struct vfsmount *res, *p, *q, *r, *s;
	struct nameidata nd;

	if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(mnt))
		return NULL;

	res = q = clone_mnt(mnt, dentry, flag);
	if (!q)
		goto Enomem;
	q->mnt_mountpoint = mnt->mnt_mountpoint;

	p = mnt;
	list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
		if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
			continue;

		for (s = r; s; s = next_mnt(s, r)) {
			if (!(flag & CL_COPY_ALL) && IS_MNT_UNBINDABLE(s)) {
				s = skip_mnt_tree(s);
				continue;
			}
			while (p != s->mnt_parent) {
				p = p->mnt_parent;
				q = q->mnt_parent;
			}
			p = s;
			nd.mnt = q;
			nd.dentry = p->mnt_mountpoint;
			q = clone_mnt(p, p->mnt_root, flag);
			if (!q)
				goto Enomem;
			spin_lock(&vfsmount_lock);
			list_add_tail(&q->mnt_list, &res->mnt_list);
			attach_mnt(q, &nd);
			spin_unlock(&vfsmount_lock);
		}
	}
	return res;
Enomem:
	if (res) {
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(res, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}
	return NULL;
}

/*
 *  @source_mnt : mount tree to be attached
 *  @nd         : place where the mount tree @source_mnt is attached
 *  @parent_nd  : if non-null, detach the source_mnt from its parent and
 *  		   store the parent mount and mountpoint dentry.
 *  		   (done when source_mnt is moved)
 *
 *  NOTE: the tables below explain the semantics when a source mount
 *  of a given type is attached to a destination mount of a given type.
 * ---------------------------------------------------------------------------
 * |         BIND MOUNT OPERATION                                            |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (++)   |     shared (+) |     shared(+++)|  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+)    |      private   |      slave (*) |  invalid   |
 * ***************************************************************************
 * A bind operation clones the source mount and mounts the clone on the
 * destination mount.
 *
 * (++)  the cloned mount is propagated to all the mounts in the propagation
 * 	 tree of the destination mount and the cloned mount is added to
 * 	 the peer group of the source mount.
 * (+)   the cloned mount is created under the destination mount and is marked
 *       as shared. The cloned mount is added to the peer group of the source
 *       mount.
 * (+++) the mount is propagated to all the mounts in the propagation tree
 *       of the destination mount and the cloned mount is made slave
 *       of the same master as that of the source mount. The cloned mount
 *       is marked as 'shared and slave'.
 * (*)   the cloned mount is made a slave of the same master as that of the
 * 	 source mount.
 *
 * ---------------------------------------------------------------------------
 * |         		MOVE MOUNT OPERATION                                 |
 * |**************************************************************************
 * | source-->| shared        |       private  |       slave    | unbindable |
 * | dest     |               |                |                |            |
 * |   |      |               |                |                |            |
 * |   v      |               |                |                |            |
 * |**************************************************************************
 * |  shared  | shared (+)    |     shared (+) |    shared(+++) |  invalid   |
 * |          |               |                |                |            |
 * |non-shared| shared (+*)   |      private   |    slave (*)   | unbindable |
 * ***************************************************************************
 *
 * (+)  the mount is moved to the destination. And is then propagated to
 * 	all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++)  the mount is moved to the destination and is then propagated to
 * 	all the mounts belonging to the destination mount's propagation tree.
 * 	the mount is marked as 'shared and slave'.
 * (*)	the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above are
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct vfsmount *source_mnt,
			struct nameidata *nd, struct nameidata *parent_nd)
{
	LIST_HEAD(tree_list);
	struct vfsmount *dest_mnt = nd->mnt;
	struct dentry *dest_dentry = nd->dentry;
	struct vfsmount *child, *p;

	if (propagate_mnt(dest_mnt, dest_dentry, source_mnt, &tree_list))
		return -EINVAL;

	if (IS_MNT_SHARED(dest_mnt)) {
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	}

	spin_lock(&vfsmount_lock);
	if (parent_nd) {
		detach_mnt(source_mnt, parent_nd);
		attach_mnt(source_mnt, nd);
		touch_mnt_namespace(current->nsproxy->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_dentry, source_mnt);
		commit_tree(source_mnt);
	}

	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
		list_del_init(&child->mnt_hash);
		commit_tree(child);
	}
	spin_unlock(&vfsmount_lock);
	return 0;
}
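
/*
 * Illustrative sketch (not part of this file): the propagation semantics in
 * the tables above are exercised from userspace through mount(2), e.g.
 * (assuming /a is already a mount point):
 *
 *	mount(NULL, "/a", NULL, MS_SHARED, NULL);	// make /a shared
 *	mount("/a", "/b", NULL, MS_BIND, NULL);		// bind: /b joins /a's peer group
 *	mount(NULL, "/b", NULL, MS_SLAVE, NULL);	// /b becomes a slave of that group
 *	mount(NULL, "/a", NULL, MS_UNBINDABLE, NULL);	// /a can no longer be bind-mounted
 *	mount("/b", "/c", NULL, MS_MOVE, NULL);		// move the mount at /b to /c
 */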

static int graft_tree(struct vfsmount *mnt, struct nameidata *nd)
{
	int err;
	if (mnt->mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
	      S_ISDIR(mnt->mnt_root->d_inode->i_mode))
		return -ENOTDIR;

	err = -ENOENT;
	mutex_lock(&nd->dentry->d_inode->i_mutex);
	if (IS_DEADDIR(nd->dentry->d_inode))
		goto out_unlock;

	err = security_sb_check_sb(mnt, nd);
	if (err)
		goto out_unlock;

	err = -ENOENT;
	if (IS_ROOT(nd->dentry) || !d_unhashed(nd->dentry))
		err = attach_recursive_mnt(mnt, nd, NULL);
out_unlock:
	mutex_unlock(&nd->dentry->d_inode->i_mutex);
	if (!err)
		security_sb_post_addmount(mnt, nd);
	return err;
}

/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct nameidata *nd, int flag)
{
	struct vfsmount *m, *mnt = nd->mnt;
	int recurse = flag & MS_REC;
	int type = flag & ~MS_REC;

	if (nd->dentry != nd->mnt->mnt_root)
		return -EINVAL;

	down_write(&namespace_sem);
	spin_lock(&vfsmount_lock);
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	spin_unlock(&vfsmount_lock);
	up_write(&namespace_sem);
	return 0;
}

/*
 * do loopback mount.
 */
static int do_loopback(struct nameidata *nd, char *old_name, int recurse)
{
	struct nameidata old_nd;
	struct vfsmount *mnt = NULL;
	int err = mount_is_safe(nd);
	if (err)
		return err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&namespace_sem);
	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old_nd.mnt))
 		goto out;

	if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
		goto out;

	err = -ENOMEM;
	if (recurse)
		mnt = copy_tree(old_nd.mnt, old_nd.dentry, 0);
	else
		mnt = clone_mnt(old_nd.mnt, old_nd.dentry, 0);

	if (!mnt)
		goto out;

	err = graft_tree(mnt, nd);
	if (err) {
		LIST_HEAD(umount_list);
		spin_lock(&vfsmount_lock);
		umount_tree(mnt, 0, &umount_list);
		spin_unlock(&vfsmount_lock);
		release_mounts(&umount_list);
	}

out:
	up_write(&namespace_sem);
	path_release(&old_nd);
	return err;
}

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct nameidata *nd, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = nd->mnt->mnt_sb;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!check_mnt(nd->mnt))
		return -EINVAL;

	if (nd->dentry != nd->mnt->mnt_root)
		return -EINVAL;

	down_write(&sb->s_umount);
	err = do_remount_sb(sb, flags, data, 0);
	if (!err)
		nd->mnt->mnt_flags = mnt_flags;
	up_write(&sb->s_umount);
	if (!err)
		security_sb_post_remount(nd->mnt, flags, data);
	return err;
}
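
/*
 * Illustrative sketch (not part of this file): reached from userspace as,
 * for example,
 *
 *	mount(NULL, "/", NULL, MS_REMOUNT | MS_RDONLY, NULL);
 *
 * which changes the flags of the filesystem mounted at "/" without
 * touching the mount tree.
 */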

static inline int tree_contains_unbindable(struct vfsmount *mnt)
{
	struct vfsmount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

static int do_move_mount(struct nameidata *nd, char *old_name)
{
	struct nameidata old_nd, parent_nd;
	struct vfsmount *p;
	int err = 0;
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = path_lookup(old_name, LOOKUP_FOLLOW, &old_nd);
	if (err)
		return err;

	down_write(&namespace_sem);
	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->mnt) || !check_mnt(old_nd.mnt))
		goto out;

	err = -ENOENT;
	mutex_lock(&nd->dentry->d_inode->i_mutex);
	if (IS_DEADDIR(nd->dentry->d_inode))
		goto out1;

	if (!IS_ROOT(nd->dentry) && d_unhashed(nd->dentry))
		goto out1;

	err = -EINVAL;
	if (old_nd.dentry != old_nd.mnt->mnt_root)
		goto out1;

	if (old_nd.mnt == old_nd.mnt->mnt_parent)
		goto out1;

	if (S_ISDIR(nd->dentry->d_inode->i_mode) !=
	      S_ISDIR(old_nd.dentry->d_inode->i_mode))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (old_nd.mnt->mnt_parent && IS_MNT_SHARED(old_nd.mnt->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(nd->mnt) && tree_contains_unbindable(old_nd.mnt))
		goto out1;
	err = -ELOOP;
	for (p = nd->mnt; p->mnt_parent != p; p = p->mnt_parent)
		if (p == old_nd.mnt)
			goto out1;

	if ((err = attach_recursive_mnt(old_nd.mnt, nd, &parent_nd)))
		goto out1;

	spin_lock(&vfsmount_lock);
	/* if the mount is moved, it should no longer be expire
	 * automatically */
	list_del_init(&old_nd.mnt->mnt_expire);
	spin_unlock(&vfsmount_lock);
out1:
	mutex_unlock(&nd->dentry->d_inode->i_mutex);
out:
	up_write(&namespace_sem);
	if (!err)
		path_release(&parent_nd);
	path_release(&old_nd);
	return err;
}

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct nameidata *nd, char *type, int flags,
			int mnt_flags, char *name, void *data)
{
	struct vfsmount *mnt;

	if (!type || !memchr(type, 0, PAGE_SIZE))
		return -EINVAL;

	/* we need capabilities... */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	mnt = do_kern_mount(type, flags, name, data);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	return do_add_mount(mnt, nd, mnt_flags, NULL);
}

/*
 * add a mount into a namespace's mount tree
 * - provide the option of adding the new mount to an expiration list
 */
int do_add_mount(struct vfsmount *newmnt, struct nameidata *nd,
		 int mnt_flags, struct list_head *fslist)
{
	int err;

	down_write(&namespace_sem);
	/* Something was mounted here while we slept */
	while (d_mountpoint(nd->dentry) && follow_down(&nd->mnt, &nd->dentry))
		;
	err = -EINVAL;
	if (!check_mnt(nd->mnt))
		goto unlock;

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (nd->mnt->mnt_sb == newmnt->mnt_sb &&
	    nd->mnt->mnt_root == nd->dentry)
		goto unlock;

	err = -EINVAL;
	if (S_ISLNK(newmnt->mnt_root->d_inode->i_mode))
		goto unlock;

	newmnt->mnt_flags = mnt_flags;
	if ((err = graft_tree(newmnt, nd)))
		goto unlock;

	if (fslist) {
		/* add to the specified expiration list */
		spin_lock(&vfsmount_lock);
		list_add_tail(&newmnt->mnt_expire, fslist);
		spin_unlock(&vfsmount_lock);
	}
	up_write(&namespace_sem);
	return 0;

unlock:
	up_write(&namespace_sem);
	mntput(newmnt);
	return err;
}

EXPORT_SYMBOL_GPL(do_add_mount);
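
/*
 * Usage sketch (illustrative, not part of this file): an automounting
 * filesystem (NFS does something similar when crossing server-side
 * mountpoints) creates a submount, adds it with MNT_SHRINKABLE on a
 * private expiration list, and later reaps unused entries from a timer
 * or workqueue.  examplefs_automount_list below is hypothetical:
 *
 *	static LIST_HEAD(examplefs_automount_list);
 *
 *	mnt = do_kern_mount("examplefs", 0, dev_name, data);
 *	if (!IS_ERR(mnt))
 *		err = do_add_mount(mnt, nd,
 *				   nd->mnt->mnt_flags | MNT_SHRINKABLE,
 *				   &examplefs_automount_list);
 *	...
 *	// periodically, from delayed work:
 *	mark_mounts_for_expiry(&examplefs_automount_list);
 */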

static void expire_mount(struct vfsmount *mnt, struct list_head *mounts,
				struct list_head *umounts)
{
	spin_lock(&vfsmount_lock);

	/*
	 * Check if mount is still attached, if not, let whoever holds it deal
	 * with the sucker
	 */
	if (mnt->mnt_parent == mnt) {
		spin_unlock(&vfsmount_lock);
		return;
	}

	/*
	 * Check that it is still dead: the count should now be 2 - as
	 * contributed by the vfsmount parent and the mntget above
	 */
	if (!propagate_mount_busy(mnt, 2)) {
		/* delete from the namespace */
		touch_mnt_namespace(mnt->mnt_ns);
		list_del_init(&mnt->mnt_list);
		mnt->mnt_ns = NULL;
		umount_tree(mnt, 1, umounts);
		spin_unlock(&vfsmount_lock);
	} else {
		/*
		 * Someone brought it back to life whilst we didn't have any
		 * locks held so return it to the expiration list
		 */
		list_add_tail(&mnt->mnt_expire, mounts);
		spin_unlock(&vfsmount_lock);
	}
}

/*
 * go through the vfsmounts we've just consigned to the graveyard to
 * - check that they're still dead
 * - delete the vfsmount from the appropriate namespace under lock
 * - dispose of the corpse
 */
static void expire_mount_list(struct list_head *graveyard, struct list_head *mounts)
{
	struct mnt_namespace *ns;
	struct vfsmount *mnt;

	while (!list_empty(graveyard)) {
		LIST_HEAD(umounts);
		mnt = list_entry(graveyard->next, struct vfsmount, mnt_expire);
		list_del_init(&mnt->mnt_expire);

		/* don't do anything if the namespace is dead - all the
		 * vfsmounts from it are going away anyway */
		ns = mnt->mnt_ns;
		if (!ns || !ns->root)
			continue;
		get_mnt_ns(ns);

		spin_unlock(&vfsmount_lock);
		down_write(&namespace_sem);
		expire_mount(mnt, mounts, &umounts);
		up_write(&namespace_sem);
		release_mounts(&umounts);
		mntput(mnt);
		put_mnt_ns(ns);
		spin_lock(&vfsmount_lock);
	}
}

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct vfsmount *mnt, *next;
	LIST_HEAD(graveyard);

	if (list_empty(mounts))
		return;

	spin_lock(&vfsmount_lock);

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
		    atomic_read(&mnt->mnt_count) != 1)
			continue;

		mntget(mnt);
		list_move(&mnt->mnt_expire, &graveyard);
	}

	expire_mount_list(&graveyard, mounts);

	spin_unlock(&vfsmount_lock);
}

EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);

/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct vfsmount *parent, struct list_head *graveyard)
{
	struct vfsmount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct vfsmount *mnt = list_entry(tmp, struct vfsmount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			mntget(mnt);
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 */
void shrink_submounts(struct vfsmount *mountpoint, struct list_head *mounts)