#include <linux/ceph/ceph_debug.h>

#include <linux/file.h>
#include <linux/namei.h>
#include <linux/random.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/pagelist.h>

static u64 lock_secret;
static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
                                         struct ceph_mds_request *req);

static inline u64 secure_addr(void *addr)
{
	u64 v = lock_secret ^ (u64)(unsigned long)addr;
	/*
	 * Set the most significant bit, so that MDS knows the 'owner'
	 * is sufficient to identify the owner of the lock. (The old code
	 * used both 'owner' and 'pid'.)
	 */
	v |= (1ULL << 63);
	return v;
}

void __init ceph_flock_init(void)
{
	get_random_bytes(&lock_secret, sizeof(lock_secret));
}

/**
 * Implement fcntl and flock locking functions.
 */
static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
			     int cmd, u8 wait, struct file_lock *fl)
{
	struct inode *inode = file_inode(file);
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_mds_request *req;
	int err;
	u64 length = 0;
	u64 owner;

	if (operation != CEPH_MDS_OP_SETFILELOCK || cmd == CEPH_LOCK_UNLOCK)
		wait = 0;

	req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;

	/* mds requires start and length rather than start and end */
	if (LLONG_MAX == fl->fl_end)
		length = 0;
	else
		length = fl->fl_end - fl->fl_start + 1;

	owner = secure_addr(fl->fl_owner);

	dout("ceph_lock_message: rule: %d, op: %d, owner: %llx, pid: %llu, "
	     "start: %llu, length: %llu, wait: %d, type: %d", (int)lock_type,
	     (int)operation, owner, (u64)fl->fl_pid, fl->fl_start, length,
	     wait, fl->fl_type);

	req->r_args.filelock_change.rule = lock_type;
	req->r_args.filelock_change.type = cmd;
	req->r_args.filelock_change.owner = cpu_to_le64(owner);
	req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);
	req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start);
	req->r_args.filelock_change.length = cpu_to_le64(length);
	req->r_args.filelock_change.wait = wait;

	if (wait)
		req->r_wait_for_completion = ceph_lock_wait_for_completion;

	err = ceph_mdsc_do_request(mdsc, inode, req);

	if (operation == CEPH_MDS_OP_GETFILELOCK) {
		fl->fl_pid = le64_to_cpu(req->r_reply_info.filelock_reply->pid);
		if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
			fl->fl_type = F_RDLCK;
		else if (CEPH_LOCK_EXCL == req->r_reply_info.filelock_reply->type)
			fl->fl_type = F_WRLCK;
		else
			fl->fl_type = F_UNLCK;

		fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start);
		length = le64_to_cpu(req->r_reply_info.filelock_reply->start) +
			 le64_to_cpu(req->r_reply_info.filelock_reply->length);
		if (length >= 1)
			fl->fl_end = length - 1;
		else
			fl->fl_end = 0;

	}
	ceph_mdsc_put_request(req);
	dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
	     "length: %llu, wait: %d, type: %d, err code %d", (int)lock_type,
	     (int)operation, (u64)fl->fl_pid, fl->fl_start,
	     length, wait, fl->fl_type, err);
	return err;
}

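/*
 * Handle waiting for a blocking SETFILELOCK request: if the wait is
 * interrupted by a signal, tell the MDS to abandon the pending lock by
 * sending a matching *_INTR unlock request, then wait (uninterruptibly)
 * for the original request's reply so lock state stays consistent.
 */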
static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
                                         struct ceph_mds_request *req)
{
	struct ceph_mds_request *intr_req;
	struct inode *inode = req->r_inode;
	int err, lock_type;

	BUG_ON(req->r_op != CEPH_MDS_OP_SETFILELOCK);
	if (req->r_args.filelock_change.rule == CEPH_LOCK_FCNTL)
		lock_type = CEPH_LOCK_FCNTL_INTR;
	else if (req->r_args.filelock_change.rule == CEPH_LOCK_FLOCK)
		lock_type = CEPH_LOCK_FLOCK_INTR;
	else
		BUG_ON(1);
	BUG_ON(req->r_args.filelock_change.type == CEPH_LOCK_UNLOCK);

	err = wait_for_completion_interruptible(&req->r_completion);
	if (!err)
		return 0;

	dout("ceph_lock_wait_for_completion: request %llu was interrupted\n",
	     req->r_tid);

	intr_req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETFILELOCK,
					    USE_AUTH_MDS);
	if (IS_ERR(intr_req))
		return PTR_ERR(intr_req);

	intr_req->r_inode = inode;
	ihold(inode);
	intr_req->r_num_caps = 1;

	intr_req->r_args.filelock_change = req->r_args.filelock_change;
	intr_req->r_args.filelock_change.rule = lock_type;
	intr_req->r_args.filelock_change.type = CEPH_LOCK_UNLOCK;

	err = ceph_mdsc_do_request(mdsc, inode, intr_req);
	ceph_mdsc_put_request(intr_req);

	if (err && err != -ERESTARTSYS)
		return err;

	wait_for_completion(&req->r_completion);
	return 0;
}

/**
 * Attempt to set an fcntl lock.
 * For now, this just goes away to the server. Later it may be more awesome.
 */
int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
{
	u8 lock_cmd;
	int err;
	u8 wait = 0;
	u16 op = CEPH_MDS_OP_SETFILELOCK;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	/* No mandatory locks */
	if (__mandatory_lock(file->f_mapping->host) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	dout("ceph_lock, fl_owner: %p", fl->fl_owner);

	/* set wait bit as appropriate, then make command as Ceph expects it */
	if (IS_GETLK(cmd))
		op = CEPH_MDS_OP_GETFILELOCK;
	else if (IS_SETLKW(cmd))
		wait = 1;

	if (F_RDLCK == fl->fl_type)
		lock_cmd = CEPH_LOCK_SHARED;
	else if (F_WRLCK == fl->fl_type)
		lock_cmd = CEPH_LOCK_EXCL;
	else
		lock_cmd = CEPH_LOCK_UNLOCK;

	err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file, lock_cmd, wait, fl);
	if (!err) {
		if (op != CEPH_MDS_OP_GETFILELOCK) {
			dout("mds locked, locking locally");
			err = posix_lock_file(file, fl, NULL);
			if (err && (CEPH_MDS_OP_SETFILELOCK == op)) {
				/* undo! This should only happen if
				 * the kernel detects local
				 * deadlock. */
				ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
						  CEPH_LOCK_UNLOCK, 0, fl);
				dout("got %d on posix_lock_file, undid lock",
				     err);
			}
		}
	}
	return err;
}
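/*
 * Illustrative sketch (not part of this file): the userspace view of the
 * path above, assuming fd is an open descriptor on a CephFS mount.  An
 * fcntl() byte-range lock reaches ceph_lock() through the VFS ->lock
 * operation; F_SETLKW makes IS_SETLKW(cmd) true, so the MDS request is
 * sent with wait = 1.  An l_len of 0 locks to end of file, which the
 * VFS turns into fl_end == OFFSET_MAX and ceph_lock_message() encodes
 * as length 0.
 *
 *	struct flock fl = {
 *		.l_type   = F_WRLCK,
 *		.l_whence = SEEK_SET,
 *		.l_start  = 0,
 *		.l_len    = 0,
 *	};
 *	fcntl(fd, F_SETLKW, &fl);
 */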

int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
{
	u8 lock_cmd;
	int err;
	u8 wait = 0;

	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	/* No mandatory locks */
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	dout("ceph_flock, fl_file: %p", fl->fl_file);

	if (IS_SETLKW(cmd))
		wait = 1;

	if (F_RDLCK == fl->fl_type)
		lock_cmd = CEPH_LOCK_SHARED;
	else if (F_WRLCK == fl->fl_type)
		lock_cmd = CEPH_LOCK_EXCL;
	else
		lock_cmd = CEPH_LOCK_UNLOCK;

	err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
				file, lock_cmd, wait, fl);
	if (!err) {
		err = locks_lock_file_wait(file, fl);
		if (err) {
			ceph_lock_message(CEPH_LOCK_FLOCK,
					  CEPH_MDS_OP_SETFILELOCK,
					  file, CEPH_LOCK_UNLOCK, 0, fl);
			dout("got %d on locks_lock_file_wait, undid lock", err);
		}
	}
	return err;
}
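/*
 * Illustrative sketch (defined elsewhere, shown for context): ceph_lock()
 * and ceph_flock() are hooked up through the ceph file_operations in
 * file.c, roughly:
 *
 *	const struct file_operations ceph_file_fops = {
 *		...
 *		.lock	= ceph_lock,
 *		.flock	= ceph_flock,
 *		...
 *	};
 *
 * so the VFS routes fcntl() byte-range locks to ceph_lock() and flock()
 * requests to ceph_flock().
 */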

/*
 * Fills in the passed counter variables, so you can prepare pagelist metadata
 * before calling ceph_encode_locks_to_buffer.
 */
void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
{
	struct file_lock *lock;
	struct file_lock_context *ctx;

	*fcntl_count = 0;
	*flock_count = 0;

	ctx = inode->i_flctx;
	if (ctx) {
		spin_lock(&ctx->flc_lock);
		list_for_each_entry(lock, &ctx->flc_posix, fl_list)
			++(*fcntl_count);
		list_for_each_entry(lock, &ctx->flc_flock, fl_list)
			++(*flock_count);
		spin_unlock(&ctx->flc_lock);
	}
	dout("counted %d flock locks and %d fcntl locks",
	     *flock_count, *fcntl_count);
}

/**
 * Encode the flock and fcntl locks for the given inode into the ceph_filelock
 * array. The lock lists are walked under ctx->flc_lock, which this
 * function takes itself.
 * If we encounter more of a specific lock type than expected, return -ENOSPC.
 */
int ceph_encode_locks_to_buffer(struct inode *inode,
				struct ceph_filelock *flocks,
				int num_fcntl_locks, int num_flock_locks)
{
	struct file_lock *lock;
	struct file_lock_context *ctx = inode->i_flctx;
	int err = 0;
	int seen_fcntl = 0;
	int seen_flock = 0;
	int l = 0;

	dout("encoding %d flock and %d fcntl locks", num_flock_locks,
	     num_fcntl_locks);

	if (!ctx)
		return 0;

	spin_lock(&ctx->flc_lock);
	list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
		++seen_fcntl;
		if (seen_fcntl > num_fcntl_locks) {
			err = -ENOSPC;
			goto fail;
		}
		err = lock_to_ceph_filelock(lock, &flocks[l]);
		if (err)
			goto fail;
		++l;
	}
	list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
		++seen_flock;
		if (seen_flock > num_flock_locks) {
			err = -ENOSPC;
			goto fail;
		}
		err = lock_to_ceph_filelock(lock, &flocks[l]);
		if (err)
			goto fail;
		++l;
	}
fail:
	spin_unlock(&ctx->flc_lock);
	return err;
}

/**
 * Copy the encoded flock and fcntl locks into the pagelist.
 * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
 * sequential flock locks.
 * Returns zero on success.
 */
int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
			   struct ceph_pagelist *pagelist,
			   int num_fcntl_locks, int num_flock_locks)
{
	int err = 0;
	__le32 nlocks;

	nlocks = cpu_to_le32(num_fcntl_locks);
	err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
	if (err)
		goto out_fail;

	err = ceph_pagelist_append(pagelist, flocks,
				   num_fcntl_locks * sizeof(*flocks));
	if (err)
		goto out_fail;

	nlocks = cpu_to_le32(num_flock_locks);
	err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
	if (err)
		goto out_fail;

	err = ceph_pagelist_append(pagelist,
				   &flocks[num_fcntl_locks],
				   num_flock_locks * sizeof(*flocks));
out_fail:
	return err;
}
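/*
 * Illustrative sketch (not part of this file): how a caller such as the
 * MDS reconnect path in mds_client.c might drive the helpers above.  The
 * pagelist variable and GFP flags are assumptions for the example.  Locks
 * can be added between counting and encoding, in which case
 * ceph_encode_locks_to_buffer() returns -ENOSPC and the caller is
 * expected to retry with fresh counts.
 *
 *	int num_fcntl_locks, num_flock_locks;
 *	struct ceph_filelock *flocks;
 *	int err;
 *
 *	ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
 *	flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
 *			       sizeof(*flocks), GFP_NOFS);
 *	if (!flocks)
 *		return -ENOMEM;
 *	err = ceph_encode_locks_to_buffer(inode, flocks,
 *					  num_fcntl_locks, num_flock_locks);
 *	if (!err)
 *		err = ceph_locks_to_pagelist(flocks, pagelist,
 *					     num_fcntl_locks, num_flock_locks);
 *	kfree(flocks);
 */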

/*
 * Given a pointer to a lock, convert it to a ceph filelock
 */
int lock_to_ceph_filelock(struct file_lock *lock,
			  struct ceph_filelock *cephlock)
{
	int err = 0;
	cephlock->start = cpu_to_le64(lock->fl_start);
	cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
	cephlock->client = cpu_to_le64(0);
	cephlock->pid = cpu_to_le64((u64)lock->fl_pid);
	cephlock->owner = cpu_to_le64(secure_addr(lock->fl_owner));

	switch (lock->fl_type) {
	case F_RDLCK:
		cephlock->type = CEPH_LOCK_SHARED;
		break;
	case F_WRLCK:
		cephlock->type = CEPH_LOCK_EXCL;
		break;
	case F_UNLCK:
		cephlock->type = CEPH_LOCK_UNLOCK;
		break;
	default:
		dout("Have unknown lock type %d", lock->fl_type);
		err = -EINVAL;
	}

	return err;
}