Commit 22e2c507 authored by Jens Axboe's avatar Jens Axboe Committed by Linus Torvalds
Browse files

[PATCH] Update cfq io scheduler to time sliced design



This updates the CFQ io scheduler to the new time sliced design (cfq
v3).  It provides full process fairness, while giving excellent
aggregate system throughput even for many competing processes.  It
supports io priorities, either inherited from the cpu nice value or set
directly with the ioprio_get/set syscalls.  The latter closely mimic
set/getpriority.

This import is based on my latest from -mm.
Signed-off-by: default avatarJens Axboe <axboe@suse.de>
Signed-off-by: default avatarLinus Torvalds <torvalds@osdl.org>
parent 020f46a3
...@@ -289,3 +289,5 @@ ENTRY(sys_call_table) ...@@ -289,3 +289,5 @@ ENTRY(sys_call_table)
.long sys_add_key .long sys_add_key
.long sys_request_key .long sys_request_key
.long sys_keyctl .long sys_keyctl
.long sys_ioprio_set
.long sys_ioprio_get /* 290 */
...@@ -1577,8 +1577,8 @@ sys_call_table: ...@@ -1577,8 +1577,8 @@ sys_call_table:
data8 sys_add_key data8 sys_add_key
data8 sys_request_key data8 sys_request_key
data8 sys_keyctl data8 sys_keyctl
data8 sys_ni_syscall data8 sys_ioprio_set
data8 sys_ni_syscall // 1275 data8 sys_ioprio_get // 1275
data8 sys_set_zone_reclaim data8 sys_set_zone_reclaim
data8 sys_ni_syscall data8 sys_ni_syscall
data8 sys_ni_syscall data8 sys_ni_syscall
......
...@@ -1449,3 +1449,5 @@ _GLOBAL(sys_call_table) ...@@ -1449,3 +1449,5 @@ _GLOBAL(sys_call_table)
.long sys_request_key /* 270 */ .long sys_request_key /* 270 */
.long sys_keyctl .long sys_keyctl
.long sys_waitid .long sys_waitid
.long sys_ioprio_set
.long sys_ioprio_get
...@@ -1806,7 +1806,8 @@ static void as_put_request(request_queue_t *q, struct request *rq) ...@@ -1806,7 +1806,8 @@ static void as_put_request(request_queue_t *q, struct request *rq)
rq->elevator_private = NULL; rq->elevator_private = NULL;
} }
static int as_set_request(request_queue_t *q, struct request *rq, int gfp_mask) static int as_set_request(request_queue_t *q, struct request *rq,
struct bio *bio, int gfp_mask)
{ {
struct as_data *ad = q->elevator->elevator_data; struct as_data *ad = q->elevator->elevator_data;
struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask); struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);
...@@ -1827,7 +1828,7 @@ static int as_set_request(request_queue_t *q, struct request *rq, int gfp_mask) ...@@ -1827,7 +1828,7 @@ static int as_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
return 1; return 1;
} }
static int as_may_queue(request_queue_t *q, int rw) static int as_may_queue(request_queue_t *q, int rw, struct bio *bio)
{ {
int ret = ELV_MQUEUE_MAY; int ret = ELV_MQUEUE_MAY;
struct as_data *ad = q->elevator->elevator_data; struct as_data *ad = q->elevator->elevator_data;
......
This diff is collapsed.
...@@ -760,7 +760,8 @@ static void deadline_put_request(request_queue_t *q, struct request *rq) ...@@ -760,7 +760,8 @@ static void deadline_put_request(request_queue_t *q, struct request *rq)
} }
static int static int
deadline_set_request(request_queue_t *q, struct request *rq, int gfp_mask) deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
int gfp_mask)
{ {
struct deadline_data *dd = q->elevator->elevator_data; struct deadline_data *dd = q->elevator->elevator_data;
struct deadline_rq *drq; struct deadline_rq *drq;
......
...@@ -486,12 +486,13 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq) ...@@ -486,12 +486,13 @@ struct request *elv_former_request(request_queue_t *q, struct request *rq)
return NULL; return NULL;
} }
int elv_set_request(request_queue_t *q, struct request *rq, int gfp_mask) int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
int gfp_mask)
{ {
elevator_t *e = q->elevator; elevator_t *e = q->elevator;
if (e->ops->elevator_set_req_fn) if (e->ops->elevator_set_req_fn)
return e->ops->elevator_set_req_fn(q, rq, gfp_mask); return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask);
rq->elevator_private = NULL; rq->elevator_private = NULL;
return 0; return 0;
...@@ -505,12 +506,12 @@ void elv_put_request(request_queue_t *q, struct request *rq) ...@@ -505,12 +506,12 @@ void elv_put_request(request_queue_t *q, struct request *rq)
e->ops->elevator_put_req_fn(q, rq); e->ops->elevator_put_req_fn(q, rq);
} }
int elv_may_queue(request_queue_t *q, int rw) int elv_may_queue(request_queue_t *q, int rw, struct bio *bio)
{ {
elevator_t *e = q->elevator; elevator_t *e = q->elevator;
if (e->ops->elevator_may_queue_fn) if (e->ops->elevator_may_queue_fn)
return e->ops->elevator_may_queue_fn(q, rw); return e->ops->elevator_may_queue_fn(q, rw, bio);
return ELV_MQUEUE_MAY; return ELV_MQUEUE_MAY;
} }
......
...@@ -276,6 +276,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq) ...@@ -276,6 +276,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
rq->errors = 0; rq->errors = 0;
rq->rq_status = RQ_ACTIVE; rq->rq_status = RQ_ACTIVE;
rq->bio = rq->biotail = NULL; rq->bio = rq->biotail = NULL;
rq->ioprio = 0;
rq->buffer = NULL; rq->buffer = NULL;
rq->ref_count = 1; rq->ref_count = 1;
rq->q = q; rq->q = q;
...@@ -1442,10 +1443,6 @@ void __generic_unplug_device(request_queue_t *q) ...@@ -1442,10 +1443,6 @@ void __generic_unplug_device(request_queue_t *q)
if (!blk_remove_plug(q)) if (!blk_remove_plug(q))
return; return;
/*
* was plugged, fire request_fn if queue has stuff to do
*/
if (elv_next_request(q))
q->request_fn(q); q->request_fn(q);
} }
EXPORT_SYMBOL(__generic_unplug_device); EXPORT_SYMBOL(__generic_unplug_device);
...@@ -1776,8 +1773,8 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq) ...@@ -1776,8 +1773,8 @@ static inline void blk_free_request(request_queue_t *q, struct request *rq)
mempool_free(rq, q->rq.rq_pool); mempool_free(rq, q->rq.rq_pool);
} }
static inline struct request *blk_alloc_request(request_queue_t *q, int rw, static inline struct request *
int gfp_mask) blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask)
{ {
struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
...@@ -1790,7 +1787,7 @@ static inline struct request *blk_alloc_request(request_queue_t *q, int rw, ...@@ -1790,7 +1787,7 @@ static inline struct request *blk_alloc_request(request_queue_t *q, int rw,
*/ */
rq->flags = rw; rq->flags = rw;
if (!elv_set_request(q, rq, gfp_mask)) if (!elv_set_request(q, rq, bio, gfp_mask))
return rq; return rq;
mempool_free(rq, q->rq.rq_pool); mempool_free(rq, q->rq.rq_pool);
...@@ -1872,7 +1869,8 @@ static void freed_request(request_queue_t *q, int rw) ...@@ -1872,7 +1869,8 @@ static void freed_request(request_queue_t *q, int rw)
/* /*
* Get a free request, queue_lock must not be held * Get a free request, queue_lock must not be held
*/ */
static struct request *get_request(request_queue_t *q, int rw, int gfp_mask) static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
int gfp_mask)
{ {
struct request *rq = NULL; struct request *rq = NULL;
struct request_list *rl = &q->rq; struct request_list *rl = &q->rq;
...@@ -1895,7 +1893,7 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask) ...@@ -1895,7 +1893,7 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
} }
} }
switch (elv_may_queue(q, rw)) { switch (elv_may_queue(q, rw, bio)) {
case ELV_MQUEUE_NO: case ELV_MQUEUE_NO:
goto rq_starved; goto rq_starved;
case ELV_MQUEUE_MAY: case ELV_MQUEUE_MAY:
...@@ -1920,7 +1918,7 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask) ...@@ -1920,7 +1918,7 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
set_queue_congested(q, rw); set_queue_congested(q, rw);
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
rq = blk_alloc_request(q, rw, gfp_mask); rq = blk_alloc_request(q, rw, bio, gfp_mask);
if (!rq) { if (!rq) {
/* /*
* Allocation failed presumably due to memory. Undo anything * Allocation failed presumably due to memory. Undo anything
...@@ -1961,7 +1959,8 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask) ...@@ -1961,7 +1959,8 @@ static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
* No available requests for this queue, unplug the device and wait for some * No available requests for this queue, unplug the device and wait for some
* requests to become available. * requests to become available.
*/ */
static struct request *get_request_wait(request_queue_t *q, int rw) static struct request *get_request_wait(request_queue_t *q, int rw,
struct bio *bio)
{ {
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
struct request *rq; struct request *rq;
...@@ -1972,7 +1971,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw) ...@@ -1972,7 +1971,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw)
prepare_to_wait_exclusive(&rl->wait[rw], &wait, prepare_to_wait_exclusive(&rl->wait[rw], &wait,
TASK_UNINTERRUPTIBLE); TASK_UNINTERRUPTIBLE);
rq = get_request(q, rw, GFP_NOIO); rq = get_request(q, rw, bio, GFP_NOIO);
if (!rq) { if (!rq) {
struct io_context *ioc; struct io_context *ioc;
...@@ -2003,9 +2002,9 @@ struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask) ...@@ -2003,9 +2002,9 @@ struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
BUG_ON(rw != READ && rw != WRITE); BUG_ON(rw != READ && rw != WRITE);
if (gfp_mask & __GFP_WAIT) if (gfp_mask & __GFP_WAIT)
rq = get_request_wait(q, rw); rq = get_request_wait(q, rw, NULL);
else else
rq = get_request(q, rw, gfp_mask); rq = get_request(q, rw, NULL, gfp_mask);
return rq; return rq;
} }
...@@ -2333,7 +2332,6 @@ static void __blk_put_request(request_queue_t *q, struct request *req) ...@@ -2333,7 +2332,6 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
return; return;
req->rq_status = RQ_INACTIVE; req->rq_status = RQ_INACTIVE;
req->q = NULL;
req->rl = NULL; req->rl = NULL;
/* /*
...@@ -2462,6 +2460,8 @@ static int attempt_merge(request_queue_t *q, struct request *req, ...@@ -2462,6 +2460,8 @@ static int attempt_merge(request_queue_t *q, struct request *req,
req->rq_disk->in_flight--; req->rq_disk->in_flight--;
} }
req->ioprio = ioprio_best(req->ioprio, next->ioprio);
__blk_put_request(q, next); __blk_put_request(q, next);
return 1; return 1;
} }
...@@ -2514,11 +2514,13 @@ static int __make_request(request_queue_t *q, struct bio *bio) ...@@ -2514,11 +2514,13 @@ static int __make_request(request_queue_t *q, struct bio *bio)
{ {
struct request *req, *freereq = NULL; struct request *req, *freereq = NULL;
int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync; int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
unsigned short prio;
sector_t sector; sector_t sector;
sector = bio->bi_sector; sector = bio->bi_sector;
nr_sectors = bio_sectors(bio); nr_sectors = bio_sectors(bio);
cur_nr_sectors = bio_cur_sectors(bio); cur_nr_sectors = bio_cur_sectors(bio);
prio = bio_prio(bio);
rw = bio_data_dir(bio); rw = bio_data_dir(bio);
sync = bio_sync(bio); sync = bio_sync(bio);
...@@ -2559,6 +2561,7 @@ static int __make_request(request_queue_t *q, struct bio *bio) ...@@ -2559,6 +2561,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
req->biotail->bi_next = bio; req->biotail->bi_next = bio;
req->biotail = bio; req->biotail = bio;
req->nr_sectors = req->hard_nr_sectors += nr_sectors; req->nr_sectors = req->hard_nr_sectors += nr_sectors;
req->ioprio = ioprio_best(req->ioprio, prio);
drive_stat_acct(req, nr_sectors, 0); drive_stat_acct(req, nr_sectors, 0);
if (!attempt_back_merge(q, req)) if (!attempt_back_merge(q, req))
elv_merged_request(q, req); elv_merged_request(q, req);
...@@ -2583,6 +2586,7 @@ static int __make_request(request_queue_t *q, struct bio *bio) ...@@ -2583,6 +2586,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
req->hard_cur_sectors = cur_nr_sectors; req->hard_cur_sectors = cur_nr_sectors;
req->sector = req->hard_sector = sector; req->sector = req->hard_sector = sector;
req->nr_sectors = req->hard_nr_sectors += nr_sectors; req->nr_sectors = req->hard_nr_sectors += nr_sectors;
req->ioprio = ioprio_best(req->ioprio, prio);
drive_stat_acct(req, nr_sectors, 0); drive_stat_acct(req, nr_sectors, 0);
if (!attempt_front_merge(q, req)) if (!attempt_front_merge(q, req))
elv_merged_request(q, req); elv_merged_request(q, req);
...@@ -2610,7 +2614,7 @@ static int __make_request(request_queue_t *q, struct bio *bio) ...@@ -2610,7 +2614,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
freereq = NULL; freereq = NULL;
} else { } else {
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
if ((freereq = get_request(q, rw, GFP_ATOMIC)) == NULL) { if ((freereq = get_request(q, rw, bio, GFP_ATOMIC)) == NULL) {
/* /*
* READA bit set * READA bit set
*/ */
...@@ -2618,7 +2622,7 @@ static int __make_request(request_queue_t *q, struct bio *bio) ...@@ -2618,7 +2622,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
if (bio_rw_ahead(bio)) if (bio_rw_ahead(bio))
goto end_io; goto end_io;
freereq = get_request_wait(q, rw); freereq = get_request_wait(q, rw, bio);
} }
goto again; goto again;
} }
...@@ -2646,6 +2650,7 @@ static int __make_request(request_queue_t *q, struct bio *bio) ...@@ -2646,6 +2650,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
req->buffer = bio_data(bio); /* see ->buffer comment above */ req->buffer = bio_data(bio); /* see ->buffer comment above */
req->waiting = NULL; req->waiting = NULL;
req->bio = req->biotail = bio; req->bio = req->biotail = bio;
req->ioprio = prio;
req->rq_disk = bio->bi_bdev->bd_disk; req->rq_disk = bio->bi_bdev->bd_disk;
req->start_time = jiffies; req->start_time = jiffies;
...@@ -2674,7 +2679,7 @@ static inline void blk_partition_remap(struct bio *bio) ...@@ -2674,7 +2679,7 @@ static inline void blk_partition_remap(struct bio *bio)
if (bdev != bdev->bd_contains) { if (bdev != bdev->bd_contains) {
struct hd_struct *p = bdev->bd_part; struct hd_struct *p = bdev->bd_part;
switch (bio->bi_rw) { switch (bio_data_dir(bio)) {
case READ: case READ:
p->read_sectors += bio_sectors(bio); p->read_sectors += bio_sectors(bio);
p->reads++; p->reads++;
...@@ -2693,6 +2698,7 @@ void blk_finish_queue_drain(request_queue_t *q) ...@@ -2693,6 +2698,7 @@ void blk_finish_queue_drain(request_queue_t *q)
{ {
struct request_list *rl = &q->rq; struct request_list *rl = &q->rq;
struct request *rq; struct request *rq;
int requeued = 0;
spin_lock_irq(q->queue_lock); spin_lock_irq(q->queue_lock);
clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags); clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
...@@ -2701,9 +2707,13 @@ void blk_finish_queue_drain(request_queue_t *q) ...@@ -2701,9 +2707,13 @@ void blk_finish_queue_drain(request_queue_t *q)
rq = list_entry_rq(q->drain_list.next); rq = list_entry_rq(q->drain_list.next);
list_del_init(&rq->queuelist); list_del_init(&rq->queuelist);
__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1); elv_requeue_request(q, rq);
requeued++;
} }
if (requeued)
q->request_fn(q);
spin_unlock_irq(q->queue_lock); spin_unlock_irq(q->queue_lock);
wake_up(&rl->wait[0]); wake_up(&rl->wait[0]);
...@@ -2900,7 +2910,7 @@ void submit_bio(int rw, struct bio *bio) ...@@ -2900,7 +2910,7 @@ void submit_bio(int rw, struct bio *bio)
BIO_BUG_ON(!bio->bi_size); BIO_BUG_ON(!bio->bi_size);
BIO_BUG_ON(!bio->bi_io_vec); BIO_BUG_ON(!bio->bi_io_vec);
bio->bi_rw = rw; bio->bi_rw |= rw;
if (rw & WRITE) if (rw & WRITE)
mod_page_state(pgpgout, count); mod_page_state(pgpgout, count);
else else
...@@ -3257,8 +3267,11 @@ void exit_io_context(void) ...@@ -3257,8 +3267,11 @@ void exit_io_context(void)
struct io_context *ioc; struct io_context *ioc;
local_irq_save(flags); local_irq_save(flags);
task_lock(current);
ioc = current->io_context; ioc = current->io_context;
current->io_context = NULL; current->io_context = NULL;
ioc->task = NULL;
task_unlock(current);
local_irq_restore(flags); local_irq_restore(flags);
if (ioc->aic && ioc->aic->exit) if (ioc->aic && ioc->aic->exit)
...@@ -3293,12 +3306,12 @@ struct io_context *get_io_context(int gfp_flags) ...@@ -3293,12 +3306,12 @@ struct io_context *get_io_context(int gfp_flags)
ret = kmem_cache_alloc(iocontext_cachep, gfp_flags); ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
if (ret) { if (ret) {
atomic_set(&ret->refcount, 1); atomic_set(&ret->refcount, 1);
ret->pid = tsk->pid; ret->task = current;
ret->set_ioprio = NULL;
ret->last_waited = jiffies; /* doesn't matter... */ ret->last_waited = jiffies; /* doesn't matter... */
ret->nr_batch_requests = 0; /* because this is 0 */ ret->nr_batch_requests = 0; /* because this is 0 */
ret->aic = NULL; ret->aic = NULL;
ret->cic = NULL; ret->cic = NULL;
spin_lock_init(&ret->lock);
local_irq_save(flags); local_irq_save(flags);
......
...@@ -10,6 +10,7 @@ obj-y := open.o read_write.o file_table.o buffer.o bio.o super.o \ ...@@ -10,6 +10,7 @@ obj-y := open.o read_write.o file_table.o buffer.o bio.o super.o \
ioctl.o readdir.o select.o fifo.o locks.o dcache.o inode.o \ ioctl.o readdir.o select.o fifo.o locks.o dcache.o inode.o \
attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \ attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \
seq_file.o xattr.o libfs.o fs-writeback.o mpage.o direct-io.o \ seq_file.o xattr.o libfs.o fs-writeback.o mpage.o direct-io.o \
ioprio.o
obj-$(CONFIG_EPOLL) += eventpoll.o obj-$(CONFIG_EPOLL) += eventpoll.o
obj-$(CONFIG_COMPAT) += compat.o obj-$(CONFIG_COMPAT) += compat.o
......
/*
* fs/ioprio.c
*
* Copyright (C) 2004 Jens Axboe <axboe@suse.de>
*
* Helper functions for setting/querying io priorities of processes. The
 * system calls closely mimic getpriority/setpriority, see the man page for
* those. The prio argument is a composite of prio class and prio data, where
* the data argument has meaning within that class. The standard scheduling
* classes have 8 distinct prio levels, with 0 being the highest prio and 7
* being the lowest.
*
* IOW, setting BE scheduling class with prio 2 is done ala:
*
* unsigned int prio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 2;
*
* ioprio_set(PRIO_PROCESS, pid, prio);
*
* See also Documentation/block/ioprio.txt
*
*/
#include <linux/kernel.h>
#include <linux/ioprio.h>
#include <linux/blkdev.h>
/*
 * Install @ioprio as the io priority of @task.
 *
 * Permission model mirrors setpriority(2): the caller may change a task
 * it owns (uid matches caller's uid or euid), otherwise CAP_SYS_NICE is
 * required.  The uid checks are evaluated before capable() on purpose,
 * so the capability (and its audit side effect) is only consulted when
 * ownership does not already grant access.
 *
 * Returns 0 on success, -EPERM if the caller may not modify @task.
 */
static int set_task_ioprio(struct task_struct *task, int ioprio)
{
	struct io_context *ctx;

	if (task->uid != current->euid) {
		if (task->uid != current->uid) {
			if (!capable(CAP_SYS_NICE))
				return -EPERM;
		}
	}

	/*
	 * task_lock() serializes against the task installing or tearing
	 * down its io_context; propagate the new prio to the io scheduler
	 * via its hook if one is registered.
	 */
	task_lock(task);
	task->ioprio = ioprio;

	ctx = task->io_context;
	if (ctx && ctx->set_ioprio)
		ctx->set_ioprio(ctx, ioprio);

	task_unlock(task);
	return 0;
}
asmlinkage int sys_ioprio_set(int which, int who, int ioprio)
{
int class = IOPRIO_PRIO_CLASS(ioprio);
int data = IOPRIO_PRIO_DATA(ioprio);
struct task_struct *p, *g;
struct user_struct *user;
int ret;
switch (class) {
case IOPRIO_CLASS_RT:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
/* fall through, rt has prio field too */
case IOPRIO_CLASS_BE:
if (data >= IOPRIO_BE_NR || data < 0)
return -EINVAL;
break;
case IOPRIO_CLASS_IDLE:
break;
default:
return -EINVAL;
}
ret = -ESRCH;
read_lock_irq(&tasklist_lock);
switch (which) {
case IOPRIO_WHO_PROCESS:
if (!who)
p = current;
else
p = find_task_by_pid(who);
if (p)
ret = set_task_ioprio(p, ioprio);
break;
case IOPRIO_WHO_PGRP:
if (!who)
who = process_group(current);
do_each_task_pid(who, PIDTYPE_PGID, p) {
ret = set_task_ioprio(p, ioprio);
if (ret)
break;
} while_each_task_pid(who, PIDTYPE_PGID, p);
break;
case IOPRIO_WHO_USER:
if (!who)
user = current->user;
else
user = find_user(who);
if (!user)
break;
do_each_thread(g, p) {
if (p->uid != who)
continue;
ret = set_task_ioprio(p, ioprio);
if (ret)
break;
} while_each_thread(g, p);
if (who)
free_uid(user);
break;
default:
ret = -EINVAL;
}
read_unlock_irq(&tasklist_lock);
return ret;