Commit a11e1d43 authored by Linus Torvalds

Revert changes to convert to ->poll_mask() and aio IOCB_CMD_POLL

The poll() changes were not well thought out, and completely
unexplained.  They also caused a huge performance regression, because
"->poll()" was no longer a trivial file operation that just called down
to the underlying file operations, but instead did at least two indirect
calls.

Indirect calls are sadly slow now with the Spectre mitigation, but the
performance problem could at least be largely mitigated by changing the
"->get_poll_head()" operation to just have a per-file-descriptor pointer
to the poll head instead.  That gets rid of one of the new indirections.

But that doesn't fix the new complexity that is completely unwarranted
for the regular case.  The (undocumented) reason for the poll() changes
was some alleged AIO poll race fixing, but we don't make the common case
slower and more complex for some uncommon special case, so this all
really needs way more explanations and most likely a fundamental
redesign.

[ This revert is a revert of about 30 different commits, not reverted
  individually because that would just be unnecessarily messy  - Linus ]

Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f5749432
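To make the "at least two indirect calls" point concrete, here is a minimal sketch of the two dispatch paths. It is modelled on the vfs_poll() helper removed from fs/select.c further down in this diff; the function names are illustrative and everything beyond ->poll, ->get_poll_head and ->poll_mask is simplified.

/* Classic path: one indirect call per poll iteration. */
static __poll_t classic_poll(struct file *file, struct poll_table_struct *pt)
{
	return file->f_op->poll(file, pt);
}

/*
 * ->get_poll_head()/->poll_mask() path, as in the removed vfs_poll():
 * two indirect calls plus the queueing callback for the same work.
 * Caching a per-file wait_queue_head pointer, as suggested in the
 * message above, would remove call #1 but not the added complexity.
 */
static __poll_t split_poll(struct file *file, struct poll_table_struct *pt)
{
	__poll_t events = poll_requested_events(pt);
	struct wait_queue_head *head;

	if (pt && pt->_qproc) {
		head = file->f_op->get_poll_head(file, events);	/* call #1 */
		if (!head)
			return DEFAULT_POLLMASK;
		if (IS_ERR(head))
			return EPOLLERR;
		pt->_qproc(file, head, pt);			/* queueing callback */
	}
	return file->f_op->poll_mask(file, events);		/* call #2 */
}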
......@@ -441,8 +441,6 @@ prototypes:
int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
__poll_t (*poll_mask) (struct file *, __poll_t);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
......@@ -473,7 +471,7 @@ prototypes:
};
locking rules:
All except for ->poll_mask may block.
All may block.
->llseek() locking has moved from llseek to the individual llseek
implementations. If your fs is not using generic_file_llseek, you
......@@ -505,9 +503,6 @@ in sys_read() and friends.
the lease within the individual filesystem to record the result of the
operation
->poll_mask can be called with or without the waitqueue lock for the waitqueue
returned from ->get_poll_head.
--------------------------- dquot_operations -------------------------------
prototypes:
int (*write_dquot) (struct dquot *);
......
......@@ -857,8 +857,6 @@ struct file_operations {
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iterate) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
__poll_t (*poll_mask) (struct file *, __poll_t);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
......@@ -903,17 +901,6 @@ otherwise noted.
activity on this file and (optionally) go to sleep until there
is activity. Called by the select(2) and poll(2) system calls
get_poll_head: Returns the struct wait_queue_head that callers can
wait on. Callers need to check the returned events using ->poll_mask
once woken. Can return NULL to indicate polling is not supported,
or any error code using the ERR_PTR convention to indicate that a
grave error occurred and ->poll_mask shall not be called.
poll_mask: return the mask of EPOLL* values describing the file descriptor
state. Called either before going to sleep on the waitqueue returned by
get_poll_head, or after it has been woken. If ->get_poll_head and
->poll_mask are implemented, ->poll does not need to be implemented.
unlocked_ioctl: called by the ioctl(2) system call.
compat_ioctl: called by the ioctl(2) system call when 32 bit system calls
......
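For reference, a driver following the (now removed) contract described above would have looked roughly like the sketch below. It mirrors the eventfd implementation that this patch converts back to ->poll further down; the "mydev" names are purely illustrative.

struct mydev {					/* illustrative private data */
	struct wait_queue_head wqh;
	bool ready;
};

static struct wait_queue_head *mydev_get_poll_head(struct file *file,
						   __poll_t events)
{
	struct mydev *dev = file->private_data;

	return &dev->wqh;			/* callers sleep on this queue */
}

static __poll_t mydev_poll_mask(struct file *file, __poll_t events)
{
	struct mydev *dev = file->private_data;

	/* No poll_wait() here: the core already queued us on the head above. */
	return dev->ready ? (EPOLLIN | EPOLLRDNORM) & events : 0;
}

static const struct file_operations mydev_fops = {
	.get_poll_head	= mydev_get_poll_head,
	.poll_mask	= mydev_poll_mask,
	/* ... */
};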
......@@ -1060,12 +1060,19 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
}
EXPORT_SYMBOL_GPL(af_alg_async_cb);
__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events)
/**
* af_alg_poll - poll system call handler
*/
__poll_t af_alg_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
__poll_t mask = 0;
__poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
if (!ctx->more || ctx->used)
mask |= EPOLLIN | EPOLLRDNORM;
......@@ -1075,7 +1082,7 @@ __poll_t af_alg_poll_mask(struct socket *sock, __poll_t events)
return mask;
}
EXPORT_SYMBOL_GPL(af_alg_poll_mask);
EXPORT_SYMBOL_GPL(af_alg_poll);
/**
* af_alg_alloc_areq - allocate struct af_alg_async_req
......
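The restored af_alg_poll() is a proto_ops ->poll handler rather than a file_operations ->poll; the socket layer bridges the two. Roughly (simplified from net/socket.c, which is not part of this diff):

static __poll_t sock_poll(struct file *file, poll_table *wait)
{
	struct socket *sock = file->private_data;

	/*
	 * socket_file_ops.poll forwards to the per-protocol handler,
	 * e.g. af_alg_poll() for AF_ALG sockets, which in turn calls
	 * sock_poll_wait() to register on sk_sleep(sk).
	 */
	return sock->ops->poll(file, sock, wait);
}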
......@@ -375,7 +375,7 @@ static struct proto_ops algif_aead_ops = {
.sendmsg = aead_sendmsg,
.sendpage = af_alg_sendpage,
.recvmsg = aead_recvmsg,
.poll_mask = af_alg_poll_mask,
.poll = af_alg_poll,
};
static int aead_check_key(struct socket *sock)
......@@ -471,7 +471,7 @@ static struct proto_ops algif_aead_ops_nokey = {
.sendmsg = aead_sendmsg_nokey,
.sendpage = aead_sendpage_nokey,
.recvmsg = aead_recvmsg_nokey,
.poll_mask = af_alg_poll_mask,
.poll = af_alg_poll,
};
static void *aead_bind(const char *name, u32 type, u32 mask)
......
......@@ -206,7 +206,7 @@ static struct proto_ops algif_skcipher_ops = {
.sendmsg = skcipher_sendmsg,
.sendpage = af_alg_sendpage,
.recvmsg = skcipher_recvmsg,
.poll_mask = af_alg_poll_mask,
.poll = af_alg_poll,
};
static int skcipher_check_key(struct socket *sock)
......@@ -302,7 +302,7 @@ static struct proto_ops algif_skcipher_ops_nokey = {
.sendmsg = skcipher_sendmsg_nokey,
.sendpage = skcipher_sendpage_nokey,
.recvmsg = skcipher_recvmsg_nokey,
.poll_mask = af_alg_poll_mask,
.poll = af_alg_poll,
};
static void *skcipher_bind(const char *name, u32 type, u32 mask)
......
......@@ -402,7 +402,8 @@ static struct poolinfo {
/*
* Static global variables
*/
static DECLARE_WAIT_QUEUE_HEAD(random_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;
static DEFINE_SPINLOCK(random_ready_list_lock);
......@@ -721,8 +722,8 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
/* should we wake readers? */
if (entropy_bits >= random_read_wakeup_bits &&
wq_has_sleeper(&random_wait)) {
wake_up_interruptible_poll(&random_wait, POLLIN);
wq_has_sleeper(&random_read_wait)) {
wake_up_interruptible(&random_read_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
}
/* If the input pool is getting full, send some
......@@ -1396,7 +1397,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
trace_debit_entropy(r->name, 8 * ibytes);
if (ibytes &&
(r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
wake_up_interruptible_poll(&random_wait, POLLOUT);
wake_up_interruptible(&random_write_wait);
kill_fasync(&fasync, SIGIO, POLL_OUT);
}
......@@ -1838,7 +1839,7 @@ _random_read(int nonblock, char __user *buf, size_t nbytes)
if (nonblock)
return -EAGAIN;
wait_event_interruptible(random_wait,
wait_event_interruptible(random_read_wait,
ENTROPY_BITS(&input_pool) >=
random_read_wakeup_bits);
if (signal_pending(current))
......@@ -1875,17 +1876,14 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
return ret;
}
static struct wait_queue_head *
random_get_poll_head(struct file *file, __poll_t events)
{
return &random_wait;
}
static __poll_t
random_poll_mask(struct file *file, __poll_t events)
random_poll(struct file *file, poll_table * wait)
{
__poll_t mask = 0;
__poll_t mask;
poll_wait(file, &random_read_wait, wait);
poll_wait(file, &random_write_wait, wait);
mask = 0;
if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
mask |= EPOLLIN | EPOLLRDNORM;
if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
......@@ -1992,8 +1990,7 @@ static int random_fasync(int fd, struct file *filp, int on)
const struct file_operations random_fops = {
.read = random_read,
.write = random_write,
.get_poll_head = random_get_poll_head,
.poll_mask = random_poll_mask,
.poll = random_poll,
.unlocked_ioctl = random_ioctl,
.fasync = random_fasync,
.llseek = noop_llseek,
......@@ -2326,7 +2323,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
* We'll be woken up again once below random_write_wakeup_thresh,
* or when the calling thread is about to terminate.
*/
wait_event_interruptible(random_wait, kthread_should_stop() ||
wait_event_interruptible(random_write_wait, kthread_should_stop() ||
ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
mix_pool_bytes(poolp, buffer, count);
credit_entropy_bits(poolp, entropy);
......
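With random_poll() restored, the two wait queues map directly onto what userspace observes: POLLIN once the input pool holds at least random_read_wakeup_bits of entropy, POLLOUT once it drops below random_write_wakeup_bits. A minimal, purely illustrative consumer:

#include <poll.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/random", O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };

	if (fd < 0)
		return 1;
	if (poll(&pfd, 1, -1) == 1) {		/* sleeps on the queues above */
		if (pfd.revents & POLLIN)
			printf("enough entropy to read\n");
		if (pfd.revents & POLLOUT)
			printf("pool would accept more entropy\n");
	}
	close(fd);
	return 0;
}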
......@@ -588,7 +588,7 @@ static const struct proto_ops data_sock_ops = {
.getname = data_sock_getname,
.sendmsg = mISDN_sock_sendmsg,
.recvmsg = mISDN_sock_recvmsg,
.poll_mask = datagram_poll_mask,
.poll = datagram_poll,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = data_sock_setsockopt,
......
......@@ -1107,7 +1107,7 @@ static const struct proto_ops pppoe_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = pppoe_getname,
.poll_mask = datagram_poll_mask,
.poll = datagram_poll,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = sock_no_setsockopt,
......
......@@ -5,7 +5,6 @@
* Implements an efficient asynchronous io interface.
*
* Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved.
* Copyright 2018 Christoph Hellwig.
*
* See ../COPYING for licensing terms.
*/
......@@ -165,22 +164,10 @@ struct fsync_iocb {
bool datasync;
};
struct poll_iocb {
struct file *file;
__poll_t events;
struct wait_queue_head *head;
union {
struct wait_queue_entry wait;
struct work_struct work;
};
};
struct aio_kiocb {
union {
struct kiocb rw;
struct fsync_iocb fsync;
struct poll_iocb poll;
};
struct kioctx *ki_ctx;
......@@ -1590,6 +1577,7 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
iocb->aio_rw_flags))
return -EINVAL;
req->file = fget(iocb->aio_fildes);
if (unlikely(!req->file))
return -EBADF;
......@@ -1604,137 +1592,6 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
return 0;
}
/* need to use list_del_init so we can check if item was present */
static inline bool __aio_poll_remove(struct poll_iocb *req)
{
if (list_empty(&req->wait.entry))
return false;
list_del_init(&req->wait.entry);
return true;
}
static inline void __aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
{
fput(iocb->poll.file);
aio_complete(iocb, mangle_poll(mask), 0);
}
static void aio_poll_work(struct work_struct *work)
{
struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, poll.work);
if (!list_empty_careful(&iocb->ki_list))
aio_remove_iocb(iocb);
__aio_poll_complete(iocb, iocb->poll.events);
}
static int aio_poll_cancel(struct kiocb *iocb)
{
struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
struct poll_iocb *req = &aiocb->poll;
struct wait_queue_head *head = req->head;
bool found = false;
spin_lock(&head->lock);
found = __aio_poll_remove(req);
spin_unlock(&head->lock);
if (found) {
req->events = 0;
INIT_WORK(&req->work, aio_poll_work);
schedule_work(&req->work);
}
return 0;
}
static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
void *key)
{
struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
struct file *file = req->file;
__poll_t mask = key_to_poll(key);
assert_spin_locked(&req->head->lock);
/* for instances that support it check for an event match first: */
if (mask && !(mask & req->events))
return 0;
mask = file->f_op->poll_mask(file, req->events) & req->events;
if (!mask)
return 0;
__aio_poll_remove(req);
/*
* Try completing without a context switch if we can acquire ctx_lock
* without spinning. Otherwise we need to defer to a workqueue to
* avoid a deadlock due to the lock order.
*/
if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
list_del_init(&iocb->ki_list);
spin_unlock(&iocb->ki_ctx->ctx_lock);
__aio_poll_complete(iocb, mask);
} else {
req->events = mask;
INIT_WORK(&req->work, aio_poll_work);
schedule_work(&req->work);
}
return 1;
}
static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
{
struct kioctx *ctx = aiocb->ki_ctx;
struct poll_iocb *req = &aiocb->poll;
__poll_t mask;
/* reject any unknown events outside the normal event mask. */
if ((u16)iocb->aio_buf != iocb->aio_buf)
return -EINVAL;
/* reject fields that are not defined for poll */
if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
return -EINVAL;
req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
req->file = fget(iocb->aio_fildes);
if (unlikely(!req->file))
return -EBADF;
if (!file_has_poll_mask(req->file))
goto out_fail;
req->head = req->file->f_op->get_poll_head(req->file, req->events);
if (!req->head)
goto out_fail;
if (IS_ERR(req->head)) {
mask = EPOLLERR;
goto done;
}
init_waitqueue_func_entry(&req->wait, aio_poll_wake);
aiocb->ki_cancel = aio_poll_cancel;
spin_lock_irq(&ctx->ctx_lock);
spin_lock(&req->head->lock);
mask = req->file->f_op->poll_mask(req->file, req->events) & req->events;
if (!mask) {
__add_wait_queue(req->head, &req->wait);
list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
}
spin_unlock(&req->head->lock);
spin_unlock_irq(&ctx->ctx_lock);
done:
if (mask)
__aio_poll_complete(aiocb, mask);
return 0;
out_fail:
fput(req->file);
return -EINVAL; /* same as no support for IOCB_CMD_POLL */
}
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
bool compat)
{
......@@ -1808,9 +1665,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
case IOCB_CMD_FDSYNC:
ret = aio_fsync(&req->fsync, &iocb, true);
break;
case IOCB_CMD_POLL:
ret = aio_poll(req, &iocb);
break;
default:
pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode);
ret = -EINVAL;
......
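For context, the IOCB_CMD_POLL opcode whose implementation is removed above would have been driven from userspace roughly as below (raw syscalls, no libaio); after this revert such a submission fails with -EINVAL again. The snippet is an illustration of the interface, not code from this series.

#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <poll.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	aio_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;

	if (syscall(__NR_io_setup, 1, &ctx) < 0)
		return 1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_lio_opcode = IOCB_CMD_POLL;	/* the opcode removed above */
	cb.aio_fildes     = 0;			/* poll stdin */
	cb.aio_buf        = POLLIN;		/* requested event mask */

	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
		return 1;			/* -EINVAL once this revert lands */

	/* On the reverted-out kernels, the triggered mask came back in ev.res. */
	syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL);
	syscall(__NR_io_destroy, ctx);
	return 0;
}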
......@@ -101,20 +101,14 @@ static int eventfd_release(struct inode *inode, struct file *file)
return 0;
}
static struct wait_queue_head *
eventfd_get_poll_head(struct file *file, __poll_t events)
{
struct eventfd_ctx *ctx = file->private_data;
return &ctx->wqh;
}
static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask)
static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
struct eventfd_ctx *ctx = file->private_data;
__poll_t events = 0;
u64 count;
poll_wait(file, &ctx->wqh, wait);
/*
* All writes to ctx->count occur within ctx->wqh.lock. This read
* can be done outside ctx->wqh.lock because we know that poll_wait
......@@ -156,11 +150,11 @@ static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask)
count = READ_ONCE(ctx->count);
if (count > 0)
events |= (EPOLLIN & eventmask);
events |= EPOLLIN;
if (count == ULLONG_MAX)
events |= EPOLLERR;
if (ULLONG_MAX - 1 > count)
events |= (EPOLLOUT & eventmask);
events |= EPOLLOUT;
return events;
}
......@@ -311,8 +305,7 @@ static const struct file_operations eventfd_fops = {
.show_fdinfo = eventfd_show_fdinfo,
#endif
.release = eventfd_release,
.get_poll_head = eventfd_get_poll_head,
.poll_mask = eventfd_poll_mask,
.poll = eventfd_poll,
.read = eventfd_read,
.write = eventfd_write,
.llseek = noop_llseek,
......
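The restored eventfd_poll() semantics are easy to observe from userspace: the descriptor reports EPOLLIN while the counter is non-zero and EPOLLOUT while another increment would not overflow it. A small illustrative example:

#include <sys/eventfd.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int efd = eventfd(0, 0);
	uint64_t val = 1;
	struct pollfd pfd = { .fd = efd, .events = POLLIN | POLLOUT };

	poll(&pfd, 1, 0);
	printf("empty:   revents=%#x\n", pfd.revents);	/* POLLOUT only */

	write(efd, &val, sizeof(val));			/* count becomes 1 */
	poll(&pfd, 1, 0);
	printf("counted: revents=%#x\n", pfd.revents);	/* POLLIN | POLLOUT */

	close(efd);
	return 0;
}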
......@@ -922,18 +922,14 @@ static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head
return 0;
}
static struct wait_queue_head *ep_eventpoll_get_poll_head(struct file *file,
__poll_t eventmask)
{
struct eventpoll *ep = file->private_data;
return &ep->poll_wait;
}
static __poll_t ep_eventpoll_poll_mask(struct file *file, __poll_t eventmask)
static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
{
struct eventpoll *ep = file->private_data;
int depth = 0;
/* Insert inside our poll wait queue */
poll_wait(file, &ep->poll_wait, wait);
/*
* Proceed to find out if wanted events are really available inside
* the ready list.
......@@ -972,8 +968,7 @@ static const struct file_operations eventpoll_fops = {
.show_fdinfo = ep_show_fdinfo,
#endif
.release = ep_eventpoll_release,
.get_poll_head = ep_eventpoll_get_poll_head,
.poll_mask = ep_eventpoll_poll_mask,
.poll = ep_eventpoll_poll,
.llseek = noop_llseek,
};
......
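ep_eventpoll_poll() exists because an epoll file descriptor can itself be polled, for instance when one epoll instance is registered inside another. The nested case looks like this from userspace:

#include <sys/epoll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int outer = epoll_create1(0);
	int inner = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN, .data.fd = inner };

	/*
	 * Adding the inner epoll fd makes the kernel call the inner
	 * instance's ->poll (ep_eventpoll_poll) to learn its readiness.
	 */
	if (epoll_ctl(outer, EPOLL_CTL_ADD, inner, &ev) < 0)
		perror("epoll_ctl");

	close(inner);
	close(outer);
	return 0;
}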
......@@ -509,22 +509,19 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
}
static struct wait_queue_head *
pipe_get_poll_head(struct file *filp, __poll_t events)
{
struct pipe_inode_info *pipe = filp->private_data;
return &pipe->wait;
}
/* No kernel lock held - fine */
static __poll_t pipe_poll_mask(struct file *filp, __poll_t events)
static __poll_t
pipe_poll(struct file *filp, poll_table *wait)
{
__poll_t mask;
struct pipe_inode_info *pipe = filp->private_data;
int nrbufs = pipe->nrbufs;
__poll_t mask = 0;
int nrbufs;
poll_wait(filp, &pipe->wait, wait);
/* Reading only -- no need for acquiring the semaphore. */
nrbufs = pipe->nrbufs;
mask = 0;
if (filp->f_mode & FMODE_READ) {
mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
if (!pipe->writers && filp->f_version != pipe->w_counter)
......@@ -1023,8 +1020,7 @@ const struct file_operations pipefifo_fops = {
.llseek = no_llseek,
.read_iter = pipe_read,
.write_iter = pipe_write,
.get_poll_head = pipe_get_poll_head,
.poll_mask = pipe_poll_mask,
.poll = pipe_poll,
.unlocked_ioctl = pipe_ioctl,
.release = pipe_release,
.fasync = pipe_fasync,
......
......@@ -34,29 +34,6 @@
#include <linux/uaccess.h>
__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
{
if (file->f_op->poll) {
return file->f_op->poll(file, pt);
} else if (file_has_poll_mask(file)) {
unsigned int events = poll_requested_events(pt);
struct wait_queue_head *head;
if (pt && pt->_qproc) {
head = file->f_op->get_poll_head(file, events);
if (!head)
return DEFAULT_POLLMASK;
if (IS_ERR(head))
return EPOLLERR;
pt->_qproc(file, head, pt);
}
return file->f_op->poll_mask(file, events);
} else {
return DEFAULT_POLLMASK;
}
}
EXPORT_SYMBOL_GPL(vfs_poll);
/*
* Estimate expected accuracy in ns from a timeval.
......
......@@ -226,20 +226,21 @@ static int timerfd_release(struct inode *inode, struct file *file)
kfree_rcu(ctx, rcu);
return 0;
}
static struct wait_queue_head *timerfd_get_poll_head(struct file *file,
__poll_t eventmask)
static __poll_t timerfd_poll(struct file *file, poll_table *wait)
{
struct timerfd_ctx *ctx = file->private_data;
__poll_t events = 0;
unsigned long flags;
return &ctx->wqh;
}
poll_wait(file, &ctx->wqh, wait);
static __poll_t timerfd_poll_mask(struct file *file, __poll_t eventmask)
{
struct timerfd_ctx *ctx = file->private_data;
spin_lock_irqsave(&ctx->wqh.lock, flags);
if (ctx->ticks)
events |= EPOLLIN;
spin_unlock_irqrestore(&ctx->wqh.lock, flags);
return ctx->ticks ? EPOLLIN : 0;
return events;
}
static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
......@@ -363,8 +364,7 @@ static long timerfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg
static const struct file_operations timerfd_fops = {
.release = timerfd_release,
.get_poll_head = timerfd_get_poll_head,
.poll_mask = timerfd_poll_mask,
.poll = timerfd_poll,
.read = timerfd_read,
.llseek = noop_llseek,
.show_fdinfo = timerfd_show,
......
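timerfd_poll() only ever reports EPOLLIN, set while ctx->ticks is non-zero; from userspace that corresponds to the usual pattern of arming the timer and polling for expirations, for example:

#include <sys/timerfd.h>
#include <poll.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	int tfd = timerfd_create(CLOCK_MONOTONIC, 0);
	struct itimerspec its = { .it_value.tv_sec = 1 };	/* fire once, in 1s */
	struct pollfd pfd = { .fd = tfd, .events = POLLIN };
	uint64_t expirations;

	timerfd_settime(tfd, 0, &its, NULL);
	poll(&pfd, 1, -1);				/* POLLIN once ticks != 0 */
	read(tfd, &expirations, sizeof(expirations));	/* resets ticks to 0 */
	close(tfd);
	return 0;
}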
......@@ -245,7 +245,8 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
int offset, size_t size, int flags);
void af_alg_free_resources(struct af_alg_async_req *areq);
void af_alg_async_cb(struct crypto_async_request *_req, int err);
__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events);
__poll_t af_alg_poll(struct file *file, struct socket *sock,
poll_table *wait);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
unsigned int areqlen);
int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
......
......@@ -1720,8 +1720,6 @@ struct file_operations {
int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
__poll_t (*poll_mask) (struct file *, __poll_t);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
......
......@@ -147,7 +147,6 @@ struct proto_ops {
int (*getname) (struct socket *sock,
struct sockaddr *addr,
int peer);
__poll_t (*poll_mask) (struct socket *sock, __poll_t events);
__poll_t (*poll) (struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int (*ioctl) (struct socket *sock, unsigned int cmd,
......
......@@ -74,18 +74,18 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
pt->_key = ~(__poll_t)0; /* all events enabled */
}
static inline bool file_has_poll_mask(struct file *file)
static inline bool file_can_poll(struct file *file)
{
return file->f_op->get_poll_head && file->f_op->poll_mask;
return file->f_op->poll;
}
static inline bool file_can_poll(struct file *file)
static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
{
return file->f_op->poll || file_has_poll_mask(file);
if (unlikely(!file->f_op->poll))
return DEFAULT_POLLMASK;
return file->f_op->poll(file, pt);
}
__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt);
struct poll_table_entry {
struct file *filp;
__poll_t key;
......
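With vfs_poll() reduced to the inline wrapper above, a caller such as the per-descriptor loop in fs/select.c boils down to roughly the following (a simplified sketch, not the literal code):

static __poll_t poll_one_fd(struct file *file, struct poll_table_struct *pt,
			    __poll_t requested)
{
	__poll_t mask = DEFAULT_POLLMASK;

	if (file_can_poll(file))		/* now just checks ->poll != NULL */
		mask = vfs_poll(file, pt);	/* single indirect call again */

	return mask & (requested | EPOLLERR | EPOLLHUP);
}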
......@@ -3252,7 +3252,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
__poll_t datagram_poll_mask(struct socket *sock, __poll_t events);
__poll_t datagram_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
struct iov_iter *to, int size);
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
......
......@@ -271,7 +271,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags);
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags);
__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events);
__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
......
......@@ -153,6 +153,8 @@ struct iucv_sock_list {
atomic_t autobind_name;
};
__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
poll_table *wait);
void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
......
......@@ -109,7 +109,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);
void sctp_data_ready(struct sock *sk);
__poll_t sctp_poll_mask(struct socket *sock, __poll_t events);
__poll_t sctp_poll(struct file *file, struct socket *sock,
poll_table *wait);
void sctp_sock_rfree(struct sk_buff *skb);
void sctp_copy_sock(struct sock *newsk, struct sock *sk,
struct sctp_association *asoc);
......
......@@ -388,7 +388,8 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op);
__poll_t tcp_poll_mask(struct socket *sock, __poll_t events);
__poll_t tcp_poll(struct file *file, struct socket *sock,