author    | Linus Torvalds <torvalds@linux-foundation.org> | 2018-02-11 14:34:03 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-02-11 14:34:03 -0800
commit    | a9a08845e9acbd224e4ee466f5c1275ed50054e8 (patch)
tree      | 415d6e6a82e001c65e6b161539411f54ba5fe8ce /fs/eventpoll.c
parent    | ee5daa1361fceb6f482c005bcc9ba8d01b92ea5c (diff)
vfs: do bulk POLL* -> EPOLL* replacement
This is the mindless scripted replacement of kernel use of POLL*
variables as described by Al, done by this script:
for V in IN OUT PRI ERR RDNORM RDBAND WRNORM WRBAND HUP RDHUP NVAL MSG; do
L=`git grep -l -w POLL$V | grep -v '^t' | grep -v /um/ | grep -v '^sa' | grep -v '/poll.h$'|grep -v '^D'`
for f in $L; do sed -i "-es/^\([^\"]*\)\(\<POLL$V\>\)/\\1E\\2/" $f; done
done
with de-mangling cleanups yet to come.
NOTE! On almost all architectures, the EPOLL* constants have the same
values as the POLL* constants do. But the keyword here is "almost".
For various bad reasons they aren't the same, and epoll() doesn't
actually work quite correctly in some cases due to this on Sparc et al.
The next patch from Al will sort out the final differences, and we
should be all done.
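
The mismatch the NOTE describes can be probed from userspace. The following
standalone C sketch is not part of this commit; it is written here only for
illustration. It prints the numeric values of a few EPOLL*/POLL* pairs as seen
on the build architecture:

#define _GNU_SOURCE             /* for POLLRDNORM, POLLWRBAND etc. in <poll.h> */
#include <poll.h>
#include <stdio.h>
#include <sys/epoll.h>

/* Print one EPOLL/POLL pair and whether the two values coincide. */
#define CHECK(ep, p) \
        printf("%-12s 0x%08x   %-10s 0x%04x   %s\n", #ep, (unsigned)(ep), \
               #p, (unsigned)(p), (unsigned)(ep) == (unsigned)(p) ? "match" : "DIFFER")

int main(void)
{
        CHECK(EPOLLIN,     POLLIN);
        CHECK(EPOLLOUT,    POLLOUT);
        CHECK(EPOLLERR,    POLLERR);
        CHECK(EPOLLHUP,    POLLHUP);
        CHECK(EPOLLRDNORM, POLLRDNORM);
        CHECK(EPOLLRDBAND, POLLRDBAND);
        CHECK(EPOLLWRNORM, POLLWRNORM);
        CHECK(EPOLLWRBAND, POLLWRBAND);
        return 0;
}

On x86-64 every pair prints "match"; the commit's point is precisely that this
cannot be assumed on every architecture (sparc being the cited offender).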
Scripted-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r-- | fs/eventpoll.c | 22
1 file changed, 11 insertions, 11 deletions
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index d1a490c7e6c3..0f3494ed3ed0 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -95,9 +95,9 @@
 /* Epoll private bits inside the event mask */
 #define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)
 
-#define EPOLLINOUT_BITS (POLLIN | POLLOUT)
+#define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT)
 
-#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | POLLERR | POLLHUP | \
+#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \
                                 EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)
 
 /* Maximum number of nesting allowed inside epoll sets */
@@ -555,7 +555,7 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
         wait_queue_head_t *wqueue = (wait_queue_head_t *)cookie;
 
         spin_lock_irqsave_nested(&wqueue->lock, flags, call_nests + 1);
-        wake_up_locked_poll(wqueue, POLLIN);
+        wake_up_locked_poll(wqueue, EPOLLIN);
         spin_unlock_irqrestore(&wqueue->lock, flags);
 
         return 0;
@@ -575,7 +575,7 @@ static void ep_poll_safewake(wait_queue_head_t *wq)
 
 static void ep_poll_safewake(wait_queue_head_t *wq)
 {
-        wake_up_poll(wq, POLLIN);
+        wake_up_poll(wq, EPOLLIN);
 }
 
 #endif
@@ -908,7 +908,7 @@ static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head
 
         list_for_each_entry_safe(epi, tmp, head, rdllink) {
                 if (ep_item_poll(epi, &pt, depth)) {
-                        return POLLIN | POLLRDNORM;
+                        return EPOLLIN | EPOLLRDNORM;
                 } else {
                         /*
                          * Item has been dropped into the ready list by the poll
@@ -1181,12 +1181,12 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
         if ((epi->event.events & EPOLLEXCLUSIVE) &&
                                 !(pollflags & POLLFREE)) {
                 switch (pollflags & EPOLLINOUT_BITS) {
-                case POLLIN:
-                        if (epi->event.events & POLLIN)
+                case EPOLLIN:
+                        if (epi->event.events & EPOLLIN)
                                 ewake = 1;
                         break;
-                case POLLOUT:
-                        if (epi->event.events & POLLOUT)
+                case EPOLLOUT:
+                        if (epi->event.events & EPOLLOUT)
                                 ewake = 1;
                         break;
                 case 0:
@@ -2105,7 +2105,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
         switch (op) {
         case EPOLL_CTL_ADD:
                 if (!epi) {
-                        epds.events |= POLLERR | POLLHUP;
+                        epds.events |= EPOLLERR | EPOLLHUP;
                         error = ep_insert(ep, &epds, tf.file, fd, full_check);
                 } else
                         error = -EEXIST;
@@ -2121,7 +2121,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
         case EPOLL_CTL_MOD:
                 if (epi) {
                         if (!(epi->event.events & EPOLLEXCLUSIVE)) {
-                                epds.events |= POLLERR | POLLHUP;
+                                epds.events |= EPOLLERR | EPOLLHUP;
                                 error = ep_modify(ep, epi, &epds);
                         }
                 } else
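
For context on the two epoll_ctl() hunks above: the requested events mask is
unconditionally OR-ed with EPOLLERR | EPOLLHUP, so those conditions are always
monitored whether or not the caller asked for them. A minimal userspace sketch
(illustrative only, not from this commit) makes that visible with a pipe whose
write side is hung up:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void)
{
        int pipefd[2];
        int epfd;
        struct epoll_event ev = { 0 }, out;

        if (pipe(pipefd) < 0 || (epfd = epoll_create1(0)) < 0) {
                perror("setup");
                return EXIT_FAILURE;
        }

        /* Register the read end asking for EPOLLIN only; EPOLLHUP is not requested. */
        ev.events = EPOLLIN;
        ev.data.fd = pipefd[0];
        if (epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev) < 0) {
                perror("epoll_ctl");
                return EXIT_FAILURE;
        }

        close(pipefd[1]);       /* hang up the write side */

        if (epoll_wait(epfd, &out, 1, 1000) == 1)
                printf("events = 0x%x, EPOLLHUP %s reported\n",
                       (unsigned)out.events,
                       (out.events & EPOLLHUP) ? "was" : "was not");

        close(pipefd[0]);
        close(epfd);
        return EXIT_SUCCESS;
}

This mirrors the epoll_ctl(2) documentation: EPOLLERR and EPOLLHUP need not be
set in events, epoll_wait(2) reports them regardless.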