@@ -466,6 +466,19 @@ static inline void ep_set_busy_poll_napi_id(struct epitem *epi)
 
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
+#ifdef CONFIG_PM_SLEEP
+static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
+{
+	if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
+		epev->events &= ~EPOLLWAKEUP;
+}
+#else
+static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
+{
+	epev->events &= ~EPOLLWAKEUP;
+}
+#endif /* CONFIG_PM_SLEEP */
+
 /**
  * ep_call_nested - Perform a bound (possibly) nested call, by checking
  *                  that the recursion limit is not exceeded, and that
@@ -79,16 +79,4 @@ struct epoll_event {
 	__u64 data;
 } EPOLL_PACKED;
 
-#ifdef CONFIG_PM_SLEEP
-static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
-{
-	if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
-		epev->events &= ~EPOLLWAKEUP;
-}
-#else
-static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)
-{
-	epev->events &= ~EPOLLWAKEUP;
-}
-#endif
 #endif /* _UAPI_LINUX_EVENTPOLL_H */
These helpers will access the private eventpoll structure in future
patches, so keep them close to their callers. Nothing important here.

Signed-off-by: Roman Penyaev <rpenyaev@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-fsdevel@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
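
For context, the helper being moved implements the userspace-visible
rule that EPOLLWAKEUP is silently cleared, not rejected, when the
caller lacks CAP_BLOCK_SUSPEND, or unconditionally when the kernel is
built without CONFIG_PM_SLEEP. A minimal userspace sketch of that
behaviour follows; it is illustrative only and not part of this patch
(the choice of a timerfd as the watched descriptor is arbitrary):

	/* epollwakeup-demo.c: illustrative sketch, not part of this patch. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/epoll.h>
	#include <sys/timerfd.h>
	#include <time.h>
	#include <unistd.h>

	int main(void)
	{
		int epfd = epoll_create1(0);
		/* Any pollable fd works; a timerfd is used for illustration. */
		int tfd = timerfd_create(CLOCK_MONOTONIC, 0);
		struct epoll_event ev = {
			/* Request a wakeup source; needs CAP_BLOCK_SUSPEND. */
			.events = EPOLLIN | EPOLLWAKEUP,
			.data.fd = tfd,
		};

		if (epfd < 0 || tfd < 0) {
			perror("setup");
			return EXIT_FAILURE;
		}

		/*
		 * This succeeds even without CAP_BLOCK_SUSPEND: the kernel
		 * strips EPOLLWAKEUP in ep_take_care_of_epollwakeup()
		 * instead of failing the syscall, so no wakeup source is
		 * created and the event behaves like plain EPOLLIN.
		 */
		if (epoll_ctl(epfd, EPOLL_CTL_ADD, tfd, &ev) < 0) {
			perror("epoll_ctl");
			return EXIT_FAILURE;
		}

		close(tfd);
		close(epfd);
		return EXIT_SUCCESS;
	}

Note that epoll_ctl() succeeds either way; a caller that needs to know
whether the wakeup source was actually honoured has to check its own
capabilities, since no error is reported when the flag is dropped.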