laforge has submitted this change. ( https://gerrit.osmocom.org/c/libosmocore/+/40489?usp=email )

Change subject: Allow io_uring_submit batching just ahead of poll/select
......................................................................

Allow io_uring_submit batching just ahead of poll/select

Let's add a mode (enabled via the LIBOSMO_IO_URING_BATCH environment
variable) where we don't call io_uring_submit() after every operation
we add to the submission queue.  Rather, we do that only once, right
before we go into poll.

This should massively reduce the number of io_uring_enter() syscalls
we're seeing.

Related: OS#6705
Change-Id: Id34fe2ced32c63d15b14810e145744f7509064cc
---
M src/core/osmo_io_uring.c
M src/core/select.c
2 files changed, 47 insertions(+), 4 deletions(-)

Approvals:
  pespin: Looks good to me, but someone else must approve
  Jenkins Builder: Verified
  laforge: Looks good to me, approved

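For context, a minimal self-contained sketch of the deferred-submit pattern
described in the commit message follows. It is illustrative only and not
libosmocore code: the identifiers queue_read(), event_loop_once() and
submit_needed are hypothetical, and ring setup, error handling and completion
processing are omitted. The idea is that SQEs are queued as work arrives, and
a single io_uring_submit() -- i.e. one io_uring_enter() syscall -- is issued
just before blocking in poll().

/* Illustrative sketch only; not part of this change. */
#include <stdbool.h>
#include <poll.h>
#include <liburing.h>

static struct io_uring ring;	/* assume io_uring_queue_init() already ran */
static bool submit_needed;

/* Queue a read SQE without entering the kernel; only note that a submit is pending. */
static void queue_read(int fd, void *buf, unsigned int len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
	if (!sqe)
		return;	/* SQ ring full; real code would submit and retry */
	io_uring_prep_read(sqe, fd, buf, len, 0);
	submit_needed = true;
}

/* One io_uring_submit() per main-loop iteration, right before blocking in poll(). */
static void event_loop_once(struct pollfd *pfds, nfds_t nfds)
{
	if (submit_needed) {
		io_uring_submit(&ring);
		submit_needed = false;
	}
	poll(pfds, nfds, -1);
	/* ... dispatch readable fds and io_uring completions here ... */
}

Without batching, every queued operation pays one io_uring_enter() syscall of
its own; with batching, any number of operations queued between two main-loop
iterations share a single syscall.
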
diff --git a/src/core/osmo_io_uring.c b/src/core/osmo_io_uring.c
index b8b240f..347461e 100644
--- a/src/core/osmo_io_uring.c
+++ b/src/core/osmo_io_uring.c
@@ -54,6 +54,11 @@
 
 #define IOFD_URING_ENTRIES 4096
 
+#define OSMO_IO_URING_BATCH "LIBOSMO_IO_URING_BATCH"
+
+bool g_io_uring_batch = false;
+bool g_io_uring_submit_needed = false;
+
 struct osmo_io_uring {
 	struct osmo_fd event_ofd;
 	struct io_uring ring;
@@ -88,8 +93,12 @@
 /*! initialize the uring and tie it into our event loop */
 void osmo_iofd_uring_init(void)
 {
+	const char *env;
 	int rc, evfd;
 
+	if ((env = getenv(OSMO_IO_URING_BATCH)))
+		g_io_uring_batch = true;
+
 	rc = io_uring_queue_init(IOFD_URING_ENTRIES, &g_ring.ring, 0);
 	if (rc < 0)
 		osmo_panic("failure during io_uring_queue_init(): %s\n", strerror(-rc));
@@ -117,6 +126,13 @@
 	}
 }
 
+static inline void iofd_io_uring_submit(void)
+{
+	if (OSMO_LIKELY(!g_io_uring_batch))
+		io_uring_submit(&g_ring.ring);
+	else
+		g_io_uring_submit_needed = true;
+}
 
 static void iofd_uring_submit_recv(struct osmo_io_fd *iofd, enum iofd_msg_action action)
 {
@@ -175,7 +191,8 @@
 	}
 	io_uring_sqe_set_data(sqe, msghdr);
 
-	io_uring_submit(&g_ring.ring);
+	iofd_io_uring_submit();
+
 	/* NOTE: This only works if we have one read per fd */
 	iofd->u.uring.read_msghdr = msghdr;
 }
@@ -317,7 +334,8 @@
 		OSMO_ASSERT(0);
 	}
 
-	io_uring_submit(&g_ring.ring);
+	iofd_io_uring_submit();
+
 	iofd->u.uring.write_msghdr = msghdr;
 
 	return 0;
@@ -417,7 +435,8 @@
 		msghdr->iofd = NULL;
 		io_uring_prep_cancel(sqe, msghdr, 0);
 	}
-	io_uring_submit(&g_ring.ring);
+
+	iofd_io_uring_submit();
 
 	if (IOFD_FLAG_ISSET(iofd, IOFD_FLAG_NOTIFY_CONNECTED)) {
 		osmo_fd_unregister(&iofd->u.uring.connect_ofd);
@@ -466,7 +485,8 @@
 		io_uring_prep_writev(sqe, iofd->fd, msghdr->iov, 1, 0);
 		io_uring_sqe_set_data(sqe, msghdr);
 
-		io_uring_submit(&g_ring.ring);
+		iofd_io_uring_submit();
+
 		iofd->u.uring.write_msghdr = msghdr;
 	}
 }
@@ -537,4 +557,12 @@
 	.notify_connected = iofd_uring_notify_connected,
 };
 
+void osmo_io_uring_submit(void)
+{
+	if (OSMO_LIKELY(g_io_uring_submit_needed)) {
+		io_uring_submit(&g_ring.ring);
+		g_io_uring_submit_needed = false;
+	}
+}
+
 #endif /* defined(__linux__) */
diff --git a/src/core/select.c b/src/core/select.c
index 70047f0..267f041 100644
--- a/src/core/select.c
+++ b/src/core/select.c
@@ -426,12 +426,22 @@
 	return work;
 }
 
+#if defined(HAVE_URING)
+void osmo_io_uring_submit(void);
+extern bool g_io_uring_batch;
+#endif
+
 static int _osmo_select_main(int polling)
 {
 	unsigned int n_poll;
 	int rc;
 	int timeout = 0;
 
+#if defined(HAVE_URING)
+	if (OSMO_UNLIKELY(g_io_uring_batch))
+		osmo_io_uring_submit();
+#endif
+
 	/* prepare read and write fdsets */
 	n_poll = poll_fill_fds();
 
@@ -464,6 +474,11 @@
 	int rc;
 	struct timeval no_time = {0, 0};
 
+#if defined(HAVE_URING)
+	if (OSMO_UNLIKELY(g_io_uring_batch))
+		osmo_io_uring_submit();
+#endif
+
 	FD_ZERO(&readset);
 	FD_ZERO(&writeset);
 	FD_ZERO(&exceptset);
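
Usage note (based on the getenv() check above; the value itself is not
interpreted): batching is enabled whenever LIBOSMO_IO_URING_BATCH is present
in the process environment, e.g. by starting a program with
LIBOSMO_IO_URING_BATCH=1 set. Applications that drive libosmocore through the
usual osmo_select_main() loop should not need any code changes, since
_osmo_select_main() now flushes pending submissions itself; a rough sketch of
such a loop, assuming the standard osmocom/core/select.h API:

#include <osmocom/core/select.h>

int main(void)
{
	/* ... register osmo_fd / osmo_io handlers here ... */
	for (;;) {
		/* With LIBOSMO_IO_URING_BATCH set, io_uring SQEs queued by the
		 * osmo_io backend are flushed with a single io_uring_submit()
		 * inside this call, just before it blocks in poll(). */
		osmo_select_main(0);
	}
	return 0;
}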