Attention is currently required from: Hoernchen.
laforge has posted comments on this change by Hoernchen. ( https://gerrit.osmocom.org/c/osmo-ttcn3-hacks/+/41119?usp=email )
Change subject: smdpp: es9p testsuite
......................................................................
Patch Set 4: Code-Review+1
(1 comment)
Patchset:
PS4:
What I'm missing in both the main .ttcn file and the commit log is which exact version of which GSMA test suite document was used for implementing the test suite.
--
To view, visit https://gerrit.osmocom.org/c/osmo-ttcn3-hacks/+/41119?usp=email
To unsubscribe, or for help writing mail filters, visit https://gerrit.osmocom.org/settings?usp=email
Gerrit-MessageType: comment
Gerrit-Project: osmo-ttcn3-hacks
Gerrit-Branch: master
Gerrit-Change-Id: I3ba163d9155a3b019214dbccc3c8031dfd7deb6b
Gerrit-Change-Number: 41119
Gerrit-PatchSet: 4
Gerrit-Owner: Hoernchen <ewild(a)sysmocom.de>
Gerrit-Reviewer: Jenkins Builder
Gerrit-Reviewer: laforge <laforge(a)osmocom.org>
Gerrit-Attention: Hoernchen <ewild(a)sysmocom.de>
Gerrit-Comment-Date: Thu, 18 Sep 2025 09:46:06 +0000
Gerrit-HasComments: Yes
Gerrit-Has-Labels: Yes
laforge has submitted this change. ( https://gerrit.osmocom.org/c/libosmocore/+/40725?usp=email )
Change subject: Automatically increase io_uring, if too small.
......................................................................
Automatically increase io_uring, if too small.
The ring may be too small to store all SQEs before the kernel can
handle them. If this happens, a new ring is allocated with twice the
size of the old one. The old ring is not destroyed, as it still
contains uncompleted entries; some of them may never complete.
A pointer to the current ring is stored within the msghdr structure.
It is used when cancelling an SQE, because a cancellation must be
submitted to the same ring on which the SQE was created.
It is quite unlikely that the old ring cannot store the cancellation
SQE. If this happens, the cancellation is queued and submitted once
the ring has room for it.
The old ring is not removed, because there is currently no counter
to determine when all of its submissions have completed.
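For illustration, a minimal sketch of the grow-on-full pattern described
above; this is not the actual libosmocore code: ring_alloc(), the globals
and the sizes are hypothetical, only the liburing calls
(io_uring_queue_init(), io_uring_get_sqe(), io_uring_submit()) are real.

#include <liburing.h>
#include <stdlib.h>

#define RING_MAX_SIZE 4096

static struct io_uring *g_cur_ring;	/* set up by an init function */
static unsigned int g_ring_size = 16;

/* Allocate a fresh ring; the previous one stays allocated, because it
 * may still hold uncompleted submissions. */
static struct io_uring *ring_alloc(unsigned int size)
{
	struct io_uring *ring = calloc(1, sizeof(*ring));
	if (!ring)
		return NULL;
	if (io_uring_queue_init(size, ring, 0) < 0) {
		free(ring);
		return NULL;
	}
	return ring;
}

/* Get an SQE, growing into a new, larger ring when the current one is full. */
static struct io_uring_sqe *get_sqe_grow(void)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(g_cur_ring);
	if (sqe)
		return sqe;

	/* Current ring is full: flush its SQEs to the kernel, then switch
	 * to a ring of twice the size. The old ring is kept (leaked) on
	 * purpose, mirroring the "no counter yet" limitation above. */
	io_uring_submit(g_cur_ring);
	if (g_ring_size < RING_MAX_SIZE)
		g_ring_size <<= 1;
	g_cur_ring = ring_alloc(g_ring_size);
	return g_cur_ring ? io_uring_get_sqe(g_cur_ring) : NULL;
}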
Related: OS#6705
Change-Id: Id9230146acc8d54bfd44834e783c31b37bd64bca
---
M src/core/osmo_io_internal.h
M src/core/osmo_io_uring.c
2 files changed, 148 insertions(+), 39 deletions(-)
Approvals:
Jenkins Builder: Verified
pespin: Looks good to me, but someone else must approve
laforge: Looks good to me, approved
diff --git a/src/core/osmo_io_internal.h b/src/core/osmo_io_internal.h
index f983b77..bf5bb76 100644
--- a/src/core/osmo_io_internal.h
+++ b/src/core/osmo_io_internal.h
@@ -114,9 +114,13 @@
uint8_t num_read_sqes;
/*! array of simultaneously submitted read SQEs */
void *read_msghdr[IOFD_MSGHDR_MAX_READ_SQES];
+ /*! ring the read SQEs have been submitted to */
+ struct io_uring *read_ring;
/*! current number of simultaneously submitted read SQEs */
uint8_t reads_submitted;
void *write_msghdr;
+ /*! ring the write SQE has been submitted to */
+ struct io_uring *write_ring;
/* TODO: index into array of registered fd's? */
/* osmo_fd for non-blocking connect handling */
struct osmo_fd connect_ofd;
@@ -157,6 +161,9 @@
/*! I/O file descriptor on which we perform this I/O operation */
struct osmo_io_fd *iofd;
+ /*! msghdr is in the cancel_queue list */
+ bool in_cancel_queue;
+
/*! control message buffer for passing sctp_sndrcvinfo along */
char cmsg[0]; /* size is determined by iofd->cmsg_size on recvmsg, and by mcghdr->msg_controllen on sendmsg */
};
diff --git a/src/core/osmo_io_uring.c b/src/core/osmo_io_uring.c
index 48f6f0d..12caa77 100644
--- a/src/core/osmo_io_uring.c
+++ b/src/core/osmo_io_uring.c
@@ -35,6 +35,7 @@
#include <string.h>
#include <stdbool.h>
#include <errno.h>
+#include <limits.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
@@ -72,12 +73,15 @@
struct osmo_io_uring {
struct osmo_fd event_ofd;
struct io_uring ring;
+ struct llist_head cancel_queue;
};
-static __thread struct osmo_io_uring g_ring;
+static __thread struct osmo_io_uring *g_ring = NULL;
static void iofd_uring_cqe(struct io_uring *ring);
+void osmo_io_uring_submit(void);
+
/*! read call-back for eventfd notifying us if entries are in the completion queue */
static int iofd_uring_poll_cb(struct osmo_fd *ofd, unsigned int what)
{
@@ -109,7 +113,7 @@
if ((env = getenv(OSMO_IO_URING_BATCH)))
g_io_uring_batch = true;
- if ((env = getenv(OSMO_IO_URING_INITIAL_SIZE))) {
+ if (!g_ring && (env = getenv(OSMO_IO_URING_INITIAL_SIZE))) {
int env_value;
rc = osmo_str_to_int(&env_value, env, 10, 1, IOFD_URING_MAXIMUM_SIZE);
if (rc < 0) {
@@ -124,7 +128,10 @@
g_io_uring_size = env_value;
}
- rc = io_uring_queue_init(g_io_uring_size, &g_ring.ring, 0);
+ g_ring = talloc_zero(OTC_GLOBAL, struct osmo_io_uring);
+ INIT_LLIST_HEAD(&g_ring->cancel_queue);
+
+ rc = io_uring_queue_init(g_io_uring_size, &g_ring->ring, 0);
if (rc < 0)
osmo_panic("failure during io_uring_queue_init(): %s\n", strerror(-rc));
@@ -139,42 +146,88 @@
rc = eventfd(0, 0);
if (rc < 0) {
- io_uring_queue_exit(&g_ring.ring);
+ io_uring_queue_exit(&g_ring->ring);
osmo_panic("failure creating eventfd(0, 0) for io_uring: %s\n", strerror(-rc));
}
evfd = rc;
- osmo_fd_setup(&g_ring.event_ofd, evfd, OSMO_FD_READ, iofd_uring_poll_cb, &g_ring.ring, 0);
- rc = osmo_fd_register(&g_ring.event_ofd);
+ osmo_fd_setup(&g_ring->event_ofd, evfd, OSMO_FD_READ, iofd_uring_poll_cb, &g_ring->ring, 0);
+ rc = osmo_fd_register(&g_ring->event_ofd);
if (rc < 0) {
close(evfd);
- io_uring_queue_exit(&g_ring.ring);
+ io_uring_queue_exit(&g_ring->ring);
osmo_panic("failure registering io_uring-eventfd as osmo_fd: %d\n", rc);
}
- rc = io_uring_register_eventfd(&g_ring.ring, evfd);
+ rc = io_uring_register_eventfd(&g_ring->ring, evfd);
if (rc < 0) {
- osmo_fd_unregister(&g_ring.event_ofd);
+ osmo_fd_unregister(&g_ring->event_ofd);
close(evfd);
- io_uring_queue_exit(&g_ring.ring);
+ io_uring_queue_exit(&g_ring->ring);
osmo_panic("failure registering eventfd with io_uring: %s\n", strerror(-rc));
}
}
+static struct io_uring_sqe *iofd_uring_get_sqe(struct osmo_io_fd *iofd, bool read)
+{
+ struct io_uring_sqe *sqe;
+
+ /* All subsequent read SQEs must be on the same ring. */
+ if (read && iofd->u.uring.reads_submitted > 0 && iofd->u.uring.read_ring != &g_ring->ring)
+ return NULL;
+
+ sqe = io_uring_get_sqe(&g_ring->ring);
+ if (sqe)
+ return sqe;
+
+ /* The current ring is full, subsequent read SQEs on different ring are not allowed. */
+ if (read && iofd->u.uring.reads_submitted > 0)
+ return NULL;
+
+ if (g_io_uring_size < IOFD_URING_MAXIMUM_SIZE) {
+ LOGP(DLIO, LOGL_NOTICE, "io_uring too small to handle all SQEs with its current size of %d. "
+ "Increasing io_uring size to %d.\n", g_io_uring_size, g_io_uring_size * 2);
+ g_io_uring_size <<= 1;
+ } else {
+ LOGP(DLIO, LOGL_NOTICE, "io_uring too small to handle all SQEs with its maximum size of %d. "
+ "adding another one.\n", g_io_uring_size);
+ }
+
+ /* Submit all SQEs of current ring and create a new ring, if needed.
+ * The old ring will be kept, as there are uncompleted submissions.
+ * TODO: Destroy old ring, once all submissions are completed. */
+ osmo_io_uring_submit();
+
+ osmo_iofd_uring_init();
+
+ sqe = io_uring_get_sqe(&g_ring->ring);
+ OSMO_ASSERT(sqe);
+ return sqe;
+}
+
static inline void iofd_io_uring_submit(void)
{
if (OSMO_LIKELY(!g_io_uring_batch))
- io_uring_submit(&g_ring.ring);
+ io_uring_submit(&g_ring->ring);
else
g_io_uring_submit_needed = true;
}
-static void iofd_uring_submit_recv_sqe(struct osmo_io_fd *iofd, enum iofd_msg_action action)
+static inline int iofd_uring_submit_recv_sqe(struct osmo_io_fd *iofd, enum iofd_msg_action action)
{
struct msgb *msg;
struct iofd_msghdr *msghdr;
struct io_uring_sqe *sqe;
uint8_t idx;
+ /* Tell iofd_uring_get_sqe() not to allocate a new ring, if we want to enqueue multiple read SQEs. */
+ sqe = iofd_uring_get_sqe(iofd, true);
+ if (!sqe) {
+ if (iofd->u.uring.reads_submitted > 0)
+ return -EINVAL;
+ LOGPIO(iofd, LOGL_ERROR, "Could not get io_uring_sqe\n");
+ OSMO_ASSERT(0);
+ }
+
msg = iofd_msgb_alloc(iofd);
if (!msg) {
LOGPIO(iofd, LOGL_ERROR, "Could not allocate msgb for reading\n");
@@ -209,12 +262,6 @@
OSMO_ASSERT(0);
}
- sqe = io_uring_get_sqe(&g_ring.ring);
- if (!sqe) {
- LOGPIO(iofd, LOGL_ERROR, "Could not get io_uring_sqe\n");
- OSMO_ASSERT(0);
- }
-
switch (action) {
case IOFD_ACT_READ:
io_uring_prep_readv(sqe, iofd->fd, msghdr->iov, msghdr->hdr.msg_iovlen, -1);
@@ -232,13 +279,22 @@
iofd->u.uring.read_msghdr[iofd->u.uring.reads_submitted] = msghdr;
iofd->u.uring.reads_submitted++;
+ iofd->u.uring.read_ring = &g_ring->ring;
+
+ return 0;
}
static void iofd_uring_submit_recv(struct osmo_io_fd *iofd, enum iofd_msg_action action)
{
+ int rc;
+
/* Submit more read SQEs in advance, if requested. */
- while (iofd->u.uring.reads_submitted < iofd->u.uring.num_read_sqes)
- iofd_uring_submit_recv_sqe(iofd, action);
+ while (iofd->u.uring.reads_submitted < iofd->u.uring.num_read_sqes) {
+ rc = iofd_uring_submit_recv_sqe(iofd, action);
+ /* Stop, if we cannot enqueue multiple read SQEs in the same ring. */
+ if (rc < 0)
+ break;
+ }
}
/*! completion call-back for READ/RECVFROM */
@@ -311,8 +367,10 @@
* This callback function might close iofd, leading to the potential freeing of iofd->u.uring.write_msghdr if
* still attached. Since iofd_handle_send_completion() frees msghdr at the end of the function, detaching
* msghdr here prevents a double-free bug. */
- if (iofd->u.uring.write_msghdr == msghdr)
+ if (iofd->u.uring.write_msghdr == msghdr) {
iofd->u.uring.write_msghdr = NULL;
+ iofd->u.uring.write_ring = NULL;
+ }
if (OSMO_UNLIKELY(IOFD_FLAG_ISSET(iofd, IOFD_FLAG_CLOSED))) {
for (int idx = 0; idx < msghdr->io_len; idx++) {
@@ -362,7 +420,8 @@
{
int rc;
struct io_uring_cqe *cqe;
- struct iofd_msghdr *msghdr;
+ struct iofd_msghdr *msghdr, *msghdr2;
+ struct osmo_io_uring *orig_ring = container_of(ring, struct osmo_io_uring, ring);
while (io_uring_peek_cqe(ring, &cqe) == 0) {
@@ -374,6 +433,8 @@
}
if (!msghdr->iofd) {
io_uring_cqe_seen(ring, cqe);
+ if (msghdr->in_cancel_queue)
+ llist_del(&msghdr->list);
iofd_msghdr_free(msghdr);
continue;
}
@@ -385,6 +446,23 @@
iofd_uring_handle_completion(msghdr, rc);
}
+
+ /* If there are unsubmitted cancel SQEs, try to queue them now. */
+ if (OSMO_LIKELY(llist_empty(&orig_ring->cancel_queue)))
+ return;
+ llist_for_each_entry_safe(msghdr, msghdr2, &orig_ring->cancel_queue, list) {
+ struct io_uring_sqe *sqe;
+ sqe = io_uring_get_sqe(&orig_ring->ring);
+ if (!sqe)
+ break;
+ io_uring_sqe_set_data(sqe, NULL);
+ LOGP(DLIO, LOGL_DEBUG, "Cancelling queued read/write\n");
+ io_uring_prep_cancel(sqe, msghdr, 0);
+ llist_del(&msghdr->list);
+ msghdr->in_cancel_queue = false;
+ }
+ io_uring_submit(&orig_ring->ring);
+
}
/*! will submit the next to-be-transmitted message for given iofd */
@@ -397,7 +475,7 @@
if (!msghdr)
return -ENODATA;
- sqe = io_uring_get_sqe(&g_ring.ring);
+ sqe = iofd_uring_get_sqe(iofd, false);
if (!sqe) {
LOGPIO(iofd, LOGL_ERROR, "Could not get io_uring_sqe\n");
OSMO_ASSERT(0);
@@ -420,6 +498,7 @@
iofd_io_uring_submit();
iofd->u.uring.write_msghdr = msghdr;
+ iofd->u.uring.write_ring = &g_ring->ring;
return 0;
}
@@ -503,36 +582,58 @@
uint8_t idx;
for (idx = 0; idx < iofd->u.uring.reads_submitted; idx++) {
+ struct osmo_io_uring *ring = container_of(iofd->u.uring.read_ring, struct osmo_io_uring, ring);
msghdr = iofd->u.uring.read_msghdr[idx];
iofd->u.uring.read_msghdr[idx] = NULL;
- sqe = io_uring_get_sqe(&g_ring.ring);
- OSMO_ASSERT(sqe != NULL);
- io_uring_sqe_set_data(sqe, NULL);
- LOGPIO(iofd, LOGL_DEBUG, "Cancelling read\n");
+ /* Submit SQEs of the current ring, if needed. */
+ if (&ring->ring == &g_ring->ring)
+ osmo_io_uring_submit();
+ /* If the submission queue is full, use cancel queue. We cannot cancel SQEs on the new ring. */
+ sqe = io_uring_get_sqe(&ring->ring);
+ if (sqe) {
+ io_uring_sqe_set_data(sqe, NULL);
+ LOGPIO(iofd, LOGL_DEBUG, "Cancelling read\n");
+ io_uring_prep_cancel(sqe, msghdr, 0);
+ io_uring_submit(&ring->ring);
+ } else {
+ llist_add_tail(&msghdr->list, &ring->cancel_queue);
+ msghdr->in_cancel_queue = true;
+ }
talloc_steal(OTC_GLOBAL, msghdr);
msghdr->iofd = NULL;
- io_uring_prep_cancel(sqe, msghdr, 0);
}
- iofd->u.uring.reads_submitted = 0;
+ if (iofd->u.uring.reads_submitted) {
+ iofd->u.uring.read_ring = NULL;
+ iofd->u.uring.reads_submitted = 0;
+ }
if (iofd->u.uring.write_msghdr) {
+ struct osmo_io_uring *ring = container_of(iofd->u.uring.write_ring, struct osmo_io_uring, ring);
msghdr = iofd->u.uring.write_msghdr;
- sqe = io_uring_get_sqe(&g_ring.ring);
- OSMO_ASSERT(sqe != NULL);
- io_uring_sqe_set_data(sqe, NULL);
- LOGPIO(iofd, LOGL_DEBUG, "Cancelling write\n");
iofd->u.uring.write_msghdr = NULL;
- talloc_steal(OTC_GLOBAL, msghdr);
for (int idx = 0; idx < msghdr->io_len; idx++) {
msgb_free(msghdr->msg[idx]);
msghdr->msg[idx] = NULL;
}
+ /* Submit SQEs of the current ring, if needed. */
+ if (&ring->ring == &g_ring->ring)
+ osmo_io_uring_submit();
+ /* If the submission queue is full, use cancel queue. We cannot cancel SQEs on the new ring. */
+ sqe = io_uring_get_sqe(&ring->ring);
+ if (sqe) {
+ io_uring_sqe_set_data(sqe, NULL);
+ LOGPIO(iofd, LOGL_DEBUG, "Cancelling write\n");
+ io_uring_prep_cancel(sqe, msghdr, 0);
+ io_uring_submit(&ring->ring);
+ } else {
+ llist_add_tail(&msghdr->list, &ring->cancel_queue);
+ msghdr->in_cancel_queue = true;
+ }
+ talloc_steal(OTC_GLOBAL, msghdr);
msghdr->iofd = NULL;
- io_uring_prep_cancel(sqe, msghdr, 0);
+ iofd->u.uring.write_ring = NULL;
}
- iofd_io_uring_submit();
-
if (IOFD_FLAG_ISSET(iofd, IOFD_FLAG_NOTIFY_CONNECTED)) {
osmo_fd_unregister(&iofd->u.uring.connect_ofd);
IOFD_FLAG_UNSET(iofd, IOFD_FLAG_NOTIFY_CONNECTED);
@@ -572,7 +673,7 @@
msghdr->iov[0].iov_base = msgb_data(msg);
msghdr->iov[0].iov_len = msgb_length(msg);
- sqe = io_uring_get_sqe(&g_ring.ring);
+ sqe = iofd_uring_get_sqe(iofd, false);
if (!sqe) {
LOGPIO(iofd, LOGL_ERROR, "Could not get io_uring_sqe\n");
OSMO_ASSERT(0);
@@ -583,6 +684,7 @@
iofd_io_uring_submit();
iofd->u.uring.write_msghdr = msghdr;
+ iofd->u.uring.write_ring = &g_ring->ring;
}
}
@@ -656,7 +758,7 @@
void osmo_io_uring_submit(void)
{
if (OSMO_LIKELY(g_io_uring_submit_needed)) {
- io_uring_submit(&g_ring.ring);
+ io_uring_submit(&g_ring->ring);
g_io_uring_submit_needed = false;
}
}
--
To view, visit https://gerrit.osmocom.org/c/libosmocore/+/40725?usp=email
To unsubscribe, or for help writing mail filters, visit https://gerrit.osmocom.org/settings?usp=email
Gerrit-MessageType: merged
Gerrit-Project: libosmocore
Gerrit-Branch: master
Gerrit-Change-Id: Id9230146acc8d54bfd44834e783c31b37bd64bca
Gerrit-Change-Number: 40725
Gerrit-PatchSet: 15
Gerrit-Owner: jolly <andreas(a)eversberg.eu>
Gerrit-Reviewer: Jenkins Builder
Gerrit-Reviewer: laforge <laforge(a)osmocom.org>
Gerrit-Reviewer: pespin <pespin(a)sysmocom.de>
laforge has submitted this change. ( https://gerrit.osmocom.org/c/libosmocore/+/40855?usp=email )
Change subject: Remove old empty io_uring
......................................................................
Remove old empty io_uring
A previous patch creates a new io_uring if the current one becomes too
small to store all SQEs. Once all SQEs of the old ring have completed,
the old ring can be destroyed.
A counter is incremented whenever an SQE is submitted to an io_uring
and decremented whenever a CQE is received and handled. This counter
determines when a ring is empty and can be destroyed.
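For illustration, a minimal sketch of such pending-submission counting;
this is not the actual libosmocore code: struct counted_ring and its
fields are hypothetical stand-ins for struct osmo_io_uring.

#include <liburing.h>
#include <stdbool.h>

struct counted_ring {
	struct io_uring ring;
	unsigned int num_pending;	/* SQEs submitted but not yet completed */
};

/* Hand out an SQE and count it as an outstanding submission. */
static struct io_uring_sqe *counted_get_sqe(struct counted_ring *cr)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(&cr->ring);
	if (sqe)
		cr->num_pending++;
	return sqe;
}

/* Reap completions; returns true once the ring is drained, i.e. an old
 * ring that is no longer the current one may now be destroyed. */
static bool counted_reap(struct counted_ring *cr)
{
	struct io_uring_cqe *cqe;

	while (io_uring_peek_cqe(&cr->ring, &cqe) == 0) {
		cr->num_pending--;	/* one completion handled */
		io_uring_cqe_seen(&cr->ring, cqe);
	}
	return cr->num_pending == 0;
}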
Related: OS#6705
Change-Id: Id2d2a0400ad442198c684ea0ead4eaeaead4c53d
---
M src/core/osmo_io_uring.c
1 file changed, 36 insertions(+), 8 deletions(-)
Approvals:
pespin: Looks good to me, but someone else must approve
Jenkins Builder: Verified
laforge: Looks good to me, approved
diff --git a/src/core/osmo_io_uring.c b/src/core/osmo_io_uring.c
index 12caa77..45a08fe 100644
--- a/src/core/osmo_io_uring.c
+++ b/src/core/osmo_io_uring.c
@@ -66,7 +66,7 @@
bool g_io_uring_batch = false;
bool g_io_uring_submit_needed = false;
-static unsigned int g_io_uring_size = IOFD_URING_INITIAL_SIZE;
+static int g_io_uring_size = IOFD_URING_INITIAL_SIZE;
static int g_io_uring_read_sqes = 1;
@@ -74,6 +74,7 @@
struct osmo_fd event_ofd;
struct io_uring ring;
struct llist_head cancel_queue;
+ unsigned int num_pending_submissions;
};
static __thread struct osmo_io_uring *g_ring = NULL;
@@ -167,6 +168,29 @@
}
}
+static void osmo_iofd_uring_exit(struct osmo_io_uring *ring)
+{
+ LOGP(DLIO, LOGL_DEBUG, "Old empty io_uring will be destroyed.");
+
+ io_uring_queue_exit(&ring->ring);
+
+ osmo_fd_unregister(&ring->event_ofd);
+ close(ring->event_ofd.fd);
+
+ talloc_free(ring);
+}
+
+static inline struct io_uring_sqe *io_uring_get_sqe_and_count(struct osmo_io_uring *ring)
+{
+ struct io_uring_sqe *sqe;
+
+ sqe = io_uring_get_sqe(&ring->ring);
+ if (sqe)
+ ring->num_pending_submissions++;
+
+ return sqe;
+}
+
static struct io_uring_sqe *iofd_uring_get_sqe(struct osmo_io_fd *iofd, bool read)
{
struct io_uring_sqe *sqe;
@@ -175,7 +199,7 @@
if (read && iofd->u.uring.reads_submitted > 0 && iofd->u.uring.read_ring != &g_ring->ring)
return NULL;
- sqe = io_uring_get_sqe(&g_ring->ring);
+ sqe = io_uring_get_sqe_and_count(g_ring);
if (sqe)
return sqe;
@@ -199,7 +223,7 @@
osmo_iofd_uring_init();
- sqe = io_uring_get_sqe(&g_ring->ring);
+ sqe = io_uring_get_sqe_and_count(g_ring);
OSMO_ASSERT(sqe);
return sqe;
}
@@ -424,6 +448,7 @@
struct osmo_io_uring *orig_ring = container_of(ring, struct osmo_io_uring, ring);
while (io_uring_peek_cqe(ring, &cqe) == 0) {
+ orig_ring->num_pending_submissions--;
msghdr = io_uring_cqe_get_data(cqe);
if (!msghdr) {
@@ -448,11 +473,15 @@
}
/* If there are unsubmitted cancel SQEs, try to queue them now. */
- if (OSMO_LIKELY(llist_empty(&orig_ring->cancel_queue)))
+ if (OSMO_LIKELY(llist_empty(&orig_ring->cancel_queue))) {
+ /* Old ring is empty, remove it. */
+ if (OSMO_UNLIKELY(orig_ring != g_ring && orig_ring->num_pending_submissions == 0))
+ osmo_iofd_uring_exit(orig_ring);
return;
+ }
llist_for_each_entry_safe(msghdr, msghdr2, &orig_ring->cancel_queue, list) {
struct io_uring_sqe *sqe;
- sqe = io_uring_get_sqe(&orig_ring->ring);
+ sqe = io_uring_get_sqe_and_count(orig_ring);
if (!sqe)
break;
io_uring_sqe_set_data(sqe, NULL);
@@ -462,7 +491,6 @@
msghdr->in_cancel_queue = false;
}
io_uring_submit(&orig_ring->ring);
-
}
/*! will submit the next to-be-transmitted message for given iofd */
@@ -589,7 +617,7 @@
if (&ring->ring == &g_ring->ring)
osmo_io_uring_submit();
/* If the submission queue is full, use cancel queue. We cannot cancel SQEs on the new ring. */
- sqe = io_uring_get_sqe(&ring->ring);
+ sqe = io_uring_get_sqe_and_count(ring);
if (sqe) {
io_uring_sqe_set_data(sqe, NULL);
LOGPIO(iofd, LOGL_DEBUG, "Cancelling read\n");
@@ -619,7 +647,7 @@
if (&ring->ring == &g_ring->ring)
osmo_io_uring_submit();
/* If the submission queue is full, use cancel queue. We cannot cancel SQEs on the new ring. */
- sqe = io_uring_get_sqe(&ring->ring);
+ sqe = io_uring_get_sqe_and_count(ring);
if (sqe) {
io_uring_sqe_set_data(sqe, NULL);
LOGPIO(iofd, LOGL_DEBUG, "Cancelling write\n");
--
To view, visit https://gerrit.osmocom.org/c/libosmocore/+/40855?usp=email
To unsubscribe, or for help writing mail filters, visit https://gerrit.osmocom.org/settings?usp=email
Gerrit-MessageType: merged
Gerrit-Project: libosmocore
Gerrit-Branch: master
Gerrit-Change-Id: Id2d2a0400ad442198c684ea0ead4eaeaead4c53d
Gerrit-Change-Number: 40855
Gerrit-PatchSet: 7
Gerrit-Owner: jolly <andreas(a)eversberg.eu>
Gerrit-Reviewer: Jenkins Builder
Gerrit-Reviewer: laforge <laforge(a)osmocom.org>
Gerrit-Reviewer: pespin <pespin(a)sysmocom.de>
laforge has submitted this change. ( https://gerrit.osmocom.org/c/libosmocore/+/40760?usp=email )
Change subject: osmo-io: Put together message buffers when dequeued from tx queue
......................................................................
osmo-io: Put together message buffers when dequeued from tx queue
Write operations may be incomplete. After a write operation, osmo-io
removes the completed message buffers from the msghdr and puts the
msghdr with the remaining buffers back into the tx_queue.
If the user requests multiple message buffers per write operation, the
msghdr of an incomplete write may hold fewer message buffers than the
user requested. The missing buffers are then taken from the next
msghdr in the queue, if one exists.
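For illustration, a minimal sketch of this top-up logic on a simplified
buffer array; this is not the actual libosmocore code: struct txhdr and
txhdr_top_up() are hypothetical stand-ins for the iofd_msghdr handling
in the diff below.

#include <stddef.h>
#include <string.h>

#define MAX_BUFS 8

struct txhdr {
	void *buf[MAX_BUFS];	/* queued message buffers */
	size_t len;		/* number of used slots */
};

/* Move buffers from 'next' into 'cur' until 'cur' holds 'want' buffers
 * or 'next' runs empty; returns how many buffers were moved. */
static size_t txhdr_top_up(struct txhdr *cur, struct txhdr *next, size_t want)
{
	size_t moved = 0;

	while (cur->len < want && next->len > 0) {
		/* Take the first buffer of 'next'... */
		cur->buf[cur->len++] = next->buf[0];
		/* ...and shift the remaining buffers of 'next' forward. */
		memmove(&next->buf[0], &next->buf[1],
			(next->len - 1) * sizeof(next->buf[0]));
		next->buf[--next->len] = NULL;
		moved++;
	}
	return moved;
}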
Change-Id: I97c366211dd266fd58ec252890ec017d6d334534
---
M src/core/osmo_io.c
1 file changed, 36 insertions(+), 5 deletions(-)
Approvals:
pespin: Looks good to me, approved
laforge: Looks good to me, but someone else must approve
Jenkins Builder: Verified
diff --git a/src/core/osmo_io.c b/src/core/osmo_io.c
index bfc75c6..62d2362 100644
--- a/src/core/osmo_io.c
+++ b/src/core/osmo_io.c
@@ -247,21 +247,52 @@
*/
struct iofd_msghdr *iofd_txqueue_dequeue(struct osmo_io_fd *iofd)
{
- struct llist_head *lh;
+ struct iofd_msghdr *msghdr;
if (iofd->tx_queue.current_length == 0)
return NULL;
- lh = iofd->tx_queue.msg_queue.next;
+ msghdr = llist_first_entry_or_null(&iofd->tx_queue.msg_queue, struct iofd_msghdr, list);
- OSMO_ASSERT(lh);
+ OSMO_ASSERT(msghdr);
iofd->tx_queue.current_length--;
- llist_del(lh);
+ llist_del(&msghdr->list);
+
+ /* Fill up empty buffers in dequeued msghdr with buffers from the next msghdr.
+ * There can be empty buffers, when a msghdr is queued to the front with incomplete write. */
+ while (OSMO_UNLIKELY(msghdr->io_len < iofd->io_write_buffers)) {
+ struct iofd_msghdr *next;
+ int i;
+
+ if (iofd->tx_queue.current_length == 0)
+ break;
+ next = llist_first_entry_or_null(&iofd->tx_queue.msg_queue, struct iofd_msghdr, list);
+ OSMO_ASSERT(next->io_len > 0);
+ /* Get first message buffer from next msghdr and store them in the dequeued one. */
+ msghdr->iov[msghdr->io_len] = next->iov[0];
+ msghdr->msg[msghdr->io_len] = next->msg[0];
+ msghdr->hdr.msg_iovlen = ++msghdr->io_len;
+ /* Remove the message buffer from the next msghdr and free, if empty. */
+ next->io_len--;
+ for (i = 0; i < next->io_len; i++) {
+ next->iov[i] = next->iov[i + 1];
+ next->msg[i] = next->msg[i + 1];
+ }
+ if (next->io_len == 0) {
+ iofd->tx_queue.current_length--;
+ llist_del(&next->list);
+ iofd_msghdr_free(next);
+ } else {
+ memset(&next->iov[next->io_len], 0, sizeof(struct iovec));
+ next->msg[next->io_len] = NULL;
+ next->hdr.msg_iovlen = --next->io_len;
+ }
+ }
if (iofd->tx_queue.current_length == 0)
osmo_iofd_ops.write_disable(iofd);
- return llist_entry(lh, struct iofd_msghdr, list);
+ return msghdr;
}
/*! Handle segmentation of the msg. If this function returns *_HANDLE_ONE or MORE then the data in msg will contain
--
To view, visit https://gerrit.osmocom.org/c/libosmocore/+/40760?usp=email
To unsubscribe, or for help writing mail filters, visit https://gerrit.osmocom.org/settings?usp=email
Gerrit-MessageType: merged
Gerrit-Project: libosmocore
Gerrit-Branch: master
Gerrit-Change-Id: I97c366211dd266fd58ec252890ec017d6d334534
Gerrit-Change-Number: 40760
Gerrit-PatchSet: 7
Gerrit-Owner: jolly <andreas(a)eversberg.eu>
Gerrit-Reviewer: Jenkins Builder
Gerrit-Reviewer: laforge <laforge(a)osmocom.org>
Gerrit-Reviewer: pespin <pespin(a)sysmocom.de>