fixeria submitted this change.
s1gw: initial test cases for MME pooling
Three test cases covering the MME pool selection logic in OsmoS1GW:
* TC_mme_pool_reject_fallback: S1GW falls back to the next pool entry
when the first MME rejects S1SetupReq with S1SetupFailure.
* TC_mme_pool_timeout_fallback: S1GW falls back when the first MME
does not respond to S1SetupReq within the timeout.
* TC_mme_pool_all_reject: all pool entries reject S1SetupReq; S1GW
must send S1SetupFailure to the eNB and tear down the connection.
Infrastructure added to support these tests:
* S1AP_Server.ttcn: S1AP_ServerList type; directed register/unregister
helpers (f_ConnHdlr_s1ap_register_to / _unregister_from) for use
when multiple S1AP_Server_CT instances are active simultaneously.
* S1GW_ConnHdlr.ttcn: f_ConnHdlr_s1ap_setup_pool() drives the pool
setup sequence: pre-registers with all servers, sends S1SetupReq once
(S1GW re-transmits it per-MME), then iterates through the expected
behaviors (ACCEPT / REJECT / TIMEOUT) waiting for each server in turn.
* S1GW_Tests.ttcn: f_init_s1ap_srv(N) starts N MME emulators on
consecutive IP addresses; f_TC_exec_pool() orchestrates pool tests.
* osmo-s1gw.config: a 'mme_pool' section with three entries is added
alongside the existing sctp_client section. Older OsmoS1GW (without
pooling support) will use sctp_client to connect to a single MME and
the pool test cases will simply fail, as expected. Newer OsmoS1GW
will use mme_pool and all three test cases will pass.
Change-Id: Ib8fd62e4352e3055971a669b8b363078bcd95d8d
Related: SYS#7052
---
M s1gw/S1AP_Server.ttcn
M s1gw/S1GW_ConnHdlr.ttcn
M s1gw/S1GW_Tests.ttcn
M s1gw/expected-results.xml
M s1gw/osmo-s1gw.config
5 files changed, 270 insertions(+), 20 deletions(-)
diff --git a/s1gw/S1AP_Server.ttcn b/s1gw/S1AP_Server.ttcn
index 6d5d254..40f8741 100644
--- a/s1gw/S1AP_Server.ttcn
+++ b/s1gw/S1AP_Server.ttcn
@@ -5,7 +5,7 @@
* A ConnHdlr component may subscribe for one or more S1AP connections
* using the Global_ENB_ID value, which is sent in S1AP SetupReq.
*
- * (C) 2024 by sysmocom - s.f.m.c. GmbH <info@sysmocom.de>
+ * (C) 2024-2026 by sysmocom - s.f.m.c. GmbH <info@sysmocom.de>
* Author: Vadim Yanitskiy <vyanitskiy@sysmocom.de>
*
* All rights reserved.
@@ -343,6 +343,32 @@
}
}
+/* Pool of S1AP servers (MME emulators) */
+type record of S1AP_Server_CT S1AP_ServerList;
+
+/* Setup behavior of a specific MME instance (for MME pool testing) */
+type enumerated S1APSRV_SetupBehavior {
+ S1APSRV_SETUP_ACCEPT, /* accept the S1SetupReq with a S1SetupResp */
+ S1APSRV_SETUP_REJECT, /* reject the S1SetupReq with a S1SetupFailure */
+ S1APSRV_SETUP_TIMEOUT /* ignore the S1SetupReq, force S1GW timeout */
+};
+type record of S1APSRV_SetupBehavior S1APSRV_SetupBehaviorList;
+
+/* directed register/unregister variants for use when multiple servers are connected */
+function f_ConnHdlr_s1ap_register_to(Global_ENB_ID genb_id, S1AP_Server_CT vc_srv)
+runs on S1APSRV_ConnHdlr {
+ S1AP_PROC.call(S1APSRV_register:{self, genb_id}) to vc_srv {
+ [] S1AP_PROC.getreply(S1APSRV_register:{?, ?}) from vc_srv { };
+ }
+}
+
+function f_ConnHdlr_s1ap_unregister_from(Global_ENB_ID genb_id, S1AP_Server_CT vc_srv)
+runs on S1APSRV_ConnHdlr {
+ S1AP_PROC.call(S1APSRV_unregister:{self, genb_id}) to vc_srv {
+ [] S1AP_PROC.getreply(S1APSRV_unregister:{?, ?}) from vc_srv { };
+ }
+}
+
function main(S1APSRV_ConnParams cpars) runs on S1AP_Server_CT {
var Result res;
diff --git a/s1gw/S1GW_ConnHdlr.ttcn b/s1gw/S1GW_ConnHdlr.ttcn
index d1f830b..2c900fe 100644
--- a/s1gw/S1GW_ConnHdlr.ttcn
+++ b/s1gw/S1GW_ConnHdlr.ttcn
@@ -1,6 +1,6 @@
/* OsmoS1GW (S1AP Gateway) ConnHdlr
*
- * (C) 2024-2025 by sysmocom - s.f.m.c. GmbH <info@sysmocom.de>
+ * (C) 2024-2026 by sysmocom - s.f.m.c. GmbH <info@sysmocom.de>
* Author: Vadim Yanitskiy <vyanitskiy@sysmocom.de>
*
* All rights reserved.
@@ -49,6 +49,7 @@
port S1AP_CODEC_PT S1AP_ENB;
var ConnectionId g_s1ap_conn_id := -1;
var default conn_track := null;
+ var S1AP_Server_CT g_s1ap_server := null; /* currently active MME server */
};
type record of ConnHdlr ConnHdlrList;
@@ -59,7 +60,10 @@
charstring pfcp_loc_addr,
charstring pfcp_rem_addr,
MME_UE_S1AP_ID mme_ue_id,
- ERabList erabs
+ ERabList erabs,
+ /* optional fields for MME pool testing */
+ S1AP_ServerList pool_srvs optional,
+ S1APSRV_SetupBehaviorList pool_behaviors optional
};
template Global_ENB_ID
@@ -188,13 +192,17 @@
function f_ConnHdlr_tx_s1ap_from_mme(template (value) S1AP_PDU pdu)
runs on ConnHdlr {
- S1AP_CONN.send(pdu);
+ if (g_s1ap_server != null) {
+ S1AP_CONN.send(pdu) to g_s1ap_server;
+ } else {
+ S1AP_CONN.send(pdu);
+ }
}
altstep as_ConnHdlr_s1ap_from_enb(out S1AP_PDU pdu,
template (present) S1AP_PDU tr_pdu := ?)
runs on ConnHdlr {
- [] S1AP_CONN.receive(tr_pdu) -> value pdu;
+ [] S1AP_CONN.receive(tr_pdu) -> value pdu sender g_s1ap_server;
}
function f_ConnHdlr_rx_s1ap_from_enb(out S1AP_PDU pdu,
@@ -287,6 +295,94 @@
}
}
+/* S1 Setup procedure with MME pool: iterate through pool_behaviors, attempting setup
+ * with each corresponding server. On ACCEPT, the connection is established and
+ * g_s1ap_server is set to the accepting server. On REJECT or TIMEOUT, the server
+ * closes the connection and S1GW tries the next MME. Caller must have connected
+ * S1AP_CONN/S1AP_PROC to all pool servers before calling this function. */
+function f_ConnHdlr_s1ap_setup_pool(S1AP_ServerList pool_srvs,
+ S1APSRV_SetupBehaviorList pool_behaviors,
+ in SupportedTAs supported_tas := c_SupportedTAs)
+runs on ConnHdlr {
+ var S1AP_PDU pdu;
+ timer T_pool;
+
+ /* conn_track would fire on CONN_DOWN during pool setup: disable it */
+ f_ConnHdlr_conn_track_disable();
+
+ /* pre-register with all pool servers to avoid timing issues */
+ for (var integer i := 0; i < lengthof(pool_srvs); i := i + 1) {
+ f_ConnHdlr_s1ap_register_to(g_pars.genb_id, pool_srvs[i]);
+ }
+
+ /* send S1SetupReq once from the eNB; S1GW will forward it to each MME it tries */
+ f_ConnHdlr_tx_s1ap_from_enb(ts_S1AP_SetupReq(g_pars.genb_id, supported_tas, v32));
+
+ for (var integer i := 0; i < lengthof(pool_behaviors); i := i + 1) {
+ var S1AP_Server_CT vc_srv := pool_srvs[i];
+ var S1APSRV_SetupBehavior behavior := pool_behaviors[i];
+
+ /* wait for CONN_UP and then S1SetupReq from this specific server */
+ g_s1ap_server := vc_srv;
+ T_pool.start(10.0);
+ alt {
+ [] S1AP_CONN.receive(S1APSRV_Event:S1APSRV_EVENT_CONN_UP) from vc_srv {
+ repeat;
+ }
+ [] S1AP_CONN.receive(tr_S1AP_SetupReq) from vc_srv -> value pdu {
+ T_pool.stop;
+ }
+ [] T_pool.timeout {
+ setverdict(fail, "Timeout waiting for S1SetupReq on pool server ", i);
+ Misc_Helpers.f_shutdown(__BFILE__, __LINE__);
+ }
+ }
+
+ if (behavior == S1APSRV_SETUP_ACCEPT) {
+ /* send SetupResp and verify the eNB gets it */
+ var template (value) PLMNidentity plmn_id := '00f110'O;
+ var template (value) MME_Group_ID mme_group_id := '0011'O;
+ var template (value) MME_Code mme_code := int2oct(i + 1, 1);
+ var template (value) ServedGUMMEIsItem gummei := ts_S1AP_ServedGUMMEIsItem(
+ { plmn_id }, { mme_group_id }, { mme_code });
+ f_ConnHdlr_tx_s1ap_from_mme(ts_S1AP_SetupResp({ gummei }, 1));
+ f_ConnHdlr_rx_s1ap_from_mme(pdu, tr_S1AP_SetupResp({ gummei }, 1));
+ /* unregister from remaining (unused) servers */
+ for (var integer j := i + 1; j < lengthof(pool_srvs); j := j + 1) {
+ f_ConnHdlr_s1ap_unregister_from(g_pars.genb_id, pool_srvs[j]);
+ }
+ /* re-enable conn_track: setup is complete */
+ f_ConnHdlr_conn_track_enable();
+ return;
+ }
+
+ if (behavior == S1APSRV_SETUP_REJECT) {
+ /* send S1SetupFailure; S1GW will close connection and try next MME */
+ f_ConnHdlr_tx_s1ap_from_mme(
+ ts_S1AP_SetupFail({ radioNetwork := unspecified }));
+ }
+ /* else S1APSRV_SETUP_TIMEOUT: do nothing, wait for S1GW to time out (5s) */
+
+ /* wait for S1GW to close the connection */
+ T_pool.start(15.0);
+ alt {
+ [] S1AP_CONN.receive(S1APSRV_Event:S1APSRV_EVENT_CONN_DOWN) from vc_srv {
+ T_pool.stop;
+ }
+ [] T_pool.timeout {
+ setverdict(fail, "Timeout waiting for CONN_DOWN from pool server ", i);
+ Misc_Helpers.f_shutdown(__BFILE__, __LINE__);
+ }
+ }
+
+ /* unregister from this server before moving to the next */
+ f_ConnHdlr_s1ap_unregister_from(g_pars.genb_id, vc_srv);
+ }
+
+ setverdict(fail, "Pool setup exhausted all MME behaviors without accepting");
+ Misc_Helpers.f_shutdown(__BFILE__, __LINE__);
+}
+
private function f_ConnHdlr_pfcp_assoc_setup()
runs on ConnHdlr
{
diff --git a/s1gw/S1GW_Tests.ttcn b/s1gw/S1GW_Tests.ttcn
index ff4734d..5fffb09 100644
--- a/s1gw/S1GW_Tests.ttcn
+++ b/s1gw/S1GW_Tests.ttcn
@@ -1,6 +1,6 @@
/* OsmoS1GW (S1AP Gateway) test suite in TTCN-3
*
- * (C) 2024-2025 by sysmocom - s.f.m.c. GmbH <info@sysmocom.de>
+ * (C) 2024-2026 by sysmocom - s.f.m.c. GmbH <info@sysmocom.de>
* Author: Vadim Yanitskiy <vyanitskiy@sysmocom.de>
*
* All rights reserved.
@@ -74,7 +74,7 @@
type component test_CT extends StatsD_Checker_CT, http_CT {
timer g_Tguard;
var MutexDispCT vc_mutex_disp;
- var S1AP_Server_CT vc_S1APSRV;
+ var S1AP_ServerList vc_S1APSRVs := {};
var PFCP_Emulation_CT vc_PFCP;
var StatsD_Checker_CT vc_STATSD;
};
@@ -86,6 +86,7 @@
}
function f_init(boolean s1apsrv_start := true,
+ integer num_mmes := 1,
boolean upf_start := true,
float Tval := 20.0) runs on test_CT {
g_Tguard.start(Tval);
@@ -96,7 +97,7 @@
f_init_statsd("StatsDSRV", vc_STATSD, mp_local_statsd_ip, mp_local_statsd_port);
if (s1apsrv_start) {
- f_init_s1ap_srv();
+ f_init_s1ap_srv(num_mmes);
}
if (upf_start) {
f_init_pfcp();
@@ -107,14 +108,20 @@
}
}
-function f_init_s1ap_srv() runs on test_CT {
- var S1APSRV_ConnParams cpars := {
- local_ip := mp_mme_bind_ip,
- local_port := 36412
- };
+/* compute the IP address for pool MME server [idx]: mp_mme_bind_ip + idx */
+private function f_mme_ip(integer idx) return charstring {
+ return f_inet_ntoa(int2oct(oct2int(f_inet_addr(mp_mme_bind_ip)) + idx, 4));
+}
- vc_S1APSRV := S1AP_Server_CT.create("S1APSRV-" & testcasename()) alive;
- vc_S1APSRV.start(S1AP_Server.main(cpars));
+function f_init_s1ap_srv(integer num_mmes := 1) runs on test_CT {
+ for (var integer i := 0; i < num_mmes; i := i + 1) {
+ var S1APSRV_ConnParams cpars := {
+ local_ip := f_mme_ip(i),
+ local_port := 36412
+ };
+ vc_S1APSRVs[i] := S1AP_Server_CT.create("S1APSRV-" & int2str(i) & "-" & testcasename()) alive;
+ vc_S1APSRVs[i].start(S1AP_Server.main(cpars));
+ }
}
function f_init_pfcp() runs on test_CT {
@@ -166,7 +173,9 @@
pfcp_loc_addr := pfcp_loc_addr,
pfcp_rem_addr := pfcp_rem_addr,
mme_ue_id := 4242,
- erabs := f_gen_erab_list(idx, num_erabs)
+ erabs := f_gen_erab_list(idx, num_erabs),
+ pool_srvs := omit,
+ pool_behaviors := omit
}
private function f_gen_erab_list(integer idx, integer num_erabs)
@@ -198,9 +207,11 @@
if (isbound(vc_STATSD) and vc_STATSD.running) {
connect(vc_conn:STATSD_PROC, vc_STATSD:STATSD_PROC);
}
- if (isbound(vc_S1APSRV) and vc_S1APSRV.running) {
- connect(vc_conn:S1AP_CONN, vc_S1APSRV:S1AP_CLIENT);
- connect(vc_conn:S1AP_PROC, vc_S1APSRV:S1AP_PROC);
+ for (var integer i := 0; i < lengthof(vc_S1APSRVs); i := i + 1) {
+ if (vc_S1APSRVs[i].running) {
+ connect(vc_conn:S1AP_CONN, vc_S1APSRVs[i]:S1AP_CLIENT);
+ connect(vc_conn:S1AP_PROC, vc_S1APSRVs[i]:S1AP_PROC);
+ }
}
if (isbound(vc_PFCP) and vc_PFCP.running) {
connect(vc_conn:PFCP, vc_PFCP:CLIENT);
@@ -322,6 +333,26 @@
f_ConnHdlrList_all_done(vc_conns);
}
+/* execute a single-eNB pool test: spins up num_mmes S1AP servers, builds ConnHdlrPars
+ * with pool_srvs/pool_behaviors set, and runs fn inside a ConnHdlr */
+function f_TC_exec_pool(void_fn fn,
+ integer num_mmes,
+ S1APSRV_SetupBehaviorList behaviors,
+ float Tval := 60.0)
+runs on test_CT {
+ var ConnHdlrPars pars;
+ var ConnHdlr vc_conn;
+
+ f_init(num_mmes := num_mmes, Tval := Tval);
+
+ pars := valueof(t_ConnHdlrPars);
+ pars.pool_srvs := vc_S1APSRVs;
+ pars.pool_behaviors := behaviors;
+
+ vc_conn := f_ConnHdlr_spawn(fn, pars);
+ vc_conn.done;
+}
+
function f_TC_setup(charstring id) runs on ConnHdlr {
f_ConnHdlr_s1ap_register(g_pars.genb_id);
@@ -1019,6 +1050,91 @@
f_TC_exec(refers(f_TC_pfcp_heartbeat));
}
+/* MME pool test: eNB connects, the first MME rejects, the second one accepts */
+function f_TC_mme_pool_reject_fallback(charstring id) runs on ConnHdlr {
+ f_ConnHdlr_s1ap_connect(mp_enb_bind_ip, mp_s1gw_enb_ip);
+ f_ConnHdlr_s1ap_setup_pool(g_pars.pool_srvs, g_pars.pool_behaviors);
+
+ f_sleep(0.5); /* keep the connection idle for some time */
+
+ f_ConnHdlr_s1ap_disconnect();
+ f_ConnHdlr_s1ap_unregister_from(g_pars.genb_id, g_s1ap_server);
+}
+testcase TC_mme_pool_reject_fallback() runs on test_CT {
+ f_TC_exec_pool(refers(f_TC_mme_pool_reject_fallback), 2,
+ { S1APSRV_SETUP_REJECT, S1APSRV_SETUP_ACCEPT });
+}
+
+/* MME pool test: eNB connects, the first MME times out, the second one accepts */
+function f_TC_mme_pool_timeout_fallback(charstring id) runs on ConnHdlr {
+ f_ConnHdlr_s1ap_connect(mp_enb_bind_ip, mp_s1gw_enb_ip);
+ f_ConnHdlr_s1ap_setup_pool(g_pars.pool_srvs, g_pars.pool_behaviors);
+
+ f_sleep(0.5); /* keep the connection idle for some time */
+
+ f_ConnHdlr_s1ap_disconnect();
+ f_ConnHdlr_s1ap_unregister_from(g_pars.genb_id, g_s1ap_server);
+}
+testcase TC_mme_pool_timeout_fallback() runs on test_CT {
+ f_TC_exec_pool(refers(f_TC_mme_pool_timeout_fallback), 2,
+ { S1APSRV_SETUP_TIMEOUT, S1APSRV_SETUP_ACCEPT });
+}
+
+/* MME pool test: eNB connects, all MMEs reject, S1GW sends S1SetupFailure to eNB */
+function f_TC_mme_pool_all_reject(charstring id) runs on ConnHdlr {
+ f_ConnHdlr_s1ap_connect(mp_enb_bind_ip, mp_s1gw_enb_ip);
+
+ /* conn_track would fire on CONN_DOWN events during the reject loop */
+ f_ConnHdlr_conn_track_disable();
+
+ /* pre-register with all pool servers */
+ for (var integer i := 0; i < lengthof(g_pars.pool_srvs); i := i + 1) {
+ f_ConnHdlr_s1ap_register_to(g_pars.genb_id, g_pars.pool_srvs[i]);
+ }
+
+ /* send S1SetupReq once from the eNB; S1GW will forward it to each MME it tries */
+ f_ConnHdlr_tx_s1ap_from_enb(ts_S1AP_SetupReq(g_pars.genb_id, c_SupportedTAs, v32));
+
+ /* S1GW will try each MME and get rejected */
+ for (var integer i := 0; i < lengthof(g_pars.pool_behaviors); i := i + 1) {
+ var S1AP_Server_CT vc_srv := g_pars.pool_srvs[i];
+ var S1AP_PDU pdu;
+ timer T;
+
+ g_s1ap_server := vc_srv;
+ T.start(10.0);
+ alt {
+ [] S1AP_CONN.receive(S1APSRV_Event:S1APSRV_EVENT_CONN_UP) from vc_srv { repeat; }
+ [] S1AP_CONN.receive(tr_S1AP_SetupReq) from vc_srv -> value pdu { T.stop; }
+ [] T.timeout {
+ setverdict(fail, "Timeout waiting for S1SetupReq on pool server ", i);
+ Misc_Helpers.f_shutdown(__BFILE__, __LINE__);
+ }
+ }
+ f_ConnHdlr_tx_s1ap_from_mme(ts_S1AP_SetupFail({ radioNetwork := unspecified }));
+
+ T.start(10.0);
+ alt {
+ [] S1AP_CONN.receive(S1APSRV_Event:S1APSRV_EVENT_CONN_DOWN) from vc_srv { T.stop; }
+ [] T.timeout {
+ setverdict(fail, "Timeout waiting for CONN_DOWN from pool server ", i);
+ Misc_Helpers.f_shutdown(__BFILE__, __LINE__);
+ }
+ }
+ f_ConnHdlr_s1ap_unregister_from(g_pars.genb_id, vc_srv);
+ }
+
+ /* S1GW has exhausted all MMEs: it sends S1SetupFailure to eNB and then closes */
+ var S1AP_PDU pdu;
+ f_ConnHdlr_rx_s1ap_from_mme(pdu, tr_S1AP_SetupFail(?));
+ f_ConnHdlr_s1ap_expect_shutdown();
+ setverdict(pass);
+}
+testcase TC_mme_pool_all_reject() runs on test_CT {
+ f_TC_exec_pool(refers(f_TC_mme_pool_all_reject), 3,
+ { S1APSRV_SETUP_REJECT, S1APSRV_SETUP_REJECT, S1APSRV_SETUP_REJECT });
+}
+
control {
execute( TC_setup() );
execute( TC_setup_multi() );
@@ -1055,6 +1171,9 @@
execute( TC_handover_res_alloc() );
execute( TC_handover_res_alloc_fail() );
execute( TC_pfcp_heartbeat() );
+ execute( TC_mme_pool_reject_fallback() );
+ execute( TC_mme_pool_timeout_fallback() );
+ execute( TC_mme_pool_all_reject() );
}
}
diff --git a/s1gw/expected-results.xml b/s1gw/expected-results.xml
index f560a60..28fcf71 100644
--- a/s1gw/expected-results.xml
+++ b/s1gw/expected-results.xml
@@ -1,5 +1,5 @@
<?xml version="1.0"?>
-<testsuite name='S1GW_Tests' tests='33' failures='0' errors='0' skipped='0' inconc='0' time='MASKED'>
+<testsuite name='S1GW_Tests' tests='36' failures='0' errors='0' skipped='0' inconc='0' time='MASKED'>
<testcase classname='S1GW_Tests' name='TC_setup' time='MASKED'/>
<testcase classname='S1GW_Tests' name='TC_setup_multi' time='MASKED'/>
<testcase classname='S1GW_Tests' name='TC_conn_term_by_mme' time='MASKED'/>
@@ -35,4 +35,7 @@
<testcase classname='S1GW_Tests' name='TC_handover_res_alloc' time='MASKED'/>
<testcase classname='S1GW_Tests' name='TC_handover_res_alloc_fail' time='MASKED'/>
<testcase classname='S1GW_Tests' name='TC_pfcp_heartbeat' time='MASKED'/>
+ <testcase classname='S1GW_Tests' name='TC_mme_pool_reject_fallback' time='MASKED'/>
+ <testcase classname='S1GW_Tests' name='TC_mme_pool_timeout_fallback' time='MASKED'/>
+ <testcase classname='S1GW_Tests' name='TC_mme_pool_all_reject' time='MASKED'/>
</testsuite>
diff --git a/s1gw/osmo-s1gw.config b/s1gw/osmo-s1gw.config
index d0f5a41..5788446 100644
--- a/s1gw/osmo-s1gw.config
+++ b/s1gw/osmo-s1gw.config
@@ -24,6 +24,12 @@
%% socket options (if omitted or left empty, defaults apply)
sockopts => #{ }
}},
+ %% MME pool
+ {mme_pool, [
+ #{name => "mme0", laddr => "127.0.2.1", raddr => "127.0.2.10"},
+ #{name => "mme1", laddr => "127.0.2.1", raddr => "127.0.2.11"},
+ #{name => "mme2", laddr => "127.0.2.1", raddr => "127.0.2.12"}
+ ]},
{pfcp_loc_addr, "127.0.3.1"}, %% local address for incoming PFCP PDUs from the UPF
{pfcp_rem_addr, "127.0.3.10"} %% remote address for outgoing PFCP PDUs to the UPF
]},
To view, visit change 42358. To unsubscribe, or for help writing mail filters, visit settings.