osmith has uploaded this change for review. ( https://gerrit.osmocom.org/c/osmo-ttcn3-hacks/+/41220?usp=email )
Change subject: hlr: add initial testenv_pyhss.cfg
......................................................................
hlr: add initial testenv_pyhss.cfg
With this patch and fixes in PyHSS, the first test cases are passing: https://github.com/nickvsnetworking/pyhss/pull/257
Related: OS#6862
Depends: osmo-ci Ic7acd3ca654580aa8e5a52f162f5c9042fc7d09f
Change-Id: I7a7e709ec02a9bf44343fff4df2861dfe4e1a761
---
M _testenv/data/podman/Dockerfile
A _testenv/data/scripts/run_in_venv.sh
A _testenv/data/scripts/wait_for_port.py
A hlr/pyhss/HLR_Tests.cfg
A hlr/pyhss/pyhss.yaml
R hlr/testenv_osmo_hlr.cfg
A hlr/testenv_pyhss.cfg
7 files changed, 273 insertions(+), 0 deletions(-)
git pull ssh://gerrit.osmocom.org:29418/osmo-ttcn3-hacks refs/changes/20/41220/1
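To try the new config locally after fetching the change, an invocation along the following lines should work; this is only a sketch, assuming testenv.py's "run" command takes a --config argument to pick between a testsuite's testenv_*.cfg variants (that option name is an assumption, not part of this change):

  # hypothetical invocation; the --config option/name is an assumption
  cd osmo-ttcn3-hacks/_testenv
  ./testenv.py run hlr --config pyhss

The renamed testenv_osmo_hlr.cfg would then keep covering the osmo-hlr backend under its own config name.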
diff --git a/_testenv/data/podman/Dockerfile b/_testenv/data/podman/Dockerfile
index 91569ee..f1deb29 100644
--- a/_testenv/data/podman/Dockerfile
+++ b/_testenv/data/podman/Dockerfile
@@ -76,7 +76,11 @@
 	pkg-config \
 	procps \
 	psmisc \
+	python3-build \
+	python3-dev \
 	python3-pip \
+	python3-poetry-core \
+	python3-venv \
 	qemu-system-x86 \
 	rsync \
 	source-highlight \
diff --git a/_testenv/data/scripts/run_in_venv.sh b/_testenv/data/scripts/run_in_venv.sh
new file mode 100755
index 0000000..b953a36
--- /dev/null
+++ b/_testenv/data/scripts/run_in_venv.sh
@@ -0,0 +1,6 @@
+#!/bin/sh -e
+if [ -n "$TESTENV_INSTALL_DIR" ]; then
+	. "$TESTENV_INSTALL_DIR"/venv/bin/activate
+fi
+
+"$@"
diff --git a/_testenv/data/scripts/wait_for_port.py b/_testenv/data/scripts/wait_for_port.py
new file mode 100755
index 0000000..2a85319
--- /dev/null
+++ b/_testenv/data/scripts/wait_for_port.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+# Copyright 2024 sysmocom - s.f.m.c. GmbH
+# SPDX-License-Identifier: GPL-3.0-or-later
+# Wait until a port is available, useful for waiting until a SUT has started
+import socket
+import argparse
+import time
+import sys
+
+args = None
+
+
+def wait_for_port():
+    start_time = time.time()
+    while True:
+        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+            result = s.connect_ex((args.hostname, args.port))
+            if result == 0:
+                sys.exit(0)
+
+        if time.time() - start_time >= args.timeout:
+            print(f"ERROR: {args.hostname}:{args.port} did not become available within {args.timeout}s!")
+            sys.exit(1)
+
+        time.sleep(0.1)
+
+
+def parse_args():
+    global args
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "-H",
+        "--hostname",
+        help="default: 127.0.0.1",
+        default="127.0.0.1",
+    )
+    parser.add_argument(
+        "-p",
+        "--port",
+        type=int,
+        required=True,
+    )
+    parser.add_argument(
+        "-t",
+        "--timeout",
+        type=int,
+        default=5,
+        help="timeout in seconds (default: 5).",
+    )
+    args = parser.parse_args()
+
+
+if __name__ == "__main__":
+    parse_args()
+    wait_for_port()
diff --git a/hlr/pyhss/HLR_Tests.cfg b/hlr/pyhss/HLR_Tests.cfg
new file mode 100644
index 0000000..d65c824
--- /dev/null
+++ b/hlr/pyhss/HLR_Tests.cfg
@@ -0,0 +1,9 @@
+[ORDERED_INCLUDE]
+"../Common.cfg"
+"./HLR_Tests.default"
+
+[MODULE_PARAMETERS]
+HLR_Tests.m_hlr_impl := HLR_IMPL_PYHSS
+
+[EXECUTE]
+HLR_Tests.control
diff --git a/hlr/pyhss/pyhss.yaml b/hlr/pyhss/pyhss.yaml
new file mode 100644
index 0000000..87e6c53
--- /dev/null
+++ b/hlr/pyhss/pyhss.yaml
@@ -0,0 +1,186 @@
+hss:
+  transport: "TCP"
+  bind_ip: ["127.0.0.1"]
+  bind_port: 3868
+  OriginHost: "hss01"
+  OriginRealm: "epc.mnc001.mcc001.3gppnetwork.org"
+  ProductName: "pyHSS"
+  site_name: "TTCN3"
+  MCC: "001"
+  MNC: "01"
+  SLh_enabled: False
+  #IMSI of Test Subscriber for Unit Checks (Optional)
+  test_sub_imsi: '001021234567890'
+
+  #The maximum time to wait, in seconds, before disconnecting a client when no data is received.
+  client_socket_timeout: 120
+
+  #The maximum time to wait, in seconds, before disconnecting a client when no data is received.
+  client_socket_timeout: 300
+
+  #The maximum time to wait, in seconds, before discarding a diameter request.
+  diameter_request_timeout: 3
+
+  # Whether to send a DWR to connected peers.
+  send_dwr: False
+
+  # How often to send a DWR to connected peers if enabled, in seconds.
+  send_dwr_interval: 5
+
+  #The amount of time, in seconds, before purging a disconnected client from the Active Diameter Peers key in redis.
+  active_diameter_peers_timeout: 10
+
+  #Prevent updates from being performed without a valid 'Provisioning-Key' in the header
+  lock_provisioning: False
+
+  #Provisioning Key for this HSS, alias for an API key. Required to be present in the header of requests to this HSS' api, if lock_provisioning is True.
+  provisioning_key: "changeThisKeyInProduction"
+
+  #If enabled sends CLRs to old MME when new MME attaches active sub
+  CancelLocationRequest_Enabled: False
+
+  #Workaround for some MMEs to force an Insert Subscriber Data request to be sent immediately after ULA
+  Insert_Subscriber_Data_Force: False
+
+  #Default Initial Filter Criteria for IMS Subscribers
+  #Jinja Formatted Template, see the example for variables passed to it.
+  Default_iFC: 'default_ifc.xml'
+
+  #Default Sh User Data
+  Default_Sh_UserData: 'default_sh_user_data.xml'
+
+  #Whether to use an external socket service
+  use_external_socket_service: False
+
+  #The Redis key used to store active diameter peers
+  diameter_peer_key: diameterPeers
+
+  # Send requests via a DRA (if connected) when a given peer can't be found
+  use_dra_fallback: False
+
+  # How long an emergency subscriber entry will last for in the table before expiring, in minutes.
+  emergency_subscriber_expiry: 3600
+
+  # Whether to send a Delete Subscriber Data Request to the Old MME on an Update Location Request.
+  send_dsr_on_mme_change: False
+
+  # Static Identifier for the subscriber context with the Delete Subscriber Data Request.
+  dsr_external_identifier: "example"
+
+  # Whether to ignore Purge UE Requests - leaving the subscriber state with the last served mme instead of null.
+  ignore_purge_ue_request: False
+
+  #S-CSCF Pool
+  scscf_pool:
+    - 'scscf.ims.mnc001.mcc001.3gppnetwork.org'
+
+  roaming:
+    outbound:
+      # Whether or not to a subscriber to connect to an undefined network when outbound roaming.
+      allow_undefined_networks: True
+
+  # SCTP Socket Parameters
+  sctp:
+    rtoMax: 5000
+    rtoMin: 500
+    rtoInitial: 1000
+
+  gsup:
+    bind_ip: "127.0.0.1"
+    bind_port: 4222
+
+api:
+  page_size: 200
+  # Whether or not to return key-based data when querying the AUC. Disable in production systems.
+  enable_insecure_auc: False
+
+benchmarking:
+  # Whether to enable benchmark logging
+  enabled: True
+  # How often to report, in seconds. Not all benchmarking supports interval reporting.
+  reporting_interval: 3600
+
+eir:
+  imsi_imei_logging: True    #Store current IMEI / IMSI pair in backend
+  no_match_response: 2       #Greylist
+  store_offnet_imsi: False   # Whether or not to store an IMEI / IMSI pair that doesn't exist in the AUC
+  simSwapNotification: False # If the IMEI for a stored IMSI/IMEI combo changes, notify the webhook endpoint
+  # Define an optional TAC csv file path
+  #tac_database_csv: '/etc/pyhss/tac_database.csv'
+
+logging:
+  level: DEBUG
+  logfiles:
+    hss_logging_file: /dev/stdout
+    diameter_logging_file: pyhss_diameter.log
+    geored_logging_file: pyhss_geored.log
+    metric_logging_file: pyhss_metrics.log
+  sqlalchemy_sql_echo: False
+  sqlalchemy_pool_recycle: 15
+  sqlalchemy_pool_size: 30
+  sqlalchemy_max_overflow: 0
+
+## Database Parameters
+database:
+  db_type: sqlite
+  server: 127.0.0.1
+  username: dbeaver
+  password: password
+  database: pyhss.db
+  readCacheEnabled: True
+  readCacheInterval: 60
+
+## External Webhook Notifications
+webhooks:
+  enabled: False
+  endpoints:
+    - 'http://127.0.0.1:8181'
+
+### Notifications to OCS on Credit Control Requests
+ocs:
+  enabled: False
+  endpoints:
+    - 'http://127.0.0.1:8282'
+
+## Geographic Redundancy Parameters
+geored:
+  enabled: False
+  sync_actions: ['HSS', 'IMS', 'PCRF', 'EIR']   #What event actions should be synced
+  endpoints:   #List of PyHSS API Endpoints to update
+    - 'http://hss01.mnc001.mcc001.3gppnetwork.org:8080'
+    - 'http://hss02.mnc001.mcc001.3gppnetwork.org:8080'
+
+#Redis is required to run PyHSS. An instance running on a local network is recommended for production.
+redis:
+  # Which connection type to attempt. Valid options are: tcp, unix, sentinel
+  # tcp - Connection via a standard TCP socket to a given host and port.
+  # unix - Connect to redis via a unix socket, provided by unixSocketPath.
+  # sentinel - Connect to one or more redis sentinel hosts.
+  connectionType: "tcp"
+  unixSocketPath: '/var/run/redis/redis-server.sock'
+  host: localhost
+  port: 6379
+  sentinel:
+    masterName: exampleMaster
+    hosts:
+      - exampleSentinel.mnc001.mcc001.3gppnetwork.org:
+          port: 6379
+          password: ''
+
+
+prometheus:
+  enabled: False
+  port: 8081   #If the API is run the API runs on the next port number up from this
+  async_subscriber_count: False   #If enabled the subscriber count will be updated asynchronously for Prometheus
+
+influxdb:
+  enabled: False
+  host: "127.0.0.1"
+  port: 8086
+  username: exampleUser
+  password: examplePassword
+  database: example
+
+snmp:
+  port: 1161
+  listen_address: 127.0.0.1
diff --git a/hlr/testenv.cfg b/hlr/testenv_osmo_hlr.cfg
similarity index 100%
rename from hlr/testenv.cfg
rename to hlr/testenv_osmo_hlr.cfg
diff --git a/hlr/testenv_pyhss.cfg b/hlr/testenv_pyhss.cfg
new file mode 100644
index 0000000..693a6cc
--- /dev/null
+++ b/hlr/testenv_pyhss.cfg
@@ -0,0 +1,12 @@
+[testsuite]
+titan_min=11.1.0
+program=HLR_Tests
+config=HLR_Tests.cfg
+copy=pyhss/HLR_Tests.cfg
+
+[pyhss_gsup]
+program=PYHSS_CONFIG=pyhss.yaml run_in_venv.sh pyhss_gsup
+setup=wait_for_port.py -p 4222
+make=pyhss
+package=pyhss
+copy=pyhss/pyhss.yaml
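
For reference, the [pyhss_gsup] section above starts PyHSS's GSUP frontend inside the venv (via run_in_venv.sh) and uses wait_for_port.py as the setup step, so the testsuite only starts once something is listening on the GSUP port configured in pyhss.yaml (4222). The helper can also be run standalone; a minimal sketch, where the host and timeout values are arbitrary and the option names are the ones defined in the script above:

  # values are arbitrary; -H/-p/-t are defined in wait_for_port.py above
  ./wait_for_port.py -H 127.0.0.1 -p 4222 -t 30
  echo "GSUP port is up"

The script exits 0 as soon as a TCP connect to the given host/port succeeds, and exits 1 with an error message once the timeout is exceeded.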