 
            neels has uploaded this change for review. ( https://gerrit.osmocom.org/c/libosmocore/+/36538?usp=email )
Change subject: add osmo_stats_report_lock api ......................................................................
add osmo_stats_report_lock api
Allow multi-threaded access to reported stats: - enable use of a stats mutex with osmo_stats_report_use_lock(true). - lock/unlock externally with osmo_stats_report_lock(true/false).
Rationale:
In osmo-hnbgw, we would like to collect stats from nftables, and do so in a separate thread. The most efficient way is to write the parsing results from nft directly to the rate_ctr destination.
But what if the main thread reports rate counters at exactly that time? - is writing to stats atomic on a data type level? - do applications need stats to be "atomic" as a whole?
In osmo-hnbgw in particular, there are two counters, 'packets' and 'total_bytes'. These correspond, and it would skew statistics if we reported them out of sync to each other.
The simplest way to ensure correctness in all cases is a mutex around the stats reporting.
But this mutex isn't needed in most of our programs. To completely avoid any overhead the mutex may bring, make use of it optional with a global flag.
This use case is likely to also show up in other programs that would like to collect and report stats from a separate thread.
Related: SYS#6773 Related: osmo-hnbgw I9dc54e6bc94c553f45adfa71ae8ad70be4afbc8f Change-Id: Ib335bea7d2a440ca284e6c439066f96456bf2c2d --- M include/osmocom/core/stats.h M src/core/libosmocore.map M src/core/stats.c M src/vty/stats_vty.c 4 files changed, 96 insertions(+), 0 deletions(-)
git pull ssh://gerrit.osmocom.org:29418/libosmocore refs/changes/38/36538/1
diff --git a/include/osmocom/core/stats.h b/include/osmocom/core/stats.h index a034a61..4ec448c 100644 --- a/include/osmocom/core/stats.h +++ b/include/osmocom/core/stats.h @@ -109,6 +109,8 @@
void osmo_stats_init(void *ctx); int osmo_stats_report(void); +void osmo_stats_report_use_lock(bool enable); +void osmo_stats_report_lock(bool lock);
int osmo_stats_set_interval(int interval);
diff --git a/src/core/libosmocore.map b/src/core/libosmocore.map index c5ab6e3..a3c1db6 100644 --- a/src/core/libosmocore.map +++ b/src/core/libosmocore.map @@ -488,6 +488,8 @@ osmo_stats_config; osmo_stats_init; osmo_stats_report; +osmo_stats_report_use_lock; +osmo_stats_report_lock; osmo_stats_reporter_alloc; osmo_stats_reporter_create_log; osmo_stats_reporter_create_statsd; diff --git a/src/core/stats.c b/src/core/stats.c index 16e6f62..21c7ddd 100644 --- a/src/core/stats.c +++ b/src/core/stats.c @@ -71,6 +71,7 @@ #include <stdio.h> #include <sys/types.h> #include <inttypes.h> +#include <pthread.h>
#ifdef HAVE_SYS_SOCKET_H #include <sys/socket.h> @@ -787,13 +788,59 @@ } }
+static pthread_mutex_t *g_report_lock = NULL; + +/*! Enable osmo_stats_report() mutex locking with osmo_stats_report_lock(). + * Calling osmo_stats_report_use_lock(true) */ +void osmo_stats_report_use_lock(bool enable) +{ + static pthread_mutex_t report_lock; + if (enable) { + /* enable locking */ + if (!g_report_lock) { + pthread_mutex_init(&report_lock, NULL); + g_report_lock = &report_lock; + } + return; + } + /* disable locking */ + if (g_report_lock) { + pthread_mutex_destroy(g_report_lock); + g_report_lock = NULL; + } +} + +/*! After osmo_stats_report_use_lock(true), lock a mutex to avoid osmo_stats_report() reading from stats. + * The caller must ensure to call osmo_stats_report_lock(false) as soon as possible, or the application will lock up. + * This function has no effect with osmo_stats_report_use_lock(false). + * Useful in multi-threaded applications to allow writing to stats/counters/rate_ctrs directly from various threads. + * Particularly useful to ensure that a given set of stats/counters updates from another thread are reported in sync. + */ +void osmo_stats_report_lock(bool lock) +{ + if (!g_report_lock) + return; + if (lock) + pthread_mutex_lock(g_report_lock); + else + pthread_mutex_unlock(g_report_lock); +} + int osmo_stats_report(void) { + pthread_mutex_t *lock = g_report_lock; + /* per group actions */ TRACE(LIBOSMOCORE_STATS_START()); + if (lock) + pthread_mutex_lock(lock); + /* { */ osmo_counters_for_each(handle_counter, NULL); rate_ctr_for_each_group(rate_ctr_group_handler, NULL); osmo_stat_item_for_each_group(osmo_stat_item_group_handler, NULL); + /* } */ + if (lock) + pthread_mutex_unlock(lock);
/* global actions */ flush_all_reporters(); diff --git a/src/vty/stats_vty.c b/src/vty/stats_vty.c index f940018..4a62dc4 100644 --- a/src/vty/stats_vty.c +++ b/src/vty/stats_vty.c @@ -426,7 +426,9 @@ if (argc > 0) skip_zero = true;
+ osmo_stats_report_lock(true); vty_out_statistics_full2(vty, "", skip_zero); + osmo_stats_report_lock(false);
return CMD_SUCCESS; } @@ -444,7 +446,9 @@ bool skip_zero = false; if (argc > 1) skip_zero = true; + osmo_stats_report_lock(true); vty_out_statistics_partial2(vty, "", level, skip_zero); + osmo_stats_report_lock(false);
return CMD_SUCCESS; } @@ -632,7 +636,9 @@ struct rctr_vty_ctx rctx = { .vty = vty, .skip_zero = false }; if (argc > 0) rctx.skip_zero = true; + osmo_stats_report_lock(true); rate_ctr_for_each_group(rate_ctr_group_handler, &rctx); + osmo_stats_report_lock(false); return CMD_SUCCESS; }
