Change in osmo-ttcn3-hacks[master]: re-implement compare-results.sh as compare-results.py

This is a historical archive covering the years 2008-2021, before the migration to Mailman 3.

A maintained and still-updated archive of this list can be found at https://lists.osmocom.org/hyperkitty/list/gerrit-log@lists.osmocom.org/.

laforge gerrit-no-reply at lists.osmocom.org
Sat Nov 23 07:59:09 UTC 2019


laforge has submitted this change. ( https://gerrit.osmocom.org/c/osmo-ttcn3-hacks/+/15943 )

Change subject: re-implement compare-results.sh as compare-results.py
......................................................................

re-implement compare-results.sh as compare-results.py

compare-results.sh is annoyingly slow. Since our ttcn3 test containers
support Python 2, re-implement it in Python for much quicker evaluation.

Change-Id: I0747c9d66ffc7e4121497a2416fca78d7b56c8e6
---
A compare-results.py
D compare-results.sh
M start-testsuite.sh
3 files changed, 138 insertions(+), 214 deletions(-)

Approvals:
  laforge: Looks good to me, but someone else must approve
  pespin: Looks good to me, approved
  Jenkins Builder: Verified



diff --git a/compare-results.py b/compare-results.py
new file mode 100755
index 0000000..d1adb20
--- /dev/null
+++ b/compare-results.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+# Copyright 2018 sysmocom - s.f.m.c. GmbH
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+import re
+
+doc = "Compare TTCN3 test run results with expected results by junit logs."
+
+# The nicest would be to use an XML library, but I don't want to introduce dependencies on the build slaves.
+re_testcase = re.compile(r'''<testcase classname=['"]([^'"]+)['"].* name=['"]([^'"]+)['"].*>''')
+re_testcase_end = re.compile(r'''(</testcase>|<testcase [^>]*/>)''')
+re_failure = re.compile(r'''(<failure\b|<error\b)''')
+
+RESULT_PASS = 'pass'
+RESULT_FAIL = 'pass->FAIL'
+RESULT_SKIP = 'skip'
+RESULT_XFAIL = 'xfail'
+RESULT_FIXED = 'xfail->PASS'
+RESULT_NEW_PASS = 'NEW: PASS'
+RESULT_NEW_FAIL = 'NEW: FAIL'
+
+RESULTS = (
+	RESULT_FAIL,
+	RESULT_NEW_FAIL,
+	RESULT_XFAIL,
+	RESULT_FIXED,
+	RESULT_PASS,
+	RESULT_NEW_PASS,
+	RESULT_SKIP,
+	)
+
+def count(counter, name, result):
+	v = counter.get(result) or 0
+	v += 1
+	counter[result] = v
+	if result != RESULT_SKIP:
+		print('%s %s' % (result, name))
+
+def compare_one(name, expect, result, counter):
+	if result is None:
+		count(counter, name, RESULT_SKIP)
+	elif result == RESULT_PASS:
+		if expect == RESULT_PASS:
+			count(counter, name, RESULT_PASS)
+		elif expect == RESULT_FAIL:
+			count(counter, name, RESULT_FIXED)
+		elif expect is None:
+			count(counter, name, RESULT_NEW_PASS)
+	elif result == RESULT_FAIL:
+		if expect == RESULT_PASS:
+			count(counter, name, RESULT_FAIL)
+		elif expect == RESULT_FAIL:
+			count(counter, name, RESULT_XFAIL)
+		elif expect is None:
+			count(counter, name, RESULT_NEW_FAIL)
+
+def compare(cmdline, f_expected, f_current):
+	expected_list = parse_results(f_expected)
+	current_list = parse_results(f_current)
+
+	expected_dict = dict(expected_list)
+	current_dict = dict(current_list)
+
+	counter = {}
+
+	for expected_name, expected_result in expected_list:
+		compare_one(expected_name, expected_result, current_dict.get(expected_name), counter)
+
+	# Also count new tests
+	for current_name, current_result in current_list:
+		if current_name in expected_dict:
+			continue
+		compare_one(current_name, None, current_result, counter)
+
+
+	print('\nSummary:')
+	for r in RESULTS:
+		v = counter.get(r)
+		if not v:
+			continue
+		print('  %s: %d' % (r, v))
+	print('\n')
+
+def parse_results(f):
+	tests = []
+	name = None
+	result = None
+	for line in f:
+		m = re_testcase.search(line)
+		if m:
+			class_name, test_name = m.groups()
+			name = '%s.%s' % (class_name, test_name)
+
+		m = re_failure.search(line)
+		if m:
+			result = RESULT_FAIL
+
+		m = re_testcase_end.search(line)
+		if m:
+			if not name:
+				continue
+			if result is None:
+				result = RESULT_PASS
+			tests.append((name, result))
+
+			name = None
+			result = None
+
+	return tests
+
+def main(cmdline):
+	with open(cmdline.expected_results, 'r') as f_expected:
+		with open(cmdline.current_results, 'r') as f_current:
+			print('\nComparing expected results %r against results in %r\n--------------------'
+				% (cmdline.expected_results, cmdline.current_results))
+			compare(cmdline, f_expected, f_current)
+
+if __name__ == '__main__':
+	parser = argparse.ArgumentParser(description=doc)
+	parser.add_argument('expected_results', metavar='expected.junit-xml',
+			help='junit XML file listing the expected test results.')
+	parser.add_argument('current_results', metavar='current.junit-xml',
+			help='junit XML file listing the current test results.')
+
+	cmdline = parser.parse_args()
+	main(cmdline)
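
For context, a minimal self-contained sketch (not part of the patch) of how the regular expressions in compare-results.py above pick apart junit <testcase> elements. The regexes are copied verbatim from the new file; the sample junit lines, testcase names and attributes are made up for illustration only:

  import re

  # Regexes as defined in compare-results.py above:
  re_testcase = re.compile(r'''<testcase classname=['"]([^'"]+)['"].* name=['"]([^'"]+)['"].*>''')
  re_testcase_end = re.compile(r'''(</testcase>|<testcase [^>]*/>)''')
  re_failure = re.compile(r'''(<failure\b|<error\b)''')

  # Illustrative junit lines (names and attributes are hypothetical):
  passed_line = "<testcase classname='MSC_Tests' name='TC_example_pass' time='5.1'/>"
  failed_line = ("<testcase classname='MSC_Tests' name='TC_example_fail' time='2.0'>"
                 "<failure type='fail-verdict'>...</failure></testcase>")

  print(re_testcase.search(passed_line).groups())
  # -> ('MSC_Tests', 'TC_example_pass'), joined to 'MSC_Tests.TC_example_pass'
  print(bool(re_testcase_end.search(passed_line)))
  # -> True: self-closing element, no <failure>/<error> seen, so counted as 'pass'
  print(bool(re_failure.search(failed_line)))
  # -> True: a <failure> element was seen, so counted as 'pass->FAIL' in the comparison
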
diff --git a/compare-results.sh b/compare-results.sh
deleted file mode 100755
index 6037174..0000000
--- a/compare-results.sh
+++ /dev/null
@@ -1,213 +0,0 @@
-#!/usr/bin/env bash
-expected_file="$1"
-results_file="$2"
-
-# Copyright 2018 sysmocom - s.f.m.c. GmbH
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-usage() {
-  echo "
-Usage:
-
-  $(basename "$0") expected_results.junit-log current_results.junit-log [--allow-* [...]]
-
-Return 0 if the expected results match the current results exactly.
-
-  --allow-skip   Allow runnning less tests than are listed in the expected file.
-                 Default is to return failure on any skipped tests.
-  --allow-new    Allow more test results than found in the expected file.
-                 Default is to return failure on any unknown tests.
-  --allow-xpass  If a test was expected to fail but passed, return success.
-                 Default is to return failure on any mismatch.
-"
-}
-
-if [ ! -f "$expected_file" ]; then
-  usage
-  echo "Expected file not found: '$expected_file'"
-  exit 1
-fi
-
-if [ ! -f "$results_file" ]; then
-  usage
-  echo "Current results file not found: '$results_file'"
-  exit 1
-fi
-
-shift
-shift
-
-allow_xpass=0
-allow_skip=0
-allow_new=0
-
-while test -n "$1"; do
-  arg="$1"
-  if [ "x$arg" = "x--allow-xpass" ]; then
-    allow_xpass=1
-  elif [ "x$arg" = "x--allow-skip" ]; then
-    allow_skip=1
-  elif [ "x$arg" = "x--allow-new" ]; then
-    allow_new=1
-  else
-    usage
-    echo "Unknown argument: '$arg'"
-    exit 1
-  fi
-  shift
-done
-
-echo "Comparing expected results $expected_file against results in $results_file
---------------------"
-
-parse_testcase() {
-  line="$1"
-  suite_name="$(echo "$line" | sed 's,.*classname='"'"'\([^'"'"']*\)'"'"'.*,\1,')"
-  test_name="$(echo "$line" | sed 's,.*\<name='"'"'\([^'"'"']*\)'"'"'.*,\1,')"
-  if [ -n "$(echo "$line" | grep '/>$')" ]; then
-    test_result="pass"
-  else
-    test_result="FAIL"
-  fi
-}
-
-pass=0
-xfail=0
-more_failures=0
-more_successes=0
-skipped=0
-new=0
-
-while read line; do
-  parse_testcase "$line"
-  exp_suite_name="$suite_name"
-  exp_test_name="$test_name"
-  exp_test_result="$test_result"
-  matched="0"
-
-  while read line; do
-    parse_testcase "$line"
-    if [ "x$exp_suite_name" != "x$suite_name" ]; then
-      continue
-    fi
-    if [ "x$exp_test_name" != "x$test_name" ]; then
-      continue
-    fi
-
-    if [ "x$exp_test_result" = "x$test_result" ]; then
-      if [ "x$exp_test_result" = "xFAIL" ]; then
-        exp_test_result="xfail"
-	(( xfail += 1 ))
-      else
-        (( pass += 1 ))
-      fi
-      echo "$exp_test_result $suite_name.$test_name"
-    else
-      if [ "x$exp_test_result" = "xFAIL" ]; then
-        exp_test_result="xfail"
-      fi
-      echo "$exp_test_result->$test_result $suite_name.$test_name"
-      if [ "x$test_result" = "xFAIL" ]; then
-        (( more_failures += 1 ))
-      else
-	(( more_successes += 1 ))
-      fi
-    fi
-    matched="1"
-    break
-  done <<< "$(grep "<testcase.*$exp_test_name" "$results_file")"
-
-  if [ "x$matched" = "x0" ]; then
-    echo "skipped $exp_suite_name.$exp_test_name"
-    (( skipped += 1 ))
-  fi
-
-done <<< "$(grep "<testcase" "$expected_file")"
-
-# Also catch all new tests that aren't covered in the expected results
-while read line; do
-  parse_testcase "$line"
-  got_suite_name="$suite_name"
-  got_test_name="$test_name"
-  got_test_result="$test_result"
-  matched="0"
-
-  while read line; do
-    parse_testcase "$line"
-    if [ "x$got_suite_name" != "x$suite_name" ]; then
-      continue
-    fi
-    if [ "x$got_test_name" != "x$test_name" ]; then
-      continue
-    fi
-
-    matched="1"
-    break
-  done <<< "$(grep "<testcase.*$test_name" "$expected_file")"
-
-  if [ "x$matched" = "x0" ]; then
-    echo "NEW-$got_test_result $got_suite_name.$got_test_name"
-    (( new += 1 ))
-  fi
-
-done <<< "$(grep "<testcase" "$results_file")"
-
-echo "--------------------"
-overall_verdict=0
-
-ask_update=""
-
-if [ "x$pass" != x0 ]; then
-  echo "$pass pass"
-fi
-
-if [ "x$xfail" != x0 ]; then
-  echo "$xfail xfail"
-fi
-
-if [ "x$skipped" != x0 ]; then
-  echo "$skipped skipped"
-  ask_update="$ask_update removed=$skipped"
-  if [ "x$allow_skip" = x0 ]; then
-    overall_verdict=4
-  fi
-fi
-
-if [ "x$new" != x0 ]; then
-  echo "$new new"
-  ask_update="$ask_update new=$new"
-  if [ "x$allow_new" = x0 ]; then
-    overall_verdict=3
-  fi
-fi
-
-if [ "x$more_successes" != x0 ]; then
-  echo "$more_successes pass unexpectedly"
-  ask_update="$ask_update xpass=$more_successes"
-  if [ "x$allow_xpass" = x0 ]; then
-    overall_verdict=2
-  fi
-fi
-
-if [ "x$more_failures" != x0 ]; then
-  echo "$more_failures FAIL"
-  overall_verdict=1
-fi
-
-if [ -n "$ask_update" ]; then
-  echo
-  echo "(Please update the expected results:$ask_update)"
-fi
-
-exit $overall_verdict
diff --git a/start-testsuite.sh b/start-testsuite.sh
index e5bc756..a6380b7 100755
--- a/start-testsuite.sh
+++ b/start-testsuite.sh
@@ -60,7 +60,7 @@
   exit 1
 fi
 
-compare="$SUITE_DIR/../compare-results.sh"
+compare="$SUITE_DIR/../compare-results.py"
 if [ ! -x "$compare" ]; then
   echo "ERROR: cannot find $compare"
   exit 1

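For reference, the new script takes the expected and the current junit XML file as positional arguments (see the argparse definitions above) and prints one line per test followed by a summary, per the print statements in compare(). A sketch of a manual run; the file names, test names and counts below are purely illustrative:

  $ ./compare-results.py expected.junit-xml current.junit-xml

  Comparing expected results 'expected.junit-xml' against results in 'current.junit-xml'
  --------------------
  pass MSC_Tests.TC_example_pass
  pass->FAIL MSC_Tests.TC_example_fail

  Summary:
    pass->FAIL: 1
    pass: 1
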
-- 
To view, visit https://gerrit.osmocom.org/c/osmo-ttcn3-hacks/+/15943
To unsubscribe, or for help writing mail filters, visit https://gerrit.osmocom.org/settings

Gerrit-Project: osmo-ttcn3-hacks
Gerrit-Branch: master
Gerrit-Change-Id: I0747c9d66ffc7e4121497a2416fca78d7b56c8e6
Gerrit-Change-Number: 15943
Gerrit-PatchSet: 4
Gerrit-Owner: neels <nhofmeyr at sysmocom.de>
Gerrit-Reviewer: Jenkins Builder
Gerrit-Reviewer: laforge <laforge at osmocom.org>
Gerrit-Reviewer: pespin <pespin at sysmocom.de>
Gerrit-MessageType: merged