rtems/tools/build/rtems-test-check.py
Chris Johns 258bda306b testsuite: Add a common test configuration. Fix configure.ac and Makefile.am errors.
- Add a top-level test configuration file for the test states that are
  common to all BSPs. This saves adding a test configuration (tcfg)
  file for every BSP.

- Add the test states 'user-input' and 'benchmark'. These let
  'rtems-test' stop the test rather than wait for a timeout, and avoid
  running a benchmark the user has not asked for. A sketch of the tcfg
  format is shown after this list.

- Implement rtems-test-check in Python to make it faster. The shell
  script had grown to a point where it was noticeably slowing the build
  down. An invocation sketch is shown after this list.

- Fix the configure.ac and Makefile.am files for a number of the
  test directories. The files are difficult to keep in sync with the
  number of tests, and mistakes can happen, such as tests being left
  out of the build; the test fsrofs01 is an example. There was also a
  mix of SUBDIRS and _SUBDIRS in use when only _SUBDIRS should be used
  (see the Makefile.am sketch after this list).

- Fix the test fsrofs01 so it compiles.
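
A minimal sketch of a tcfg file, based on the states the new script
accepts; the test names and the included file are illustrative, not
taken from the tree:

  # one 'state: test' pair per line, '#' starts a comment
  include: testdata/extra.tcfg
  exclude: fsrofs01
  expected-fail: psxfile02
  user-input: monitor
  indeterminate: spintrcritical01
  benchmark: dhrystone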
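
A hedged invocation sketch; the tcfg name, include path and BSP are
made up, and the output assumes the tcfg marks fsrofs01 as excluded
and dhrystone as a benchmark:

  $ rtems-test-check.py exclude bsp.tcfg /path/to/testsuites erc32 \
      hello ticker fsrofs01
  hello ticker
  $ rtems-test-check.py flags bsp.tcfg /path/to/testsuites erc32 dhrystone
  -DTEST_STATE_BENCHMARK=1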
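
A sketch of the _SUBDIRS convention the fix standardises on, assuming
the RTEMS automake fragments recurse over _SUBDIRS; the directory
names are examples only:

  ## Makefile.am: list the test directories with _SUBDIRS, not SUBDIRS.
  _SUBDIRS = fsrofs01
  _SUBDIRS += hello
  _SUBDIRS += ticker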

Closes #2963.
2017-04-04 08:24:22 +10:00

117 lines
2.8 KiB
Python
Executable File

#! /usr/bin/env python
#
# Copyright 2017 Chris Johns <chrisj@rtems.org>
# All rights reserved
#
#
# Python version of the rtems-test-check script.
#
from __future__ import print_function
import os.path
import sys
def eprint(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)
#
# Arguments. Keep it simple.
#
# The mode, tcfg file, include path and BSP (argv[1..4]) are required;
# any remaining arguments are test names.
if len(sys.argv) < 5:
    eprint('error: invalid command line')
    print('INVALID-TEST-DATA')
    sys.exit(2)

mode = sys.argv[1]
testconfig = [sys.argv[2]]
includepath = sys.argv[3]
bsp = sys.argv[4]
tests = sys.argv[5:]
#
# Handle the modes.
#
if mode == 'exclude':
    pass
elif mode == 'flags':
    if len(tests) != 1:
        eprint('error: test count not 1 for mode: %s' % (mode))
        print('INVALID-TEST-DATA')
        sys.exit(1)
else:
    eprint('error: invalid mode: %s' % (mode))
    print('INVALID-TEST-DATA')
    sys.exit(1)
#
# Common RTEMS testsuite configuration. Load first.
#
rtems_testdata = os.path.join(includepath, 'testdata', 'rtems.tcfg')
if os.path.exists(rtems_testdata):
    testconfig.insert(0, rtems_testdata)
states = ['exclude',
          'expected-fail',
          'user-input',
          'indeterminate',
          'benchmark']
defines = { 'expected-fail' : '-DTEST_STATE_EXPECTED_FAIL=1',
            'user-input'    : '-DTEST_STATE_USER_INPUT=1',
            'indeterminate' : '-DTEST_STATE_INDETERMINATE=1',
            'benchmark'     : '-DTEST_STATE_BENCHMARK=1' }
output = []
testdata = {}
def clean(line):
    # Strip the trailing newline, any '#' comment and surrounding
    # whitespace.
    line = line.rstrip()
    b = line.find('#')
    if b >= 0:
        line = line[0:b]
    return line.strip()
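# For example, 'exclude: fsrofs01 # broken' cleans to
# 'exclude: fsrofs01' and a whole-line '# comment' cleans to ''.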
#
# Load the test data.
#
tc = 0
while tc < len(testconfig):
    if os.path.exists(testconfig[tc]):
        with open(testconfig[tc]) as f:
            tdata = [clean(l) for l in f.readlines()]
        lc = 0
        for line in tdata:
            lc += 1
            if len(line) == 0:
                continue
            ls = [s.strip() for s in line.split(':')]
            if len(ls) != 2:
                eprint('error: syntax error: %s:%d' % (testconfig[tc], lc))
                print('INVALID-TEST-DATA')
                sys.exit(1)
            state = ls[0]
            test = ls[1]
            if state == 'include':
                # Queue the included tcfg right after the current file
                # so this while loop processes it next.
                testconfig.insert(tc + 1, test)
            elif state in states:
                if state not in testdata:
                    testdata[state] = [test]
                else:
                    testdata[state] += [test]
            else:
                eprint('error: invalid test state: %s in %s:%d' % (state, testconfig[tc], lc))
                print('INVALID-TEST-DATA')
                sys.exit(1)
    tc += 1
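
# testdata now maps each state to the test names declared with it,
# for example {'exclude': ['fsrofs01'], 'benchmark': ['dhrystone']}
# (names illustrative).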
for test in tests:
    if mode == 'exclude':
        if 'exclude' not in testdata or test not in testdata['exclude']:
            output += [test]
    elif mode == 'flags':
        for state in states:
            if state != 'exclude' and state in testdata and test in testdata[state]:
                output += [defines[state]]

print(' '.join(sorted(set(output))))
sys.exit(0)