import os
import sys
class AmalgamationFile:
- def __init__( self, top_dir ):
+ def __init__(self, top_dir):
self.top_dir = top_dir
self.blocks = []
- def add_text( self, text ):
- if not text.endswith( "\n" ):
+ def add_text(self, text):
+ if not text.endswith("\n"):
text += "\n"
- self.blocks.append( text )
-
- def add_file( self, relative_input_path, wrap_in_comment=False ):
- def add_marker( prefix ):
- self.add_text( "" )
- self.add_text( "// " + "/"*70 )
- self.add_text( "// %s of content of file: %s" % (prefix, relative_input_path.replace("\\","/")) )
- self.add_text( "// " + "/"*70 )
- self.add_text( "" )
- add_marker( "Beginning" )
- f = open( os.path.join( self.top_dir, relative_input_path ), "rt" )
+ self.blocks.append(text)
+
+ def add_file(self, relative_input_path, wrap_in_comment=False):
+ def add_marker(prefix):
+ self.add_text("")
+ self.add_text("// " + "/"*70)
+ self.add_text("// %s of content of file: %s" % (prefix, relative_input_path.replace("\\","/")))
+ self.add_text("// " + "/"*70)
+ self.add_text("")
+ add_marker("Beginning")
+ f = open(os.path.join(self.top_dir, relative_input_path), "rt")
content = f.read()
if wrap_in_comment:
content = "/*\n" + content + "\n*/"
- self.add_text( content )
+ self.add_text(content)
f.close()
- add_marker( "End" )
- self.add_text( "\n\n\n\n" )
-
- def get_value( self ):
- return "".join( self.blocks ).replace("\r\n","\n")
-
- def write_to( self, output_path ):
- output_dir = os.path.dirname( output_path )
- if output_dir and not os.path.isdir( output_dir ):
- os.makedirs( output_dir )
- f = open( output_path, "wb" )
- f.write( str.encode(self.get_value(), 'UTF-8') )
+ add_marker("End")
+ self.add_text("\n\n\n\n")
+
+ def get_value(self):
+ return "".join(self.blocks).replace("\r\n","\n")
+
+ def write_to(self, output_path):
+ output_dir = os.path.dirname(output_path)
+ if output_dir and not os.path.isdir(output_dir):
+ os.makedirs(output_dir)
+ f = open(output_path, "wb")
+ f.write(str.encode(self.get_value(), 'UTF-8'))
f.close()
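# A minimal usage sketch of the class above (illustrative only: the temp
# directory and 'greeting.h' file are made up for the demo, not part of the
# jsoncpp tree):
def _demo_amalgamation_file():
    import os, tempfile
    top = tempfile.mkdtemp()
    with open(os.path.join(top, 'greeting.h'), 'wt') as f:
        f.write('#define GREETING "hello"')   # no trailing newline on purpose
    amalgamation = AmalgamationFile(top)
    amalgamation.add_text('// prologue')      # add_text appends the missing '\n'
    amalgamation.add_file('greeting.h')       # wrapped in Beginning/End markers
    amalgamation.write_to(os.path.join(top, 'out', 'all.h'))   # creates out/ as needed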
-def amalgamate_source( source_top_dir=None,
+def amalgamate_source(source_top_dir=None,
target_source_path=None,
- header_include_path=None ):
+ header_include_path=None):
"""Produces amalgated source.
Parameters:
source_top_dir: top-directory
header_include_path: generated header path relative to target_source_path.
"""
print("Amalgating header...")
- header = AmalgamationFile( source_top_dir )
- header.add_text( "/// Json-cpp amalgated header (http://jsoncpp.sourceforge.net/)." )
- header.add_text( "/// It is intented to be used with #include <%s>" % header_include_path )
- header.add_file( "LICENSE", wrap_in_comment=True )
- header.add_text( "#ifndef JSON_AMALGATED_H_INCLUDED" )
- header.add_text( "# define JSON_AMALGATED_H_INCLUDED" )
- header.add_text( "/// If defined, indicates that the source file is amalgated" )
- header.add_text( "/// to prevent private header inclusion." )
- header.add_text( "#define JSON_IS_AMALGAMATION" )
- header.add_file( "include/json/version.h" )
- header.add_file( "include/json/config.h" )
- header.add_file( "include/json/forwards.h" )
- header.add_file( "include/json/features.h" )
- header.add_file( "include/json/value.h" )
- header.add_file( "include/json/reader.h" )
- header.add_file( "include/json/writer.h" )
- header.add_file( "include/json/assertions.h" )
- header.add_text( "#endif //ifndef JSON_AMALGATED_H_INCLUDED" )
-
- target_header_path = os.path.join( os.path.dirname(target_source_path), header_include_path )
+ header = AmalgamationFile(source_top_dir)
+ header.add_text("/// Json-cpp amalgated header (http://jsoncpp.sourceforge.net/).")
+ header.add_text("/// It is intented to be used with #include <%s>" % header_include_path)
+ header.add_file("LICENSE", wrap_in_comment=True)
+ header.add_text("#ifndef JSON_AMALGATED_H_INCLUDED")
+ header.add_text("# define JSON_AMALGATED_H_INCLUDED")
+ header.add_text("/// If defined, indicates that the source file is amalgated")
+ header.add_text("/// to prevent private header inclusion.")
+ header.add_text("#define JSON_IS_AMALGAMATION")
+ header.add_file("include/json/version.h")
+ header.add_file("include/json/config.h")
+ header.add_file("include/json/forwards.h")
+ header.add_file("include/json/features.h")
+ header.add_file("include/json/value.h")
+ header.add_file("include/json/reader.h")
+ header.add_file("include/json/writer.h")
+ header.add_file("include/json/assertions.h")
+ header.add_text("#endif //ifndef JSON_AMALGATED_H_INCLUDED")
+
+ target_header_path = os.path.join(os.path.dirname(target_source_path), header_include_path)
print("Writing amalgated header to %r" % target_header_path)
- header.write_to( target_header_path )
+ header.write_to(target_header_path)
- base, ext = os.path.splitext( header_include_path )
+ base, ext = os.path.splitext(header_include_path)
forward_header_include_path = base + "-forwards" + ext
print("Amalgating forward header...")
- header = AmalgamationFile( source_top_dir )
- header.add_text( "/// Json-cpp amalgated forward header (http://jsoncpp.sourceforge.net/)." )
- header.add_text( "/// It is intented to be used with #include <%s>" % forward_header_include_path )
- header.add_text( "/// This header provides forward declaration for all JsonCpp types." )
- header.add_file( "LICENSE", wrap_in_comment=True )
- header.add_text( "#ifndef JSON_FORWARD_AMALGATED_H_INCLUDED" )
- header.add_text( "# define JSON_FORWARD_AMALGATED_H_INCLUDED" )
- header.add_text( "/// If defined, indicates that the source file is amalgated" )
- header.add_text( "/// to prevent private header inclusion." )
- header.add_text( "#define JSON_IS_AMALGAMATION" )
- header.add_file( "include/json/config.h" )
- header.add_file( "include/json/forwards.h" )
- header.add_text( "#endif //ifndef JSON_FORWARD_AMALGATED_H_INCLUDED" )
-
- target_forward_header_path = os.path.join( os.path.dirname(target_source_path),
- forward_header_include_path )
+ header = AmalgamationFile(source_top_dir)
+ header.add_text("/// Json-cpp amalgated forward header (http://jsoncpp.sourceforge.net/).")
+ header.add_text("/// It is intented to be used with #include <%s>" % forward_header_include_path)
+ header.add_text("/// This header provides forward declaration for all JsonCpp types.")
+ header.add_file("LICENSE", wrap_in_comment=True)
+ header.add_text("#ifndef JSON_FORWARD_AMALGATED_H_INCLUDED")
+ header.add_text("# define JSON_FORWARD_AMALGATED_H_INCLUDED")
+ header.add_text("/// If defined, indicates that the source file is amalgated")
+ header.add_text("/// to prevent private header inclusion.")
+ header.add_text("#define JSON_IS_AMALGAMATION")
+ header.add_file("include/json/config.h")
+ header.add_file("include/json/forwards.h")
+ header.add_text("#endif //ifndef JSON_FORWARD_AMALGATED_H_INCLUDED")
+
+ target_forward_header_path = os.path.join(os.path.dirname(target_source_path),
+ forward_header_include_path)
print("Writing amalgated forward header to %r" % target_forward_header_path)
- header.write_to( target_forward_header_path )
+ header.write_to(target_forward_header_path)
print("Amalgating source...")
- source = AmalgamationFile( source_top_dir )
- source.add_text( "/// Json-cpp amalgated source (http://jsoncpp.sourceforge.net/)." )
- source.add_text( "/// It is intented to be used with #include <%s>" % header_include_path )
- source.add_file( "LICENSE", wrap_in_comment=True )
- source.add_text( "" )
- source.add_text( "#include <%s>" % header_include_path )
- source.add_text( "" )
+ source = AmalgamationFile(source_top_dir)
+ source.add_text("/// Json-cpp amalgated source (http://jsoncpp.sourceforge.net/).")
+ source.add_text("/// It is intented to be used with #include <%s>" % header_include_path)
+ source.add_file("LICENSE", wrap_in_comment=True)
+ source.add_text("")
+ source.add_text("#include <%s>" % header_include_path)
+ source.add_text("")
lib_json = "src/lib_json"
- source.add_file( os.path.join(lib_json, "json_tool.h") )
- source.add_file( os.path.join(lib_json, "json_reader.cpp") )
- source.add_file( os.path.join(lib_json, "json_batchallocator.h") )
- source.add_file( os.path.join(lib_json, "json_valueiterator.inl") )
- source.add_file( os.path.join(lib_json, "json_value.cpp") )
- source.add_file( os.path.join(lib_json, "json_writer.cpp") )
+ source.add_file(os.path.join(lib_json, "json_tool.h"))
+ source.add_file(os.path.join(lib_json, "json_reader.cpp"))
+ source.add_file(os.path.join(lib_json, "json_batchallocator.h"))
+ source.add_file(os.path.join(lib_json, "json_valueiterator.inl"))
+ source.add_file(os.path.join(lib_json, "json_value.cpp"))
+ source.add_file(os.path.join(lib_json, "json_writer.cpp"))
print("Writing amalgated source to %r" % target_source_path)
- source.write_to( target_source_path )
+ source.write_to(target_source_path)
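# For reference, a direct call mirroring the conventions used by main() below
# and by makerelease.py later in this patch (the dist/ paths are shown here as
# an assumption, matching the call site in makerelease.py):
#
#   amalgamate_source(source_top_dir='.',
#                     target_source_path='dist/jsoncpp.cpp',
#                     header_include_path='json/json.h')
#
# which produces dist/jsoncpp.cpp, dist/json/json.h and dist/json/json-forwards.h.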
def main():
usage = """%prog [options]
parser.enable_interspersed_args()
options, args = parser.parse_args()
- msg = amalgamate_source( source_top_dir=options.top_dir,
+ msg = amalgamate_source(source_top_dir=options.top_dir,
target_source_path=options.target_source_path,
- header_include_path=options.header_include_path )
+ header_include_path=options.header_include_path)
if msg:
- sys.stderr.write( msg + "\n" )
- sys.exit( 1 )
+ sys.stderr.write(msg + "\n")
+ sys.exit(1)
else:
print("Source succesfully amalagated")
ALL_NO_LINK = DIR | FILE
ALL = DIR | FILE | LINKS
-_ANT_RE = re.compile( r'(/\*\*/)|(\*\*/)|(/\*\*)|(\*)|(/)|([^\*/]*)' )
+_ANT_RE = re.compile(r'(/\*\*/)|(\*\*/)|(/\*\*)|(\*)|(/)|([^\*/]*)')
-def ant_pattern_to_re( ant_pattern ):
+def ant_pattern_to_re(ant_pattern):
"""Generates a regular expression from the ant pattern.
Matching convention:
**/a: match 'a', 'dir/a', 'dir1/dir2/a'
"""
rex = ['^']
next_pos = 0
- sep_rex = r'(?:/|%s)' % re.escape( os.path.sep )
+ sep_rex = r'(?:/|%s)' % re.escape(os.path.sep)
## print 'Converting', ant_pattern
- for match in _ANT_RE.finditer( ant_pattern ):
+ for match in _ANT_RE.finditer(ant_pattern):
## print 'Matched', match.group()
## print match.start(0), next_pos
if match.start(0) != next_pos:
- raise ValueError( "Invalid ant pattern" )
+ raise ValueError("Invalid ant pattern")
if match.group(1): # /**/
- rex.append( sep_rex + '(?:.*%s)?' % sep_rex )
+ rex.append(sep_rex + '(?:.*%s)?' % sep_rex)
elif match.group(2): # **/
- rex.append( '(?:.*%s)?' % sep_rex )
+ rex.append('(?:.*%s)?' % sep_rex)
elif match.group(3): # /**
- rex.append( sep_rex + '.*' )
+ rex.append(sep_rex + '.*')
elif match.group(4): # *
- rex.append( '[^/%s]*' % re.escape(os.path.sep) )
+ rex.append('[^/%s]*' % re.escape(os.path.sep))
elif match.group(5): # /
- rex.append( sep_rex )
+ rex.append(sep_rex)
else: # somepath
- rex.append( re.escape(match.group(6)) )
+ rex.append(re.escape(match.group(6)))
next_pos = match.end()
rex.append('$')
- return re.compile( ''.join( rex ) )
+ return re.compile(''.join(rex))
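# A self-contained sanity check of the conversion above (the exact pattern
# text depends on os.path.sep and the Python version's re.escape):
def _demo_ant_pattern():
    rex = ant_pattern_to_re('src/**/*.py')
    print(rex.pattern)                        # e.g. ^src(?:/|/)(?:.*(?:/|/))?... on POSIX
    print(bool(rex.match('src/dir/a.py')))    # True
    print(bool(rex.match('a/src/a.py')))      # False: the pattern is anchored with ^...$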
-def _as_list( l ):
+def _as_list(l):
if isinstance(l, basestring):
return l.split()
return l
dir_path = dir_path.replace('/',os.path.sep)
entry_type_filter = entry_type
- def is_pruned_dir( dir_name ):
+ def is_pruned_dir(dir_name):
for pattern in prune_dirs:
- if fnmatch.fnmatch( dir_name, pattern ):
+ if fnmatch.fnmatch(dir_name, pattern):
return True
return False
- def apply_filter( full_path, filter_rexs ):
+ def apply_filter(full_path, filter_rexs):
"""Return True if at least one of the filter regular expression match full_path."""
for rex in filter_rexs:
- if rex.match( full_path ):
+ if rex.match(full_path):
return True
return False
- def glob_impl( root_dir_path ):
+ def glob_impl(root_dir_path):
child_dirs = [root_dir_path]
while child_dirs:
dir_path = child_dirs.pop()
- for entry in listdir( dir_path ):
- full_path = os.path.join( dir_path, entry )
+ for entry in listdir(dir_path):
+ full_path = os.path.join(dir_path, entry)
## print 'Testing:', full_path,
- is_dir = os.path.isdir( full_path )
- if is_dir and not is_pruned_dir( entry ): # explore child directory ?
+ is_dir = os.path.isdir(full_path)
+ if is_dir and not is_pruned_dir(entry): # explore child directory?
## print '===> marked for recursion',
- child_dirs.append( full_path )
- included = apply_filter( full_path, include_filter )
- rejected = apply_filter( full_path, exclude_filter )
+ child_dirs.append(full_path)
+ included = apply_filter(full_path, include_filter)
+ rejected = apply_filter(full_path, exclude_filter)
if not included or rejected: # do not include entry?
## print '=> not included or rejected'
continue
- link = os.path.islink( full_path )
- is_file = os.path.isfile( full_path )
+ link = os.path.islink(full_path)
+ is_file = os.path.isfile(full_path)
if not is_file and not is_dir:
## print '=> unknown entry type'
continue
## print '=> type: %d' % entry_type,
if (entry_type & entry_type_filter) != 0:
## print ' => KEEP'
- yield os.path.join( dir_path, entry )
+ yield os.path.join(dir_path, entry)
## else:
## print ' => TYPE REJECTED'
- return list( glob_impl( dir_path ) )
+ return list(glob_impl(dir_path))
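# glob's full signature falls outside this hunk, but the walk above is easy to
# reproduce standalone. A hedged sketch of the same prune/include/exclude idea
# built on os.walk (the patterns below are hypothetical):
def _demo_walk(root):
    import fnmatch, os
    prune = ('.git', 'build*')
    keep = [ant_pattern_to_re('**/*.py')]
    for dir_path, dir_names, file_names in os.walk(root):
        # prune in place so os.walk never descends into the pruned directories
        dir_names[:] = [d for d in dir_names
                        if not any(fnmatch.fnmatch(d, p) for p in prune)]
        for name in file_names:
            full_path = os.path.join(dir_path, name)
            if any(rex.match(full_path) for rex in keep):
                yield full_path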
if __name__ == "__main__":
import unittest
class AntPatternToRETest(unittest.TestCase):
-## def test_conversion( self ):
-## self.assertEqual( '^somepath$', ant_pattern_to_re( 'somepath' ).pattern )
+## def test_conversion(self):
+## self.assertEqual('^somepath$', ant_pattern_to_re('somepath').pattern)
- def test_matching( self ):
- test_cases = [ ( 'path',
+ def test_matching(self):
+ test_cases = [ ('path',
['path'],
- ['somepath', 'pathsuffix', '/path', '/path'] ),
- ( '*.py',
+ ['somepath', 'pathsuffix', '/path', '/path']),
+ ('*.py',
['source.py', 'source.ext.py', '.py'],
- ['path/source.py', '/.py', 'dir.py/z', 'z.pyc', 'z.c'] ),
- ( '**/path',
+ ['path/source.py', '/.py', 'dir.py/z', 'z.pyc', 'z.c']),
+ ('**/path',
['path', '/path', '/a/path', 'c:/a/path', '/a/b/path', '//a/path', '/a/path/b/path'],
- ['path/', 'a/path/b', 'dir.py/z', 'somepath', 'pathsuffix', 'a/somepath'] ),
- ( 'path/**',
+ ['path/', 'a/path/b', 'dir.py/z', 'somepath', 'pathsuffix', 'a/somepath']),
+ ('path/**',
['path/a', 'path/path/a', 'path//'],
- ['path', 'somepath/a', 'a/path', 'a/path/a', 'pathsuffix/a'] ),
- ( '/**/path',
+ ['path', 'somepath/a', 'a/path', 'a/path/a', 'pathsuffix/a']),
+ ('/**/path',
['/path', '/a/path', '/a/b/path/path', '/path/path'],
- ['path', 'path/', 'a/path', '/pathsuffix', '/somepath'] ),
- ( 'a/b',
+ ['path', 'path/', 'a/path', '/pathsuffix', '/somepath']),
+ ('a/b',
['a/b'],
- ['somea/b', 'a/bsuffix', 'a/b/c'] ),
- ( '**/*.py',
+ ['somea/b', 'a/bsuffix', 'a/b/c']),
+ ('**/*.py',
['script.py', 'src/script.py', 'a/b/script.py', '/a/b/script.py'],
- ['script.pyc', 'script.pyo', 'a.py/b'] ),
- ( 'src/**/*.py',
+ ['script.pyc', 'script.pyo', 'a.py/b']),
+ ('src/**/*.py',
['src/a.py', 'src/dir/a.py'],
- ['a/src/a.py', '/src/a.py'] ),
+ ['a/src/a.py', '/src/a.py']),
]
for ant_pattern, accepted_matches, rejected_matches in list(test_cases):
- def local_path( paths ):
+ def local_path(paths):
return [ p.replace('/',os.path.sep) for p in paths ]
- test_cases.append( (ant_pattern, local_path(accepted_matches), local_path( rejected_matches )) )
+ test_cases.append((ant_pattern, local_path(accepted_matches), local_path(rejected_matches)))
for ant_pattern, accepted_matches, rejected_matches in test_cases:
- rex = ant_pattern_to_re( ant_pattern )
+ rex = ant_pattern_to_re(ant_pattern)
print('ant_pattern:', ant_pattern, ' => ', rex.pattern)
for accepted_match in accepted_matches:
print('Accepted?:', accepted_match)
- self.assertTrue( rex.match( accepted_match ) is not None )
+ self.assertTrue(rex.match(accepted_match) is not None)
for rejected_match in rejected_matches:
print('Rejected?:', rejected_match)
- self.assertTrue( rex.match( rejected_match ) is None )
+ self.assertTrue(rex.match(rejected_match) is None)
unittest.main()
self.build_type = build_type
self.generator = generator
- def merged_with( self, build_desc ):
+ def merged_with(self, build_desc):
"""Returns a new BuildDesc by merging field content.
Prefers build_desc fields over self fields for single-valued fields.
"""
- return BuildDesc( self.prepend_envs + build_desc.prepend_envs,
+ return BuildDesc(self.prepend_envs + build_desc.prepend_envs,
self.variables + build_desc.variables,
build_desc.build_type or self.build_type,
- build_desc.generator or self.generator )
+ build_desc.generator or self.generator)
- def env( self ):
+ def env(self):
environ = os.environ.copy()
for values_by_name in self.prepend_envs:
for var, value in list(values_by_name.items()):
var = var.upper()
if type(value) is unicode:
- value = value.encode( sys.getdefaultencoding() )
+ value = value.encode(sys.getdefaultencoding())
if var in environ:
environ[var] = value + os.pathsep + environ[var]
else:
environ[var] = value
return environ
- def cmake_args( self ):
+ def cmake_args(self):
args = ["-D%s" % var for var in self.variables]
# skip build type for Visual Studio solution as it causes a warning
if self.build_type and 'Visual' not in self.generator:
- args.append( "-DCMAKE_BUILD_TYPE=%s" % self.build_type )
+ args.append("-DCMAKE_BUILD_TYPE=%s" % self.build_type)
if self.generator:
- args.extend( ['-G', self.generator] )
+ args.extend(['-G', self.generator])
return args
- def __repr__( self ):
- return "BuildDesc( %s, build_type=%s )" % (" ".join( self.cmake_args()), self.build_type)
+ def __repr__(self):
+ return "BuildDesc(%s, build_type=%s)" % (" ".join(self.cmake_args()), self.build_type)
class BuildData:
- def __init__( self, desc, work_dir, source_dir ):
+ def __init__(self, desc, work_dir, source_dir):
self.desc = desc
self.work_dir = work_dir
self.source_dir = source_dir
- self.cmake_log_path = os.path.join( work_dir, 'batchbuild_cmake.log' )
- self.build_log_path = os.path.join( work_dir, 'batchbuild_build.log' )
+ self.cmake_log_path = os.path.join(work_dir, 'batchbuild_cmake.log')
+ self.build_log_path = os.path.join(work_dir, 'batchbuild_build.log')
self.cmake_succeeded = False
self.build_succeeded = False
def execute_build(self):
print('Build %s' % self.desc)
- self._make_new_work_dir( )
- self.cmake_succeeded = self._generate_makefiles( )
+ self._make_new_work_dir()
+ self.cmake_succeeded = self._generate_makefiles()
if self.cmake_succeeded:
- self.build_succeeded = self._build_using_makefiles( )
+ self.build_succeeded = self._build_using_makefiles()
return self.build_succeeded
def _generate_makefiles(self):
print(' Generating makefiles: ', end=' ')
- cmd = ['cmake'] + self.desc.cmake_args( ) + [os.path.abspath( self.source_dir )]
- succeeded = self._execute_build_subprocess( cmd, self.desc.env(), self.cmake_log_path )
+ cmd = ['cmake'] + self.desc.cmake_args() + [os.path.abspath(self.source_dir)]
+ succeeded = self._execute_build_subprocess(cmd, self.desc.env(), self.cmake_log_path)
print('done' if succeeded else 'FAILED')
return succeeded
cmd = ['cmake', '--build', self.work_dir]
if self.desc.build_type:
cmd += ['--config', self.desc.build_type]
- succeeded = self._execute_build_subprocess( cmd, self.desc.env(), self.build_log_path )
+ succeeded = self._execute_build_subprocess(cmd, self.desc.env(), self.build_log_path)
print('done' if succeeded else 'FAILED')
return succeeded
def _execute_build_subprocess(self, cmd, env, log_path):
- process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=self.work_dir,
- env=env )
- stdout, _ = process.communicate( )
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=self.work_dir,
+ env=env)
+ stdout, _ = process.communicate()
succeeded = (process.returncode == 0)
- with open( log_path, 'wb' ) as flog:
- log = ' '.join( cmd ) + '\n' + stdout + '\nExit code: %r\n' % process.returncode
- flog.write( fix_eol( log ) )
+ with open(log_path, 'wb') as flog:
+ log = ' '.join(cmd) + '\n' + stdout + '\nExit code: %r\n' % process.returncode
+ flog.write(fix_eol(log))
return succeeded
def _make_new_work_dir(self):
- if os.path.isdir( self.work_dir ):
+ if os.path.isdir(self.work_dir):
print(' Removing work directory', self.work_dir)
- shutil.rmtree( self.work_dir, ignore_errors=True )
- if not os.path.isdir( self.work_dir ):
- os.makedirs( self.work_dir )
+ shutil.rmtree(self.work_dir, ignore_errors=True)
+ if not os.path.isdir(self.work_dir):
+ os.makedirs(self.work_dir)
-def fix_eol( stdout ):
+def fix_eol(stdout):
"""Fixes wrong EOL produced by cmake --build on Windows (\r\r\n instead of \r\n).
"""
- return re.sub( '\r*\n', os.linesep, stdout )
+ return re.sub('\r*\n', os.linesep, stdout)
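# The normalization in action (on Windows, where os.linesep is '\r\n', the
# doubled '\r' emitted by cmake --build collapses back to a single one):
def _demo_fix_eol():
    raw = 'line one\r\r\nline two\r\n'       # the artifact described above
    assert fix_eol(raw) == 'line one' + os.linesep + 'line two' + os.linesep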
-def load_build_variants_from_config( config_path ):
- with open( config_path, 'rb' ) as fconfig:
- data = json.load( fconfig )
+def load_build_variants_from_config(config_path):
+ with open(config_path, 'rb') as fconfig:
+ data = json.load(fconfig)
variants = data[ 'cmake_variants' ]
- build_descs_by_axis = collections.defaultdict( list )
+ build_descs_by_axis = collections.defaultdict(list)
for axis in variants:
axis_name = axis["name"]
build_descs = []
if "generators" in axis:
for generator_data in axis["generators"]:
for generator in generator_data["generator"]:
- build_desc = BuildDesc( generator=generator,
- prepend_envs=generator_data.get("env_prepend") )
- build_descs.append( build_desc )
+ build_desc = BuildDesc(generator=generator,
+ prepend_envs=generator_data.get("env_prepend"))
+ build_descs.append(build_desc)
elif "variables" in axis:
for variables in axis["variables"]:
- build_desc = BuildDesc( variables=variables )
- build_descs.append( build_desc )
+ build_desc = BuildDesc(variables=variables)
+ build_descs.append(build_desc)
elif "build_types" in axis:
for build_type in axis["build_types"]:
- build_desc = BuildDesc( build_type=build_type )
- build_descs.append( build_desc )
- build_descs_by_axis[axis_name].extend( build_descs )
+ build_desc = BuildDesc(build_type=build_type)
+ build_descs.append(build_desc)
+ build_descs_by_axis[axis_name].extend(build_descs)
return build_descs_by_axis
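# For reference, a minimal, hypothetical config exercising all three axis
# kinds recognized above (generators, variables, build_types):
#
#   {"cmake_variants": [
#       {"name": "generator",
#        "generators": [{"generator": ["NMake Makefiles"],
#                        "env_prepend": [{"path": "C:/nmake"}]}]},
#       {"name": "shared", "variables": [["JSONCPP_LIB_BUILD_SHARED=true"]]},
#       {"name": "type",   "build_types": ["debug", "release"]}
#   ]}
#
# load_build_variants_from_config maps each axis name to its list of partial
# BuildDesc objects, e.g. build_descs_by_axis['type'] holds two entries here.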
-def generate_build_variants( build_descs_by_axis ):
+def generate_build_variants(build_descs_by_axis):
"""Returns a list of BuildDesc generated for the partial BuildDesc for each axis."""
axis_names = list(build_descs_by_axis.keys())
build_descs = []
if len(build_descs):
# for each existing build_desc and each axis build desc, create a new build_desc
new_build_descs = []
- for prototype_build_desc, axis_build_desc in itertools.product( build_descs, axis_build_descs):
- new_build_descs.append( prototype_build_desc.merged_with( axis_build_desc ) )
+ for prototype_build_desc, axis_build_desc in itertools.product(build_descs, axis_build_descs):
+ new_build_descs.append(prototype_build_desc.merged_with(axis_build_desc))
build_descs = new_build_descs
else:
build_descs = axis_build_descs
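# The loop above computes a running cross-product: once every axis has been
# folded in, len(build_descs) equals the product of the axis sizes. The same
# result in one expression, as a sketch (assuming merged_with is associative):
#
#   import functools, itertools
#   build_descs = [functools.reduce(BuildDesc.merged_with, combo)
#                  for combo in itertools.product(*build_descs_by_axis.values())]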
</table>
</body></html>''')
-def generate_html_report( html_report_path, builds ):
- report_dir = os.path.dirname( html_report_path )
+def generate_html_report(html_report_path, builds):
+ report_dir = os.path.dirname(html_report_path)
# Vertical axis: generator
# Horizontal: variables, then build_type
- builds_by_generator = collections.defaultdict( list )
+ builds_by_generator = collections.defaultdict(list)
variables = set()
- build_types_by_variable = collections.defaultdict( set )
+ build_types_by_variable = collections.defaultdict(set)
build_by_pos_key = {} # { (generator, var_key, build_type): build }
for build in builds:
- builds_by_generator[build.desc.generator].append( build )
+ builds_by_generator[build.desc.generator].append(build)
var_key = tuple(sorted(build.desc.variables))
- variables.add( var_key )
- build_types_by_variable[var_key].add( build.desc.build_type )
+ variables.add(var_key)
+ build_types_by_variable[var_key].add(build.desc.build_type)
pos_key = (build.desc.generator, var_key, build.desc.build_type)
build_by_pos_key[pos_key] = build
- variables = sorted( variables )
+ variables = sorted(variables)
th_vars = []
th_build_types = []
for variable in variables:
- build_types = sorted( build_types_by_variable[variable] )
+ build_types = sorted(build_types_by_variable[variable])
nb_build_type = len(build_types_by_variable[variable])
- th_vars.append( '<th colspan="%d">%s</th>' % (nb_build_type, cgi.escape( ' '.join( variable ) ) ) )
+ th_vars.append('<th colspan="%d">%s</th>' % (nb_build_type, cgi.escape(' '.join(variable))))
for build_type in build_types:
- th_build_types.append( '<th>%s</th>' % cgi.escape(build_type) )
+ th_build_types.append('<th>%s</th>' % cgi.escape(build_type))
tr_builds = []
- for generator in sorted( builds_by_generator ):
- tds = [ '<td>%s</td>\n' % cgi.escape( generator ) ]
+ for generator in sorted(builds_by_generator):
+ tds = [ '<td>%s</td>\n' % cgi.escape(generator) ]
for variable in variables:
- build_types = sorted( build_types_by_variable[variable] )
+ build_types = sorted(build_types_by_variable[variable])
for build_type in build_types:
pos_key = (generator, variable, build_type)
build = build_by_pos_key.get(pos_key)
if build:
cmake_status = 'ok' if build.cmake_succeeded else 'FAILED'
build_status = 'ok' if build.build_succeeded else 'FAILED'
- cmake_log_url = os.path.relpath( build.cmake_log_path, report_dir )
- build_log_url = os.path.relpath( build.build_log_path, report_dir )
- td = '<td class="%s"><a href="%s" class="%s">CMake: %s</a>' % (
- build_status.lower(), cmake_log_url, cmake_status.lower(), cmake_status)
+ cmake_log_url = os.path.relpath(build.cmake_log_path, report_dir)
+ build_log_url = os.path.relpath(build.build_log_path, report_dir)
+ td = '<td class="%s"><a href="%s" class="%s">CMake: %s</a>' % ( build_status.lower(), cmake_log_url, cmake_status.lower(), cmake_status)
if build.cmake_succeeded:
- td += '<br><a href="%s" class="%s">Build: %s</a>' % (
- build_log_url, build_status.lower(), build_status)
+ td += '<br><a href="%s" class="%s">Build: %s</a>' % ( build_log_url, build_status.lower(), build_status)
td += '</td>'
else:
td = '<td></td>'
- tds.append( td )
- tr_builds.append( '<tr>%s</tr>' % '\n'.join( tds ) )
- html = HTML_TEMPLATE.substitute(
- title='Batch build report',
+ tds.append(td)
+ tr_builds.append('<tr>%s</tr>' % '\n'.join(tds))
+ html = HTML_TEMPLATE.substitute(title='Batch build report',
th_vars=' '.join(th_vars),
- th_build_types=' '.join( th_build_types),
- tr_builds='\n'.join( tr_builds ) )
- with open( html_report_path, 'wt' ) as fhtml:
- fhtml.write( html )
+ th_build_types=' '.join(th_build_types),
+ tr_builds='\n'.join(tr_builds))
+ with open(html_report_path, 'wt') as fhtml:
+ fhtml.write(html)
print('HTML report generated in:', html_report_path)
def main():
parser.enable_interspersed_args()
options, args = parser.parse_args()
if len(args) < 3:
- parser.error( "Missing one of WORK_DIR SOURCE_DIR CONFIG_JSON_PATH." )
+ parser.error("Missing one of WORK_DIR SOURCE_DIR CONFIG_JSON_PATH.")
work_dir = args[0]
source_dir = args[1].rstrip('/\\')
config_paths = args[2:]
for config_path in config_paths:
- if not os.path.isfile( config_path ):
- parser.error( "Can not read: %r" % config_path )
+ if not os.path.isfile(config_path):
+ parser.error("Can not read: %r" % config_path)
# generate build variants
build_descs = []
for config_path in config_paths:
- build_descs_by_axis = load_build_variants_from_config( config_path )
- build_descs.extend( generate_build_variants( build_descs_by_axis ) )
+ build_descs_by_axis = load_build_variants_from_config(config_path)
+ build_descs.extend(generate_build_variants(build_descs_by_axis))
print('Build variants (%d):' % len(build_descs))
# assign build directory for each variant
- if not os.path.isdir( work_dir ):
- os.makedirs( work_dir )
+ if not os.path.isdir(work_dir):
+ os.makedirs(work_dir)
builds = []
- with open( os.path.join( work_dir, 'matrix-dir-map.txt' ), 'wt' ) as fmatrixmap:
- for index, build_desc in enumerate( build_descs ):
- build_desc_work_dir = os.path.join( work_dir, '%03d' % (index+1) )
- builds.append( BuildData( build_desc, build_desc_work_dir, source_dir ) )
- fmatrixmap.write( '%s: %s\n' % (build_desc_work_dir, build_desc) )
+ with open(os.path.join(work_dir, 'matrix-dir-map.txt'), 'wt') as fmatrixmap:
+ for index, build_desc in enumerate(build_descs):
+ build_desc_work_dir = os.path.join(work_dir, '%03d' % (index+1))
+ builds.append(BuildData(build_desc, build_desc_work_dir, source_dir))
+ fmatrixmap.write('%s: %s\n' % (build_desc_work_dir, build_desc))
for build in builds:
build.execute_build()
- html_report_path = os.path.join( work_dir, 'batchbuild-report.html' )
- generate_html_report( html_report_path, builds )
+ html_report_path = os.path.join(work_dir, 'batchbuild-report.html')
+ generate_html_report(html_report_path, builds)
print('Done')
from __future__ import print_function
import os.path
-def fix_source_eol( path, is_dry_run = True, verbose = True, eol = '\n' ):
+def fix_source_eol(path, is_dry_run = True, verbose = True, eol = '\n'):
"""Makes sure that all sources have the specified eol sequence (default: unix)."""
- if not os.path.isfile( path ):
- raise ValueError( 'Path "%s" is not a file' % path )
+ if not os.path.isfile(path):
+ raise ValueError('Path "%s" is not a file' % path)
try:
f = open(path, 'rb')
except IOError as msg:
##
##
##
-##def _do_fix( is_dry_run = True ):
+##def _do_fix(is_dry_run = True):
## from waftools import antglob
-## python_sources = antglob.glob( '.',
+## python_sources = antglob.glob('.',
## includes = '**/*.py **/wscript **/wscript_build',
## excludes = antglob.default_excludes + './waf.py',
-## prune_dirs = antglob.prune_dirs + 'waf-* ./build' )
+## prune_dirs = antglob.prune_dirs + 'waf-* ./build')
## for path in python_sources:
-## _fix_python_source( path, is_dry_run )
+## _fix_python_source(path, is_dry_run)
##
-## cpp_sources = antglob.glob( '.',
+## cpp_sources = antglob.glob('.',
## includes = '**/*.cpp **/*.h **/*.inl',
-## prune_dirs = antglob.prune_dirs + 'waf-* ./build' )
+## prune_dirs = antglob.prune_dirs + 'waf-* ./build')
## for path in cpp_sources:
-## _fix_source_eol( path, is_dry_run )
+## _fix_source_eol(path, is_dry_run)
##
##
##def dry_fix(context):
-## _do_fix( is_dry_run = True )
+## _do_fix(is_dry_run = True)
##
##def fix(context):
-## _do_fix( is_dry_run = False )
+## _do_fix(is_dry_run = False)
##
##def shutdown():
## pass
""".replace('\r\n','\n')
-def update_license( path, dry_run, show_diff ):
+def update_license(path, dry_run, show_diff):
"""Update the license statement in the specified file.
Parameters:
path: path of the C++ source file to update.
show_diff: if True, print the path of the file that would be modified,
as well as the change made to the file.
"""
- with open( path, 'rt' ) as fin:
+ with open(path, 'rt') as fin:
original_text = fin.read().replace('\r\n','\n')
newline = fin.newlines and fin.newlines[0] or '\n'
- if not original_text.startswith( LICENSE_BEGIN ):
+ if not original_text.startswith(LICENSE_BEGIN):
# No existing license found => prepend it
new_text = BRIEF_LICENSE + original_text
else:
- license_end_index = original_text.index( '\n\n' ) # search first blank line
+ license_end_index = original_text.index('\n\n') # search first blank line
new_text = BRIEF_LICENSE + original_text[license_end_index+2:]
if original_text != new_text:
if not dry_run:
- with open( path, 'wb' ) as fout:
- fout.write( new_text.replace('\n', newline ) )
+ with open(path, 'wb') as fout:
+ fout.write(new_text.replace('\n', newline))
print('Updated', path)
if show_diff:
import difflib
- print('\n'.join( difflib.unified_diff( original_text.split('\n'),
- new_text.split('\n') ) ))
+ print('\n'.join(difflib.unified_diff(original_text.split('\n'),
+ new_text.split('\n'))))
return True
return False
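# The header swap above, in isolation: everything up to the first blank line
# counts as the old license block and is replaced wholesale. A self-contained
# sketch with stand-in license text:
def _demo_license_swap():
    brief = '// Copyright example\n// SPDX: MIT\n'
    original = '// old header\n// line two\n\n#include <json/json.h>\n'
    body_start = original.index('\n\n') + 2   # skip past the first blank line
    print(brief + original[body_start:])      # new license + untouched body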
-def update_license_in_source_directories( source_dirs, dry_run, show_diff ):
+def update_license_in_source_directories(source_dirs, dry_run, show_diff):
"""Updates license text in C++ source files found in directory source_dirs.
Parameters:
source_dirs: list of directory to scan for C++ sources. Directories are
from devtools import antglob
prune_dirs = antglob.prune_dirs + 'scons-local* ./build* ./libs ./dist'
for source_dir in source_dirs:
- cpp_sources = antglob.glob( source_dir,
+ cpp_sources = antglob.glob(source_dir,
includes = '''**/*.h **/*.cpp **/*.inl''',
- prune_dirs = prune_dirs )
+ prune_dirs = prune_dirs)
for source in cpp_sources:
- update_license( source, dry_run, show_diff )
+ update_license(source, dry_run, show_diff)
def main():
usage = """%prog DIR [DIR2...]
help="""On update, show change made to the file.""")
parser.enable_interspersed_args()
options, args = parser.parse_args()
- update_license_in_source_directories( args, options.dry_run, options.show_diff )
+ update_license_in_source_directories(args, options.dry_run, options.show_diff)
print('Done')
if __name__ == '__main__':
prefix_dir: all files stored in the tarball are placed under the sub-directory prefix_dir. Set to ''
to make them children of the root.
"""
- base_dir = os.path.normpath( os.path.abspath( base_dir ) )
- def archive_name( path ):
+ base_dir = os.path.normpath(os.path.abspath(base_dir))
+ def archive_name(path):
"""Makes path relative to base_dir."""
- path = os.path.normpath( os.path.abspath( path ) )
- common_path = os.path.commonprefix( (base_dir, path) )
+ path = os.path.normpath(os.path.abspath(path))
+ common_path = os.path.commonprefix((base_dir, path))
archive_name = path[len(common_path):]
- if os.path.isabs( archive_name ):
+ if os.path.isabs(archive_name):
archive_name = archive_name[1:]
- return os.path.join( prefix_dir, archive_name )
+ return os.path.join(prefix_dir, archive_name)
def visit(tar, dirname, names):
for name in names:
path = os.path.join(dirname, name)
if os.path.isfile(path):
path_in_tar = archive_name(path)
- tar.add(path, path_in_tar )
+ tar.add(path, path_in_tar)
compression = TARGZ_DEFAULT_COMPRESSION_LEVEL
- tar = tarfile.TarFile.gzopen( tarball_path, 'w', compresslevel=compression )
+ tar = tarfile.TarFile.gzopen(tarball_path, 'w', compresslevel=compression)
try:
for source in sources:
source_path = source
- if os.path.isdir( source ):
+ if os.path.isdir(source):
os.path.walk(source_path, visit, tar)
else:
path_in_tar = archive_name(source_path)
- tar.add(source_path, path_in_tar ) # filename, arcname
+ tar.add(source_path, path_in_tar) # filename, arcname
finally:
tar.close()
-def decompress( tarball_path, base_dir ):
+def decompress(tarball_path, base_dir):
"""Decompress the gzipped tarball into directory base_dir.
"""
# !!! This class method is not documented in the online doc
# nor is bz2open!
tar = tarfile.TarFile.gzopen(tarball_path, mode='r')
try:
- tar.extractall( base_dir )
+ tar.extractall(base_dir)
finally:
tar.close()
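# Round-trip sketch for the two helpers above. make_tarball's def line is not
# part of this hunk; the argument order below follows its call sites in
# makerelease.py (tarball_path, sources, base_dir, prefix_dir):
#
#   make_tarball('dist/demo.tar.gz', ['dist/export'], 'dist/export', prefix_dir='demo-1.0')
#   decompress('dist/demo.tar.gz', 'dist/check')   # extracts under dist/check/demo-1.0/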
@return: the full path of the filename if found, or '' if filename could not be found
"""
paths = os.environ.get('PATH', '').split(os.pathsep)
- suffixes = ('win32' in sys.platform ) and '.exe .com .bat .cmd' or ''
+ suffixes = ('win32' in sys.platform) and '.exe .com .bat .cmd' or ''
for filename in filenames:
for name in [filename + ext for ext in (suffixes.split() or [''])]: # fall back to the bare name when there are no suffixes
for directory in paths:
raise
def run_doxygen(doxygen_path, config_file, working_dir, is_silent):
- config_file = os.path.abspath( config_file )
+ config_file = os.path.abspath(config_file)
doxygen_path = doxygen_path
old_cwd = os.getcwd()
try:
- os.chdir( working_dir )
+ os.chdir(working_dir)
cmd = [doxygen_path, config_file]
- print('Running:', ' '.join( cmd ))
+ print('Running:', ' '.join(cmd))
try:
import subprocess
except ImportError:
- if os.system( ' '.join( cmd ) ) != 0:
+ if os.system(' '.join(cmd)) != 0:
print('Documentation generation failed')
return False
else:
if is_silent:
- process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
else:
- process = subprocess.Popen( cmd )
+ process = subprocess.Popen(cmd)
stdout, _ = process.communicate()
if process.returncode:
print('Documentation generation failed:')
return False
return True
finally:
- os.chdir( old_cwd )
+ os.chdir(old_cwd)
-def build_doc( options, make_release=False ):
+def build_doc(options, make_release=False):
if make_release:
options.make_tarball = True
options.with_dot = True
version = open('version','rt').read().strip()
output_dir = 'dist/doxygen' # relative to doc/doxyfile location.
- if not os.path.isdir( output_dir ):
- os.makedirs( output_dir )
- top_dir = os.path.abspath( '.' )
+ if not os.path.isdir(output_dir):
+ os.makedirs(output_dir)
+ top_dir = os.path.abspath('.')
html_output_dirname = 'jsoncpp-api-html-' + version
- tarball_path = os.path.join( 'dist', html_output_dirname + '.tar.gz' )
- warning_log_path = os.path.join( output_dir, '../jsoncpp-doxygen-warning.log' )
- html_output_path = os.path.join( output_dir, html_output_dirname )
- def yesno( bool ):
+ tarball_path = os.path.join('dist', html_output_dirname + '.tar.gz')
+ warning_log_path = os.path.join(output_dir, '../jsoncpp-doxygen-warning.log')
+ html_output_path = os.path.join(output_dir, html_output_dirname)
+ def yesno(flag):
return flag and 'YES' or 'NO'
subst_keys = {
'%JSONCPP_VERSION%': version,
'%DOC_TOPDIR%': '',
'%TOPDIR%': top_dir,
- '%HTML_OUTPUT%': os.path.join( '..', output_dir, html_output_dirname ),
+ '%HTML_OUTPUT%': os.path.join('..', output_dir, html_output_dirname),
'%HAVE_DOT%': yesno(options.with_dot),
'%DOT_PATH%': os.path.split(options.dot_path)[0],
'%HTML_HELP%': yesno(options.with_html_help),
'%UML_LOOK%': yesno(options.with_uml_look),
- '%WARNING_LOG_PATH%': os.path.join( '..', warning_log_path )
+ '%WARNING_LOG_PATH%': os.path.join('..', warning_log_path)
}
- if os.path.isdir( output_dir ):
+ if os.path.isdir(output_dir):
print('Deleting directory:', output_dir)
- shutil.rmtree( output_dir )
- if not os.path.isdir( output_dir ):
- os.makedirs( output_dir )
+ shutil.rmtree(output_dir)
+ if not os.path.isdir(output_dir):
+ os.makedirs(output_dir)
- do_subst_in_file( 'doc/doxyfile', 'doc/doxyfile.in', subst_keys )
- ok = run_doxygen( options.doxygen_path, 'doc/doxyfile', 'doc', is_silent=options.silent )
+ do_subst_in_file('doc/doxyfile', 'doc/doxyfile.in', subst_keys)
+ ok = run_doxygen(options.doxygen_path, 'doc/doxyfile', 'doc', is_silent=options.silent)
if not options.silent:
print(open(warning_log_path, 'rb').read())
index_path = os.path.abspath(os.path.join('doc', subst_keys['%HTML_OUTPUT%'], 'index.html'))
print(index_path)
if options.open:
import webbrowser
- webbrowser.open( 'file://' + index_path )
+ webbrowser.open('file://' + index_path)
if options.make_tarball:
print('Generating doc tarball to', tarball_path)
tarball_sources = [
'NEWS.txt',
'version'
]
- tarball_basedir = os.path.join( output_dir, html_output_dirname )
- tarball.make_tarball( tarball_path, tarball_sources, tarball_basedir, html_output_dirname )
+ tarball_basedir = os.path.join(output_dir, html_output_dirname)
+ tarball.make_tarball(tarball_path, tarball_sources, tarball_basedir, html_output_dirname)
return tarball_path, html_output_dirname
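# do_subst_in_file is defined outside this excerpt. Judging from subst_keys
# above it performs plain token replacement of the %NAME% placeholders; a
# minimal stand-in would be:
def _subst_in_file_sketch(target_path, template_path, keys):
    with open(template_path, 'rt') as fin:
        contents = fin.read()
    for token, value in keys.items():
        contents = contents.replace(token, value)   # e.g. '%JSONCPP_VERSION%' -> version
    with open(target_path, 'wt') as fout:
        fout.write(contents)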
def main():
help="""Hides doxygen output""")
parser.enable_interspersed_args()
options, args = parser.parse_args()
- build_doc( options )
+ build_doc(options)
if __name__ == '__main__':
main()
SCONS_LOCAL_URL = 'http://sourceforge.net/projects/scons/files/scons-local/1.2.0/scons-local-1.2.0.tar.gz/download'
SOURCEFORGE_PROJECT = 'jsoncpp'
-def set_version( version ):
+def set_version(version):
with open('version','wb') as f:
- f.write( version.strip() )
+ f.write(version.strip())
-def rmdir_if_exist( dir_path ):
- if os.path.isdir( dir_path ):
- shutil.rmtree( dir_path )
+def rmdir_if_exist(dir_path):
+ if os.path.isdir(dir_path):
+ shutil.rmtree(dir_path)
class SVNError(Exception):
pass
-def svn_command( command, *args ):
+def svn_command(command, *args):
cmd = ['svn', '--non-interactive', command] + list(args)
- print('Running:', ' '.join( cmd ))
- process = subprocess.Popen( cmd,
+ print('Running:', ' '.join(cmd))
+ process = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT )
+ stderr=subprocess.STDOUT)
stdout = process.communicate()[0]
if process.returncode:
- error = SVNError( 'SVN command failed:\n' + stdout )
+ error = SVNError('SVN command failed:\n' + stdout)
error.returncode = process.returncode
raise error
return stdout
def check_no_pending_commit():
"""Checks that there is no pending commit in the sandbox."""
- stdout = svn_command( 'status', '--xml' )
- etree = ElementTree.fromstring( stdout )
+ stdout = svn_command('status', '--xml')
+ etree = ElementTree.fromstring(stdout)
msg = []
- for entry in etree.getiterator( 'entry' ):
+ for entry in etree.getiterator('entry'):
path = entry.get('path')
status = entry.find('wc-status').get('item')
if status != 'unversioned' and path != 'version':
- msg.append( 'File "%s" has pending change (status="%s")' % (path, status) )
+ msg.append('File "%s" has pending change (status="%s")' % (path, status))
if msg:
- msg.insert(0, 'Pending change to commit found in sandbox. Commit them first!' )
- return '\n'.join( msg )
+ msg.insert(0, 'Pending changes to commit found in the sandbox. Commit them first!')
+ return '\n'.join(msg)
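# The XML shape parsed above, demonstrated on a literal sample (getiterator()
# is the historical ElementTree spelling; iter() is the modern equivalent):
def _demo_status_parse():
    from xml.etree import ElementTree
    sample = ('<status><target><entry path="src/json_value.cpp">'
              '<wc-status item="modified"/></entry></target></status>')
    etree = ElementTree.fromstring(sample)
    for entry in etree.iter('entry'):
        print(entry.get('path'), entry.find('wc-status').get('item'))
        # -> src/json_value.cpp modified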
-def svn_join_url( base_url, suffix ):
+def svn_join_url(base_url, suffix):
if not base_url.endswith('/'):
base_url += '/'
if suffix.startswith('/'):
suffix = suffix[1:]
return base_url + suffix
-def svn_check_if_tag_exist( tag_url ):
+def svn_check_if_tag_exist(tag_url):
"""Checks if a tag exist.
Returns: True if the tag exist, False otherwise.
"""
try:
- list_stdout = svn_command( 'list', tag_url )
+ list_stdout = svn_command('list', tag_url)
except SVNError as e:
if e.returncode != 1 or str(e).find(tag_url) == -1: # re-raise unless 'svn list' failed because tag_url does not exist
raise e
return False
return True
-def svn_commit( message ):
+def svn_commit(message):
"""Commit the sandbox, providing the specified comment.
"""
- svn_command( 'ci', '-m', message )
+ svn_command('ci', '-m', message)
-def svn_tag_sandbox( tag_url, message ):
+def svn_tag_sandbox(tag_url, message):
"""Makes a tag based on the sandbox revisions.
"""
- svn_command( 'copy', '-m', message, '.', tag_url )
+ svn_command('copy', '-m', message, '.', tag_url)
-def svn_remove_tag( tag_url, message ):
+def svn_remove_tag(tag_url, message):
"""Removes an existing tag.
"""
- svn_command( 'delete', '-m', message, tag_url )
+ svn_command('delete', '-m', message, tag_url)
-def svn_export( tag_url, export_dir ):
+def svn_export(tag_url, export_dir):
"""Exports the tag_url revision to export_dir.
Target directory, including its parents, is created if it does not exist.
If the directory export_dir exist, it is deleted before export proceed.
"""
- rmdir_if_exist( export_dir )
- svn_command( 'export', tag_url, export_dir )
+ rmdir_if_exist(export_dir)
+ svn_command('export', tag_url, export_dir)
-def fix_sources_eol( dist_dir ):
+def fix_sources_eol(dist_dir):
"""Set file EOL for tarball distribution.
"""
print('Preparing exported source file EOL for distribution...')
prune_dirs = antglob.prune_dirs + 'scons-local* ./build* ./libs ./dist'
- win_sources = antglob.glob( dist_dir,
+ win_sources = antglob.glob(dist_dir,
includes = '**/*.sln **/*.vcproj',
- prune_dirs = prune_dirs )
- unix_sources = antglob.glob( dist_dir,
+ prune_dirs = prune_dirs)
+ unix_sources = antglob.glob(dist_dir,
includes = '''**/*.h **/*.cpp **/*.inl **/*.txt **/*.dox **/*.py **/*.html **/*.in
sconscript *.json *.expected AUTHORS LICENSE''',
excludes = antglob.default_excludes + 'scons.py sconsign.py scons-*',
- prune_dirs = prune_dirs )
+ prune_dirs = prune_dirs)
for path in win_sources:
- fixeol.fix_source_eol( path, is_dry_run = False, verbose = True, eol = '\r\n' )
+ fixeol.fix_source_eol(path, is_dry_run = False, verbose = True, eol = '\r\n')
for path in unix_sources:
- fixeol.fix_source_eol( path, is_dry_run = False, verbose = True, eol = '\n' )
+ fixeol.fix_source_eol(path, is_dry_run = False, verbose = True, eol = '\n')
-def download( url, target_path ):
+def download(url, target_path):
"""Download file represented by url to target_path.
"""
- f = urllib2.urlopen( url )
+ f = urllib2.urlopen(url)
try:
data = f.read()
finally:
f.close()
- fout = open( target_path, 'wb' )
+ fout = open(target_path, 'wb')
try:
- fout.write( data )
+ fout.write(data)
finally:
fout.close()
-def check_compile( distcheck_top_dir, platform ):
+def check_compile(distcheck_top_dir, platform):
cmd = [sys.executable, 'scons.py', 'platform=%s' % platform, 'check']
- print('Running:', ' '.join( cmd ))
- log_path = os.path.join( distcheck_top_dir, 'build-%s.log' % platform )
- flog = open( log_path, 'wb' )
+ print('Running:', ' '.join(cmd))
+ log_path = os.path.join(distcheck_top_dir, 'build-%s.log' % platform)
+ flog = open(log_path, 'wb')
try:
- process = subprocess.Popen( cmd,
+ process = subprocess.Popen(cmd,
stdout=flog,
stderr=subprocess.STDOUT,
- cwd=distcheck_top_dir )
+ cwd=distcheck_top_dir)
stdout = process.communicate()[0]
status = (process.returncode == 0)
finally:
flog.close()
return (status, log_path)
-def write_tempfile( content, **kwargs ):
- fd, path = tempfile.mkstemp( **kwargs )
- f = os.fdopen( fd, 'wt' )
+def write_tempfile(content, **kwargs):
+ fd, path = tempfile.mkstemp(**kwargs)
+ f = os.fdopen(fd, 'wt')
try:
- f.write( content )
+ f.write(content)
finally:
f.close()
return path
class SFTPError(Exception):
pass
-def run_sftp_batch( userhost, sftp, batch, retry=0 ):
- path = write_tempfile( batch, suffix='.sftp', text=True )
+def run_sftp_batch(userhost, sftp, batch, retry=0):
+ path = write_tempfile(batch, suffix='.sftp', text=True)
# psftp -agent -C blep,jsoncpp@web.sourceforge.net -batch -b batch.sftp -bc
cmd = [sftp, '-agent', '-C', '-batch', '-b', path, '-bc', userhost]
error = None
for retry_index in range(0, max(1,retry)):
heading = retry_index == 0 and 'Running:' or 'Retrying:'
- print(heading, ' '.join( cmd ))
- process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
+ print(heading, ' '.join(cmd))
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout = process.communicate()[0]
if process.returncode != 0:
- error = SFTPError( 'SFTP batch failed:\n' + stdout )
+ error = SFTPError('SFTP batch failed:\n' + stdout)
else:
break
if error:
raise error
return stdout
-def sourceforge_web_synchro( sourceforge_project, doc_dir,
- user=None, sftp='sftp' ):
+def sourceforge_web_synchro(sourceforge_project, doc_dir,
+ user=None, sftp='sftp'):
"""Notes: does not synchronize sub-directory of doc-dir.
"""
userhost = '%s,%s@web.sourceforge.net' % (user, sourceforge_project)
- stdout = run_sftp_batch( userhost, sftp, """
+ stdout = run_sftp_batch(userhost, sftp, """
cd htdocs
dir
exit
-""" )
+""")
existing_paths = set()
collect = 0
for line in stdout.split('\n'):
elif collect == 2:
path = line.strip().split()[-1:]
if path and path[0] not in ('.', '..'):
- existing_paths.add( path[0] )
- upload_paths = set( [os.path.basename(p) for p in antglob.glob( doc_dir )] )
+ existing_paths.add(path[0])
+ upload_paths = set([os.path.basename(p) for p in antglob.glob(doc_dir)])
paths_to_remove = existing_paths - upload_paths
if paths_to_remove:
print('Removing the following file from web:')
- print('\n'.join( paths_to_remove ))
- stdout = run_sftp_batch( userhost, sftp, """cd htdocs
+ print('\n'.join(paths_to_remove))
+ stdout = run_sftp_batch(userhost, sftp, """cd htdocs
rm %s
-exit""" % ' '.join(paths_to_remove) )
+exit""" % ' '.join(paths_to_remove))
print('Uploading %d files:' % len(upload_paths))
batch_size = 10
upload_paths = list(upload_paths)
remaining_files = len(upload_paths) - index
remaining_sec = file_per_sec * remaining_files
print('%d/%d, ETA=%.1fs' % (index+1, len(upload_paths), remaining_sec))
- run_sftp_batch( userhost, sftp, """cd htdocs
+ run_sftp_batch(userhost, sftp, """cd htdocs
lcd %s
mput %s
-exit""" % (doc_dir, ' '.join(paths) ), retry=3 )
+exit""" % (doc_dir, ' '.join(paths)), retry=3)
-def sourceforge_release_tarball( sourceforge_project, paths, user=None, sftp='sftp' ):
+def sourceforge_release_tarball(sourceforge_project, paths, user=None, sftp='sftp'):
userhost = '%s,%s@frs.sourceforge.net' % (user, sourceforge_project)
- run_sftp_batch( userhost, sftp, """
+ run_sftp_batch(userhost, sftp, """
mput %s
exit
-""" % (' '.join(paths),) )
+""" % (' '.join(paths),))
def main():
options, args = parser.parse_args()
if len(args) != 2:
- parser.error( 'release_version missing on command-line.' )
+ parser.error('release_version and next_version must be provided on the command-line.')
release_version = args[0]
next_version = args[1]
if not options.platforms and not options.no_test:
- parser.error( 'You must specify either --platform or --no-test option.' )
+ parser.error('You must specify either --platform or --no-test option.')
if options.ignore_pending_commit:
msg = ''
msg = check_no_pending_commit()
if not msg:
print('Setting version to', release_version)
- set_version( release_version )
- svn_commit( 'Release ' + release_version )
- tag_url = svn_join_url( SVN_TAG_ROOT, release_version )
- if svn_check_if_tag_exist( tag_url ):
+ set_version(release_version)
+ svn_commit('Release ' + release_version)
+ tag_url = svn_join_url(SVN_TAG_ROOT, release_version)
+ if svn_check_if_tag_exist(tag_url):
if options.retag_release:
- svn_remove_tag( tag_url, 'Overwriting previous tag' )
+ svn_remove_tag(tag_url, 'Overwriting previous tag')
else:
print('Aborting, tag %s already exists. Use --retag to overwrite it!' % tag_url)
- sys.exit( 1 )
- svn_tag_sandbox( tag_url, 'Release ' + release_version )
+ sys.exit(1)
+ svn_tag_sandbox(tag_url, 'Release ' + release_version)
print('Generated doxygen document...')
## doc_dirname = r'jsoncpp-api-html-0.5.0'
## doc_tarball_path = r'e:\prg\vc\Lib\jsoncpp-trunk\dist\jsoncpp-api-html-0.5.0.tar.gz'
- doc_tarball_path, doc_dirname = doxybuild.build_doc( options, make_release=True )
+ doc_tarball_path, doc_dirname = doxybuild.build_doc(options, make_release=True)
doc_distcheck_dir = 'dist/doccheck'
- tarball.decompress( doc_tarball_path, doc_distcheck_dir )
- doc_distcheck_top_dir = os.path.join( doc_distcheck_dir, doc_dirname )
+ tarball.decompress(doc_tarball_path, doc_distcheck_dir)
+ doc_distcheck_top_dir = os.path.join(doc_distcheck_dir, doc_dirname)
export_dir = 'dist/export'
- svn_export( tag_url, export_dir )
- fix_sources_eol( export_dir )
+ svn_export(tag_url, export_dir)
+ fix_sources_eol(export_dir)
source_dir = 'jsoncpp-src-' + release_version
source_tarball_path = 'dist/%s.tar.gz' % source_dir
print('Generating source tarball to', source_tarball_path)
- tarball.make_tarball( source_tarball_path, [export_dir], export_dir, prefix_dir=source_dir )
+ tarball.make_tarball(source_tarball_path, [export_dir], export_dir, prefix_dir=source_dir)
amalgamation_tarball_path = 'dist/%s-amalgamation.tar.gz' % source_dir
print('Generating amalgamation source tarball to', amalgamation_tarball_path)
amalgamation_dir = 'dist/amalgamation'
- amalgamate.amalgamate_source( export_dir, '%s/jsoncpp.cpp' % amalgamation_dir, 'json/json.h' )
+ amalgamate.amalgamate_source(export_dir, '%s/jsoncpp.cpp' % amalgamation_dir, 'json/json.h')
amalgamation_source_dir = 'jsoncpp-src-amalgamation' + release_version
- tarball.make_tarball( amalgamation_tarball_path, [amalgamation_dir],
- amalgamation_dir, prefix_dir=amalgamation_source_dir )
+ tarball.make_tarball(amalgamation_tarball_path, [amalgamation_dir],
+ amalgamation_dir, prefix_dir=amalgamation_source_dir)
# Decompress source tarball, download and install scons-local
distcheck_dir = 'dist/distcheck'
distcheck_top_dir = distcheck_dir + '/' + source_dir
print('Decompressing source tarball to', distcheck_dir)
- rmdir_if_exist( distcheck_dir )
- tarball.decompress( source_tarball_path, distcheck_dir )
+ rmdir_if_exist(distcheck_dir)
+ tarball.decompress(source_tarball_path, distcheck_dir)
scons_local_path = 'dist/scons-local.tar.gz'
print('Downloading scons-local to', scons_local_path)
- download( SCONS_LOCAL_URL, scons_local_path )
+ download(SCONS_LOCAL_URL, scons_local_path)
print('Decompressing scons-local to', distcheck_top_dir)
- tarball.decompress( scons_local_path, distcheck_top_dir )
+ tarball.decompress(scons_local_path, distcheck_top_dir)
# Run compilation
print('Compiling decompressed tarball')
all_build_status = True
for platform in options.platforms.split(','):
print('Testing platform:', platform)
- build_status, log_path = check_compile( distcheck_top_dir, platform )
+ build_status, log_path = check_compile(distcheck_top_dir, platform)
print('see build log:', log_path)
print(build_status and '=> ok' or '=> FAILED')
all_build_status = all_build_status and build_status
if not build_status:
print('Testing failed on at least one platform, aborting...')
- svn_remove_tag( tag_url, 'Removing tag due to failed testing' )
+ svn_remove_tag(tag_url, 'Removing tag due to failed testing')
sys.exit(1)
if options.user:
if not options.no_web:
print('Uploading documentation using user', options.user)
- sourceforge_web_synchro( SOURCEFORGE_PROJECT, doc_distcheck_top_dir, user=options.user, sftp=options.sftp )
+ sourceforge_web_synchro(SOURCEFORGE_PROJECT, doc_distcheck_top_dir, user=options.user, sftp=options.sftp)
print('Completed documentation upload')
print('Uploading source and documentation tarballs for release using user', options.user)
- sourceforge_release_tarball( SOURCEFORGE_PROJECT,
+ sourceforge_release_tarball(SOURCEFORGE_PROJECT,
[source_tarball_path, doc_tarball_path],
- user=options.user, sftp=options.sftp )
+ user=options.user, sftp=options.sftp)
print('Source and doc release tarballs uploaded')
else:
print('No upload user specified. Web site and download tarball were not uploaded.')
print('Tarball can be found at:', doc_tarball_path)
# Set next version number and commit
- set_version( next_version )
- svn_commit( 'Released ' + release_version )
+ set_version(next_version)
+ svn_commit('Released ' + release_version)
else:
- sys.stderr.write( msg + '\n' )
+ sys.stderr.write(msg + '\n')
if __name__ == '__main__':
main()
import fnmatch
import os
-def generate( env ):
- def Glob( env, includes = None, excludes = None, dir = '.' ):
- """Adds Glob( includes = Split( '*' ), excludes = None, dir = '.')
+def generate(env):
+ def Glob(env, includes = None, excludes = None, dir = '.'):
+ """Adds Glob(includes = Split('*'), excludes = None, dir = '.')
helper function to environment.
Globs the matching files in the file system.
excludes: list of file name patterns excluded from the return list.
Example:
- sources = env.Glob( ("*.cpp", '*.h'), "~*.cpp", "#src" )
+ sources = env.Glob(("*.cpp", '*.h'), "~*.cpp", "#src")
"""
def filterFilename(path):
- abs_path = os.path.join( dir, path )
+ abs_path = os.path.join(dir, path)
if not os.path.isfile(abs_path):
return 0
fn = os.path.basename(path)
match = 0
for include in includes:
- if fnmatch.fnmatchcase( fn, include ):
+ if fnmatch.fnmatchcase(fn, include):
match = 1
break
if match == 1 and not excludes is None:
for exclude in excludes:
- if fnmatch.fnmatchcase( fn, exclude ):
+ if fnmatch.fnmatchcase(fn, exclude):
match = 0
break
return match
if includes is None:
includes = ('*',)
- elif type(includes) in ( type(''), type(u'') ):
+ elif type(includes) in (type(''), type(u'')):
includes = (includes,)
- if type(excludes) in ( type(''), type(u'') ):
+ if type(excludes) in (type(''), type(u'')):
excludes = (excludes,)
dir = env.Dir(dir).abspath
- paths = os.listdir( dir )
- def makeAbsFileNode( path ):
- return env.File( os.path.join( dir, path ) )
- nodes = filter( filterFilename, paths )
- return map( makeAbsFileNode, nodes )
+ paths = os.listdir(dir)
+ def makeAbsFileNode(path):
+ return env.File(os.path.join(dir, path))
+ nodes = filter(filterFilename, paths)
+ return map(makeAbsFileNode, nodes)
from SCons.Script import Environment
Environment.Glob = Glob
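# The include/exclude matching used by Glob, demonstrated without SCons
# (fnmatchcase is case-sensitive on every platform, unlike fnmatch.fnmatch):
def _demo_filter():
    import fnmatch
    names = ['json_value.cpp', 'json_value.h', '~json_tool.cpp']
    includes, excludes = ('*.cpp', '*.h'), ('~*.cpp',)
    kept = [n for n in names
            if any(fnmatch.fnmatchcase(n, p) for p in includes)
            and not any(fnmatch.fnmatchcase(n, p) for p in excludes)]
    print(kept)   # ['json_value.cpp', 'json_value.h']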
## elif token == "=":
## data[key] = list()
## else:
-## append_data( data, key, new_data, token )
+## append_data(data, key, new_data, token)
## new_data = True
##
## last_token = token
##
## if last_token == '\\' and token != '\n':
## new_data = False
-## append_data( data, key, new_data, '\\' )
+## append_data(data, key, new_data, '\\')
##
## # compress lists of len 1 into single strings
## for (k, v) in data.items():
## else:
## for pattern in file_patterns:
## sources.extend(glob.glob("/".join([node, pattern])))
-## sources = map( lambda path: env.File(path), sources )
+## sources = map(lambda path: env.File(path), sources)
## return sources
##
##
## # add our output locations
## for (k, v) in output_formats.items():
## if data.get("GENERATE_" + k, v[0]) == "YES":
-## targets.append(env.Dir( os.path.join(out_dir, data.get(k + "_OUTPUT", v[1]))) )
+## targets.append(env.Dir(os.path.join(out_dir, data.get(k + "_OUTPUT", v[1]))))
##
## # don't clobber targets
## for node in targets:
Add builders and construction variables for the
SrcDist tool.
"""
-## doxyfile_scanner = env.Scanner(
-## DoxySourceScan,
+## doxyfile_scanner = env.Scanner(
+##     DoxySourceScan,
## "DoxySourceScan",
## scan_check = DoxySourceScanCheck,
-## )
+##)
if targz.exists(env):
- srcdist_builder = targz.makeBuilder( srcDistEmitter )
+ srcdist_builder = targz.makeBuilder(srcDistEmitter)
env['BUILDERS']['SrcDist'] = srcdist_builder
return target, source
## env.Append(TOOLS = 'substinfile') # this should be automatically done by SCons?!?
- subst_action = SCons.Action.Action( subst_in_file, subst_in_file_string )
+ subst_action = SCons.Action.Action(subst_in_file, subst_in_file_string)
env['BUILDERS']['SubstInFile'] = Builder(action=subst_action, emitter=subst_emitter)
def exists(env):
if internal_targz:
def targz(target, source, env):
- def archive_name( path ):
- path = os.path.normpath( os.path.abspath( path ) )
- common_path = os.path.commonprefix( (base_dir, path) )
+ def archive_name(path):
+ path = os.path.normpath(os.path.abspath(path))
+ common_path = os.path.commonprefix((base_dir, path))
archive_name = path[len(common_path):]
return archive_name
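# Worked example (illustrative paths): with base_dir = '/work/jsoncpp',
# archive_name('/work/jsoncpp/src/json_value.cpp') returns '/src/json_value.cpp';
# the common prefix is stripped character-wise, so a leading separator remains
# unless base_dir itself ends with one.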
for name in names:
path = os.path.join(dirname, name)
if os.path.isfile(path):
- tar.add(path, archive_name(path) )
+ tar.add(path, archive_name(path))
compression = env.get('TARGZ_COMPRESSION_LEVEL',TARGZ_DEFAULT_COMPRESSION_LEVEL)
- base_dir = os.path.normpath( env.get('TARGZ_BASEDIR', env.Dir('.')).abspath )
+ base_dir = os.path.normpath(env.get('TARGZ_BASEDIR', env.Dir('.')).abspath)
target_path = str(target[0])
- fileobj = gzip.GzipFile( target_path, 'wb', compression )
+ fileobj = gzip.GzipFile(target_path, 'wb', compression)
tar = tarfile.TarFile(os.path.splitext(target_path)[0], 'w', fileobj)
for source in source:
source_path = str(source)
if source.isdir():
os.path.walk(source_path, visit, tar)
else:
- tar.add(source_path, archive_name(source_path) ) # filename, arcname
+ tar.add(source_path, archive_name(source_path)) # filename, arcname
tar.close()
targzAction = SCons.Action.Action(targz, varlist=['TARGZ_COMPRESSION_LEVEL','TARGZ_BASEDIR'])
- def makeBuilder( emitter = None ):
+ def makeBuilder(emitter = None):
return SCons.Builder.Builder(action = SCons.Action.Action('$TARGZ_COM', '$TARGZ_COMSTR'),
source_factory = SCons.Node.FS.Entry,
source_scanner = SCons.Defaults.DirScanner,
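A minimal usage sketch, assuming this file is installed as an SCons tool named
'targz' and that its builder is registered as env.TarGz (both names are
assumptions; only makeBuilder is shown above):

    from SCons.Script import Environment
    env = Environment(tools=['default', 'targz'], toolpath=['scons-tools'])
    # Pack a directory and a file into a gzip-compressed tarball, rooting
    # archive member names at the current directory via TARGZ_BASEDIR:
    env.TarGz('jsoncpp-src.tar.gz', [env.Dir('src'), 'README.txt'],
              TARGZ_BASEDIR=env.Dir('.'))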
paths = []
for pattern in [ '*.actual', '*.actual-rewrite', '*.rewrite', '*.process-output' ]:
- paths += glob.glob( 'data/' + pattern )
+ paths += glob.glob('data/' + pattern)
for path in paths:
- os.unlink( path )
+ os.unlink(path)
from __future__ import print_function
import glob
import os.path
-for path in glob.glob( '*.json' ):
+for path in glob.glob('*.json'):
text = file(path,'rt').read()
target = os.path.splitext(path)[0] + '.expected'
- if os.path.exists( target ):
+ if os.path.exists(target):
print('skipping:', target)
else:
print('creating:', target)
rewrite_path = base_path + '.rewrite'
rewrite_actual_path = base_path + '.actual-rewrite'
-def valueTreeToString( fout, value, path = '.' ):
+def valueTreeToString(fout, value, path = '.'):
ty = type(value)
if ty is types.DictType:
- fout.write( '%s={}\n' % path )
+ fout.write('%s={}\n' % path)
suffix = path[-1] != '.' and '.' or ''
names = value.keys()
names.sort()
for name in names:
- valueTreeToString( fout, value[name], path + suffix + name )
+ valueTreeToString(fout, value[name], path + suffix + name)
elif ty is types.ListType:
- fout.write( '%s=[]\n' % path )
- for index, childValue in zip( xrange(0,len(value)), value ):
- valueTreeToString( fout, childValue, path + '[%d]' % index )
+ fout.write('%s=[]\n' % path)
+ for index, childValue in zip(xrange(0,len(value)), value):
+ valueTreeToString(fout, childValue, path + '[%d]' % index)
elif ty is types.StringType:
- fout.write( '%s="%s"\n' % (path,value) )
+ fout.write('%s="%s"\n' % (path,value))
elif ty is types.IntType:
- fout.write( '%s=%d\n' % (path,value) )
+ fout.write('%s=%d\n' % (path,value))
elif ty is types.FloatType:
- fout.write( '%s=%.16g\n' % (path,value) )
+ fout.write('%s=%.16g\n' % (path,value))
elif value is True:
- fout.write( '%s=true\n' % path )
+ fout.write('%s=true\n' % path)
elif value is False:
- fout.write( '%s=false\n' % path )
+ fout.write('%s=false\n' % path)
elif value is None:
- fout.write( '%s=null\n' % path )
+ fout.write('%s=null\n' % path)
else:
assert False, "Unexpected value type"
-def parseAndSaveValueTree( input, actual_path ):
- root = json.loads( input )
- fout = file( actual_path, 'wt' )
- valueTreeToString( fout, root )
+def parseAndSaveValueTree(input, actual_path):
+ root = json.loads(input)
+ fout = file(actual_path, 'wt')
+ valueTreeToString(fout, root)
fout.close()
return root
-def rewriteValueTree( value, rewrite_path ):
- rewrite = json.dumps( value )
+def rewriteValueTree(value, rewrite_path):
+ rewrite = json.dumps(value)
#rewrite = rewrite[1:-1] # Somehow the string is quoted! jsonpy bug?
- file( rewrite_path, 'wt').write( rewrite + '\n' )
+ file(rewrite_path, 'wt').write(rewrite + '\n')
return rewrite
-input = file( input_path, 'rt' ).read()
-root = parseAndSaveValueTree( input, actual_path )
-rewrite = rewriteValueTree( json.write( root ), rewrite_path )
-rewrite_root = parseAndSaveValueTree( rewrite, rewrite_actual_path )
+input = file(input_path, 'rt').read()
+root = parseAndSaveValueTree(input, actual_path)
+rewrite = rewriteValueTree(json.write(root), rewrite_path)
+rewrite_root = parseAndSaveValueTree(rewrite, rewrite_actual_path)
-sys.exit( 0 )
+sys.exit(0)
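For reference, a sketch of the flattened layout valueTreeToString writes to the
'.actual' files (the input document is illustrative):

    # Given the JSON document {"name": "json", "size": 42, "tags": ["fast"]},
    # parseAndSaveValueTree emits one line per value, keyed by its path:
    #   .={}
    #   .name="json"
    #   .size=42
    #   .tags=[]
    #   .tags[0]="fast"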
pass # python3
status = pipe.close()
return status, process_output
-def compareOutputs( expected, actual, message ):
+def compareOutputs(expected, actual, message):
expected = expected.strip().replace('\r','').split('\n')
actual = actual.strip().replace('\r','').split('\n')
diff_line = 0
- max_line_to_compare = min( len(expected), len(actual) )
+ max_line_to_compare = min(len(expected), len(actual))
for index in range(0,max_line_to_compare):
if expected[index].strip() != actual[index].strip():
diff_line = index + 1
diff_line = max_line_to_compare+1
if diff_line == 0:
return None
- def safeGetLine( lines, index ):
+ def safeGetLine(lines, index):
index -= 1
if index >= len(lines):
return ''
Actual: '%s'
""" % (message, diff_line,
safeGetLine(expected,diff_line),
- safeGetLine(actual,diff_line) )
+ safeGetLine(actual,diff_line))
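For reference, the behaviour sketched by the function above (values are
illustrative):

    # compareOutputs('a\nb\n', 'a\nb\n', 'input')  -> None: outputs match.
    # compareOutputs('a\nb\n', 'a\nc\n', 'input')  -> a message naming line 2,
    #   quoting the expected line 'b' and the actual line 'c'.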
-def safeReadFile( path ):
+def safeReadFile(path):
try:
- return open( path, 'rt', encoding = 'utf-8' ).read()
+ return open(path, 'rt', encoding = 'utf-8').read()
except IOError as e:
return '<File "%s" is missing: %s>' % (path,e)
-def runAllTests( jsontest_executable_path, input_dir = None,
+def runAllTests(jsontest_executable_path, input_dir = None,
use_valgrind=False, with_json_checker=False,
writerClass='StyledWriter'):
if not input_dir:
- input_dir = os.path.join( os.getcwd(), 'data' )
- tests = glob( os.path.join( input_dir, '*.json' ) )
+ input_dir = os.path.join(os.getcwd(), 'data')
+ tests = glob(os.path.join(input_dir, '*.json'))
if with_json_checker:
- test_jsonchecker = glob( os.path.join( input_dir, '../jsonchecker', '*.json' ) )
+ test_jsonchecker = glob(os.path.join(input_dir, '../jsonchecker', '*.json'))
else:
test_jsonchecker = []
failed_tests = []
valgrind_path = use_valgrind and VALGRIND_CMD or ''
for input_path in tests + test_jsonchecker:
- expect_failure = os.path.basename( input_path ).startswith( 'fail' )
+ expect_failure = os.path.basename(input_path).startswith('fail')
is_json_checker_test = (input_path in test_jsonchecker) or expect_failure
print('TESTING:', input_path, end=' ')
options = is_json_checker_test and '--json-checker' or ''
options += ' --json-writer %s'%writerClass
- cmd = '%s%s %s "%s"' % (
- valgrind_path, jsontest_executable_path, options,
+ cmd = '%s%s %s "%s"' % (valgrind_path, jsontest_executable_path, options,
input_path)
status, process_output = getStatusOutput(cmd)
if is_json_checker_test:
if expect_failure:
if not status:
print('FAILED')
- failed_tests.append( (input_path, 'Parsing should have failed:\n%s' %
- safeReadFile(input_path)) )
+ failed_tests.append((input_path, 'Parsing should have failed:\n%s' %
+ safeReadFile(input_path)))
else:
print('OK')
else:
if status:
print('FAILED')
- failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
+ failed_tests.append((input_path, 'Parsing failed:\n' + process_output))
else:
print('OK')
else:
base_path = os.path.splitext(input_path)[0]
- actual_output = safeReadFile( base_path + '.actual' )
- actual_rewrite_output = safeReadFile( base_path + '.actual-rewrite' )
- open(base_path + '.process-output', 'wt', encoding = 'utf-8').write( process_output )
+ actual_output = safeReadFile(base_path + '.actual')
+ actual_rewrite_output = safeReadFile(base_path + '.actual-rewrite')
+ open(base_path + '.process-output', 'wt', encoding = 'utf-8').write(process_output)
if status:
print('parsing failed')
- failed_tests.append( (input_path, 'Parsing failed:\n' + process_output) )
+ failed_tests.append((input_path, 'Parsing failed:\n' + process_output))
else:
expected_output_path = os.path.splitext(input_path)[0] + '.expected'
- expected_output = open( expected_output_path, 'rt', encoding = 'utf-8' ).read()
- detail = ( compareOutputs( expected_output, actual_output, 'input' )
- or compareOutputs( expected_output, actual_rewrite_output, 'rewrite' ) )
+ expected_output = open(expected_output_path, 'rt', encoding = 'utf-8').read()
+ detail = (compareOutputs(expected_output, actual_output, 'input')
+ or compareOutputs(expected_output, actual_rewrite_output, 'rewrite'))
if detail:
print('FAILED')
- failed_tests.append( (input_path, detail) )
+ failed_tests.append((input_path, detail))
else:
print('OK')
print(failed_test[1])
print()
print('Test results: %d passed, %d failed.' % (len(tests)-len(failed_tests),
- len(failed_tests) ))
+ len(failed_tests)))
return 1
else:
print('All %d tests passed.' % len(tests))
def main():
from optparse import OptionParser
- parser = OptionParser( usage="%prog [options] <path to jsontestrunner.exe> [test case directory]" )
+ parser = OptionParser(usage="%prog [options] <path to jsontestrunner.exe> [test case directory]")
parser.add_option("--valgrind",
action="store_true", dest="valgrind", default=False,
help="run all the tests using valgrind to detect memory leaks")
options, args = parser.parse_args()
if len(args) < 1 or len(args) > 2:
- parser.error( 'Must provides at least path to jsontestrunner executable.' )
- sys.exit( 1 )
+ parser.error('Must provide at least the path to the jsontestrunner executable.')
+ sys.exit(1)
- jsontest_executable_path = os.path.normpath( os.path.abspath( args[0] ) )
+ jsontest_executable_path = os.path.normpath(os.path.abspath(args[0]))
if len(args) > 1:
- input_path = os.path.normpath( os.path.abspath( args[1] ) )
+ input_path = os.path.normpath(os.path.abspath(args[1]))
else:
input_path = None
- status = runAllTests( jsontest_executable_path, input_path,
+ status = runAllTests(jsontest_executable_path, input_path,
use_valgrind=options.valgrind,
with_json_checker=options.with_json_checker,
writerClass='StyledWriter')
if status:
- sys.exit( status )
- status = runAllTests( jsontest_executable_path, input_path,
+ sys.exit(status)
+ status = runAllTests(jsontest_executable_path, input_path,
use_valgrind=options.valgrind,
with_json_checker=options.with_json_checker,
writerClass='StyledStreamWriter')
- sys.exit( status )
+ sys.exit(status)
if __name__ == '__main__':
main()
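Typical invocations of this driver, assuming the script is saved as
runjsontests.py (the executable path and data directory are illustrative):

    # Run the reader/writer test suite against a built jsontestrunner:
    python runjsontests.py ../build/jsontestrunner_exe
    # Same, under valgrind, with an explicit test-case directory:
    python runjsontests.py --valgrind ../build/jsontestrunner_exe test/data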
VALGRIND_CMD = 'valgrind --tool=memcheck --leak-check=yes --undef-value-errors=yes'
class TestProxy(object):
- def __init__( self, test_exe_path, use_valgrind=False ):
- self.test_exe_path = os.path.normpath( os.path.abspath( test_exe_path ) )
+ def __init__(self, test_exe_path, use_valgrind=False):
+ self.test_exe_path = os.path.normpath(os.path.abspath(test_exe_path))
self.use_valgrind = use_valgrind
- def run( self, options ):
+ def run(self, options):
if self.use_valgrind:
cmd = VALGRIND_CMD.split()
else:
cmd = []
- cmd.extend( [self.test_exe_path, '--test-auto'] + options )
+ cmd.extend([self.test_exe_path, '--test-auto'] + options)
try:
- process = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT )
+ process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except:
print(cmd)
raise
return False, stdout
return True, stdout
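# Illustrative call (the executable path is an assumption for the example):
#   TestProxy('bin/test_lib_json', use_valgrind=True).run(['--list-tests'])
# runs 'valgrind --tool=memcheck ... bin/test_lib_json --test-auto --list-tests'
# and returns (succeeded, captured_output).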
-def runAllTests( exe_path, use_valgrind=False ):
- test_proxy = TestProxy( exe_path, use_valgrind=use_valgrind )
- status, test_names = test_proxy.run( ['--list-tests'] )
+def runAllTests(exe_path, use_valgrind=False):
+ test_proxy = TestProxy(exe_path, use_valgrind=use_valgrind)
+ status, test_names = test_proxy.run(['--list-tests'])
if not status:
print("Failed to obtain unit tests list:\n" + test_names, file=sys.stderr)
return 1
failures = []
for name in test_names:
print('TESTING %s:' % name, end=' ')
- succeed, result = test_proxy.run( ['--test', name] )
+ succeed, result = test_proxy.run(['--test', name])
if succeed:
print('OK')
else:
- failures.append( (name, result) )
+ failures.append((name, result))
print('FAILED')
failed_count = len(failures)
pass_count = len(test_names) - failed_count
print()
for name, result in failures:
print(result)
- print('%d/%d tests passed (%d failure(s))' % (
- pass_count, len(test_names), failed_count))
+ print('%d/%d tests passed (%d failure(s))' % (pass_count, len(test_names), failed_count))
return 1
else:
print('All %d tests passed' % len(test_names))
def main():
from optparse import OptionParser
- parser = OptionParser( usage="%prog [options] <path to test_lib_json.exe>" )
+ parser = OptionParser(usage="%prog [options] <path to test_lib_json.exe>")
parser.add_option("--valgrind",
action="store_true", dest="valgrind", default=False,
help="run all the tests using valgrind to detect memory leaks")
options, args = parser.parse_args()
if len(args) != 1:
- parser.error( 'Must provides at least path to test_lib_json executable.' )
- sys.exit( 1 )
+ parser.error('Must provide the path to the test_lib_json executable.')
+ sys.exit(1)
- exit_code = runAllTests( args[0], use_valgrind=options.valgrind )
- sys.exit( exit_code )
+ exit_code = runAllTests(args[0], use_valgrind=options.valgrind)
+ sys.exit(exit_code)
if __name__ == '__main__':
main()
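Typical invocations, assuming the script is saved as rununittests.py (the
executable path is illustrative):

    # Run every registered unit test once:
    python rununittests.py ../build/test_lib_json
    # Re-run the suite under valgrind to check for leaks:
    python rununittests.py --valgrind ../build/test_lib_json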