From: Michelle McDaniel
Date: Fri, 10 Mar 2017 16:47:50 +0000 (-0800)
Subject: Crossgen all assemblies for throughput perf
X-Git-Tag: submit/tizen/20210909.063632~11030^2~7754^2
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=332e8ef2eb59e9c75cdec1c92ca94a5467f2eb4e;p=platform%2Fupstream%2Fdotnet%2Fruntime.git

Crossgen all assemblies for throughput perf

We want to run over all of the Microsoft and System dlls when running
throughput perf testing. We exclude some assemblies because they require
other assemblies that are not present in the assembly packages.

Commit migrated from https://github.com/dotnet/coreclr/commit/436a2541a70107df06cd9fd41e147a20db488c1d
---

diff --git a/src/coreclr/tests/scripts/run-throughput-perf.py b/src/coreclr/tests/scripts/run-throughput-perf.py
index c59fee6..ee6e4a3 100644
--- a/src/coreclr/tests/scripts/run-throughput-perf.py
+++ b/src/coreclr/tests/scripts/run-throughput-perf.py
@@ -32,31 +32,20 @@ import csv
 # Globals
 ##########################################################################
 
-# List of dlls we want to crossgen
-dll_list = {
+# List of dlls we want to exclude
+dll_exclude_list = {
     'Windows_NT': [
-        "System.Private.CoreLib",
-        "System.Reflection.Metadata",
-        "System.Linq.Expressions",
-        "Microsoft.CSharp",
-        "System",
-        "Microsoft.CodeAnalysis.VisualBasic",
-        "System.Private.DataContractSerialization",
-        "System.Core",
-        "System.Xml",
-        "Microsoft.CodeAnalysis.CSharp",
-        "Microsoft.CodeAnalysis",
-        "System.Linq.Parallel",
-        "System.Private.Xml"
+        # Require Newtonsoft.Json
+        "Microsoft.DotNet.ProjectModel.dll",
+        "Microsoft.Extensions.DependencyModel.dll",
+        # Require System.Security.Principal.Windows
+        "System.Net.Requests.dll",
+        "System.Net.Security.dll",
+        "System.Net.Sockets.dll"
     ],
-    'Linux': [
-        "System.Private.CoreLib",
-        "System.Reflection.Metadata",
-        "System.Linq.Expressions",
-        "Microsoft.CSharp",
-        "System.Private.DataContractSerialization",
-        "System.Linq.Parallel",
-        "System.Private.Xml"
+    'Linux' : [
+        # Requires System.Runtime.WindowsRuntime
+        "System.Runtime.WindowsRuntime.UI.Xaml.dll"
     ]
 }
 
@@ -284,7 +273,7 @@ def runIterations(dll_name, dll_path, iterations, crossgen_path, jit_path, assem
 ##########################################################################
 
 def main(args):
-    global dll_list
+    global dll_exclude_list
     global jit_list
     global os_group_list
     global python_exe_list
@@ -321,38 +310,42 @@ def main(args):
     python_exe = python_exe_list[os_group]
 
     # Run throughput testing
-    for dll_name in dll_list[os_group]:
-        dll_file_name = dll_name + ".dll"
-        dll_path = os.path.join(assembly_root, dll_file_name)
-        dll_elapsed_times = runIterations(dll_file_name, dll_path, iterations, crossgen_path, jit_path, assembly_root)
-
-        if len(dll_elapsed_times) != 0:
-            if not benchview_path is None:
-                # Generate the csv file
-                csv_file_name = generateCSV(dll_name, dll_elapsed_times)
-                shutil.copy(csv_file_name, clr_root)
-
-                # For each benchmark, call measurement.py
-                measurement_args = [python_exe,
-                        os.path.join(benchview_path, "measurement.py"),
-                        "csv",
-                        os.path.join(os.getcwd(), csv_file_name),
-                        "--metric",
-                        "execution_time",
-                        "--unit",
-                        "milliseconds",
-                        "--better",
-                        "desc",
-                        "--drop-first-value",
-                        "--append"]
-                log(" ".join(measurement_args))
-                proc = subprocess.Popen(measurement_args)
-                proc.communicate()
-            else:
-                # Write output to console if we are not publishing
-                log("%s" % (dll_name))
-                log("Duration: [%s]" % (", ".join(str(x) for x in dll_elapsed_times)))
-
+    for dll_file_name in os.listdir(assembly_root):
+        # Find all framework dlls in the assembly_root dir, which we will crossgen
+        if (dll_file_name.endswith(".dll") and
+            (not ".ni." in dll_file_name) and
+            ("Microsoft" in dll_file_name or "System" in dll_file_name) and
+            (not dll_file_name in dll_exclude_list[os_group])):
+            dll_name = dll_file_name.replace(".dll", "")
+            dll_path = os.path.join(assembly_root, dll_file_name)
+            dll_elapsed_times = runIterations(dll_file_name, dll_path, iterations, crossgen_path, jit_path, assembly_root)
+
+            if len(dll_elapsed_times) != 0:
+                if not benchview_path is None:
+                    # Generate the csv file
+                    csv_file_name = generateCSV(dll_name, dll_elapsed_times)
+                    shutil.copy(csv_file_name, clr_root)
+
+                    # For each benchmark, call measurement.py
+                    measurement_args = [python_exe,
+                            os.path.join(benchview_path, "measurement.py"),
+                            "csv",
+                            os.path.join(os.getcwd(), csv_file_name),
+                            "--metric",
+                            "execution_time",
+                            "--unit",
+                            "milliseconds",
+                            "--better",
+                            "desc",
+                            "--drop-first-value",
+                            "--append"]
+                    log(" ".join(measurement_args))
+                    proc = subprocess.Popen(measurement_args)
+                    proc.communicate()
+                else:
+                    # Write output to console if we are not publishing
+                    log("%s" % (dll_name))
+                    log("Duration: [%s]" % (", ".join(str(x) for x in dll_elapsed_times)))
 
     # Upload the data
     if not benchview_path is None:
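
For readers who want to try the new dll selection in isolation, below is a
minimal standalone sketch. It is not part of the patch: the helper name
find_crossgen_targets and the example directory path are hypothetical; only
the filter condition mirrors the diff above.

    import os

    # Exclude lists copied from the patch above: per-OS dlls that depend on
    # assemblies that are not present in the assembly packages.
    dll_exclude_list = {
        'Windows_NT': [
            "Microsoft.DotNet.ProjectModel.dll",
            "Microsoft.Extensions.DependencyModel.dll",
            "System.Net.Requests.dll",
            "System.Net.Security.dll",
            "System.Net.Sockets.dll"
        ],
        'Linux': [
            "System.Runtime.WindowsRuntime.UI.Xaml.dll"
        ]
    }

    def find_crossgen_targets(assembly_root, os_group):
        # Hypothetical helper: return the framework dlls under assembly_root
        # that the script would crossgen, i.e. Microsoft/System managed dlls,
        # skipping native images (".ni.") and the per-OS exclude list.
        targets = []
        for dll_file_name in os.listdir(assembly_root):
            if (dll_file_name.endswith(".dll") and
                ".ni." not in dll_file_name and
                ("Microsoft" in dll_file_name or "System" in dll_file_name) and
                dll_file_name not in dll_exclude_list[os_group]):
                targets.append(dll_file_name)
        return targets

    if __name__ == "__main__":
        # Placeholder path; point this at a real assembly package directory.
        for name in find_crossgen_targets("/tmp/assemblies", "Linux"):
            print(name)

Filtering by filename rather than keeping a hardcoded crossgen list means
newly added framework assemblies are picked up automatically; only the
exclusions need maintenance.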