Add linux throughput testing support
authorMichelle McDaniel <adiaaida@gmail.com>
Wed, 8 Mar 2017 22:58:03 +0000 (22:58 +0000)
committerMichelle McDaniel <adiaaida@gmail.com>
Fri, 10 Mar 2017 00:27:46 +0000 (16:27 -0800)
This change modifies perf-prep.sh, run-throughput-perf.py and perf.groovy
to add support for linux throughput testing. The following changes have
been made:

1. Update run-throughput-perf to specify the jit differently on Linux and
Windows. Create a new set of dlls to crossgen on Linux. The following are
removed: CodeAnalysis, CodeAnalysis.VisualBasic, CodeAnalysis.CSharp
(these do not exist in the corefx runtime dir that we will be using on
Linux), System, System.Core, System.XML (these are significantly smaller
on Linux than Windows). Make the list of architectures a dictionary so we
can specify different arches for different OSes. Change the path to
crossgen/the jit to the Product directory. Change the timer to
timeit.default_timer, which will pick the most accurate timer for each
platform.

2. Modify perf-prep for throughput purposes. In throughput testing, we
need to enlist in corefx and build it. We pick a specific commit to enlist
in so that testing will be consistent. The rest of the work that perf-prep
does can be skipped for throughput testing, as we are not running tests.

3. Update perf.groovy with throughput jobs on Linux.

perf.groovy
tests/scripts/perf-prep.sh
tests/scripts/run-throughput-perf.py

index 1513217..2684b3b 100644 (file)
@@ -79,7 +79,7 @@ def static getOSGroup(def os) {
                     {
                         // Download package and copy compatjit into Core_Root
                         batchFile("C:\\Tools\\nuget.exe install runtime.win7-${architecture}.Microsoft.NETCore.Jit -Source https://dotnet.myget.org/F/dotnet-core -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion\n" +
-                        "xcopy \"%WORKSPACE%\\runtime.win7-x86.Microsoft.NETCore.Jit\\runtimes\\win7-x86\\native\\compatjit.dll\" \"%WORKSPACE%\\bin\\tests\\${os}.${architecture}.${configuration}\\Tests\\Core_Root\" /Y")
+                        "xcopy \"%WORKSPACE%\\runtime.win7-x86.Microsoft.NETCore.Jit\\runtimes\\win7-x86\\native\\compatjit.dll\" \"%WORKSPACE%\\bin\\Product\\${os}.${architecture}.${configuration}\" /Y")
                     }
 
                                        batchFile("tests\\scripts\\run-xunit-perf.cmd -arch ${arch} -configuration ${configuration} ${testEnv} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library -uploadToBenchview \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" -runtype ${runType}")
@@ -162,7 +162,7 @@ def static getOSGroup(def os) {
                     {
                         // Download package and copy compatjit into Core_Root
                         batchFile("C:\\Tools\\nuget.exe install runtime.win7-${architecture}.Microsoft.NETCore.Jit -Source https://dotnet.myget.org/F/dotnet-core -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion\n" +
-                        "xcopy \"%WORKSPACE%\\runtime.win7-x86.Microsoft.NETCore.Jit\\runtimes\\win7-x86\\native\\compatjit.dll\" \"%WORKSPACE%\\bin\\tests\\${os}.${architecture}.${configuration}\\Tests\\Core_Root\" /Y")
+                        "xcopy \"%WORKSPACE%\\runtime.win7-x86.Microsoft.NETCore.Jit\\runtimes\\win7-x86\\native\\compatjit.dll\" \"%WORKSPACE%\\bin\\Product\\${os}.${architecture}.${configuration}\" /Y")
                     }
                                        batchFile("py -u tests\\scripts\\run-throughput-perf.py -arch ${arch} -os ${os} -configuration ${configuration} -clr_root \"%WORKSPACE%\" -assembly_root \"%WORKSPACE%\\Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os}\\lib\" -benchview_path \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" -run_type ${runType}")
                                }
@@ -222,8 +222,8 @@ def static getOSGroup(def os) {
                 shell("./init-tools.sh")
                                shell("./build.sh ${architecture} ${configuration}")
                                shell("GIT_BRANCH_WITHOUT_ORIGIN=\$(echo \$GIT_BRANCH | sed \"s/[^/]*\\/\\(.*\\)/\\1 /\")\n" +
-                               "python3.5 \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools/submission-metadata.py\" --name " + "\"" + benchViewName + "\"" + " --user " + "\"dotnet-bot@microsoft.com\"\n" +
-                               "python3.5 \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools/build.py\" git --branch \$GIT_BRANCH_WITHOUT_ORIGIN --type " + runType)
+                               "python3.5 \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools/submission-metadata.py\" --name \" ${benchViewName} \" --user \"dotnet-bot@microsoft.com\"\n" +
+                               "python3.5 \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools/build.py\" git --branch \$GIT_BRANCH_WITHOUT_ORIGIN --type ${runType}")
                 shell("""sudo -E bash ./tests/scripts/run-xunit-perf.sh \\
                 --testRootDir=\"\${WORKSPACE}/bin/tests/Windows_NT.${architecture}.${configuration}\" \\
                 --testNativeBinDir=\"\${WORKSPACE}/bin/obj/${osGroup}.${architecture}.${configuration}/tests\" \\
@@ -269,3 +269,80 @@ def static getOSGroup(def os) {
         }
     } // os
 } // isPR
+
+// Create the Linux/OSX/CentOS coreclr test leg for debug and release and each scenario
+[true, false].each { isPR ->
+    ['Ubuntu14.04'].each { os ->
+        def newJob = job(Utilities.getFullJobName(project, "perf_throughput_${os}", isPR)) {
+                       
+                       label('linux_clr_perf')
+                               wrappers {
+                                       credentialsBinding {
+                                               string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
+                                       }
+                               }
+                       
+                       if (isPR)
+                       {
+                               parameters
+                               {
+                                       stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that you will be used to build the full title of a run in Benchview.  The final name will be of the form <branch> private BenchviewCommitName')
+                               }
+                       }
+                       def osGroup = getOSGroup(os)
+                       def architecture = 'x64'
+                       def configuration = 'Release'
+                       def runType = isPR ? 'private' : 'rolling'
+                       def benchViewName = isPR ? 'coreclr private \$BenchviewCommitName' : 'coreclr rolling \$GIT_BRANCH_WITHOUT_ORIGIN \$GIT_COMMIT'
+                       
+            steps {
+                shell("bash ./tests/scripts/perf-prep.sh --throughput")
+                shell("./init-tools.sh")
+                               shell("./build.sh ${architecture} ${configuration}")
+                               shell("GIT_BRANCH_WITHOUT_ORIGIN=\$(echo \$GIT_BRANCH | sed \"s/[^/]*\\/\\(.*\\)/\\1 /\")\n" +
+                               "python3.5 \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools/submission-metadata.py\" --name \" ${benchViewName} \" --user \"dotnet-bot@microsoft.com\"\n" +
+                               "python3.5 \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools/build.py\" git --branch \$GIT_BRANCH_WITHOUT_ORIGIN --type ${runType}")
+                shell("""sudo -E python3.5 ./tests/scripts/run-throughput-perf.py \\
+                -arch \"${architecture}\" \\
+                -os \"${os}\" \\
+                -configuration \"${configuration}\" \\
+                -clr_root \"\${WORKSPACE}\" \\
+                -assembly_root \"\${WORKSPACE}/_/fx/bin/runtime/netcoreapp-${osGroup}-${configuration}-${architecture}\" \\
+                               -run_type \"${runType}\" \\
+                               -benchview_path \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools\"""")
+            }
+        }
+
+        // Save machinedata.json to /artifact/bin/ Jenkins dir
+        def archiveSettings = new ArchivalSettings()
+        archiveSettings.addFiles('throughput-*.csv')
+        archiveSettings.addFiles('machinedata.json')
+        Utilities.addArchival(newJob, archiveSettings)
+
+        Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")
+
+        // For perf, we need to keep the run results longer
+        newJob.with {
+            // Enable the log rotator
+            logRotator {
+                artifactDaysToKeep(7)
+                daysToKeep(300)
+                artifactNumToKeep(25)
+                numToKeep(1000)
+            }
+        }
+        if (isPR) {
+            TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
+            builder.setGithubContext("${os} Throughput Perf Tests")
+            builder.triggerOnlyOnComment()
+            builder.setCustomTriggerPhrase("(?i).*test\\W+${os}\\W+throughput.*")
+            builder.triggerForBranch(branch)
+            builder.emitTrigger(newJob)
+        }
+        else {
+            // Set a push trigger
+            TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
+            builder.emitTrigger(newJob)
+        }
+    } // os
+} // isPR
index effdc0a..4468dbb 100755 (executable)
@@ -11,6 +11,8 @@ function print_usage {
     echo ''
     echo 'Required arguments:'
     echo '  --branch=<path>             : branch where coreclr/corefx/test bits are copied from (e.g. dotnet_coreclr).'
+    echo 'Optional arguments:'
+    echo '  --throughput                : if we are running setup for a throughput run.'
 }
 
 # Exit code constants
@@ -20,6 +22,7 @@ readonly EXIT_CODE_SUCCESS=0       # Script ran normally.
 perfArch="x64"
 perfConfig="Release"
 perfBranch=
+throughput=0
 
 for i in "$@"
 do
@@ -31,6 +34,9 @@ do
         --branch=*)
             perfBranch=${i#*=}
             ;;
+        -t|--throughput)
+            throughput=1
+            ;;
         *)
             echo "Unknown switch: $i"
             print_usage
@@ -57,22 +63,38 @@ unzip -q -o benchview.zip -d ./tests/scripts/Microsoft.BenchView.JSONFormat
 python3.5 --version
 python3.5 ./tests/scripts/Microsoft.BenchView.JSONFormat/tools/machinedata.py
 
-# Set up the copies
-# Coreclr build containing the tests and mscorlib
-curl https://ci.dot.net/job/$perfBranch/job/master/job/release_windows_nt/lastSuccessfulBuild/artifact/bin/tests/tests.zip -o tests.zip
+if [ $throughput -eq 1 ]; then
+    # Clone corefx
+    if [ -d "_" ]; then
+        rm -r -f _
+    fi
+    mkdir _
+    git clone https://github.com/dotnet/corefx.git _/fx
+    cd _/fx
+
+    # Checkout the specific commit we want
+    git checkout f0b9e238c08f62a1db90ef0378980ac771204d35
+
+    # Build
+    ./build.sh -release
+else
+    # Set up the copies
+    # Coreclr build containing the tests and mscorlib
+    curl https://ci.dot.net/job/$perfBranch/job/master/job/release_windows_nt/lastSuccessfulBuild/artifact/bin/tests/tests.zip -o tests.zip
 
-# Corefx components.  We now have full stack builds on all distros we test here, so we can copy straight from CoreFX jobs.
-mkdir corefx
-curl https://ci.dot.net/job/dotnet_corefx/job/master/job/ubuntu14.04_release/lastSuccessfulBuild/artifact/bin/build.tar.gz -o ./corefx/build.tar.gz
+    # Corefx components.  We now have full stack builds on all distros we test here, so we can copy straight from CoreFX jobs.
+    mkdir corefx
+    curl https://ci.dot.net/job/dotnet_corefx/job/master/job/ubuntu14.04_release/lastSuccessfulBuild/artifact/bin/build.tar.gz -o ./corefx/build.tar.gz
 
-# Unpack the corefx binaries
-pushd corefx > /dev/null
-tar -xf build.tar.gz
-rm build.tar.gz
-popd > /dev/null
+    # Unpack the corefx binaries
+    pushd corefx > /dev/null
+    tar -xf build.tar.gz
+    rm build.tar.gz
+    popd > /dev/null
 
-# Unzip the tests first.  Exit with 0
-mkdir bin
-mkdir bin/tests
-unzip -q -o tests.zip -d ./bin/tests/Windows_NT.$perfArch.$perfConfig || exit 0
-echo "unzip tests to ./bin/tests/Windows_NT.$perfArch.$perfConfig"
+    # Unzip the tests first.  Exit with 0
+    mkdir bin
+    mkdir bin/tests
+    unzip -q -o tests.zip -d ./bin/tests/Windows_NT.$perfArch.$perfConfig || exit 0
+    echo "unzip tests to ./bin/tests/Windows_NT.$perfArch.$perfConfig"
+fi
index 04491cb..c59fee6 100644 (file)
@@ -24,6 +24,7 @@ import shutil
 import subprocess
 import sys
 import time
+import timeit
 import stat
 import csv
 
@@ -32,7 +33,8 @@ import csv
 ##########################################################################
 
 # List of dlls we want to crossgen
-dll_list = [
+dll_list = {
+    'Windows_NT': [
         "System.Private.CoreLib",
         "System.Reflection.Metadata",
         "System.Linq.Expressions",
@@ -46,7 +48,38 @@ dll_list = [
         "Microsoft.CodeAnalysis",
         "System.Linq.Parallel",
         "System.Private.Xml"
-        ]
+    ],
+    'Linux': [
+        "System.Private.CoreLib",
+        "System.Reflection.Metadata",
+        "System.Linq.Expressions",
+        "Microsoft.CSharp",
+        "System.Private.DataContractSerialization",
+        "System.Linq.Parallel",
+        "System.Private.Xml"
+    ]
+}
+
+jit_list = {
+    'Windows_NT': {
+        'x64': 'clrjit.dll',
+        'x86': 'clrjit.dll',
+        'x86jit32': 'compatjit.dll'
+    },
+    'Linux': {
+        'x64': 'libclrjit.so'
+    }
+}
+
+os_group_list = {
+    'Windows_NT': 'Windows_NT',
+    'Ubuntu14.04': 'Linux'
+}
+
+python_exe_list = {
+    'Windows_NT': 'py',
+    'Linux': 'python3.5'
+}
 
 ##########################################################################
 # Argument Parser
@@ -104,18 +137,21 @@ def validate_args(args):
         if not helper(arg):
             raise Exception('Argument: %s is not valid.' % (arg))
 
-    valid_archs = ['x86', 'x64', 'x86jit32']
+    valid_archs = {'Windows_NT': ['x86', 'x64', 'x86jit32'], 'Linux': ['x64']}
     valid_build_types = ['Release']
     valid_run_types = ['rolling', 'private']
-    valid_os = ['Windows_NT', 'Linux']
+    valid_os = ['Windows_NT', 'Ubuntu14.04']
 
     arch = next((a for a in valid_archs if a.lower() == arch.lower()), arch)
     build_type = next((b for b in valid_build_types if b.lower() == build_type.lower()), build_type)
 
-    validate_arg(arch, lambda item: item in valid_archs)
+    validate_arg(operating_system, lambda item: item in valid_os)
+
+    os_group = os_group_list[operating_system]
+
+    validate_arg(arch, lambda item: item in valid_archs[os_group])
     validate_arg(build_type, lambda item: item in valid_build_types)
     validate_arg(run_type, lambda item: item in valid_run_types)
-    validate_arg(operating_system, lambda item: item in valid_os)
 
     if clr_root is None:
         raise Exception('--clr_root must be set')
@@ -133,12 +169,13 @@ def validate_args(args):
         benchview_path = os.path.normpath(benchview_path)
         validate_arg(benchview_path, lambda item: os.path.isdir(benchview_path))
 
-    args = (arch, operating_system, build_type, run_type, clr_root, assembly_root, benchview_path)
+    args = (arch, operating_system, os_group, build_type, run_type, clr_root, assembly_root, benchview_path)
 
     # Log configuration
     log('Configuration:')
     log(' arch: %s' % arch)
     log(' os: %s' % operating_system)
+    log(' os_group: %s' % os_group)
     log(' build_type: %s' % build_type)
     log(' run_type: %s' % run_type)
     log(' clr_root: %s' % clr_root)
@@ -228,9 +265,9 @@ def runIterations(dll_name, dll_path, iterations, crossgen_path, jit_path, assem
     for iteration in range(0,iterations):
         proc = subprocess.Popen(run_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
 
-        start_time = time.clock()
+        start_time = timeit.default_timer()
         (out, err) = proc.communicate()
-        end_time = time.clock()
+        end_time = timeit.default_timer()
 
         if proc.returncode == 0:
             # Calculate the runtime
@@ -248,17 +285,22 @@ def runIterations(dll_name, dll_path, iterations, crossgen_path, jit_path, assem
 
 def main(args):
     global dll_list
+    global jit_list
+    global os_group_list
+    global python_exe_list
 
-    architecture, operating_system, build_type, run_type, clr_root, assembly_root, benchview_path = validate_args(args)
+    architecture, operating_system, os_group, build_type, run_type, clr_root, assembly_root, benchview_path = validate_args(args)
     arch = architecture
 
-    current_dir = os.getcwd()
-    jit = "clrjit.dll"
-
-    # jit32 uses compatjit.dll
     if architecture == 'x86jit32':
         arch = 'x86'
-        jit = "compatjit.dll"
+
+    current_dir = os.getcwd()
+    jit = jit_list[os_group][architecture]
+    crossgen = 'crossgen'
+
+    if os_group == 'Windows_NT':
+        crossgen += '.exe'
 
     # Make sandbox
     sandbox_path = os.path.join(clr_root, "sandbox")
@@ -269,16 +311,17 @@ def main(args):
     os.chdir(sandbox_path)
 
     # Set up paths
-    test_root = os.path.join(clr_root, 'bin', 'Tests', operating_system + '.' + arch + '.' + build_type)
-    core_root_path = os.path.join(test_root, 'Tests', 'Core_Root')
+    bin_path = os.path.join(clr_root, 'bin', 'Product',  os_group + '.' + arch + '.' + build_type)
 
-    crossgen_path = os.path.join(core_root_path,'crossgen.exe')
-    jit_path = os.path.join(core_root_path, jit)
+    crossgen_path = os.path.join(bin_path,crossgen)
+    jit_path = os.path.join(bin_path, jit)
 
     iterations = 6
 
+    python_exe = python_exe_list[os_group]
+
     # Run throughput testing
-    for dll_name in dll_list:
+    for dll_name in dll_list[os_group]:
         dll_file_name = dll_name + ".dll"
         dll_path = os.path.join(assembly_root, dll_file_name)
         dll_elapsed_times = runIterations(dll_file_name, dll_path, iterations, crossgen_path, jit_path, assembly_root)
@@ -290,7 +333,7 @@ def main(args):
                 shutil.copy(csv_file_name, clr_root)
 
                 # For each benchmark, call measurement.py
-                measurement_args = ["py",
+                measurement_args = [python_exe,
                         os.path.join(benchview_path, "measurement.py"),
                         "csv",
                         os.path.join(os.getcwd(), csv_file_name),
@@ -314,7 +357,7 @@ def main(args):
     # Upload the data
     if not benchview_path is None:
         # Call submission.py
-        submission_args = ["py",
+        submission_args = [python_exe,
                 os.path.join(benchview_path, "submission.py"),
                 "measurement.json",
                 "--build",
@@ -345,7 +388,7 @@ def main(args):
         proc.communicate()
 
         # Call upload.py
-        upload_args = ["py",
+        upload_args = [python_exe,
                 os.path.join(benchview_path, "upload.py"),
                 "submission.json",
                 "--container",