Small number of bug fixes. (#14900)
author    José Rivero <jorive@microsoft.com>
Wed, 8 Nov 2017 22:09:14 +0000 (14:09 -0800)
committer GitHub <noreply@github.com>
Wed, 8 Nov 2017 22:09:14 +0000 (14:09 -0800)
- Added a stability prefix to the scenario benchmark (JitBench); a sketch of how the prefix wraps the benchmark command is shown after this list.
- Specified an output directory for the `run-xunit-perf.cmd` script, removing the extra xcopy step that copied result files into the archive folder.
- Added a command line parser class to the illink scenario and changed its behavior so that unrecognized options (for example, options meant for xUnit) are ignored instead of causing a failure.
- Saved the tests' output logs into the sandbox_logs folder; the resulting directory layout is sketched after this list.
- Updated the label of the machine pool used by the illink scenario.
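
For context, a minimal sketch (not the exact script text) of how the stability prefix is meant to wrap the benchmark invocation in run-xunit-perf.cmd; "MyBenchmark.exe" is a placeholder benchmark name:

    rem START /B runs the command in the current console, /WAIT blocks until it exits,
    rem /HIGH raises its priority class, and /AFFINITY 0x2 pins it to the second logical core.
    set "STABILITY_PREFIX=START "CORECLR_PERF_RUN" /B /WAIT /HIGH /AFFINITY 0x2"
    set "LV_CMD=%STABILITY_PREFIX% corerun.exe PerfHarness.dll MyBenchmark.exe --perf:collect stopwatch"
    call :run_cmd !LV_CMD! 1>"%BENCHNAME_LOG_FILE_NAME%" 2>&1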
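
And a condensed sketch of the log layout produced by the new -outputdir handling (variable names match the script changes below; the workspace path is illustrative):

    rem Scenario tests and microbenchmarks get separate subtrees, then one folder per
    rem collection profile and per benchmark; the per-benchmark log lives inside it.
    set "LV_SANDBOX_OUTPUT_DIR=%WORKSPACE%\bin\sandbox_logs"
    if defined IS_SCENARIO_TEST (
      set "LV_BENCHMARK_OUTPUT_DIR=%LV_SANDBOX_OUTPUT_DIR%\Scenarios"
    ) else (
      set "LV_BENCHMARK_OUTPUT_DIR=%LV_SANDBOX_OUTPUT_DIR%\Microbenchmarks"
    )
    set "LV_BENCHMARK_OUTPUT_DIR=%LV_BENCHMARK_OUTPUT_DIR%\%ETW_COLLECTION%\%BENCHNAME%"
    set "BENCHNAME_LOG_FILE_NAME=%LV_BENCHMARK_OUTPUT_DIR%\Perf-%ETW_COLLECTION%-%BENCHNAME%.log"
    if not exist "%LV_BENCHMARK_OUTPUT_DIR%" mkdir "%LV_BENCHMARK_OUTPUT_DIR%"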

buildpipeline/perf-pipeline.groovy
perf.groovy
tests/scripts/run-xunit-perf.cmd
tests/src/performance/linkbench/BenchmarkOptions.cs [new file with mode: 0644]
tests/src/performance/linkbench/linkbench.cs
tests/src/performance/linkbench/linkbench.csproj

index 41f7194..71e6cab 100644
@@ -56,37 +56,45 @@ def windowsPerf(String arch, String config, String uploadString, String runType,
         bat "tests\\runtest.cmd ${config} ${arch} GenerateLayoutOnly"
 
         // We run run-xunit-perf differently for each of the different job types
+        String runXUnitCommonArgs = "-arch ${arch} -configuration ${config} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} ${pgoTestFlag} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -outputdir \"%WORKSPACE%\\bin\\sandbox_logs\""
         if (scenario == 'perf') {
-            String runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${config} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} ${pgoTestFlag} -jitName ${jit} -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH /AFFINITY 0x2\""
-            bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\performance\\perflab\\Perflab -library"
-            bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\Jit\\Performance\\CodeQuality"
-
-            bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\performance\\perflab\\Perflab -library -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi"
-            bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\Jit\\Performance\\CodeQuality -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi"
+            String runXUnitPerfCommonArgs = "${runXUnitCommonArgs} -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH /AFFINITY 0x2\""
+            String runXUnitPerflabArgs = "${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\performance\\perflab\\Perflab -library"
+            bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerflabArgs} -collectionFlags stopwatch"
+            bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerflabArgs} -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi"
+
+            String runXUnitCodeQualityArgs = "${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\Jit\\Performance\\CodeQuality"
+            bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitCodeQualityArgs} -collectionFlags stopwatch"
+            bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitCodeQualityArgs} -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi"
         }
         else if (scenario == 'jitbench') {
-            String runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${config} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} ${pgoTestFlag} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -scenarioTest"
-            bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\performance\\Scenario\\JitBench -group CoreCLR-Scenarios || (echo [ERROR] JitBench failed. 1>>\"${failedOutputLogFilename}\" && exit /b 1)"
+            String runXUnitPerfCommonArgs = "${runXUnitCommonArgs} -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH\" -scenarioTest"
+            runXUnitPerfCommonArgs = "${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\performance\\Scenario\\JitBench -group CoreCLR-Scenarios"
+            bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -collectionFlags stopwatch"
+
+            if (opt_level != 'min_opt') {
+                bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -collectionFlags BranchMispredictions+CacheMisses+InstructionRetired"
+            }
         }
         else if (scenario == 'illink') {
-            String runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${config} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} ${pgoTestFlag} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -scenarioTest"
-            bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\performance\\linkbench\\linkbench -group ILLink -nowarmup || (echo [ERROR] IlLink failed. 1>>\"${failedOutputLogFilename}\" && exit /b 1)"
+            String runXUnitPerfCommonArgs = "${runXUnitCommonArgs} -scenarioTest"
+            bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\performance\\linkbench\\linkbench -group ILLink -nowarmup"
         }
-        archiveArtifacts allowEmptyArchive: false, artifacts:'bin/toArchive/**,machinedata.json'
+        archiveArtifacts allowEmptyArchive: false, artifacts:'bin/sandbox_logs/**,machinedata.json'
     }
 }
 
 def windowsThroughput(String arch, String os, String config, String runType, String optLevel, String jit, String pgo, boolean isBaseline) {
     withCredentials([string(credentialsId: 'CoreCLR Perf BenchView Sas', variable: 'BV_UPLOAD_SAS_TOKEN')]) {
         checkout scm
-        
+
         String baselineString = ""
         if (isBaseline) {
             baselineString = "-baseline"
         }
 
         String pgoTestFlag = ((pgo == 'nopgo') ? '-nopgo' : '')
-        
+
         dir ('.') {
             unstash "nt-${arch}-${pgo}${baselineString}-build-artifacts"
             unstash "benchview-tools"
@@ -214,7 +222,7 @@ stage ('Get Metadata and download Throughput Benchmarks') {
             "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"${benchViewName}\" --user-email \"${benchViewUser}\"\n" +
             "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}\n" +
             "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"${benchViewName}-baseline\" --user-email \"${benchViewUser}\" -o submission-metadata-baseline.json\n"
-        
+
         // TODO: revisit these moves. Originally, stash could not find the directories as currently named
         bat "move Microsoft.BenchView.ThroughputBenchmarks.x64.Windows_NT x64ThroughputBenchmarks"
         bat "move Microsoft.BenchView.ThroughputBenchmarks.x86.Windows_NT x86ThroughputBenchmarks"
@@ -300,24 +308,26 @@ stage ('Build Product') {
 def innerLoopTests = [:]
 
 ['x64', 'x86'].each { arch ->
-    [true,false].each { isBaseline ->
-        String baseline = ""
-        if (isBaseline) {
-            baseline = " baseline"
-        }
-        if (isPR() || !isBaseline) {
-            innerLoopTests["windows ${arch} ryujit full_opt pgo${baseline} perf"] = {
-               simpleNode('windows_server_2016_clr_perf', 180) {
-                   windowsPerf(arch, config, uploadString, runType, 'full_opt', 'ryujit', 'pgo', 'perf', isBaseline)
-               }
+    ['full_opt'].each { opt_level ->
+        [true,false].each { isBaseline ->
+            String baseline = ""
+            if (isBaseline) {
+                baseline = " baseline"
             }
+            if (isPR() || !isBaseline) {
+                innerLoopTests["windows ${arch} ryujit ${opt_level} pgo${baseline} perf"] = {
+                    simpleNode('windows_server_2016_clr_perf', 180) {
+                        windowsPerf(arch, config, uploadString, runType, opt_level, 'ryujit', 'pgo', 'perf', isBaseline)
+                    }
+                }
 
-            if (arch == 'x64') {
-               innerLoopTests["linux ${arch} ryujit full_opt pgo${baseline} perf"] = {
-                   simpleNode('linux_clr_perf', 180) {
-                       linuxPerf('x64', 'Ubuntu14.04', config, uploadString, runType, 'full_opt', 'pgo', isBaseline)
-                   }
-               }
+                if (arch == 'x64') {
+                    innerLoopTests["linux ${arch} ryujit ${opt_level} pgo${baseline} perf"] = {
+                        simpleNode('linux_clr_perf', 180) {
+                            linuxPerf('x64', 'Ubuntu14.04', config, uploadString, runType, opt_level, 'pgo', isBaseline)
+                        }
+                    }
+                }
             }
         }
     }
@@ -334,7 +344,7 @@ if (!isPR()) {
     }
 
     outerLoopTests["windows ${arch} ryujit full_opt pgo${baseline} illink"] = {
-        simpleNode('windows_server_2015_clr_perf', 180) {
+        simpleNode('Windows_NT', '20170427-elevated') {
             windowsPerf(arch, config, uploadString, runType, 'full_opt', 'ryujit', 'pgo', 'illink', false)
         }
     }
index 22fa822..04dc4ef 100644
@@ -87,28 +87,22 @@ def static getOSGroup(def os) {
 
                             batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
 
-                            def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH /AFFINITY 0x2\""
+                            def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -outputdir \"%WORKSPACE%\\bin\\sandbox_logs\" -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH /AFFINITY 0x2\""
 
                             // Run with just stopwatch: Profile=Off
                             batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library")
-                            batchFile("xcopy.exe /VYQK bin\\sandbox\\Logs\\Perf-*.* bin\\toArchive\\sandbox\\Logs\\Perflab\\Off\\")
-
                             batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality")
-                            batchFile("xcopy.exe /VYQK bin\\sandbox\\Logs\\Perf-*.* bin\\toArchive\\sandbox\\Logs\\CodeQuality\\Off\\")
 
                             // Run with the full set of counters enabled: Profile=On
                             if (opt_level != 'min_opt') {
                                 batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi")
-                                batchFile("xcopy.exe /VYQK bin\\sandbox\\Logs\\Perf-*.* bin\\toArchive\\sandbox\\Logs\\Perflab\\On\\")
-
                                 batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi")
-                                batchFile("xcopy.exe /VYQK bin\\sandbox\\Logs\\Perf-*.* bin\\toArchive\\sandbox\\Logs\\CodeQuality\\On\\")
                             }
                         }
                     }
 
                     def archiveSettings = new ArchivalSettings()
-                    archiveSettings.addFiles('bin/toArchive/**')
+                    archiveSettings.addFiles('bin/sandbox_logs/**')
                     archiveSettings.addFiles('machinedata.json')
 
                     Utilities.addArchival(newJob, archiveSettings)
@@ -605,22 +599,20 @@ parallel(
 
                             batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
 
-                            def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -scenarioTest"
+                            def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -outputdir \"%WORKSPACE%\\bin\\sandbox_logs\" -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH\" -scenarioTest"
 
                             // Profile=Off
                             batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\Scenario\\JitBench -group CoreCLR-Scenarios")
-                            batchFile("xcopy.exe /VYQK bin\\sandbox\\Logs\\Perf-*.* bin\\toArchive\\sandbox\\Logs\\Scenario\\JitBench\\Off\\")
 
                             // Profile=On
                             if (opt_level != 'min_opt') {
                                 batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\Scenario\\JitBench -group CoreCLR-Scenarios -collectionFlags BranchMispredictions+CacheMisses+InstructionRetired")
-                                batchFile("xcopy.exe /VYQK bin\\sandbox\\Logs\\Perf-*.* bin\\toArchive\\sandbox\\Logs\\Scenario\\JitBench\\On\\")
                             }
                         }
                     }
 
                     def archiveSettings = new ArchivalSettings()
-                    archiveSettings.addFiles('bin/toArchive/**')
+                    archiveSettings.addFiles('bin/sandbox_logs/**')
                     archiveSettings.addFiles('machinedata.json')
 
                     Utilities.addArchival(newJob, archiveSettings)
@@ -800,16 +792,15 @@ parallel(
 
                             batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
 
-                            def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -scenarioTest"
+                            def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -outputdir \"%WORKSPACE%\\bin\\sandbox_logs\" -scenarioTest"
 
                             // Scenario: ILLink
                             batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\linkbench\\linkbench -group ILLink -nowarmup")
-                            batchFile("xcopy.exe /VYQK bin\\sandbox\\Logs\\Perf-*.* bin\\toArchive\\sandbox\\Logs\\Scenario\\LinkBench\\")
                         }
                     }
 
                     def archiveSettings = new ArchivalSettings()
-                    archiveSettings.addFiles('bin/toArchive/**')
+                    archiveSettings.addFiles('bin/sandbox_logs/**')
                     archiveSettings.addFiles('machinedata.json')
 
                     // Set the label (currently we are only measuring size, therefore we are running on VM).
index 971403c..a47230a 100644
@@ -10,7 +10,7 @@ setlocal ENABLEDELAYEDEXPANSION
   set BENCHVIEW_RUN_TYPE=local
   set CORECLR_REPO=%CD%
   set LV_SANDBOX_DIR=%CORECLR_REPO%\bin\sandbox
-  set LV_BENCHMARKS_OUTPUT_DIR=%LV_SANDBOX_DIR%\Logs
+  set LV_SANDBOX_OUTPUT_DIR=%LV_SANDBOX_DIR%\Logs
   set TEST_FILE_EXT=exe
   set TEST_ARCH=x64
   set TEST_ARCHITECTURE=x64
@@ -36,8 +36,8 @@ setlocal ENABLEDELAYEDEXPANSION
   call :set_collection_config   || exit /b 1
   call :verify_benchview_tools  || exit /b 1
   call :verify_core_overlay     || exit /b 1
-  call :set_perf_run_log        || exit /b 1
   call :setup_sandbox           || exit /b 1
+  call :set_perf_run_log        || exit /b 1
   call :build_perfharness       || exit /b 1
 
   call :run_cmd xcopy /sy "%CORECLR_REPO%\bin\tests\Windows_NT.%TEST_ARCH%.%TEST_CONFIG%\Tests\Core_Root"\* . >> %RUNLOG% || exit /b 1
@@ -96,21 +96,39 @@ setlocal
   rem CORE_ROOT environment variable is used by some benchmarks such as Roslyn / CscBench.
   set CORE_ROOT=%LV_SANDBOX_DIR%
   set LV_RUNID=Perf-%ETW_COLLECTION%
-  set BENCHNAME_LOG_FILE_NAME=%LV_BENCHMARKS_OUTPUT_DIR%\%LV_RUNID%-%BENCHNAME%.log
 
+  if defined IS_SCENARIO_TEST (
+    set "LV_BENCHMARK_OUTPUT_DIR=%LV_SANDBOX_OUTPUT_DIR%\Scenarios"
+  ) else (
+    set "LV_BENCHMARK_OUTPUT_DIR=%LV_SANDBOX_OUTPUT_DIR%\Microbenchmarks"
+  )
+  set "LV_BENCHMARK_OUTPUT_DIR=%LV_BENCHMARK_OUTPUT_DIR%\%ETW_COLLECTION%\%BENCHNAME%"
+
+  set BENCHNAME_LOG_FILE_NAME=%LV_BENCHMARK_OUTPUT_DIR%\%LV_RUNID%-%BENCHNAME%.log
+
+  if not defined LV_BENCHMARK_OUTPUT_DIR (
+    call :print_error LV_BENCHMARK_OUTPUT_DIR was not defined.
+    exit /b 1
+  )
+  if not exist "%LV_BENCHMARK_OUTPUT_DIR%" mkdir "%LV_BENCHMARK_OUTPUT_DIR%"
+  if not exist "%LV_BENCHMARK_OUTPUT_DIR%" (
+    call :print_error Failed to create the "%LV_BENCHMARK_OUTPUT_DIR%" directory.
+    exit /b 1
+  )
 
   echo/
   echo/  ----------
   echo/  Running %LV_RUNID% %BENCHNAME%
   echo/  ----------
 
-  set LV_CMD=
+  set "LV_COMMON_ARGS="%LV_SANDBOX_DIR%\%BENCHNAME%.%TEST_FILE_EXT%" --perf:outputdir "%LV_BENCHMARK_OUTPUT_DIR%" --perf:runid "%LV_RUNID%""
   if defined IS_SCENARIO_TEST (
-    set "LV_CMD=%STABILITY_PREFIX% corerun.exe "%LV_SANDBOX_DIR%\%BENCHNAME%.%TEST_FILE_EXT%" --perf:outputdir "%LV_BENCHMARKS_OUTPUT_DIR%" --perf:runid "%LV_RUNID%" --target-architecture "%TEST_ARCHITECTURE%" --perf:collect %COLLECTION_FLAGS%"
+    set "LV_COMMON_ARGS=%LV_COMMON_ARGS% --target-architecture "%TEST_ARCHITECTURE%""
   ) else (
-    set "LV_CMD=%STABILITY_PREFIX% corerun.exe PerfHarness.dll "%LV_SANDBOX_DIR%\%BENCHNAME%.%TEST_FILE_EXT%" --perf:outputdir "%LV_BENCHMARKS_OUTPUT_DIR%" --perf:runid "%LV_RUNID%" --perf:collect %COLLECTION_FLAGS%"
+    set "LV_COMMON_ARGS=PerfHarness.dll %LV_COMMON_ARGS%"
   )
 
+  set "LV_CMD=%STABILITY_PREFIX% corerun.exe %LV_COMMON_ARGS% --perf:collect %COLLECTION_FLAGS%"
   call :print_to_console $ !LV_CMD!
   call :run_cmd !LV_CMD! 1>"%BENCHNAME_LOG_FILE_NAME%" 2>&1
 
@@ -229,7 +247,7 @@ rem ****************************************************************************
     goto :parse_command_line_arguments
   )
   IF /I [%~1] == [-outputdir] (
-    set LV_BENCHMARKS_OUTPUT_DIR=%~2
+    set LV_SANDBOX_OUTPUT_DIR=%~2
     shift
     shift
     goto :parse_command_line_arguments
@@ -297,11 +315,12 @@ rem ****************************************************************************
 rem ****************************************************************************
 rem   Sets the script's output log file.
 rem ****************************************************************************
-  if NOT EXIST "%CORECLR_REPO%\bin\Logs" (
-    call :print_error Cannot find the Logs folder '%CORECLR_REPO%\bin\Logs'.
+  if NOT EXIST "%LV_SANDBOX_OUTPUT_DIR%" mkdir "%LV_SANDBOX_OUTPUT_DIR%"
+  if NOT EXIST "%LV_SANDBOX_OUTPUT_DIR%" (
+    call :print_error Cannot create the Logs folder "%LV_SANDBOX_OUTPUT_DIR%".
     exit /b 1
   )
-  set RUNLOG=%CORECLR_REPO%\bin\Logs\perfrun.log
+  set "RUNLOG=%LV_SANDBOX_OUTPUT_DIR%\perfrun.log"
   exit /b 0
 
 :setup_sandbox
@@ -315,7 +334,10 @@ rem ****************************************************************************
   )
 
   if exist "%LV_SANDBOX_DIR%" rmdir /s /q "%LV_SANDBOX_DIR%"
-  if exist "%LV_SANDBOX_DIR%" call :print_error Failed to remove the "%LV_SANDBOX_DIR%" folder& exit /b 1
+  if exist "%LV_SANDBOX_DIR%" (
+    call :print_error Failed to remove the "%LV_SANDBOX_DIR%" folder
+    exit /b 1
+  )
 
   if not exist "%LV_SANDBOX_DIR%" mkdir "%LV_SANDBOX_DIR%"
   if not exist "%LV_SANDBOX_DIR%" (
@@ -323,12 +345,6 @@ rem ****************************************************************************
     exit /b 1
   )
 
-  if not exist "%LV_BENCHMARKS_OUTPUT_DIR%" mkdir "%LV_BENCHMARKS_OUTPUT_DIR%"
-  if not exist "%LV_BENCHMARKS_OUTPUT_DIR%" (
-    call :print_error Failed to create the "%LV_BENCHMARKS_OUTPUT_DIR%" folder.
-    exit /b 1
-  )
-
   cd "%LV_SANDBOX_DIR%"
   exit /b %ERRORLEVEL%
 
@@ -336,6 +352,10 @@ rem ****************************************************************************
 rem ****************************************************************************
 rem   Restores and publish the PerfHarness.
 rem ****************************************************************************
+  call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" --info || (
+    call :print_error Failed to get information about the CLI tool.
+    exit /b 1
+  )
   call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" restore "%CORECLR_REPO%\tests\src\Common\PerfHarness\PerfHarness.csproj" || (
     call :print_error Failed to restore PerfHarness.csproj
     exit /b 1
@@ -366,14 +386,12 @@ rem ****************************************************************************
 
   rem Currently xUnit Performance Api saves the scenario output
   rem   files on the current working directory.
-  set LV_PATTERN="%LV_BENCHMARKS_OUTPUT_DIR%\%LV_RUNID%-%BENCHNAME%.xml"
-  rem The first pattern is the general case, the second is used by IlLink
-  if defined IS_SCENARIO_TEST set LV_PATTERN="%LV_BENCHMARKS_OUTPUT_DIR%\%LV_RUNID%-%BENCHNAME%.xml" "%LV_BENCHMARKS_OUTPUT_DIR%\%LV_RUNID%-*-%BENCHNAME%.xml"
-
+  set "LV_PATTERN=%LV_BENCHMARK_OUTPUT_DIR%\%LV_RUNID%-*.xml"
   for %%f in (%LV_PATTERN%) do (
     if exist "%%~f" (
       call :run_cmd py.exe "%BENCHVIEW_PATH%\measurement.py" %LV_MEASUREMENT_ARGS% "%%~f" || (
-        call :print_error Failed to generate BenchView measurement data.
+        call :print_error
+        type "%%~f"
         exit /b 1
       )
     )
diff --git a/tests/src/performance/linkbench/BenchmarkOptions.cs b/tests/src/performance/linkbench/BenchmarkOptions.cs
new file mode 100644
index 0000000..068f2fc
--- /dev/null
@@ -0,0 +1,121 @@
+using CommandLine;
+using CommandLine.Text;
+using Microsoft.Xunit.Performance.Api;
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Linq;
+using System.Reflection;
+using System.Text;
+
+namespace LinkBench
+{
+    internal sealed class BenchmarkOptions
+    {
+        [Option("nosetup", Default = true, Required = false, HelpText = "Do not clone and fixup benchmark repositories.")]
+        public bool DoSetup { get; set; } = true;
+
+        [Option("nobuild", Default = true, Required = false, HelpText = "Do not build and link benchmarks.")]
+        public bool DoBuild { get; set; } = true;
+
+        [Option("benchmarks", Required = false, Separator = ',',
+            HelpText = "Any of: HelloWorld, WebAPI, MusicStore, MusicStore_R2R, CoreFX, Roslyn. Default is to run all the above benchmarks.")]
+        public IEnumerable<string> BenchmarkNames
+        {
+            get => _benchmarkNames;
+            set
+            {
+                if (value == null)
+                    throw new ArgumentNullException("Missing benchmark names.");
+
+                if (value.Count() == 0)
+                {
+                    _benchmarkNames = ValidBenchmarkNames;
+                    return;
+                }
+
+                var setDifference = value
+                    .Except(ValidBenchmarkNames, StringComparer.OrdinalIgnoreCase);
+                if (setDifference.Count() != 0)
+                    throw new ArgumentException($"Invalid Benchmark name(s) specified: {string.Join(", ", setDifference)}");
+                _benchmarkNames = value;
+            }
+        }
+
+        private IEnumerable<string> ValidBenchmarkNames => LinkBench.Benchmarks.Select(benchmark => benchmark.Name);
+
+        public static BenchmarkOptions Parse(string[] args)
+        {
+            using (var parser = new Parser((settings) => {
+                settings.CaseInsensitiveEnumValues = true;
+                settings.CaseSensitive = false;
+                settings.HelpWriter = new StringWriter();
+                settings.IgnoreUnknownArguments = true;
+            }))
+            {
+                BenchmarkOptions options = null;
+                parser.ParseArguments<BenchmarkOptions>(args)
+                    .WithParsed(parsed => options = parsed)
+                    .WithNotParsed(errors => {
+                        foreach (var error in errors)
+                        {
+                            switch (error.Tag)
+                            {
+                                case ErrorType.MissingValueOptionError:
+                                    throw new ArgumentException(
+                                            $"Missing value option for command line argument '{(error as MissingValueOptionError).NameInfo.NameText}'");
+                                case ErrorType.HelpRequestedError:
+                                    Console.WriteLine(Usage());
+                                    Environment.Exit(0);
+                                    break;
+                                case ErrorType.VersionRequestedError:
+                                    Console.WriteLine(new AssemblyName(typeof(BenchmarkOptions).GetTypeInfo().Assembly.FullName).Version);
+                                    Environment.Exit(0);
+                                    break;
+                                case ErrorType.BadFormatTokenError:
+                                case ErrorType.UnknownOptionError:
+                                case ErrorType.MissingRequiredOptionError:
+                                    throw new ArgumentException(
+                                            $"Missing required  command line argument '{(error as MissingRequiredOptionError).NameInfo.NameText}'");
+                                case ErrorType.MutuallyExclusiveSetError:
+                                case ErrorType.BadFormatConversionError:
+                                case ErrorType.SequenceOutOfRangeError:
+                                case ErrorType.RepeatedOptionError:
+                                case ErrorType.NoVerbSelectedError:
+                                case ErrorType.BadVerbSelectedError:
+                                case ErrorType.HelpVerbRequestedError:
+                                    break;
+                            }
+                        }
+                    });
+                return options;
+            }
+        }
+
+        public static string Usage()
+        {
+            var parser = new Parser((parserSettings) => {
+                parserSettings.CaseInsensitiveEnumValues = true;
+                parserSettings.CaseSensitive = false;
+                parserSettings.EnableDashDash = true;
+                parserSettings.HelpWriter = new StringWriter();
+                parserSettings.IgnoreUnknownArguments = true;
+            });
+
+            var helpTextString = new HelpText {
+                AddDashesToOption = true,
+                AddEnumValuesToHelpText = true,
+                AdditionalNewLineAfterOption = false,
+                Heading = "LinkBenchHarness",
+                MaximumDisplayWidth = 80,
+            }.AddOptions(parser.ParseArguments<BenchmarkOptions>(new string[] { "--help" })).ToString();
+
+            var sb = new StringBuilder(helpTextString);
+            sb.AppendLine();
+            sb.AppendLine(XunitPerformanceHarness.Usage());
+            return sb.ToString();
+        }
+
+        private IEnumerable<string> _benchmarkNames;
+    }
+}
index 06b951c..885546f 100644
@@ -237,7 +237,7 @@ namespace LinkBench
         public static string AssetsDir;
         private static Benchmark CurrentBenchmark;
 
-        private static Benchmark[] Benchmarks =
+        public static readonly Benchmark[] Benchmarks =
         {
             // If no benchmark name is specified at the command line, 
             // all benchmarks are set to be run by default.
@@ -272,107 +272,41 @@ namespace LinkBench
                 () => Benchmark.AddLinkerReference("roslyn\\src\\Compilers\\CSharp\\csc\\csc.csproj"), true)
         };
 
-        static int UsageError()
+        public static int Main(string[] args)
         {
-            Console.WriteLine("Usage: LinkBench [--nosetup] [--nobuild] [--perf:runid <id>] [<benchmarks>]");
-            Console.WriteLine("  --nosetup: Don't clone and fixup benchmark repositories");
-            Console.WriteLine("  --nobuild: Don't build and link benchmarks");
-            Console.WriteLine("  --perf:runid: Specify the ID to append to benchmark result files");
-            Console.WriteLine("  --perf:outputdir: Specify the output directory used by xUnit Performance");
-            Console.WriteLine("    Benchmarks: HelloWorld, WebAPI, MusicStore, MusicStore_R2R, CoreFX, Roslyn");
-            Console.WriteLine("                Default is to run all the above benchmarks.");
-            return -4;
-        }
+            var options = BenchmarkOptions.Parse(args);
 
-        public static int Main(String [] args)
-        {
-            bool doSetup = true;
-            bool doBuild = true;
-            string runId = "";
-            string outputdir = ".";
-            string runOne = null;
             bool benchmarkSpecified = false;
-
-            for (int i = 0; i < args.Length; i++)
+            foreach (Benchmark benchmark in Benchmarks)
             {
-                if (String.Compare(args[i], "--nosetup", true) == 0)
-                {
-                    doSetup = false;
-                }
-                else if (String.Compare(args[i], "--nobuild", true) == 0)
-                {
-                    doSetup = false;
-                    doBuild = false;
-                }
-                else if (String.Compare(args[i], "--perf:runid", true) == 0)
-                {
-                    if (i + 1 < args.Length)
-                    {
-                        runId = args[++i] + "-";
-                    }
-                    else
-                    {
-                        Console.WriteLine("Missing runID ");
-                        return UsageError();
-                    }
-                }
-                else if (String.Compare(args[i], "--perf:outputdir", true) == 0)
+                if (options.BenchmarkNames.Contains(benchmark.Name, StringComparer.OrdinalIgnoreCase))
                 {
-                    if (i + 1 < args.Length)
-                    {
-                        outputdir = args[++i];
-                    }
-                    else
-                    {
-                        Console.WriteLine("Missing output directory.");
-                        return UsageError();
-                    }
+                    benchmark.SetToRun();
+                    benchmarkSpecified = true;
+                    break;
                 }
-                else if (args[i].Equals("--target-architecture", StringComparison.OrdinalIgnoreCase))
+            }
+
+            var arguments = new List<string>();
+            string runId = "";
+            for (int i = 0; i < args.Length; i++)
+            {
+                if (string.Compare(args[i], "--perf:runid", true) == 0)
                 {
                     if (i + 1 < args.Length)
                     {
-                        ++i; // Ignore this argument.
+                        runId = args[++i];
                     }
                     else
                     {
-                        Console.WriteLine("Missing target architecture.");
-                        return UsageError();
+                        Console.WriteLine("Missing --perf:runid");
+                        Console.WriteLine(BenchmarkOptions.Usage());
+                        Environment.Exit(1);
                     }
                 }
-                else if (args[i][0] == '-')
-                {
-                    Console.WriteLine("Unknown Option {0}", args[i]);
-                    return UsageError();
-                }
                 else
                 {
-                    foreach (Benchmark benchmark in Benchmarks)
-                    {
-                        if (String.Compare(args[i], benchmark.Name, true) == 0)
-                        {
-                            benchmark.SetToRun();
-                            benchmarkSpecified = true;
-                            break;
-                        }
-                    }
-
-                    if (!benchmarkSpecified)
-                    {
-                        Console.WriteLine("Unknown Benchmark {0}", args[i]);
-                    }
-                }
-            }
-
-            // If benchmarks are not explicitly specified, run the default set of benchmarks
-            if (!benchmarkSpecified)
-            {
-                foreach (Benchmark benchmark in Benchmarks)
-                {
-                    if (benchmark.runByDefault)
-                    {
-                        benchmark.SetToRun();
-                    }
+                    arguments.Add(args[i]);
                 }
             }
 
@@ -398,12 +332,12 @@ namespace LinkBench
             Environment.SetEnvironmentVariable("LinkBenchRoot", LinkBenchRoot);
             Environment.SetEnvironmentVariable("__dotnet", LinkBenchRoot + "\\.Net\\dotnet.exe");
             Environment.SetEnvironmentVariable("__dotnet2", LinkBenchRoot + "\\.Net2\\dotnet.exe");
-            
 
-            // Update the build files to facilitate the link step
-            if (doSetup)
+
+            PrintHeader("Update the build files to facilitate the link step.");
+            if (options.DoSetup)
             {
-                // Clone the benchmarks
+                PrintHeader("Clone the benchmarks.");
                 using (var setup = new Process())
                 {
                     setup.StartInfo.FileName = ScriptDir + "clone.cmd";
@@ -411,13 +345,12 @@ namespace LinkBench
                     setup.WaitForExit();
                     if (setup.ExitCode != 0)
                     {
-                        Console.WriteLine("Benchmark Setup failed");
+                        PrintHeader("Benchmark Setup failed");
                         return -2;
                     }
                 }
 
-                // Setup the benchmarks
-
+                PrintHeader("Setup the benchmarks.");
                 foreach (Benchmark benchmark in Benchmarks)
                 {
                     if (benchmark.doRun && benchmark.Setup != null)
@@ -427,7 +360,7 @@ namespace LinkBench
                 }
             }
 
-            if (doBuild)
+            if (options.DoBuild)
             {
                 // Run the setup Script, which clones, builds and links the benchmarks.
                 using (var setup = new Process())
@@ -456,15 +389,12 @@ namespace LinkBench
             {
                 CurrentBenchmark = Benchmarks[i];
                 if (!CurrentBenchmark.doRun)
-                {
                     continue;
-                }
 
-                string[] scriptArgs = {
-                    "--perf:runid", runId + CurrentBenchmark.Name,
-                    "--perf:outputdir", outputdir
-                };
-                using (var h = new XunitPerformanceHarness(scriptArgs))
+                var newArgs = new List<string>(arguments);
+                newArgs.AddRange(new[] { "--perf:runid", $"{runId}-{CurrentBenchmark.Name}", });
+                Console.WriteLine($"{string.Join(" ", newArgs)}");
+                using (var h = new XunitPerformanceHarness(newArgs.ToArray()))
                 {
                     var configuration = new ScenarioConfiguration(new TimeSpan(2000000), emptyCmd);
                     h.RunScenario(configuration, PostRun);
@@ -474,6 +404,14 @@ namespace LinkBench
             return 0;
         }
 
+        private static void PrintHeader(string message)
+        {
+            Console.WriteLine();
+            Console.WriteLine("**********************************************************************");
+            Console.WriteLine($"** [{DateTime.Now}] {message}");
+            Console.WriteLine("**********************************************************************");
+        }
+
         private static ScenarioBenchmark PostRun()
         {
             // The XUnit output doesn't print the benchmark name, so print it now.
index 8605f1a..8ce3ab8 100644
@@ -1,4 +1,4 @@
-<?xml version="1.0" encoding="utf-8"?>
+<?xml version="1.0" encoding="utf-8"?>
 <Project ToolsVersion="12.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
   <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.props))\dir.props" />
   <PropertyGroup>
@@ -29,6 +29,7 @@
     </CodeAnalysisDependentAssemblyPaths>
   </ItemGroup>
   <ItemGroup>
+    <Compile Include="BenchmarkOptions.cs" />
     <Compile Include="linkbench.cs" />
   </ItemGroup>
   <ItemGroup>
@@ -38,4 +39,4 @@
   <PropertyGroup>
     <ProjectAssetsFile>$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), performance.targets))\obj\project.assets.json</ProjectAssetsFile>
   </PropertyGroup>
-</Project>
+</Project>
\ No newline at end of file