def architecture = arch
def jobName = isSmoketest ? "perf_perflab_${os}_${arch}_smoketest" : "perf_perflab_${os}_${arch}"
- if (arch == 'x86')
- {
+ if (arch == 'x86') {
testEnv = '-testEnv %WORKSPACE%\\tests\\x86\\ryujit_x86_testenv.cmd'
}
}
}
- if (isPR)
- {
- parameters
- {
+ if (isPR) {
+ parameters {
stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that will be used to build the full title of a run in Benchview. The final name will be of the form <branch> private BenchviewCommitName')
}
}
- if (isSmoketest)
- {
- parameters
- {
+
+ if (isSmoketest) {
+ parameters {
stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '2', 'Sets the number of iterations to two. We want to do this so that we can run as fast as possible, as this is just for smoke testing')
stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '2', 'Sets the number of iterations to two. We want to do this so that we can run as fast as possible, as this is just for smoke testing')
}
}
- else
- {
- parameters
- {
+ else {
+ parameters {
stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '21', 'Sets the number of iterations to twenty-one. We are doing this to limit the amount of data that we upload, as 20 iterations is enough to get a good sample')
stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '21', 'Sets the number of iterations to twenty-one. We are doing this to limit the amount of data that we upload, as 20 iterations is enough to get a good sample')
}
}
+
def configuration = 'Release'
def runType = isPR ? 'private' : 'rolling'
def benchViewName = isPR ? 'coreclr private %BenchviewCommitName%' : 'coreclr rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'
batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
+ def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH /AFFINITY 0x2\""
+
// Run with just stopwatch: Profile=Off
- batchFile("tests\\scripts\\run-xunit-perf.cmd -arch ${arch} -configuration ${configuration} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH /AFFINITY 0x2\"")
- batchFile("tests\\scripts\\run-xunit-perf.cmd -arch ${arch} -configuration ${configuration} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH /AFFINITY 0x2\"")
+ batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library")
+ batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality")
// Run with the full set of counters enabled: Profile=On
- batchFile("tests\\scripts\\run-xunit-perf.cmd -arch ${arch} -configuration ${configuration} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH /AFFINITY 0x2\"")
- batchFile("tests\\scripts\\run-xunit-perf.cmd -arch ${arch} -configuration ${configuration} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH /AFFINITY 0x2\"")
+ batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi")
+ batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi")
}
}
- if (isSmoketest)
- {
+ if (isSmoketest) {
Utilities.setMachineAffinity(newJob, "Windows_NT", '20170427-elevated')
}
+
// Save machinedata.json to /artifact/bin/ Jenkins dir
def archiveSettings = new ArchivalSettings()
- archiveSettings.addFiles('Perf-*.xml')
- archiveSettings.addFiles('Perf-*.etl')
- archiveSettings.addFiles('Perf-*.log')
+ archiveSettings.addFiles('.\\bin\\sandbox\\Logs\\Perf-*.xml')
+ archiveSettings.addFiles('.\\bin\\sandbox\\Logs\\Perf-*.etl')
+ archiveSettings.addFiles('.\\bin\\sandbox\\Logs\\Perf-*.log')
archiveSettings.addFiles('machinedata.json')
Utilities.addArchival(newJob, archiveSettings)
}
}
- if (isPR)
- {
- parameters
- {
+ if (isPR) {
+ parameters {
stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that will be used to build the full title of a run in Benchview.')
}
}
+
def configuration = 'Release'
def runType = isPR ? 'private' : 'rolling'
def benchViewName = isPR ? 'coreclr-throughput private %BenchviewCommitName%' : 'coreclr-throughput rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'
steps {
// Batch
-
batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os}\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os}\"")
batchFile("C:\\Tools\\nuget.exe install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
Utilities.addArchival(newBuildJob, "bin/Product/**,bin/obj/*/tests/**/*.dylib,bin/obj/*/tests/**/*.so", "bin/Product/**/.nuget/**")
}
+
// Actual perf testing on the following OSes
def perfOSList = ['Ubuntu14.04']
perfOSList.each { os ->
}
}
- if (isPR)
- {
- parameters
- {
+ if (isPR) {
+ parameters {
stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that will be used to build the full title of a run in Benchview. The final name will be of the form <branch> private BenchviewCommitName')
}
}
// Save machinedata.json to /artifact/bin/ Jenkins dir
def archiveSettings = new ArchivalSettings()
- archiveSettings.addFiles('Perf-*.log')
- archiveSettings.addFiles('Perf-*.xml')
+ archiveSettings.addFiles('./bin/sandbox/Logs/Perf-*.log')
+ archiveSettings.addFiles('./bin/sandbox/Logs/Perf-*.xml')
archiveSettings.addFiles('machinedata.json')
Utilities.addArchival(newJob, archiveSettings)
}
}
- if (isPR)
- {
- parameters
- {
+ if (isPR) {
+ parameters {
stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that will be used to build the full title of a run in Benchview. The final name will be of the form <branch> private BenchviewCommitName')
}
}
- parameters
- {
+ parameters {
stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '1', 'Size test, one iteration is sufficient')
stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '1', 'Size test, one iteration is sufficient')
}
+
def configuration = 'Release'
def runType = isPR ? 'private' : 'rolling'
def benchViewName = isPR ? 'CoreCLR-Scenarios private %BenchviewCommitName%' : 'CoreCLR-Scenarios rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'
batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
+ def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} -scenarioTest"
+ def failedOutputLogFilename = "run-xunit-perf-scenario.log"
+
+ // Use a sentinel file to track scenario failures: each failing scenario appends to it and exits 0 so the remaining scenarios still run, then the job fails at the end if the file exists.
+ batchFile("if exist \"${failedOutputLogFilename}\" del /q /f \"${failedOutputLogFilename}\"")
+ batchFile("if exist \"${failedOutputLogFilename}\" (echo [ERROR] Failed to delete previously created \"${failedOutputLogFilename}\" file.& exit /b 1)")
+
// Scenario: JitBench
- batchFile("tests\\scripts\\run-xunit-perf.cmd -arch ${arch} -configuration ${configuration} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\Scenario\\JitBench -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} -scenarioTest -group CoreCLR-Scenarios")
+ batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\Scenario\\JitBench -group CoreCLR-Scenarios || (echo [ERROR] JitBench failed. 1>>\"${failedOutputLogFilename}\"& exit /b 0)")
// Scenario: ILLink
if (arch == 'x64') {
- batchFile("tests\\scripts\\run-xunit-perf.cmd -arch ${arch} -configuration ${configuration} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\linkbench\\linkbench -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -nowarmup -runtype ${runType} -scenarioTest -group ILLink")
+ batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\linkbench\\linkbench -group ILLink -nowarmup || (echo [ERROR] IlLink failed. 1>>\"${failedOutputLogFilename}\"& exit /b 0)")
}
+
+ batchFile("if exist \"${failedOutputLogFilename}\" (type \"${failedOutputLogFilename}\"& exit /b 1)")
}
- }
+ }
// Save machinedata.json to /artifact/bin/ Jenkins dir
def archiveSettings = new ArchivalSettings()
- archiveSettings.addFiles('Perf-*.xml')
- archiveSettings.addFiles('Perf-*.log')
+ archiveSettings.addFiles('.\\bin\\sandbox\\Perf-*.xml')
+ archiveSettings.addFiles('.\\bin\\sandbox\\Perf-*.log')
archiveSettings.addFiles('machinedata.json')
Utilities.addArchival(newJob, archiveSettings)
@rem The .NET Foundation licenses this file to you under the MIT license.
@rem See the LICENSE file in the project root for more information.
-@echo off
-@if defined _echo echo on
+@if not defined _echo echo off
setlocal ENABLEDELAYEDEXPANSION
set ERRORLEVEL=
set BENCHVIEW_RUN_TYPE=local
set CORECLR_REPO=%CD%
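+ rem Benchmark binaries are staged in the sandbox folder; xUnit Performance logs and XML files are written to its Logs subfolder.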
+ set LV_SANDBOX_DIR=%CORECLR_REPO%\bin\sandbox
+ set LV_BENCHMARKS_OUTPUT_DIR=%LV_SANDBOX_DIR%\Logs
set TEST_FILE_EXT=exe
set TEST_ARCH=x64
set TEST_ARCHITECTURE=x64
call :set_perf_run_log || exit /b 1
call :setup_sandbox || exit /b 1
- call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" restore "%CORECLR_REPO%\tests\src\Common\PerfHarness\PerfHarness.csproj" || exit /b 1
- call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" publish "%CORECLR_REPO%\tests\src\Common\PerfHarness\PerfHarness.csproj" -c Release -o "%CORECLR_REPO%\sandbox" || exit /b 1
+ call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" restore "%CORECLR_REPO%\tests\src\Common\PerfHarness\PerfHarness.csproj" || (
+ call :print_error Failed to restore PerfHarness.csproj
+ exit /b 1
+ )
+ call :run_cmd "%CORECLR_REPO%\Tools\dotnetcli\dotnet.exe" publish "%CORECLR_REPO%\tests\src\Common\PerfHarness\PerfHarness.csproj" -c Release -o "%LV_SANDBOX_DIR%" || (
+ call :print_error Failed to publish PerfHarness.csproj
+ exit /b 1
+ )
rem TODO: Remove the version of the package to copy; e.g., if multiple versions exist, should we error out?
- call :run_cmd xcopy /sy "%CORECLR_REPO%\packages\Microsoft.Diagnostics.Tracing.TraceEvent\1.0.3-alpha-experimental\lib\native"\* . >> %RUNLOG% || exit /b 1
- call :run_cmd xcopy /sy "%CORECLR_REPO%\bin\tests\Windows_NT.%TEST_ARCH%.%TEST_CONFIG%\Tests\Core_Root"\* . >> %RUNLOG% || exit /b 1
+ call :run_cmd xcopy /sy "%CORECLR_REPO%\bin\tests\Windows_NT.%TEST_ARCH%.%TEST_CONFIG%\Tests\Core_Root"\* . >> %RUNLOG% || exit /b 1
rem find and stage the tests
set /A "LV_FAILURES=0"
)
)
+ rem CORE_ROOT environment variable is used by some benchmarks such as Roslyn / CscBench.
+ set CORE_ROOT=%LV_SANDBOX_DIR%
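+ rem LV_RUNID prefixes every generated output file; the per-benchmark log is written to the Logs folder so Jenkins can archive it in place.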
+ set LV_RUNID=Perf-%ETW_COLLECTION%
+ set BENCHNAME_LOG_FILE_NAME=%LV_BENCHMARKS_OUTPUT_DIR%\%LV_RUNID%-%BENCHNAME%.log
+
echo/
echo/ ----------
- echo/ Running %BENCHNAME%
+ echo/ Running %LV_RUNID% %BENCHNAME%
echo/ ----------
- rem CORE_ROOT environment variable is used by some benchmarks such as Roslyn / CscBench.
- set CORE_ROOT=%CORECLR_REPO%\sandbox
-
- set LV_RUNID=Perf-%ETW_COLLECTION%
- set BENCHNAME_LOG_FILE_NAME=%LV_RUNID%-%BENCHNAME%.log
set LV_CMD=
if defined IS_SCENARIO_TEST (
- set "LV_CMD=corerun.exe "%CORECLR_REPO%\sandbox\%BENCHNAME%.%TEST_FILE_EXT%" --perf:runid "%LV_RUNID%" --target-architecture "%TEST_ARCHITECTURE%""
+ set "LV_CMD=corerun.exe "%LV_SANDBOX_DIR%\%BENCHNAME%.%TEST_FILE_EXT%" --perf:outputdir "%LV_BENCHMARKS_OUTPUT_DIR%" --perf:runid "%LV_RUNID%" --target-architecture "%TEST_ARCHITECTURE%""
) else (
- set "LV_CMD=%STABILITY_PREFIX% corerun.exe PerfHarness.dll "%CORECLR_REPO%\sandbox\%BENCHNAME%.%TEST_FILE_EXT%" --perf:runid "%LV_RUNID%" --perf:collect %COLLECTION_FLAGS%"
+ set "LV_CMD=%STABILITY_PREFIX% corerun.exe PerfHarness.dll "%LV_SANDBOX_DIR%\%BENCHNAME%.%TEST_FILE_EXT%" --perf:outputdir "%LV_BENCHMARKS_OUTPUT_DIR%" --perf:runid "%LV_RUNID%" --perf:collect %COLLECTION_FLAGS%"
)
call :print_to_console $ !LV_CMD!
call :generate_results_for_benchview || exit /b 1
)
- rem Save off the results to the root directory for recovery later in Jenkins
- for %%e in (xml etl log) do (
- IF EXIST ".\%LV_RUNID%-%BENCHNAME%.%%e" (
- call :run_cmd xcopy /vy ".\%LV_RUNID%-%BENCHNAME%.%%e" .. || exit /b 1
- )
- )
-
exit /b 0
:parse_command_line_arguments
shift
goto :parse_command_line_arguments
)
+ IF /I [%~1] == [-outputdir] (
+ set LV_BENCHMARKS_OUTPUT_DIR=%~2
+ shift
+ shift
+ goto :parse_command_line_arguments
+ )
if /I [%~1] == [-?] (
call :USAGE
exit /b 0
rem Creates the sandbox folder used by the script to copy binaries locally,
rem and execute benchmarks.
rem ****************************************************************************
- if exist sandbox rmdir /s /q sandbox
- if exist sandbox call :print_error Failed to remove the sandbox folder& exit /b 1
- if not exist sandbox mkdir sandbox
- if not exist sandbox call :print_error Failed to create the sandbox folder& exit /b 1
- cd sandbox
- exit /b 0
+ if not defined LV_SANDBOX_DIR (
+ call :print_error LV_SANDBOX_DIR was not defined.
+ exit /b 1
+ )
+
+ if exist "%LV_SANDBOX_DIR%" rmdir /s /q "%LV_SANDBOX_DIR%"
+ if exist "%LV_SANDBOX_DIR%" call :print_error Failed to remove the "%LV_SANDBOX_DIR%" folder& exit /b 1
+
+ if not exist "%LV_SANDBOX_DIR%" mkdir "%LV_SANDBOX_DIR%"
+ if not exist "%LV_SANDBOX_DIR%" (
+ call :print_error Failed to create the "%LV_SANDBOX_DIR%" folder.
+ exit /b 1
+ )
+
+ if not exist "%LV_BENCHMARKS_OUTPUT_DIR%" mkdir "%LV_BENCHMARKS_OUTPUT_DIR%"
+ if not exist "%LV_BENCHMARKS_OUTPUT_DIR%" (
+ call :print_error Failed to create the "%LV_BENCHMARKS_OUTPUT_DIR%" folder.
+ exit /b 1
+ )
+
+ cd "%LV_SANDBOX_DIR%"
+ exit /b %ERRORLEVEL%
:generate_results_for_benchview
rem ****************************************************************************
rem Generates results for BenchView, by appending new data to the existing
rem measurement.json file.
rem ****************************************************************************
+ if not defined LV_RUNID (
+ call :print_error LV_RUNID was not defined before calling generate_results_for_benchview.
+ exit /b 1
+ )
set BENCHVIEW_MEASUREMENT_PARSER=xunit
if defined IS_SCENARIO_TEST set BENCHVIEW_MEASUREMENT_PARSER=xunitscenario
set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% %HAS_WARMUP_RUN%
set LV_MEASUREMENT_ARGS=%LV_MEASUREMENT_ARGS% --append
- for /f %%f in ('dir /b Perf-*%BENCHNAME%.xml 2^>nul') do (
- call :run_cmd py.exe "%BENCHVIEW_PATH%\measurement.py" %LV_MEASUREMENT_ARGS% %%f
+ rem Currently the xUnit Performance API saves the scenario output
+ rem files in the current working directory.
+ set LV_PATTERN="%LV_BENCHMARKS_OUTPUT_DIR%\%LV_RUNID%-%BENCHNAME%.xml"
+ if defined IS_SCENARIO_TEST set LV_PATTERN="%LV_RUNID%-*-%BENCHNAME%.xml"
+
+ for %%f in (%LV_PATTERN%) do (
+ call :run_cmd py.exe "%BENCHVIEW_PATH%\measurement.py" %LV_MEASUREMENT_ARGS% "%%~f"
IF !ERRORLEVEL! NEQ 0 (
call :print_error Failed to generate BenchView measurement data.
rem ****************************************************************************
setlocal
set LV_SUBMISSION_ARGS=
- set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --build ..\build.json
- set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --machine-data ..\machinedata.json
- set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --metadata ..\submission-metadata.json
+ set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --build "%CORECLR_REPO%\build.json"
+ set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --machine-data "%CORECLR_REPO%\machinedata.json"
+ set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --metadata "%CORECLR_REPO%\submission-metadata.json"
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --group "%BENCHVIEW_GROUP%"
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --type "%BENCHVIEW_RUN_TYPE%"
set LV_SUBMISSION_ARGS=%LV_SUBMISSION_ARGS% --config-name "%TEST_CONFIG%"
rem Script's usage.
rem ****************************************************************************
set USAGE_DISPLAYED=1
- echo run-xunit-perf.cmd -testBinLoc ^<path_to_tests^> [-library] [-arch] ^<x86^|x64^> [-configuration] ^<Release^|Debug^> [-generateBenchviewData] ^<path_to_benchview_tools^> [-warmup] [-better] ^<asc ^| desc^> [-group] ^<group^> [-runtype] ^<rolling^|private^> [-scenarioTest] [-collectionFlags] ^<default^+CacheMisses^+InstructionRetired^+BranchMispredictions^+gcapi^>
+ echo run-xunit-perf.cmd -testBinLoc ^<path_to_tests^> [-library] [-arch] ^<x86^|x64^> [-configuration] ^<Release^|Debug^> [-generateBenchviewData] ^<path_to_benchview_tools^> [-warmup] [-better] ^<asc ^| desc^> [-group] ^<group^> [-runtype] ^<rolling^|private^> [-scenarioTest] [-collectionFlags] ^<default^+CacheMisses^+InstructionRetired^+BranchMispredictions^+gcapi^> [-outputdir] ^<outputdir^>
echo/
echo For the path to the tests you can pass a parent directory and the script will grovel for
echo all tests in subdirectories and run them.
echo Runtype sets the runtype that we upload to Benchview, rolling for regular runs, and private for
echo PRs.
echo -scenarioTest should be included if you are running a scenario benchmark.
+ echo -outputdir Specifies the directory where the generated performance output will be saved.
echo -collectionFlags This is used to specify what collection flags get passed to the performance
echo harness that runs the tests. If this is not specified, we only use stopwatch.
echo Other flags are "default", which is whatever the test being run specified, "CacheMisses",
#!/usr/bin/env bash
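+# Resolve the absolute directory of this script; the default output directory below is computed relative to it.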
+dp0=$( cd -P "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
+
function run_command {
echo ""
echo $USER@`hostname` "$PWD"
echo ' also have the BV_UPLOAD_SAS_TOKEN set to a SAS token for the Benchview upload container'
echo ' --benchViewOS=<os> : Specify the os that will be used to insert data into Benchview.'
echo ' --runType=<local|private|rolling> : Specify the runType for Benchview. [Default: local]'
+ echo ' --outputdir : Specifies the directory where the generated performance output will be saved.'
}
# libExtension determines extension for dynamic library files
collectionflags=stopwatch
hasWarmupRun=--drop-first-value
stabilityPrefix=
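+# Default directory for xUnit Performance logs and XML files; override with --outputdir=<dir>.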
+benchmarksOutputDir=$dp0/../../bin/sandbox/Logs
for i in "$@"
do
--stabilityPrefix=*)
stabilityPrefix=${i#*=}
;;
+ --outputdir=*)
+ benchmarksOutputDir=${i#*=}
+ ;;
--uploadToBenchview)
uploadToBenchview=TRUE
;;
create_core_overlay || { echo "Creating core overlay failed."; exit 1; }
precompile_overlay_assemblies || { echo "Precompiling overlay assemblies failed."; exit 1; }
-# Deploy xunit performance packages
+# Create the output Logs folder if it does not already exist.
+if [ ! -d "$benchmarksOutputDir" ]; then
+ mkdir -p "$benchmarksOutputDir" || { echo "Failed to create $benchmarksOutputDir"; exit 1; }
+fi
+
cd $CORE_ROOT
DO_SETUP=TRUE
if [ ${DO_SETUP} == "TRUE" ]; then
+ # Deploy xunit performance packages
$DOTNETCLI_PATH/dotnet restore $CORECLR_REPO/tests/src/Common/PerfHarness/PerfHarness.csproj || { echo "dotnet restore failed."; exit 1; }
$DOTNETCLI_PATH/dotnet publish $CORECLR_REPO/tests/src/Common/PerfHarness/PerfHarness.csproj -c Release -o "$coreOverlayDir" || { echo "dotnet publish failed."; exit 1; }
fi
cp "$directory/$filename"*.txt . || exit 1
fi
- # TODO: Do we need this here.
+ # FIXME: We should not need this here.
chmod u+x ./corerun
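+ # Compose the per-benchmark run id and the log/XML file paths under the shared output directory.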
+ xUnitRunId=Perf-$perfCollection
+ perfLogFileName=$benchmarksOutputDir/$xUnitRunId-$filename.log
+ perfXmlFileName=$benchmarksOutputDir/$xUnitRunId-$filename.xml
+
echo ""
echo "----------"
- echo " Running $testname"
+ echo " Running $xUnitRunId $testname"
echo "----------"
- run_command $stabilityPrefix ./corerun PerfHarness.dll $test --perf:runid Perf --perf:collect $collectionflags 1>"Perf-$filename.log" 2>&1 || exit 1
+
+ run_command $stabilityPrefix ./corerun PerfHarness.dll $test --perf:runid "$xUnitRunId" --perf:outputdir "$benchmarksOutputDir" --perf:collect $collectionflags 1>"$perfLogFileName" 2>&1 || exit 1
if [ -d "$BENCHVIEW_TOOLS_PATH" ]; then
- run_command python3.5 "$BENCHVIEW_TOOLS_PATH/measurement.py" xunit "Perf-$filename.xml" --better desc $hasWarmupRun --append || {
+ run_command python3.5 "$BENCHVIEW_TOOLS_PATH/measurement.py" xunit "$perfXmlFileName" --better desc $hasWarmupRun --append || {
echo [ERROR] Failed to generate BenchView data;
exit 1;
}
fi
-
- # Rename file to be archived by Jenkins.
- mv -f "Perf-$filename.log" "$CORECLR_REPO/Perf-$filename-$perfCollection.log" || {
- echo [ERROR] Failed to move "Perf-$filename.log" to "$CORECLR_REPO".
- exit 1;
- }
- mv -f "Perf-$filename.xml" "$CORECLR_REPO/Perf-$filename-$perfCollection.xml" || {
- echo [ERROR] Failed to move "Perf-$filename.xml" to "$CORECLR_REPO".
- exit 1;
- }
done
if [ -d "$BENCHVIEW_TOOLS_PATH" ]; then
args=measurement.json
- args+=" --build ../../../../../build.json"
- args+=" --machine-data ../../../../../machinedata.json"
- args+=" --metadata ../../../../../submission-metadata.json"
+ args+=" --build $CORECLR_REPO/build.json"
+ args+=" --machine-data $CORECLR_REPO/machinedata.json"
+ args+=" --metadata $CORECLR_REPO/submission-metadata.json"
args+=" --group $benchViewGroup"
args+=" --type $runType"
args+=" --config-name Release"
<Project Sdk="Microsoft.NET.Sdk">
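+ <!-- dependencies.props provides the package version properties used below (e.g. XUnitPerformanceApiVersion). -->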
+ <Import Project="..\..\..\..\dependencies.props" />
+
<PropertyGroup>
<OutputType>exe</OutputType>
<TargetFramework>netcoreapp2.0</TargetFramework>
</PropertyGroup>
<ItemGroup>
- <PackageReference Include="xunit.performance.api" Version="1.0.0-beta-build0007" />
+ <PackageReference Include="Microsoft.Diagnostics.Tracing.TraceEvent" Version="$(MicrosoftDiagnosticsTracingLibraryVersion)">
+ <IncludeAssets>All</IncludeAssets>
+ </PackageReference>
+ <PackageReference Include="xunit.performance.api" Version="$(XUnitPerformanceApiVersion)" />
</ItemGroup>
</Project>
\ No newline at end of file
return size;
}
- // Get the total size difference for all certificates in all managed binaries
+ // Get the total size difference for all certificates in all managed binaries
// in the unlinked and linked directories.
private double ComputeCertDiff()
{
// We should check only for BadImageFormatException.
// But Checking for any exception until the following
// issue is fixed:
- // https://github.com/dotnet/coreclr/issues/11499
+ // https://github.com/dotnet/coreclr/issues/11499
return false;
}
"corefx\\bin\\ILLinkTrimAssembly\\netcoreapp-Windows_NT-Release-x64\\trimmed"),
new Benchmark("Roslyn",
"roslyn\\Binaries\\Release\\Exes\\CscCore\\win7-x64\\publish",
- "roslyn\\Binaries\\Release\\Exes\\CscCore\\win7-x64\\Linked")
+ "roslyn\\Binaries\\Release\\Exes\\CscCore\\win7-x64\\Linked")
};
static int UsageError()
Console.WriteLine(" --nosetup: Don't clone and fixup benchmark repositories");
Console.WriteLine(" --nobuild: Don't build and link benchmarks");
Console.WriteLine(" --perf:runid: Specify the ID to append to benchmark result files");
+ Console.WriteLine(" --perf:outputdir: Specify the output directory used by xUnit Performance");
Console.WriteLine(" Benchmarks: HelloWorld, WebAPI, MusicStore, MusicStore_R2R, CoreFX, Roslyn");
Console.WriteLine(" Default is to run all the above benchmarks.");
return -4;
bool doSetup = true;
bool doBuild = true;
string runId = "";
+ string outputdir = "";
string runOne = null;
bool benchmarkSpecified = false;
return UsageError();
}
}
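+ // --perf:outputdir: directory where xUnit Performance output files should be written; forwarded to the harness via scriptArgs.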
+ else if (String.Compare(args[i], "--perf:outputdir", true) == 0)
+ {
+ if (i + 1 < args.Length)
+ {
+ outputdir = args[++i];
+ }
+ else
+ {
+ Console.WriteLine("Missing output directory.");
+ return UsageError();
+ }
+ }
else if (args[i].Equals("--target-architecture", StringComparison.OrdinalIgnoreCase))
{
if (i + 1 < args.Length)
if (String.Compare(benchmark.Name, "CoreFX", true) == 0)
{
// CoreFX is not enabled by default, because the lab cannot run it yet.
- // Jenkins runs on an older OS with path-length limit, which causes
+ // Jenkins runs on an older OS with path-length limit, which causes
// CoreFX build to fail.
continue;
}
}
}
- // Since this is a size measurement scenario, there are no iterations
+ // Since this is a size measurement scenario, there are no iterations
// to perform. So, create a process that does nothing, to satisfy XUnit.
// All size measurements are performed PostRun()
var emptyCmd = new ProcessStartInfo()
continue;
}
- string[] scriptArgs = { "--perf:runid", runId + CurrentBenchmark.Name };
+ string[] scriptArgs = {
+ "--perf:runid", runId + CurrentBenchmark.Name,
+ "--perf:outputdir", outputdir
+ };
using (var h = new XunitPerformanceHarness(scriptArgs))
{
h.RunScenario(emptyCmd, null, null, PostRun, scenarioConfiguration);