['Windows_NT'].each { os ->
['x64', 'x86'].each { arch ->
[true, false].each { isSmoketest ->
- def architecture = arch
- def jobName = isSmoketest ? "perf_perflab_${os}_${arch}_smoketest" : "perf_perflab_${os}_${arch}"
+ ['ryujit', 'legacy_backend'].each { jit ->
- if (arch == 'x86') {
- testEnv = '-testEnv %WORKSPACE%\\tests\\x86\\ryujit_x86_testenv.cmd'
- }
-
- def newJob = job(Utilities.getFullJobName(project, jobName, isPR)) {
- // Set the label.
- label('windows_clr_perf')
- wrappers {
- credentialsBinding {
- string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
- }
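+ // The legacy backend is an x86-only JIT, so skip the x64 + legacy_backend combination.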
+ if (arch == 'x64' && jit == 'legacy_backend')
+ {
+ return
}
- if (isPR) {
- parameters {
- stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that you will be used to build the full title of a run in Benchview. The final name will be of the form <branch> private BenchviewCommitName')
- }
- }
+ ['full_opt', 'min_opt'].each { opt_level ->
+ def architecture = arch
+ def jobName = isSmoketest ? "perf_perflab_${os}_${arch}_${opt_level}_${jit}_smoketest" : "perf_perflab_${os}_${arch}_${opt_level}_${jit}"
+ def testEnv = ""
- if (isSmoketest) {
- parameters {
- stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '2', 'Sets the number of iterations to two. We want to do this so that we can run as fast as possible as this is just for smoke testing')
- stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '2', 'Sets the number of iterations to two. We want to do this so that we can run as fast as possible as this is just for smoke testing')
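+ // For the legacy backend, point run-xunit-perf at the legacyjit x86 test environment script (assumed to set the COMPlus variables that select the legacy JIT).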
+ if (jit == 'legacy_backend')
+ {
+ testEnv = '-testEnv %WORKSPACE%\\tests\\legacyjit_x86_testenv.cmd'
}
- }
- else {
- parameters {
- stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '21', 'Sets the number of iterations to twenty one. We are doing this to limit the amount of data that we upload as 20 iterations is enought to get a good sample')
- stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '21', 'Sets the number of iterations to twenty one. We are doing this to limit the amount of data that we upload as 20 iterations is enought to get a good sample')
- }
- }
- def configuration = 'Release'
- def runType = isPR ? 'private' : 'rolling'
- def benchViewName = isPR ? 'coreclr private %BenchviewCommitName%' : 'coreclr rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'
- def uploadString = isSmoketest ? '' : '-uploadToBenchview'
-
- steps {
- // Batch
-
- batchFile("powershell wget https://dist.nuget.org/win-x86-commandline/latest/nuget.exe -OutFile \"%WORKSPACE%\\nuget.exe\"")
- batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
- batchFile("\"%WORKSPACE%\\nuget.exe\" install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
- //Do this here to remove the origin but at the front of the branch name as this is a problem for BenchView
- //we have to do it all as one statement because cmd is called each time and we lose the set environment variable
- batchFile("if \"%GIT_BRANCH:~0,7%\" == \"origin/\" (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH:origin/=%\") else (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH%\")\n" +
- "set \"BENCHVIEWNAME=${benchViewName}\"\n" +
- "set \"BENCHVIEWNAME=%BENCHVIEWNAME:\"=%\"\n" +
- "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"%BENCHVIEWNAME%\" --user \"dotnet-bot@microsoft.com\"\n" +
- "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}")
- batchFile("py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\"")
- batchFile("set __TestIntermediateDir=int&&build.cmd ${configuration} ${architecture}")
-
- batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
-
- def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH /AFFINITY 0x2\""
-
- // Run with just stopwatch: Profile=Off
- batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library")
- batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality")
-
- // Run with the full set of counters enabled: Profile=On
- batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi")
- batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi")
- }
- }
+ def newJob = job(Utilities.getFullJobName(project, jobName, isPR)) {
+ // Set the label.
+ label('windows_clr_perf')
+ wrappers {
+ credentialsBinding {
+ string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
+ }
+ }
+
+ if (isPR) {
+ parameters {
+ stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that will be used to build the full title of a run in Benchview. The final name will be of the form <branch> private BenchviewCommitName')
+ }
+ }
+ if (isSmoketest) {
+ parameters {
+ stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '2', 'Sets the number of iterations to two so that the job runs as quickly as possible; this is just for smoke testing')
+ stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '2', 'Sets the number of iterations to two so that the job runs as quickly as possible; this is just for smoke testing')
+ }
+ }
+ else {
+ parameters {
+ stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '21', 'Sets the number of iterations to twenty-one. We do this to limit the amount of data we upload; 20 iterations is enough to get a good sample')
+ stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '21', 'Sets the number of iterations to twenty-one. We do this to limit the amount of data we upload; 20 iterations is enough to get a good sample')
+ }
+ }
+
+ def configuration = 'Release'
+ def runType = isPR ? 'private' : 'rolling'
+ def benchViewName = isPR ? 'coreclr private %BenchviewCommitName%' : 'coreclr rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'
+ def uploadString = isSmoketest ? '' : '-uploadToBenchview'
+
+ steps {
+ // Batch
+
+ batchFile("powershell wget https://dist.nuget.org/win-x86-commandline/latest/nuget.exe -OutFile \"%WORKSPACE%\\nuget.exe\"")
+ batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
+ batchFile("\"%WORKSPACE%\\nuget.exe\" install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
+ // Strip the "origin/" prefix from the front of the branch name, since it causes problems for BenchView.
+ // This must be done in a single statement: each batch step runs in a fresh cmd instance, so a set environment variable would not survive into the next step.
+ batchFile("if \"%GIT_BRANCH:~0,7%\" == \"origin/\" (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH:origin/=%\") else (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH%\")\n" +
+ "set \"BENCHVIEWNAME=${benchViewName}\"\n" +
+ "set \"BENCHVIEWNAME=%BENCHVIEWNAME:\"=%\"\n" +
+ "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"%BENCHVIEWNAME%\" --user \"dotnet-bot@microsoft.com\"\n" +
+ "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}")
+ batchFile("py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\"")
+ batchFile("set __TestIntermediateDir=int&&build.cmd ${configuration} ${architecture}")
+
+ batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
+
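+ // Common run-xunit-perf arguments: forward the test environment, opt level, and JIT selection, and use the stability prefix to run at high priority pinned to a single core (affinity 0x2).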
+ def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH /AFFINITY 0x2\""
+
+ // Run with just stopwatch: Profile=Off
+ batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library")
+ batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality")
+
+ // Run with the full set of counters enabled: Profile=On
+ batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi")
+ batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi")
+ }
+ }
- if (isSmoketest) {
- Utilities.setMachineAffinity(newJob, "Windows_NT", '20170427-elevated')
- }
+ if (isSmoketest) {
+ Utilities.setMachineAffinity(newJob, "Windows_NT", '20170427-elevated')
+ }
- // Save machinedata.json to /artifact/bin/ Jenkins dir
- def archiveSettings = new ArchivalSettings()
+ // Save machinedata.json to /artifact/bin/ Jenkins dir
+ def archiveSettings = new ArchivalSettings()
archiveSettings.addFiles('bin/sandbox/Logs/Perf-*.*')
- archiveSettings.addFiles('machinedata.json')
- Utilities.addArchival(newJob, archiveSettings)
+ archiveSettings.addFiles('machinedata.json')
+ Utilities.addArchival(newJob, archiveSettings)
- Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")
+ Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")
- newJob.with {
- wrappers {
- timeout {
- absolute(240)
+ newJob.with {
+ wrappers {
+ timeout {
+ absolute(240)
+ }
+ }
}
- }
- }
- if (isPR) {
- TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
- if (isSmoketest)
- {
- builder.setGithubContext("${os} ${arch} CoreCLR Perf Tests Correctness")
- }
- else
- {
- builder.setGithubContext("${os} ${arch} CoreCLR Perf Tests")
- builder.triggerOnlyOnComment()
- builder.setCustomTriggerPhrase("(?i).*test\\W+${os}\\W+${arch}\\W+perf.*")
+ if (isPR) {
+ TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
+ if (isSmoketest)
+ {
+ builder.setGithubContext("${os} ${arch} ${opt_level} ${jit} CoreCLR Perf Tests Correctness")
+ }
+ else
+ {
+ builder.setGithubContext("${os} ${arch} ${opt_level} ${jit} CoreCLR Perf Tests")
+ def opts = ""
+ if (opt_level == 'min_opt')
+ {
+ opts = '\\W+min_opt'
+ }
+ def jitt = ""
+ if (jit != 'ryujit')
+ {
+ jitt = "\\W+${jit}"
+ }
+
+ builder.triggerOnlyOnComment()
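+ // e.g. a comment like "test Windows_NT x86 min_opt legacy_backend perf" triggers the non-default configurations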
+ builder.setCustomTriggerPhrase("(?i).*test\\W+${os}\\W+${arch}${opts}${jitt}\\W+perf.*")
+ }
+ builder.triggerForBranch(branch)
+ builder.emitTrigger(newJob)
+ }
+ else {
+ // Set a push trigger
+ TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
+ builder.emitTrigger(newJob)
+ }
}
- builder.triggerForBranch(branch)
- builder.emitTrigger(newJob)
- }
- else {
- // Set a push trigger
- TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
- builder.emitTrigger(newJob)
}
}
}
[true, false].each { isPR ->
['Windows_NT'].each { os ->
['x64', 'x86'].each { arch ->
- ['full_opt', 'min_opt'].each { opt_level ->
- def architecture = arch
+ ['ryujit', 'legacy_backend'].each { jit ->
- def newJob = job(Utilities.getFullJobName(project, "perf_throughput_perflab_${os}_${arch}_${opt_level}", isPR)) {
- // Set the label.
- label('windows_clr_perf')
- wrappers {
- credentialsBinding {
- string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
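+ // The legacy backend is x86-only; skip the x64 + legacy_backend combination.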
+ if (arch == 'x64' && jit == 'legacy_backend')
+ {
+ return
+ }
+
+ ['full_opt', 'min_opt'].each { opt_level ->
+ def architecture = arch
+
+ def newJob = job(Utilities.getFullJobName(project, "perf_throughput_perflab_${os}_${arch}_${opt_level}_${jit}", isPR)) {
+ // Set the label.
+ label('windows_clr_perf')
+ wrappers {
+ credentialsBinding {
+ string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
+ }
}
- }
- if (isPR) {
- parameters {
- stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that will be used to build the full title of a run in Benchview.')
+ if (isPR) {
+ parameters {
+ stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that will be used to build the full title of a run in Benchview.')
+ }
}
- }
- def configuration = 'Release'
- def runType = isPR ? 'private' : 'rolling'
- def benchViewName = isPR ? 'coreclr-throughput private %BenchviewCommitName%' : 'coreclr-throughput rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'
-
- steps {
- // Batch
- batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
- batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os}\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os}\"")
- batchFile("C:\\Tools\\nuget.exe install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
- batchFile("C:\\Tools\\nuget.exe install Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os} -Source https://dotnet.myget.org/F/dotnet-core -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
- //Do this here to remove the origin but at the front of the branch name as this is a problem for BenchView
- //we have to do it all as one statement because cmd is called each time and we lose the set environment variable
- batchFile("if \"%GIT_BRANCH:~0,7%\" == \"origin/\" (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH:origin/=%\") else (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH%\")\n" +
- "set \"BENCHVIEWNAME=${benchViewName}\"\n" +
- "set \"BENCHVIEWNAME=%BENCHVIEWNAME:\"=%\"\n" +
- "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"${benchViewName}\" --user \"dotnet-bot@microsoft.com\"\n" +
- "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}")
- batchFile("py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\"")
- batchFile("set __TestIntermediateDir=int&&build.cmd ${configuration} ${architecture} skiptests")
- batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
- batchFile("py -u tests\\scripts\\run-throughput-perf.py -arch ${arch} -os ${os} -configuration ${configuration} -opt_level ${opt_level} -clr_root \"%WORKSPACE%\" -assembly_root \"%WORKSPACE%\\Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os}\\lib\" -benchview_path \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" -run_type ${runType}")
+ def configuration = 'Release'
+ def runType = isPR ? 'private' : 'rolling'
+ def benchViewName = isPR ? 'coreclr-throughput private %BenchviewCommitName%' : 'coreclr-throughput rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'
+
+ steps {
+ // Batch
+ batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
+ batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os}\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os}\"")
+ batchFile("C:\\Tools\\nuget.exe install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
+ batchFile("C:\\Tools\\nuget.exe install Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os} -Source https://dotnet.myget.org/F/dotnet-core -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
+ // Strip the "origin/" prefix from the front of the branch name, since it causes problems for BenchView.
+ // This must be done in a single statement: each batch step runs in a fresh cmd instance, so a set environment variable would not survive into the next step.
+ batchFile("if \"%GIT_BRANCH:~0,7%\" == \"origin/\" (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH:origin/=%\") else (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH%\")\n" +
+ "set \"BENCHVIEWNAME=${benchViewName}\"\n" +
+ "set \"BENCHVIEWNAME=%BENCHVIEWNAME:\"=%\"\n" +
+ "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"${benchViewName}\" --user \"dotnet-bot@microsoft.com\"\n" +
+ "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}")
+ batchFile("py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\"")
+ batchFile("set __TestIntermediateDir=int&&build.cmd ${configuration} ${architecture} skiptests")
+ batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
+ batchFile("py -u tests\\scripts\\run-throughput-perf.py -arch ${arch} -os ${os} -configuration ${configuration} -opt_level ${opt_level} -jit_name ${jit} -clr_root \"%WORKSPACE%\" -assembly_root \"%WORKSPACE%\\Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os}\\lib\" -benchview_path \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" -run_type ${runType}")
+ }
}
- }
- // Save machinedata.json to /artifact/bin/ Jenkins dir
- def archiveSettings = new ArchivalSettings()
- archiveSettings.addFiles('throughput-*.csv')
- Utilities.addArchival(newJob, archiveSettings)
+ // Save machinedata.json to /artifact/bin/ Jenkins dir
+ def archiveSettings = new ArchivalSettings()
+ archiveSettings.addFiles('throughput-*.csv')
+ Utilities.addArchival(newJob, archiveSettings)
- Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")
+ Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")
- if (isPR) {
- def opts = ""
- if (opt_level == 'min_opts')
- {
- opts = '\\W+min_opts'
+ if (isPR) {
+ def opts = ""
+ if (opt_level == 'min_opt')
+ {
+ opts = '\\W+min_opt'
+ }
+ def jitt = ""
+ if (jit != 'ryujit')
+ {
+ jitt = "\\W+${jit}"
+ }
+
+ TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
+ builder.setGithubContext("${os} ${arch} ${opt_level} ${jit} CoreCLR Throughput Perf Tests")
+ builder.triggerOnlyOnComment()
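+ // e.g. "test Windows_NT x86 min_opt legacy_backend throughput"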
+ builder.setCustomTriggerPhrase("(?i).*test\\W+${os}\\W+${arch}${opts}${jitt}\\W+throughput.*")
+ builder.triggerForBranch(branch)
+ builder.emitTrigger(newJob)
+ }
+ else {
+ // Set a push trigger
+ TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
+ builder.emitTrigger(newJob)
}
- TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
- builder.setGithubContext("${os} ${arch} ${opt_level} CoreCLR Throughput Perf Tests")
- builder.triggerOnlyOnComment()
- builder.setCustomTriggerPhrase("(?i).*test\\W+${os}\\W+${arch}${opts}\\W+throughput.*")
- builder.triggerForBranch(branch)
- builder.emitTrigger(newJob)
- }
- else {
- // Set a push trigger
- TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
- builder.emitTrigger(newJob)
}
}
}
[true, false].each { isPR ->
['Windows_NT'].each { os ->
['x64', 'x86'].each { arch ->
- def architecture = arch
- def newJob = job(Utilities.getFullJobName(project, "perf_scenarios_${os}_${arch}", isPR)) {
- // Set the label.
- label('windows_clr_perf')
- wrappers {
- credentialsBinding {
- string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
- }
- }
+ ['ryujit', 'legacy_backend'].each { jit ->
- if (isPR) {
- parameters {
- stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that you will be used to build the full title of a run in Benchview. The final name will be of the form <branch> private BenchviewCommitName')
- }
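+ // The legacy backend is x86-only; skip the x64 + legacy_backend combination.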
+ if (arch == 'x64' && jit == 'legacy_backend')
+ {
+ return
}
- parameters {
- stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '1', 'Size test, one iteration is sufficient')
- stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '1', 'Size test, one iteration is sufficient')
- }
+ ['full_opt', 'min_opt'].each { opt_level ->
+ def architecture = arch
+ def newJob = job(Utilities.getFullJobName(project, "perf_scenarios_${os}_${arch}_${opt_level}_${jit}", isPR)) {
- def configuration = 'Release'
- def runType = isPR ? 'private' : 'rolling'
- def benchViewName = isPR ? 'CoreCLR-Scenarios private %BenchviewCommitName%' : 'CoreCLR-Scenarios rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'
- def uploadString = '-uploadToBenchview'
+ def testEnv = ""
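+ // As in the perflab jobs, the legacy backend is assumed to be selected through the legacyjit test environment script.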
+ if (jit == 'legacy_backend')
+ {
+ testEnv = '-testEnv %WORKSPACE%\\tests\\legacyjit_x86_testenv.cmd'
+ }
- steps {
- // Batch
- batchFile("powershell wget https://dist.nuget.org/win-x86-commandline/latest/nuget.exe -OutFile \"%WORKSPACE%\\nuget.exe\"")
- batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
- batchFile("\"%WORKSPACE%\\nuget.exe\" install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
-
- //Do this here to remove the origin but at the front of the branch name as this is a problem for BenchView
- //we have to do it all as one statement because cmd is called each time and we lose the set environment variable
- batchFile("if \"%GIT_BRANCH:~0,7%\" == \"origin/\" (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH:origin/=%\") else (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH%\")\n" +
- "set \"BENCHVIEWNAME=${benchViewName}\"\n" +
- "set \"BENCHVIEWNAME=%BENCHVIEWNAME:\"=%\"\n" +
- "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"%BENCHVIEWNAME%\" --user \"dotnet-bot@microsoft.com\"\n" +
- "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}")
- batchFile("py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\"")
- batchFile("set __TestIntermediateDir=int&&build.cmd ${configuration} ${architecture}")
-
- batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
-
- def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} -scenarioTest"
- def failedOutputLogFilename = "run-xunit-perf-scenario.log"
-
- // Using a sentinel file to
- batchFile("if exist \"${failedOutputLogFilename}\" del /q /f \"${failedOutputLogFilename}\"")
- batchFile("if exist \"${failedOutputLogFilename}\" (echo [ERROR] Failed to delete previously created \"${failedOutputLogFilename}\" file.& exit /b 1)")
-
- // Scenario: JitBench
- batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\Scenario\\JitBench -group CoreCLR-Scenarios || (echo [ERROR] JitBench failed. 1>>\"${failedOutputLogFilename}\"& exit /b 0)")
-
- // Scenario: ILLink
- if (arch == 'x64') {
- batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\linkbench\\linkbench -group ILLink -nowarmup || (echo [ERROR] IlLink failed. 1>>\"${failedOutputLogFilename}\"& exit /b 0)")
- }
+ // Set the label.
+ label('windows_clr_perf')
+ wrappers {
+ credentialsBinding {
+ string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
+ }
+ }
- batchFile("if exist \"${failedOutputLogFilename}\" (type \"${failedOutputLogFilename}\"& exit /b 1)")
- }
- }
+ if (isPR) {
+ parameters {
+ stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that will be used to build the full title of a run in Benchview. The final name will be of the form <branch> private BenchviewCommitName')
+ }
+ }
- // Save machinedata.json to /artifact/bin/ Jenkins dir
- def archiveSettings = new ArchivalSettings()
+ parameters {
+ stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '1', 'Size test, one iteration is sufficient')
+ stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '1', 'Size test, one iteration is sufficient')
+ }
+
+ def configuration = 'Release'
+ def runType = isPR ? 'private' : 'rolling'
+ def benchViewName = isPR ? 'CoreCLR-Scenarios private %BenchviewCommitName%' : 'CoreCLR-Scenarios rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'
+ def uploadString = '-uploadToBenchview'
+
+ steps {
+ // Batch
+ batchFile("powershell wget https://dist.nuget.org/win-x86-commandline/latest/nuget.exe -OutFile \"%WORKSPACE%\\nuget.exe\"")
+ batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
+ batchFile("\"%WORKSPACE%\\nuget.exe\" install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
+
+ // Strip the "origin/" prefix from the front of the branch name, since it causes problems for BenchView.
+ // This must be done in a single statement: each batch step runs in a fresh cmd instance, so a set environment variable would not survive into the next step.
+ batchFile("if \"%GIT_BRANCH:~0,7%\" == \"origin/\" (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH:origin/=%\") else (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH%\")\n" +
+ "set \"BENCHVIEWNAME=${benchViewName}\"\n" +
+ "set \"BENCHVIEWNAME=%BENCHVIEWNAME:\"=%\"\n" +
+ "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"%BENCHVIEWNAME%\" --user \"dotnet-bot@microsoft.com\"\n" +
+ "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}")
+ batchFile("py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\"")
+ batchFile("set __TestIntermediateDir=int&&build.cmd ${configuration} ${architecture}")
+
+ batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
+
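+ // Common run-xunit-perf arguments for the scenario runs; the test environment, opt level, and JIT selection are forwarded as above.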
+ def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -scenarioTest"
+ def failedOutputLogFilename = "run-xunit-perf-scenario.log"
+
+ // Use a sentinel file to signal scenario failures: each scenario appends an error line to it, and the final step fails the job if the file exists.
+ batchFile("if exist \"${failedOutputLogFilename}\" del /q /f \"${failedOutputLogFilename}\"")
+ batchFile("if exist \"${failedOutputLogFilename}\" (echo [ERROR] Failed to delete previously created \"${failedOutputLogFilename}\" file.& exit /b 1)")
+
+ // Scenario: JitBench
+ batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\Scenario\\JitBench -group CoreCLR-Scenarios || (echo [ERROR] JitBench failed. 1>>\"${failedOutputLogFilename}\"& exit /b 0)")
+
+ // Scenario: ILLink
+ if (arch == 'x64' && opt_level == 'full_opt') {
+ batchFile("tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\linkbench\\linkbench -group ILLink -nowarmup || (echo [ERROR] IlLink failed. 1>>\"${failedOutputLogFilename}\"& exit /b 0)")
+ }
+
+ batchFile("if exist \"${failedOutputLogFilename}\" (type \"${failedOutputLogFilename}\"& exit /b 1)")
+ }
+ }
+
+ // Save machinedata.json to /artifact/bin/ Jenkins dir
+ def archiveSettings = new ArchivalSettings()
archiveSettings.addFiles('bin/sandbox/Perf-*.*')
- archiveSettings.addFiles('machinedata.json')
- Utilities.addArchival(newJob, archiveSettings)
+ archiveSettings.addFiles('machinedata.json')
+ Utilities.addArchival(newJob, archiveSettings)
- Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")
+ Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")
- newJob.with {
- wrappers {
- timeout {
- absolute(240)
+ newJob.with {
+ wrappers {
+ timeout {
+ absolute(240)
+ }
+ }
}
- }
- }
- if (isPR) {
- TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
- builder.setGithubContext("${os} ${arch} Performance Scenarios Tests")
- builder.triggerOnlyOnComment()
- builder.setCustomTriggerPhrase("(?i).*test\\W+${os}\\W+${arch}\\W+perf\\W+scenarios.*")
- builder.triggerForBranch(branch)
- builder.emitTrigger(newJob)
- }
- else {
- // Set a push trigger
- TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
- builder.emitTrigger(newJob)
+ if (isPR) {
+ def opts = ""
+ if (opt_level == 'min_opt')
+ {
+ opts = '\\W+min_opt'
+ }
+ def jitt = ""
+ if (jit != 'ryujit')
+ {
+ jitt = "\\W+${jit}"
+ }
+
+ TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
+ builder.setGithubContext("${os} ${arch} ${opt_level} ${jit} Performance Scenarios Tests")
+ builder.triggerOnlyOnComment()
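+ // e.g. "test Windows_NT x86 min_opt legacy_backend perf scenarios"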
+ builder.setCustomTriggerPhrase("(?i).*test\\W+${os}\\W+${arch}${opts}${jitt}\\W+perf\\W+scenarios.*")
+ builder.triggerForBranch(branch)
+ builder.emitTrigger(newJob)
+ }
+ else {
+ // Set a push trigger
+ TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
+ builder.emitTrigger(newJob)
+ }
+ }
}
}
}
'Windows_NT': {
'x64': 'clrjit.dll',
'x86': 'clrjit.dll',
+ 'x86lb': 'legacyjit.dll'
},
'Linux': {
'x64': 'libclrjit.so'
parser.add_argument('-benchview_path', dest='benchview_path', default=None)
parser.add_argument('-iterations', dest='iterations', default=5, type=int)
parser.add_argument('-opt_level', dest='opt_level', default='full_opt')
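+# Select the JIT to measure: 'ryujit' (the default) or 'legacy_backend' (x86 only).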
+parser.add_argument('-jit_name', dest='jit_name', default='ryujit')
##########################################################################
# Helper Functions
benchview_path = args.benchview_path
iterations = args.iterations
opt_level = args.opt_level.lower()
+ jit_name = args.jit_name.lower()
def validate_arg(arg, check):
""" Validate an individual arg
valid_run_types = ['rolling', 'private']
valid_os = ['Windows_NT', 'Ubuntu14.04']
valid_opt_levels = ['full_opt', 'min_opt']
+ valid_jit_names = {'x64': ['ryujit'], 'x86': ['ryujit', 'legacy_backend']}
arch = next((a for a in valid_archs if a.lower() == arch.lower()), arch)
build_type = next((b for b in valid_build_types if b.lower() == build_type.lower()), build_type)
validate_arg(run_type, lambda item: item in valid_run_types)
validate_arg(iterations, lambda item: item > 0)
validate_arg(opt_level, lambda item: item in valid_opt_levels)
+ validate_arg(jit_name, lambda item: item in valid_jit_names[arch])
if clr_root is None:
raise Exception('--clr_root must be set')
benchview_path = os.path.normpath(benchview_path)
validate_arg(benchview_path, lambda item: os.path.isdir(benchview_path))
- args = (arch, operating_system, os_group, build_type, run_type, clr_root, assembly_root, benchview_path, iterations, opt_level)
+ args = (arch, operating_system, os_group, build_type, run_type, clr_root, assembly_root, benchview_path, iterations, opt_level, jit_name)
# Log configuration
log('Configuration:')
log(' os_group: %s' % os_group)
log(' build_type: %s' % build_type)
log(' opt_level: %s' % opt_level)
+ log(' jit_name: %s' % jit_name)
log(' run_type: %s' % run_type)
log(' iterations: %d' % iterations)
log(' clr_root: %s' % clr_root)
return csv_file_name
-def runIterations(dll_name, dll_path, iterations, crossgen_path, jit_path, assemblies_path, opt_level):
+def runIterations(dll_name, dll_path, iterations, crossgen_path, jit_path, assemblies_path, opt_level, jit_name):
""" Run throughput testing for a given dll
Args:
dll_name: the name of the dll
if opt_level == 'min_opt':
my_env['COMPlus_JITMinOpts'] = '1'
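+ # For the legacy backend, COMPlus_AltJit=* and COMPlus_AltJitNgen=* tell the runtime and crossgen to use the alternate JIT for all methods.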
+ if jit_name == 'legacy_backend':
+ my_env['COMPlus_AltJit'] = '*'
+ my_env['COMPlus_AltJitNgen'] = '*'
+
log(" ".join(run_args))
# Time.clock() returns seconds, with a resolution of 0.4 microseconds, so multiply by the multiplier to get milliseconds
global os_group_list
global python_exe_list
- architecture, operating_system, os_group, build_type, run_type, clr_root, assembly_root, benchview_path, iterations, opt_level = validate_args(args)
+ architecture, operating_system, os_group, build_type, run_type, clr_root, assembly_root, benchview_path, iterations, opt_level, jit_name = validate_args(args)
arch = architecture
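+ # Use the 'x86lb' key so jit_list resolves to legacyjit.dll instead of clrjit.dll for the legacy backend.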
+ if jit_name == 'legacy_backend':
+ architecture = 'x86lb'
+
current_dir = os.getcwd()
jit = jit_list[os_group][architecture]
crossgen = 'crossgen'
(not dll_file_name in dll_exclude_list["All"])):
dll_name = dll_file_name.replace(".dll", "")
dll_path = os.path.join(assembly_root, dll_file_name)
- dll_elapsed_times = runIterations(dll_file_name, dll_path, iterations, crossgen_path, jit_path, assembly_root, opt_level)
+ dll_elapsed_times = runIterations(dll_file_name, dll_path, iterations, crossgen_path, jit_path, assembly_root, opt_level, jit_name)
if len(dll_elapsed_times) != 0:
if not benchview_path is None:
"--config",
"OptLevel",
opt_level,
+ "--config",
+ "JitName",
+ jit_name,
"--arch",
architecture,
"--machinepool",