1 // Import the utility functionality.
3 import jobs.generation.*;
// Bindings injected by the dotnet-ci Job DSL generator when this script runs.
5 def project = GithubProject
6 def branch = GithubBranchName
// Jenkins folder layout for generated jobs: <project>/<branch>.
7 def projectName = Utilities.getFolderName(project)
8 def projectFolder = projectName + '/' + Utilities.getFolderName(branch)
// Map an OS name onto its platform group ('Linux' or 'Windows_NT').
// Fails fast via assert on an unknown OS so a misconfigured job list is
// caught at generation time rather than at run time.
//
// Params:  os - OS name string as used in the job matrices below.
// Returns: the platform-group string for that OS.
def static getOSGroup(def os) {
    def osGroupMap = ['Ubuntu14.04':'Linux',
        'Ubuntu16.04': 'Linux',
        'Windows_NT':'Windows_NT',
        'OpenSUSE13.2': 'Linux',
        'OpenSUSE42.1': 'Linux',
        'LinuxARMEmulator': 'Linux']
    // Map.get(key, default) avoids a MissingPropertyException-style surprise;
    // the assert below gives a clear message instead.
    def osGroup = osGroupMap.get(os, null)
    assert osGroup != null : "Could not find os group for ${os}"
    // The visible original fell off the end after the assert; return the
    // looked-up group explicitly.
    return osGroup
}
28 // Setup perflab tests runs
// One Windows perflab job per (isPR, os, arch, isSmoketest, jit, opt_level)
// combination. Smoketest variants get a distinct job-name suffix and skip
// the BenchView upload (see uploadString below).
29 [true, false].each { isPR ->
30 ['Windows_NT'].each { os ->
31 ['x64', 'x86'].each { arch ->
32 [true, false].each { isSmoketest ->
33 ['ryujit'].each { jit ->
34 ['full_opt', 'min_opt'].each { opt_level ->
36 def architecture = arch
37 def jobName = isSmoketest ? "perf_perflab_${os}_${arch}_${opt_level}_${jit}_smoketest" : "perf_perflab_${os}_${arch}_${opt_level}_${jit}"
40 def newJob = job(Utilities.getFullJobName(project, jobName, isPR)) {
42 label('windows_server_2016_clr_perf')
// SAS token credential used by run-xunit-perf.py to upload results to BenchView.
45 string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
// NOTE(review): the description string has a typo ("The name that you will be
// used") — it is a runtime string, left untouched here.
51 stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that you will be used to build the full title of a run in Benchview. The final name will be of the form <branch> private BenchviewCommitName')
// NOTE(review): the '2'-iteration parameters below appear to belong to the
// smoketest branch (the enclosing conditional is not visible in this span),
// yet their descriptions still claim "twenty one" iterations — confirm and
// correct the descriptions upstream.
57 stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '2', 'Sets the number of iterations to twenty one. We are doing this to limit the amount of data that we upload as 20 iterations is enough to get a good sample')
58 stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '2', 'Sets the number of iterations to twenty one. We are doing this to limit the amount of data that we upload as 20 iterations is enough to get a good sample')
63 stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '21', 'Sets the number of iterations to twenty one. We are doing this to limit the amount of data that we upload as 20 iterations is enough to get a good sample')
64 stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '21', 'Sets the number of iterations to twenty one. We are doing this to limit the amount of data that we upload as 20 iterations is enough to get a good sample')
68 def configuration = 'Release'
// PR runs are 'private' submissions in BenchView; CI runs are 'rolling'.
69 def runType = isPR ? 'private' : 'rolling'
70 def benchViewName = isPR ? 'coreclr private %BenchviewCommitName%' : 'coreclr rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'
// Smoketests never upload; full runs pass -uploadToBenchview to the runner.
71 def uploadString = isSmoketest ? '' : '-uploadToBenchview'
// Bootstrap: fetch nuget.exe, then install the BenchView tooling package
// into the workspace (clearing any stale copy first).
76 batchFile("powershell -NoProfile wget https://dist.nuget.org/win-x86-commandline/latest/nuget.exe -OutFile \"%WORKSPACE%\\nuget.exe\"")
77 batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
78 batchFile("\"%WORKSPACE%\\nuget.exe\" install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
79 //Do this here to remove the origin but at the front of the branch name as this is a problem for BenchView
80 //we have to do it all as one statement because cmd is called each time and we lose the set environment variable
// BENCHVIEWNAME has its quotes doubled (cmd escaping) before being passed
// to submission-metadata.py, so a PR title containing quotes cannot break
// the command line.
81 batchFile("if \"%GIT_BRANCH:~0,7%\" == \"origin/\" (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH:origin/=%\") else (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH%\")\n" +
82 "set \"BENCHVIEWNAME=${benchViewName}\"\n" +
83 "set \"BENCHVIEWNAME=%BENCHVIEWNAME:\"=\"\"%\"\n" +
84 "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"%BENCHVIEWNAME%\" --user-email \"dotnet-bot@microsoft.com\"\n" +
85 "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}")
86 batchFile("py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\"")
// Build the product, then generate (not run) the test layout.
87 batchFile("set __TestIntermediateDir=int&&build.cmd ${configuration} ${architecture}")
89 batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
// NOTE(review): ${testEnv} is not defined anywhere in this visible span —
// presumably set on an elided line above; verify before editing this string.
// The stabilityPrefix pins the run to one core (affinity 0x2) at HIGH priority.
91 def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -os ${os} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -outputdir \"%WORKSPACE%\\bin\\sandbox_logs\" -stabilityPrefix \"START \\\"CORECLR_PERF_RUN\\\" /B /WAIT /HIGH /AFFINITY 0x2\""
93 // Run with just stopwatch: Profile=Off
94 batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library")
95 batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality")
97 // Run with the full set of counters enabled: Profile=On
// Counter collection is skipped for min_opt runs.
98 if (opt_level != 'min_opt') {
99 batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi")
100 batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi")
106 Utilities.setMachineAffinity(newJob, "Windows_NT", '20170427-elevated')
// Archive every log artifact the perf runner drops under bin/sandbox_logs,
// plus the BenchView machine description.
108 def archiveSettings = new ArchivalSettings()
109 archiveSettings.addFiles('bin/sandbox_logs/**/*_log.txt')
110 archiveSettings.addFiles('bin/sandbox_logs/**/*.csv')
111 archiveSettings.addFiles('bin/sandbox_logs/**/*.xml')
112 archiveSettings.addFiles('bin/sandbox_logs/**/*.log')
113 archiveSettings.addFiles('bin/sandbox_logs/**/*.md')
114 archiveSettings.addFiles('bin/sandbox_logs/**/*.etl')
115 archiveSettings.addFiles('machinedata.json')
116 archiveSettings.setAlwaysArchive()
118 Utilities.addArchival(newJob, archiveSettings)
119 Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")
// Perf results are kept longer than standard CI artifacts.
123 artifactDaysToKeep(14)
125 artifactNumToKeep(100)
// PR trigger: smoketests run as "Correctness" checks; full runs get a
// comment-only trigger with an opt-level/jit-aware phrase.
136 TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
138 builder.setGithubContext("${os} ${arch} ${opt_level} ${jit} CoreCLR Perf Tests Correctness")
141 builder.setGithubContext("${os} ${arch} ${opt_level} ${jit} CoreCLR Perf Tests")
// NOTE(review): 'opts'/'jitt' are script-binding variables whose
// declarations are not visible in this span — confirm they are initialized
// on elided lines before each use.
144 if (opt_level == 'min_opt') {
145 opts = '\\W+min_opts'
148 if (jit != 'ryujit') {
152 builder.triggerOnlyOnComment()
153 builder.setCustomTriggerPhrase("(?i).*test\\W+${os}\\W+${arch}${opts}${jitt}\\W+perf.*")
156 builder.triggerForBranch(branch)
157 builder.emitTrigger(newJob)
// Non-PR: full_opt runs on every push, min_opt only on the daily timer.
159 else if (opt_level == 'full_opt') {
160 // Set a push trigger
161 TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
162 builder.emitTrigger(newJob)
165 // Set periodic trigger
166 Utilities.addPeriodicTrigger(newJob, '@daily')
175 // Setup throughput perflab tests runs
// Windows compile-throughput jobs: one per (isPR, os, arch, jit,
// pgo_optimized, opt_level). The pgo_optimized axis toggles -nopgooptimize
// on the product build.
176 [true, false].each { isPR ->
177 ['Windows_NT'].each { os ->
178 ['x64', 'x86'].each { arch ->
179 ['ryujit'].each { jit ->
180 [true, false].each { pgo_optimized ->
181 ['full_opt', 'min_opt'].each { opt_level ->
182 def architecture = arch
// NOTE(review): pgo_build/pgo_string/pgo_test are assigned without visible
// declarations/defaults in this span — presumably initialized on elided
// lines above; verify.
187 if (!pgo_optimized) {
188 pgo_build = " -nopgooptimize"
193 def newJob = job(Utilities.getFullJobName(project, "perf_throughput_perflab_${os}_${arch}_${opt_level}_${jit}_${pgo_string}", isPR)) {
195 label('windows_server_2016_clr_perf')
196 string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
204 stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that will be used to build the full title of a run in Benchview.')
208 def configuration = 'Release'
209 def runType = isPR ? 'private' : 'rolling'
210 def benchViewName = isPR ? 'coreclr-throughput private %BenchviewCommitName%' : 'coreclr-throughput rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'
// Clear stale tool/benchmark packages, then install fresh copies. Unlike
// the perflab section this uses a machine-local nuget at C:\Tools.
214 batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
215 batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os}\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os}\"")
216 batchFile("C:\\Tools\\nuget.exe install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
217 batchFile("C:\\Tools\\nuget.exe install Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os} -Source https://dotnet.myget.org/F/dotnet-core -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
218 //Do this here to remove the origin but at the front of the branch name as this is a problem for BenchView
219 //we have to do it all as one statement because cmd is called each time and we lose the set environment variable
220 batchFile("if \"%GIT_BRANCH:~0,7%\" == \"origin/\" (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH:origin/=%\") else (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH%\")\n" +
221 "set \"BENCHVIEWNAME=${benchViewName}\"\n" +
222 "set \"BENCHVIEWNAME=%BENCHVIEWNAME:\"=\"\"%\"\n" +
// NOTE(review): this passes ${benchViewName} directly instead of the
// quote-escaped %BENCHVIEWNAME% computed just above (compare the perflab
// section, which uses %BENCHVIEWNAME%). A PR title containing quotes could
// break this command — looks like a bug; confirm and align.
223 "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"${benchViewName}\" --user-email \"dotnet-bot@microsoft.com\"\n" +
224 "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}")
225 batchFile("py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\"")
// Throughput only needs the product build, so tests are skipped.
226 batchFile("set __TestIntermediateDir=int&&build.cmd ${configuration} ${architecture}${pgo_build} skiptests")
227 batchFile("py -u tests\\scripts\\run-throughput-perf.py -arch ${arch} -os ${os} -configuration ${configuration} -opt_level ${opt_level} -jit_name ${jit}${pgo_test} -clr_root \"%WORKSPACE%\" -assembly_root \"%WORKSPACE%\\Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os}\\lib\" -benchview_path \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" -run_type ${runType}")
231 // Save machinedata.json to /artifact/bin/ Jenkins dir
232 def archiveSettings = new ArchivalSettings()
233 archiveSettings.addFiles('throughput-*.csv')
234 archiveSettings.setAlwaysArchive()
235 Utilities.addArchival(newJob, archiveSettings)
237 Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")
// Comment-only PR trigger; phrase encodes arch/opt-level/jit/pgo selectors.
241 if (opt_level == 'min_opt') {
242 opts = '\\W+min_opts'
246 if (jit != 'ryujit') {
252 pgo_trigger = "\\W+nopgo"
256 TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
257 builder.setGithubContext("${os} ${arch} ${opt_level} ${jit} ${pgo_string} CoreCLR Throughput Perf Tests")
258 builder.triggerOnlyOnComment()
259 builder.setCustomTriggerPhrase("(?i).*test\\W+${os}\\W+${arch}${opts}${jitt}${pgo_trigger}\\W+throughput.*")
260 builder.triggerForBranch(branch)
261 builder.emitTrigger(newJob)
// Non-PR: only the full_opt + PGO-optimized flavor runs on push; plus daily.
263 else if (opt_level == 'full_opt' && pgo_optimized) {
264 // Set a push trigger
265 TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
266 builder.emitTrigger(newJob)
269 // Set periodic trigger
270 Utilities.addPeriodicTrigger(newJob, '@daily')
// Fully-qualified Jenkins job name for the Linux perf run on the given OS.
// isPR selects the PR (private) vs. rolling variant of the name.
def static getFullPerfJobName(def project, def os, def isPR) {
    return Utilities.getFullJobName(project, "perf_${os}", isPR)
}
283 // Create the Linux/OSX/CentOS coreclr test leg for debug and release and each scenario
// Linux perf pipeline: one RHEL7.2 build job producing the bits, per-OS
// perf jobs consuming them via copyArtifacts, and a build-flow job tying
// the two stages together.
284 [true, false].each { isPR ->
285 def fullBuildJobName = Utilities.getFullJobName(project, 'perf_linux_build', isPR)
286 def architecture = 'x64'
287 def configuration = 'Release'
289 // Build has to happen on RHEL7.2 (that's where we produce the bits we ship)
290 ['RHEL7.2'].each { os ->
291 def newBuildJob = job(fullBuildJobName) {
293 shell("./build.sh verbose ${architecture} ${configuration}")
294 shell("./build-test.sh generatelayoutonly ${architecture} ${configuration}")
297 Utilities.setMachineAffinity(newBuildJob, os, 'latest-or-auto')
298 Utilities.standardJobSetup(newBuildJob, project, isPR, "*/${branch}")
// Archive product + test bits for the downstream perf jobs; exclude .nuget.
299 Utilities.addArchival(newBuildJob, "bin/Product/**,bin/obj/*/tests/**/*.dylib,bin/obj/*/tests/**/*.so,bin/tests/**", "bin/Product/**/.nuget/**")
303 // Actual perf testing on the following OSes
304 def perfOSList = ['Ubuntu16.04']
305 perfOSList.each { os ->
306 def newJob = job(getFullPerfJobName(project, os, isPR)) {
308 label('ubuntu_1604_clr_perf')
311 string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
317 stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that you will be used to build the full title of a run in Benchview. The final name will be of the form <branch> private BenchviewCommitName')
322 // Cap the maximum number of iterations to 21.
323 stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '21', 'Sets the number of iterations to twenty one. We are doing this to limit the amount of data that we upload as 20 iterations is enough to get a good sample')
324 stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '21', 'Sets the number of iterations to twenty one. We are doing this to limit the amount of data that we upload as 20 iterations is enough to get a good sample')
// PRODUCT_BUILD is filled in by the flow job (b.build.number) so this job
// copies artifacts from the matching perf_linux_build run.
325 stringParam('PRODUCT_BUILD', '', 'Build number from which to copy down the CoreCLR Product binaries built for Linux')
328 def osGroup = getOSGroup(os)
329 def runType = isPR ? 'private' : 'rolling'
330 def benchViewName = isPR ? 'coreclr private \$BenchviewCommitName' : 'coreclr rolling \$GIT_BRANCH_WITHOUT_ORIGIN \$GIT_COMMIT'
331 def uploadString = '-uploadToBenchview'
// taskset+nice pins the benchmark to one CPU at raised priority for
// stability, mirroring the Windows stabilityPrefix.
333 def runXUnitCommonArgs = "-arch ${architecture} -os ${os} -configuration ${configuration} -stabilityPrefix \"taskset 0x00000002 nice --adjustment=-10\" -generateBenchviewData \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools\" ${uploadString} -runtype ${runType} -outputdir \"\${WORKSPACE}/bin/sandbox_logs\""
336 shell("./tests/scripts/perf-prep.sh --nocorefx")
337 shell("./init-tools.sh")
338 copyArtifacts(fullBuildJobName) {
339 includePatterns("bin/**")
341 buildNumber('\${PRODUCT_BUILD}')
// sed strips the leading "origin/" (or any remote prefix) from GIT_BRANCH.
// NOTE(review): both the sed replacement ("\\1 /") and the --name argument
// ("\" ${benchViewName} \"") embed literal spaces around the value —
// confirm BenchView tolerates/expects the padding.
344 shell("GIT_BRANCH_WITHOUT_ORIGIN=\$(echo \$GIT_BRANCH | sed \"s/[^/]*\\/\\(.*\\)/\\1 /\")\n" +
345 "python3.5 \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools/submission-metadata.py\" --name \" ${benchViewName} \" --user-email \"dotnet-bot@microsoft.com\"\n" +
346 "python3.5 \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools/build.py\" git --branch \$GIT_BRANCH_WITHOUT_ORIGIN --type ${runType}")
// NOTE(review): testBinLoc points at a Windows_NT test layout on a Linux
// run — presumably the copied build artifacts use that layout name; confirm.
347 shell("""python3 ./tests/scripts/run-xunit-perf.py -testBinLoc bin/tests/Windows_NT.${architecture}.${configuration}/JIT/Performance/CodeQuality ${runXUnitCommonArgs}""")
351 def archiveSettings = new ArchivalSettings()
352 archiveSettings.addFiles('bin/sandbox_logs/**/*_log.txt')
353 archiveSettings.addFiles('bin/sandbox_logs/**/*.csv')
354 archiveSettings.addFiles('bin/sandbox_logs/**/*.xml')
355 archiveSettings.addFiles('bin/sandbox_logs/**/*.log')
356 archiveSettings.addFiles('bin/sandbox_logs/**/*.md')
357 archiveSettings.addFiles('bin/sandbox_logs/**/*.etl')
358 archiveSettings.addFiles('machinedata.json')
359 archiveSettings.setAlwaysArchive()
361 Utilities.addArchival(newJob, archiveSettings)
362 Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")
364 // For perf, we need to keep the run results longer
366 // Enable the log rotator
368 artifactDaysToKeep(14)
370 artifactNumToKeep(100)
// Flow job: build once on RHEL7.2, then fan out the per-OS perf jobs,
// forwarding the producing build's number via PRODUCT_BUILD.
381 def flowJobPerfRunList = perfOSList.collect { os ->
382 "{ build(params + [PRODUCT_BUILD: b.build.number], '${getFullPerfJobName(project, os, isPR)}') }"
384 def newFlowJob = buildFlowJob(Utilities.getFullJobName(project, "perf_linux_flow", isPR, '')) {
387 stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that you will be used to build the full title of a run in Benchview. The final name will be of the form <branch> private BenchviewCommitName')
391 // First, build the bits on RHEL7.2
392 b = build(params, '${fullBuildJobName}')
394 // Then, run the perf tests
396 ${flowJobPerfRunList.join(",\n ")}
401 Utilities.setMachineAffinity(newFlowJob, 'Windows_NT', 'latest-or-auto')
402 Utilities.standardJobSetup(newFlowJob, project, isPR, "*/${branch}")
// PR: comment-only "test linux perf flow" trigger; non-PR: run on push.
405 TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
406 builder.setGithubContext("Linux Perf Test Flow")
407 builder.triggerOnlyOnComment()
408 builder.setCustomTriggerPhrase("(?i).*test\\W+linux\\W+perf\\W+flow.*")
409 builder.triggerForBranch(branch)
410 builder.emitTrigger(newFlowJob)
413 // Set a push trigger
414 TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
415 builder.emitTrigger(newFlowJob)
// Fully-qualified Jenkins job name for the Linux throughput run on the
// given OS (or os_optlevel string). isPR selects PR vs. rolling variant.
def static getFullThroughputJobName(def project, def os, def isPR) {
    return Utilities.getFullJobName(project, "perf_throughput_${os}", isPR)
}
424 // Create the Linux/OSX/CentOS coreclr test leg for debug and release and each scenario
// Linux compile-throughput pipeline: RHEL7.2 build job, per-(OS, opt_level)
// throughput jobs, and a build-flow job that chains them.
425 [true, false].each { isPR ->
426 def fullBuildJobName = Utilities.getFullJobName(project, 'perf_throughput_linux_build', isPR)
427 def architecture = 'x64'
428 def configuration = 'Release'
430 // Build has to happen on RHEL7.2 (that's where we produce the bits we ship)
431 ['RHEL7.2'].each { os ->
432 def newBuildJob = job(fullBuildJobName) {
434 shell("./build.sh verbose ${architecture} ${configuration}")
437 Utilities.setMachineAffinity(newBuildJob, os, 'latest-or-auto')
438 Utilities.standardJobSetup(newBuildJob, project, isPR, "*/${branch}")
439 Utilities.addArchival(newBuildJob, "bin/Product/**")
442 // Actual perf testing on the following OSes
443 def throughputOSList = ['Ubuntu14.04']
444 def throughputOptLevelList = ['full_opt', 'min_opt']
// Pre-compute "<os>_<opt_level>" names; the flow job fans out over these.
446 def throughputOSOptLevelList = []
448 throughputOSList.each { os ->
449 throughputOptLevelList.each { opt_level ->
450 throughputOSOptLevelList.add("${os}_${opt_level}")
454 throughputOSList.each { os ->
455 throughputOptLevelList.each { opt_level ->
456 def newJob = job(getFullThroughputJobName(project, "${os}_${opt_level}", isPR)) {
// NOTE(review): label is ubuntu_1604_clr_perf while throughputOSList is
// Ubuntu14.04 — confirm the 16.04 pool is intentionally used for 14.04 runs.
458 label('ubuntu_1604_clr_perf')
461 string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
467 stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that will be used to build the full title of a run in Benchview.')
472 stringParam('PRODUCT_BUILD', '', 'Build number from which to copy down the CoreCLR Product binaries built for Linux')
475 def osGroup = getOSGroup(os)
476 def runType = isPR ? 'private' : 'rolling'
477 def benchViewName = isPR ? 'coreclr-throughput private \$BenchviewCommitName' : 'coreclr-throughput rolling \$GIT_BRANCH_WITHOUT_ORIGIN \$GIT_COMMIT'
480 shell("bash ./tests/scripts/perf-prep.sh --throughput")
481 shell("./init-tools.sh")
482 copyArtifacts(fullBuildJobName) {
483 includePatterns("bin/Product/**")
485 buildNumber('\${PRODUCT_BUILD}')
// Same origin-stripping + BenchView metadata dance as the perf leg above.
488 shell("GIT_BRANCH_WITHOUT_ORIGIN=\$(echo \$GIT_BRANCH | sed \"s/[^/]*\\/\\(.*\\)/\\1 /\")\n" +
489 "python3.5 \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools/submission-metadata.py\" --name \" ${benchViewName} \" --user-email \"dotnet-bot@microsoft.com\"\n" +
490 "python3.5 \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools/build.py\" git --branch \$GIT_BRANCH_WITHOUT_ORIGIN --type ${runType}")
// NOTE(review): assembly_root points at the Windows_NT throughput-benchmarks
// package on a Linux run — likely intentional (assemblies are portable),
// but confirm.
491 shell("""python3.5 ./tests/scripts/run-throughput-perf.py \\
492 -arch \"${architecture}\" \\
494 -configuration \"${configuration}\" \\
495 -opt_level \"${opt_level}\" \\
496 -clr_root \"\${WORKSPACE}\" \\
497 -assembly_root \"\${WORKSPACE}/Microsoft.Benchview.ThroughputBenchmarks.${architecture}.Windows_NT/lib\" \\
498 -run_type \"${runType}\" \\
499 -benchview_path \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools\"""")
503 // Save machinedata.json to /artifact/bin/ Jenkins dir
504 def archiveSettings = new ArchivalSettings()
505 archiveSettings.addFiles('throughput-*.csv')
506 archiveSettings.addFiles('machinedata.json')
507 archiveSettings.setAlwaysArchive()
508 Utilities.addArchival(newJob, archiveSettings)
510 Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")
512 // For perf, we need to keep the run results longer
514 // Enable the log rotator
// Shorter retention than the Windows perf jobs (7 days / 25 builds).
516 artifactDaysToKeep(7)
518 artifactNumToKeep(25)
// Flow job: build once, then fan out per "<os>_<opt_level>" job, passing
// the build number through PRODUCT_BUILD.
525 def flowJobTPRunList = throughputOSOptLevelList.collect { os ->
526 "{ build(params + [PRODUCT_BUILD: b.build.number], '${getFullThroughputJobName(project, os, isPR)}') }"
528 def newFlowJob = buildFlowJob(Utilities.getFullJobName(project, "perf_throughput_linux_flow", isPR, '')) {
531 stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that you will be used to build the full title of a run in Benchview. The final name will be of the form <branch> private BenchviewCommitName')
535 // First, build the bits on RHEL7.2
536 b = build(params, '${fullBuildJobName}')
538 // Then, run the perf tests
540 ${flowJobTPRunList.join(",\n ")}
545 Utilities.setMachineAffinity(newFlowJob, 'Windows_NT', 'latest-or-auto')
546 Utilities.standardJobSetup(newFlowJob, project, isPR, "*/${branch}")
// PR: comment-only "test linux throughput flow" trigger; non-PR: push.
549 TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
550 builder.setGithubContext("Linux Throughput Perf Test Flow")
551 builder.triggerOnlyOnComment()
552 builder.setCustomTriggerPhrase("(?i).*test\\W+linux\\W+throughput\\W+flow.*")
553 builder.triggerForBranch(branch)
554 builder.emitTrigger(newFlowJob)
557 // Set a push trigger
558 TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
559 builder.emitTrigger(newFlowJob)
564 // Setup CoreCLR-Scenarios tests
// Windows scenario-benchmark jobs (JitBench et al.): one per
// (isPR, os, arch, jit, opt_level), including a 'tiered' opt level.
565 [true, false].each { isPR ->
566 ['Windows_NT'].each { os ->
567 ['x64', 'x86'].each { arch ->
568 ['ryujit'].each { jit ->
569 ['full_opt', 'min_opt', 'tiered'].each { opt_level ->
570 def architecture = arch
571 def newJob = job(Utilities.getFullJobName(project, "perf_scenarios_${os}_${arch}_${opt_level}_${jit}", isPR)) {
576 label('windows_server_2016_clr_perf')
579 string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
585 stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that you will be used to build the full title of a run in Benchview. The final name will be of the form <branch> private BenchviewCommitName')
// Scenario runs measure size/startup-style metrics; one iteration suffices.
590 stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '1', 'Size test, one iteration is sufficient')
591 stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '1', 'Size test, one iteration is sufficient')
594 def configuration = 'Release'
595 def runType = isPR ? 'private' : 'rolling'
596 def benchViewName = isPR ? 'CoreCLR-Scenarios private %BenchviewCommitName%' : 'CoreCLR-Scenarios rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'
597 def uploadString = '-uploadToBenchview'
// Same bootstrap as the perflab section: nuget + BenchView tooling.
601 batchFile("powershell -NoProfile wget https://dist.nuget.org/win-x86-commandline/latest/nuget.exe -OutFile \"%WORKSPACE%\\nuget.exe\"")
602 batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
603 batchFile("\"%WORKSPACE%\\nuget.exe\" install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
605 //Do this here to remove the origin but at the front of the branch name as this is a problem for BenchView
606 //we have to do it all as one statement because cmd is called each time and we lose the set environment variable
607 batchFile("if \"%GIT_BRANCH:~0,7%\" == \"origin/\" (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH:origin/=%\") else (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH%\")\n" +
608 "set \"BENCHVIEWNAME=${benchViewName}\"\n" +
609 "set \"BENCHVIEWNAME=%BENCHVIEWNAME:\"=\"\"%\"\n" +
610 "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"%BENCHVIEWNAME%\" --user-email \"dotnet-bot@microsoft.com\"\n" +
611 "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}")
612 batchFile("py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\"")
613 batchFile("set __TestIntermediateDir=int&&build.cmd ${configuration} ${architecture}")
615 batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
// NOTE(review): ${testEnv} is not defined in this visible span — presumably
// set on an elided line above; verify. -scenarioTest switches the runner
// into scenario mode; stabilityPrefix here omits /AFFINITY (cf. perflab).
617 def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -os ${os} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -outputdir \"%WORKSPACE%\\bin\\sandbox_logs\" -stabilityPrefix \"START \\\"CORECLR_PERF_RUN\\\" /B /WAIT /HIGH\" -scenarioTest"
620 batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\Scenario\\JitBench -group CoreCLR-Scenarios")
// Counter-collection pass is skipped for min_opt (as in the perflab jobs).
623 if (opt_level != 'min_opt') {
624 batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\Scenario\\JitBench -group CoreCLR-Scenarios -collectionFlags BranchMispredictions+CacheMisses+InstructionRetired")
629 def archiveSettings = new ArchivalSettings()
630 archiveSettings.addFiles('bin/sandbox_logs/**/*_log.txt')
631 archiveSettings.addFiles('bin/sandbox_logs/**/*.csv')
632 archiveSettings.addFiles('bin/sandbox_logs/**/*.xml')
633 archiveSettings.addFiles('bin/sandbox_logs/**/*.log')
634 archiveSettings.addFiles('bin/sandbox_logs/**/*.md')
635 archiveSettings.addFiles('bin/sandbox_logs/**/*.etl')
636 archiveSettings.addFiles('machinedata.json')
637 archiveSettings.setAlwaysArchive()
639 Utilities.addArchival(newJob, archiveSettings)
640 Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")
644 artifactDaysToKeep(14)
646 artifactNumToKeep(100)
// NOTE(review): 'opts'/'jitt' declarations are not visible in this span.
658 if (opt_level == 'min_opt') {
659 opts = '\\W+min_opts'
662 if (jit != 'ryujit') {
// PR: comment-only trigger; non-PR: full_opt on push, others daily.
666 TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
667 builder.setGithubContext("${os} ${arch} ${opt_level} ${jit} Performance Scenarios Tests")
668 builder.triggerOnlyOnComment()
669 builder.setCustomTriggerPhrase("(?i).*test\\W+${os}\\W+${arch}${opts}${jitt}\\W+perf\\W+scenarios.*")
670 builder.triggerForBranch(branch)
671 builder.emitTrigger(newJob)
673 else if (opt_level == 'full_opt') {
674 // Set a push trigger
675 TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
676 builder.emitTrigger(newJob)
679 // Set periodic trigger
680 Utilities.addPeriodicTrigger(newJob, '@daily')
688 // Setup size-on-disk test
// Nightly-only jobs (created with isPR=false) measuring the on-disk size of
// the product via SoDBench, per Windows architecture.
689 ['Windows_NT'].each { os ->
690 ['x64', 'x86'].each { arch ->
691 def architecture = arch
692 def newJob = job(Utilities.getFullJobName(project, "sizeondisk_${arch}", false)) {
696 string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
700 def channel = 'master'
701 def configuration = 'Release'
702 def runType = 'rolling'
703 def benchViewName = 'Dotnet Size on Disk %DATE% %TIME%'
704 def testBin = "%WORKSPACE%\\bin\\tests\\${os}.${architecture}.${configuration}"
705 def coreRoot = "${testBin}\\Tests\\Core_Root"
706 def benchViewTools = "%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools"
709 // Install nuget and get BenchView tools
710 batchFile("powershell -NoProfile wget https://dist.nuget.org/win-x86-commandline/latest/nuget.exe -OutFile \"%WORKSPACE%\\nuget.exe\"")
711 batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
712 batchFile("\"%WORKSPACE%\\nuget.exe\" install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
714 // Generate submission metadata for BenchView
715 // Do this here to remove the origin but at the front of the branch name as this is a problem for BenchView
716 // we have to do it all as one statement because cmd is called each time and we lose the set environment variable
717 batchFile("if \"%GIT_BRANCH:~0,7%\" == \"origin/\" (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH:origin/=%\") else (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH%\")\n" +
718 "set \"BENCHVIEWNAME=${benchViewName}\"\n" +
719 "set \"BENCHVIEWNAME=%BENCHVIEWNAME:\"=\"\"%\"\n" +
720 "py \"${benchViewTools}\\submission-metadata.py\" --name \"%BENCHVIEWNAME%\" --user-email \"dotnet-bot@microsoft.com\"\n" +
721 "py \"${benchViewTools}\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}")
723 // Generate machine data from BenchView
724 batchFile("py \"${benchViewTools}\\machinedata.py\"")
726 // Build CoreCLR and generate test layout
727 batchFile("set __TestIntermediateDir=int&&build.cmd ${configuration} ${architecture}")
728 batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
730 // Run the size on disk benchmark
731 batchFile("\"${coreRoot}\\CoreRun.exe\" \"${testBin}\\sizeondisk\\sodbench\\SoDBench\\SoDBench.exe\" -o \"%WORKSPACE%\\sodbench.csv\" --architecture ${arch} --channel ${channel}")
733 // From sodbench.csv, create measurement.json, then submission.json
734 batchFile("py \"${benchViewTools}\\measurement.py\" csv \"%WORKSPACE%\\sodbench.csv\" --metric \"Size on Disk\" --unit \"bytes\" --better \"desc\"")
735 batchFile("py \"${benchViewTools}\\submission.py\" measurement.json --build build.json --machine-data machinedata.json --metadata submission-metadata.json --group \"Dotnet Size on Disk\" --type ${runType} --config-name ${configuration} --architecture ${arch} --machinepool VM --config Channel ${channel}")
737 // If this is a PR, upload submission.json
// NOTE(review): this job is created with isPR=false and the upload below is
// unconditional — the comment above looks stale; confirm.
738 batchFile("py \"${benchViewTools}\\upload.py\" submission.json --container coreclr")
742 Utilities.setMachineAffinity(newJob, "Windows_NT", '20170427-elevated')
744 def archiveSettings = new ArchivalSettings()
745 archiveSettings.addFiles('bin/toArchive/**')
746 archiveSettings.addFiles('machinedata.json')
747 archiveSettings.setAlwaysArchive()
749 Utilities.addArchival(newJob, archiveSettings)
750 Utilities.standardJobSetup(newJob, project, false, "*/${branch}")
752 // Set the cron job here. We run nightly on each flavor, regardless of code changes
753 Utilities.addPeriodicTrigger(newJob, "@daily", true /*always run*/)
757 artifactDaysToKeep(14)
759 artifactNumToKeep(100)
771 // Setup IlLink tests
// ILLink (linker) size-benchmark jobs: x64 / ryujit / full_opt only.
772 [true, false].each { isPR ->
773 ['Windows_NT'].each { os ->
774 ['x64'].each { arch ->
775 ['ryujit'].each { jit ->
776 ['full_opt'].each { opt_level ->
777 def architecture = arch
778 def newJob = job(Utilities.getFullJobName(project, "perf_illink_${os}_${arch}_${opt_level}_${jit}", isPR)) {
783 string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
789 stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that you will be used to build the full title of a run in Benchview. The final name will be of the form <branch> private BenchviewCommitName')
// Size measurement only; one iteration is enough.
794 stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '1', 'Size test, one iteration is sufficient')
795 stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '1', 'Size test, one iteration is sufficient')
798 def configuration = 'Release'
799 def runType = isPR ? 'private' : 'rolling'
// NOTE(review): this reuses the 'CoreCLR-Scenarios' BenchView name prefix in
// the ILLink section — possibly a copy/paste from the scenarios block; confirm.
800 def benchViewName = isPR ? 'CoreCLR-Scenarios private %BenchviewCommitName%' : 'CoreCLR-Scenarios rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'
801 def uploadString = '-uploadToBenchview'
// Same bootstrap as the other Windows sections.
805 batchFile("powershell -NoProfile wget https://dist.nuget.org/win-x86-commandline/latest/nuget.exe -OutFile \"%WORKSPACE%\\nuget.exe\"")
806 batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
807 batchFile("\"%WORKSPACE%\\nuget.exe\" install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
809 //Do this here to remove the origin but at the front of the branch name as this is a problem for BenchView
810 //we have to do it all as one statement because cmd is called each time and we lose the set environment variable
811 batchFile("if \"%GIT_BRANCH:~0,7%\" == \"origin/\" (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH:origin/=%\") else (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH%\")\n" +
812 "set \"BENCHVIEWNAME=${benchViewName}\"\n" +
813 "set \"BENCHVIEWNAME=%BENCHVIEWNAME:\"=\"\"%\"\n" +
814 "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"%BENCHVIEWNAME%\" --user-email \"dotnet-bot@microsoft.com\"\n" +
815 "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}")
816 batchFile("py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\"")
817 batchFile("set __TestIntermediateDir=int&&build.cmd ${configuration} ${architecture}")
819 batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")
// NOTE(review): ${testEnv} is not defined in this visible span — presumably
// set on an elided line above; verify. No stabilityPrefix here (size test).
821 def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -os ${os} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -outputdir \"%WORKSPACE%\\bin\\sandbox_logs\" -scenarioTest"
// linkbench needs the VS C++ toolchain in the environment (vcvarsall).
824 batchFile("\"%VS140COMNTOOLS%\\..\\..\\VC\\vcvarsall.bat\" x86_amd64 && " +
825 "py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\linkbench\\linkbench -group ILLink -nowarmup")
829 def archiveSettings = new ArchivalSettings()
830 archiveSettings.addFiles('bin/sandbox_logs/**/*_log.txt')
831 archiveSettings.addFiles('bin/sandbox_logs/**/*.csv')
832 archiveSettings.addFiles('bin/sandbox_logs/**/*.xml')
833 archiveSettings.addFiles('bin/sandbox_logs/**/*.log')
834 archiveSettings.addFiles('bin/sandbox_logs/**/*.md')
835 archiveSettings.addFiles('bin/sandbox_logs/**/*.etl')
836 archiveSettings.addFiles('machinedata.json')
837 archiveSettings.setAlwaysArchive()
839 // Set the label (currently we are only measuring size, therefore we are running on VM).
840 Utilities.setMachineAffinity(newJob, "Windows_NT", '20170427-elevated')
841 Utilities.addArchival(newJob, archiveSettings)
842 Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")
846 artifactDaysToKeep(14)
848 artifactNumToKeep(100)
// PR: comment-only "test <os> <arch> illink" trigger; non-PR: push trigger.
859 TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
860 builder.setGithubContext("${os} ${arch} ${opt_level} ${jit} IlLink Tests")
861 builder.triggerOnlyOnComment()
862 builder.setCustomTriggerPhrase("(?i).*test\\W+${os}\\W+${arch}\\W+illink.*")
863 builder.triggerForBranch(branch)
864 builder.emitTrigger(newJob)
867 // Set a push trigger
868 TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
869 builder.emitTrigger(newJob)
877 Utilities.createHelperJob(this, project, branch,
878 "Welcome to the ${project} Perf help",