// Import the utility functionality.

import jobs.generation.*;

def project = GithubProject
def branch = GithubBranchName
def projectName = Utilities.getFolderName(project)
def projectFolder = projectName + '/' + Utilities.getFolderName(branch)

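// Map an OS name to its OS group (for example, getOSGroup('Ubuntu16.04') returns 'Linux').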
def static getOSGroup(def os) {
    def osGroupMap = ['Ubuntu14.04':'Linux',
        'RHEL7.2': 'Linux',
        'Ubuntu16.04': 'Linux',
        'Debian8.4':'Linux',
        'Fedora24':'Linux',
        'OSX':'OSX',
        'Windows_NT':'Windows_NT',
        'FreeBSD':'FreeBSD',
        'CentOS7.1': 'Linux',
        'OpenSUSE13.2': 'Linux',
        'OpenSUSE42.1': 'Linux',
        'LinuxARMEmulator': 'Linux']
    def osGroup = osGroupMap.get(os, null)
    assert osGroup != null : "Could not find os group for ${os}"
    return osGroup
}

// Setup perflab tests runs
[true, false].each { isPR ->
    ['Windows_NT'].each { os ->
        ['x64', 'x86'].each { arch ->
            [true, false].each { isSmoketest ->
                ['ryujit'].each { jit ->
                    ['full_opt'].each { opt_level ->

                        def architecture = arch
                        def jobName = isSmoketest ? "perf_perflab_${os}_${arch}_${opt_level}_${jit}_smoketest" : "perf_perflab_${os}_${arch}_${opt_level}_${jit}"
                        def testEnv = ""

                        def newJob = job(Utilities.getFullJobName(project, jobName, isPR)) {
                            // Set the label.
                            label('windows_server_2016_clr_perf')
                            wrappers {
                                credentialsBinding {
                                    string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
                                }
                            }

                            if (isPR) {
                                parameters {
                                    stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that will be used to build the full title of a run in Benchview.  The final name will be of the form <branch> private BenchviewCommitName')
                                }
                            }

                            if (isSmoketest) {
                                parameters {
                                    stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '2', 'Sets the number of iterations to two.  This is a smoke test, so two iterations is enough to check that the tests run correctly')
                                    stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '2', 'Sets the number of iterations to two.  This is a smoke test, so two iterations is enough to check that the tests run correctly')
                                }
                            }
                            else {
                                parameters {
                                    stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '21', 'Sets the number of iterations to twenty-one.  We do this to limit the amount of data that we upload, as twenty iterations is enough to get a good sample')
                                    stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '21', 'Sets the number of iterations to twenty-one.  We do this to limit the amount of data that we upload, as twenty iterations is enough to get a good sample')
                                }
                            }

                            def configuration = 'Release'
                            def runType = isPR ? 'private' : 'rolling'
                            def benchViewName = isPR ? 'coreclr private %BenchviewCommitName%' : 'coreclr rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'
                            def uploadString = isSmoketest ? '' : '-uploadToBenchview'

                            steps {
                                // Batch

                                batchFile("powershell -NoProfile wget https://dist.nuget.org/win-x86-commandline/latest/nuget.exe -OutFile \"%WORKSPACE%\\nuget.exe\"")
                                batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
                                batchFile("\"%WORKSPACE%\\nuget.exe\" install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
                                // Do this here to remove the 'origin/' prefix from the front of the branch name, since it causes problems for BenchView.
                                // We have to do it all as one statement because cmd is invoked for each batchFile step, so an environment variable set in a separate step would be lost.
                                batchFile("if \"%GIT_BRANCH:~0,7%\" == \"origin/\" (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH:origin/=%\") else (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH%\")\n" +
                                "set \"BENCHVIEWNAME=${benchViewName}\"\n" +
                                "set \"BENCHVIEWNAME=%BENCHVIEWNAME:\"=\"\"%\"\n" +
                                "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"%BENCHVIEWNAME%\" --user-email \"dotnet-bot@microsoft.com\"\n" +
                                "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}")
                                batchFile("py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\"")
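                                // Build CoreCLR and generate the test layout.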
                                batchFile("set __TestIntermediateDir=int&&build.cmd ${configuration} ${architecture}")

                                batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")

                                def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -os ${os} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -outputdir \"%WORKSPACE%\\bin\\sandbox_logs\" -stabilityPrefix \"START \\\"CORECLR_PERF_RUN\\\" /B /WAIT /HIGH /AFFINITY 0x2\""

                                // Run with just stopwatch: Profile=Off
                                batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library")
                                batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality")

                                // Run with the full set of counters enabled: Profile=On
                                if (opt_level != 'min_opt') {
                                    batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\perflab\\Perflab -library -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi")
                                    batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\Jit\\Performance\\CodeQuality -collectionFlags default+BranchMispredictions+CacheMisses+InstructionRetired+gcapi")
                                }
                            }
                        }

                        if (isSmoketest) {
                            Utilities.setMachineAffinity(newJob, "Windows_NT", '20170427-elevated')
                        }
                        def archiveSettings = new ArchivalSettings()
                        archiveSettings.addFiles('bin/sandbox_logs/**')
                        archiveSettings.addFiles('machinedata.json')
                        archiveSettings.setAlwaysArchive()

                        Utilities.addArchival(newJob, archiveSettings)
                        Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")

                        newJob.with {
                            logRotator {
                                artifactDaysToKeep(30)
                                daysToKeep(30)
                                artifactNumToKeep(200)
                                numToKeep(200)
                            }
                            wrappers {
                                timeout {
                                    absolute(240)
                                }
                            }
                        }

                        if (isPR) {
                            TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
                            if (isSmoketest) {
                                builder.setGithubContext("${os} ${arch} ${opt_level} ${jit} CoreCLR Perf Tests Correctness")
                            }
                            else {
                                builder.setGithubContext("${os} ${arch} ${opt_level} ${jit} CoreCLR Perf Tests")

                                def opts = ""
                                if (opt_level == 'min_opt') {
                                    opts = '\\W+min_opts'
                                }
                                def jitt = ""
                                if (jit != 'ryujit') {
                                    jitt = "\\W+${jit}"
                                }

                                builder.triggerOnlyOnComment()
                                builder.setCustomTriggerPhrase("(?i).*test\\W+${os}\\W+${arch}${opts}${jitt}\\W+perf.*")
                            }

                            builder.triggerForBranch(branch)
                            builder.emitTrigger(newJob)
                        }
                        else if (opt_level == 'full_opt') {
                            // Set a push trigger
                            TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
                            builder.emitTrigger(newJob)
                        }
                        else {
                            // Set periodic trigger
                            Utilities.addPeriodicTrigger(newJob, '@daily')
                        }
                    }
                }
            }
        }
    }
}

// Setup throughput perflab tests runs
[true, false].each { isPR ->
    ['Windows_NT'].each { os ->
        ['x64', 'x86'].each { arch ->
            ['ryujit'].each { jit ->
                [true, false].each { pgo_optimized ->
                    ['full_opt'].each { opt_level ->
                        def architecture = arch

                        def pgo_build = ""
                        def pgo_test = ""
                        def pgo_string = "pgo"
                        if (!pgo_optimized) {
                            pgo_build = " -nopgooptimize"
                            pgo_test = " -nopgo"
                            pgo_string = "nopgo"
                        }

                        def newJob = job(Utilities.getFullJobName(project, "perf_throughput_perflab_${os}_${arch}_${opt_level}_${jit}_${pgo_string}", isPR)) {
                            // Set the label.
                            label('windows_server_2016_clr_perf')
                            wrappers {
                                credentialsBinding {
                                    string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
                                }
                            }

                            if (isPR) {
                                parameters {
                                    stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that will be used to build the full title of a run in Benchview.')
                                }
                            }

                            def configuration = 'Release'
                            def runType = isPR ? 'private' : 'rolling'
                            def benchViewName = isPR ? 'coreclr-throughput private %BenchviewCommitName%' : 'coreclr-throughput rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'

                            steps {
                                // Batch
                                batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
                                batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os}\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os}\"")
                                batchFile("C:\\Tools\\nuget.exe install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
                                batchFile("C:\\Tools\\nuget.exe install Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os} -Source https://dotnet.myget.org/F/dotnet-core -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")
                                // Do this here to remove the 'origin/' prefix from the front of the branch name, since it causes problems for BenchView.
                                // We have to do it all as one statement because cmd is invoked for each batchFile step, so an environment variable set in a separate step would be lost.
                                batchFile("if \"%GIT_BRANCH:~0,7%\" == \"origin/\" (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH:origin/=%\") else (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH%\")\n" +
                                "set \"BENCHVIEWNAME=${benchViewName}\"\n" +
                                "set \"BENCHVIEWNAME=%BENCHVIEWNAME:\"=\"\"%\"\n" +
                                "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"${benchViewName}\" --user-email \"dotnet-bot@microsoft.com\"\n" +
                                "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}")
                                batchFile("py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\"")
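                                // Build the product (skipping managed tests) and run the throughput benchmarks.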
                                batchFile("set __TestIntermediateDir=int&&build.cmd ${configuration} ${architecture}${pgo_build} skiptests")
                                batchFile("py -u tests\\scripts\\run-throughput-perf.py -arch ${arch} -os ${os} -configuration ${configuration} -opt_level ${opt_level} -jit_name ${jit}${pgo_test} -clr_root \"%WORKSPACE%\" -assembly_root \"%WORKSPACE%\\Microsoft.BenchView.ThroughputBenchmarks.${architecture}.${os}\\lib\" -benchview_path \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" -run_type ${runType}")
                            }
                        }

                        // Archive the throughput results
                        def archiveSettings = new ArchivalSettings()
                        archiveSettings.addFiles('throughput-*.csv')
                        archiveSettings.setAlwaysArchive()
                        Utilities.addArchival(newJob, archiveSettings)

                        Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")

                        if (isPR) {
                            def opts = ""
                            if (opt_level == 'min_opt') {
                                opts = '\\W+min_opts'
                            }

                            def jitt = ""
                            if (jit != 'ryujit') {
                                jitt = "\\W+${jit}"
                            }

                            def pgo_trigger = ""
                            if (pgo_optimized) {
                                pgo_trigger = "\\W+nopgo"
                            }


                            TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
                            builder.setGithubContext("${os} ${arch} ${opt_level} ${jit} ${pgo_string} CoreCLR Throughput Perf Tests")
                            builder.triggerOnlyOnComment()
                            builder.setCustomTriggerPhrase("(?i).*test\\W+${os}\\W+${arch}${opts}${jitt}${pgo_trigger}\\W+throughput.*")
                            builder.triggerForBranch(branch)
                            builder.emitTrigger(newJob)
                        }
                        else if (opt_level == 'full_opt' && pgo_optimized) {
                            // Set a push trigger
                            TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
                            builder.emitTrigger(newJob)
                        }
                        else {
                            // Set periodic trigger
                            Utilities.addPeriodicTrigger(newJob, '@daily')
                        }
                    }
                }
            }
        }
    }
}

def static getFullPerfJobName(def project, def os, def isPR) {
    return Utilities.getFullJobName(project, "perf_${os}", isPR)
}

// Create the Linux coreclr perf test legs: build on RHEL7.2, then run the perf tests on each perf OS
[true, false].each { isPR ->
    def fullBuildJobName = Utilities.getFullJobName(project, 'perf_linux_build', isPR)
    def architecture = 'x64'
    def configuration = 'Release'

    // Build has to happen on RHEL7.2 (that's where we produce the bits we ship)
    ['RHEL7.2'].each { os ->
        def newBuildJob = job(fullBuildJobName) {
            steps {
                shell("./build.sh verbose ${architecture} ${configuration}")
                shell("./build-test.sh generatelayoutonly ${architecture} ${configuration}")
            }
        }
        Utilities.setMachineAffinity(newBuildJob, os, 'latest-or-auto')
        Utilities.standardJobSetup(newBuildJob, project, isPR, "*/${branch}")
        Utilities.addArchival(newBuildJob, "bin/Product/**,bin/obj/*/tests/**/*.dylib,bin/obj/*/tests/**/*.so,bin/tests/**", "bin/Product/**/.nuget/**")
    }


    // Actual perf testing on the following OSes
    def perfOSList = ['Ubuntu16.04']
    perfOSList.each { os ->
        def newJob = job(getFullPerfJobName(project, os, isPR)) {

            label('ubuntu_1604_clr_perf')
            wrappers {
                credentialsBinding {
                    string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
                }
            }

            if (isPR) {
                parameters {
                    stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that will be used to build the full title of a run in Benchview.  The final name will be of the form <branch> private BenchviewCommitName')
                }
            }

            parameters {
                // Cap the maximum number of iterations to 21.
                stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '21', 'Sets the number of iterations to twenty-one.  We do this to limit the amount of data that we upload, as twenty iterations is enough to get a good sample')
                stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '21', 'Sets the number of iterations to twenty-one.  We do this to limit the amount of data that we upload, as twenty iterations is enough to get a good sample')
                stringParam('PRODUCT_BUILD', '', 'Build number from which to copy down the CoreCLR Product binaries built for Linux')
            }

            def osGroup = getOSGroup(os)
            def runType = isPR ? 'private' : 'rolling'
            def benchViewName = isPR ? 'coreclr private \$BenchviewCommitName' : 'coreclr rolling \$GIT_BRANCH_WITHOUT_ORIGIN \$GIT_COMMIT'
            def uploadString = '-uploadToBenchview'

            def runXUnitCommonArgs = "-arch ${architecture} -os ${os} -configuration ${configuration} -stabilityPrefix \"taskset 0x00000002 nice --adjustment=-10\" -generateBenchviewData \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools\" ${uploadString} -runtype ${runType} -outputdir \"\${WORKSPACE}/bin/sandbox_logs\""

            steps {
                shell("./tests/scripts/perf-prep.sh --nocorefx")
                shell("./init-tools.sh")
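                // Copy down the product and test bits produced by the Linux build job.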
                copyArtifacts(fullBuildJobName) {
                    includePatterns("bin/**")
                    buildSelector {
                        buildNumber('\${PRODUCT_BUILD}')
                    }
                }
                shell("GIT_BRANCH_WITHOUT_ORIGIN=\$(echo \$GIT_BRANCH | sed \"s/[^/]*\\/\\(.*\\)/\\1 /\")\n" +
                "python3.5 \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools/submission-metadata.py\" --name \" ${benchViewName} \" --user-email \"dotnet-bot@microsoft.com\"\n" +
                "python3.5 \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools/build.py\" git --branch \$GIT_BRANCH_WITHOUT_ORIGIN --type ${runType}")
                shell("""python3 ./tests/scripts/run-xunit-perf.py -testBinLoc bin/tests/Windows_NT.${architecture}.${configuration}/JIT/Performance/CodeQuality ${runXUnitCommonArgs}""")
            }
        }

        def archiveSettings = new ArchivalSettings()
        archiveSettings.addFiles('bin/sandbox_logs/**')
        archiveSettings.addFiles('machinedata.json')
        archiveSettings.setAlwaysArchive()

        Utilities.addArchival(newJob, archiveSettings)
        Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")

        // For perf, we need to keep the run results longer
        newJob.with {
            // Enable the log rotator
            logRotator {
                artifactDaysToKeep(30)
                daysToKeep(30)
                artifactNumToKeep(200)
                numToKeep(200)
            }
            wrappers {
                timeout {
                    absolute(240)
                }
            }
        }
    } // os

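    // Each perf OS gets a closure that triggers its perf job, passing along the build number of the RHEL7.2 build (b).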
    def flowJobPerfRunList = perfOSList.collect { os ->
        "{ build(params + [PRODUCT_BUILD: b.build.number], '${getFullPerfJobName(project, os, isPR)}') }"
    }
    def newFlowJob = buildFlowJob(Utilities.getFullJobName(project, "perf_linux_flow", isPR, '')) {
        if (isPR) {
            parameters {
                stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that will be used to build the full title of a run in Benchview.  The final name will be of the form <branch> private BenchviewCommitName')
            }
        }
        buildFlow("""
// First, build the bits on RHEL7.2
b = build(params, '${fullBuildJobName}')

// Then, run the perf tests
parallel(
    ${flowJobPerfRunList.join(",\n    ")}
)
""")
    }

    Utilities.setMachineAffinity(newFlowJob, 'Windows_NT', 'latest-or-auto')
    Utilities.standardJobSetup(newFlowJob, project, isPR, "*/${branch}")

    if (isPR) {
        TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
        builder.setGithubContext("Linux Perf Test Flow")
        builder.triggerOnlyOnComment()
        builder.setCustomTriggerPhrase("(?i).*test\\W+linux\\W+perf\\W+flow.*")
        builder.triggerForBranch(branch)
        builder.emitTrigger(newFlowJob)
    }
    else {
        // Set a push trigger
        TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
        builder.emitTrigger(newFlowJob)
    }

} // isPR

def static getFullThroughputJobName(def project, def os, def isPR) {
    return Utilities.getFullJobName(project, "perf_throughput_${os}", isPR)
}

// Create the Linux coreclr throughput test legs: build on RHEL7.2, then run the throughput tests on each perf OS
[true, false].each { isPR ->
    def fullBuildJobName = Utilities.getFullJobName(project, 'perf_throughput_linux_build', isPR)
    def architecture = 'x64'
    def configuration = 'Release'

    // Build has to happen on RHEL7.2 (that's where we produce the bits we ship)
    ['RHEL7.2'].each { os ->
        def newBuildJob = job(fullBuildJobName) {
            steps {
                shell("./build.sh verbose ${architecture} ${configuration}")
            }
        }
        Utilities.setMachineAffinity(newBuildJob, os, 'latest-or-auto')
        Utilities.standardJobSetup(newBuildJob, project, isPR, "*/${branch}")
        Utilities.addArchival(newBuildJob, "bin/Product/**")
    }

    // Actual perf testing on the following OSes
    def throughputOSList = ['Ubuntu16.04']
    def throughputOptLevelList = ['full_opt']

    def throughputOSOptLevelList = []

    throughputOSList.each { os ->
        throughputOptLevelList.each { opt_level ->
            throughputOSOptLevelList.add("${os}_${opt_level}")
        }
    }

    throughputOSList.each { os ->
        throughputOptLevelList.each { opt_level ->
            def newJob = job(getFullThroughputJobName(project, "${os}_${opt_level}", isPR)) {

                label('ubuntu_1604_clr_perf')
                wrappers {
                    credentialsBinding {
                        string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
                    }
                }

                if (isPR) {
                    parameters {
                        stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that will be used to build the full title of a run in Benchview.')
                    }
                }

                parameters {
                    stringParam('PRODUCT_BUILD', '', 'Build number from which to copy down the CoreCLR Product binaries built for Linux')
                }

                def osGroup = getOSGroup(os)
                def runType = isPR ? 'private' : 'rolling'
                def benchViewName = isPR ? 'coreclr-throughput private \$BenchviewCommitName' : 'coreclr-throughput rolling \$GIT_BRANCH_WITHOUT_ORIGIN \$GIT_COMMIT'

                steps {
                    shell("bash ./tests/scripts/perf-prep.sh --throughput")
                    shell("./init-tools.sh")
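                    // Copy down the product bits produced by the throughput Linux build job.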
                    copyArtifacts(fullBuildJobName) {
                        includePatterns("bin/Product/**")
                        buildSelector {
                            buildNumber('\${PRODUCT_BUILD}')
                        }
                    }
                    shell("GIT_BRANCH_WITHOUT_ORIGIN=\$(echo \$GIT_BRANCH | sed \"s/[^/]*\\/\\(.*\\)/\\1 /\")\n" +
                    "python3.5 \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools/submission-metadata.py\" --name \" ${benchViewName} \" --user-email \"dotnet-bot@microsoft.com\"\n" +
                    "python3.5 \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools/build.py\" git --branch \$GIT_BRANCH_WITHOUT_ORIGIN --type ${runType}")
                    shell("""python3.5 ./tests/scripts/run-throughput-perf.py \\
                    -arch \"${architecture}\" \\
                    -os \"${os}\" \\
                    -configuration \"${configuration}\" \\
                    -opt_level \"${opt_level}\" \\
                    -clr_root \"\${WORKSPACE}\" \\
                    -assembly_root \"\${WORKSPACE}/Microsoft.Benchview.ThroughputBenchmarks.${architecture}.Windows_NT/lib\" \\
                    -run_type \"${runType}\" \\
                    -benchview_path \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools\"""")
                }
            }

            // Save machinedata.json to /artifact/bin/ Jenkins dir
            def archiveSettings = new ArchivalSettings()
            archiveSettings.addFiles('throughput-*.csv')
            archiveSettings.addFiles('machinedata.json')
            archiveSettings.setAlwaysArchive()
            Utilities.addArchival(newJob, archiveSettings)

            Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")

            // For perf, we need to keep the run results longer
            newJob.with {
                // Enable the log rotator
                logRotator {
                    artifactDaysToKeep(7)
                    daysToKeep(300)
                    artifactNumToKeep(25)
                    numToKeep(1000)
                }
            }
        } // opt_level
    } // os

    def flowJobTPRunList = throughputOSOptLevelList.collect { os ->
        "{ build(params + [PRODUCT_BUILD: b.build.number], '${getFullThroughputJobName(project, os, isPR)}') }"
    }
    def newFlowJob = buildFlowJob(Utilities.getFullJobName(project, "perf_throughput_linux_flow", isPR, '')) {
        if (isPR) {
            parameters {
                stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that will be used to build the full title of a run in Benchview.  The final name will be of the form <branch> private BenchviewCommitName')
            }
        }
        buildFlow("""
// First, build the bits on RHEL7.2
b = build(params, '${fullBuildJobName}')

// Then, run the perf tests
parallel(
    ${flowJobTPRunList.join(",\n    ")}
)
""")
    }

    Utilities.setMachineAffinity(newFlowJob, 'Windows_NT', 'latest-or-auto')
    Utilities.standardJobSetup(newFlowJob, project, isPR, "*/${branch}")

    if (isPR) {
        TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
        builder.setGithubContext("Linux Throughput Perf Test Flow")
        builder.triggerOnlyOnComment()
        builder.setCustomTriggerPhrase("(?i).*test\\W+linux\\W+throughput\\W+flow.*")
        builder.triggerForBranch(branch)
        builder.emitTrigger(newFlowJob)
    }
    else {
        // Set a push trigger
        TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
        builder.emitTrigger(newFlowJob)
    }

} // isPR

// Setup CoreCLR-Scenarios tests
[true, false].each { isPR ->
    ['Windows_NT'].each { os ->
        ['x64', 'x86'].each { arch ->
            ['ryujit'].each { jit ->
                ['full_opt'].each { opt_level ->
                    def architecture = arch
                    def newJob = job(Utilities.getFullJobName(project, "perf_scenarios_${os}_${arch}_${opt_level}_${jit}", isPR)) {

                        def testEnv = ""

                        // Set the label.
                        label('windows_server_2016_clr_perf')
                        wrappers {
                            credentialsBinding {
                                string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
                            }
                        }

                        if (isPR) {
                            parameters {
                                stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that will be used to build the full title of a run in Benchview.  The final name will be of the form <branch> private BenchviewCommitName')
                            }
                        }

                        parameters {
                            stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '1', 'Size test, one iteration is sufficient')
                            stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '1', 'Size test, one iteration is sufficient')
                        }

                        def configuration = 'Release'
                        def runType = isPR ? 'private' : 'rolling'
                        def benchViewName = isPR ? 'CoreCLR-Scenarios private %BenchviewCommitName%' : 'CoreCLR-Scenarios rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'
                        def uploadString = '-uploadToBenchview'

                        steps {
                            // Batch
                            batchFile("powershell -NoProfile wget https://dist.nuget.org/win-x86-commandline/latest/nuget.exe -OutFile \"%WORKSPACE%\\nuget.exe\"")
                            batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
                            batchFile("\"%WORKSPACE%\\nuget.exe\" install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")

                            // Do this here to remove the 'origin/' prefix from the front of the branch name, since it causes problems for BenchView.
                            // We have to do it all as one statement because cmd is invoked for each batchFile step, so an environment variable set in a separate step would be lost.
                            batchFile("if \"%GIT_BRANCH:~0,7%\" == \"origin/\" (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH:origin/=%\") else (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH%\")\n" +
                            "set \"BENCHVIEWNAME=${benchViewName}\"\n" +
                            "set \"BENCHVIEWNAME=%BENCHVIEWNAME:\"=\"\"%\"\n" +
                            "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"%BENCHVIEWNAME%\" --user-email \"dotnet-bot@microsoft.com\"\n" +
                            "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}")
                            batchFile("py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\"")
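                            // Build CoreCLR and generate the test layout.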
                            batchFile("set __TestIntermediateDir=int&&build.cmd ${configuration} ${architecture}")

                            batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")

                            def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -os ${os} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -outputdir \"%WORKSPACE%\\bin\\sandbox_logs\" -stabilityPrefix \"START \\\"CORECLR_PERF_RUN\\\" /B /WAIT /HIGH\" -scenarioTest"

                            // Profile=Off
                            batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\Scenario\\JitBench -group CoreCLR-Scenarios")

                            // Profile=On
                            if (opt_level != 'min_opt') {
                                batchFile("py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\Scenario\\JitBench -group CoreCLR-Scenarios -collectionFlags BranchMispredictions+CacheMisses+InstructionRetired")
                            }
                        }
                    }

                    def archiveSettings = new ArchivalSettings()
                    archiveSettings.addFiles('bin/sandbox_logs/**')
                    archiveSettings.addFiles('machinedata.json')
                    archiveSettings.setAlwaysArchive()

                    Utilities.addArchival(newJob, archiveSettings)
                    Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")

                    newJob.with {
                        logRotator {
                            artifactDaysToKeep(30)
                            daysToKeep(30)
                            artifactNumToKeep(200)
                            numToKeep(200)
                        }
                        wrappers {
                            timeout {
                                absolute(240)
                            }
                        }
                    }

                    if (isPR) {
                        def opts = ""
                        if (opt_level == 'min_opt') {
                            opts = '\\W+min_opts'
                        }
                        def jitt = ""
                        if (jit != 'ryujit') {
                            jitt = "\\W+${jit}"
                        }

                        TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
                        builder.setGithubContext("${os} ${arch} ${opt_level} ${jit} Performance Scenarios Tests")
                        builder.triggerOnlyOnComment()
                        builder.setCustomTriggerPhrase("(?i).*test\\W+${os}\\W+${arch}${opts}${jitt}\\W+perf\\W+scenarios.*")
                        builder.triggerForBranch(branch)
                        builder.emitTrigger(newJob)
                    }
                    else if (opt_level == 'full_opt') {
                        // Set a push trigger
                        TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
                        builder.emitTrigger(newJob)
                    }
                    else {
                        // Set periodic trigger
                        Utilities.addPeriodicTrigger(newJob, '@daily')
                    }
                }
            }
        }
    }
}

// Setup size-on-disk test
['Windows_NT'].each { os ->
    ['x64', 'x86'].each { arch ->
        def architecture = arch
        def newJob = job(Utilities.getFullJobName(project, "sizeondisk_${arch}", false)) {

            wrappers {
                credentialsBinding {
                    string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
                }
            }

            def channel = 'master'
            def configuration = 'Release'
            def runType = 'rolling'
            def benchViewName = 'Dotnet Size on Disk %DATE% %TIME%'
            def testBin = "%WORKSPACE%\\bin\\tests\\${os}.${architecture}.${configuration}"
            def coreRoot = "${testBin}\\Tests\\Core_Root"
            def benchViewTools = "%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools"

            steps {
                // Install nuget and get BenchView tools
                batchFile("powershell -NoProfile wget https://dist.nuget.org/win-x86-commandline/latest/nuget.exe -OutFile \"%WORKSPACE%\\nuget.exe\"")
                batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
                batchFile("\"%WORKSPACE%\\nuget.exe\" install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")

                // Generate submission metadata for BenchView
                // Do this here to remove the 'origin/' prefix from the front of the branch name, since it causes problems for BenchView.
                // We have to do it all as one statement because cmd is invoked for each batchFile step, so an environment variable set in a separate step would be lost.
                batchFile("if \"%GIT_BRANCH:~0,7%\" == \"origin/\" (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH:origin/=%\") else (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH%\")\n" +
                "set \"BENCHVIEWNAME=${benchViewName}\"\n" +
                "set \"BENCHVIEWNAME=%BENCHVIEWNAME:\"=\"\"%\"\n" +
                "py \"${benchViewTools}\\submission-metadata.py\" --name \"%BENCHVIEWNAME%\" --user-email \"dotnet-bot@microsoft.com\"\n" +
                "py \"${benchViewTools}\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}")

                // Generate machine data from BenchView
                batchFile("py \"${benchViewTools}\\machinedata.py\"")

                // Build CoreCLR and generate the test layout
                batchFile("set __TestIntermediateDir=int&&build.cmd ${configuration} ${architecture}")
                batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")

                // Run the size on disk benchmark
                batchFile("\"${coreRoot}\\CoreRun.exe\" \"${testBin}\\sizeondisk\\sodbench\\SoDBench\\SoDBench.exe\" -o \"%WORKSPACE%\\sodbench.csv\" --architecture ${arch} --channel ${channel}")

                // From sodbench.csv, create measurement.json, then submission.json
                batchFile("py \"${benchViewTools}\\measurement.py\" csv \"%WORKSPACE%\\sodbench.csv\" --metric \"Size on Disk\" --unit \"bytes\" --better \"desc\"")
                batchFile("py \"${benchViewTools}\\submission.py\" measurement.json --build build.json --machine-data machinedata.json --metadata submission-metadata.json --group \"Dotnet Size on Disk\" --type ${runType} --config-name ${configuration} --architecture ${arch} --machinepool VM --config Channel ${channel}")

                // Upload submission.json to the coreclr container
                batchFile("py \"${benchViewTools}\\upload.py\" submission.json --container coreclr")
            }
        }

        Utilities.setMachineAffinity(newJob, "Windows_NT", '20170427-elevated')

        def archiveSettings = new ArchivalSettings()
        archiveSettings.addFiles('bin/toArchive/**')
        archiveSettings.addFiles('machinedata.json')
        archiveSettings.setAlwaysArchive()

        Utilities.addArchival(newJob, archiveSettings)
        Utilities.standardJobSetup(newJob, project, false, "*/${branch}")

        // Set the cron job here.  We run nightly on each flavor, regardless of code changes
        Utilities.addPeriodicTrigger(newJob, "@daily", true /*always run*/)

        newJob.with {
            logRotator {
                artifactDaysToKeep(30)
                daysToKeep(30)
                artifactNumToKeep(200)
                numToKeep(200)
            }
            wrappers {
                timeout {
                    absolute(240)
                }
            }
        }
    }
}

// Setup IlLink tests
[true, false].each { isPR ->
    ['Windows_NT'].each { os ->
        ['x64'].each { arch ->
            ['ryujit'].each { jit ->
                ['full_opt'].each { opt_level ->
                    def architecture = arch
                    def newJob = job(Utilities.getFullJobName(project, "perf_illink_${os}_${arch}_${opt_level}_${jit}", isPR)) {

                        def testEnv = ""
                        wrappers {
                            credentialsBinding {
                                string('BV_UPLOAD_SAS_TOKEN', 'CoreCLR Perf BenchView Sas')
                            }
                        }

                        if (isPR) {
                            parameters {
                                stringParam('BenchviewCommitName', '\${ghprbPullTitle}', 'The name that will be used to build the full title of a run in Benchview.  The final name will be of the form <branch> private BenchviewCommitName')
                            }
                        }

                        parameters {
                            stringParam('XUNIT_PERFORMANCE_MAX_ITERATION', '1', 'Size test, one iteration is sufficient')
                            stringParam('XUNIT_PERFORMANCE_MAX_ITERATION_INNER_SPECIFIED', '1', 'Size test, one iteration is sufficient')
                        }

                        def configuration = 'Release'
                        def runType = isPR ? 'private' : 'rolling'
                        def benchViewName = isPR ? 'CoreCLR-Scenarios private %BenchviewCommitName%' : 'CoreCLR-Scenarios rolling %GIT_BRANCH_WITHOUT_ORIGIN% %GIT_COMMIT%'
                        def uploadString = '-uploadToBenchview'

                        steps {
                            // Batch
                            batchFile("powershell -NoProfile wget https://dist.nuget.org/win-x86-commandline/latest/nuget.exe -OutFile \"%WORKSPACE%\\nuget.exe\"")
                            batchFile("if exist \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\" rmdir /s /q \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\"")
                            batchFile("\"%WORKSPACE%\\nuget.exe\" install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -OutputDirectory \"%WORKSPACE%\" -Prerelease -ExcludeVersion")

                            // Do this here to remove the 'origin/' prefix from the front of the branch name, since it causes problems for BenchView.
                            // We have to do it all as one statement because cmd is invoked for each batchFile step, so an environment variable set in a separate step would be lost.
                            batchFile("if \"%GIT_BRANCH:~0,7%\" == \"origin/\" (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH:origin/=%\") else (set \"GIT_BRANCH_WITHOUT_ORIGIN=%GIT_BRANCH%\")\n" +
                            "set \"BENCHVIEWNAME=${benchViewName}\"\n" +
                            "set \"BENCHVIEWNAME=%BENCHVIEWNAME:\"=\"\"%\"\n" +
                            "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"%BENCHVIEWNAME%\" --user-email \"dotnet-bot@microsoft.com\"\n" +
                            "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}")
                            batchFile("py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\"")
                            batchFile("set __TestIntermediateDir=int&&build.cmd ${configuration} ${architecture}")

                            batchFile("tests\\runtest.cmd ${configuration} ${architecture} GenerateLayoutOnly")

                            def runXUnitPerfCommonArgs = "-arch ${arch} -configuration ${configuration} -os ${os} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -outputdir \"%WORKSPACE%\\bin\\sandbox_logs\" -scenarioTest"

                            // Scenario: ILLink
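                            // Set up the VS x86_amd64 native tools environment (vcvarsall) before running the LinkBench tests.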
                            batchFile("\"%VS140COMNTOOLS%\\..\\..\\VC\\vcvarsall.bat\" x86_amd64 && " +
                            "py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${architecture}.${configuration}\\performance\\linkbench\\linkbench -group ILLink -nowarmup")
                        }
                    }

                    def archiveSettings = new ArchivalSettings()
                    archiveSettings.addFiles('bin/sandbox_logs/**')
                    archiveSettings.addFiles('machinedata.json')
                    archiveSettings.setAlwaysArchive()

                    // Set the label (currently we are only measuring size, therefore we are running on VM).
                    Utilities.setMachineAffinity(newJob, "Windows_NT", '20170427-elevated')
                    Utilities.addArchival(newJob, archiveSettings)
                    Utilities.standardJobSetup(newJob, project, isPR, "*/${branch}")

                    newJob.with {
                        logRotator {
                            artifactDaysToKeep(30)
                            daysToKeep(30)
                            artifactNumToKeep(200)
                            numToKeep(200)
                        }
                        wrappers {
                            timeout {
                                absolute(240)
                            }
                        }
                    }

                    if (isPR) {
                        TriggerBuilder builder = TriggerBuilder.triggerOnPullRequest()
                        builder.setGithubContext("${os} ${arch} ${opt_level} ${jit} IlLink Tests")
                        builder.triggerOnlyOnComment()
                        builder.setCustomTriggerPhrase("(?i).*test\\W+${os}\\W+${arch}\\W+illink.*")
                        builder.triggerForBranch(branch)
                        builder.emitTrigger(newJob)
                    }
                    else {
                        // Set a push trigger
                        TriggerBuilder builder = TriggerBuilder.triggerOnCommit()
                        builder.emitTrigger(newJob)
                    }
                }
            }
        }
    }
}

Utilities.createHelperJob(this, project, branch,
    "Welcome to the ${project} Perf help",
    "Have a nice day!")