// Separate large perf benchmarks into their own legs (#15231)
// buildpipeline/perf-pipeline.groovy
@Library('dotnet-ci') _

// Incoming parameters.  Access with "params.<param name>".
// Note that the parameters will be set as env variables so we cannot use names that conflict
// with the engineering system parameter names.

//-------------------------- Globals ---------------------------------//

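// Test folders a perf leg may be asked to run. 'first half' and 'last half' split the
// JIT CodeQuality benchmarks across two legs so the large perf runs no longer share a
// single leg (#15231); 'all' runs everything in one leg.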
def validTestFolders = [
    'Benchstones',
    'BenchmarksGame',
    'Bytemark',
    'Math',
    'Span',
    'first half',
    'last half'
    ]

//--------------------- Windows Functions ----------------------------//

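// Build the product and generate the test layout on Windows, then stash the binaries
// for the test legs. Baseline builds first check out the merge's parent so both sides
// of a comparison are built by this pipeline.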
def windowsBuild(String arch, String config, String pgo, boolean isBaseline) {
    checkout scm

    String pgoBuildFlag = ((pgo == 'nopgo') ? '-nopgooptimize' : '-enforcepgo')
    String baselineString = ""

    // For baseline builds, checkout the merge's parent
    if (isBaseline) {
        baselineString = "-baseline"
        bat "git checkout HEAD^^1"
    }

    bat "set __TestIntermediateDir=int&&.\\build.cmd -${config} -${arch} -skipbuildpackages ${pgoBuildFlag}"
    bat "tests\\runtest.cmd ${config} ${arch} GenerateLayoutOnly"
    bat "rd /s /q bin\\obj"

    // Stash build artifacts. Stash tests in an additional stash to be used by Linux test runs
    stash name: "nt-${arch}-${pgo}${baselineString}-build-artifacts", includes: 'bin/**'
    stash name: "nt-${arch}-${pgo}${baselineString}-test-artifacts", includes: 'bin/tests/**'
}

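// Run the xunit-performance benchmarks on Windows. 'scenario' selects the mix:
// 'perf' runs Perflab and the JIT CodeQuality benchmarks (optionally limited to one
// folder from validTestFolders, or split via 'first half'/'last half'), 'jitbench'
// runs the JitBench scenario tests, and 'illink' runs LinkBench.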
def windowsPerf(String arch, String config, String uploadString, String runType, String opt_level, String jit, String pgo, String scenario, boolean isBaseline, boolean isProfileOn, String testFolder) {
    withCredentials([string(credentialsId: 'CoreCLR Perf BenchView Sas', variable: 'BV_UPLOAD_SAS_TOKEN')]) {
        checkout scm

        // The test layout unstashed below is built on Windows, so the path component
        // is always Windows_NT ('os' is not a parameter of this function).
        String os = 'Windows_NT'

        String baselineString = ""
        if (isBaseline) {
            baselineString = "-baseline"
        }
        dir ('.') {
            unstash "nt-${arch}-${pgo}${baselineString}-test-artifacts"
            unstash "benchview-tools"
            unstash "metadata"
        }

        String test = ''
        if (testFolder != 'all') {
            test = testFolder
        }

        String pgoTestFlag = ((pgo == 'nopgo') ? '-nopgo' : '')

        // We want to use the baseline metadata for baseline runs. We expect to find the
        // submission metadata in submission-metadata.json
        if (isBaseline) {
            bat "move /y submission-metadata-baseline.json submission-metadata.json"
        }

        String testEnv = ""

        String failedOutputLogFilename = "run-xunit-perf-scenario.log"

        bat "py \".\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\""
        bat ".\\init-tools.cmd"

        // We run run-xunit-perf differently for each of the different job types

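        // With profiling enabled, collect hardware counters (branch mispredictions,
        // cache misses, instructions retired); otherwise use plain stopwatch timing.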
        String profileArg = isProfileOn ? "BranchMispredictions+CacheMisses+InstructionRetired" : "stopwatch"

        String runXUnitCommonArgs = "-arch ${arch} -configuration ${config} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} ${pgoTestFlag} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -outputdir \"%WORKSPACE%\\bin\\sandbox_logs\""
        if (scenario == 'perf') {
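            // The stability prefix runs each benchmark pinned to the second logical
            // processor (affinity mask 0x2) at high priority to reduce noise.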
            String runXUnitPerfCommonArgs = "${runXUnitCommonArgs} -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH /AFFINITY 0x2\""
            if (test == 'first half' || testFolder == 'all') {
                String runXUnitPerflabArgs = "${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\performance\\perflab\\Perflab -library"

                profileArg = isProfileOn ? "default+${profileArg}+gcapi" : profileArg
                bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerflabArgs} -collectionFlags ${profileArg}"
            }

            if (test == 'first half') {
                [
                    'Burgers',
                    'Devirtualization',
                    'FractalPerf',
                    'Inlining',
                    'Layout'
                ].each { benchmark ->
                    String runXUnitCodeQualityArgs = "${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\Jit\\Performance\\CodeQuality\\${benchmark}"
                    bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitCodeQualityArgs} -collectionFlags ${profileArg}"
                }
            }
            else if (test == 'last half') {
                [
                    'Linq',
                    'Roslyn',
                    'SciMark',
                    'Serialization',
                    'V8'
                ].each { benchmark ->
                    String runXUnitCodeQualityArgs = "${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\Jit\\Performance\\CodeQuality\\${benchmark}"
                    bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitCodeQualityArgs} -collectionFlags ${profileArg}"
                }
            }
            else {
                String runXUnitCodeQualityArgs = "${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\Jit\\Performance\\CodeQuality\\${test}"
                bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitCodeQualityArgs} -collectionFlags ${profileArg}"
            }
        }
        else if (scenario == 'jitbench') {
            String runXUnitPerfCommonArgs = "${runXUnitCommonArgs} -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH\" -scenarioTest"
            runXUnitPerfCommonArgs = "${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\performance\\Scenario\\JitBench -group CoreCLR-Scenarios"

            if (!(opt_level == 'min_opt' && isProfileOn)) {
                bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -collectionFlags ${profileArg}"
            }
        }
        else if (scenario == 'illink') {
            String runXUnitPerfCommonArgs = "${runXUnitCommonArgs} -scenarioTest"
            bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\performance\\linkbench\\linkbench -group ILLink -nowarmup"
        }
        archiveArtifacts allowEmptyArchive: false, artifacts:'bin/sandbox_logs/**,machinedata.json'
    }
}

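// Run the throughput benchmarks (tests/scripts/run-throughput-perf.py) on Windows
// against the stashed assembly set and upload the results to BenchView.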
def windowsThroughput(String arch, String os, String config, String runType, String optLevel, String jit, String pgo, boolean isBaseline) {
    withCredentials([string(credentialsId: 'CoreCLR Perf BenchView Sas', variable: 'BV_UPLOAD_SAS_TOKEN')]) {
        checkout scm

        String baselineString = ""
        if (isBaseline) {
            baselineString = "-baseline"
        }

        String pgoTestFlag = ((pgo == 'nopgo') ? '-nopgo' : '')

        dir ('.') {
            unstash "nt-${arch}-${pgo}${baselineString}-build-artifacts"
            unstash "benchview-tools"
            unstash "throughput-benchmarks-${arch}"
            unstash "metadata"
        }

        // We want to use the baseline metadata for baseline runs. We expect to find the
        // submission metadata in submission-metadata.json
        if (isBaseline) {
            bat "move /y submission-metadata-baseline.json submission-metadata.json"
        }

        bat "py \".\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\""
        bat ".\\init-tools.cmd"
        bat "tests\\runtest.cmd ${config} ${arch} GenerateLayoutOnly"
        bat "py -u tests\\scripts\\run-throughput-perf.py -arch ${arch} -os ${os} -configuration ${config} -opt_level ${optLevel} -jit_name ${jit} ${pgoTestFlag} -clr_root \"%WORKSPACE%\" -assembly_root \"%WORKSPACE%\\${arch}ThroughputBenchmarks\\lib\" -benchview_path \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" -run_type ${runType}"
        archiveArtifacts allowEmptyArchive: false, artifacts:'throughput-*.csv,machinedata.json'
    }
}

//------------------------ Linux Functions ----------------------------//

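// Build the product on Linux and stash it. Test binaries come from the Windows
// build's test-artifacts stash, so only the product build is stashed here.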
def linuxBuild(String arch, String config, String pgo, boolean isBaseline) {
    checkout scm

    String pgoBuildFlag = ((pgo == 'nopgo') ? '-nopgooptimize' : '')
    String baselineString = ""

    // For baseline runs, checkout the merge's parent
    if (isBaseline) {
        baselineString = "-baseline"
        sh "git checkout HEAD^1"
    }

    sh "./build.sh -verbose -${config} -${arch} ${pgoBuildFlag}"
    stash name: "linux-${arch}-${pgo}${baselineString}-build-artifacts", includes: 'bin/**'
}

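// Run the xunit-performance benchmarks on Linux, combining the Linux-built product
// with the Windows-built test layout.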
def linuxPerf(String arch, String os, String config, String uploadString, String runType, String optLevel, String pgo, boolean isBaseline) {
    withCredentials([string(credentialsId: 'CoreCLR Perf BenchView Sas', variable: 'BV_UPLOAD_SAS_TOKEN')]) {
        checkout scm

        String baselineString = ""
        if (isBaseline) {
            baselineString = "-baseline"
        }

        String pgoTestFlag = ((pgo == 'nopgo') ? '--nopgo' : '')

        dir ('.') {
            unstash "linux-${arch}-${pgo}${baselineString}-build-artifacts"
            unstash "nt-${arch}-${pgo}${baselineString}-test-artifacts"
            unstash "metadata"
        }
        dir ('./tests/scripts') {
            unstash "benchview-tools"
        }

        // We want to use the baseline metadata for baseline runs. We expect to find the
        // submission metadata in submission-metadata.json
        if (isBaseline) {
            sh "mv -f submission-metadata-baseline.json submission-metadata.json"
        }

        sh "./tests/scripts/perf-prep.sh"
        sh "./init-tools.sh"
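        // taskset pins the run to the second logical processor (mask 0x2) and nice
        // raises its priority, mirroring the Windows stability prefix above.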
        sh "./tests/scripts/run-xunit-perf.sh --testRootDir=\"\${WORKSPACE}/bin/tests/Windows_NT.${arch}.${config}\" --optLevel=${optLevel} ${pgoTestFlag} --testNativeBinDir=\"\${WORKSPACE}/bin/obj/Linux.${arch}.${config}/tests\" --coreClrBinDir=\"\${WORKSPACE}/bin/Product/Linux.${arch}.${config}\" --mscorlibDir=\"\${WORKSPACE}/bin/Product/Linux.${arch}.${config}\" --coreFxBinDir=\"\${WORKSPACE}/corefx\" --runType=\"${runType}\" --benchViewOS=\"${os}\" --stabilityPrefix=\"taskset 0x00000002 nice --adjustment=-10\" --uploadToBenchview --generatebenchviewdata=\"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools\""
        archiveArtifacts allowEmptyArchive: false, artifacts:'bin/toArchive/**,machinedata.json'
    }
}

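// Linux counterpart of windowsThroughput: run run-throughput-perf.py against the
// stashed throughput benchmarks and upload the results to BenchView.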
def linuxThroughput(String arch, String os, String config, String uploadString, String runType, String optLevel, String pgo, boolean isBaseline) {
    withCredentials([string(credentialsId: 'CoreCLR Perf BenchView Sas', variable: 'BV_UPLOAD_SAS_TOKEN')]) {
        checkout scm

        String baselineString = ""
        if (isBaseline) {
            baselineString = "-baseline"
        }

        String pgoTestFlag = ((pgo == 'nopgo') ? '-nopgo' : '')

        dir ('.') {
            unstash "linux-${arch}-${pgo}${baselineString}-build-artifacts"
            unstash "throughput-benchmarks-${arch}"
            unstash "metadata"
        }
        dir ('./tests/scripts') {
            unstash "benchview-tools"
        }

        // We want to use the baseline metadata for baseline runs. We expect to find the
        // submission metadata in submission-metadata.json
        if (isBaseline) {
            sh "mv -f submission-metadata-baseline.json submission-metadata.json"
        }

        sh "./tests/scripts/perf-prep.sh --throughput"
        sh "./init-tools.sh"
        sh "python3 ./tests/scripts/run-throughput-perf.py -arch \"${arch}\" -os \"${os}\" -configuration \"${config}\" -opt_level ${optLevel} ${pgoTestFlag} -clr_root \"\${WORKSPACE}\" -assembly_root \"\${WORKSPACE}/${arch}ThroughputBenchmarks/lib\" -run_type \"${runType}\" -benchview_path \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools\""
        archiveArtifacts allowEmptyArchive: false, artifacts:'throughput-*.csv,machinedata.json'
    }
}

//-------------------------- Job Definitions --------------------------//

String config = "Release"
String runType = isPR() ? 'private' : 'rolling'

String uploadString = '-uploadToBenchview'

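// Generate the BenchView submission metadata and fetch the BenchView tools and
// throughput benchmarks once up front, then stash them for every downstream leg.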
stage ('Get Metadata and download Throughput Benchmarks') {
    simpleNode('Windows_NT', '20170427-elevated') {
        checkout scm
        String commit = getCommit()
        def benchViewName = isPR() ? "coreclr private %ghprbPullTitle%" : "coreclr rolling %GIT_BRANCH_WITHOUT_ORIGIN% ${commit}"
        def benchViewUser = getUserEmail()
        bat "mkdir tools\n" +
            "powershell Invoke-WebRequest https://dist.nuget.org/win-x86-commandline/v4.1.0/nuget.exe -OutFile %WORKSPACE%\\tools\\nuget.exe"
        bat "%WORKSPACE%\\tools\\nuget.exe install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -Prerelease -ExcludeVersion"
        bat "%WORKSPACE%\\tools\\nuget.exe install Microsoft.BenchView.ThroughputBenchmarks.x64.Windows_NT -Source https://dotnet.myget.org/F/dotnet-core -Prerelease -ExcludeVersion"
        bat "%WORKSPACE%\\tools\\nuget.exe install Microsoft.BenchView.ThroughputBenchmarks.x86.Windows_NT -Source https://dotnet.myget.org/F/dotnet-core -Prerelease -ExcludeVersion"
        bat "set \"GIT_BRANCH_WITHOUT_ORIGIN=%GitBranchOrCommit:*/=%\"\n" +
            "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"${benchViewName}\" --user-email \"${benchViewUser}\"\n" +
            "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}\n" +
            "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"${benchViewName}-baseline\" --user-email \"${benchViewUser}\" -o submission-metadata-baseline.json\n"

        // TODO: revisit these moves. Originally, stash could not find the directories as currently named
        bat "move Microsoft.BenchView.ThroughputBenchmarks.x64.Windows_NT x64ThroughputBenchmarks"
        bat "move Microsoft.BenchView.ThroughputBenchmarks.x86.Windows_NT x86ThroughputBenchmarks"

        stash includes: 'Microsoft.BenchView.JSONFormat/**/*', name: 'benchview-tools'
        stash name: "metadata", includes: "*.json"
        stash name: "throughput-benchmarks-x64", includes: "x64ThroughputBenchmarks/**/*"
        stash name: "throughput-benchmarks-x86", includes: "x86ThroughputBenchmarks/**/*"
    }
}

// TODO: use non-pgo builds for throughput?
def innerLoopBuilds = [
    "windows x64 pgo build": {
        simpleNode('Windows_NT','latest') {
            windowsBuild('x64', config, 'pgo', false)
        }
    },
    "windows x86 pgo build": {
        simpleNode('Windows_NT','latest') {
            windowsBuild('x86', config, 'pgo', false)
        }
    }
]

// Only run non-pgo builds on official builds
def outerLoopBuilds = [:]

if (!isPR()) {
    outerLoopBuilds = [
        "windows x64 nopgo build": {
            simpleNode('Windows_NT','latest') {
                windowsBuild('x64', config, 'nopgo', false)
            }
        },
        "windows x86 nopgo build": {
            simpleNode('Windows_NT','latest') {
                windowsBuild('x86', config, 'nopgo', false)
            }
        },
        "linux x64 pgo build": {
            simpleNode('RHEL7.2', 'latest-or-auto') {
                linuxBuild('x64', config, 'pgo', false)
            }
        },
        "linux x64 nopgo build": {
            simpleNode('RHEL7.2', 'latest-or-auto') {
                linuxBuild('x64', config, 'nopgo', false)
            }
        }
    ]
}

/*def baselineBuilds = [:]

if (isPR()) {
    baselineBuilds = [
        "windows x64 pgo baseline build": {
            simpleNode('Windows_NT','latest') {
                windowsBuild('x64', config, 'pgo', true)
            }
        },
        "windows x86 pgo baseline build": {
            simpleNode('Windows_NT','latest') {
                windowsBuild('x86', config, 'pgo', true)
            }
        }
    ]
}*/

stage ('Build Product') {
    parallel innerLoopBuilds + outerLoopBuilds //+ baselineBuilds
}

// Pipeline builds don't allow non-whitelisted methods (e.g. ArrayList.add) when running
// from a script in SCM, so list the test jobs manually for now.
// Run the main test mix on all runs (PR + official)

def innerLoopTests = [:]

['x64', 'x86'].each { arch ->
    ['full_opt'].each { opt_level ->
        [false].each { isBaseline ->
            validTestFolders.each { benchmark ->
                String baseline = ""
                if (isBaseline) {
                    baseline = " baseline"
                }
                if (isPR() || !isBaseline) {
                    innerLoopTests["windows ${arch} ryujit ${opt_level} pgo ${benchmark}${baseline} perf"] = {
                        simpleNode('windows_server_2016_clr_perf', 180) {
                            windowsPerf(arch, config, uploadString, runType, opt_level, 'ryujit', 'pgo', 'perf', isBaseline, true, benchmark)
                        }
                    }
                }
            }
        }
    }
}

// Run the full test mix only on commits, not PRs
def outerLoopTests = [:]

if (!isPR()) {
    // JitBench and ILLink run as single scenario legs; x64 is assumed here since
    // 'arch' and 'baseline' are not in scope at this point.
    outerLoopTests["windows x64 ryujit full_opt pgo jitbench"] = {
        simpleNode('windows_server_2016_clr_perf', 180) {
            windowsPerf('x64', config, uploadString, runType, 'full_opt', 'ryujit', 'pgo', 'jitbench', false, false, '')
        }
    }

    outerLoopTests["windows x64 ryujit full_opt pgo illink"] = {
        simpleNode('Windows_NT', '20170427-elevated') {
            windowsPerf('x64', config, uploadString, runType, 'full_opt', 'ryujit', 'pgo', 'illink', false, false, '')
        }
    }

    ['x64', 'x86'].each { arch ->
        ['min_opt', 'full_opt'].each { opt_level ->
            ['ryujit'].each { jit ->
                ['pgo', 'nopgo'].each { pgo_enabled ->
                    [true, false].each { isProfileOn ->
                        // Suffix the key so the profiled and unprofiled runs do not
                        // overwrite each other in the map.
                        String profiling = isProfileOn ? ' profiling' : ''
                        outerLoopTests["windows ${arch} ${jit} ${opt_level} ${pgo_enabled}${profiling} perf"] = {
                            simpleNode('windows_server_2016_clr_perf', 180) {
                                windowsPerf(arch, config, uploadString, runType, opt_level, jit, pgo_enabled, 'perf', false, isProfileOn, 'all')
                            }
                        }
                    }

                    // Throughput does not collect profiling events (windowsThroughput
                    // takes no profiling argument), so schedule it once per pgo flavor.
                    outerLoopTests["windows ${arch} ${jit} ${opt_level} ${pgo_enabled} throughput"] = {
                        simpleNode('windows_server_2016_clr_perf', 180) {
                            windowsThroughput(arch, 'Windows_NT', config, runType, opt_level, jit, pgo_enabled, false)
                        }
                    }
                }
            }
        }
    }

    ['x64'].each { arch ->
        ['min_opt', 'full_opt'].each { opt_level ->
            ['pgo', 'nopgo'].each { pgo_enabled ->
                outerLoopTests["linux ${arch} ryujit ${opt_level} ${pgo_enabled} perf"] = {
                    simpleNode('ubuntu_1604_clr_perf', 180) {
                        linuxPerf(arch, 'Ubuntu16.04', config, uploadString, runType, opt_level, pgo_enabled, false)
                    }
                }

                outerLoopTests["linux ${arch} ryujit ${opt_level} ${pgo_enabled} throughput"] = {
                    simpleNode('ubuntu_1604_clr_perf', 180) {
                        linuxThroughput(arch, 'Ubuntu16.04', config, uploadString, runType, opt_level, pgo_enabled, false)
                    }
                }
            }
        }
    }
}

stage ('Run testing') {
    parallel innerLoopTests + outerLoopTests
}