1 @Library('dotnet-ci') _
3 // Incoming parameters. Access with "params.<param name>".
4 // Note that the parameters will be set as env variables so we cannot use names that conflict
5 // with the engineering system parameter names.
7 //-------------------------- Globals ---------------------------------//
9 def validTestFolders = [
19 //--------------------- Windows Functions ----------------------------//
// Builds CoreCLR on Windows for one arch/config/PGO flavor and stashes the
// binaries and test bits for downstream perf/throughput jobs.
// Params: arch ('x64'/'x86'), config (e.g. 'Release'), pgo ('pgo'/'nopgo'),
//         isBaseline (build the merge's parent instead of the merge head).
// NOTE(review): this view of the function is missing lines (e.g. the
// `if (isBaseline)` guard and closing braces) — comments below hedge accordingly.
def windowsBuild(String arch, String config, String pgo, boolean isBaseline) {
    // 'nopgo' skips PGO optimization entirely; any other value enforces PGO at build time.
    String pgoBuildFlag = ((pgo == 'nopgo') ? '-nopgooptimize' : '-enforcepgo')
    String baselineString = ""
    // For baseline builds, checkout the merge's parent
    // (presumably guarded by `if (isBaseline)` on a missing line — TODO confirm)
    baselineString = "-baseline"
    // ^^ escapes ^ for cmd.exe, so this actually runs `git checkout HEAD^1` (first parent).
    bat "git checkout HEAD^^1"
    // Short intermediate dir name (path-length mitigation assumed), build without packages.
    bat "set __TestIntermediateDir=int&&.\\build.cmd -${config} -${arch} -skipbuildpackages ${pgoBuildFlag}"
    bat "tests\\runtest.cmd ${config} ${arch} GenerateLayoutOnly"
    // Drop intermediates so the stashes below only carry what downstream jobs need.
    bat "rd /s /q bin\\obj"
    // Stash build artifacts. Stash tests in an additional stash to be used by Linux test runs
    stash name: "nt-${arch}-${pgo}${baselineString}-build-artifacts", includes: 'bin/**'
    stash name: "nt-${arch}-${pgo}${baselineString}-test-artifacts", includes: 'bin/tests/**'
// Runs a Windows perf job for one of three scenarios ('perf', 'jitbench',
// 'illink') against a previously stashed build, then uploads results to BenchView.
// NOTE(review): many lines of this function are not visible in this view —
// closing braces, loop/guard headers, and some names (`testEnv`, `test`, `os`,
// `benchmark`) are defined on missing lines. Comments below hedge accordingly.
def windowsPerf(String arch, String config, String uploadString, String runType, String opt_level, String jit, String pgo, String scenario, boolean isBaseline, boolean isProfileOn, String testFolder) {
    // The SAS credential is exposed to child scripts as BV_UPLOAD_SAS_TOKEN for BenchView uploads.
    withCredentials([string(credentialsId: 'CoreCLR Perf BenchView Sas', variable: 'BV_UPLOAD_SAS_TOKEN')]) {
        String baselineString = ""
        // presumably guarded by `if (isBaseline)` on a missing line — TODO confirm
        baselineString = "-baseline"
        // Reuse the test bits stashed by windowsBuild() plus the BenchView tooling.
        unstash "nt-${arch}-${pgo}${baselineString}-test-artifacts"
        unstash "benchview-tools"
        // 'all' runs the full mix; anything else selects a specific test folder.
        if (testFolder != 'all') {
        String pgoTestFlag = ((pgo == 'nopgo') ? '-nopgo' : '')
        // We want to use the baseline metadata for baseline runs. We expect to find the submission metadata in
        // submission-metadata.py
        bat "move /y submission-metadata-baseline.json submission-metadata.json"
        // NOTE(review): failedOutputLogFilename is not used in the visible lines.
        String failedOutputLogFilename = "run-xunit-perf-scenario.log"
        bat "py \".\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\""
        bat ".\\init-tools.cmd"
        // We run run-xunit-perf differently for each of the different job types
        // With profiling on, collect CPU performance counters; otherwise plain stopwatch timing.
        String profileArg = isProfileOn ? "BranchMispredictions+CacheMisses+InstructionRetired" : "stopwatch"
        // NOTE(review): `testEnv` is not defined in the visible lines — verify against the full file.
        String runXUnitCommonArgs = "-arch ${arch} -configuration ${config} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} ${pgoTestFlag} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -outputdir \"%WORKSPACE%\\bin\\sandbox_logs\""
        if (scenario == 'perf') {
            // Stability prefix: run at HIGH priority with affinity mask 0x2 (second logical CPU).
            String runXUnitPerfCommonArgs = "${runXUnitCommonArgs} -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH /AFFINITY 0x2\""
            // NOTE(review): `test` is not defined in the visible lines (testFolder?) — verify.
            if (test == 'first half' || testFolder == 'all')
                String runXUnitPerflabArgs = "${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\performance\\perflab\\Perflab -library"
                // Perflab runs widen the counter set when profiling is on.
                profileArg = isProfileOn ? "default+${profileArg}+gcapi" : profileArg
                bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerflabArgs} -collectionFlags ${profileArg}"
            if (test == 'first half') {
                String runXUnitCodeQualityArgs = "${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\Jit\\Performance\\CodeQuality\\${benchmark}"
                bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitCodeQualityArgs} -collectionFlags ${profileArg}"
            else if (test == 'last half') {
                // `benchmark` iterates over a list whose contents are on lines missing from this view.
                ].each { benchmark ->
                String runXUnitCodeQualityArgs = "${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\Jit\\Performance\\CodeQuality\\${benchmark}"
                bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitCodeQualityArgs} -collectionFlags ${profileArg}"
            // Single-folder path: run just the requested CodeQuality benchmark.
            String runXUnitCodeQualityArgs = "${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\Jit\\Performance\\CodeQuality\\${test}"
            bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitCodeQualityArgs} -collectionFlags ${profileArg}"
        else if (scenario == 'jitbench') {
            // JitBench scenario: HIGH priority but no CPU affinity pin, run as a scenario test.
            String runXUnitPerfCommonArgs = "${runXUnitCommonArgs} -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH\" -scenarioTest"
            runXUnitPerfCommonArgs = "${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\performance\\Scenario\\JitBench -group CoreCLR-Scenarios"
            // Skip the min_opt + profiling combination.
            if (!(opt_level == 'min_opt' && isProfileOn)) {
                // BUG(review): `profileArgs` looks like a typo — the variable defined above is `profileArg`.
                bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -collectionFlags ${profileArgs}"
        else if (scenario == 'illink') {
            String runXUnitPerfCommonArgs = "${runXUnitCommonArgs} -scenarioTest"
            bat "tests\\scripts\\run-xunit-perf.cmd ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\performance\\linkbench\\linkbench -group ILLink -nowarmup"
        // Archive the sandbox logs and machine description for this run.
        archiveArtifacts allowEmptyArchive: false, artifacts:'bin/sandbox_logs/**,machinedata.json'
// Runs the Windows throughput measurement (run-throughput-perf.py) for one
// configuration against stashed build artifacts, uploading to BenchView.
// NOTE(review): lines are missing from this view (baseline guard, closing braces).
def windowsThroughput(String arch, String os, String config, String runType, String optLevel, String jit, String pgo, boolean isBaseline) {
    // SAS credential exposed as BV_UPLOAD_SAS_TOKEN for BenchView uploads.
    withCredentials([string(credentialsId: 'CoreCLR Perf BenchView Sas', variable: 'BV_UPLOAD_SAS_TOKEN')]) {
        String baselineString = ""
        // presumably inside `if (isBaseline)` in the full file — TODO confirm
        baselineString = "-baseline"
        String pgoTestFlag = ((pgo == 'nopgo') ? '-nopgo' : '')
        // Pull in the matching Windows build, BenchView tooling, and the throughput benchmark assemblies.
        unstash "nt-${arch}-${pgo}${baselineString}-build-artifacts"
        unstash "benchview-tools"
        unstash "throughput-benchmarks-${arch}"
        // We want to use the baseline metadata for baseline runs. We expect to find the submission metadata in
        // submission-metadata.py
        bat "move /y submission-metadata-baseline.json submission-metadata.json"
        bat "py \".\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\""
        bat ".\\init-tools.cmd"
        bat "tests\\runtest.cmd ${config} ${arch} GenerateLayoutOnly"
        bat "py -u tests\\scripts\\run-throughput-perf.py -arch ${arch} -os ${os} -configuration ${config} -opt_level ${optLevel} -jit_name ${jit} ${pgoTestFlag} -clr_root \"%WORKSPACE%\" -assembly_root \"%WORKSPACE%\\${arch}ThroughputBenchmarks\\lib\" -benchview_path \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" -run_type ${runType}"
        // CSV results plus machine description are archived for each run.
        archiveArtifacts allowEmptyArchive: false, artifacts:'throughput-*.csv,machinedata.json'
168 //------------------------ Linux Functions ----------------------------//
// Builds CoreCLR on Linux for one arch/config/PGO flavor and stashes bin/**.
// NOTE(review): lines are missing from this view (baseline guard, closing braces).
def linuxBuild(String arch, String config, String pgo, boolean isBaseline) {
    // Unlike the Windows build, no '-enforcepgo' equivalent is passed for pgo
    // builds here — the flag is simply empty unless pgo == 'nopgo'.
    String pgoBuildFlag = ((pgo == 'nopgo') ? '-nopgooptimize' : '')
    String baselineString = ""
    // For baseline runs, checkout the merge's parent
    // (presumably guarded by `if (isBaseline)` on a missing line — TODO confirm)
    baselineString = "-baseline"
    sh "git checkout HEAD^1"
    sh "./build.sh -verbose -${config} -${arch} ${pgoBuildFlag}"
    stash name: "linux-${arch}-${pgo}${baselineString}-build-artifacts", includes: 'bin/**'
// Runs the Linux perf job for one configuration and uploads to BenchView.
// Test binaries come from the Windows test stash; only the product build is Linux-native.
// NOTE(review): lines are missing from this view (baseline guard, closing braces).
def linuxPerf(String arch, String os, String config, String uploadString, String runType, String optLevel, String pgo, boolean isBaseline) {
    // SAS credential exposed as BV_UPLOAD_SAS_TOKEN for BenchView uploads.
    withCredentials([string(credentialsId: 'CoreCLR Perf BenchView Sas', variable: 'BV_UPLOAD_SAS_TOKEN')]) {
        String baselineString = ""
        // presumably inside `if (isBaseline)` in the full file — TODO confirm
        baselineString = "-baseline"
        // Note the double-dash form here; the Windows helpers pass single-dash '-nopgo'.
        String pgoTestFlag = ((pgo == 'nopgo') ? '--nopgo' : '')
        unstash "linux-${arch}-${pgo}${baselineString}-build-artifacts"
        // Reuses the test bits stashed by the Windows build (note the Windows_NT testRootDir below).
        unstash "nt-${arch}-${pgo}${baselineString}-test-artifacts"
        // BenchView tooling is unpacked under tests/scripts on Linux.
        dir ('./tests/scripts') {
            unstash "benchview-tools"
        // We want to use the baseline metadata for baseline runs. We expect to find the submission metadata in
        // submission-metadata.py
        sh "mv -f submission-metadata-baseline.json submission-metadata.json"
        sh "./tests/scripts/perf-prep.sh"
        // Stability prefix pins the run via taskset mask 0x2 and nice --adjustment=-10.
        sh "./tests/scripts/run-xunit-perf.sh --testRootDir=\"\${WORKSPACE}/bin/tests/Windows_NT.${arch}.${config}\" --optLevel=${optLevel} ${pgoTestFlag} --testNativeBinDir=\"\${WORKSPACE}/bin/obj/Linux.${arch}.${config}/tests\" --coreClrBinDir=\"\${WORKSPACE}/bin/Product/Linux.${arch}.${config}\" --mscorlibDir=\"\${WORKSPACE}/bin/Product/Linux.${arch}.${config}\" --coreFxBinDir=\"\${WORKSPACE}/corefx\" --runType=\"${runType}\" --benchViewOS=\"${os}\" --stabilityPrefix=\"taskset 0x00000002 nice --adjustment=-10\" --uploadToBenchview --generatebenchviewdata=\"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools\""
        archiveArtifacts allowEmptyArchive: false, artifacts:'bin/toArchive/**,machinedata.json'
// Runs the Linux throughput measurement (run-throughput-perf.py) for one
// configuration against the stashed Linux build, uploading to BenchView.
// NOTE(review): lines are missing from this view (baseline guard, closing braces).
def linuxThroughput(String arch, String os, String config, String uploadString, String runType, String optLevel, String pgo, boolean isBaseline) {
    // SAS credential exposed as BV_UPLOAD_SAS_TOKEN for BenchView uploads.
    withCredentials([string(credentialsId: 'CoreCLR Perf BenchView Sas', variable: 'BV_UPLOAD_SAS_TOKEN')]) {
        String baselineString = ""
        // presumably inside `if (isBaseline)` in the full file — TODO confirm
        baselineString = "-baseline"
        // Single-dash here, unlike linuxPerf's '--nopgo' — matches run-throughput-perf.py's flag style.
        String pgoTestFlag = ((pgo == 'nopgo') ? '-nopgo' : '')
        unstash "linux-${arch}-${pgo}${baselineString}-build-artifacts"
        unstash "throughput-benchmarks-${arch}"
        // BenchView tooling is unpacked under tests/scripts on Linux.
        dir ('./tests/scripts') {
            unstash "benchview-tools"
        // We want to use the baseline metadata for baseline runs. We expect to find the submission metadata in
        // submission-metadata.py
        sh "mv -f submission-metadata-baseline.json submission-metadata.json"
        sh "./tests/scripts/perf-prep.sh --throughput"
        sh "python3 ./tests/scripts/run-throughput-perf.py -arch \"${arch}\" -os \"${os}\" -configuration \"${config}\" -opt_level ${optLevel} ${pgoTestFlag} -clr_root \"\${WORKSPACE}\" -assembly_root \"\${WORKSPACE}/${arch}ThroughputBenchmarks/lib\" -run_type \"${runType}\" -benchview_path \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools\""
        archiveArtifacts allowEmptyArchive: false, artifacts:'throughput-*.csv,machinedata.json'
252 //-------------------------- Job Definitions --------------------------//
// Shared job settings for the whole pipeline.
String config = "Release"
// PR runs upload as 'private'; commit (rolling) runs upload as 'rolling'.
String runType = isPR() ? 'private' : 'rolling'
String uploadString = '-uploadToBenchview'

// One-time setup: generate BenchView submission metadata and fetch the
// throughput benchmark packages, then stash everything for downstream nodes.
// NOTE(review): closing braces of this stage are on lines missing from this view.
stage ('Get Metadata and download Throughput Benchmarks') {
    simpleNode('Windows_NT', '20170427-elevated') {
        String commit = getCommit()
        def benchViewName = isPR() ? "coreclr private %ghprbPullTitle%" : "coreclr rolling %GIT_BRANCH_WITHOUT_ORIGIN% ${commit}"
        def benchViewUser = getUserEmail()
        // Bootstrap nuget.exe, then pull the BenchView tooling and throughput benchmark packages.
        bat "mkdir tools\n" +
            "powershell Invoke-WebRequest https://dist.nuget.org/win-x86-commandline/v4.1.0/nuget.exe -OutFile %WORKSPACE%\\tools\\nuget.exe"
        bat "%WORKSPACE%\\tools\\nuget.exe install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -Prerelease -ExcludeVersion"
        bat "%WORKSPACE%\\tools\\nuget.exe install Microsoft.BenchView.ThroughputBenchmarks.x64.Windows_NT -Source https://dotnet.myget.org/F/dotnet-core -Prerelease -ExcludeVersion"
        bat "%WORKSPACE%\\tools\\nuget.exe install Microsoft.BenchView.ThroughputBenchmarks.x86.Windows_NT -Source https://dotnet.myget.org/F/dotnet-core -Prerelease -ExcludeVersion"
        // Generate both the regular and the baseline submission metadata up front.
        bat "set \"GIT_BRANCH_WITHOUT_ORIGIN=%GitBranchOrCommit:*/=%\"\n" +
            "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"${benchViewName}\" --user-email \"${benchViewUser}\"\n" +
            "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}\n" +
            "py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"${benchViewName}-baseline\" --user-email \"${benchViewUser}\" -o submission-metadata-baseline.json\n"
        // TODO: revisit these moves. Originally, stash could not find the directories as currently named
        bat "move Microsoft.BenchView.ThroughputBenchmarks.x64.Windows_NT x64ThroughputBenchmarks"
        bat "move Microsoft.BenchView.ThroughputBenchmarks.x86.Windows_NT x86ThroughputBenchmarks"
        // Stash names here must match the unstash calls in the helper functions above.
        stash includes: 'Microsoft.BenchView.JSONFormat/**/*', name: 'benchview-tools'
        stash name: "metadata", includes: "*.json"
        stash name: "throughput-benchmarks-x64", includes: "x64ThroughputBenchmarks/**/*"
        stash name: "throughput-benchmarks-x86", includes: "x86ThroughputBenchmarks/**/*"
// TODO: use non-pgo builds for throughput?
// Build matrix. Inner-loop builds run for every job; outer-loop adds nopgo and
// Linux flavors. NOTE(review): closing braces/brackets of these map entries are
// on lines missing from this view.
def innerLoopBuilds = [
    "windows x64 pgo build": {
        simpleNode('Windows_NT','latest') {
            windowsBuild('x64', config, 'pgo', false)
    "windows x86 pgo build": {
        simpleNode('Windows_NT','latest') {
            windowsBuild('x86', config, 'pgo', false)
// Only run non-pgo builds on offical builds
// NOTE(review): the entries below presumably populate outerLoopBuilds inside a
// `if (!isPR())` assignment on missing lines — TODO confirm.
def outerLoopBuilds = [:]
    "windows x64 nopgo build": {
        simpleNode('Windows_NT','latest') {
            windowsBuild('x64', config, 'nopgo', false)
    "windows x86 nopgo build": {
        simpleNode('Windows_NT','latest') {
            windowsBuild('x86', config, 'nopgo', false)
    "linux x64 pgo build": {
        simpleNode('RHEL7.2', 'latest-or-auto') {
            linuxBuild('x64', config, 'pgo', false)
    "linux x64 nopgo build": {
        simpleNode('RHEL7.2', 'latest-or-auto') {
            linuxBuild('x64', config, 'nopgo', false)
// NOTE(review): baseline builds are disabled via this block comment; its closing
// */ is not visible in this view.
/*def baselineBuilds = [:]
    "windows x64 pgo baseline build": {
        simpleNode('Windows_NT','latest') {
            windowsBuild('x64', config, 'pgo', true)
    "windows x86 pgo baseline build": {
        simpleNode('Windows_NT','latest') {
            windowsBuild('x86', config, 'pgo', true)
// Build all flavors in parallel; baseline builds stay commented out.
stage ('Build Product') {
    parallel innerLoopBuilds + outerLoopBuilds //+ baselineBuilds
// Pipeline builds don't allow outside scripts (ie ArrayList.Add) if running from a script from SCM, so manually list these for now.
// Run the main test mix on all runs (PR + official)
def innerLoopTests = [:]

// Inner loop: one perf job per (arch, full_opt, benchmark folder), pgo only,
// non-baseline. NOTE(review): closing braces of these nests are on missing lines.
['x64', 'x86'].each { arch ->
    ['full_opt'].each { opt_level ->
        [false].each { isBaseline ->
            validTestFolders.each { benchmark ->
                // presumably guarded by `if (isBaseline)` on a missing line — TODO confirm
                // (`baseline` is assigned without `def`, i.e. into the script binding)
                baseline = " baseline"
                if (isPR() || !isBaseline) {
                    innerLoopTests["windows ${arch} ryujit ${opt_level} pgo ${benchmark}${baseline} perf"] = {
                        simpleNode('windows_server_2016_clr_perf', 180) {
                            windowsPerf(arch, config, uploadString, runType, opt_level, 'ryujit', 'pgo', 'perf', isBaseline, true, benchmark)

// Run the full test mix only on commits, not PRs
def outerLoopTests = [:]

// NOTE(review): `arch` and `baseline` below rely on enclosing loops/assignments
// that are on lines missing from this view.
outerLoopTests["windows ${arch} ryujit full_opt pgo${baseline} jitbench"] = {
    simpleNode('windows_server_2016_clr_perf', 180) {
        windowsPerf(arch, config, uploadString, runType, 'full_opt', 'ryujit', 'pgo', 'jitbench', false, false, '')
outerLoopTests["windows ${arch} ryujit full_opt pgo${baseline} illink"] = {
    // illink runs on an elevated node, unlike the other Windows perf jobs.
    simpleNode('Windows_NT', '20170427-elevated') {
        windowsPerf(arch, config, uploadString, runType, 'full_opt', 'ryujit', 'pgo', 'illink', false, false, '')
// Outer loop, Windows: full cross-product of arch x opt level x pgo x profiling.
['x64', 'x86'].each { arch ->
    ['min_opt', 'full_opt'].each { opt_level ->
        ['ryujit'].each { jit ->
            ['pgo', 'nopgo'].each { pgo_enabled ->
                [true, false].each { isProfileOn ->
                    outerLoopTests["windows ${arch} ${jit} ${opt_level} ${pgo_enabled} perf"] = {
                        simpleNode('windows_server_2016_clr_perf', 180) {
                            windowsPerf(arch, config, uploadString, runType, opt_level, jit, pgo_enabled, 'perf', false, isProfileOn, 'all')
                    outerLoopTests["windows ${arch} ${jit} ${opt_level} ${pgo_enabled} throughput"] = {
                        simpleNode('windows_server_2016_clr_perf', 180) {
                            // BUG(review): this call passes 9 arguments (trailing isProfileOn) but the
                            // visible windowsThroughput declares 8 parameters — verify against the full file.
                            windowsThroughput(arch, 'Windows_NT', config, runType, opt_level, jit, pgo_enabled, false, isProfileOn)
// Outer loop, Linux: x64 only, perf + throughput for each opt/pgo combination.
['x64'].each { arch ->
    ['min_opt', 'full_opt'].each { opt_level ->
        ['pgo', 'nopgo'].each { pgo_enabled ->
            outerLoopTests["linux ${arch} ryujit ${opt_level} ${pgo_enabled} perf"] = {
                simpleNode('ubuntu_1604_clr_perf', 180) {
                    linuxPerf(arch, 'Ubuntu16.04', config, uploadString, runType, opt_level, pgo_enabled, false)
            outerLoopTests["linux ${arch} ryujit ${opt_level} ${pgo_enabled} throughput"] = {
                simpleNode('ubuntu_1604_clr_perf', 180) {
                    linuxThroughput(arch, 'Ubuntu16.04', config, uploadString, runType, opt_level, pgo_enabled, false)

// Fan out all configured test jobs in parallel.
stage ('Run testing') {
    parallel innerLoopTests + outerLoopTests