1 @Library('dotnet-ci') _
3 // Incoming parameters. Access with "params.<param name>".
4 // Note that the parameters will be set as env variables so we cannot use names that conflict
5 // with the engineering system parameter names.
7 //--------------------- Windows Functions ----------------------------//
// Builds the Windows CoreCLR product bits and generates the test layout, then
// stashes the results for downstream perf/throughput jobs on this and other OSes.
//
// arch       - target architecture ('x64' or 'x86')
// config     - build configuration (e.g. 'Release')
// pgo        - 'pgo' or 'nopgo'; selects -enforcepgo vs -nopgooptimize
// isBaseline - when true, build the merge's first parent instead of the merge
//              commit and suffix the stash names with '-baseline'
def windowsBuild(String arch, String config, String pgo, boolean isBaseline) {
    String pgoBuildFlag = ((pgo == 'nopgo') ? '-nopgooptimize' : '-enforcepgo')
    String baselineString = ""

    // For baseline builds, checkout the merge's parent.
    // FIX: previously the baseline suffix and checkout ran unconditionally,
    // ignoring the isBaseline parameter; guard them as the comment intends.
    if (isBaseline) {
        baselineString = "-baseline"
        // '^^' is a single escaped caret inside a bat string: HEAD^1 == first parent
        bat "git checkout HEAD^^1"
    }

    bat "set __TestIntermediateDir=int&&.\\build.cmd -${config} -${arch} -skipbuildpackages ${pgoBuildFlag}"
    bat "tests\\runtest.cmd ${config} ${arch} GenerateLayoutOnly"
    // Drop intermediate objects so they don't bloat the stash below
    bat "rd /s /q bin\\obj"

    // Stash build artifacts. Stash tests in an additional stash to be used by Linux test runs
    stash name: "nt-${arch}-${pgo}${baselineString}-build-artifacts", includes: 'bin/**'
    stash name: "nt-${arch}-${pgo}${baselineString}-test-artifacts", includes: 'bin/tests/**'
}
// Runs the CoreCLR performance tests for one (arch, scenario) combination on a
// Windows perf machine and uploads the results to BenchView.
//
// arch/config  - flavor to test (artifacts come from windowsBuild's stashes)
// uploadString - '-uploadToBenchview' or '' to skip upload
// runType      - 'private' (PR) or 'rolling' (official)
// opt_level    - JIT optimization level ('full_opt' / 'min_opt')
// jit          - JIT name passed to run-xunit-perf (e.g. 'ryujit')
// pgo          - 'pgo' or 'nopgo'
// scenario     - 'perf', 'jitbench' or 'illink'
// isBaseline   - use the '-baseline' stashes/metadata
// isProfileOn  - collect HW-counter profile data instead of stopwatch only
// slice        - perf slice index (negative = no slicing, per the callers)
//
// NOTE(review): this body interpolates ${os} and ${testEnv}, which are not
// parameters or visible locals here — presumably set in elided lines; verify.
def windowsPerf(String arch, String config, String uploadString, String runType, String opt_level, String jit, String pgo, String scenario, boolean isBaseline, boolean isProfileOn, int slice) {
withCredentials([string(credentialsId: 'CoreCLR Perf BenchView Sas', variable: 'BV_UPLOAD_SAS_TOKEN')]) {
String baselineString = ""
// Baseline runs consume the '-baseline' stashes created by the build jobs.
// NOTE(review): assignment appears unconditional here; the isBaseline guard is
// presumably in elided lines — confirm.
baselineString = "-baseline"
unstash "nt-${arch}-${pgo}${baselineString}-test-artifacts"
unstash "benchview-tools"
// Tag nopgo runs so BenchView separates them from pgo results
String pgoTestFlag = ((pgo == 'nopgo') ? '-nopgo' : '')
// We want to use the baseline metadata for baseline runs. We expect to find the submission metadata in
// submission-metadata.py
bat "move /y submission-metadata-baseline.json submission-metadata.json"
String failedOutputLogFilename = "run-xunit-perf-scenario.log"
// Capture machine configuration for the BenchView submission
bat "py \".\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\""
bat ".\\init-tools.cmd"
// We run run-xunit-perf differently for each of the different job types
// Default collection: stopwatch only, unless profiling was requested
String profileArg = isProfileOn ? "BranchMispredictions+CacheMisses+InstructionRetired" : "stopwatch"
String runXUnitCommonArgs = "-arch ${arch} -configuration ${config} -generateBenchviewData \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" ${uploadString} ${pgoTestFlag} -runtype ${runType} ${testEnv} -optLevel ${opt_level} -jitName ${jit} -outputdir \"%WORKSPACE%\\bin\\sandbox_logs\""
if (scenario == 'perf') {
// Pin the run to CPU 1 at high priority for stable measurements
String runXUnitPerfCommonArgs = "${runXUnitCommonArgs} -stabilityPrefix \"START \\\"CORECLR_PERF_RUN\\\" /B /WAIT /HIGH /AFFINITY 0x2\""
String runXUnitPerflabArgs = "${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\performance\\perflab\\Perflab -library"
// Perflab additionally collects the default + gcapi events when profiling
profileArg = isProfileOn ? "default+${profileArg}+gcapi" : profileArg
bat "py tests\\scripts\\run-xunit-perf.py ${runXUnitPerflabArgs} -collectionFlags ${profileArg}"
String runXUnitCodeQualityArgs = "${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\Jit\\Performance\\CodeQuality\\"
bat "py tests\\scripts\\run-xunit-perf.py ${runXUnitCodeQualityArgs} -collectionFlags ${profileArg}"
// NOTE(review): second declaration of runXUnitCodeQualityArgs — presumably in a
// separate (sliced) branch elided from this view; would not compile in one scope.
String runXUnitCodeQualityArgs = "${runXUnitPerfCommonArgs} -slice ${slice} -sliceConfigFile \"%WORKSPACE%\\tests\\scripts\\perf-slices.json\" -testBinLoc bin\\tests\\${os}.${arch}.${config}"
bat "py tests\\scripts\\run-xunit-perf.py ${runXUnitCodeQualityArgs} -collectionFlags ${profileArg}"
else if (scenario == 'jitbench') {
// NOTE(review): unlike the 'perf' branch, the inner quotes around
// CORECLR_PERF_RUN are not backslash-escaped here — confirm the generated
// START command line is well-formed.
String runXUnitPerfCommonArgs = "${runXUnitCommonArgs} -stabilityPrefix \"START \"CORECLR_PERF_RUN\" /B /WAIT /HIGH\" -scenarioTest"
runXUnitPerfCommonArgs = "${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\performance\\Scenario\\JitBench -group CoreCLR-Scenarios"
// Skip the min_opt + profiling combination
if (!(opt_level == 'min_opt' && isProfileOn)) {
// BUG(review): ${profileArgs} is undefined — the local above is named
// 'profileArg'. Groovy GString resolution will fail (MissingPropertyException)
// when this branch executes; should be ${profileArg}.
bat "py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -collectionFlags ${profileArgs}"
else if (scenario == 'illink') {
String runXUnitPerfCommonArgs = "${runXUnitCommonArgs} -scenarioTest"
// Set up the VS2015 x86_amd64 cross-tools environment before running linkbench
bat "\"%VS140COMNTOOLS%\\..\\..\\VC\\vcvarsall.bat\" x86_amd64\n" +
"py tests\\scripts\\run-xunit-perf.py ${runXUnitPerfCommonArgs} -testBinLoc bin\\tests\\${os}.${arch}.${config}\\performance\\linkbench\\linkbench -group ILLink -nowarmup"
// Archive logs even on failure so regressions can be diagnosed
archiveArtifacts allowEmptyArchive: false, artifacts:'bin/sandbox_logs/**,machinedata.json', onlyIfSuccessful: false
// Runs the CoreCLR throughput (crossgen) benchmarks on Windows and uploads
// the per-assembly CSV results to BenchView.
//
// arch/os/config - flavor under test (artifacts come from windowsBuild's stash)
// runType        - 'private' (PR) or 'rolling' (official)
// optLevel/jit   - JIT optimization level and JIT name forwarded to the runner
// pgo            - 'pgo' or 'nopgo'
// isBaseline     - use the '-baseline' stashes/metadata
def windowsThroughput(String arch, String os, String config, String runType, String optLevel, String jit, String pgo, boolean isBaseline) {
withCredentials([string(credentialsId: 'CoreCLR Perf BenchView Sas', variable: 'BV_UPLOAD_SAS_TOKEN')]) {
String baselineString = ""
// NOTE(review): assignment appears unconditional here; the isBaseline guard is
// presumably in elided lines — confirm.
baselineString = "-baseline"
// Tag nopgo runs so BenchView separates them from pgo results
String pgoTestFlag = ((pgo == 'nopgo') ? '-nopgo' : '')
unstash "nt-${arch}-${pgo}${baselineString}-build-artifacts"
unstash "benchview-tools"
// Benchmark assemblies downloaded and stashed in the metadata stage below
unstash "throughput-benchmarks-${arch}"
// We want to use the baseline metadata for baseline runs. We expect to find the submission metadata in
// submission-metadata.py
bat "move /y submission-metadata-baseline.json submission-metadata.json"
// Capture machine configuration for the BenchView submission
bat "py \".\\Microsoft.BenchView.JSONFormat\\tools\\machinedata.py\""
bat ".\\init-tools.cmd"
bat "py -u tests\\scripts\\run-throughput-perf.py -arch ${arch} -os ${os} -configuration ${config} -opt_level ${optLevel} -jit_name ${jit} ${pgoTestFlag} -clr_root \"%WORKSPACE%\" -assembly_root \"%WORKSPACE%\\${arch}ThroughputBenchmarks\\lib\" -benchview_path \"%WORKSPACE%\\Microsoft.Benchview.JSONFormat\\tools\" -run_type ${runType}"
// Archive CSVs even on failure so regressions can be diagnosed
archiveArtifacts allowEmptyArchive: false, artifacts:'throughput-*.csv,machinedata.json', onlyIfSuccessful: false
129 //------------------------ Linux Functions ----------------------------//
// Builds the Linux CoreCLR product bits and stashes them for the Linux
// perf/throughput jobs. Tests themselves come from the Windows test stash.
//
// arch       - target architecture (e.g. 'x64')
// config     - build configuration (e.g. 'Release')
// pgo        - 'pgo' or 'nopgo'; nopgo adds -nopgooptimize (pgo is the
//              build.sh default, so no flag is needed for it)
// isBaseline - when true, build the merge's first parent instead of the merge
//              commit and suffix the stash name with '-baseline'
def linuxBuild(String arch, String config, String pgo, boolean isBaseline) {
    String pgoBuildFlag = ((pgo == 'nopgo') ? '-nopgooptimize' : '')
    String baselineString = ""

    // For baseline runs, checkout the merge's parent.
    // FIX: previously the baseline suffix and checkout ran unconditionally,
    // ignoring the isBaseline parameter; guard them as the comment intends.
    if (isBaseline) {
        baselineString = "-baseline"
        sh "git checkout HEAD^1"
    }

    sh "./build.sh -verbose -${config} -${arch} ${pgoBuildFlag}"
    stash name: "linux-${arch}-${pgo}${baselineString}-build-artifacts", includes: 'bin/**'
}
// Runs the CoreCLR performance tests on an Ubuntu perf machine, consuming the
// Linux product build plus the Windows-generated test binaries, and uploads
// the results to BenchView.
//
// arch/os/config - flavor under test (os is expected to be 'Ubuntu16.04')
// uploadString   - '-uploadToBenchview' or '' to skip upload
// runType        - 'private' (PR) or 'rolling' (official)
// optLevel       - JIT optimization level ('full_opt' / 'min_opt')
// pgo            - 'pgo' or 'nopgo'
// isBaseline     - use the '-baseline' stashes/metadata
def linuxPerf(String arch, String os, String config, String uploadString, String runType, String optLevel, String pgo, boolean isBaseline) {
withCredentials([string(credentialsId: 'CoreCLR Perf BenchView Sas', variable: 'BV_UPLOAD_SAS_TOKEN')]) {
String baselineString = ""
// NOTE(review): assignment appears unconditional here; the isBaseline guard is
// presumably in elided lines — confirm.
baselineString = "-baseline"
// Tag nopgo runs so BenchView separates them from pgo results
String pgoTestFlag = ((pgo == 'nopgo') ? '-nopgo' : '')
// Product bits from the Linux build; test binaries from the Windows build
unstash "linux-${arch}-${pgo}${baselineString}-build-artifacts"
unstash "nt-${arch}-${pgo}${baselineString}-test-artifacts"
// BenchView tools are expected under tests/scripts on Linux
dir ('./tests/scripts') {
unstash "benchview-tools"
// We want to use the baseline metadata for baseline runs. We expect to find the submission metadata in
// submission-metadata.py
sh "mv -f submission-metadata-baseline.json submission-metadata.json"
sh "./tests/scripts/perf-prep.sh --nocorefx"
// NOTE(review): 'release' is hard-coded here rather than ${config} — confirm
// this is intentional (config is always 'Release' for these jobs today).
sh "./build-test.sh release $arch generatelayoutonly"
// taskset pins to CPU 1 and nice raises priority for stable measurements
String runXUnitCommonArgs = "-arch ${arch} -os Ubuntu16.04 -configuration ${config} -stabilityPrefix \"taskset 0x00000002 nice --adjustment=-10\" -generateBenchviewData \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools\" ${uploadString} ${pgoTestFlag} -runtype ${runType} -optLevel ${optLevel} -outputdir \"\${WORKSPACE}/bin/sandbox_logs\""
// Test binaries live under the Windows_NT layout name because they were built there
sh "python3 ./tests/scripts/run-xunit-perf.py -testBinLoc bin/tests/Windows_NT.${arch}.${config}/JIT/Performance/CodeQuality ${runXUnitCommonArgs}"
// Archive logs even on failure so regressions can be diagnosed
archiveArtifacts allowEmptyArchive: false, artifacts:'bin/toArchive/**,machinedata.json', onlyIfSuccessful: false
// Runs the CoreCLR throughput (crossgen) benchmarks on an Ubuntu perf machine
// and uploads the per-assembly CSV results to BenchView.
//
// arch/os/config - flavor under test
// uploadString   - accepted for signature parity; NOTE(review): not actually
//                  interpolated into the command below — confirm upload is
//                  handled inside run-throughput-perf.py or elided lines.
// runType        - 'private' (PR) or 'rolling' (official)
// optLevel       - JIT optimization level
// pgo            - 'pgo' or 'nopgo'
// isBaseline     - use the '-baseline' stashes/metadata
def linuxThroughput(String arch, String os, String config, String uploadString, String runType, String optLevel, String pgo, boolean isBaseline) {
withCredentials([string(credentialsId: 'CoreCLR Perf BenchView Sas', variable: 'BV_UPLOAD_SAS_TOKEN')]) {
String baselineString = ""
// NOTE(review): assignment appears unconditional here; the isBaseline guard is
// presumably in elided lines — confirm.
baselineString = "-baseline"
// Tag nopgo runs so BenchView separates them from pgo results
String pgoTestFlag = ((pgo == 'nopgo') ? '-nopgo' : '')
unstash "linux-${arch}-${pgo}${baselineString}-build-artifacts"
// Benchmark assemblies downloaded and stashed in the metadata stage below
unstash "throughput-benchmarks-${arch}"
// BenchView tools are expected under tests/scripts on Linux
dir ('./tests/scripts') {
unstash "benchview-tools"
// We want to use the baseline metadata for baseline runs. We expect to find the submission metadata in
// submission-metadata.py
sh "mv -f submission-metadata-baseline.json submission-metadata.json"
sh "./tests/scripts/perf-prep.sh --throughput"
sh "python3 ./tests/scripts/run-throughput-perf.py -arch \"${arch}\" -os \"${os}\" -configuration \"${config}\" -opt_level ${optLevel} ${pgoTestFlag} -clr_root \"\${WORKSPACE}\" -assembly_root \"\${WORKSPACE}/${arch}ThroughputBenchmarks/lib\" -run_type \"${runType}\" -benchview_path \"\${WORKSPACE}/tests/scripts/Microsoft.BenchView.JSONFormat/tools\""
// Archive CSVs even on failure so regressions can be diagnosed
archiveArtifacts allowEmptyArchive: false, artifacts:'throughput-*.csv,machinedata.json', onlyIfSuccessful: false
//-------------------------- Job Definitions --------------------------//
String config = "Release"
// PRs are 'private' runs in BenchView; branch pushes are 'rolling'
String runType = isPR() ? 'private' : 'rolling'
String uploadString = '-uploadToBenchview'
// One-time setup stage: create BenchView submission metadata, download the
// BenchView tools and the throughput benchmark assemblies, and stash them
// for every downstream build/test node.
stage ('Get Metadata and download Throughput Benchmarks') {
simpleNode('Windows_NT', '20170427-elevated') {
String commit = getCommit()
def benchViewName = isPR() ? "coreclr private %ghprbPullTitle%" : "coreclr rolling %GIT_BRANCH_WITHOUT_ORIGIN% ${commit}"
def benchViewUser = getUserEmail()
bat "mkdir tools\n" +
"powershell Invoke-WebRequest https://dist.nuget.org/win-x86-commandline/v4.1.0/nuget.exe -OutFile %WORKSPACE%\\tools\\nuget.exe"
bat "%WORKSPACE%\\tools\\nuget.exe install Microsoft.BenchView.JSONFormat -Source http://benchviewtestfeed.azurewebsites.net/nuget -Prerelease -ExcludeVersion"
bat "%WORKSPACE%\\tools\\nuget.exe install Microsoft.BenchView.ThroughputBenchmarks.x64.Windows_NT -Source https://dotnet.myget.org/F/dotnet-core -Prerelease -ExcludeVersion"
bat "%WORKSPACE%\\tools\\nuget.exe install Microsoft.BenchView.ThroughputBenchmarks.x86.Windows_NT -Source https://dotnet.myget.org/F/dotnet-core -Prerelease -ExcludeVersion"
// Generate both the normal and the '-baseline' submission metadata up front;
// baseline runs later rename the baseline file into place.
bat "set \"GIT_BRANCH_WITHOUT_ORIGIN=%GitBranchOrCommit:*/=%\"\n" +
"py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"${benchViewName}\" --user-email \"${benchViewUser}\"\n" +
"py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\build.py\" git --branch %GIT_BRANCH_WITHOUT_ORIGIN% --type ${runType}\n" +
"py \"%WORKSPACE%\\Microsoft.BenchView.JSONFormat\\tools\\submission-metadata.py\" --name \"${benchViewName}-baseline\" --user-email \"${benchViewUser}\" -o submission-metadata-baseline.json\n"
// TODO: revisit these moves. Originally, stash could not find the directories as currently named
bat "move Microsoft.BenchView.ThroughputBenchmarks.x64.Windows_NT x64ThroughputBenchmarks"
bat "move Microsoft.BenchView.ThroughputBenchmarks.x86.Windows_NT x86ThroughputBenchmarks"
stash includes: 'Microsoft.BenchView.JSONFormat/**/*', name: 'benchview-tools'
stash name: "metadata", includes: "*.json"
stash name: "throughput-benchmarks-x64", includes: "x64ThroughputBenchmarks/**/*"
stash name: "throughput-benchmarks-x86", includes: "x86ThroughputBenchmarks/**/*"
// TODO: use non-pgo builds for throughput?
// Builds run on every job (PR and official)
def innerLoopBuilds = [
"windows x64 pgo build": {
simpleNode('Windows_NT','latest') {
windowsBuild('x64', config, 'pgo', false)
"windows x86 pgo build": {
simpleNode('Windows_NT','latest') {
windowsBuild('x86', config, 'pgo', false)
"linux x64 pgo build": {
simpleNode('RHEL7.2', 'latest-or-auto') {
linuxBuild('x64', config, 'pgo', false)
// Only run non-pgo builds on official builds
def outerLoopBuilds = [:]
"windows x64 nopgo build": {
simpleNode('Windows_NT','latest') {
windowsBuild('x64', config, 'nopgo', false)
"windows x86 nopgo build": {
simpleNode('Windows_NT','latest') {
windowsBuild('x86', config, 'nopgo', false)
"linux x64 nopgo build": {
simpleNode('RHEL7.2', 'latest-or-auto') {
linuxBuild('x64', config, 'nopgo', false)
/*def baselineBuilds = [:]
"windows x64 pgo baseline build": {
simpleNode('Windows_NT','latest') {
windowsBuild('x64', config, 'pgo', true)
"windows x86 pgo baseline build": {
simpleNode('Windows_NT','latest') {
windowsBuild('x86', config, 'pgo', true)
stage ('Build Product') {
// NOTE(review): outerLoopBuilds is populated above but commented out of this
// parallel call, so nopgo builds never run — yet nopgo test jobs are defined
// below. If outerLoopTests is ever re-enabled, re-enable outerLoopBuilds too.
parallel innerLoopBuilds //+ outerLoopBuilds //+ baselineBuilds
// Pipeline builds don't allow outside scripts (ie ArrayList.Add) if running from a script from SCM, so manually list these for now.
// Run the main test mix on all runs (PR + official)
def innerLoopTests = [:]
['x64', 'x86'].each { arch ->
['full_opt'].each { opt_level ->
[false].each { isBaseline ->
[0,1,2,3,4,5].each { slice ->
// NOTE(review): 'baseline' has no visible declaration in this view; it is
// presumably initialized to "" in elided lines before this conditional
// assignment — verify, otherwise the GString below throws.
baseline = " baseline"
if (isPR() || !isBaseline) {
innerLoopTests["windows ${arch} ryujit ${opt_level} pgo ${slice}${baseline} perf"] = {
simpleNode('windows_server_2016_clr_perf', 180) {
windowsPerf(arch, config, uploadString, runType, opt_level, 'ryujit', 'pgo', 'perf', isBaseline, true, slice)
innerLoopTests["linux ${arch} ryujit ${opt_level} pgo perf"] = {
simpleNode('ubuntu_1604_clr_perf', 180) {
linuxPerf(arch, 'Ubuntu16.04', config, uploadString, runType, opt_level, 'pgo', false)
// Run the full test mix only on commits, not PRs
def outerLoopTests = [:]
['x64', 'x86'].each { arch ->
// NOTE(review): ${baseline} here reuses whatever value the loops above left
// behind (" baseline" on the last iteration) — likely a stale-variable bug in
// the job name; confirm intent.
outerLoopTests["windows ${arch} ryujit full_opt pgo${baseline} jitbench"] = {
simpleNode('windows_server_2016_clr_perf', 180) {
windowsPerf(arch, config, uploadString, runType, 'full_opt', 'ryujit', 'pgo', 'jitbench', false, false, -1)
outerLoopTests["windows ${arch} ryujit full_opt pgo illink"] = {
// illink needs the elevated VS machine pool rather than the perf pool
simpleNode('Windows_NT', '20170427-elevated') {
windowsPerf(arch, config, uploadString, runType, 'full_opt', 'ryujit', 'pgo', 'illink', false, false, -1)
['x64', 'x86'].each { arch ->
['min_opt', 'full_opt'].each { opt_level ->
['ryujit'].each { jit ->
['pgo', 'nopgo'].each { pgo_enabled ->
[true, false].each { isProfileOn ->
outerLoopTests["windows ${arch} ${jit} ${opt_level} ${pgo_enabled} perf"] = {
simpleNode('windows_server_2016_clr_perf', 180) {
windowsPerf(arch, config, uploadString, runType, opt_level, jit, pgo_enabled, 'perf', false, isProfileOn, -1)
outerLoopTests["windows ${arch} ${jit} ${opt_level} ${pgo_enabled} throughput"] = {
simpleNode('windows_server_2016_clr_perf', 180) {
// BUG(review): windowsThroughput declares 8 parameters (ending in
// 'boolean isBaseline') but is called here with 9 arguments
// (..., false, isProfileOn). Groovy will throw MissingMethodException
// at runtime when this outer-loop job is enabled; drop isProfileOn or
// add the parameter to windowsThroughput.
windowsThroughput(arch, 'Windows_NT', config, runType, opt_level, jit, pgo_enabled, false, isProfileOn)
['x64'].each { arch ->
['min_opt', 'full_opt'].each { opt_level ->
['pgo', 'nopgo'].each { pgo_enabled ->
outerLoopTests["linux ${arch} ryujit ${opt_level} ${pgo_enabled} perf"] = {
simpleNode('ubuntu_1604_clr_perf', 180) {
linuxPerf(arch, 'Ubuntu16.04', config, uploadString, runType, opt_level, pgo_enabled, false)
outerLoopTests["linux ${arch} ryujit ${opt_level} ${pgo_enabled} throughput"] = {
simpleNode('ubuntu_1604_clr_perf', 180) {
linuxThroughput(arch, 'Ubuntu16.04', config, uploadString, runType, opt_level, pgo_enabled, false)
stage ('Run testing') {
// Outer-loop tests are currently disabled (matching the disabled nopgo builds)
parallel innerLoopTests //+ outerLoopTests