Add two new run configurations for SPMI benchmarks: tiered and tiered PGO.
So benchmark runs now have 3 separate collections.
The new ones are named "run_tiered" and "run_pgo", e.g.:
```
benchmarks.run.windows.x64.checked.mch
benchmarks.run_tiered.windows.x64.checked.mch
benchmarks.run_pgo.windows.x64.checked.mch
```
collectionType: run
collectionName: benchmarks
+ - template: /eng/pipelines/common/platform-matrix.yml
+ parameters:
+ jobTemplate: /eng/pipelines/coreclr/templates/superpmi-collect-job.yml
+ buildConfig: checked
+ platforms:
+ - osx_arm64
+ - linux_arm
+ - linux_arm64
+ - linux_x64
+ - windows_x64
+ - windows_x86
+ - windows_arm64
+ helixQueueGroup: ci
+ helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml
+ jobParameters:
+ testGroup: outerloop
+ liveLibrariesBuildConfig: Release
+ collectionType: run_tiered
+ collectionName: benchmarks
+
+ - template: /eng/pipelines/common/platform-matrix.yml
+ parameters:
+ jobTemplate: /eng/pipelines/coreclr/templates/superpmi-collect-job.yml
+ buildConfig: checked
+ platforms:
+ - osx_arm64
+ - linux_arm
+ - linux_arm64
+ - linux_x64
+ - windows_x64
+ - windows_x86
+ - windows_arm64
+ helixQueueGroup: ci
+ helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml
+ jobParameters:
+ testGroup: outerloop
+ liveLibrariesBuildConfig: Release
+ collectionType: run_pgo
+ collectionName: benchmarks
+
#
# Collection of coreclr test run
#
<PmiArguments></PmiArguments>
</PropertyGroup>
+ <!-- Settings for Run collections -->
+
+ <PropertyGroup Condition=" '$(CollectionType)' == 'run_tiered' ">
+ <RunArguments>--tiered_compilation</RunArguments>
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(CollectionType)' == 'run_pgo' ">
+ <RunArguments>--tiered_pgo</RunArguments>
+ </PropertyGroup>
+ <PropertyGroup Condition=" '$(CollectionType)' != 'run_tiered' and '$(CollectionType)' != 'run_pgo' ">
+ <RunArguments></RunArguments>
+ </PropertyGroup>
+
<!-- Define the Helix work item command and timeout for each collection type -->
<PropertyGroup Condition="'$(CollectionName)' != 'benchmarks'">
</PropertyGroup>
<PropertyGroup Condition="'$(CollectionName)' == 'benchmarks'">
- <WorkItemCommand>$(Python) $(SuperPMIDirectory)$(FileSeparatorChar)superpmi_benchmarks.py -performance_directory $(PerformanceDirectory) -superpmi_directory $(SuperPMIDirectory) -core_root $(SuperPMIDirectory) -arch $(Architecture)</WorkItemCommand>
+ <WorkItemCommand>$(Python) $(SuperPMIDirectory)$(FileSeparatorChar)superpmi_benchmarks.py -performance_directory $(PerformanceDirectory) -superpmi_directory $(SuperPMIDirectory) -core_root $(SuperPMIDirectory) -arch $(Architecture) $(RunArguments)</WorkItemCommand>
<WorkItemTimeout>3:00</WorkItemTimeout>
</PropertyGroup>
collect_parser.add_argument("-mch_files", metavar="MCH_FILE", nargs='+', help="Pass a sequence of MCH files which will be merged. Required by --merge_mch_files.")
collect_parser.add_argument("--use_zapdisable", action="store_true", help="Sets DOTNET_ZapDisable=1 and DOTNET_ReadyToRun=0 when doing collection to cause NGEN/ReadyToRun images to not be used, and thus causes JIT compilation and SuperPMI collection of these methods.")
collect_parser.add_argument("--tiered_compilation", action="store_true", help="Sets DOTNET_TieredCompilation=1 when doing collections.")
+collect_parser.add_argument("--tiered_pgo", action="store_true", help="Sets DOTNET_TieredCompilation=1 and DOTNET_TieredPGO=1 when doing collections.")
collect_parser.add_argument("--ci", action="store_true", help="Special collection mode for handling zero-sized files in Azure DevOps + Helix pipelines collections.")
# Allow for continuing a collection in progress
if coreclr_args.pmi or coreclr_args.crossgen2:
self.assemblies = coreclr_args.assemblies
self.exclude = coreclr_args.exclude
+ if coreclr_args.tiered_compilation or coreclr_args.tiered_pgo:
+ raise RuntimeError("Tiering options have no effect for pmi or crossgen2 collections.")
+
+ if coreclr_args.tiered_compilation and coreclr_args.tiered_pgo:
+ raise RuntimeError("Pass only one tiering option.")
self.coreclr_args = coreclr_args
dotnet_env = {}
dotnet_env["EnableExtraSuperPmiQueries"] = "1"
- if not self.coreclr_args.tiered_compilation:
+ if self.coreclr_args.tiered_compilation:
+ dotnet_env["TieredCompilation"] = "1"
+ elif self.coreclr_args.tiered_pgo:
+ dotnet_env["TieredCompilation"] = "1"
+ dotnet_env["TieredPGO"] = "1"
+ else:
dotnet_env["TieredCompilation"] = "0"
if self.coreclr_args.use_zapdisable:
num_same,
byte_improvements,
byte_regressions))
-
+
if byte_improvements > 0 and byte_regressions > 0:
logging.info(" -{:,d}/+{:,d} bytes".format(byte_improvements, byte_regressions))
elif byte_improvements > 0:
sum_base = sum(int(base_metrics[row]["Diffed code bytes"]) for (_, base_metrics, _, _, _, _) in asm_diffs)
sum_diff = sum(int(diff_metrics[row]["Diffed code bytes"]) for (_, _, diff_metrics, _, _, _) in asm_diffs)
-
+
with DetailsSection(write_fh, "{} ({} bytes)".format(row, format_delta(sum_base, sum_diff))):
write_fh.write("|Collection|Base size (bytes)|Diff size (bytes)|\n")
write_fh.write("|---|--:|--:|\n")
num_missed_base / num_contexts * 100,
num_missed_diff,
num_missed_diff / num_contexts * 100))
-
+
for t in rows:
write_row(*t)
first_line = f.readline().rstrip()
if first_line and first_line.startswith("; Assembly listing for method "):
func_name += " - " + first_line[len("; Assembly listing for method "):]
-
+
git_diff_command = [ git_path, "diff", "--diff-algorithm=histogram", "--no-index", "--", base_dasm_path, diff_dasm_path ]
git_diff_proc = subprocess.Popen(git_diff_command, stdout=subprocess.PIPE)
(stdout, _) = git_diff_proc.communicate()
display_subset("Top {} improvements, percentage-wise:", top_improvements_pct)
display_subset("Top {} regressions, percentage-wise:", top_regressions_pct)
-
+
# 20 contexts without size diffs (possibly GC info diffs), sorted by size
zero_size_diffs = filter(lambda r: int(r["Diff size"]) == int(r["Base size"]), diffs)
smallest_zero_size_contexts = sorted(zero_size_diffs, key=lambda r: int(r["Context size"]))[:20]
lambda unused: True,
"Unable to set tiered_compilation")
+ coreclr_args.verify(args,
+ "tiered_pgo",
+ lambda unused: True,
+ "Unable to set tiered_pgo")
+
coreclr_args.verify(args,
"pmi_path",
lambda unused: True,
parser.add_argument("-partition_count", help="Total number of partitions")
parser.add_argument("-partition_index", help="Partition index to do the collection for")
parser.add_argument("-arch", help="Architecture")
-
+parser.add_argument("--tiered_compilation", action="store_true", help="Sets DOTNET_TieredCompilation=1 when doing collections.")
+parser.add_argument("--tiered_pgo", action="store_true", help="Sets DOTNET_TieredCompilation=1 and DOTNET_TieredPGO=1 when doing collections.")
def setup_args(args):
""" Setup the args for SuperPMI to use.
lambda arch: arch.lower() in ["x86", "x64", "arm", "arm64"],
"Unable to set arch")
+ coreclr_args.verify(args,
+ "tiered_compilation",
+ lambda unused: True,
+ "Unable to set tiered_compilation")
+
+ coreclr_args.verify(args,
+ "tiered_pgo",
+ lambda unused: True,
+ "Unable to set tiered_pgo")
+
return coreclr_args
make_executable(script_name)
- run_command([
- python_path, os.path.join(superpmi_directory, "superpmi.py"), "collect", "--clean", "-core_root", core_root,
- "-output_mch_path", output_mch_name, "-log_file", log_file, "-log_level", "debug",
- script_name], _exit_on_fail=True)
+ script_args = [python_path,
+ os.path.join(superpmi_directory, "superpmi.py"),
+ "collect",
+ "--clean",
+ "-core_root", core_root,
+ "-log_file", log_file,
+ "-output_mch_path", output_mch_name,
+ "-log_level", "debug"]
+
+ if coreclr_args.tiered_compilation:
+ script_args.append("--tiered_compilation");
+ elif coreclr_args.tiered_pgo:
+ script_args.append("--tiered_pgo");
+
+ script_args.append(script_name);
+
+ run_command(script_args, _exit_on_fail=True)
def strip_unrelated_mc(coreclr_args, old_mch_filename, new_mch_filename):
"""
coreclr_args = setup_args(main_args)
+ if coreclr_args.tiered_compilation and coreclr_args.tiered_pgo:
+ raise RuntimeError("Pass only one tiering option.")
+
all_output_mch_name = os.path.join(coreclr_args.output_mch_path + "_all.mch")
build_and_run(coreclr_args, all_output_mch_name)
if os.path.isfile(all_output_mch_name):
parser = argparse.ArgumentParser(description="description")
-parser.add_argument("-collection_type", required=True, help="Type of the SPMI collection to be done (crossgen2, pmi, run)")
+parser.add_argument("-collection_type", required=True, help="Type of the SPMI collection to be done (crossgen2, pmi, run, run_tiered, run_pgo)")
parser.add_argument("-collection_name", required=True, help="Name of the SPMI collection to be done (e.g., libraries, libraries_tests, coreclr_tests, benchmarks)")
parser.add_argument("-payload_directory", required=True, help="Path to payload directory to create: subdirectories are created for the correlation payload as well as the per-partition work items")
parser.add_argument("-source_directory", required=True, help="Path to source directory")
is_windows = platform.system() == "Windows"
-legal_collection_types = [ "crossgen2", "pmi", "run" ]
+legal_collection_types = [ "crossgen2", "pmi", "run", "run_tiered", "run_pgo" ]
directories_to_ignore = [
"runtimes", # This appears to be the result of a nuget package that includes a bunch of native code