--- /dev/null
+parameters:
+ unpackFolder: ''
+ cleanUnpackFolder: true
+ artifactFileName: ''
+ artifactName: ''
+ displayName: ''
+
+steps:
+ # Download artifact
+ # Download artifact produced by the current pipeline run.
+ # NOTE(review): previous version pinned buildType: specific with a hard-coded
+ # branchName ('your/branch/having/artifacts') and buildId ('1274841') — clearly
+ # leftover local-debugging configuration; a shared template must download from
+ # the current build.
+ - task: DownloadBuildArtifacts@0
+ displayName: 'Download ${{ parameters.displayName }}'
+ inputs:
+ buildType: current
+ downloadType: single
+ downloadPath: '$(Build.SourcesDirectory)/__download__'
+ artifactName: '${{ parameters.artifactName }}'
+
+ # Unzip artifact
+ - task: ExtractFiles@1
+ displayName: 'Unzip ${{ parameters.displayName }}'
+ inputs:
+ archiveFilePatterns: $(Build.SourcesDirectory)/__download__/${{ parameters.artifactName }}/${{ parameters.artifactFileName }}
+ destinationFolder: ${{ parameters.unpackFolder }}
+ cleanDestinationFolder: ${{ parameters.cleanUnpackFolder }}
\ No newline at end of file
--- /dev/null
+trigger:
+ batch: false
+ branches:
+ include:
+ - main
+ paths:
+ include:
+ - src/coreclr/jit/*
+ - src/coreclr/inc/jiteeversionguid.h
+
+# This pipeline is supposed to be run only on merged changes
+# and should not be triggerable from a PR.
+pr: none
+
+jobs:
+
+- template: /eng/pipelines/common/platform-matrix.yml
+ parameters:
+ jobTemplate: /eng/pipelines/coreclr/templates/build-jit-job.yml
+ buildConfig: checked
+ platforms:
+ - windows_x64
+ - windows_x86
+ jobParameters:
+ uploadAs: 'pipelineArtifacts'
+
+- template: /eng/pipelines/common/platform-matrix.yml
+ parameters:
+ jobTemplate: /eng/pipelines/coreclr/templates/superpmi-replay-job.yml
+ buildConfig: checked
+ platforms:
+ - windows_x64
+ - windows_x86
+ helixQueueGroup: ci
+ helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml
\ No newline at end of file
timeoutInMinutes: ''
variables: {}
dependOnEvaluatePaths: false
+ uploadAs: 'azureBlob'
### Product build
jobs:
- name: publishLogsArtifactPrefix
value: 'BuildLogs_CoreCLR_JIT'
+ - name: uploadAs
+ value: ${{ parameters.uploadAs }}
- name: compilerArg
value: ''
# preinstalled, so we only need this step for OSX and Windows.
- ${{ if eq(parameters.osGroup, 'OSX') }}:
- script: $(Build.SourcesDirectory)/eng/install-native-dependencies.sh $(osGroup) ${{ parameters.archType }} azDO
- displayName: Install native dependencies
+ displayName: Install native dependencies (OSX)
- ${{ if eq(parameters.osGroup, 'windows') }}:
# Necessary to install python
- script: $(Build.SourcesDirectory)\eng\common\init-tools-native.cmd -InstallDirectory $(Build.SourcesDirectory)\native-tools -Force
- displayName: Install native dependencies
+ displayName: Install native dependencies (windows)
# Install internal tools on official builds
# Since our internal tools are behind an authenticated feed,
- script: set __TestIntermediateDir=int&&$(Build.SourcesDirectory)/src/coreclr/build-runtime$(scriptExt) $(buildConfig) $(archType) -ci -nopgooptimize -skiprestoreoptdata -component alljits
displayName: Build CoreCLR JIT
- # Ensure the Python azure-storage-blob package is installed before doing the upload.
- - script: $(PipScript) install --user --upgrade pip && $(PipScript) install --user azure.storage.blob==12.5.0 --force-reinstall
- displayName: Upgrade Pip to latest and install azure-storage-blob Python package
-
- - script: $(PythonScript) $(Build.SourcesDirectory)/src/coreclr/scripts/jitrollingbuild.py upload -build_type $(buildConfig) -arch $(archType) -host_os $(osGroup) -git_hash $(Build.SourceVersion)
- displayName: Upload JIT to Azure Storage
- env:
- CLRJIT_AZ_KEY: $(clrjit_key1) # secret key stored as variable in pipeline
+ - ${{ if eq(parameters.uploadAs, 'azureBlob') }}:
+ # Ensure the Python azure-storage-blob package is installed before doing the upload.
+ - script: $(PipScript) install --user --upgrade pip && $(PipScript) install --user azure.storage.blob==12.5.0 --force-reinstall
+ displayName: Upgrade Pip to latest and install azure-storage-blob Python package
+
+ - script: $(PythonScript) $(Build.SourcesDirectory)/src/coreclr/scripts/jitrollingbuild.py upload -build_type $(buildConfig) -arch $(archType) -host_os $(osGroup) -git_hash $(Build.SourceVersion)
+ displayName: Upload JIT to Azure Storage
+ env:
+ CLRJIT_AZ_KEY: $(clrjit_key1) # secret key stored as variable in pipeline
+
+ - ${{ if eq(parameters.uploadAs, 'pipelineArtifacts') }}:
+ # Publish product output directory for consumption by tests.
+ - template: /eng/pipelines/common/upload-artifact-step.yml
+ parameters:
+ rootFolder: $(buildProductRootFolderPath)
+ includeRootFolder: false
+ archiveType: $(archiveType)
+ tarCompression: $(tarCompression)
+ archiveExtension: $(archiveExtension)
+ artifactName: $(buildProductArtifactName)
+ displayName: 'product build'
# Publish Logs
- task: PublishPipelineArtifact@1
liveLibrariesBuildConfig: '' # optional -- live-live libraries configuration to use for the run
runtimeType: 'coreclr' # optional -- Sets the runtime as coreclr or mono
codeGenType: 'JIT' # optional -- Decides on the codegen technology if running on mono
- projectFile: 'superpmi.proj' # optional -- project file to build helix workitems
runKind: '' # required -- test category
collectionType: ''
collectionName: ''
WorkItemTimeout: 4:00 # 4 hours
WorkItemDirectory: '$(WorkItemDirectory)'
CorrelationPayloadDirectory: '$(CorrelationPayloadDirectory)'
- ProjectFile: ${{ parameters.projectFile }}
+ ProjectFile: 'superpmi.proj'
BuildConfig: ${{ parameters.buildConfig }}
osGroup: ${{ parameters.osGroup }}
InputArtifacts: '$(InputArtifacts)'
--- /dev/null
+parameters:
+ steps: [] # optional -- any additional steps that need to happen before pulling down the jitutils repo and sending the jitutils to helix (ie building your repo)
+ variables: [] # optional -- list of additional variables to send to the template
+ jobName: '' # required -- job name
+ displayName: '' # optional -- display name for the job. Will use jobName if not passed
+ pool: '' # required -- name of the Build pool
+ container: '' # required -- name of the container
+ buildConfig: '' # required -- build configuration
+ archType: '' # required -- targeting CPU architecture
+ osGroup: '' # required -- operating system for the job
+ osSubgroup: '' # optional -- operating system subgroup
+ extraSetupParameters: '' # optional -- extra arguments to pass to the setup script
+ frameworks: ['netcoreapp3.0'] # optional -- list of frameworks to run against
+ continueOnError: 'false' # optional -- determines whether to continue the build if the step errors
+ dependsOn: '' # optional -- dependencies of the job
+ timeoutInMinutes: 320 # optional -- timeout for the job
+ enableTelemetry: false # optional -- enable for telemetry
+ liveLibrariesBuildConfig: '' # optional -- live-live libraries configuration to use for the run
+ runtimeType: 'coreclr' # optional -- Sets the runtime as coreclr or mono
+ codeGenType: 'JIT' # optional -- Decides on the codegen technology if running on mono
+ runKind: '' # required -- test category
+ collectionType: ''
+ collectionName: ''
+ dependOnEvaluatePaths: false
+
+jobs:
+- template: xplat-pipeline-job.yml
+ parameters:
+ dependsOn: ${{ parameters.dependsOn }}
+ buildConfig: ${{ parameters.buildConfig }}
+ archType: ${{ parameters.archType }}
+ osGroup: ${{ parameters.osGroup }}
+ osSubgroup: ${{ parameters.osSubgroup }}
+ liveLibrariesBuildConfig: ${{ parameters.liveLibrariesBuildConfig }}
+ enableTelemetry: ${{ parameters.enableTelemetry }}
+ enablePublishBuildArtifacts: true
+ continueOnError: ${{ parameters.continueOnError }}
+ collectionType: ${{ parameters.collectionType }}
+ collectionName: ${{ parameters.collectionName }}
+ dependOnEvaluatePaths: ${{ parameters.dependOnEvaluatePaths }}
+ timeoutInMinutes: ${{ parameters.timeoutInMinutes }}
+
+ ${{ if ne(parameters.displayName, '') }}:
+ displayName: '${{ parameters.displayName }}'
+ ${{ if eq(parameters.displayName, '') }}:
+ displayName: '${{ parameters.jobName }}'
+
+ variables:
+ - ${{ each variable in parameters.variables }}:
+ - ${{ if ne(variable.name, '') }}:
+ - name: ${{ variable.name }}
+ value: ${{ variable.value }}
+ - ${{ if ne(variable.group, '') }}:
+ - group: ${{ variable.group }}
+
+ - name: PythonScript
+ value: 'py -3'
+ - name: PipScript
+ value: 'py -3 -m pip'
+ - name: SpmiCollectionLocation
+ value: '$(Build.SourcesDirectory)\artifacts\spmi\'
+ - name: SpmiLogsLocation
+ value: '$(Build.SourcesDirectory)\artifacts\spmi_logs\'
+ - name: HelixResultLocation
+ value: '$(Build.SourcesDirectory)\artifacts\helixresults\'
+
+ workspace:
+ clean: all
+ pool:
+ ${{ parameters.pool }}
+ container: ${{ parameters.container }}
+ strategy:
+ matrix:
+ ${{ each framework in parameters.frameworks }}:
+ ${{ framework }}:
+ _Framework: ${{ framework }}
+ steps:
+ - ${{ parameters.steps }}
+
+ - script: |
+ mkdir -p $(SpmiCollectionLocation)
+ displayName: Create directory for SPMI collection
+
+ - script: $(PythonScript) $(Build.SourcesDirectory)/src/coreclr/scripts/superpmi_replay_setup.py -source_directory $(Build.SourcesDirectory) -product_directory $(buildProductRootFolderPath) -arch $(archType)
+ displayName: ${{ format('SuperPMI replay setup ({0} {1})', parameters.osGroup, parameters.archType) }}
+
+ # Run superpmi replay in helix
+ - template: /eng/pipelines/coreclr/templates/superpmi-send-to-helix.yml
+ parameters:
+ HelixSource: '$(HelixSourcePrefix)/$(Build.Repository.Name)/$(Build.SourceBranch)' # sources must start with pr/, official/, prodcon/, or agent/
+ HelixAccessToken: $(HelixApiAccessToken)
+ HelixTargetQueues: $(Queue)
+ HelixPreCommands: $(HelixPreCommand)
+ Creator: $(Creator)
+ WorkItemTimeout: 4:00 # 4 hours
+ WorkItemDirectory: '$(WorkItemDirectory)'
+ CorrelationPayloadDirectory: '$(CorrelationPayloadDirectory)'
+ ProjectFile: 'superpmi-replay.proj'
+ BuildConfig: ${{ parameters.buildConfig }}
+ osGroup: ${{ parameters.osGroup }}
+ archType: ${{ parameters.archType }}
+ continueOnError: true # Run the future step i.e. upload superpmi logs
+
+ # Always upload the available logs for diagnostics
+ - task: CopyFiles@2
+ displayName: Copying superpmi.log of all partitions
+ inputs:
+ sourceFolder: '$(HelixResultLocation)'
+ contents: '**/superpmi_*.log'
+ targetFolder: '$(SpmiLogsLocation)'
+ condition: always()
+
+ - task: PublishPipelineArtifact@1
+ displayName: Publish Superpmi logs
+ inputs:
+ targetPath: $(SpmiLogsLocation)
+ artifactName: 'SuperPMI_Logs_$(archType)_$(buildConfig)'
+ condition: always()
+
+ - task: PublishPipelineArtifact@1
+ displayName: Publish SuperPMI build logs
+ inputs:
+ targetPath: $(Build.SourcesDirectory)/artifacts/log
+ artifactName: 'SuperPMI_BuildLogs__$(archType)_$(buildConfig)'
+ condition: always()
\ No newline at end of file
liveLibrariesBuildConfig: ${{ parameters.liveLibrariesBuildConfig }}
runtimeType: ${{ parameters.runtimeType }}
codeGenType: ${{ parameters.codeGenType }}
- projectFile: ${{ parameters.projectFile }}
runKind: ${{ parameters.runKind }}
testGroup: ${{ parameters.testGroup }}
collectionType: ${{ parameters.collectionType }}
--- /dev/null
+parameters:
+ buildConfig: '' # required -- build configuration
+ archType: '' # required -- targeting CPU architecture
+ osGroup: '' # required -- operating system for the job
+ osSubgroup: '' # optional -- operating system subgroup
+ pool: ''
+ stagedBuild: false
+ timeoutInMinutes: 320 # build timeout
+ framework: net5.0 # Specify the appropriate framework when running release branches (ie netcoreapp3.0 for release/3.0)
+ variables: {}
+ dependOnEvaluatePaths: false
+ runJobTemplate: '/eng/pipelines/coreclr/templates/run-superpmi-replay-job.yml'
+
+jobs:
+- template: ${{ parameters.runJobTemplate }}
+ parameters:
+ jobName: ${{ format('superpmibuild_{0}{1}_{2}_{3}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }}
+ displayName: ${{ format('SuperPMI replay {0} {1}', parameters.osGroup, parameters.archType) }}
+ pool: ${{ parameters.pool }}
+ buildConfig: ${{ parameters.buildConfig }}
+ archType: ${{ parameters.archType }}
+ osGroup: ${{ parameters.osGroup }}
+ osSubgroup: ${{ parameters.osSubgroup }}
+ dependOnEvaluatePaths: ${{ parameters.dependOnEvaluatePaths }}
+ timeoutInMinutes: ${{ parameters.timeoutInMinutes }}
+ dependsOn:
+ - ${{ format('coreclr_jit_build_{0}{1}_{2}_{3}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }}
+
+ variables: ${{ parameters.variables }}
+
+ frameworks:
+ - ${{ parameters.framework }}
+
+ steps:
+
+ # Download jit builds
+ - template: /eng/pipelines/common/download-artifact-step.yml
+ parameters:
+ unpackFolder: $(buildProductRootFolderPath)
+ artifactFileName: '$(buildProductArtifactName)$(archiveExtension)'
+ artifactName: '$(buildProductArtifactName)'
+ displayName: 'JIT product build'
\ No newline at end of file
InputArtifacts: ''
CollectionType: ''
CollectionName: ''
+ ProjectFile: ''
steps:
- template: /eng/pipelines/common/templates/runtimes/send-to-helix-inner-step.yml
parameters:
osGroup: ${{ parameters.osGroup }}
- sendParams: $(Build.SourcesDirectory)/src/coreclr/scripts/superpmi.proj /restore /t:Test /bl:$(Build.SourcesDirectory)/artifacts/log/$(BuildConfig)/SendToHelix.binlog
+ sendParams: $(Build.SourcesDirectory)/src/coreclr/scripts/${{ parameters.ProjectFile }} /restore /t:Test /bl:$(Build.SourcesDirectory)/artifacts/log/$(BuildConfig)/SendToHelix.binlog
displayName: ${{ parameters.DisplayNamePrefix }}
condition: ${{ parameters.condition }}
shouldContinueOnError: ${{ parameters.continueOnError }}
--- /dev/null
+<Project Sdk="Microsoft.DotNet.Helix.Sdk" DefaultTargets="Test">
+ <!--
+ This is useful for local testing to print the produced helix items
+ To use this when you are changing how items are produced, uncomment the target
+ and replace the Project item at the top of the file with this:
+ <Project DefaultTargets="printItems">
+
+ Once you've done that you can run this to see the results:
+ dotnet msbuild .\superpmi-replay.proj /v:n
+ -->
+
+ <!-- <PropertyGroup>
+ <HelixTargetQueues>Some_Queue</HelixTargetQueues>
+ <Platform>Windows</Platform>
+ <Architecture>x64</Architecture>
+ </PropertyGroup>
+
+ <Target Name="printItems">
+ <Message Text="@(HelixWorkItem -> 'name: %(HelixWorkItem.Identity)
+ dir: %(HelixWorkItem.PayloadDirectory)
+ pre: %(HelixWorkItem.PreCommands)
+ command: %(HelixWorkItem.Command)
+ post: %(HelixWorkItem.PostCommands)
+ timeout: %(HelixWorkItem.Timeout) '"/>
+ </Target> -->
+
+ <PropertyGroup>
+ <Python>%HELIX_PYTHONPATH%</Python>
+ <ProductDirectory>%HELIX_CORRELATION_PAYLOAD%</ProductDirectory>
+ <SuperpmiLogsLocation>%HELIX_WORKITEM_UPLOAD_ROOT%</SuperpmiLogsLocation>
+ <!-- Workaround until https://github.com/dotnet/arcade/pull/6179 is not available -->
+ <HelixResultsDestinationDir>$(BUILD_SOURCESDIRECTORY)\artifacts\helixresults</HelixResultsDestinationDir>
+ <WorkItemCommand>$(Python) $(ProductDirectory)\superpmi-replay.py -jit_directory $(ProductDirectory)</WorkItemCommand>
+ <EnableAzurePipelinesReporter>false</EnableAzurePipelinesReporter>
+ <EnableXUnitReporter>false</EnableXUnitReporter>
+ <WorkItemTimeout>5:00</WorkItemTimeout>
+ </PropertyGroup>
+
+ <ItemGroup>
+ <HelixCorrelationPayload Include="$(CorrelationPayloadDirectory)">
+ <PayloadDirectory>%(Identity)</PayloadDirectory>
+ </HelixCorrelationPayload>
+ </ItemGroup>
+
+ <ItemGroup Condition="'$(Architecture)' == 'x64'">
+ <SPMI_Partition Include="win-x64" Platform="windows" Architecture="x64" />
+ <SPMI_Partition Include="win-arm64" Platform="windows" Architecture="arm64" />
+ <SPMI_Partition Include="unix-x64" Platform="Linux" Architecture="x64" />
+ <SPMI_Partition Include="unix-arm64" Platform="Linux" Architecture="arm64" />
+ </ItemGroup>
+
+ <ItemGroup Condition="'$(Architecture)' == 'x86'">
+ <SPMI_Partition Include="win-x86" Platform="windows" Architecture="x86" />
+ <SPMI_Partition Include="unix-arm" Platform="Linux" Architecture="arm" />
+ </ItemGroup>
+
+ <ItemGroup>
+ <HelixWorkItem Include="@(SPMI_Partition)">
+ <Command>$(WorkItemCommand) -arch %(HelixWorkItem.Architecture) -platform %(HelixWorkItem.Platform) -log_directory $(SuperpmiLogsLocation)</Command>
+ <Timeout>$(WorkItemTimeout)</Timeout>
+ <DownloadFilesFromResults>superpmi_%(HelixWorkItem.Platform)_%(HelixWorkItem.Architecture).log</DownloadFilesFromResults>
+ </HelixWorkItem>
+ </ItemGroup>
+ </Project>
--- /dev/null
+#!/usr/bin/env python3
+#
+# Licensed to the .NET Foundation under one or more agreements.
+# The .NET Foundation licenses this file to you under the MIT license.
+#
+##
+# Title : superpmi_setup.py
+#
+# Notes:
+#
+# Script to run "superpmi replay" for various collections under various COMPlus_JitStressRegs value.
+################################################################################
+################################################################################
+
+
+import argparse
+from os import path
+import os
+from os import listdir
+from coreclr_arguments import *
+from superpmi_setup import run_command
+
+parser = argparse.ArgumentParser(description="description")
+
+parser.add_argument("-arch", help="Architecture")
+parser.add_argument("-platform", help="OS platform")
+parser.add_argument("-jit_directory", help="path to the directory containing clrjit binaries")
+parser.add_argument("-log_directory", help="path to the directory containing superpmi log files")
+
+jit_flags = [
+ "JitStressRegs=0",
+ "JitStressRegs=1",
+ "JitStressRegs=2",
+ "JitStressRegs=3",
+ "JitStressRegs=4",
+ "JitStressRegs=8",
+ "JitStressRegs=0x10",
+ "JitStressRegs=0x80",
+ "JitStressRegs=0x1000",
+]
+
+
+def setup_args(args):
+ """ Setup the args for SuperPMI to use.
+
+ Args:
+ args (ArgParse): args parsed by arg parser
+
+ Returns:
+ args (CoreclrArguments)
+
+ """
+ coreclr_args = CoreclrArguments(args, require_built_core_root=False, require_built_product_dir=False,
+ require_built_test_dir=False, default_build_type="Checked")
+
+ coreclr_args.verify(args,
+ "arch",
+ lambda unused: True,
+ "Unable to set arch")
+
+ coreclr_args.verify(args,
+ "platform",
+ lambda unused: True,
+ "Unable to set platform")
+
+ coreclr_args.verify(args,
+ "jit_directory",
+ lambda jit_directory: os.path.isdir(jit_directory),
+ "jit_directory doesn't exist")
+
+ coreclr_args.verify(args,
+ "log_directory",
+ lambda log_directory: True,
+ "log_directory doesn't exist")
+
+ return coreclr_args
+
+
+def main(main_args):
+ """Main entrypoint
+
+ Args:
+ main_args ([type]): Arguments to the script
+ """
+
+ python_path = sys.executable
+ cwd = os.path.dirname(os.path.realpath(__file__))
+ coreclr_args = setup_args(main_args)
+ spmi_location = path.join(cwd, "artifacts", "spmi")
+ log_directory = coreclr_args.log_directory
+ platform_name = coreclr_args.platform
+ os_name = "win" if platform_name.lower() == "windows" else "unix"
+ arch_name = coreclr_args.arch
+ host_arch_name = "x64" if arch_name.endswith("64") else "x86"
+ jit_path = path.join(coreclr_args.jit_directory, 'clrjit_{}_{}_{}.dll'.format(os_name, arch_name, host_arch_name))
+
+ print("Running superpmi.py download")
+ run_command([python_path, path.join(cwd, "superpmi.py"), "download", "--no_progress", "-target_os", platform_name,
+ "-target_arch", arch_name, "-core_root", cwd, "-spmi_location", spmi_location], _exit_on_fail=True)
+
+ failed_runs = []
+ for jit_flag in jit_flags:
+ log_file = path.join(log_directory, 'superpmi_{}.log'.format(jit_flag.replace("=", "_")))
+ print("Running superpmi.py replay for {}".format(jit_flag))
+
+ _, _, return_code = run_command([
+ python_path, path.join(cwd, "superpmi.py"), "replay", "-core_root", cwd,
+ "-jitoption", jit_flag, "-jitoption", "TieredCompilation=0",
+ "-target_os", platform_name, "-target_arch", arch_name,
+ "-arch", host_arch_name,
+ "-jit_path", jit_path, "-spmi_location", spmi_location,
+ "-log_level", "debug", "-log_file", log_file])
+
+ if return_code != 0:
+ failed_runs.append("Failure in {}".format(log_file))
+
+ # Consolidate all superpmi_*.logs in superpmi_platform_architecture.log
+ final_log_name = path.join(log_directory, "superpmi_{}_{}.log".format(platform_name, arch_name))
+ print("Consolidating final {}".format(final_log_name))
+ with open(final_log_name, "a") as final_superpmi_log:
+ for superpmi_log in listdir(log_directory):
+ if not superpmi_log.startswith("superpmi_Jit") or not superpmi_log.endswith(".log"):
+ continue
+
+ print("Appending {}".format(superpmi_log))
+ final_superpmi_log.write("======================================================={}".format(os.linesep))
+ final_superpmi_log.write("Contents from {}{}".format(superpmi_log, os.linesep))
+ final_superpmi_log.write("======================================================={}".format(os.linesep))
+ with open(path.join(log_directory, superpmi_log), "r") as current_superpmi_log:
+ contents = current_superpmi_log.read()
+ final_superpmi_log.write(contents)
+
+ # Log failures summary
+ if len(failed_runs) > 0:
+ final_superpmi_log.write(os.linesep)
+ final_superpmi_log.write(os.linesep)
+ final_superpmi_log.write("========Failed runs summary========{}".format(os.linesep))
+ final_superpmi_log.write(os.linesep.join(failed_runs))
+
+ return 0 if len(failed_runs) == 0 else 1
+
+
+if __name__ == "__main__":
+ args = parser.parse_args()
+ sys.exit(main(args))
target directory already exists.
"""
+download_no_progress_help = """\
+If specified, then download progress will not be shown.
+"""
+
merge_mch_pattern_help = """\
A pattern to describing files to merge, passed through directly to `mcs -merge`.
Acceptable patterns include `*.mch`, `file*.mch`, and `c:\\my\\directory\\*.mch`.
download_parser.add_argument("-jit_ee_version", help=jit_ee_version_help)
download_parser.add_argument("--skip_cleanup", action="store_true", help=skip_cleanup_help)
download_parser.add_argument("--force_download", action="store_true", help=force_download_help)
+download_parser.add_argument("--no_progress", action="store_true", help=download_no_progress_help)
download_parser.add_argument("-mch_files", metavar="MCH_FILE", nargs='+', help=replay_mch_files_help)
download_parser.add_argument("-private_store", action="append", help=private_store_help)
sys.stdout.flush()
-def download_with_progress_urlretrieve(uri, target_location, fail_if_not_found=True):
+def download_with_progress_urlretrieve(uri, target_location, fail_if_not_found=True, display_progress=True):
""" Do an URI download using urllib.request.urlretrieve with a progress hook.
Args:
ok = True
try:
- urllib.request.urlretrieve(uri, target_location, reporthook=download_progress_hook)
+ progress_display_method = download_progress_hook if display_progress else None
+ urllib.request.urlretrieve(uri, target_location, reporthook=progress_display_method)
except urllib.error.HTTPError as httperror:
if (httperror == 404) and fail_if_not_found:
logging.error("HTTP 404 error")
return ok
-def download_one_url(uri, target_location, fail_if_not_found=True):
+def download_one_url(uri, target_location, fail_if_not_found=True, display_progress=True):
""" Do an URI download using urllib.request.urlretrieve or Azure Storage APIs.
Args:
if authenticate_using_azure:
return download_with_azure(uri, target_location, fail_if_not_found)
else:
- return download_with_progress_urlretrieve(uri, target_location, fail_if_not_found)
+ return download_with_progress_urlretrieve(uri, target_location, fail_if_not_found, display_progress)
def is_zero_length_file(fpath):
for item in mch_files:
# On Windows only, see if any of the mch_files are UNC paths (i.e., "\\server\share\...").
# If so, download and cache all the files found there to our usual local cache location, to avoid future network access.
- if coreclr_args.host_os == "windows" and item.startswith("\\\\"):
+ if coreclr_args.host_os == "windows" and item.startswith("\\\\"):
# Special case: if the user specifies a .mch file, we'll also look for and cache a .mch.mct file next to it, if one exists.
# This happens naturally if a directory is passed and we search for all .mch and .mct files in that directory.
mch_file = os.path.abspath(item)
# Download all the urls at once, and add the local cache filenames to our accumulated list of local file names.
if len(urls) != 0:
- local_mch_files += download_files(urls, mch_cache_dir)
+ local_mch_files += download_files(urls, mch_cache_dir, display_progress=not coreclr_args.no_progress)
# Special case: walk the URLs list and for every ".mch" or ".mch.zip" file, check to see that either the associated ".mct" file is already
# in the list, or add it to a new list to attempt to download (but don't fail the download if it doesn't exist).
if mct_url not in urls:
mct_urls.append(mct_url)
if len(mct_urls) != 0:
- local_mch_files += download_files(mct_urls, mch_cache_dir, fail_if_not_found=False)
+ local_mch_files += download_files(mct_urls, mch_cache_dir, fail_if_not_found=False, display_progress=not coreclr_args.no_progress)
# Even though we might have downloaded MCT files, only return the set of MCH files.
local_mch_files = [file for file in local_mch_files if any(file.lower().endswith(extension) for extension in [".mch"])]
list containing the local path of files downloaded
"""
- blob_filter_string = "{}/{}/{}/".format(coreclr_args.jit_ee_version, coreclr_args.target_os, coreclr_args.mch_arch).lower()
+ blob_filter_string = "{}/{}/{}/".format(coreclr_args.jit_ee_version, coreclr_args.target_os, coreclr_args.mch_arch).lower()
# Determine if a URL in Azure Storage should be allowed. The path looks like:
# jit-ee-guid/Linux/x64/Linux.x64.Checked.frameworks.mch.zip
blob_url_prefix = "{}/{}/".format(az_blob_storage_superpmi_container_uri, az_collections_root_folder)
urls = [blob_url_prefix + path for path in paths]
- return download_files(urls, target_dir)
+ return download_files(urls, target_dir, display_progress=not coreclr_args.no_progress)
-def download_files(paths, target_dir, verbose=True, fail_if_not_found=True):
+def download_files(paths, target_dir, verbose=True, fail_if_not_found=True, display_progress=True):
""" Download a set of files, specified as URLs or paths (such as Windows UNC paths),
to a target directory. If a file is a .ZIP file, then uncompress the file and
copy all its contents to the target directory.
download_path = os.path.join(temp_location, item_name)
if is_item_url:
- ok = download_one_url(item_path, download_path, fail_if_not_found)
+ ok = download_one_url(item_path, download_path, fail_if_not_found, display_progress)
if not ok:
continue
else:
# Not a zip file; download directory to target directory
download_path = os.path.join(target_dir, item_name)
if is_item_url:
- ok = download_one_url(item_path, download_path, fail_if_not_found)
+ ok = download_one_url(item_path, download_path, fail_if_not_found, display_progress)
if not ok:
continue
else:
"Unable to set force_download")
coreclr_args.verify(args,
+ "no_progress",
+ lambda unused: True,
+ "Unable to set no_progress")
+
+ coreclr_args.verify(args,
"filter",
lambda unused: True,
"Unable to set filter.")
--- /dev/null
+#!/usr/bin/env python3
+#
+# Licensed to the .NET Foundation under one or more agreements.
+# The .NET Foundation licenses this file to you under the MIT license.
+#
+##
+# Title : superpmi_replay_setup.py
+#
+# Notes:
+#
+# Script to setup directory structure required to perform SuperPMI replay in CI.
+# It creates `correlation_payload_directory` that contains clrjit*_x64.dll and clrjit*_x86.dll
+################################################################################
+################################################################################
+
+import argparse
+from os import path, walk
+import os
+import shutil
+import stat
+import subprocess
+import tempfile
+
+from os.path import isfile, join
+from coreclr_arguments import *
+from superpmi_setup import copy_directory, copy_files, set_pipeline_variable, run_command
+
+parser = argparse.ArgumentParser(description="description")
+
+parser.add_argument("-arch", help="Architecture")
+parser.add_argument("-source_directory", help="path to the directory containing binaries")
+parser.add_argument("-product_directory", help="path to the directory containing binaries")
+
+
+def setup_args(args):
+ """ Setup the args for SuperPMI to use.
+
+ Args:
+ args (ArgParse): args parsed by arg parser
+
+ Returns:
+ args (CoreclrArguments)
+
+ """
+ coreclr_args = CoreclrArguments(args, require_built_core_root=False, require_built_product_dir=False,
+ require_built_test_dir=False, default_build_type="Checked")
+
+ coreclr_args.verify(args,
+ "arch",
+ lambda unused: True,
+ "Unable to set arch")
+
+ coreclr_args.verify(args,
+ "source_directory",
+ lambda source_directory: os.path.isdir(source_directory),
+ "source_directory doesn't exist")
+
+ coreclr_args.verify(args,
+ "product_directory",
+ lambda product_directory: os.path.isdir(product_directory),
+ "product_directory doesn't exist")
+
+ return coreclr_args
+
+
+def partition_mch(mch_directory, dst_directory):
+ from os import listdir
+
+ print("Inside partition_mch")
+ mch_zip_files = []
+ for file_path, dirs, files in walk(mch_directory, topdown=True):
+ for name in files:
+ curr_file_path = path.join(file_path, name)
+
+ if not isfile(curr_file_path):
+ continue
+ if not name.endswith(".mch.zip"):
+ continue
+
+ mch_zip_files.append(curr_file_path)
+
+ index = 1
+ for mch_file in mch_zip_files:
+ print("Processing {}".format(mch_file))
+ file_names = []
+ file_names += [mch_file]
+ file_names += [mch_file.replace(".mch.zip", ".mch.mct.zip")]
+ curr_dst_path = path.join(dst_directory, "partitions", str(index))
+ copy_files(mch_directory, curr_dst_path, file_names)
+ index += 1
+
+
+def match_correlation_files(full_path):
+ file_name = os.path.basename(full_path)
+
+ if file_name.startswith("clrjit_") and file_name.endswith(".dll") and file_name.find(
+ "osx") == -1 and file_name.find("armel") == -1:
+ return True
+
+ if file_name == "superpmi.exe" or file_name == "mcs.exe":
+ return True
+
+ return False
+
+
+def main(main_args):
+ """Main entrypoint
+
+ Args:
+ main_args ([type]): Arguments to the script
+ """
+ coreclr_args = setup_args(main_args)
+
+ arch = coreclr_args.arch
+ source_directory = coreclr_args.source_directory
+ product_directory = coreclr_args.product_directory
+
+ # CorrelationPayload directories
+ correlation_payload_directory = path.join(coreclr_args.source_directory, "payload")
+ superpmi_src_directory = path.join(source_directory, 'src', 'coreclr', 'scripts')
+
+ helix_source_prefix = "official"
+ creator = ""
+ ci = True
+ helix_queue = "Windows.10.Amd64.X86"
+
+ # Copy *.py to CorrelationPayload
+ print('Copying {} -> {}'.format(superpmi_src_directory, correlation_payload_directory))
+ copy_directory(superpmi_src_directory, correlation_payload_directory,
+ match_func=lambda path: any(path.endswith(extension) for extension in [".py"]))
+
+ # Copy clrjit*_arch.dll binaries to CorrelationPayload
+ print('Copying binaries {} -> {}'.format(product_directory, correlation_payload_directory))
+ copy_directory(product_directory, correlation_payload_directory, match_func=match_correlation_files)
+
+ # Set variables
+ print('Setting pipeline variables:')
+ set_pipeline_variable("CorrelationPayloadDirectory", correlation_payload_directory)
+ set_pipeline_variable("Architecture", arch)
+ set_pipeline_variable("Creator", creator)
+ set_pipeline_variable("Queue", helix_queue)
+ set_pipeline_variable("HelixSourcePrefix", helix_source_prefix)
+
+
+if __name__ == "__main__":
+ args = parser.parse_args()
+ sys.exit(main(args))
file_names ([string]): List of full path file names to be copied.
"""
- print('### Copying below files to {0}:'.format(dst_path))
+ print('### Copying below files from {0} to {1}:'.format(src_path, dst_path))
print('')
print(os.linesep.join(file_names))
for f in file_names: