steps:
- ${{ parameters.steps }}
- - script: $(PythonScript) $(Build.SourcesDirectory)/src/coreclr/scripts/superpmi_collect_setup.py -source_directory $(Build.SourcesDirectory) -core_root_directory $(Core_Root_Dir) -arch $(archType) -platform $(osGroup) -mch_file_tag $(MchFileTag) -input_directory $(InputDirectory) -collection_name $(CollectionName) -collection_type $(CollectionType) -max_size 50 # size in MB
+ - script: $(PythonScript) $(Build.SourcesDirectory)/src/coreclr/scripts/superpmi_collect_setup.py -source_directory $(Build.SourcesDirectory) -core_root_directory $(Core_Root_Dir) -arch $(archType) -platform $(osGroup) -mch_file_tag $(MchFileTag) -input_directory $(InputDirectory) -collection_name $(CollectionName) -collection_type $(CollectionType) -max_size 25 # size in MB
displayName: ${{ format('SuperPMI setup ({0})', parameters.osGroup) }}
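For context, the `-max_size` value above is given in megabytes, and the setup script is expected to turn it into a byte budget before splitting the input assemblies into buckets. A minimal sketch of that conversion, with illustrative names only (the real superpmi_collect_setup.py defines its own argument handling):

```python
import argparse

# Illustrative only: the option name mirrors the pipeline invocation above,
# but the actual parser in superpmi_collect_setup.py may differ.
parser = argparse.ArgumentParser()
parser.add_argument("-max_size", type=int, default=25,
                    help="maximum bucket size, in MB")
args = parser.parse_args(["-max_size", "25"])

# Convert the MB budget to bytes before handing it to the partitioner.
max_size_bytes = args.max_size * 1000 * 1000
```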
# Create required directories for merged mch collection and superpmi logs
<WorkItemCommand>$(Python) $(WorkItemCommand) -assemblies $(PmiAssembliesDirectory) -arch $(Architecture) -build_type $(BuildConfig) -core_root $(SuperPMIDirectory)</WorkItemCommand>
</PropertyGroup>
+ <PropertyGroup Condition="'$(CollectionName)' != 'benchmarks'">
+ <WorkItemTimeout>2:00</WorkItemTimeout>
+ </PropertyGroup>
+
<PropertyGroup Condition="'$(CollectionName)' == 'benchmarks'">
<WorkItemCommand>$(Python) $(SuperPMIDirectory)/superpmi_benchmarks.py -performance_directory $(PerformanceDirectory) -superpmi_directory $(SuperPMIDirectory) -core_root $(SuperPMIDirectory) -arch $(Architecture)</WorkItemCommand>
+ <WorkItemTimeout>3:00</WorkItemTimeout>
</PropertyGroup>
<PropertyGroup>
<EnableAzurePipelinesReporter>false</EnableAzurePipelinesReporter>
<EnableXUnitReporter>false</EnableXUnitReporter>
- <WorkItemTimeout>5:00</WorkItemTimeout>
</PropertyGroup>
<ItemGroup Condition=" '$(AGENT_OS)' == 'Windows_NT' ">
"mscordbi.dll",
"mscorrc.dll",
"msdia140.dll",
+ "msquic.dll",
"R2RDump.exe",
"R2RTest.exe",
"superpmi.exe",
def first_fit(sorted_by_size, max_size):
""" Given a list of file names along with size in descending order, divides the files
- in number of buckets such that each bucket doesn't exceed max_size. Since this is a first-fit
+ in number of buckets such that each bucket doesn't exceed max_size (unless a single file exceeds
+ max_size, in which case it gets its own bucket). Since this is a first-fit
approach, it doesn't guarantee to find the bucket with tighest spot available.
Args:
found_bucket = True
break
- if not found_bucket:
- partitions[len(partitions)] = [curr_file]
+ if not found_bucket:
+ partitions[len(partitions)] = [curr_file]
total_size = 0
for p_index in partitions:
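For clarity, here is a self-contained sketch of the first-fit partitioning the docstring describes. It approximates the excerpted function; the real superpmi_collect_setup.py may use different data shapes (this sketch assumes a list of (file_name, size) tuples sorted by size, descending, and a plain dict mapping bucket index to files):

```python
def first_fit(sorted_by_size, max_size):
    """Partition (file_name, size) pairs, largest first, into buckets whose
    total size stays within max_size. A file bigger than max_size gets its
    own bucket. First-fit: each file goes into the first bucket that can
    hold it, not necessarily the one with the tightest remaining space."""
    partitions = {}   # bucket index -> list of (file_name, size)
    totals = {}       # bucket index -> current total size of that bucket
    for curr_file, file_size in sorted_by_size:
        found_bucket = False
        for index in partitions:
            if totals[index] + file_size <= max_size:
                partitions[index].append((curr_file, file_size))
                totals[index] += file_size
                found_bucket = True
                break
        if not found_bucket:
            # Open a new bucket; this also handles a single oversized file.
            new_index = len(partitions)
            partitions[new_index] = [(curr_file, file_size)]
            totals[new_index] = file_size
    return partitions


# Example: with a 25 MB cap, the 30 MB file sits alone in its own bucket.
files = [("a.dll", 30), ("b.dll", 20), ("c.dll", 15), ("d.dll", 10)]
print(first_fit(files, max_size=25))
# {0: [('a.dll', 30)], 1: [('b.dll', 20)], 2: [('c.dll', 15), ('d.dll', 10)]}
```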