@echo off
-powershell -ExecutionPolicy ByPass -NoProfile -command "& """%~dp0eng\build.ps1""" -restore -build %*"
+powershell -ExecutionPolicy ByPass -NoProfile -command "& """%~dp0eng\build.ps1""" -restore %*"
exit /b %ErrorLevel%
done
scriptroot="$( cd -P "$( dirname "$source" )" && pwd )"
-"$scriptroot/eng/build.sh" --restore --build $@
+"$scriptroot/eng/build.sh" --restore $@
- name: skipComponentGovernanceDetection
value: true
-extends:
- template: /eng/pipelines/pipeline-resources.yml
- parameters:
- stages:
- - stage: build
- displayName: Build and Test Diagnostics
- jobs:
- - template: /eng/pipelines/build.yml
- parameters:
- name: Windows
- osGroup: Windows_NT
- isCodeQLRun: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: x64
- Build_Release_x86:
- _BuildConfig: Release
- _BuildArch: x86
- Build_Release_arm:
- _BuildConfig: Release
- _BuildArch: arm
- Build_Release_arm64:
- _BuildConfig: Release
- _BuildArch: arm64
+stages:
+- stage: build
+ displayName: Build and Test Diagnostics
+ jobs:
+ - template: /eng/build.yml
+ parameters:
+ name: Windows
+ osGroup: Windows_NT
+ isCodeQLRun: true
+ strategy:
+ matrix:
+ Build_Release:
+ _BuildConfig: Release
+ _BuildArch: x64
+ Build_Release_x86:
+ _BuildConfig: Release
+ _BuildArch: x86
+ Build_Release_arm:
+ _BuildConfig: Release
+ _BuildArch: arm
+ Build_Release_arm64:
+ _BuildConfig: Release
+ _BuildArch: arm64
- - template: /eng/pipelines/build.yml
- parameters:
- name: Linux_x64
- osGroup: Linux
- nativeBuildContainer: linux_x64
- isCodeQLRun: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: x64
+ - template: /eng/build.yml
+ parameters:
+ name: CentOS_7
+ osGroup: Linux
+ dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:centos-7-3e800f1-20190501005343
+ isCodeQLRun: true
+ strategy:
+ matrix:
+ Build_Release:
+ _BuildConfig: Release
+ _BuildArch: x64
- - template: /eng/pipelines/build.yml
- parameters:
- name: Linux_musl
- osGroup: Linux
- nativeBuildContainer: linux_musl_x64
- isCodeQLRun: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: x64
+ - template: /eng/build.yml
+ parameters:
+ name: Alpine3_13
+ osGroup: Linux
+ dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:alpine-3.13-WithNode-20210910135845-c401c85
+ isCodeQLRun: true
+ strategy:
+ matrix:
+ Build_Release:
+ _BuildConfig: Release
+ _BuildArch: x64
- - template: /eng/pipelines/build.yml
- parameters:
- name: MacOS
- osGroup: MacOS
- isCodeQLRun: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: x64
+ - template: /eng/build.yml
+ parameters:
+ name: MacOS
+ osGroup: MacOS
+ isCodeQLRun: true
+ strategy:
+ matrix:
+ Build_Release:
+ _BuildConfig: Release
+ _BuildArch: x64
- - template: /eng/pipelines/build.yml
- parameters:
- name: MacOS_arm64
- osGroup: MacOS_cross
- crossBuild: true
- isCodeQLRun: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: arm64
+ - template: /eng/build.yml
+ parameters:
+ name: MacOS_arm64
+ osGroup: MacOS_cross
+ crossbuild: true
+ isCodeQLRun: true
+ strategy:
+ matrix:
+ Build_Release:
+ _BuildConfig: Release
+ _BuildArch: arm64
- - template: /eng/pipelines/build.yml
- parameters:
- name: Linux_arm
- osGroup: Linux
- nativeBuildContainer: linux_arm
- crossBuild: true
- isCodeQLRun: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: arm
+ - template: /eng/build.yml
+ parameters:
+ name: Linux_arm
+ osGroup: Linux
+ dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-16.04-cross-20210719121212-8a8d3be
+ crossrootfsDir: '/crossrootfs/arm'
+ isCodeQLRun: true
+ strategy:
+ matrix:
+ Build_Release:
+ _BuildConfig: Release
+ _BuildArch: arm
- - template: /eng/pipelines/build.yml
- parameters:
- name: Linux_arm64
- osGroup: Linux
- nativeBuildContainer: linux_arm64
- crossBuild: true
- isCodeQLRun: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: arm64
+ - template: /eng/build.yml
+ parameters:
+ name: Linux_arm64
+ osGroup: Linux
+ dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-16.04-cross-arm64-20210719121212-8a8d3be
+ crossrootfsDir: '/crossrootfs/arm64'
+ isCodeQLRun: true
+ strategy:
+ matrix:
+ Build_Release:
+ _BuildConfig: Release
+ _BuildArch: arm64
- - template: /eng/pipelines/build.yml
- parameters:
- name: Linux_musl_arm
- osGroup: Linux
- nativeBuildContainer: linux_musl_arm
- crossBuild: true
- isCodeQLRun: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: arm
+ - template: /eng/build.yml
+ parameters:
+ name: Linux_musl_arm
+ osGroup: Linux
+ dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-16.04-cross-arm-alpine-20210923140502-78f7860
+ crossrootfsDir: '/crossrootfs/arm'
+ isCodeQLRun: true
+ strategy:
+ matrix:
+ Build_Release:
+ _BuildConfig: Release
+ _BuildArch: arm
- - template: /eng/pipelines/build.yml
- parameters:
- name: Linux_musl_arm64
- osGroup: Linux
- nativeBuildContainer: linux_musl_arm64
- crossBuild: true
- isCodeQLRun: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: arm64
+ - template: /eng/build.yml
+ parameters:
+ name: Linux_musl_arm64
+ osGroup: Linux
+ dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-16.04-cross-arm64-alpine-20210923140502-78f7860
+ crossrootfsDir: '/crossrootfs/arm64'
+ isCodeQLRun: true
+ strategy:
+ matrix:
+ Build_Release:
+ _BuildConfig: Release
+ _BuildArch: arm64
\ No newline at end of file
- name: RuntimeFeedBase64SasToken
value: $(dotnetclimsrc-read-sas-token-base64)
-extends:
- template: /eng/pipelines/pipeline-resources.yml
- parameters:
- stages:
- - stage: build
- displayName: Build and Test Diagnostics
- jobs:
+stages:
+ - stage: build
+ displayName: Build and Test Diagnostics
+ jobs:
- ############################
- # #
- # Source Build legs #
- # #
- ############################
+ ############################
+ # #
+ # Source Build legs #
+ # #
+ ############################
- - template: /eng/common/templates/job/source-build.yml
- parameters:
- platform:
- name: Complete
- buildScript: ./eng/common/build.sh
+ - template: /eng/common/templates/job/source-build.yml
+ parameters:
+ platform:
+ name: Complete
+ buildScript: ./eng/common/build.sh
- ############################
- # #
- # Build legs #
- # #
- ############################
+ ############################
+ # #
+ # Build legs #
+ # #
+ ############################
- - template: /eng/pipelines/build.yml
- parameters:
- name: Windows
- osGroup: Windows_NT
- strategy:
- matrix:
- Build_Debug:
- _BuildConfig: Debug
- _BuildArch: x64
- Build_Release:
- _BuildConfig: Release
- _BuildArch: x64
- _PublishArtifacts: bin
- Build_Release_x86:
- _BuildConfig: Release
- _BuildArch: x86
- _PublishArtifacts: bin/Windows_NT.x86.Release
- ${{ if ne(variables['System.TeamProject'], 'public') }}:
- Build_Release_arm:
- _BuildConfig: Release
- _BuildArch: arm
- _PublishArtifacts: bin/Windows_NT.arm.Release
- Build_Release_arm64:
- _BuildConfig: Release
- _BuildArch: arm64
- _PublishArtifacts: bin/Windows_NT.arm64.Release
+ - template: /eng/build.yml
+ parameters:
+ name: Windows
+ osGroup: Windows_NT
+ strategy:
+ matrix:
+ Build_Debug:
+ _BuildConfig: Debug
+ _BuildArch: x64
+ Build_Release:
+ _BuildConfig: Release
+ _BuildArch: x64
+ _PublishArtifacts: bin
+ Build_Release_x86:
+ _BuildConfig: Release
+ _BuildArch: x86
+ _PublishArtifacts: bin/Windows_NT.x86.Release
+ ${{ if ne(variables['System.TeamProject'], 'public') }}:
+ Build_Release_arm:
+ _BuildOnly: true
+ _BuildConfig: Release
+ _BuildArch: arm
+ _PublishArtifacts: bin/Windows_NT.arm.Release
+ Build_Release_arm64:
+ _BuildOnly: true
+ _BuildConfig: Release
+ _BuildArch: arm64
+ _PublishArtifacts: bin/Windows_NT.arm64.Release
- - template: /eng/pipelines/build.yml
- parameters:
- name: Linux_x64
- osGroup: Linux
- nativeBuildContainer: linux_x64
- buildOnly: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: x64
- _PublishArtifacts: bin/Linux.x64.Release
- ${{ if in(variables['Build.Reason'], 'PullRequest') }}:
- Build_Debug:
- _BuildConfig: Debug
- _BuildArch: x64
- _PublishArtifacts: bin/Linux.x64.Debug
+ - template: /eng/build.yml
+ parameters:
+ name: CentOS_7
+ osGroup: Linux
+ dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:centos-7-3e800f1-20190501005343
+ strategy:
+ matrix:
+ Build_Debug:
+ _BuildConfig: Debug
+ _BuildArch: x64
+ _PublishArtifacts: bin/Linux.x64.Debug
+ Build_Release:
+ _BuildConfig: Release
+ _BuildArch: x64
+ _PublishArtifacts: bin/Linux.x64.Release
- - template: /eng/pipelines/build.yml
- parameters:
- name: Linux_musl
- osGroup: Linux
- osSuffix: -musl
- nativeBuildContainer: linux_musl_x64
- buildOnly: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: x64
- _PublishArtifacts: bin/Linux.x64.Release
- _ArtifactsTargetPath: bin/Linux-musl.x64.Release
- ${{ if in(variables['Build.Reason'], 'PullRequest') }}:
- Build_Debug:
- _BuildConfig: Debug
- _BuildArch: x64
- _PublishArtifacts: bin/Linux.x64.Debug
- _ArtifactsTargetPath: bin/Linux-musl.x64.Debug
+ - template: /eng/build.yml
+ parameters:
+ name: Alpine3_13
+ osGroup: Linux
+ dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:alpine-3.13-WithNode-20210910135845-c401c85
+ artifactsTargetPath: bin/Linux-musl.x64.Release
+ requiresCapPtraceContainer: true
+ strategy:
+ matrix:
+ Build_Release:
+ _BuildConfig: Release
+ _BuildArch: x64
+ _PublishArtifacts: bin/Linux.x64.Release
+ ${{ if in(variables['Build.Reason'], 'PullRequest') }}:
+ Build_Debug:
+ _BuildConfig: Debug
+ _BuildArch: x64
- - template: /eng/pipelines/build.yml
- parameters:
- name: MacOS
- osGroup: MacOS
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: x64
- _PublishArtifacts: bin/OSX.x64.Release
- ${{ if in(variables['Build.Reason'], 'PullRequest') }}:
- Build_Debug:
- _BuildConfig: Debug
- _BuildArch: x64
+ - template: /eng/build.yml
+ parameters:
+ name: MacOS
+ osGroup: MacOS
+ strategy:
+ matrix:
+ Build_Release:
+ _BuildConfig: Release
+ _BuildArch: x64
+ _PublishArtifacts: bin/OSX.x64.Release
+ ${{ if in(variables['Build.Reason'], 'PullRequest') }}:
+ Build_Debug:
+ _BuildConfig: Debug
+ _BuildArch: x64
- - template: /eng/pipelines/build.yml
- parameters:
- name: MacOS_arm64
- osGroup: MacOS_cross
- crossBuild: true
- buildOnly: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: arm64
- _PublishArtifacts: bin/OSX.arm64.Release
- ${{ if in(variables['Build.Reason'], 'PullRequest') }}:
- Build_Debug:
- _BuildConfig: Debug
- _BuildArch: arm64
-
- - ${{ if ne(variables['System.TeamProject'], 'public') }}:
- - template: /eng/pipelines/build.yml
- parameters:
- name: Linux_arm
- osGroup: Linux
- nativeBuildContainer: linux_arm
- crossBuild: true
- buildOnly: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: arm
- _PublishArtifacts: bin/Linux.arm.Release
+ - template: /eng/build.yml
+ parameters:
+ name: MacOS_arm64
+ osGroup: MacOS_cross
+ crossbuild: true
+ buildAndSkipTest: true
+ strategy:
+ matrix:
+ Build_Release:
+ _BuildConfig: Release
+ _BuildArch: arm64
+ _PublishArtifacts: bin/OSX.arm64.Release
+ ${{ if in(variables['Build.Reason'], 'PullRequest') }}:
+ Build_Debug:
+ _BuildConfig: Debug
+ _BuildArch: arm64
- - template: /eng/pipelines/build.yml
- parameters:
- name: Linux_arm64
- osGroup: Linux
- nativeBuildContainer: linux_arm64
- crossBuild: true
- buildOnly: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: arm64
- _PublishArtifacts: bin/Linux.arm64.Release
+ - ${{ if ne(variables['System.TeamProject'], 'public') }}:
+ - template: /eng/build.yml
+ parameters:
+ name: Linux_arm
+ osGroup: Linux
+ dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-16.04-cross-20210719121212-8a8d3be
+ crossrootfsDir: '/crossrootfs/arm'
+ buildAndSkipTest: true
+ strategy:
+ matrix:
+ Build_Release:
+ _BuildConfig: Release
+ _BuildArch: arm
+ _PublishArtifacts: bin/Linux.arm.Release
- - template: /eng/pipelines/build.yml
- parameters:
- name: Linux_musl_arm
- osGroup: Linux
- osSuffix: -musl
- nativeBuildContainer: linux_musl_arm
- crossBuild: true
- buildOnly: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: arm
- _PublishArtifacts: bin/Linux.arm.Release
- _ArtifactsTargetPath: bin/Linux-musl.arm.Release
+ - template: /eng/build.yml
+ parameters:
+ name: Linux_arm64
+ osGroup: Linux
+ dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-16.04-cross-arm64-20210719121212-8a8d3be
+ crossrootfsDir: '/crossrootfs/arm64'
+ buildAndSkipTest: true
+ strategy:
+ matrix:
+ Build_Release:
+ _BuildConfig: Release
+ _BuildArch: arm64
+ _PublishArtifacts: bin/Linux.arm64.Release
- - template: /eng/pipelines/build.yml
- parameters:
- name: Linux_musl_arm64
- osGroup: Linux
- osSuffix: -musl
- nativeBuildContainer: linux_musl_arm64
- crossBuild: true
- buildOnly: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: arm64
- _PublishArtifacts: bin/Linux.arm64.Release
- _ArtifactsTargetPath: bin/Linux-musl.arm64.Release
+ - template: /eng/build.yml
+ parameters:
+ name: Linux_musl_arm
+ osGroup: Linux
+ dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-16.04-cross-arm-alpine-20210923140502-78f7860
+ crossrootfsDir: '/crossrootfs/arm'
+ artifactsTargetPath: bin/Linux-musl.arm.Release
+ buildAndSkipTest: true
+ strategy:
+ matrix:
+ Build_Release:
+ _BuildConfig: Release
+ _BuildArch: arm
+ _PublishArtifacts: bin/Linux.arm.Release
- ############################
- # #
- # Test only legs #
- # #
- ############################
+ - template: /eng/build.yml
+ parameters:
+ name: Linux_musl_arm64
+ osGroup: Linux
+ dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-16.04-cross-arm64-alpine-20210923140502-78f7860
+ crossrootfsDir: '/crossrootfs/arm64'
+ artifactsTargetPath: bin/Linux-musl.arm64.Release
+ buildAndSkipTest: true
+ strategy:
+ matrix:
+ Build_Release:
+ _BuildConfig: Release
+ _BuildArch: arm64
+ _PublishArtifacts: bin/Linux.arm64.Release
- - template: /eng/pipelines/build.yml
- parameters:
- name: Ubuntu_20_04
- osGroup: Linux
- container: test_ubuntu_20_04
- dependsOn: Linux_x64
- testOnly: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: x64
- ${{ if in(variables['Build.Reason'], 'PullRequest') }}:
- Build_Debug:
- _BuildConfig: Debug
- _BuildArch: x64
+ ############################
+ # #
+ # Test only legs #
+ # #
+ ############################
- - template: /eng/pipelines/build.yml
- parameters:
- name: Alpine3_13
- osGroup: Linux
- osSuffix: -musl
- container: test_linux_musl_x64
- dependsOn: Linux_musl
- testOnly: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: x64
- ${{ if in(variables['Build.Reason'], 'PullRequest') }}:
- Build_Debug:
- _BuildConfig: Debug
- _BuildArch: x64
+ - template: /eng/build.yml
+ parameters:
+ name: Debian_Stretch
+ osGroup: Linux
+ dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:debian-stretch-3e800f1-20190521154431
+ dependsOn: CentOS_7
+ testOnly: true
+ strategy:
+ matrix:
+ Build_Debug:
+ _BuildConfig: Debug
+ _BuildArch: x64
- - ${{ if ne(variables['System.TeamProject'], 'public') }}:
- - template: /eng/pipelines/build.yml
- parameters:
- name: Debian_Bullseye
- osGroup: Linux
- container: test_debian_11_amd64
- dependsOn: Linux_x64
- testOnly: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: x64
- ${{ if in(variables['Build.Reason'], 'PullRequest') }}:
- Build_Debug:
- _BuildConfig: Debug
- _BuildArch: x64
+# - template: /eng/build.yml
+# parameters:
+# name: Fedora_34
+# osGroup: Linux
+# dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:fedora-34-helix-20220331150839-4f64125
+# dependsOn: CentOS_7
+# testOnly: true
+# requiresCapPtraceContainer: true
+# strategy:
+# matrix:
+# Build_Debug:
+# _BuildConfig: Debug
+# _BuildArch: x64
- - template: /eng/pipelines/build.yml
- parameters:
- name: Fedora_36
- osGroup: Linux
- container: test_fedora_36
- dependsOn: Linux_x64
- testOnly: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: x64
- ${{ if in(variables['Build.Reason'], 'PullRequest') }}:
- Build_Debug:
- _BuildConfig: Debug
- _BuildArch: x64
+# - template: /eng/build.yml
+# parameters:
+# name: OpenSuse_15_2
+# osGroup: Linux
+# dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:opensuse-15.2-helix-amd64-20211018152525-9cc02fe
+# dependsOn: CentOS_7
+# testOnly: true
+# strategy:
+# matrix:
+# Build_Debug:
+# _BuildConfig: Debug
+# _BuildArch: x64
- #- template: /eng/pipelines/build.yml
- # parameters:
- # name: OpenSuse_15_2
- # osGroup: Linux
- # container: test_opensuse_15_2
- # dependsOn: Linux_x64
- # testOnly: true
- # strategy:
- # matrix:
- # Build_Release:
- # _BuildConfig: Release
- # _BuildArch: x64
- # ${{ if in(variables['Build.Reason'], 'PullRequest') }}:
- # Build_Debug:
- # _BuildConfig: Debug
- # _BuildArch: x64
+ - template: /eng/build.yml
+ parameters:
+ name: Ubuntu_16_04
+ osGroup: Linux
+ dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-16.04-09ca40b-20190520220842
+ dependsOn: CentOS_7
+ testOnly: true
+ strategy:
+ matrix:
+ Build_Debug:
+ _BuildConfig: Debug
+ _BuildArch: x64
- #- template: /eng/pipelines/build.yml
- # parameters:
- # name: Ubuntu_18_04
- # osGroup: Linux
- # container: test_ubuntu_18_04
- # dependsOn: Linux_x64
- # testOnly: true
- # strategy:
- # matrix:
- # Build_Release:
- # _BuildConfig: Release
- # _BuildArch: x64
- # ${{ if in(variables['Build.Reason'], 'PullRequest') }}:
- # Build_Debug:
- # _BuildConfig: Debug
- # _BuildArch: x64
+ - template: /eng/build.yml
+ parameters:
+ name: Ubuntu_18_04
+ osGroup: Linux
+ dockerImage: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-3e800f1-20190508143252
+ dependsOn: CentOS_7
+ testOnly: true
+ strategy:
+ matrix:
+ Build_Debug:
+ _BuildConfig: Debug
+ _BuildArch: x64
- - template: /eng/pipelines/build.yml
- parameters:
- name: Ubuntu_22_04
- osGroup: Linux
- container: test_ubuntu_22_04
- dependsOn: Linux_x64
- testOnly: true
- strategy:
- matrix:
- Build_Release:
- _BuildConfig: Release
- _BuildArch: x64
- ${{ if in(variables['Build.Reason'], 'PullRequest') }}:
- Build_Debug:
- _BuildConfig: Debug
- _BuildArch: x64
+ # Download, sign, package and publish
+ - ${{ if notin(variables['Build.Reason'], 'PullRequest') }}:
+ - template: /eng/common/templates/job/job.yml
+ parameters:
+ name: Sign_Package_Publish
+ displayName: Sign, Package, and Generate BAR Manifests
+ dependsOn:
+ - Windows
+ - CentOS_7
+ - Alpine3_13
+ - MacOS
+ - MacOS_arm64
+ - Linux_arm
+ - Linux_arm64
+ - Linux_musl_arm
+ - Linux_musl_arm64
+ condition: succeeded()
+ pool:
+ name: NetCore1ESPool-Svc-Internal
+ demands: ImageOverride -equals windows.vs2022.amd64
+ enablePublishUsingPipelines: true
+ enableMicrobuild: true
+ artifacts:
+ publish:
+ logs:
+ name: Logs_Packaging_Signing
+ steps:
+ - task: DownloadBuildArtifacts@0
+ displayName: 'Download release builds'
+ inputs:
+ downloadPath: '$(Build.ArtifactStagingDirectory)/__download__'
+ artifactName: Build_Release
+ checkDownloadedFiles: true
+ - task: CopyFiles@2
+ displayName: 'Binplace Product'
+ inputs:
+ sourceFolder: $(Build.ArtifactStagingDirectory)/__download__/Build_Release
+ targetFolder: '$(Build.SourcesDirectory)/artifacts/'
- # Download, sign, package and publish
- - ${{ if ne(variables['System.TeamProject'], 'public') }}:
- - template: /eng/common/templates/job/job.yml
- parameters:
- name: Sign_Package_Publish
- displayName: Sign, Package, and Generate BAR Manifests
- dependsOn:
- - Windows
- - MacOS
- - MacOS_arm64
- - Linux_x64
- - Linux_musl
- - Linux_arm
- - Linux_arm64
- - Linux_musl_arm
- - Linux_musl_arm64
+ # Windows x64 download. Everything under "bin" is published for the Windows x64 build.
+ # Create nuget packages, sign binaries and publish to blob feed
+ - script: $(Build.SourcesDirectory)\eng\ci-prepare-artifacts.cmd $(_InternalBuildArgs)
+ displayName: Package, Sign, and Publish
+ continueOnError: false
condition: succeeded()
- pool:
- name: NetCore1ESPool-Internal
- demands: ImageOverride -equals windows.vs2022.amd64
- enablePublishUsingPipelines: true
- enableMicrobuild: true
- artifacts:
- publish:
- logs:
- name: Logs_Packaging_Signing
- steps:
- - task: DownloadBuildArtifacts@0
- displayName: 'Download release builds'
- inputs:
- downloadPath: '$(Build.ArtifactStagingDirectory)/__download__'
- artifactName: Build_Release
- checkDownloadedFiles: true
- - task: CopyFiles@2
- displayName: 'Binplace Product'
- inputs:
- sourceFolder: $(Build.ArtifactStagingDirectory)/__download__/Build_Release
- targetFolder: '$(Build.SourcesDirectory)/artifacts/'
-
- # Windows x64 download. Everything under "bin" is published for the Windows x64 build.
- # Create nuget packages, sign binaries and publish to blob feed
- - script: $(Build.SourcesDirectory)\eng\ci-prepare-artifacts.cmd $(_InternalBuildArgs)
- displayName: Package, Sign, and Publish
- continueOnError: false
- condition: succeeded()
- # Publish package and log build artifacts
- - task: PublishBuildArtifacts@1
- displayName: Publish Package Artifacts
- inputs:
- publishLocation: Container
- pathtoPublish: '$(Build.SourcesDirectory)/artifacts/packages'
- artifactName: Packages
- continueOnError: true
- condition: always()
-
- - task: PublishBuildArtifacts@1
- displayName: Publish Bundled Tools
- inputs:
- publishLocation: Container
- pathtoPublish: '$(Build.SourcesDirectory)/artifacts/bundledtools'
- artifactName: BundledTools
- continueOnError: true
- condition: always()
+ # Publish package and log build artifacts
+ - task: PublishBuildArtifacts@1
+ displayName: Publish Package Artifacts
+ inputs:
+ publishLocation: Container
+ pathtoPublish: '$(Build.SourcesDirectory)/artifacts/packages'
+ artifactName: Packages
+ continueOnError: true
+ condition: always()
- - template: /eng/common/templates/job/publish-build-assets.yml
- parameters:
- configuration: Release
- dependsOn: Sign_Package_Publish
- publishUsingPipelines: true
- pool:
- name: NetCore1ESPool-Internal
- demands: ImageOverride -equals windows.vs2022.amd64
+ - task: PublishBuildArtifacts@1
+ displayName: Publish Bundled Tools
+ inputs:
+ publishLocation: Container
+ pathtoPublish: '$(Build.SourcesDirectory)/artifacts/bundledtools'
+ artifactName: BundledTools
+ continueOnError: true
+ condition: always()
- - ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
- - template: /eng/common/templates/post-build/post-build.yml
+ - template: /eng/common/templates/job/publish-build-assets.yml
parameters:
- # This is to enable SDL runs part of Post-Build Validation Stage.
- # as well as NuGet, SourceLink, and signing validation.
- # The variables get imported from group dotnet-diagnostics-sdl-params
- publishingInfraVersion: 3
- enableSourceLinkValidation: true
- enableSigningValidation: false
- enableSymbolValidation: false
- enableNugetValidation: true
- symbolPublishingAdditionalParameters: '/p:PublishSpecialClrFiles=false'
- publishInstallersAndChecksums: true
- SDLValidationParameters:
- enable: true
- continueOnError: true
- params: ' -SourceToolsList @("policheck","credscan")
- -TsaInstanceURL $(_TsaInstanceURL)
- -TsaProjectName $(_TsaProjectName)
- -TsaNotificationEmail $(_TsaNotificationEmail)
- -TsaCodebaseAdmin $(_TsaCodebaseAdmin)
- -TsaBugAreaPath $(_TsaBugAreaPath)
- -TsaIterationPath $(_TsaIterationPath)
- -TsaRepositoryName "diagnostics"
- -TsaCodebaseName "diagnostics"
- -TsaPublish $True'
- artifactNames:
- - 'Packages'
+ configuration: Release
+ dependsOn: Sign_Package_Publish
+ publishUsingPipelines: true
+ pool:
+ name: NetCore1ESPool-Svc-Internal
+ demands: ImageOverride -equals windows.vs2022.amd64
+
+ - ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
+ - template: /eng/common/templates/post-build/post-build.yml
+ parameters:
+      # This is to enable SDL runs as part of the Post-Build Validation Stage,
+      # as well as NuGet, SourceLink, and signing validation.
+      # The variables get imported from the dotnet-diagnostics-sdl-params variable group.
+ publishingInfraVersion: 3
+ enableSourceLinkValidation: true
+ enableSigningValidation: false
+ enableSymbolValidation: false
+ enableNugetValidation: true
+ symbolPublishingAdditionalParameters: '/p:PublishSpecialClrFiles=false'
+ publishInstallersAndChecksums: true
+ SDLValidationParameters:
+ enable: true
+ continueOnError: true
+ params: ' -SourceToolsList @("policheck","credscan")
+ -TsaInstanceURL $(_TsaInstanceURL)
+ -TsaProjectName $(_TsaProjectName)
+ -TsaNotificationEmail $(_TsaNotificationEmail)
+ -TsaCodebaseAdmin $(_TsaCodebaseAdmin)
+ -TsaBugAreaPath $(_TsaBugAreaPath)
+ -TsaIterationPath $(_TsaIterationPath)
+ -TsaRepositoryName "diagnostics"
+ -TsaCodebaseName "diagnostics"
+ -TsaPublish $True'
+ artifactNames:
+ - 'Packages'
- # This sets up the bits to do a Release.
- - template: /eng/pipelines/prepare-release.yml
+ # This sets up the bits to do a Release.
+ - template: /eng/prepare-release.yml
--- /dev/null
+@echo off
+powershell -ExecutionPolicy ByPass -NoProfile -command "& """%~dp0build.ps1""" -restore -ci -prepareMachine %*"
+exit /b %ErrorLevel%
<Dependencies>
<ProductDependencies>
- <Dependency Name="Microsoft.SymbolStore" Version="1.0.430201">
+ <Dependency Name="Microsoft.SymbolStore" Version="1.0.417001">
<Uri>https://github.com/dotnet/symstore</Uri>
- <Sha>00f6edae1666690960cd207fd2b7a51232af9605</Sha>
+ <Sha>e09f81a0b38786cb20f66b589a8b88b6997a62da</Sha>
</Dependency>
- <Dependency Name="Microsoft.Diagnostics.Runtime" Version="3.0.0-beta.23302.1">
+ <Dependency Name="Microsoft.Diagnostics.Runtime" Version="3.0.0-beta.23205.1">
<Uri>https://github.com/microsoft/clrmd</Uri>
- <Sha>272986369826a777686ba616a00acd48febc2546</Sha>
+ <Sha>3368bf4451a9441076595022fdff0f2bbea57b1b</Sha>
</Dependency>
- <Dependency Name="Microsoft.Diagnostics.Runtime.Utilities" Version="3.0.0-beta.23302.1">
+ <Dependency Name="Microsoft.Diagnostics.Runtime.Utilities" Version="3.0.0-beta.23205.1">
<Uri>https://github.com/microsoft/clrmd</Uri>
- <Sha>272986369826a777686ba616a00acd48febc2546</Sha>
+ <Sha>3368bf4451a9441076595022fdff0f2bbea57b1b</Sha>
</Dependency>
</ProductDependencies>
<ToolsetDependencies>
- <Dependency Name="Microsoft.DotNet.Arcade.Sdk" Version="8.0.0-beta.23302.3">
+ <Dependency Name="Microsoft.DotNet.Arcade.Sdk" Version="8.0.0-beta.23168.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>234e0726c7384ee84bf08550f2d16a1ff2d5c543</Sha>
+ <Sha>b12f035e893c34ec2c965d75f6e21b7a2667e98d</Sha>
<SourceBuild RepoName="arcade" ManagedOnly="true" />
</Dependency>
- <Dependency Name="Microsoft.DotNet.CodeAnalysis" Version="8.0.0-beta.23302.3">
+ <Dependency Name="Microsoft.DotNet.CodeAnalysis" Version="8.0.0-beta.23168.1">
<Uri>https://github.com/dotnet/arcade</Uri>
- <Sha>234e0726c7384ee84bf08550f2d16a1ff2d5c543</Sha>
+ <Sha>b12f035e893c34ec2c965d75f6e21b7a2667e98d</Sha>
</Dependency>
<Dependency Name="Microsoft.DotNet.RemoteExecutor" Version="7.0.0-beta.22316.2" Pinned="true">
<Uri>https://github.com/dotnet/arcade</Uri>
<Sha>ccfe6da198c5f05534863bbb1bff66e830e0c6ab</Sha>
</Dependency>
- <Dependency Name="Microsoft.Dotnet.Sdk.Internal" Version="8.0.100-preview.6.23305.2">
+ <Dependency Name="Microsoft.Dotnet.Sdk.Internal" Version="8.0.100-preview.3.23156.1">
<Uri>https://github.com/dotnet/installer</Uri>
- <Sha>18dc2cf11a2daaaa1633afd0c4225e188ce6c239</Sha>
+ <Sha>51e06f6931e859f56564556fa6ba519761fa7141</Sha>
</Dependency>
- <Dependency Name="Microsoft.AspNetCore.App.Ref.Internal" Version="8.0.0-preview.6.23302.1">
+ <Dependency Name="Microsoft.AspNetCore.App.Ref.Internal" Version="8.0.0-preview.4.23179.5">
<Uri>https://github.com/dotnet/aspnetcore</Uri>
- <Sha>c2488eead6ead7208f543d0a57104b5d167b93f9</Sha>
+ <Sha>c0acf059eddd7e70498804dcc99a7c7b33732417</Sha>
</Dependency>
- <Dependency Name="Microsoft.AspNetCore.App.Ref" Version="8.0.0-preview.6.23302.1">
+ <Dependency Name="Microsoft.AspNetCore.App.Ref" Version="8.0.0-preview.4.23179.5">
<Uri>https://github.com/dotnet/aspnetcore</Uri>
- <Sha>c2488eead6ead7208f543d0a57104b5d167b93f9</Sha>
+ <Sha>c0acf059eddd7e70498804dcc99a7c7b33732417</Sha>
</Dependency>
- <Dependency Name="Microsoft.NETCore.App.Runtime.win-x64" Version="8.0.0-preview.6.23304.2">
+ <Dependency Name="Microsoft.NETCore.App.Runtime.win-x64" Version="8.0.0-preview.3.23155.1">
<Uri>https://github.com/dotnet/runtime</Uri>
- <Sha>2bf8f1aa83e192a307d5846424880cd61bec1a4f</Sha>
+ <Sha>a64420c79cb63c485138f4f1352b8730f27d7b19</Sha>
</Dependency>
- <Dependency Name="VS.Redist.Common.NetCore.SharedFramework.x64.8.0" Version="8.0.0-preview.6.23304.2">
+ <Dependency Name="VS.Redist.Common.NetCore.SharedFramework.x64.8.0" Version="8.0.0-preview.3.23155.1">
<Uri>https://github.com/dotnet/runtime</Uri>
- <Sha>2bf8f1aa83e192a307d5846424880cd61bec1a4f</Sha>
+ <Sha>a64420c79cb63c485138f4f1352b8730f27d7b19</Sha>
</Dependency>
- <Dependency Name="Microsoft.SourceBuild.Intermediate.source-build-reference-packages" Version="8.0.0-alpha.1.23302.3">
+ <Dependency Name="Microsoft.SourceBuild.Intermediate.source-build-reference-packages" Version="8.0.0-alpha.1.23178.3">
<Uri>https://github.com/dotnet/source-build-reference-packages</Uri>
- <Sha>4a3b4b6b37bdafe501477bf2e564380e1962ce61</Sha>
+ <Sha>dc842f8fab4bd38db9334a312a990d198b971fc2</Sha>
<SourceBuild RepoName="source-build-reference-packages" ManagedOnly="true" />
</Dependency>
- <Dependency Name="Microsoft.SourceLink.GitHub" Version="8.0.0-beta.23252.2" CoherentParentDependency="Microsoft.DotNet.Arcade.Sdk">
+ <Dependency Name="Microsoft.SourceLink.GitHub" Version="1.2.0-beta-23165-02" CoherentParentDependency="Microsoft.DotNet.Arcade.Sdk">
<Uri>https://github.com/dotnet/sourcelink</Uri>
- <Sha>54eb3b811c57f5e94617d31a102fc9cb664ccdd5</Sha>
+ <Sha>3f43bf1b2dead2cb51f20dc47f6dfd7981248820</Sha>
<SourceBuild RepoName="sourcelink" ManagedOnly="true" />
</Dependency>
<Dependency Name="Microsoft.CodeAnalysis" Version="4.6.0-1.23073.4">
</PropertyGroup>
<PropertyGroup>
<!-- Latest symstore version updated by darc -->
- <MicrosoftSymbolStoreVersion>1.0.430201</MicrosoftSymbolStoreVersion>
+ <MicrosoftSymbolStoreVersion>1.0.417001</MicrosoftSymbolStoreVersion>
<!-- Latest shared runtime version updated by darc -->
- <VSRedistCommonNetCoreSharedFrameworkx6480Version>8.0.0-preview.6.23304.2</VSRedistCommonNetCoreSharedFrameworkx6480Version>
- <MicrosoftNETCoreAppRuntimewinx64Version>8.0.0-preview.6.23304.2</MicrosoftNETCoreAppRuntimewinx64Version>
+ <VSRedistCommonNetCoreSharedFrameworkx6480Version>8.0.0-preview.3.23155.1</VSRedistCommonNetCoreSharedFrameworkx6480Version>
+ <MicrosoftNETCoreAppRuntimewinx64Version>8.0.0-preview.3.23155.1</MicrosoftNETCoreAppRuntimewinx64Version>
<!-- Latest shared aspnetcore version updated by darc -->
- <MicrosoftAspNetCoreAppRefInternalVersion>8.0.0-preview.6.23302.1</MicrosoftAspNetCoreAppRefInternalVersion>
- <MicrosoftAspNetCoreAppRefVersion>8.0.0-preview.6.23302.1</MicrosoftAspNetCoreAppRefVersion>
+ <MicrosoftAspNetCoreAppRefInternalVersion>8.0.0-preview.4.23179.5</MicrosoftAspNetCoreAppRefInternalVersion>
+ <MicrosoftAspNetCoreAppRefVersion>8.0.0-preview.4.23179.5</MicrosoftAspNetCoreAppRefVersion>
<!-- dotnet/installer: Testing version of the SDK. Needed for the signed & entitled host. -->
- <MicrosoftDotnetSdkInternalVersion>8.0.100-preview.6.23305.2</MicrosoftDotnetSdkInternalVersion>
+ <MicrosoftDotnetSdkInternalVersion>8.0.100-preview.3.23156.1</MicrosoftDotnetSdkInternalVersion>
</PropertyGroup>
<PropertyGroup>
<!-- Runtime versions to test -->
- <MicrosoftNETCoreApp60Version>6.0.16</MicrosoftNETCoreApp60Version>
+ <MicrosoftNETCoreApp60Version>6.0.14</MicrosoftNETCoreApp60Version>
<MicrosoftAspNetCoreApp60Version>$(MicrosoftNETCoreApp60Version)</MicrosoftAspNetCoreApp60Version>
- <MicrosoftNETCoreApp70Version>7.0.5</MicrosoftNETCoreApp70Version>
+ <MicrosoftNETCoreApp70Version>7.0.3</MicrosoftNETCoreApp70Version>
<MicrosoftAspNetCoreApp70Version>$(MicrosoftNETCoreApp70Version)</MicrosoftAspNetCoreApp70Version>
<!-- The SDK runtime version used to build single-file apps (currently hardcoded) -->
<SingleFileRuntime60Version>$(MicrosoftNETCoreApp60Version)</SingleFileRuntime60Version>
- <SingleFileRuntime70Version>$(MicrosoftNETCoreApp70Version)</SingleFileRuntime70Version>
- <SingleFileRuntimeLatestVersion>8.0.0-preview.6.23302.2</SingleFileRuntimeLatestVersion>
+ <SingleFileRuntime70Version>7.0.2</SingleFileRuntime70Version>
+ <SingleFileRuntimeLatestVersion>8.0.0-preview.2.23127.4</SingleFileRuntimeLatestVersion>
</PropertyGroup>
<PropertyGroup>
<!-- Opt-in/out repo features -->
<!-- CoreFX -->
<SystemReflectionMetadataVersion>5.0.0</SystemReflectionMetadataVersion>
<!-- Other libs -->
- <MicrosoftBclAsyncInterfacesVersion>6.0.0</MicrosoftBclAsyncInterfacesVersion>
- <MicrosoftDiagnosticsRuntimeVersion>3.0.0-beta.23302.1</MicrosoftDiagnosticsRuntimeVersion>
+ <MicrosoftBclAsyncInterfacesVersion>1.1.0</MicrosoftBclAsyncInterfacesVersion>
+ <MicrosoftDiagnosticsRuntimeVersion>3.0.0-beta.23205.1</MicrosoftDiagnosticsRuntimeVersion>
<MicrosoftDiaSymReaderNativePackageVersion>16.9.0-beta1.21055.5</MicrosoftDiaSymReaderNativePackageVersion>
<MicrosoftDiagnosticsTracingTraceEventVersion>3.0.7</MicrosoftDiagnosticsTracingTraceEventVersion>
+ <!-- Use pinned version to avoid picking up latest (which doesn't support netcoreapp3.1) during source-build -->
+ <MicrosoftExtensionsLoggingPinnedVersion>2.1.1</MicrosoftExtensionsLoggingPinnedVersion>
+ <!-- dotnet-dsrouter needs a net6.0 version of logging -->
<MicrosoftExtensionsLoggingVersion>6.0.0</MicrosoftExtensionsLoggingVersion>
<MicrosoftExtensionsLoggingConsoleVersion>6.0.0</MicrosoftExtensionsLoggingConsoleVersion>
<!-- Need version that understands UseAppFilters sentinel. -->
<SystemCommandLineVersion>2.0.0-beta1.20468.1</SystemCommandLineVersion>
<SystemCommandLineRenderingVersion>2.0.0-beta1.20074.1</SystemCommandLineRenderingVersion>
<SystemComponentModelAnnotationsVersion>5.0.0</SystemComponentModelAnnotationsVersion>
- <SystemBuffersVersion>4.5.1</SystemBuffersVersion>
- <SystemMemoryVersion>4.5.5</SystemMemoryVersion>
+ <SystemMemoryVersion>4.5.4</SystemMemoryVersion>
<SystemRuntimeLoaderVersion>4.3.0</SystemRuntimeLoaderVersion>
<SystemTextEncodingsWebVersion>4.7.2</SystemTextEncodingsWebVersion>
<SystemTextJsonVersion>4.7.1</SystemTextJsonVersion>
<XUnitAbstractionsVersion>2.0.3</XUnitAbstractionsVersion>
- <MicrosoftDotNetCodeAnalysisVersion>8.0.0-beta.23302.3</MicrosoftDotNetCodeAnalysisVersion>
+ <MicrosoftDotNetCodeAnalysisVersion>8.0.0-beta.23168.1</MicrosoftDotNetCodeAnalysisVersion>
<StyleCopAnalyzersVersion>1.2.0-beta.406</StyleCopAnalyzersVersion>
<MicrosoftDotNetRemoteExecutorVersion>7.0.0-beta.22316.2</MicrosoftDotNetRemoteExecutorVersion>
<cdbsosversion>10.0.18362</cdbsosversion>
<NewtonSoftJsonVersion>13.0.1</NewtonSoftJsonVersion>
- <MicrosoftSourceBuildIntermediatesourcebuildreferencepackagesPackageVersion>8.0.0-alpha.1.23302.3</MicrosoftSourceBuildIntermediatesourcebuildreferencepackagesPackageVersion>
- <MicrosoftSourceLinkGitHubVersion>8.0.0-beta.23252.2</MicrosoftSourceLinkGitHubVersion>
+ <MicrosoftSourceBuildIntermediatesourcebuildreferencepackagesPackageVersion>8.0.0-alpha.1.23178.3</MicrosoftSourceBuildIntermediatesourcebuildreferencepackagesPackageVersion>
+ <MicrosoftSourceLinkGitHubVersion>1.2.0-beta-23165-02</MicrosoftSourceLinkGitHubVersion>
<!-- Roslyn and analyzers -->
<!-- Compatibility with VS 16.11/.NET SDK 5.0.4xx -->
<MicrosoftCodeAnalysisVersion_3_11>3.11.0</MicrosoftCodeAnalysisVersion_3_11>
# Install sdk for building, restore and build managed components.
if (-not $skipmanaged) {
- Invoke-Expression "& `"$engroot\common\build.ps1`" -configuration $configuration -verbosity $verbosity /p:BuildArch=$architecture /p:TestArchitectures=$architecture $remainingargs"
+ Invoke-Expression "& `"$engroot\common\build.ps1`" -build -configuration $configuration -verbosity $verbosity /p:BuildArch=$architecture /p:TestArchitectures=$architecture $remainingargs"
if ($lastExitCode -ne 0) {
exit $lastExitCode
}
handle_arguments() {
- lowerI="$(echo "${1/--/-}" | tr "[:upper:]" "[:lower:]")"
+ lowerI="$(echo "$1" | tr "[:upper:]" "[:lower:]")"
case "$lowerI" in
architecture|-architecture|-a)
__BuildArch="$(echo "$2" | tr "[:upper:]" "[:lower:]")"
__ShiftArgs=1
;;
- -binarylog|-bl|-clean|-integrationtest|-pack|-performancetest|-pipelineslog|-pl|-preparemachine|-publish|-r|-rebuild|-build|-restore|-sign|-sb)
+ -binarylog|-bl|-clean|-integrationtest|-pack|-performancetest|-pipelineslog|-pl|-preparemachine|-publish|-r|-rebuild|-restore|-sign|-sb)
__ManagedBuildArgs="$__ManagedBuildArgs $1"
;;
__ShiftArgs=1
;;
+ -clean|-binarylog|-bl|-pipelineslog|-pl|-restore|-r|-rebuild|-pack|-integrationtest|-performancetest|-sign|-publish|-preparemachine|-sb)
+ __ManagedBuildArgs="$__ManagedBuildArgs $1"
+ ;;
+
-dotnetruntimeversion)
__DotnetRuntimeVersion="$2"
__ShiftArgs=1
#
if [[ "$__ManagedBuild" == 1 ]]; then
-
echo "Commencing managed build for $__BuildType in $__RootBinDir/bin"
- "$__RepoRootDir/eng/common/build.sh" --configuration "$__BuildType" $__CommonMSBuildArgs $__ManagedBuildArgs $__UnprocessedBuildArgs
-
+ "$__RepoRootDir/eng/common/build.sh" --build --configuration "$__BuildType" $__CommonMSBuildArgs $__ManagedBuildArgs $__UnprocessedBuildArgs
if [ "$?" != 0 ]; then
exit 1
fi
-
- echo "Generating Version Source File"
- __GenerateVersionLog="$__LogsDir/GenerateVersion.binlog"
-
- "$__RepoRootDir/eng/common/msbuild.sh" \
- $__RepoRootDir/eng/CreateVersionFile.proj \
- /bl:$__GenerateVersionLog \
- /t:GenerateVersionFiles \
- /restore \
- /p:GenerateVersionSourceFile=true \
- /p:NativeVersionSourceFile="$__ArtifactsIntermediatesDir/_version.c" \
- /p:Configuration="$__BuildType" \
- /p:Platform="$__BuildArch" \
- $__UnprocessedBuildArgs
-
- if [ $? != 0 ]; then
- echo "Generating Version Source File FAILED"
- exit 1
- fi
fi
#
# Build native components
#
if [[ "$__NativeBuild" == 1 ]]; then
+ echo "Generating Version Source File"
+ __GenerateVersionLog="$__LogsDir/GenerateVersion.binlog"
+
+ "$__RepoRootDir/eng/common/msbuild.sh" \
+ $__RepoRootDir/eng/CreateVersionFile.proj \
+ /bl:$__GenerateVersionLog \
+ /t:GenerateVersionFiles \
+ /restore \
+ /p:GenerateVersionSourceFile=true \
+ /p:NativeVersionSourceFile="$__ArtifactsIntermediatesDir/_version.c" \
+ /p:Configuration="$__BuildType" \
+ /p:Platform="$__BuildArch" \
+ $__UnprocessedBuildArgs
+
+ if [ $? != 0 ]; then
+ echo "Generating Version Source File FAILED"
+ exit 1
+ fi
+
build_native "$__TargetOS" "$__BuildArch" "$__RepoRootDir" "$__IntermediatesDir" "install" "$__ExtraCmakeArgs" "diagnostic component" | tee "$__LogsDir"/make.log
if [ "$?" != 0 ]; then
if [[ "$__Test" == 1 ]]; then
if [[ "$__CrossBuild" == 0 ]]; then
if [[ -z "$LLDB_PATH" ]]; then
- check_version_exists() {
- desired_version=-1
-
- # Set up the environment to be used for building with the desired debugger.
- if command -v "lldb-$1.$2" > /dev/null; then
- desired_version="-$1.$2"
- elif command -v "lldb$1$2" > /dev/null; then
- desired_version="$1$2"
- elif command -v "lldb-$1$2" > /dev/null; then
- desired_version="-$1$2"
+ export LLDB_PATH="$(which lldb-3.9.1 2> /dev/null)"
+ if [[ -z "$LLDB_PATH" ]]; then
+ export LLDB_PATH="$(which lldb-3.9 2> /dev/null)"
+ if [[ -z "$LLDB_PATH" ]]; then
+ export LLDB_PATH="$(which lldb-4.0 2> /dev/null)"
+ if [[ -z "$LLDB_PATH" ]]; then
+ export LLDB_PATH="$(which lldb-5.0 2> /dev/null)"
+ if [[ -z "$LLDB_PATH" ]]; then
+ export LLDB_PATH="$(which lldb 2> /dev/null)"
+ fi
+ fi
+ fi
fi
-
- echo "$desired_version"
- }
-
- # note: clang versions higher than 6 do not have minor version in file name, if it is zero.
- versions="16 15 14 13 12 11 10 9 8 7 6.0 5.0 4.0 3.9"
- for version in $versions; do
- _major="${version%%.*}"
- [ -z "${version##*.*}" ] && _minor="${version#*.}"
- desired_version="$(check_version_exists "$_major" "$_minor")"
- if [ "$desired_version" != "-1" ]; then majorVersion="$_major"; break; fi
- done
-
- if [ -z "$majorVersion" ]; then
- export LLDB_PATH="$(command -v "lldb")"
- else
- export LLDB_PATH="$(command -v "lldb$desired_version")"
- fi
fi
if [[ -z "$GDB_PATH" ]]; then
--- /dev/null
+parameters:
+ # Job name
+ name: ''
+ # Agent OS (Windows_NT, Linux, MacOS, FreeBSD)
+ osGroup: Windows_NT
+ # Additional variables
+ variables: {}
+ # Build strategy - matrix
+ strategy: ''
+ # Optional: Job timeout
+ timeoutInMinutes: 180
+ # Optional: Docker image to use
+ dockerImage: ''
+ # Optional: ROOTFS_DIR to use
+ crossrootfsDir: ''
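+  # Optional: cross-compilation build (passes -cross to the build scripts)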
+ crossbuild: false
+ # Optional: test only job if true
+ testOnly: false
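+  # Optional: build the leg but skip running the tests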
+ buildAndSkipTest: false
+ # Depends on
+ dependsOn: ''
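+  # Optional: destination path (under the published artifact) for binplaced binaries; defaults to _PublishArtifacts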
+ artifactsTargetPath: ''
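+  # Optional: run the build through eng/docker-build.sh (for legs whose tests need ptrace capabilities) instead of a job container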
+ requiresCapPtraceContainer: false
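+  # Optional: CodeQL run; wraps the build in the CodeQL3000 tasks and skips tests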
+ isCodeQLRun: false
+
+jobs:
+- template: /eng/common/templates/job/job.yml
+ parameters:
+ name: ${{ parameters.name }}
+ timeoutInMinutes: ${{ parameters.timeoutInMinutes }}
+ enableMicrobuild: true
+ enableTelemetry: true
+ helixRepo: dotnet/diagnostics
+ runAsPublic: ${{ parameters.isCodeQLRun }}
+
+ pool:
+ # Public Linux Build Pool
+ ${{ if and(eq(parameters.osGroup, 'Linux'), eq(variables['System.TeamProject'], 'public')) }}:
+ name: NetCore-Svc-Public
+ demands: ImageOverride -equals Build.Ubuntu.1804.Amd64.Open
+
+ # Official Build Linux Pool
+ ${{ if and(eq(parameters.osGroup, 'Linux'), ne(variables['System.TeamProject'], 'public')) }}:
+ name: NetCore1ESPool-Svc-Internal
+ demands: ImageOverride -equals Build.Ubuntu.1804.Amd64
+
+ # FreeBSD builds only in the internal project
+ ${{ if and(eq(parameters.osGroup, 'FreeBSD'), ne(variables['System.TeamProject'], 'public')) }}:
+ name: dnceng-freebsd-internal
+
+ # Build OSX Pool (we don't have on-prem OSX BuildPool)
+ ${{ if in(parameters.osGroup, 'MacOS', 'MacOS_cross') }}:
+ vmImage: macOS-latest
+
+ # Official Build Windows Pool
+ ${{ if and(eq(parameters.osGroup, 'Windows_NT'), ne(variables['System.TeamProject'], 'public')) }}:
+ name: NetCore1ESPool-Svc-Internal
+ demands: ImageOverride -equals windows.vs2022.amd64
+
+ # Public Windows Build Pool
+ ${{ if and(eq(parameters.osGroup, 'Windows_NT'), eq(variables['System.TeamProject'], 'public')) }}:
+ name: NetCore-Svc-Public
+ demands: ImageOverride -equals windows.vs2022.amd64.open
+
+ ${{ if and(ne(parameters.dockerImage, ''), ne(parameters.requiresCapPtraceContainer, 'true')) }}:
+ container: ${{ parameters.dockerImage }}
+
+ ${{ if ne(parameters.strategy, '') }}:
+ strategy: ${{ parameters.strategy }}
+
+ ${{ if ne(parameters.dependsOn, '') }}:
+ dependsOn: ${{ parameters.dependsOn }}
+
+ workspace:
+ clean: all
+
+ variables:
+ - ${{ insert }}: ${{ parameters.variables }}
+ - _DockerImageName: ${{ parameters.dockerImage }}
+  - _PhaseName: ${{ parameters.name }}
+ - _HelixType: build/product
+ - _HelixBuildConfig: $(_BuildConfig)
+ - _Pipeline_StreamDumpDir: $(Build.SourcesDirectory)/artifacts/tmp/$(_BuildConfig)/streams
+
+ - ${{ if eq(parameters.osGroup, 'Windows_NT') }}:
+ - _buildScript: $(Build.SourcesDirectory)\eng\cibuild.cmd
+ - ${{ if ne(parameters.osGroup, 'Windows_NT') }}:
+ - _buildScript: $(Build.SourcesDirectory)/eng/cibuild.sh
+
+ - _TestArgs: '-test'
+ - _dockerEnv: ''
+
+ - ${{ if eq(parameters.testOnly, 'true') }}:
+ - _TestArgs: '-test -skipnative'
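+  # Legs that need ptrace capabilities wrap the build in eng/docker-build.sh, which launches
+  # the docker image itself rather than using the job-level 'container:' resource above.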
+ - ${{ if eq(parameters.requiresCapPtraceContainer, 'true') }}:
+ - _dockerEnv: $(Build.SourcesDirectory)/eng/docker-build.sh
+ --docker-image $(_DockerImageName)
+ --source-directory $(Build.SourcesDirectory)
+ --container-name diagnostics-$(Build.BuildId)
+ - ${{ if eq(parameters.isCodeQLRun, 'true') }}:
+ - name: Codeql.Enabled
+ value: True
+ - name: Codeql.Cadence
+ value: 0
+ - name: Codeql.TSAEnabled
+ value: True
+ - name: Codeql.BuildIdentifier
+ value: $(System.JobDisplayName)
+ - name: Codeql.Language
+ value: csharp,cpp
+
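+  # Build-only legs and CodeQL runs skip the test pass entirely.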
+ - ${{ if or(eq(parameters.buildAndSkipTest, 'true'), eq(parameters.isCodeQLRun, 'true')) }}:
+ - _TestArgs: ''
+
+ - _InternalInstallArgs: ''
+  # For testing MSRC and service releases. The RuntimeSourceVersion is either "default" or the service release version to test.
+ - ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest'), eq(parameters.isCodeQLRun, 'false')) }}:
+ - _InternalInstallArgs:
+ -dotnetruntimeversion '$(DotnetRuntimeVersion)'
+ -dotnetruntimedownloadversion '$(DotnetRuntimeDownloadVersion)'
+ -runtimesourcefeed '$(RuntimeFeedUrl)'
+ -runtimesourcefeedkey '$(RuntimeFeedBase64SasToken)'
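+    # (Hypothetical example: queueing with DotnetRuntimeVersion set to a servicing build such as
+    # 6.0.16 exercises that runtime; "default" leaves the normal repo-pinned versions in place.)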
+
+ # Only enable publishing in non-public, non PR scenarios.
+ - ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
+ - _HelixSource: official/dotnet/arcade/$(Build.SourceBranch)
+ - ${{ if or(eq(variables['System.TeamProject'], 'public'), in(variables['Build.Reason'], 'PullRequest')) }}:
+ - _HelixSource: pr/dotnet/arcade/$(Build.SourceBranch)
+
+ # This is only required for cross builds.
+ - ${{ if and(eq(parameters.crossbuild, false), eq(parameters.crossrootfsDir, '')) }}:
+ - _Cross: ''
+ - ${{ if or(eq(parameters.crossbuild, true), ne(parameters.crossrootfsDir, '')) }}:
+ - _Cross: -cross
+
+ steps:
+ - ${{ if eq(parameters.osGroup, 'Linux') }}:
+ - ${{ if eq(parameters.testOnly, 'true') }}:
+ - task: DownloadBuildArtifacts@0
+ displayName: 'Download release builds'
+ inputs:
+ downloadPath: '$(Build.ArtifactStagingDirectory)/__download__'
+ downloadType: specific
+ itemPattern: |
+ Build_$(_BuildConfig)/bin/Linux.$(_BuildArch).$(_BuildConfig)/**
+ checkDownloadedFiles: true
+ - task: CopyFiles@2
+ displayName: 'Binplace Product'
+ inputs:
+ sourceFolder: $(Build.ArtifactStagingDirectory)/__download__/Build_$(_BuildConfig)/bin/Linux.$(_BuildArch).$(_BuildConfig)
+ targetFolder: '$(Build.SourcesDirectory)/artifacts/bin/Linux.$(_BuildArch).$(_BuildConfig)'
+
+ - ${{ if eq(parameters.isCodeQLRun, 'true') }}:
+ - task: CodeQL3000Init@0
+ displayName: CodeQL Initialize
+
+ - script: $(_dockerEnv) $(_buildScript)
+ -configuration $(_BuildConfig)
+ -architecture $(_BuildArch)
+ $(_Cross)
+ $(_TestArgs)
+ /p:OfficialBuildId=$(BUILD.BUILDNUMBER)
+ $(_InternalInstallArgs)
+ displayName: Build / Test
+ condition: succeeded()
+ env:
+ ROOTFS_DIR: ${{ parameters.crossrootfsDir }}
+
+ - ${{ if eq(parameters.isCodeQLRun, 'true') }}:
+ - task: CodeQL3000Finalize@0
+ displayName: CodeQL Finalize
+
+ - ${{ if ne(variables['System.TeamProject'], 'public') }}:
+ - task: CopyFiles@2
+ displayName: Gather binaries for publish to artifacts
+ inputs:
+ SourceFolder: '$(Build.SourcesDirectory)/artifacts/$(_PublishArtifacts)'
+ Contents: '**'
+ TargetFolder: $(Build.ArtifactStagingDirectory)/artifacts/${{ coalesce(parameters.artifactsTargetPath, '$(_PublishArtifacts)') }}
+ condition: ne(variables['_PublishArtifacts'], '')
+ - task: PublishBuildArtifacts@1
+ displayName: Publish Build Artifacts
+ inputs:
+ pathtoPublish: '$(Build.ArtifactStagingDirectory)/artifacts'
+ artifactName: Build_$(_BuildConfig)
+ condition: ne(variables['_PublishArtifacts'], '')
+
+ - task: PublishBuildArtifacts@1
+ displayName: Publish Artifacts on failure
+ inputs:
+ PathtoPublish: '$(Build.SourcesDirectory)/artifacts/bin'
+ PublishLocation: Container
+ ArtifactName: Artifacts_$(_PhaseName)_$(_BuildArch)_$(_BuildConfig)
+ continueOnError: true
+ condition: failed()
+
+ - task: PublishBuildArtifacts@1
+ displayName: Publish Dump Artifacts on failure
+ inputs:
+ PathtoPublish: '$(Build.SourcesDirectory)/artifacts/tmp/$(_BuildConfig)/dumps'
+ PublishLocation: Container
+ ArtifactName: Dumps_$(_PhaseName)_$(_BuildArch)_$(_BuildConfig)
+ continueOnError: true
+ condition: failed()
+
+ - task: PublishBuildArtifacts@1
+ displayName: Publish Stream Artifacts on failure
+ inputs:
+ PathtoPublish: $(_Pipeline_StreamDumpDir)
+ PublishLocation: Container
+ ArtifactName: Streams_$(_PhaseName)_$(_BuildArch)_$(_BuildConfig)
+ continueOnError: true
+ condition: failed()
+
+ - task: CopyFiles@2
+ displayName: Gather Logs
+ inputs:
+ sourceFolder: '$(Build.SourcesDirectory)/artifacts'
+ contents: '?(log|TestResults)/**'
+ targetFolder: '$(Build.StagingDirectory)/BuildLogs'
+ continueOnError: true
+ condition: always()
+
+ - task: PublishBuildArtifacts@1
+ displayName: Publish Logs
+ inputs:
+ PathtoPublish: '$(Build.StagingDirectory)/BuildLogs'
+ PublishLocation: Container
+ ArtifactName: Logs_$(_PhaseName)_$(_BuildArch)_$(_BuildConfig)
+ continueOnError: true
+ condition: always()
+
+ - ${{ if and(eq(parameters.buildAndSkipTest, 'false'), eq(parameters.isCodeQLRun, 'false')) }}:
+ # Publish test results to Azure Pipelines
+ - task: PublishTestResults@2
+ inputs:
+ testResultsFormat: xUnit
+ testResultsFiles: '**/*UnitTests*.xml'
+ searchFolder: '$(Build.SourcesDirectory)/artifacts/TestResults'
+ failTaskOnFailedTests: true
+ testRunTitle: 'Tests $(_PhaseName) $(_BuildArch) $(_BuildConfig)'
+ publishRunAttachments: true
+ mergeTestResults: true
+ buildConfiguration: ${{ parameters.name }}
+ continueOnError: true
+ condition: ne(variables['_BuildOnly'], 'true')
if NOT '%ERRORLEVEL%' == '0' goto ExitWithCode
echo Creating bundles
-powershell -ExecutionPolicy ByPass -NoProfile -command "& """%~dp0Build.ps1""" %_commonArgs% -build -bundletools %*"
+powershell -ExecutionPolicy ByPass -NoProfile -command "& """%~dp0Build.ps1""" %_commonArgs% -bundletools %*"
if NOT '%ERRORLEVEL%' == '0' goto ExitWithCode
echo Creating dbgshim packages
--- /dev/null
+#!/usr/bin/env bash
+# Copyright (c) .NET Foundation and contributors. All rights reserved.
+# Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
+source="${BASH_SOURCE[0]}"
+
+# resolve $SOURCE until the file is no longer a symlink
+while [[ -h $source ]]; do
+ scriptroot="$( cd -P "$( dirname "$source" )" && pwd )"
+ source="$(readlink "$source")"
+
+ # if $source was a relative symlink, we need to resolve it relative to the path where
+ # the symlink file was located
+ [[ $source != /* ]] && source="$scriptroot/$source"
+done
+
+scriptroot="$( cd -P "$( dirname "$source" )" && pwd )"
+
+# Fix any CI lab docker image problems
+
+__osname=$(uname -s)
+if [ "$__osname" == "Linux" ]; then
+ if [ -e /etc/os-release ]; then
+ source /etc/os-release
+ if [[ $ID == "ubuntu" ]]; then
+ if [[ $VERSION_ID == "18.04" ]]; then
+ # Fix the CI lab's ubuntu 18.04 docker image: install curl.
+ sudo apt-get update
+ sudo apt-get install -y curl
+ fi
+ fi
+ elif [ -e /etc/redhat-release ]; then
+ __redhatRelease=$(</etc/redhat-release)
+ if [[ $__redhatRelease == "CentOS release 6."* || $__redhatRelease == "Red Hat Enterprise Linux Server release 6."* ]]; then
+ source scl_source enable python27 devtoolset-2
+ fi
+ fi
+
+  # The CI uses an old (2019) CentOS image with an old cmake (2.8).
+  # Upgrading to the 2021 CentOS image broke the SOS tests, which rely on the
+  # lldb REPL, ptrace, etc. For example, from the test attachment logs:
+  #
+  # 00:00.136: error: process launch failed: 'A' packet returned an error: 8
+  # 00:00.136:
+  # 00:00.136: <END_COMMAND_ERROR>
+  # System.Exception: 'process launch -s' FAILED
+  #
+  # So we keep using the old image for now and install a newer cmake as a workaround instead.
+  # FIXME: delete this comment and the next `if` block once the CentOS image is upgraded.
+ if [ "$ID" = "centos" ]; then
+ # upgrade cmake
+ requiredversion=3.6.2
+ cmakeversion="$(cmake --version | head -1)"
+ currentversion="${cmakeversion##* }"
+ if ! printf '%s\n' "$requiredversion" "$currentversion" | sort --version-sort --check 2>/dev/null; then
+      echo "Old cmake version found: $currentversion; minimum requirement is $requiredversion. Upgrading to 3.15.5..."
+ curl -sSL -o /tmp/cmake-install.sh https://github.com/Kitware/CMake/releases/download/v3.15.5/cmake-3.15.5-Linux-$(uname -m).sh
+ mkdir "$HOME/.cmake"
+ bash /tmp/cmake-install.sh --skip-license --exclude-subdir --prefix="$HOME/.cmake"
+ PATH="$HOME/.cmake/bin:$PATH"
+ export PATH
+ cmakeversion="$(cmake --version | head -1)"
+ newversion="${cmakeversion##* }"
+ echo "New cmake version is: $newversion"
+ fi
+ fi
+fi
+
+"$scriptroot/build.sh" -restore -prepareMachine -ci $@
+if [[ $? != 0 ]]; then
+ exit 1
+fi
deb-src http://ports.ubuntu.com/ubuntu-ports/ xenial-backports main restricted
deb http://ports.ubuntu.com/ubuntu-ports/ xenial-security main restricted universe multiverse
-deb-src http://ports.ubuntu.com/ubuntu-ports/ xenial-security main restricted universe multiverse
+deb-src http://ports.ubuntu.com/ubuntu-ports/ xenial-security main restricted universe multiverse
\ No newline at end of file
deb-src http://ports.ubuntu.com/ubuntu-ports/ xenial-backports main restricted
deb http://ports.ubuntu.com/ubuntu-ports/ xenial-security main restricted universe multiverse
-deb-src http://ports.ubuntu.com/ubuntu-ports/ xenial-security main restricted universe multiverse
+deb-src http://ports.ubuntu.com/ubuntu-ports/ xenial-security main restricted universe multiverse
\ No newline at end of file
echo "lldbx.y - optional, LLDB version, can be: lldb3.9(default), lldb4.0, lldb5.0, lldb6.0 no-lldb. Ignored for alpine and FreeBSD"
echo "llvmx[.y] - optional, LLVM version for LLVM related packages."
echo "--skipunmount - optional, will skip the unmount of rootfs folder."
- echo "--skipsigcheck - optional, will skip package signature checks (allowing untrusted packages)."
echo "--use-mirror - optional, use mirror URL to fetch resources, when available."
echo "--jobs N - optional, restrict to N jobs."
exit 1
__FreeBSDArch=arm
__FreeBSDMachineArch=armv7
__IllumosArch=arm7
-__HaikuArch=arm
__QEMUArch=arm
__UbuntuArch=armhf
__UbuntuRepo="http://ports.ubuntu.com/"
__AlpinePackages+=" openssl-dev"
__AlpinePackages+=" zlib-dev"
-__FreeBSDBase="12.4-RELEASE"
+__FreeBSDBase="12.3-RELEASE"
__FreeBSDPkg="1.17.0"
__FreeBSDABI="12"
__FreeBSDPackages="libunwind"
__IllumosPackages+=" openssl"
__IllumosPackages+=" zlib"
-__HaikuPackages="gcc_syslibs"
-__HaikuPackages+=" gcc_syslibs_devel"
-__HaikuPackages+=" gmp"
+__HaikuPackages="gmp"
__HaikuPackages+=" gmp_devel"
-__HaikuPackages+=" icu66"
-__HaikuPackages+=" icu66_devel"
__HaikuPackages+=" krb5"
__HaikuPackages+=" krb5_devel"
__HaikuPackages+=" libiconv"
__HaikuPackages+=" llvm12_libunwind_devel"
__HaikuPackages+=" mpfr"
__HaikuPackages+=" mpfr_devel"
-__HaikuPackages+=" openssl"
-__HaikuPackages+=" openssl_devel"
-__HaikuPackages+=" zlib"
-__HaikuPackages+=" zlib_devel"
# ML.NET dependencies
__UbuntuPackages+=" libomp5"
__UbuntuPackages+=" libomp-dev"
-# Taken from https://github.com/alpinelinux/alpine-chroot-install/blob/6d08f12a8a70dd9b9dc7d997c88aa7789cc03c42/alpine-chroot-install#L85-L133
-__AlpineKeys='
-4a6a0840:MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1yHJxQgsHQREclQu4Ohe\nqxTxd1tHcNnvnQTu/UrTky8wWvgXT+jpveroeWWnzmsYlDI93eLI2ORakxb3gA2O\nQ0Ry4ws8vhaxLQGC74uQR5+/yYrLuTKydFzuPaS1dK19qJPXB8GMdmFOijnXX4SA\njixuHLe1WW7kZVtjL7nufvpXkWBGjsfrvskdNA/5MfxAeBbqPgaq0QMEfxMAn6/R\nL5kNepi/Vr4S39Xvf2DzWkTLEK8pcnjNkt9/aafhWqFVW7m3HCAII6h/qlQNQKSo\nGuH34Q8GsFG30izUENV9avY7hSLq7nggsvknlNBZtFUcmGoQrtx3FmyYsIC8/R+B\nywIDAQAB
-5243ef4b:MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvNijDxJ8kloskKQpJdx+\nmTMVFFUGDoDCbulnhZMJoKNkSuZOzBoFC94omYPtxnIcBdWBGnrm6ncbKRlR+6oy\nDO0W7c44uHKCFGFqBhDasdI4RCYP+fcIX/lyMh6MLbOxqS22TwSLhCVjTyJeeH7K\naA7vqk+QSsF4TGbYzQDDpg7+6aAcNzg6InNePaywA6hbT0JXbxnDWsB+2/LLSF2G\nmnhJlJrWB1WGjkz23ONIWk85W4S0XB/ewDefd4Ly/zyIciastA7Zqnh7p3Ody6Q0\nsS2MJzo7p3os1smGjUF158s6m/JbVh4DN6YIsxwl2OjDOz9R0OycfJSDaBVIGZzg\ncQIDAQAB
-524d27bb:MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr8s1q88XpuJWLCZALdKj\nlN8wg2ePB2T9aIcaxryYE/Jkmtu+ZQ5zKq6BT3y/udt5jAsMrhHTwroOjIsF9DeG\ne8Y3vjz+Hh4L8a7hZDaw8jy3CPag47L7nsZFwQOIo2Cl1SnzUc6/owoyjRU7ab0p\niWG5HK8IfiybRbZxnEbNAfT4R53hyI6z5FhyXGS2Ld8zCoU/R4E1P0CUuXKEN4p0\n64dyeUoOLXEWHjgKiU1mElIQj3k/IF02W89gDj285YgwqA49deLUM7QOd53QLnx+\nxrIrPv3A+eyXMFgexNwCKQU9ZdmWa00MjjHlegSGK8Y2NPnRoXhzqSP9T9i2HiXL\nVQIDAQAB
-5261cecb:MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwlzMkl7b5PBdfMzGdCT0\ncGloRr5xGgVmsdq5EtJvFkFAiN8Ac9MCFy/vAFmS8/7ZaGOXoCDWbYVLTLOO2qtX\nyHRl+7fJVh2N6qrDDFPmdgCi8NaE+3rITWXGrrQ1spJ0B6HIzTDNEjRKnD4xyg4j\ng01FMcJTU6E+V2JBY45CKN9dWr1JDM/nei/Pf0byBJlMp/mSSfjodykmz4Oe13xB\nCa1WTwgFykKYthoLGYrmo+LKIGpMoeEbY1kuUe04UiDe47l6Oggwnl+8XD1MeRWY\nsWgj8sF4dTcSfCMavK4zHRFFQbGp/YFJ/Ww6U9lA3Vq0wyEI6MCMQnoSMFwrbgZw\nwwIDAQAB
-58199dcc:MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3v8/ye/V/t5xf4JiXLXa\nhWFRozsnmn3hobON20GdmkrzKzO/eUqPOKTpg2GtvBhK30fu5oY5uN2ORiv2Y2ht\neLiZ9HVz3XP8Fm9frha60B7KNu66FO5P2o3i+E+DWTPqqPcCG6t4Znk2BypILcit\nwiPKTsgbBQR2qo/cO01eLLdt6oOzAaF94NH0656kvRewdo6HG4urbO46tCAizvCR\nCA7KGFMyad8WdKkTjxh8YLDLoOCtoZmXmQAiwfRe9pKXRH/XXGop8SYptLqyVVQ+\ntegOD9wRs2tOlgcLx4F/uMzHN7uoho6okBPiifRX+Pf38Vx+ozXh056tjmdZkCaV\naQIDAQAB
-58cbb476:MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoSPnuAGKtRIS5fEgYPXD\n8pSGvKAmIv3A08LBViDUe+YwhilSHbYXUEAcSH1KZvOo1WT1x2FNEPBEFEFU1Eyc\n+qGzbA03UFgBNvArurHQ5Z/GngGqE7IarSQFSoqewYRtFSfp+TL9CUNBvM0rT7vz\n2eMu3/wWG+CBmb92lkmyWwC1WSWFKO3x8w+Br2IFWvAZqHRt8oiG5QtYvcZL6jym\nY8T6sgdDlj+Y+wWaLHs9Fc+7vBuyK9C4O1ORdMPW15qVSl4Lc2Wu1QVwRiKnmA+c\nDsH/m7kDNRHM7TjWnuj+nrBOKAHzYquiu5iB3Qmx+0gwnrSVf27Arc3ozUmmJbLj\nzQIDAQAB
-58e4f17d:MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvBxJN9ErBgdRcPr5g4hV\nqyUSGZEKuvQliq2Z9SRHLh2J43+EdB6A+yzVvLnzcHVpBJ+BZ9RV30EM9guck9sh\nr+bryZcRHyjG2wiIEoduxF2a8KeWeQH7QlpwGhuobo1+gA8L0AGImiA6UP3LOirl\nI0G2+iaKZowME8/tydww4jx5vG132JCOScMjTalRsYZYJcjFbebQQolpqRaGB4iG\nWqhytWQGWuKiB1A22wjmIYf3t96l1Mp+FmM2URPxD1gk/BIBnX7ew+2gWppXOK9j\n1BJpo0/HaX5XoZ/uMqISAAtgHZAqq+g3IUPouxTphgYQRTRYpz2COw3NF43VYQrR\nbQIDAQAB
-60ac2099:MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwR4uJVtJOnOFGchnMW5Y\nj5/waBdG1u5BTMlH+iQMcV5+VgWhmpZHJCBz3ocD+0IGk2I68S5TDOHec/GSC0lv\n6R9o6F7h429GmgPgVKQsc8mPTPtbjJMuLLs4xKc+viCplXc0Nc0ZoHmCH4da6fCV\ntdpHQjVe6F9zjdquZ4RjV6R6JTiN9v924dGMAkbW/xXmamtz51FzondKC52Gh8Mo\n/oA0/T0KsCMCi7tb4QNQUYrf+Xcha9uus4ww1kWNZyfXJB87a2kORLiWMfs2IBBJ\nTmZ2Fnk0JnHDb8Oknxd9PvJPT0mvyT8DA+KIAPqNvOjUXP4bnjEHJcoCP9S5HkGC\nIQIDAQAB
-6165ee59:MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAutQkua2CAig4VFSJ7v54\nALyu/J1WB3oni7qwCZD3veURw7HxpNAj9hR+S5N/pNeZgubQvJWyaPuQDm7PTs1+\ntFGiYNfAsiibX6Rv0wci3M+z2XEVAeR9Vzg6v4qoofDyoTbovn2LztaNEjTkB+oK\ntlvpNhg1zhou0jDVYFniEXvzjckxswHVb8cT0OMTKHALyLPrPOJzVtM9C1ew2Nnc\n3848xLiApMu3NBk0JqfcS3Bo5Y2b1FRVBvdt+2gFoKZix1MnZdAEZ8xQzL/a0YS5\nHd0wj5+EEKHfOd3A75uPa/WQmA+o0cBFfrzm69QDcSJSwGpzWrD1ScH3AK8nWvoj\nv7e9gukK/9yl1b4fQQ00vttwJPSgm9EnfPHLAtgXkRloI27H6/PuLoNvSAMQwuCD\nhQRlyGLPBETKkHeodfLoULjhDi1K2gKJTMhtbnUcAA7nEphkMhPWkBpgFdrH+5z4\nLxy+3ek0cqcI7K68EtrffU8jtUj9LFTUC8dERaIBs7NgQ/LfDbDfGh9g6qVj1hZl\nk9aaIPTm/xsi8v3u+0qaq7KzIBc9s59JOoA8TlpOaYdVgSQhHHLBaahOuAigH+VI\nisbC9vmqsThF2QdDtQt37keuqoda2E6sL7PUvIyVXDRfwX7uMDjlzTxHTymvq2Ck\nhtBqojBnThmjJQFgZXocHG8CAwEAAQ==
-61666e3f:MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAlEyxkHggKCXC2Wf5Mzx4\nnZLFZvU2bgcA3exfNPO/g1YunKfQY+Jg4fr6tJUUTZ3XZUrhmLNWvpvSwDS19ZmC\nIXOu0+V94aNgnhMsk9rr59I8qcbsQGIBoHzuAl8NzZCgdbEXkiY90w1skUw8J57z\nqCsMBydAueMXuWqF5nGtYbi5vHwK42PffpiZ7G5Kjwn8nYMW5IZdL6ZnMEVJUWC9\nI4waeKg0yskczYDmZUEAtrn3laX9677ToCpiKrvmZYjlGl0BaGp3cxggP2xaDbUq\nqfFxWNgvUAb3pXD09JM6Mt6HSIJaFc9vQbrKB9KT515y763j5CC2KUsilszKi3mB\nHYe5PoebdjS7D1Oh+tRqfegU2IImzSwW3iwA7PJvefFuc/kNIijfS/gH/cAqAK6z\nbhdOtE/zc7TtqW2Wn5Y03jIZdtm12CxSxwgtCF1NPyEWyIxAQUX9ACb3M0FAZ61n\nfpPrvwTaIIxxZ01L3IzPLpbc44x/DhJIEU+iDt6IMTrHOphD9MCG4631eIdB0H1b\n6zbNX1CXTsafqHRFV9XmYYIeOMggmd90s3xIbEujA6HKNP/gwzO6CDJ+nHFDEqoF\nSkxRdTkEqjTjVKieURW7Swv7zpfu5PrsrrkyGnsRrBJJzXlm2FOOxnbI2iSL1B5F\nrO5kbUxFeZUIDq+7Yv4kLWcCAwEAAQ==
-616a9724:MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAnC+bR4bHf/L6QdU4puhQ\ngl1MHePszRC38bzvVFDUJsmCaMCL2suCs2A2yxAgGb9pu9AJYLAmxQC4mM3jNqhg\n/E7yuaBbek3O02zN/ctvflJ250wZCy+z0ZGIp1ak6pu1j14IwHokl9j36zNfGtfv\nADVOcdpWITFFlPqwq1qt/H3UsKVmtiF3BNWWTeUEQwKvlU8ymxgS99yn0+4OPyNT\nL3EUeS+NQJtDS01unau0t7LnjUXn+XIneWny8bIYOQCuVR6s/gpIGuhBaUqwaJOw\n7jkJZYF2Ij7uPb4b5/R3vX2FfxxqEHqssFSg8FFUNTZz3qNZs0CRVyfA972g9WkJ\nhPfn31pQYil4QGRibCMIeU27YAEjXoqfJKEPh4UWMQsQLrEfdGfb8VgwrPbniGfU\nL3jKJR3VAafL9330iawzVQDlIlwGl6u77gEXMl9K0pfazunYhAp+BMP+9ot5ckK+\nosmrqj11qMESsAj083GeFdfV3pXEIwUytaB0AKEht9DbqUfiE/oeZ/LAXgySMtVC\nsbC4ESmgVeY2xSBIJdDyUap7FR49GGrw0W49NUv9gRgQtGGaNVQQO9oGL2PBC41P\niWF9GLoX30HIz1P8PF/cZvicSSPkQf2Z6TV+t0ebdGNS5DjapdnCrq8m9Z0pyKsQ\nuxAL2a7zX8l5i1CZh1ycUGsCAwEAAQ==
-616abc23:MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0MfCDrhODRCIxR9Dep1s\neXafh5CE5BrF4WbCgCsevyPIdvTeyIaW4vmO3bbG4VzhogDZju+R3IQYFuhoXP5v\nY+zYJGnwrgz3r5wYAvPnLEs1+dtDKYOgJXQj+wLJBW1mzRDL8FoRXOe5iRmn1EFS\nwZ1DoUvyu7/J5r0itKicZp3QKED6YoilXed+1vnS4Sk0mzN4smuMR9eO1mMCqNp9\n9KTfRDHTbakIHwasECCXCp50uXdoW6ig/xUAFanpm9LtK6jctNDbXDhQmgvAaLXZ\nLvFqoaYJ/CvWkyYCgL6qxvMvVmPoRv7OPcyni4xR/WgWa0MSaEWjgPx3+yj9fiMA\n1S02pFWFDOr5OUF/O4YhFJvUCOtVsUPPfA/Lj6faL0h5QI9mQhy5Zb9TTaS9jB6p\nLw7u0dJlrjFedk8KTJdFCcaGYHP6kNPnOxMylcB/5WcztXZVQD5WpCicGNBxCGMm\nW64SgrV7M07gQfL/32QLsdqPUf0i8hoVD8wfQ3EpbQzv6Fk1Cn90bZqZafg8XWGY\nwddhkXk7egrr23Djv37V2okjzdqoyLBYBxMz63qQzFoAVv5VoY2NDTbXYUYytOvG\nGJ1afYDRVWrExCech1mX5ZVUB1br6WM+psFLJFoBFl6mDmiYt0vMYBddKISsvwLl\nIJQkzDwtXzT2cSjoj3T5QekCAwEAAQ==
-616ac3bc:MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAvaaoSLab+IluixwKV5Od\n0gib2YurjPatGIbn5Ov2DLUFYiebj2oJINXJSwUOO+4WcuHFEqiL/1rya+k5hLZt\nhnPL1tn6QD4rESznvGSasRCQNT2vS/oyZbTYJRyAtFkEYLlq0t3S3xBxxHWuvIf0\nqVxVNYpQWyM3N9RIeYBR/euXKJXileSHk/uq1I5wTC0XBIHWcthczGN0m9wBEiWS\n0m3cnPk4q0Ea8mUJ91Rqob19qETz6VbSPYYpZk3qOycjKosuwcuzoMpwU8KRiMFd\n5LHtX0Hx85ghGsWDVtS0c0+aJa4lOMGvJCAOvDfqvODv7gKlCXUpgumGpLdTmaZ8\n1RwqspAe3IqBcdKTqRD4m2mSg23nVx2FAY3cjFvZQtfooT7q1ItRV5RgH6FhQSl7\n+6YIMJ1Bf8AAlLdRLpg+doOUGcEn+pkDiHFgI8ylH1LKyFKw+eXaAml/7DaWZk1d\ndqggwhXOhc/UUZFQuQQ8A8zpA13PcbC05XxN2hyP93tCEtyynMLVPtrRwDnHxFKa\nqKzs3rMDXPSXRn3ZZTdKH3069ApkEjQdpcwUh+EmJ1Ve/5cdtzT6kKWCjKBFZP/s\n91MlRrX2BTRdHaU5QJkUheUtakwxuHrdah2F94lRmsnQlpPr2YseJu6sIE+Dnx4M\nCfhdVbQL2w54R645nlnohu8CAwEAAQ==
-616adfeb:MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAq0BFD1D4lIxQcsqEpQzU\npNCYM3aP1V/fxxVdT4DWvSI53JHTwHQamKdMWtEXetWVbP5zSROniYKFXd/xrD9X\n0jiGHey3lEtylXRIPxe5s+wXoCmNLcJVnvTcDtwx/ne2NLHxp76lyc25At+6RgE6\nADjLVuoD7M4IFDkAsd8UQ8zM0Dww9SylIk/wgV3ZkifecvgUQRagrNUdUjR56EBZ\nraQrev4hhzOgwelT0kXCu3snbUuNY/lU53CoTzfBJ5UfEJ5pMw1ij6X0r5S9IVsy\nKLWH1hiO0NzU2c8ViUYCly4Fe9xMTFc6u2dy/dxf6FwERfGzETQxqZvSfrRX+GLj\n/QZAXiPg5178hT/m0Y3z5IGenIC/80Z9NCi+byF1WuJlzKjDcF/TU72zk0+PNM/H\nKuppf3JT4DyjiVzNC5YoWJT2QRMS9KLP5iKCSThwVceEEg5HfhQBRT9M6KIcFLSs\nmFjx9kNEEmc1E8hl5IR3+3Ry8G5/bTIIruz14jgeY9u5jhL8Vyyvo41jgt9sLHR1\n/J1TxKfkgksYev7PoX6/ZzJ1ksWKZY5NFoDXTNYUgzFUTOoEaOg3BAQKadb3Qbbq\nXIrxmPBdgrn9QI7NCgfnAY3Tb4EEjs3ON/BNyEhUENcXOH6I1NbcuBQ7g9P73kE4\nVORdoc8MdJ5eoKBpO8Ww8HECAwEAAQ==
-616ae350:MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAyduVzi1mWm+lYo2Tqt/0\nXkCIWrDNP1QBMVPrE0/ZlU2bCGSoo2Z9FHQKz/mTyMRlhNqTfhJ5qU3U9XlyGOPJ\npiM+b91g26pnpXJ2Q2kOypSgOMOPA4cQ42PkHBEqhuzssfj9t7x47ppS94bboh46\nxLSDRff/NAbtwTpvhStV3URYkxFG++cKGGa5MPXBrxIp+iZf9GnuxVdST5PGiVGP\nODL/b69sPJQNbJHVquqUTOh5Ry8uuD2WZuXfKf7/C0jC/ie9m2+0CttNu9tMciGM\nEyKG1/Xhk5iIWO43m4SrrT2WkFlcZ1z2JSf9Pjm4C2+HovYpihwwdM/OdP8Xmsnr\nDzVB4YvQiW+IHBjStHVuyiZWc+JsgEPJzisNY0Wyc/kNyNtqVKpX6dRhMLanLmy+\nf53cCSI05KPQAcGj6tdL+D60uKDkt+FsDa0BTAobZ31OsFVid0vCXtsbplNhW1IF\nHwsGXBTVcfXg44RLyL8Lk/2dQxDHNHzAUslJXzPxaHBLmt++2COa2EI1iWlvtznk\nOk9WP8SOAIj+xdqoiHcC4j72BOVVgiITIJNHrbppZCq6qPR+fgXmXa+sDcGh30m6\n9Wpbr28kLMSHiENCWTdsFij+NQTd5S47H7XTROHnalYDuF1RpS+DpQidT5tUimaT\nJZDr++FjKrnnijbyNF8b98UCAwEAAQ==
-616db30d:MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAnpUpyWDWjlUk3smlWeA0\nlIMW+oJ38t92CRLHH3IqRhyECBRW0d0aRGtq7TY8PmxjjvBZrxTNDpJT6KUk4LRm\na6A6IuAI7QnNK8SJqM0DLzlpygd7GJf8ZL9SoHSH+gFsYF67Cpooz/YDqWrlN7Vw\ntO00s0B+eXy+PCXYU7VSfuWFGK8TGEv6HfGMALLjhqMManyvfp8hz3ubN1rK3c8C\nUS/ilRh1qckdbtPvoDPhSbTDmfU1g/EfRSIEXBrIMLg9ka/XB9PvWRrekrppnQzP\nhP9YE3x/wbFc5QqQWiRCYyQl/rgIMOXvIxhkfe8H5n1Et4VAorkpEAXdsfN8KSVv\nLSMazVlLp9GYq5SUpqYX3KnxdWBgN7BJoZ4sltsTpHQ/34SXWfu3UmyUveWj7wp0\nx9hwsPirVI00EEea9AbP7NM2rAyu6ukcm4m6ATd2DZJIViq2es6m60AE6SMCmrQF\nwmk4H/kdQgeAELVfGOm2VyJ3z69fQuywz7xu27S6zTKi05Qlnohxol4wVb6OB7qG\nLPRtK9ObgzRo/OPumyXqlzAi/Yvyd1ZQk8labZps3e16bQp8+pVPiumWioMFJDWV\nGZjCmyMSU8V6MB6njbgLHoyg2LCukCAeSjbPGGGYhnKLm1AKSoJh3IpZuqcKCk5C\n8CM1S15HxV78s9dFntEqIokCAwEAAQ==
-'
__Keyring=
-__SkipSigCheck=0
__UseMirror=0
__UnprocessedBuildArgs=
;;
x64)
__BuildArch=x64
- __AlpineArch=x86_64
__UbuntuArch=amd64
__FreeBSDArch=amd64
__FreeBSDMachineArch=amd64
__illumosArch=x86_64
- __HaikuArch=x86_64
- __UbuntuRepo="http://archive.ubuntu.com/ubuntu/"
+ __UbuntuRepo=
;;
x86)
__BuildArch=x86
;;
freebsd13)
__CodeName=freebsd
- __FreeBSDBase="13.2-RELEASE"
+ __FreeBSDBase="13.0-RELEASE"
__FreeBSDABI="13"
__SkipUnmount=1
;;
;;
haiku)
__CodeName=haiku
+ __BuildArch=x64
__SkipUnmount=1
;;
--skipunmount)
__SkipUnmount=1
;;
- --skipsigcheck)
- __SkipSigCheck=1
- ;;
--rootfsdir|-rootfsdir)
shift
__RootfsDir="$1"
edge) __AlpineLlvmLibsLookup=1 ;;
*)
if [[ "$__AlpineArch" =~ s390x|ppc64le ]]; then
__AlpineVersion=3.15 # minimum version that supports lldb-dev
__AlpinePackages+=" llvm12-libs"
elif [[ "$__AlpineArch" == "x86" ]]; then
__LLDB_Package="lldb-3.5-dev"
fi
-if [[ "$__CodeName" == "xenial" && "$__UbuntuArch" == "armhf" ]]; then
- # libnuma-dev is not available on armhf for xenial
- __UbuntuPackages="${__UbuntuPackages//libnuma-dev/}"
-fi
-
__UbuntuPackages+=" ${__LLDB_Package:-}"
if [[ -n "$__LLVM_MajorVersion" ]]; then
if [[ "$__CodeName" == "alpine" ]]; then
__ApkToolsVersion=2.12.11
- __ApkToolsSHA512SUM=53e57b49230da07ef44ee0765b9592580308c407a8d4da7125550957bb72cb59638e04f8892a18b584451c8d841d1c7cb0f0ab680cc323a3015776affaa3be33
__ApkToolsDir="$(mktemp -d)"
- __ApkKeysDir="$(mktemp -d)"
wget "https://gitlab.alpinelinux.org/api/v4/projects/5/packages/generic//v$__ApkToolsVersion/x86_64/apk.static" -P "$__ApkToolsDir"
- echo "$__ApkToolsSHA512SUM $__ApkToolsDir/apk.static" | sha512sum -c
chmod +x "$__ApkToolsDir/apk.static"
- if [[ -f "/usr/bin/qemu-$__QEMUArch-static" ]]; then
- mkdir -p "$__RootfsDir"/usr/bin
- cp -v "/usr/bin/qemu-$__QEMUArch-static" "$__RootfsDir/usr/bin"
- fi
+ mkdir -p "$__RootfsDir"/usr/bin
+ cp -v "/usr/bin/qemu-$__QEMUArch-static" "$__RootfsDir/usr/bin"
if [[ "$__AlpineVersion" == "edge" ]]; then
version=edge
version="v$__AlpineVersion"
fi
- for line in $__AlpineKeys; do
- id="${line%%:*}"
- content="${line#*:}"
-
- echo -e "-----BEGIN PUBLIC KEY-----\n$content\n-----END PUBLIC KEY-----" > "$__ApkKeysDir/alpine-devel@lists.alpinelinux.org-$id.rsa.pub"
- done
-
- if [[ "$__SkipSigCheck" == "1" ]]; then
- __ApkSignatureArg="--allow-untrusted"
- else
- __ApkSignatureArg="--keys-dir $__ApkKeysDir"
- fi
-
# initialize DB
"$__ApkToolsDir/apk.static" \
-X "http://dl-cdn.alpinelinux.org/alpine/$version/main" \
-X "http://dl-cdn.alpinelinux.org/alpine/$version/community" \
- -U $__ApkSignatureArg --root "$__RootfsDir" --arch "$__AlpineArch" --initdb add
+ -U --allow-untrusted --root "$__RootfsDir" --arch "$__AlpineArch" --initdb add
if [[ "$__AlpineLlvmLibsLookup" == 1 ]]; then
__AlpinePackages+=" $("$__ApkToolsDir/apk.static" \
-X "http://dl-cdn.alpinelinux.org/alpine/$version/main" \
-X "http://dl-cdn.alpinelinux.org/alpine/$version/community" \
- -U $__ApkSignatureArg --root "$__RootfsDir" --arch "$__AlpineArch" \
+ -U --allow-untrusted --root "$__RootfsDir" --arch "$__AlpineArch" \
search 'llvm*-libs' | sort | tail -1 | sed 's/-[^-]*//2g')"
fi
"$__ApkToolsDir/apk.static" \
-X "http://dl-cdn.alpinelinux.org/alpine/$version/main" \
-X "http://dl-cdn.alpinelinux.org/alpine/$version/community" \
- -U $__ApkSignatureArg --root "$__RootfsDir" --arch "$__AlpineArch" \
+ -U --allow-untrusted --root "$__RootfsDir" --arch "$__AlpineArch" \
add $__AlpinePackages
rm -r "$__ApkToolsDir"
elif [[ "$__CodeName" == "haiku" ]]; then
JOBS=${MAXJOBS:="$(getconf _NPROCESSORS_ONLN)"}
- echo "Building Haiku sysroot for $__HaikuArch"
+ echo "Building Haiku sysroot for x86_64"
mkdir -p "$__RootfsDir/tmp"
- pushd "$__RootfsDir/tmp"
-
- mkdir "$__RootfsDir/tmp/download"
-
- echo "Downloading Haiku package tool"
- git clone https://github.com/haiku/haiku-toolchains-ubuntu --depth 1 $__RootfsDir/tmp/script
- wget -O "$__RootfsDir/tmp/download/hosttools.zip" $($__RootfsDir/tmp/script/fetch.sh --hosttools)
- unzip -o "$__RootfsDir/tmp/download/hosttools.zip" -d "$__RootfsDir/tmp/bin"
-
- DepotBaseUrl="https://depot.haiku-os.org/__api/v2/pkg/get-pkg"
- HpkgBaseUrl="https://eu.hpkg.haiku-os.org/haiku/master/$__HaikuArch/current"
-
- # Download Haiku packages
- echo "Downloading Haiku packages"
+ cd "$__RootfsDir/tmp"
+ git clone -b hrev56235 https://review.haiku-os.org/haiku
+ git clone -b btrev43195 https://review.haiku-os.org/buildtools
+ cd "$__RootfsDir/tmp/buildtools" && git checkout 7487388f5110021d400b9f3b88e1a7f310dc066d
+
+ # Fetch some unmerged patches
+ cd "$__RootfsDir/tmp/haiku"
+ ## Add development build profile (slimmer than nightly)
+ git fetch origin refs/changes/64/4164/1 && git -c commit.gpgsign=false cherry-pick FETCH_HEAD
+
+ # Build jam
+ cd "$__RootfsDir/tmp/buildtools/jam"
+ make
+
+ # Configure cross tools
+ echo "Building cross-compiler"
+ mkdir -p "$__RootfsDir/generated"
+ cd "$__RootfsDir/generated"
+ "$__RootfsDir/tmp/haiku/configure" -j"$JOBS" --sysroot "$__RootfsDir" --cross-tools-source "$__RootfsDir/tmp/buildtools" --build-cross-tools x86_64
+
+ # Build Haiku packages
+ echo "Building Haiku"
+ echo 'HAIKU_BUILD_PROFILE = "development-raw" ;' > UserProfileConfig
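+        # '<build>package' builds the host-side "package" tool and '<repository>Haiku' builds
+        # the system .hpkg packages; both outputs are picked up from $__RootfsDir/generated/objects
+        # further below (assumed jam target semantics, inferred from how the outputs are used).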
+ "$__RootfsDir/tmp/buildtools/jam/jam0" -j"$JOBS" -q '<build>package' '<repository>Haiku'
+
+ BaseUrl="https://depot.haiku-os.org/__api/v2/pkg/get-pkg"
+
+ # Download additional packages
+ echo "Downloading additional required packages"
read -ra array <<<"$__HaikuPackages"
for package in "${array[@]}"; do
echo "Downloading $package..."
# API documented here: https://github.com/haiku/haikudepotserver/blob/master/haikudepotserver-api2/src/main/resources/api2/pkg.yaml#L60
# The schema here: https://github.com/haiku/haikudepotserver/blob/master/haikudepotserver-api2/src/main/resources/api2/pkg.yaml#L598
- hpkgDownloadUrl="$(wget -qO- --post-data='{"name":"'"$package"'","repositorySourceCode":"haikuports_'$__HaikuArch'","versionType":"LATEST","naturalLanguageCode":"en"}' \
- --header='Content-Type:application/json' "$DepotBaseUrl" | jq -r '.result.versions[].hpkgDownloadURL')"
- wget -P "$__RootfsDir/tmp/download" "$hpkgDownloadUrl"
- done
- for package in haiku haiku_devel; do
- echo "Downloading $package..."
- hpkgVersion="$(wget -qO- $HpkgBaseUrl | sed -n 's/^.*version: "\([^"]*\)".*$/\1/p')"
- wget -P "$__RootfsDir/tmp/download" "$HpkgBaseUrl/packages/$package-$hpkgVersion-1-$__HaikuArch.hpkg"
+ hpkgDownloadUrl="$(wget -qO- --post-data='{"name":"'"$package"'","repositorySourceCode":"haikuports_x86_64","versionType":"LATEST","naturalLanguageCode":"en"}' \
+ --header='Content-Type:application/json' "$BaseUrl" | jq -r '.result.versions[].hpkgDownloadURL')"
+ wget -P "$__RootfsDir/generated/download" "$hpkgDownloadUrl"
done
- # Set up the sysroot
- echo "Setting up sysroot and extracting required packages"
+ # Setup the sysroot
+ echo "Setting up sysroot and extracting needed packages"
mkdir -p "$__RootfsDir/boot/system"
- for file in "$__RootfsDir/tmp/download/"*.hpkg; do
- echo "Extracting $file..."
- LD_LIBRARY_PATH="$__RootfsDir/tmp/bin" "$__RootfsDir/tmp/bin/package" extract -C "$__RootfsDir/boot/system" "$file"
+ for file in "$__RootfsDir/generated/objects/haiku/x86_64/packaging/packages/"*.hpkg; do
+ "$__RootfsDir/generated/objects/linux/x86_64/release/tools/package/package" extract -C "$__RootfsDir/boot/system" "$file"
+ done
+ for file in "$__RootfsDir/generated/download/"*.hpkg; do
+ "$__RootfsDir/generated/objects/linux/x86_64/release/tools/package/package" extract -C "$__RootfsDir/boot/system" "$file"
done
-
- # Download buildtools
- echo "Downloading Haiku buildtools"
- wget -O "$__RootfsDir/tmp/download/buildtools.zip" $($__RootfsDir/tmp/script/fetch.sh --buildtools --arch=$__HaikuArch)
- unzip -o "$__RootfsDir/tmp/download/buildtools.zip" -d "$__RootfsDir"
# Cleaning up temporary files
echo "Cleaning up temporary files"
- popd
rm -rf "$__RootfsDir/tmp"
+ for name in "$__RootfsDir/generated/"*; do
+ if [[ "$name" =~ "cross-tools-" ]]; then
+ : # Keep the cross-compiler
+ else
+ rm -rf "$name"
+ fi
+ done
elif [[ -n "$__CodeName" ]]; then
-
- if [[ "$__SkipSigCheck" == "0" ]]; then
- __Keyring="$__Keyring --force-check-gpg"
- fi
-
- debootstrap "--variant=minbase" $__Keyring --arch "$__UbuntuArch" "$__CodeName" "$__RootfsDir" "$__UbuntuRepo"
+ qemu-debootstrap $__Keyring --arch "$__UbuntuArch" "$__CodeName" "$__RootfsDir" "$__UbuntuRepo"
cp "$__CrossDir/$__BuildArch/sources.list.$__CodeName" "$__RootfsDir/etc/apt/sources.list"
chroot "$__RootfsDir" apt-get update
chroot "$__RootfsDir" apt-get -f -y install
unset(ILLUMOS)
unset(ANDROID)
unset(TIZEN)
-unset(HAIKU)
set(TARGET_ARCH_NAME $ENV{TARGET_BUILD_ARCH})
if(EXISTS ${CROSS_ROOTFS}/bin/freebsd-version)
set(ILLUMOS 1)
elseif(EXISTS ${CROSS_ROOTFS}/boot/system/develop/headers/config/HaikuConfig.h)
set(CMAKE_SYSTEM_NAME Haiku)
- set(HAIKU 1)
else()
set(CMAKE_SYSTEM_NAME Linux)
set(LINUX 1)
endif()
elseif(TARGET_ARCH_NAME STREQUAL "ppc64le")
set(CMAKE_SYSTEM_PROCESSOR ppc64le)
- if(EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/powerpc64le-alpine-linux-musl)
- set(TOOLCHAIN "powerpc64le-alpine-linux-musl")
- else()
- set(TOOLCHAIN "powerpc64le-linux-gnu")
- endif()
+ set(TOOLCHAIN "powerpc64le-linux-gnu")
elseif(TARGET_ARCH_NAME STREQUAL "riscv64")
set(CMAKE_SYSTEM_PROCESSOR riscv64)
- if(EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/riscv64-alpine-linux-musl)
- set(TOOLCHAIN "riscv64-alpine-linux-musl")
- else()
- set(TOOLCHAIN "riscv64-linux-gnu")
- endif()
+ set(TOOLCHAIN "riscv64-linux-gnu")
elseif(TARGET_ARCH_NAME STREQUAL "s390x")
set(CMAKE_SYSTEM_PROCESSOR s390x)
- if(EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/s390x-alpine-linux-musl)
- set(TOOLCHAIN "s390x-alpine-linux-musl")
- else()
- set(TOOLCHAIN "s390x-linux-gnu")
- endif()
+ set(TOOLCHAIN "s390x-linux-gnu")
elseif(TARGET_ARCH_NAME STREQUAL "x64")
set(CMAKE_SYSTEM_PROCESSOR x86_64)
- if(EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/x86_64-alpine-linux-musl)
- set(TOOLCHAIN "x86_64-alpine-linux-musl")
- elseif(LINUX)
+ if(LINUX)
set(TOOLCHAIN "x86_64-linux-gnu")
if(TIZEN)
set(TIZEN_TOOLCHAIN "x86_64-tizen-linux-gnu/9.2.0")
elseif(ILLUMOS)
set(TOOLCHAIN "x86_64-illumos")
elseif(HAIKU)
- set(TOOLCHAIN "x86_64-unknown-haiku")
+ set(TOOLCHAIN "x64_64-unknown-haiku")
endif()
elseif(TARGET_ARCH_NAME STREQUAL "x86")
set(CMAKE_SYSTEM_PROCESSOR i686)
- if(EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/i586-alpine-linux-musl)
- set(TOOLCHAIN "i586-alpine-linux-musl")
- else()
- set(TOOLCHAIN "i686-linux-gnu")
- endif()
+ set(TOOLCHAIN "i686-linux-gnu")
if(TIZEN)
set(TIZEN_TOOLCHAIN "i586-tizen-linux-gnu/9.2.0")
endif()
return()
endif()
+ set(SEARCH_PATH "${CROSS_ROOTFS}/generated/cross-tools-x86_64/bin")
+
find_program(EXEC_LOCATION_${exec}
- PATHS "${CROSS_ROOTFS}/cross-tools-x86_64/bin"
+ PATHS ${SEARCH_PATH}
NAMES
"${TOOLSET_PREFIX}${exec}${CLR_CMAKE_COMPILER_FILE_NAME_VERSION}"
"${TOOLSET_PREFIX}${exec}")
add_toolchain_linker_flag("-Wl,--rpath-link=${CROSS_ROOTFS}/usr/lib64/gcc/${TIZEN_TOOLCHAIN}")
endif()
elseif(TARGET_ARCH_NAME STREQUAL "x86")
- if(EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/i586-alpine-linux-musl)
- add_toolchain_linker_flag("--target=${TOOLCHAIN}")
- add_toolchain_linker_flag("-Wl,--rpath-link=${CROSS_ROOTFS}/usr/lib/gcc/${TOOLCHAIN}")
- endif()
add_toolchain_linker_flag(-m32)
+
if(TIZEN)
add_toolchain_linker_flag("-B${CROSS_ROOTFS}/usr/lib/gcc/${TIZEN_TOOLCHAIN}")
add_toolchain_linker_flag("-L${CROSS_ROOTFS}/lib")
elseif(ILLUMOS)
add_toolchain_linker_flag("-L${CROSS_ROOTFS}/lib/amd64")
add_toolchain_linker_flag("-L${CROSS_ROOTFS}/usr/amd64/lib")
-elseif(HAIKU)
- add_toolchain_linker_flag("-lnetwork")
- add_toolchain_linker_flag("-lroot")
endif()
# Specify compile options
-if((TARGET_ARCH_NAME MATCHES "^(arm|arm64|armel|armv6|ppc64le|riscv64|s390x|x64|x86)$" AND NOT ANDROID AND NOT FREEBSD) OR ILLUMOS OR HAIKU)
+if((TARGET_ARCH_NAME MATCHES "^(arm|arm64|armel|armv6|ppc64le|riscv64|s390x)$" AND NOT ANDROID AND NOT FREEBSD) OR ILLUMOS OR HAIKU)
set(CMAKE_C_COMPILER_TARGET ${TOOLCHAIN})
set(CMAKE_CXX_COMPILER_TARGET ${TOOLCHAIN})
set(CMAKE_ASM_COMPILER_TARGET ${TOOLCHAIN})
add_definitions (-DCLR_ARM_FPU_CAPABILITY=${CLR_ARM_FPU_CAPABILITY})
- # persist variables across multiple try_compile passes
- list(APPEND CMAKE_TRY_COMPILE_PLATFORM_VARIABLES CLR_ARM_FPU_TYPE CLR_ARM_FPU_CAPABILITY)
-
if(TARGET_ARCH_NAME STREQUAL "armel")
add_compile_options(-mfloat-abi=softfp)
endif()
elseif(TARGET_ARCH_NAME STREQUAL "x86")
- if(EXISTS ${CROSS_ROOTFS}/usr/lib/gcc/i586-alpine-linux-musl)
- add_compile_options(--target=${TOOLCHAIN})
- endif()
add_compile_options(-m32)
add_compile_options(-Wno-error=unused-command-line-argument)
endif()
if [ -z "$majorVersion" ]; then
# note: gcc (all versions) and clang versions higher than 6 do not have minor version in file name, if it is zero.
if [ "$compiler" = "clang" ]; then versions="16 15 14 13 12 11 10 9 8 7 6.0 5.0 4.0 3.9 3.8 3.7 3.6 3.5"
- elif [ "$compiler" = "gcc" ]; then versions="13 12 11 10 9 8 7 6 5 4.9"; fi
+ elif [ "$compiler" = "gcc" ]; then versions="12 11 10 9 8 7 6 5 4.9"; fi
for version in $versions; do
_major="${version%%.*}"
$GlobalJson.tools | Add-Member -Name "vs" -Value (ConvertFrom-Json "{ `"version`": `"16.5`" }") -MemberType NoteProperty
}
if( -not ($GlobalJson.tools.PSObject.Properties.Name -match "xcopy-msbuild" )) {
- $GlobalJson.tools | Add-Member -Name "xcopy-msbuild" -Value "17.6.0-2" -MemberType NoteProperty
+ $GlobalJson.tools | Add-Member -Name "xcopy-msbuild" -Value "17.4.1" -MemberType NoteProperty
}
if ($GlobalJson.tools."xcopy-msbuild".Trim() -ine "none") {
$xcopyMSBuildToolsFolder = InitializeXCopyMSBuild $GlobalJson.tools."xcopy-msbuild" -install $true
enablePublishTestResults: false
enablePublishUsingPipelines: false
enableBuildRetry: false
- disableComponentGovernance: ''
+ disableComponentGovernance: false
componentGovernanceIgnoreDirectories: ''
mergeTestResults: false
testRunTitle: ''
- ${{ if eq(parameters.enableRichCodeNavigation, 'true') }}:
- name: EnableRichCodeNavigation
value: 'true'
- # Retry signature validation up to three times, waiting 2 seconds between attempts.
- # See https://learn.microsoft.com/en-us/nuget/reference/errors-and-warnings/nu3028#retry-untrusted-root-failures
- - name: NUGET_EXPERIMENTAL_CHAIN_BUILD_RETRY_POLICY
- value: 3,2000
- ${{ each variable in parameters.variables }}:
# handle name-value variable syntax
# example:
- ${{ if ne(variable.name, '') }}:
- name: ${{ variable.name }}
value: ${{ variable.value }}
-
+
# handle variable groups
- ${{ if ne(variable.group, '') }}:
- group: ${{ variable.group }}
uploadRichNavArtifacts: ${{ coalesce(parameters.richCodeNavigationUploadArtifacts, false) }}
continueOnError: true
- - template: /eng/common/templates/steps/component-governance.yml
- parameters:
- ${{ if eq(parameters.disableComponentGovernance, '') }}:
- ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest'), eq(parameters.runAsPublic, 'false'), or(startsWith(variables['Build.SourceBranch'], 'refs/heads/release/'), startsWith(variables['Build.SourceBranch'], 'refs/heads/dotnet/'), startsWith(variables['Build.SourceBranch'], 'refs/heads/microsoft/'), eq(variables['Build.SourceBranch'], 'refs/heads/main'))) }}:
- disableComponentGovernance: false
- ${{ else }}:
- disableComponentGovernance: true
- ${{ else }}:
- disableComponentGovernance: ${{ parameters.disableComponentGovernance }}
- componentGovernanceIgnoreDirectories: ${{ parameters.componentGovernanceIgnoreDirectories }}
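+  # Run Component Detection only for non-public, non-PR builds that have not explicitly disabled component governance.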
+ - ${{ if and(eq(parameters.runAsPublic, 'false'), ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest'), ne(parameters.disableComponentGovernance, 'true')) }}:
+ - task: ComponentGovernanceComponentDetection@0
+ continueOnError: true
+ inputs:
+ ignoreDirectories: ${{ parameters.componentGovernanceIgnoreDirectories }}
- ${{ if eq(parameters.enableMicrobuild, 'true') }}:
- ${{ if and(eq(parameters.runAsPublic, 'false'), ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
- task: MicroBuildCleanup@1
- displayName: Execute Microbuild cleanup tasks
+ displayName: Execute Microbuild cleanup tasks
condition: and(always(), in(variables['_SignType'], 'real', 'test'), eq(variables['Agent.Os'], 'Windows_NT'))
continueOnError: ${{ parameters.continueOnError }}
env:
displayName: Publish XUnit Test Results
inputs:
testResultsFormat: 'xUnit'
- testResultsFiles: '*.xml'
+ testResultsFiles: '*.xml'
searchFolder: '$(Build.SourcesDirectory)/artifacts/TestResults/$(_BuildConfig)'
testRunTitle: ${{ coalesce(parameters.testRunTitle, parameters.name, '$(System.JobName)') }}-xunit
mergeTestResults: ${{ parameters.mergeTestResults }}
displayName: Publish TRX Test Results
inputs:
testResultsFormat: 'VSTest'
- testResultsFiles: '*.trx'
+ testResultsFiles: '*.trx'
searchFolder: '$(Build.SourcesDirectory)/artifacts/TestResults/$(_BuildConfig)'
testRunTitle: ${{ coalesce(parameters.testRunTitle, parameters.name, '$(System.JobName)') }}-trx
mergeTestResults: ${{ parameters.mergeTestResults }}
+++ /dev/null
-parameters:
- disableComponentGovernance: false
- componentGovernanceIgnoreDirectories: ''
-
-steps:
-- ${{ if eq(parameters.disableComponentGovernance, 'true') }}:
- - script: "echo ##vso[task.setvariable variable=skipComponentGovernanceDetection]true"
- displayName: Set skipComponentGovernanceDetection variable
-- ${{ if ne(parameters.disableComponentGovernance, 'true') }}:
- - task: ComponentGovernanceComponentDetection@0
- continueOnError: true
- inputs:
- ignoreDirectories: ${{ parameters.componentGovernanceIgnoreDirectories }}
\ No newline at end of file
runtimeOsArgs='/p:RuntimeOS=${{ parameters.platform.runtimeOS }}'
fi
- baseOsArgs=
- if [ '${{ parameters.platform.baseOS }}' != '' ]; then
- baseOsArgs='/p:BaseOS=${{ parameters.platform.baseOS }}'
- fi
-
publishArgs=
if [ '${{ parameters.platform.skipPublishValidation }}' != 'true' ]; then
publishArgs='--publish'
$internalRestoreArgs \
$targetRidArgs \
$runtimeOsArgs \
- $baseOsArgs \
/p:SourceBuildNonPortable=${{ parameters.platform.nonPortable }} \
/p:ArcadeBuildFromSource=true \
/p:AssetManifestFileName=$assetManifestFileName
[string] $runtimeSourceFeedKey = '',
[switch] $noPath) {
- $dotnetVersionLabel = "'sdk v$version'"
-
- if ($runtime -ne '' -and $runtime -ne 'sdk') {
- $runtimePath = $dotnetRoot
- $runtimePath = $runtimePath + "\shared"
- if ($runtime -eq "dotnet") { $runtimePath = $runtimePath + "\Microsoft.NETCore.App" }
- if ($runtime -eq "aspnetcore") { $runtimePath = $runtimePath + "\Microsoft.AspNetCore.App" }
- if ($runtime -eq "windowsdesktop") { $runtimePath = $runtimePath + "\Microsoft.WindowsDesktop.App" }
- $runtimePath = $runtimePath + "\" + $version
-
- $dotnetVersionLabel = "runtime toolset '$runtime/$architecture v$version'"
-
- if (Test-Path $runtimePath) {
- Write-Host " Runtime toolset '$runtime/$architecture v$version' already installed."
- $installSuccess = $true
- Exit
- }
- }
-
$installScript = GetDotNetInstallScript $dotnetRoot
$installParameters = @{
Version = $version
} else {
$location = "public location";
}
- Write-Host " Attempting to install $dotnetVersionLabel from $location."
+ Write-Host "Attempting to install dotnet from $location."
try {
& $installScript @variation
$installSuccess = $true
break
}
catch {
- Write-Host " Failed to install $dotnetVersionLabel from $location."
+ Write-Host "Failed to install dotnet from $location."
}
}
if (-not $installSuccess) {
- Write-PipelineTelemetryError -Category 'InitializeToolset' -Message "Failed to install $dotnetVersionLabel from any of the specified locations."
+ Write-PipelineTelemetryError -Category 'InitializeToolset' -Message "Failed to install dotnet from any of the specified locations."
ExitWithExitCode 1
}
}
# If the version of msbuild is going to be xcopied,
# use this version. Version matches a package here:
- # https://dev.azure.com/dnceng/public/_artifacts/feed/dotnet-eng/NuGet/RoslynTools.MSBuild/versions/17.6.0-2
- $defaultXCopyMSBuildVersion = '17.6.0-2'
+ # https://dev.azure.com/dnceng/public/_packaging?_a=package&feed=dotnet-eng&package=RoslynTools.MSBuild&protocolType=NuGet&version=17.4.1&view=overview
+ $defaultXCopyMSBuildVersion = '17.4.1'
if (!$vsRequirements) {
if (Get-Member -InputObject $GlobalJson.tools -Name 'vs') {
# Locate Visual Studio installation or download x-copy msbuild.
$vsInfo = LocateVisualStudio $vsRequirements
if ($vsInfo -ne $null) {
- # Ensure vsInstallDir has a trailing slash
- $vsInstallDir = Join-Path $vsInfo.installationPath "\"
+ $vsInstallDir = $vsInfo.installationPath
$vsMajorVersion = $vsInfo.installationVersion.Split('.')[0]
InitializeVisualStudioEnvironmentVariables $vsInstallDir $vsMajorVersion
function InstallDotNet {
local root=$1
local version=$2
- local runtime=$4
-
- local dotnetVersionLabel="'$runtime v$version'"
- if [[ -n "${4:-}" ]] && [ "$4" != 'sdk' ]; then
- runtimePath="$root"
- runtimePath="$runtimePath/shared"
- case "$runtime" in
- dotnet)
- runtimePath="$runtimePath/Microsoft.NETCore.App"
- ;;
- aspnetcore)
- runtimePath="$runtimePath/Microsoft.AspNetCore.App"
- ;;
- windowsdesktop)
- runtimePath="$runtimePath/Microsoft.WindowsDesktop.App"
- ;;
- *)
- ;;
- esac
- runtimePath="$runtimePath/$version"
-
- dotnetVersionLabel="runtime toolset '$runtime/$architecture v$version'"
-
- if [ -d "$runtimePath" ]; then
- echo " Runtime toolset '$runtime/$architecture v$version' already installed."
- local installSuccess=1
- return
- fi
- fi
GetDotNetInstallScript "$root"
local install_script=$_GetDotNetInstallScript
for variationName in "${variations[@]}"; do
local name="$variationName[@]"
local variation=("${!name}")
- echo " Attempting to install $dotnetVersionLabel from $variationName."
+ echo "Attempting to install dotnet from $variationName."
bash "$install_script" "${variation[@]}" && installSuccess=1
if [[ "$installSuccess" -eq 1 ]]; then
break
fi
- echo " Failed to install $dotnetVersionLabel from $variationName."
+ echo "Failed to install dotnet from $variationName."
done
if [[ "$installSuccess" -eq 0 ]]; then
- Write-PipelineTelemetryError -category 'InitializeToolset' "Failed to install $dotnetVersionLabel from any of the specified locations."
+ Write-PipelineTelemetryError -category 'InitializeToolset' "Failed to install dotnet SDK from any of the specified locations."
ExitWithExitCode 1
fi
}
--- /dev/null
+#!/usr/bin/env bash
+# Copyright (c) .NET Foundation and contributors. All rights reserved.
+# Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
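+# Runs the remaining command-line arguments as a command inside a disposable Docker
+# container that mounts the source directory. Illustrative invocation (values are
+# placeholders, not defaults): <this script> -s "$(pwd)" -i <docker image> -c <name> ./build.sh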
+source_directory=
+docker_image=
+docker_container_name=
+
+while [ $# -ne 0 ]; do
+ name=$1
+ case $name in
+ -s|--source-directory)
+ shift
+ source_directory=$1
+ ;;
+ -i|--docker-image)
+ shift
+ docker_image=$1
+ ;;
+ -c|--container-name)
+ shift
+ docker_container_name=$1
+ ;;
+ *)
+ args="$args $1"
+ ;;
+ esac
+ shift
+done
+
+echo "Initialize Docker Container"
+if command -v docker > /dev/null; then
+ docker_bin=$(command -v docker)
+else
+ echo "Unable to find docker"
+ exit 1
+fi
+
+$docker_bin --version
+
+# Get user id
+user_name=$(whoami)
+echo "user name: $user_name"
+user_id=$(id -u $user_name)
+echo "user id: $user_id"
+
+# Download image
+$docker_bin pull $docker_image
+
+# Create a local network to avoid port conflicts when multiple agents run on the same machine
+$docker_bin network create vsts_network_$docker_container_name
+
+# Create and start container
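+# seccomp is left unconfined and the core-size ulimit removed so that tools running in the
+# container can trace processes and write core dumps (assumed requirement for the diagnostics
+# tests; tighten these options if they are not needed).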
+docker_id="$($docker_bin create -it --rm --security-opt seccomp=unconfined --ulimit core=-1 \
+ --name vsts_container_$docker_container_name \
+ --network=vsts_network_$docker_container_name \
+ --volume $source_directory:$source_directory \
+ --workdir=$source_directory $docker_image bash --verbose)"
+$docker_bin start $docker_id
+
+# Create a user with the same uid in the container
+container_user_name=vsts_$(echo $user_name | awk '{print tolower($0)}')
+echo "container user name: $container_user_name"
+
+# Add sudo user with same uid that can run any sudo command without password
+$docker_bin exec $docker_id useradd -K MAIL_DIR=/dev/null -m -u $user_id $container_user_name
+$docker_bin exec $docker_id groupadd sudouser
+$docker_bin exec $docker_id usermod -a -G sudouser $container_user_name
+$docker_bin exec $docker_id su -c "echo '%sudouser ALL=(ALL:ALL) NOPASSWD:ALL' >> /etc/sudoers"
+
+echo "Execute $args"
+$docker_bin exec --workdir=$source_directory --user $container_user_name $docker_id $args
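+# Capture the command's exit code before cleanup so it can be propagated to the caller.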
+lasterrorcode=$?
+
+echo "Cleanup Docker Container/Network"
+$docker_bin container stop $docker_id
+$docker_bin network rm vsts_network_$docker_container_name
+
+exit $lasterrorcode
+++ /dev/null
-parameters:
- # Job name
- name: ''
- # Agent OS (Windows_NT, Linux, MacOS, FreeBSD)
- osGroup: Windows_NT
- # Optional: OS suffix like -musl
- osSuffix: ''
- # Additional variables
- variables: {}
- # Build strategy - matrix
- strategy: ''
- # Optional: Job timeout
- timeoutInMinutes: 180
- # Optional: native build container resource name
- nativeBuildContainer: ''
- # Optional: container resource name
- container: ''
- # Optional: build only job if true
- buildOnly: false
- # Optional: test only job if true
- testOnly: false
- # Optional: architecture cross build if true
- crossBuild: false
- # Depends on
- dependsOn: ''
- isCodeQLRun: false
-
-jobs:
-- template: /eng/common/templates/job/job.yml
- parameters:
- name: ${{ parameters.name }}
- timeoutInMinutes: ${{ parameters.timeoutInMinutes }}
- enableMicrobuild: true
- enableTelemetry: true
- helixRepo: dotnet/diagnostics
- runAsPublic: ${{ parameters.isCodeQLRun }}
-
- pool:
- # Public Linux Build Pool
- ${{ if and(eq(parameters.osGroup, 'Linux'), eq(variables['System.TeamProject'], 'public')) }}:
- name: NetCore-Svc-Public
- demands: ImageOverride -equals Build.Ubuntu.1804.Amd64.Open
-
- # Official Build Linux Pool
- ${{ if and(eq(parameters.osGroup, 'Linux'), ne(variables['System.TeamProject'], 'public')) }}:
- name: NetCore1ESPool-Svc-Internal
- demands: ImageOverride -equals Build.Ubuntu.1804.Amd64
-
- # FreeBSD builds only in the internal project
- ${{ if and(eq(parameters.osGroup, 'FreeBSD'), ne(variables['System.TeamProject'], 'public')) }}:
- name: dnceng-freebsd-internal
-
- # Build OSX Pool (we don't have on-prem OSX BuildPool)
- ${{ if in(parameters.osGroup, 'MacOS', 'MacOS_cross') }}:
- vmImage: macOS-latest
-
- # Official Build Windows Pool
- ${{ if and(eq(parameters.osGroup, 'Windows_NT'), ne(variables['System.TeamProject'], 'public')) }}:
- name: NetCore1ESPool-Svc-Internal
- demands: ImageOverride -equals windows.vs2022.amd64
-
- # Public Windows Build Pool
- ${{ if and(eq(parameters.osGroup, 'Windows_NT'), eq(variables['System.TeamProject'], 'public')) }}:
- name: NetCore-Svc-Public
- demands: ImageOverride -equals windows.vs2022.amd64.open
-
- ${{ if ne(parameters.container, '') }}:
- container: ${{ parameters.container }}
-
- ${{ if ne(parameters.strategy, '') }}:
- strategy: ${{ parameters.strategy }}
-
- ${{ if ne(parameters.dependsOn, '') }}:
- dependsOn: ${{ parameters.dependsOn }}
-
- workspace:
- clean: all
-
- variables:
- - ${{ insert }}: ${{ parameters.variables }}
- - _PhaseName : ${{ parameters.name }}
- - _HelixType: build/product
- - _HelixBuildConfig: $(_BuildConfig)
- - _Pipeline_StreamDumpDir: $(Build.SourcesDirectory)/artifacts/tmp/$(_BuildConfig)/streams
-
- - _BuildDisplayName: 'Build / Test'
- - _ExtraBuildParams: ''
- - _TestArgs: '-test'
- - _Cross: ''
-
- - ${{ if eq(parameters.osGroup, 'Windows_NT') }}:
- - _buildScript: $(Build.SourcesDirectory)\build.cmd
- - ${{ if ne(parameters.osGroup, 'Windows_NT') }}:
- - _buildScript: $(Build.SourcesDirectory)/build.sh
-
- - ${{ if eq(parameters.testOnly, 'true') }}:
- - _TestArgs: '-test -skipnative'
- - _BuildDisplayName: Test
-
- - ${{ if or(eq(parameters.buildOnly, 'true'), eq(parameters.isCodeQLRun, 'true')) }}:
- - _TestArgs: ''
-
- - ${{ if eq(parameters.isCodeQLRun, 'true') }}:
- - name: Codeql.Enabled
- value: True
- - name: Codeql.Cadence
- value: 0
- - name: Codeql.TSAEnabled
- value: True
- - name: Codeql.BuildIdentifier
- value: $(System.JobDisplayName)
- - name: Codeql.Language
- value: csharp,cpp
-
- # For testing msrc's and service releases. The RuntimeSourceVersion is either "default" or the service release version to test
- - _InternalInstallArgs: ''
- - ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest'), eq(parameters.isCodeQLRun, 'false')) }}:
- - _InternalInstallArgs:
- -dotnetruntimeversion '$(DotnetRuntimeVersion)'
- -dotnetruntimedownloadversion '$(DotnetRuntimeDownloadVersion)'
- -runtimesourcefeed '$(RuntimeFeedUrl)'
- -runtimesourcefeedkey '$(RuntimeFeedBase64SasToken)'
-
- # Only enable publishing in non-public, non PR scenarios.
- - ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest')) }}:
- - _HelixSource: official/dotnet/arcade/$(Build.SourceBranch)
- - ${{ if or(eq(variables['System.TeamProject'], 'public'), in(variables['Build.Reason'], 'PullRequest')) }}:
- - _HelixSource: pr/dotnet/arcade/$(Build.SourceBranch)
-
- # This is only required for cross builds.
- - ${{ if eq(parameters.crossBuild, true) }}:
- - _Cross: -cross
-
- # If there is a native build container, build managed in the host vm/container and native in the nativeBuildContainer
- - ${{ if ne(parameters.nativeBuildContainer, '') }}:
- - _ExtraBuildParams: -skipnative
- - _BuildDisplayName: 'Build Managed'
-
- # Only add the cross build option if a combined build/test managed/native build (i.e. MacOS arm64)
- - ${{ if eq(parameters.nativeBuildContainer, '') }}:
- - _ExtraBuildParams: $(_Cross)
-
- steps:
- - ${{ if eq(parameters.osGroup, 'Linux') }}:
- - ${{ if eq(parameters.testOnly, 'true') }}:
- - task: DownloadBuildArtifacts@0
- displayName: 'Download Build Artifacts'
- inputs:
- downloadPath: '$(Build.ArtifactStagingDirectory)/__download__'
- downloadType: specific
- itemPattern: |
- Build_$(_BuildConfig)/bin/Linux${{ parameters.osSuffix }}.$(_BuildArch).$(_BuildConfig)/**
- checkDownloadedFiles: true
- - task: CopyFiles@2
- displayName: 'Binplace Product'
- inputs:
- sourceFolder: $(Build.ArtifactStagingDirectory)/__download__/Build_$(_BuildConfig)/bin/Linux${{ parameters.osSuffix }}.$(_BuildArch).$(_BuildConfig)
- targetFolder: '$(Build.SourcesDirectory)/artifacts/bin/Linux.$(_BuildArch).$(_BuildConfig)'
-
- - ${{ if eq(parameters.isCodeQLRun, 'true') }}:
- - task: CodeQL3000Init@0
- displayName: CodeQL Initialize
-
- - script: $(_buildScript)
- -ci
- -configuration $(_BuildConfig)
- -architecture $(_BuildArch)
- $(_ExtraBuildParams)
- $(_TestArgs)
- /p:OfficialBuildId=$(BUILD.BUILDNUMBER)
- $(_InternalInstallArgs)
- displayName: $(_BuildDisplayName)
- condition: succeeded()
-
- - ${{ if ne(parameters.nativeBuildContainer, '') }}:
- - script: $(_buildScript)
- -ci
- -configuration $(_BuildConfig)
- -architecture $(_BuildArch)
- -skipmanaged
- $(_Cross)
- /p:OfficialBuildId=$(BUILD.BUILDNUMBER)
- $(_InternalInstallArgs)
- displayName: Build Native
- target: ${{ parameters.nativeBuildContainer }}
-
- - ${{ if eq(parameters.isCodeQLRun, 'true') }}:
- - task: CodeQL3000Finalize@0
- displayName: CodeQL Finalize
-
- - task: CopyFiles@2
- displayName: Gather binaries for publish to special artifacts path
- inputs:
- SourceFolder: '$(Build.SourcesDirectory)/artifacts/$(_PublishArtifacts)'
- Contents: '**'
- TargetFolder: $(Build.ArtifactStagingDirectory)/artifacts/$(_ArtifactsTargetPath)
- condition: and(ne(variables['_PublishArtifacts'], ''), ne(variables['_ArtifactsTargetPath'], ''))
-
- - task: CopyFiles@2
- displayName: Gather binaries for publish to artifacts
- inputs:
- SourceFolder: '$(Build.SourcesDirectory)/artifacts/$(_PublishArtifacts)'
- Contents: '**'
- TargetFolder: $(Build.ArtifactStagingDirectory)/artifacts/$(_PublishArtifacts)
- condition: and(ne(variables['_PublishArtifacts'], ''), eq(variables['_ArtifactsTargetPath'], ''))
-
- - task: PublishBuildArtifacts@1
- displayName: Publish Build Artifacts
- inputs:
- pathtoPublish: '$(Build.ArtifactStagingDirectory)/artifacts'
- artifactName: Build_$(_BuildConfig)
- condition: ne(variables['_PublishArtifacts'], '')
-
- - task: PublishBuildArtifacts@1
- displayName: Publish Artifacts on failure
- inputs:
- PathtoPublish: '$(Build.SourcesDirectory)/artifacts/bin'
- PublishLocation: Container
- ArtifactName: Artifacts_$(_PhaseName)_$(_BuildArch)_$(_BuildConfig)
- continueOnError: true
- condition: failed()
-
- - task: PublishBuildArtifacts@1
- displayName: Publish Dump Artifacts on failure
- inputs:
- PathtoPublish: '$(Build.SourcesDirectory)/artifacts/tmp/$(_BuildConfig)/dumps'
- PublishLocation: Container
- ArtifactName: Dumps_$(_PhaseName)_$(_BuildArch)_$(_BuildConfig)
- continueOnError: true
- condition: failed()
-
- - task: PublishBuildArtifacts@1
- displayName: Publish Stream Artifacts on failure
- inputs:
- PathtoPublish: $(_Pipeline_StreamDumpDir)
- PublishLocation: Container
- ArtifactName: Streams_$(_PhaseName)_$(_BuildArch)_$(_BuildConfig)
- continueOnError: true
- condition: failed()
-
- - task: CopyFiles@2
- displayName: Gather Logs
- inputs:
- sourceFolder: '$(Build.SourcesDirectory)/artifacts'
- contents: '?(log|TestResults)/**'
- targetFolder: '$(Build.StagingDirectory)/BuildLogs'
- continueOnError: true
- condition: always()
-
- - task: PublishBuildArtifacts@1
- displayName: Publish Logs
- inputs:
- PathtoPublish: '$(Build.StagingDirectory)/BuildLogs'
- PublishLocation: Container
- ArtifactName: Logs_$(_PhaseName)_$(_BuildArch)_$(_BuildConfig)
- continueOnError: true
- condition: always()
-
- - ${{ if and(eq(parameters.buildOnly, 'false'), eq(parameters.isCodeQLRun, 'false')) }}:
- # Publish test results to Azure Pipelines
- - task: PublishTestResults@2
- inputs:
- testResultsFormat: xUnit
- testResultsFiles: '**/*UnitTests*.xml'
- searchFolder: '$(Build.SourcesDirectory)/artifacts/TestResults'
- failTaskOnFailedTests: true
- testRunTitle: 'Tests $(_PhaseName) $(_BuildArch) $(_BuildConfig)'
- publishRunAttachments: true
- mergeTestResults: true
- buildConfiguration: ${{ parameters.name }}
- continueOnError: true
- condition: always()
+++ /dev/null
-parameters:
- - name: stages
- type: stageList
-
-resources:
- containers:
- - container: linux_x64
- image: mcr.microsoft.com/dotnet-buildtools/prereqs:centos-7
-
- - container: linux_arm
- image: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-cross
- env:
- ROOTFS_DIR: /crossrootfs/arm
-
- - container: linux_arm64
- image: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-cross-arm64
- env:
- ROOTFS_DIR: /crossrootfs/arm64
-
- - container: linux_musl_x64
- image: mcr.microsoft.com/dotnet-buildtools/prereqs:alpine-3.13-WithNode
-
- - container: linux_musl_arm
- image: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-cross-arm-alpine
- env:
- ROOTFS_DIR: /crossrootfs/arm
-
- - container: linux_musl_arm64
- image: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04-cross-arm64-alpine
- env:
- ROOTFS_DIR: /crossrootfs/arm64
-
- - container: test_linux_x64
- image: mcr.microsoft.com/dotnet-buildtools/prereqs:centos-7
-
- - container: test_linux_musl_x64
- image: mcr.microsoft.com/dotnet-buildtools/prereqs:alpine-3.13-WithNode
- options: --cap-add=SYS_PTRACE
-
- - container: test_debian_11_amd64
- image: mcr.microsoft.com/dotnet-buildtools/prereqs:debian-11-amd64
- options: '--env PYTHONPATH=/usr/bin/python3.9'
-
- - container: test_fedora_36
- image: mcr.microsoft.com/dotnet-buildtools/prereqs:fedora-36
- options: --cap-add=SYS_PTRACE
-
- - container: test_opensuse_15_2
- image: mcr.microsoft.com/dotnet-buildtools/prereqs:opensuse-15.2-helix-amd64
-
- - container: test_ubuntu_18_04
- image: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-18.04
-
- - container: test_ubuntu_20_04
- image: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-20.04
- options: '--env PYTHONPATH=/usr/bin/python3.8'
-
- - container: test_ubuntu_22_04
- image: mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-22.04
- options: '--env PYTHONPATH=/usr/bin/python3.10'
-
-stages: ${{ parameters.stages }}
+++ /dev/null
-stages:
-- stage: PrepareReleaseStage
- displayName: Release Preparation
- ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest'), startsWith(variables['Build.SourceBranch'], 'refs/heads/release/')) }}:
- dependsOn:
- - publish_using_darc
- jobs:
- - job: PrepareReleaseJob
- displayName: Prepare release with Darc
- pool:
- ${{ if eq(variables['System.TeamProject'], 'public') }}:
- name: NetCore-Public
- demands: ImageOverride -equals windows.vs2022.amd64.open
- ${{ if eq(variables['System.TeamProject'], 'internal') }}:
- name: NetCore1ESPool-Internal
- demands: ImageOverride -equals windows.vs2022.amd64
- variables:
- - ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest'), startsWith(variables['Build.SourceBranch'], 'refs/heads/release/')) }}:
- - group: DotNet-Diagnostics-Storage
- - group: DotNet-DotNetStage-Storage
- - group: Release-Pipeline
- steps:
- - ${{ if in(variables['Build.Reason'], 'PullRequest') }}:
- - script: '$(Build.Repository.LocalPath)\dotnet.cmd build $(Build.Repository.LocalPath)\eng\release\DiagnosticsReleaseTool\DiagnosticsReleaseTool.csproj -c Release /bl'
- workingDirectory: '$(System.ArtifactsDirectory)'
- displayName: 'Build Manifest generation and asset publishing tool'
- - task: PublishPipelineArtifact@1
- inputs:
- targetPath: '$(System.ArtifactsDirectory)'
- publishLocation: 'pipeline'
- artifact: 'DiagnosticsReleaseToolBin'
- - ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest'), startsWith(variables['Build.SourceBranch'], 'refs/heads/release/')) }}:
- - task: UseDotNet@2
- displayName: 'Use .NET Core runtime 6.x'
- inputs:
- packageType: runtime
- version: 6.x
- installationPath: '$(Build.Repository.LocalPath)\.dotnet'
- - template: /eng/common/templates/post-build/setup-maestro-vars.yml
- - task: PowerShell@2
- displayName: 'DARC Gather build'
- inputs:
- targetType: filePath
- filePath: '$(Build.Repository.LocalPath)/eng/release/Scripts/AcquireBuild.ps1'
- arguments: >-
- -BarBuildId "$(BARBuildId)"
- -AzdoToken "$(dn-bot-dotnet-all-scopes)"
- -MaestroToken "$(MaestroAccessToken)"
- -GitHubToken "$(BotAccount-dotnet-bot-repo-PAT)"
- -DownloadTargetPath "$(System.ArtifactsDirectory)\ReleaseTarget"
- -SasSuffixes "$(dotnetclichecksumsmsrc-dotnet-read-list-sas-token),$(dotnetclimsrc-read-sas-token)"
- -ReleaseVersion "$(Build.BuildNumber)"
- workingDirectory: '$(Build.Repository.LocalPath)'
- - script: >-
- dotnet.cmd run --project $(Build.Repository.LocalPath)\eng\release\DiagnosticsReleaseTool\DiagnosticsReleaseTool.csproj -c Release
- --
- prepare-release
- --input-drop-path "$(System.ArtifactsDirectory)\ReleaseTarget"
- --tool-manifest "$(Build.Repository.LocalPath)\eng\release\tool-list.json"
- --staging-directory "$(System.ArtifactsDirectory)\ReleaseStaging"
- --release-name "$(Build.BuildNumber)"
- --account-name "$(dotnet-diagnostics-storage-accountname)"
- --account-key "$(dotnetstage-storage-key)"
- --container-name "$(dotnet-diagnostics-container-name)"
- --sas-valid-days "$(dotnet-diagnostics-storage-retentiondays)"
- -v True
- workingDirectory: '$(Build.Repository.LocalPath)\'
- displayName: 'Manifest generation and asset publishing'
- - task: PublishPipelineArtifact@1
- inputs:
- targetPath: '$(System.ArtifactsDirectory)\ReleaseStaging'
- publishLocation: 'pipeline'
- artifact: 'DiagnosticsRelease'
--- /dev/null
+stages:
+- stage: PrepareReleaseStage
+ displayName: Release Preparation
+ ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest'), startsWith(variables['Build.SourceBranch'], 'refs/heads/release/')) }}:
+ dependsOn:
+ - publish_using_darc
+ jobs:
+ - job: PrepareReleaseJob
+ displayName: Prepare release with Darc
+ pool:
+ ${{ if eq(variables['System.TeamProject'], 'public') }}:
+ name: NetCore-Public
+ demands: ImageOverride -equals windows.vs2022.amd64.open
+ ${{ if eq(variables['System.TeamProject'], 'internal') }}:
+ name: NetCore1ESPool-Internal
+ demands: ImageOverride -equals windows.vs2022.amd64
+ variables:
+ - ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest'), startsWith(variables['Build.SourceBranch'], 'refs/heads/release/')) }}:
+ - group: DotNet-Diagnostics-Storage
+ - group: DotNet-DotNetStage-Storage
+ - group: Release-Pipeline
+ steps:
+ - ${{ if in(variables['Build.Reason'], 'PullRequest') }}:
+ - script: '$(Build.Repository.LocalPath)\dotnet.cmd build $(Build.Repository.LocalPath)\eng\release\DiagnosticsReleaseTool\DiagnosticsReleaseTool.csproj -c Release /bl'
+ workingDirectory: '$(System.ArtifactsDirectory)'
+ displayName: 'Build Manifest generation and asset publishing tool'
+ - task: PublishPipelineArtifact@1
+ inputs:
+ targetPath: '$(System.ArtifactsDirectory)'
+ publishLocation: 'pipeline'
+ artifact: 'DiagnosticsReleaseToolBin'
+ - ${{ if and(ne(variables['System.TeamProject'], 'public'), notin(variables['Build.Reason'], 'PullRequest'), startsWith(variables['Build.SourceBranch'], 'refs/heads/release/')) }}:
+ - task: UseDotNet@2
+ displayName: 'Use .NET Core runtime 6.x'
+ inputs:
+ packageType: runtime
+ version: 6.x
+ installationPath: '$(Build.Repository.LocalPath)\.dotnet'
+ - template: /eng/common/templates/post-build/setup-maestro-vars.yml
+ - task: PowerShell@2
+ displayName: 'DARC Gather build'
+ inputs:
+ targetType: filePath
+ filePath: '$(Build.Repository.LocalPath)/eng/release/Scripts/AcquireBuild.ps1'
+ arguments: >-
+ -BarBuildId "$(BARBuildId)"
+ -AzdoToken "$(dn-bot-dotnet-all-scopes)"
+ -MaestroToken "$(MaestroAccessToken)"
+ -GitHubToken "$(BotAccount-dotnet-bot-repo-PAT)"
+ -DownloadTargetPath "$(System.ArtifactsDirectory)\ReleaseTarget"
+ -SasSuffixes "$(dotnetclichecksumsmsrc-dotnet-read-list-sas-token),$(dotnetclimsrc-read-sas-token)"
+ -ReleaseVersion "$(Build.BuildNumber)"
+ workingDirectory: '$(Build.Repository.LocalPath)'
+ - script: >-
+ dotnet.cmd run --project $(Build.Repository.LocalPath)\eng\release\DiagnosticsReleaseTool\DiagnosticsReleaseTool.csproj -c Release
+ --
+ prepare-release
+ --input-drop-path "$(System.ArtifactsDirectory)\ReleaseTarget"
+ --tool-manifest "$(Build.Repository.LocalPath)\eng\release\tool-list.json"
+ --staging-directory "$(System.ArtifactsDirectory)\ReleaseStaging"
+ --release-name "$(Build.BuildNumber)"
+ --account-name "$(dotnet-diagnostics-storage-accountname)"
+ --account-key "$(dotnetstage-storage-key)"
+ --container-name "$(dotnet-diagnostics-container-name)"
+ --sas-valid-days "$(dotnet-diagnostics-storage-retentiondays)"
+ -v True
+ workingDirectory: '$(Build.Repository.LocalPath)\'
+ displayName: 'Manifest generation and asset publishing'
+ - task: PublishPipelineArtifact@1
+ inputs:
+ targetPath: '$(System.ArtifactsDirectory)\ReleaseStaging'
+ publishLocation: 'pipeline'
+ artifact: 'DiagnosticsRelease'
{
"tools": {
- "dotnet": "8.0.100-preview.4.23260.5",
+ "dotnet": "8.0.100-preview.1.23115.2",
"runtimes": {
"dotnet": [
"$(MicrosoftNETCoreApp60Version)",
},
"msbuild-sdks": {
"Microsoft.Build.NoTargets": "3.5.0",
- "Microsoft.DotNet.Arcade.Sdk": "8.0.0-beta.23302.3"
+ "Microsoft.DotNet.Arcade.Sdk": "8.0.0-beta.23168.1"
}
}
try
{
byte[] registerContext = ThreadService.GetThreadFromId(threadId).GetThreadContext();
- registerContext.AsSpan().Slice(0, context.Length).CopyTo(context);
+ context = new Span<byte>(registerContext);
return true;
}
- catch (Exception ex) when (ex is DiagnosticsException or ArgumentException)
+ catch (DiagnosticsException ex)
{
Trace.TraceError($"GetThreadContext: {threadId} exception {ex.Message}");
}
bool IMemoryReader.Read<T>(ulong address, out T value)
{
- Span<byte> buffer = stackalloc byte[Unsafe.SizeOf<T>()];
+ Span<byte> buffer = stackalloc byte[Marshal.SizeOf<T>()];
if (((IMemoryReader)this).Read(address, buffer) == buffer.Length)
{
value = Unsafe.As<byte, T>(ref MemoryMarshal.GetReference(buffer));
public class Runtime : IRuntime, IDisposable
{
private readonly ClrInfo _clrInfo;
+ private readonly IDisposable _onFlushEvent;
private readonly ISymbolService _symbolService;
private Version _runtimeVersion;
private string _dacFilePath;
_serviceContainer.AddService<IRuntime>(this);
_serviceContainer.AddService(clrInfo);
+ _onFlushEvent = Target.OnFlushEvent.Register(Flush);
+
Trace.TraceInformation($"Created runtime #{id} {clrInfo.Flavor} {clrInfo}");
}
void IDisposable.Dispose()
+ {
+ _serviceContainer.RemoveService(typeof(IRuntime));
+ _serviceContainer.DisposeServices();
+ _onFlushEvent.Dispose();
+ }
+
+ private void Flush()
{
if (_serviceContainer.TryGetCachedService(typeof(ClrRuntime), out object service))
{
- // The DataTarget created in the RuntimeProvider is disposed here. The ClrRuntime
- // instance is disposed below in DisposeServices().
- ((ClrRuntime)service).DataTarget.Dispose();
+ ((ClrRuntime)service).FlushCachedData();
}
- _serviceContainer.RemoveService(typeof(IRuntime));
- _serviceContainer.DisposeServices();
}
#region IRuntime
/// <param name="startingRuntimeId">The starting runtime id for this provider</param>
public IEnumerable<IRuntime> EnumerateRuntimes(int startingRuntimeId)
{
- // The ClrInfo and DataTarget instances are disposed when Runtime instance is disposed. Runtime instances are
- // not flushed when the Target/RuntimeService is flushed; they are all disposed and the list cleared. They are
- // all re-created the next time the IRuntime or ClrRuntime instance is queried.
DataTarget dataTarget = new(new CustomDataTarget(_services.GetService<IDataReader>()))
{
FileLocator = null
switch (generation)
{
case GCGeneration.Generation0:
- if (segment.Kind == GCSegmentKind.Generation0 || segment.Kind == GCSegmentKind.Ephemeral)
- {
- start = segment.Generation0.Start;
- end = segment.Generation0.End;
- }
- break;
+ start = segment.Generation0.Start;
+ end = segment.Generation0.End;
+ return start != end;
case GCGeneration.Generation1:
- if (segment.Kind == GCSegmentKind.Generation1 || segment.Kind == GCSegmentKind.Ephemeral)
- {
- start = segment.Generation1.Start;
- end = segment.Generation1.End;
- }
- break;
+ start = segment.Generation1.Start;
+ end = segment.Generation1.End;
+ return start != end;
case GCGeneration.Generation2:
- if (segment.Kind == GCSegmentKind.Generation2 || segment.Kind == GCSegmentKind.Ephemeral)
+                    if (segment.Kind != GCSegmentKind.Large && segment.Kind != GCSegmentKind.Pinned && segment.Kind != GCSegmentKind.Frozen)
{
start = segment.Generation2.Start;
end = segment.Generation2.End;
}
- break;
+ return start != end;
case GCGeneration.LargeObjectHeap:
if (segment.Kind == GCSegmentKind.Large)
{
start = segment.Start;
end = segment.End;
}
- break;
+ return start != end;
case GCGeneration.PinnedObjectHeap:
- if (segment.Kind == GCSegmentKind.Pinned)
+ if (segment.Kind == GCSegmentKind.Pinned || segment.Kind == GCSegmentKind.Frozen)
{
start = segment.Start;
end = segment.End;
}
- break;
- case GCGeneration.FrozenObjectHeap:
- if (segment.Kind == GCSegmentKind.Frozen)
- {
- start = segment.Start;
- end = segment.End;
- }
- break;
+ return start != end;
default:
return false;
}
- return start != end;
}
public IEnumerable<string> EnumerateConcurrentQueue(ulong address)
[ServiceImport(Optional = true)]
public ClrRuntime? Runtime { get; set; }
-
/// <summary>Gets whether to only show stacks that include the object with the specified address.</summary>
[Option(Name = "--address", Aliases = new string[] { "-addr" }, Help = "Only show stacks that include the object with the specified address.")]
- public string? ObjectAddress
- {
- get => _objectAddress?.ToString();
- set => _objectAddress = ParseAddress(value);
- }
- private ulong? _objectAddress;
+ public ulong? ObjectAddress { get; set; }
/// <summary>Gets whether to only show stacks that include objects with the specified method table.</summary>
[Option(Name = "--methodtable", Aliases = new string[] { "-mt" }, Help = "Only show stacks that include objects with the specified method table.")]
- public string? MethodTableAddress
- {
- get => _methodTableAddress?.ToString();
- set => _methodTableAddress = ParseAddress(value);
- }
- private ulong? _methodTableAddress;
+ public ulong? MethodTableAddress { get; set; }
/// <summary>Gets whether to only show stacks that include objects whose type includes the specified name in its name.</summary>
[Option(Name = "--type", Help = "Only show stacks that include objects whose type includes the specified name in its name.")]
// <summary>Determines whether the specified object is of interest to the user based on their criteria provided as command arguments.</summary>
bool IncludeInOutput(ClrObject obj)
{
- if (_objectAddress is ulong addr && obj.Address != addr)
+ if (ObjectAddress is ulong addr && obj.Address != addr)
{
return false;
}
if (obj.Type is not null)
{
- if (_methodTableAddress is ulong mt && obj.Type.MethodTable != mt)
+ if (MethodTableAddress is ulong mt && obj.Type.MethodTable != mt)
{
return false;
}
return GCGeneration.NotSet;
}
string lowerString = generation.ToLowerInvariant();
- GCGeneration result = lowerString switch
+ switch (lowerString)
{
- "gen0" => GCGeneration.Generation0,
- "gen1" => GCGeneration.Generation1,
- "gen2" => GCGeneration.Generation2,
- "loh" => GCGeneration.LargeObjectHeap,
- "poh" => GCGeneration.PinnedObjectHeap,
- "foh" => GCGeneration.FrozenObjectHeap,
- _ => GCGeneration.NotSet,
- };
- if (result == GCGeneration.NotSet)
- {
- WriteLine($"{generation} is not a supported generation (gen0, gen1, gen2, loh, poh, foh)");
+ case "gen0":
+ return GCGeneration.Generation0;
+ case "gen1":
+ return GCGeneration.Generation1;
+ case "gen2":
+ return GCGeneration.Generation2;
+ case "loh":
+ return GCGeneration.LargeObjectHeap;
+ case "poh":
+ return GCGeneration.PinnedObjectHeap;
+ default:
+ WriteLine($"{generation} is not a supported generation (gen0, gen1, gen2, loh, poh)");
+ return GCGeneration.NotSet;
}
- return result;
}
- gen2
- loh
- poh
-- foh
> dumpgen gen0
Statistics:
ParseArguments();
IEnumerable<ClrObject> objectsToPrint = FilteredHeap.EnumerateFilteredObjects(Console.CancellationToken);
-
- bool? liveObjectWarning = null;
- if ((Live || Dead) && Short)
- {
- liveObjectWarning = LiveObjects.PrintWarning;
- LiveObjects.PrintWarning = false;
- }
-
if (Live)
{
objectsToPrint = objectsToPrint.Where(LiveObjects.IsLive);
}
DumpHeap.PrintHeap(objectsToPrint, displayKind, StatOnly, printFragmentation);
-
- if (liveObjectWarning is bool original)
- {
- LiveObjects.PrintWarning = original;
- }
}
private void ParseArguments()
using System.Diagnostics;
using System.Linq;
using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
using Microsoft.Diagnostics.Runtime;
+using static Microsoft.Diagnostics.ExtensionCommands.TableOutput;
namespace Microsoft.Diagnostics.ExtensionCommands
{
Dictionary<(string String, ulong Size), uint> stringTable = null;
Dictionary<ulong, (int Count, ulong Size, string TypeName)> stats = new();
- Table thinLockOutput = null;
- Table objectTable = null;
+ TableOutput thinLockOutput = null;
+ TableOutput objectTable = new(Console, (12, "x12"), (12, "x12"), (12, ""), (0, ""));
+ if (!statsOnly && (displayKind is DisplayKind.Normal or DisplayKind.Strings))
+ {
+ objectTable.WriteRow("Address", "MT", "Size");
+ }
ClrObject lastFreeObject = default;
foreach (ClrObject obj in objects)
{
- Console.CancellationToken.ThrowIfCancellationRequested();
-
if (displayKind == DisplayKind.ThinLock)
{
ClrThinLock thinLock = obj.GetThinLock();
{
if (thinLockOutput is null)
{
- thinLockOutput = new(Console, ColumnKind.DumpObj, ColumnKind.Pointer, ColumnKind.HexValue, ColumnKind.Integer);
- thinLockOutput.WriteHeader("Object", "Thread", "OSId", "Recursion");
+ thinLockOutput = new(Console, (12, "x"), (16, "x"), (16, "x"), (10, "n0"));
+ thinLockOutput.WriteRow("Object", "Thread", "OSId", "Recursion");
}
- thinLockOutput.WriteRow(obj, thinLock.Thread, thinLock.Thread?.OSThreadId ?? 0, thinLock.Recursion);
+ thinLockOutput.WriteRow(new DmlDumpObj(obj), thinLock.Thread?.Address ?? 0, thinLock.Thread?.OSThreadId ?? 0, thinLock.Recursion);
}
continue;
ulong size = obj.IsValid ? obj.Size : 0;
if (!statsOnly)
{
- if (objectTable is null)
- {
- objectTable = new(Console, ColumnKind.DumpObj, ColumnKind.DumpHeap, ColumnKind.ByteCount, ColumnKind.Text);
- if (displayKind is DisplayKind.Normal or DisplayKind.Strings)
- {
- objectTable.WriteHeader("Address", "MT", "Size");
- }
- }
-
- objectTable.WriteRow(obj, obj.Type, obj.IsValid ? size : null, obj.IsFree ? "Free" : "");
+ objectTable.WriteRow(new DmlDumpObj(obj), new DmlDumpHeap(obj.Type?.MethodTable ?? 0), size, obj.IsFree ? "Free" : "");
}
if (printFragmentation)
}
Console.WriteLine("Statistics:");
- Table statsTable = new(Console, ColumnKind.Integer, ColumnKind.ByteCount, ColumnKind.Text);
+ TableOutput statsTable = new(Console, (countLen, "n0"), (sizeLen, "n0"), (0, ""));
var stringsSorted = from item in stringTable
let Count = item.Value
foreach (var item in stringsSorted)
{
- Console.CancellationToken.ThrowIfCancellationRequested();
-
statsTable.WriteRow(item.Count, item.TotalSize, item.String);
}
}
}
else if (displayKind == DisplayKind.Normal)
{
- // Print statistics table
if (stats.Count != 0)
{
// Print statistics table
Console.WriteLine();
}
- Console.WriteLine("Statistics:");
+ int countLen = stats.Values.Max(ts => ts.Count).ToString("n0").Length;
+ countLen = Math.Max(countLen, "Count".Length);
- Column countColumn = ColumnKind.Integer;
- countColumn = countColumn.GetAppropriateWidth(stats.Values.Select(ts => ts.Count));
+ int sizeLen = stats.Values.Max(ts => ts.Size).ToString("n0").Length;
+ sizeLen = Math.Max(sizeLen, "TotalSize".Length);
- Column sizeColumn = ColumnKind.ByteCount;
- sizeColumn = sizeColumn.GetAppropriateWidth(stats.Values.Select(ts => ts.Size));
+ TableOutput statsTable = new(Console, (12, "x12"), (countLen, "n0"), (sizeLen, "n0"), (0, ""));
- Column methodTableColumn = ColumnKind.DumpHeap.GetAppropriateWidth(stats.Keys);
- Table statsTable = new(Console, methodTableColumn, countColumn, sizeColumn, ColumnKind.TypeName);
- statsTable.WriteHeader("MT", "Count", "TotalSize", "Class Name");
+ Console.WriteLine("Statistics:");
+ statsTable.WriteRow("MT", "Count", "TotalSize", "Class Name");
var statsSorted = from item in stats
let MethodTable = item.Key
foreach (var item in statsSorted)
{
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- statsTable.WriteRow(item.MethodTable, item.Count, item.Size, item.TypeName);
+ statsTable.WriteRow(new DmlDumpHeap(item.MethodTable), item.Count, item.Size, item.TypeName);
}
Console.WriteLine($"Total {stats.Values.Sum(r => r.Count):n0} objects, {stats.Values.Sum(r => (long)r.Size):n0} bytes");
return;
}
+ TableOutput output = new(Console, (16, "x12"), (12, "n0"), (16, "x12"));
+
Console.WriteLine();
Console.WriteLine("Fragmented blocks larger than 0.5 MB:");
-
- Table output = new(Console, ColumnKind.ListNearObj, ColumnKind.ByteCount, ColumnKind.DumpObj, ColumnKind.TypeName);
- output.WriteHeader("Address", "Size", "Followed By");
+ output.WriteRow("Address", "Size", "Followed By");
foreach ((ClrObject free, ClrObject next) in fragmentation)
{
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- output.WriteRow(free.Address, free.Size, next.Address, next.Type);
+ output.WriteRow(free.Address, free.Size, new DmlDumpObj(next.Address), next.Type?.Name ?? "<unknown_type>");
}
}
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System;
-using System.Linq;
-using System.Text;
-using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
-using Microsoft.Diagnostics.Runtime;
-
-namespace Microsoft.Diagnostics.ExtensionCommands
-{
- [Command(Name = "dumpobjgcrefs", Help = "A helper command to implement !dumpobj -refs")]
- public sealed class DumpObjGCRefsHelper : CommandBase
- {
- private readonly StringBuilderPool _stringBuilderPool = new(260);
-
- [ServiceImport]
- public ClrRuntime Runtime { get; set; }
-
- [Argument(Name = "object")]
- public string ObjectAddress { get; set; }
-
- public override void Invoke()
- {
- if (!TryParseAddress(ObjectAddress, out ulong objAddress))
- {
- throw new ArgumentException($"Invalid object address: '{ObjectAddress}'", nameof(ObjectAddress));
- }
-
- ClrObject obj = Runtime.Heap.GetObject(objAddress);
- if (!obj.IsValid)
- {
- Console.WriteLine($"Unable to walk object references, invalid object.");
- return;
- }
-
- ClrReference[] refs = obj.EnumerateReferencesWithFields(carefully: false, considerDependantHandles: false).ToArray();
- if (refs.Length == 0)
- {
- Console.WriteLine("GC Refs: none");
- return;
- }
-
- Console.WriteLine("GC Refs:");
-
- Column fieldNameColumn = ColumnKind.Text.GetAppropriateWidth(refs.Select(r => GetFieldName(r)));
- Column offsetName = ColumnKind.HexOffset.GetAppropriateWidth(refs.Select(r => r.Offset));
-
- Table output = new(Console, fieldNameColumn, offsetName, ColumnKind.DumpObj, ColumnKind.TypeName);
- output.WriteHeader("Field", "Offset", "Object", "Type");
- foreach (ClrReference objRef in refs)
- {
- output.WriteRow(GetFieldName(objRef), objRef.Offset, objRef.Object, objRef.Object.Type);
- }
- }
-
- private string GetFieldName(ClrReference objRef)
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- if (objRef.Field is null)
- {
- return null;
- }
-
- if (objRef.InnerField is null)
- {
- return objRef.Field?.Name;
- }
-
- StringBuilder sb = _stringBuilderPool.Rent();
- bool foundOneFieldName = false;
-
- for (ClrReference? curr = objRef; curr.HasValue; curr = curr.Value.InnerField)
- {
- if (sb.Length > 0)
- {
- sb.Append('.');
- }
-
- string fieldName = curr.Value.Field?.Name;
- if (string.IsNullOrWhiteSpace(fieldName))
- {
- sb.Append("???");
- }
- else
- {
- sb.Append(fieldName);
- foundOneFieldName = true;
- }
- }
-
- // Make sure we don't just return "???.???.???"
- string result = foundOneFieldName ? sb.ToString() : null;
- _stringBuilderPool.Return(sb);
- return result;
- }
- }
-}
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
-using Microsoft.Diagnostics.Runtime;
-using static Microsoft.Diagnostics.ExtensionCommands.Output.ColumnKind;
-
-namespace Microsoft.Diagnostics.ExtensionCommands
-{
- [Command(Name = "dumpruntimetypes", Help = "Finds all System.RuntimeType objects in the GC heap and prints the type name and MethodTable they refer too.")]
- public sealed class DumpRuntimeTypeCommand : CommandBase
- {
- [ServiceImport]
- public ClrRuntime Runtime { get; set; }
-
- public override void Invoke()
- {
- Table output = null;
-
- foreach (ClrObject runtimeType in Runtime.Heap.EnumerateObjects())
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- if (!runtimeType.IsValid || !runtimeType.IsRuntimeType)
- {
- continue;
- }
-
- if (!runtimeType.TryReadField("m_handle", out nuint m_handle))
- {
- continue;
- }
-
- ClrAppDomain domain = null;
- object typeName = m_handle;
- bool isMethodTable = (m_handle & 2) == 0;
- if (isMethodTable)
- {
- // Only lookup the type if we have a MethodTable.
- ClrType type = Runtime.GetTypeByMethodTable(m_handle);
- if (type is not null)
- {
- typeName = type;
- domain = type.Module?.AppDomain;
- }
- }
- else
- {
- typeName = $"typehandle: {m_handle:x} (SOS does not support resolving typehandle names.)";
- }
-
- if (output is null)
- {
- output = new(Console, DumpObj, DumpDomain, DumpHeap, TypeName);
- output.WriteHeader("Address", "Domain", "MT", "Type Name");
- }
-
- // We pass .Address here instead of the ClrObject because every type is a RuntimeType, we don't need
- // or want the alt-text.
- output.WriteRow(runtimeType.Address, domain, m_handle, typeName);
- }
-
- if (output is null)
- {
- Console.WriteLine("No System.RuntimeType objects found.");
- }
- }
- }
-}
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System;
-using System.Buffers;
-using System.Collections.Generic;
-using System.Collections.Immutable;
-using System.Diagnostics;
-using System.Linq;
-using System.Runtime.CompilerServices;
-using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
-using Microsoft.Diagnostics.Runtime;
-using static Microsoft.Diagnostics.ExtensionCommands.Output.ColumnKind;
-
-namespace Microsoft.Diagnostics.ExtensionCommands
-{
- [Command(Name = "dumpstackobjects", Aliases = new string[] { "dso" }, Help = "Displays all managed objects found within the bounds of the current stack.")]
- public class DumpStackObjectsCommand : CommandBase
- {
- [ServiceImport]
- public IMemoryService MemoryService { get; set; }
-
- [ServiceImport]
- public IThread CurrentThread { get; set; }
-
- [ServiceImport]
- public IThreadService ThreadService { get; set; }
-
- [ServiceImport]
- public ClrRuntime Runtime { get; set; }
-
- [Option(Name = "-verify", Help = "Verify each object and only print ones that are valid objects.")]
- public bool Verify { get; set; }
-
- [Argument(Name = "StackBounds", Help = "The top and bottom of the stack (in hex).")]
- public string[] Bounds { get; set; }
-
- public override void Invoke()
- {
- if (Runtime.Heap.Segments.Length == 0)
- {
- throw new DiagnosticsException("Cannot walk heap.");
- }
-
- MemoryRange range;
- if (Bounds is null || Bounds.Length == 0)
- {
- range = GetStackRange();
- }
- else if (Bounds.Length == 2)
- {
- ulong start = ParseAddress(Bounds[0]) ?? throw new ArgumentException($"Failed to parse start address '{Bounds[0]}'.");
- ulong end = ParseAddress(Bounds[1]) ?? throw new ArgumentException($"Failed to parse end address '{Bounds[1]}'.");
- if (start > end)
- {
- (start, end) = (end, start);
- }
-
- range = new(AlignDown(start), AlignUp(end));
- }
- else
- {
- throw new ArgumentException("Invalid arguments.");
- }
-
- if (range.Start == 0 || range.End == 0)
- {
- throw new ArgumentException($"Invalid range {range.Start:x} - {range.End:x}");
- }
-
- PrintStackObjects(range);
- }
-
- private void PrintStackObjects(MemoryRange stack)
- {
- Console.WriteLine($"OS Thread Id: 0x{CurrentThread.ThreadId:x} ({CurrentThread.ThreadIndex})");
-
- Table output = new(Console, Pointer, DumpObj, TypeName);
- output.WriteHeader("SP/REG", "Object", "Name");
-
- int regCount = ThreadService.Registers.Count();
- foreach ((ulong address, ClrObject obj) in EnumerateValidObjectsWithinRange(stack).OrderBy(r => r.StackAddress))
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- if (address < (ulong)regCount)
- {
- string registerName;
- if (ThreadService.TryGetRegisterInfo((int)address, out RegisterInfo regInfo))
- {
- registerName = regInfo.RegisterName;
- }
- else
- {
- registerName = $"reg{address}";
- }
-
- output.WriteRow(registerName, obj, obj.Type);
- }
- else
- {
- output.WriteRow(address, obj, obj.Type);
- }
- }
- }
-
- /// <summary>
- /// Enumerates all valid objects (and the address they came from) within the given range.
- /// </summary>
- private IEnumerable<(ulong StackAddress, ClrObject Object)> EnumerateValidObjectsWithinRange(MemoryRange range)
- {
- // Note: This implementation is careful to enumerate only real objects and not generate a lot of native
- // exceptions within the dac. A naïve implementation could simply read every pointer aligned address
- // and call ClrHeap.GetObject(objAddr).IsValid. That approach will generate a lot of exceptions
- // within the dac trying to validate wild pointers as MethodTables, and it will often find old
- // pointers which the GC has already swept but not zeroed yet.
-
- // Sort the list of potential objects so that we can go through each in segment order.
- // Sorting this array saves us a lot of time by not searching for segments.
- IEnumerable<(ulong StackAddress, ulong PotentialObject)> potentialObjects = EnumeratePointersWithinHeapBounds(range);
- potentialObjects = potentialObjects.Concat(EnumerateRegistersWithinHeapBounds());
- potentialObjects = potentialObjects.OrderBy(r => r.PotentialObject);
-
- ClrSegment currSegment = null;
- List<(ulong StackAddress, ulong PotentialObject)> withinCurrSegment = new(64);
- int segmentIndex = 0;
- foreach ((ulong _, ulong PotentialObject) entry in potentialObjects)
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- // Find the segment of the current potential object, or null if it doesn't live
- // within a segment.
- ClrSegment segment = GetSegment(entry.PotentialObject, ref segmentIndex);
- if (segment is null)
- {
- continue;
- }
-
- // If we are already processing this segment, just add the entry to the list
- if (currSegment == segment)
- {
- withinCurrSegment.Add(entry);
- continue;
- }
-
- // We are finished walking objects from "currSegment". If we found any pointers
- // within its range, walk the segment and return every valid object.
- if (withinCurrSegment.Count > 0)
- {
- foreach ((ulong StackAddress, ClrObject Object) validObject in EnumerateObjectsOnSegment(withinCurrSegment, currSegment))
- {
- yield return validObject;
- }
-
- withinCurrSegment.Clear();
- }
-
- // Update currSegment and add this entry to the processing list.
- currSegment = segment;
- withinCurrSegment.Add(entry);
- }
-
- // Process leftover items
- if (withinCurrSegment.Count > 0)
- {
- foreach ((ulong StackAddress, ClrObject Object) validObject in EnumerateObjectsOnSegment(withinCurrSegment, currSegment))
- {
- yield return validObject;
- }
- }
- }
-
- /// <summary>
- /// Simultaneously walks the withinCurrSegment list and objects on segment returning valid objects found.
- /// </summary>
- private IEnumerable<(ulong StackAddress, ClrObject Object)> EnumerateObjectsOnSegment(List<(ulong StackAddress, ulong PotentialObject)> withinCurrSegment, ClrSegment segment)
- {
- if (withinCurrSegment.Count == 0)
- {
- yield break;
- }
-
- int index = 0;
- MemoryRange range = new(withinCurrSegment[0].PotentialObject, withinCurrSegment[withinCurrSegment.Count - 1].PotentialObject + 1);
- foreach (ClrObject obj in segment.EnumerateObjects(range, carefully: true))
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- if (index >= withinCurrSegment.Count)
- {
- yield break;
- }
-
- while (index < withinCurrSegment.Count && withinCurrSegment[index].PotentialObject < obj)
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- index++;
- }
-
- while (index < withinCurrSegment.Count && obj == withinCurrSegment[index].PotentialObject)
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- if (Verify)
- {
- if (!Runtime.Heap.IsObjectCorrupted(obj, out _))
- {
- yield return (withinCurrSegment[index].StackAddress, obj);
- }
- }
- else
- {
- yield return (withinCurrSegment[index].StackAddress, obj);
- }
-
- index++;
- }
- }
- }
-
- private ClrSegment GetSegment(ulong potentialObject, ref int segmentIndex)
- {
- ImmutableArray<ClrSegment> segments = Runtime.Heap.Segments;
-
- // This function assumes that segmentIndex is always within the bounds of segments
- // and that all objects passed to it are within the given
- // range of segment bounds.
- Debug.Assert(segmentIndex >= 0 && segmentIndex <= segments.Length);
- Debug.Assert(segments[0].ObjectRange.Start <= potentialObject);
- Debug.Assert(potentialObject < segments[segments.Length - 1].ObjectRange.End);
-
- for (; segmentIndex < segments.Length; segmentIndex++)
- {
- ClrSegment curr = segments[segmentIndex];
- if (potentialObject < curr.Start)
- {
- return null;
- }
- else if (potentialObject < curr.ObjectRange.End)
- {
- return segments[segmentIndex];
- }
- }
-
- // Unreachable.
- Debug.Fail("Reached the end of the segment array.");
- return null;
- }
-
- private IEnumerable<(ulong RegisterIndex, ulong PotentialObject)> EnumerateRegistersWithinHeapBounds()
- {
- ClrHeap heap = Runtime.Heap;
-
- // Segments are always sorted by address
- ulong minAddress = heap.Segments[0].ObjectRange.Start;
- ulong maxAddress = heap.Segments[heap.Segments.Length - 1].ObjectRange.End - (uint)MemoryService.PointerSize;
-
- int regCount = ThreadService.Registers.Count();
- for (int i = 0; i < regCount; i++)
- {
- if (CurrentThread.TryGetRegisterValue(i, out ulong value))
- {
- if (minAddress <= value && value < maxAddress)
- {
- yield return ((ulong)i, value);
- }
- }
- }
- }
-
- private IEnumerable<(ulong StackAddress, ulong PotentialObject)> EnumeratePointersWithinHeapBounds(MemoryRange stack)
- {
- Debug.Assert(AlignDown(stack.Start) == stack.Start);
- Debug.Assert(AlignUp(stack.End) == stack.End);
-
- uint pointerSize = (uint)MemoryService.PointerSize;
- ClrHeap heap = Runtime.Heap;
-
- // Segments are always sorted by address
- ulong minAddress = heap.Segments[0].ObjectRange.Start;
- ulong maxAddress = heap.Segments[heap.Segments.Length - 1].ObjectRange.End - pointerSize;
-
- // Read in 64k chunks
- byte[] buffer = ArrayPool<byte>.Shared.Rent(64 * 1024);
- try
- {
- ulong address = stack.Start;
- while (stack.Contains(address))
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- if (!MemoryService.ReadMemory(address, buffer, out int read))
- {
- break;
- }
-
- read = AlignDown(read);
- if (read < pointerSize)
- {
- break;
- }
-
- for (int i = 0; i < read; i += (int)pointerSize)
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- ulong stackAddress = address + (uint)i;
- if (!stack.Contains(stackAddress))
- {
- yield break;
- }
-
- ulong potentialObj = GetIndex(buffer, i);
- if (minAddress <= potentialObj && potentialObj < maxAddress)
- {
- yield return (stackAddress, potentialObj);
- }
- }
-
- address += (uint)read;
- }
- }
- finally
- {
- ArrayPool<byte>.Shared.Return(buffer);
- }
- }
-
- private static ulong GetIndex(Span<byte> buffer, int i) => Unsafe.As<byte, nuint>(ref buffer[i]);
-
- private MemoryRange GetStackRange()
- {
- ulong end = 0;
-
- int spIndex = ThreadService.StackPointerIndex;
- if (!CurrentThread.TryGetRegisterValue(spIndex, out ulong stackPointer))
- {
- throw new DiagnosticsException($"Unable to get the stack pointer for thread {CurrentThread.ThreadId:x}.");
- }
-
- // On Windows we have the TEB to know where to end the walk.
- ulong teb = CurrentThread.GetThreadTeb();
- if (teb != 0)
- {
- // The stack base is after the first pointer, see TEB and NT_TIB.
- MemoryService.ReadPointer(teb + (uint)MemoryService.PointerSize, out end);
- }
-
- if (end == 0)
- {
- end = stackPointer + 0xFFFF;
- }
-
- return new(AlignDown(stackPointer), AlignUp(end));
- }
-
- private ulong AlignDown(ulong address)
- {
- ulong mask = ~((ulong)MemoryService.PointerSize - 1);
- return address & mask;
- }
-
- private int AlignDown(int value)
- {
- int mask = ~(MemoryService.PointerSize - 1);
- return value & mask;
- }
-
- private ulong AlignUp(ulong address)
- {
- ulong pointerSize = (ulong)MemoryService.PointerSize;
- if (address > ulong.MaxValue - pointerSize)
- {
- return AlignDown(address);
- }
-
- ulong mask = ~(pointerSize - 1);
- return (address + pointerSize - 1) & mask;
- }
- }
-}
using System.Linq;
using System.Text;
using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
using Microsoft.Diagnostics.Runtime;
-using static Microsoft.Diagnostics.ExtensionCommands.Output.ColumnKind;
+using static Microsoft.Diagnostics.ExtensionCommands.TableOutput;
namespace Microsoft.Diagnostics.ExtensionCommands
{
private ulong PrintOneRuntime(ClrRuntime clrRuntime)
{
StringBuilder stringBuilder = null;
- Table output = new(Console, Text.WithWidth(21), Pointer.WithWidth(-1));
- output.SetAlignment(Align.Left);
+ TableOutput output = new(Console, (21, "x12"), (0, "x12"))
+ {
+ AlignLeft = true
+ };
HashSet<ulong> seen = new();
return totalSize;
}
- private ulong PrintAppDomains(Table output, ClrRuntime clrRuntime, HashSet<ulong> loaderAllocatorsSeen)
+ private ulong PrintAppDomains(TableOutput output, ClrRuntime clrRuntime, HashSet<ulong> loaderAllocatorsSeen)
{
Console.WriteLine("Loader Heap:");
WriteDivider();
for (int i = 0; i < clrRuntime.AppDomains.Length; i++)
{
- Console.CancellationToken.ThrowIfCancellationRequested();
-
ClrAppDomain appDomain = clrRuntime.AppDomains[i];
totalBytes += PrintAppDomain(output, appDomain, $"Domain {i + 1}:", loaderAllocatorsSeen);
}
return totalBytes;
}
- private ulong PrintAppDomain(Table output, ClrAppDomain appDomain, string name, HashSet<ulong> loaderAllocatorsSeen)
+ private ulong PrintAppDomain(TableOutput output, ClrAppDomain appDomain, string name, HashSet<ulong> loaderAllocatorsSeen)
{
if (appDomain is null)
{
IOrderedEnumerable<IGrouping<NativeHeapKind, ClrNativeHeapInfo>> filteredHeapsByKind = from heap in appDomain.EnumerateLoaderAllocatorHeaps()
where IsIncludedInFilter(heap)
- where loaderAllocatorsSeen.Add(heap.MemoryRange.Start)
+ where loaderAllocatorsSeen.Add(heap.Address)
group heap by heap.Kind into g
orderby GetSortOrder(g.Key)
select g;
};
}
- private ulong PrintAppDomainHeapsByKind(Table output, IOrderedEnumerable<IGrouping<NativeHeapKind, ClrNativeHeapInfo>> filteredHeapsByKind)
+ private ulong PrintAppDomainHeapsByKind(TableOutput output, IOrderedEnumerable<IGrouping<NativeHeapKind, ClrNativeHeapInfo>> filteredHeapsByKind)
{
// Just build and print the table.
ulong totalSize = 0;
foreach (IGrouping<NativeHeapKind, ClrNativeHeapInfo> item in filteredHeapsByKind)
{
- Console.CancellationToken.ThrowIfCancellationRequested();
-
text.Clear();
NativeHeapKind kind = item.Key;
ulong heapSize = 0;
return totalSize;
}
- private ulong PrintCodeHeaps(Table output, ClrRuntime clrRuntime)
+ private ulong PrintCodeHeaps(TableOutput output, ClrRuntime clrRuntime)
{
ulong totalSize = 0;
StringBuilder text = new(512);
foreach (ClrJitManager jitManager in clrRuntime.EnumerateJitManagers())
{
- Console.CancellationToken.ThrowIfCancellationRequested();
-
output.WriteRow("JIT Manager:", jitManager.Address);
- IEnumerable<ClrNativeHeapInfo> heaps = jitManager.EnumerateNativeHeaps().Where(IsIncludedInFilter).OrderBy(r => r.Kind).ThenBy(r => r.MemoryRange.Start);
+ IEnumerable<ClrNativeHeapInfo> heaps = jitManager.EnumerateNativeHeaps().Where(IsIncludedInFilter).OrderBy(r => r.Kind).ThenBy(r => r.Address);
ulong jitMgrSize = 0, jitMgrWasted = 0;
foreach (ClrNativeHeapInfo heap in heaps)
return true;
}
- if (filterRange.Contains(info.MemoryRange.Start))
+ if (filterRange.Contains(info.Address))
{
return true;
}
- if (info.MemoryRange.Length > 0)
+ if (info.Size is ulong size && size > 0)
{
// Check for the last valid address in the range
- return filterRange.Contains(info.MemoryRange.End - 1);
+ return filterRange.Contains(info.Address + size - 1);
}
return false;
private (ulong Size, ulong Wasted) CalculateSizeAndWasted(StringBuilder sb, ClrNativeHeapInfo heap)
{
- sb.Append(heap.MemoryRange.Start.ToString("x12"));
+ sb.Append(heap.Address.ToString("x12"));
- ulong size = heap.MemoryRange.Length;
- if (size > 0)
+ if (heap.Size is ulong size)
{
sb.Append('(');
sb.Append(size.ToString("x"));
sb.Append(':');
- ulong actualSize = GetActualSize(heap.MemoryRange.Start, size);
+ ulong actualSize = GetActualSize(heap.Address, size);
sb.Append(actualSize.ToString("x"));
sb.Append(')');
ulong wasted = 0;
- if (actualSize < size && heap.State != ClrNativeHeapState.Active)
+ if (actualSize < size && !heap.IsCurrentBlock)
{
wasted = size - actualSize;
}
return (0, 0);
}
- private ulong PrintModuleThunkTable(Table output, ref StringBuilder text, ClrRuntime clrRuntime)
+ private ulong PrintModuleThunkTable(TableOutput output, ref StringBuilder text, ClrRuntime clrRuntime)
{
IEnumerable<ClrModule> modulesWithThunks = clrRuntime.EnumerateModules().Where(r => r.ThunkHeap != 0);
if (!modulesWithThunks.Any())
return PrintModules(output, ref text, modulesWithThunks);
}
- private ulong PrintModuleLoaderAllocators(Table output, ref StringBuilder text, ClrRuntime clrRuntime, HashSet<ulong> loaderAllocatorsSeen)
+ private ulong PrintModuleLoaderAllocators(TableOutput output, ref StringBuilder text, ClrRuntime clrRuntime, HashSet<ulong> loaderAllocatorsSeen)
{
// On .Net Core, modules share their LoaderAllocator with their AppDomain (and AppDomain shares theirs
// with SystemDomain). Only collectable assemblies have unique loader allocators, and that's what we
return PrintModules(output, ref text, collectable);
}
- private ulong PrintModules(Table output, ref StringBuilder text, IEnumerable<ClrModule> modules)
+ private ulong PrintModules(TableOutput output, ref StringBuilder text, IEnumerable<ClrModule> modules)
{
text ??= new(128);
ulong totalSize = 0, totalWasted = 0;
foreach (ClrModule module in modules)
{
- Console.CancellationToken.ThrowIfCancellationRequested();
-
ulong moduleSize = 0, moduleWasted = 0;
text.Clear();
foreach (ClrNativeHeapInfo info in module.EnumerateThunkHeap().Where(IsIncludedInFilter))
{
- Console.CancellationToken.ThrowIfCancellationRequested();
-
if (text.Length > 0)
{
text.Append(' ');
Console.WriteLine();
ClrHeap heap = clrRuntime.Heap;
- Column sizeColumn = Text.GetAppropriateWidth(heap.Segments.Select(seg => FormatMemorySize(seg.CommittedMemory.Length)), max: 32);
- Table gcOutput = new(Console, DumpHeap, Pointer, Pointer, Pointer, sizeColumn, sizeColumn);
+ int pointerWidth = 16;
+ string pointerToStringFormat = "x16";
+ (int pointerWidth, string pointerToStringFormat) pointerFormat = (pointerWidth, pointerToStringFormat);
+
+ int sizeWidth = Math.Max(15, heap.Segments.Max(seg => FormatMemorySize(seg.CommittedMemory.Length).Length));
+ (int sizeWidth, string) sizeFormat = (sizeWidth, "");
+
+ TableOutput gcOutput = new(Console, pointerFormat, pointerFormat, pointerFormat, pointerFormat, sizeFormat, sizeFormat);
WriteDivider('=');
Console.WriteLine($"Number of GC Heaps: {heap.SubHeaps.Length}");
foreach (ClrSubHeap gc_heap in HeapWithFilters.EnumerateFilteredSubHeaps())
{
- Console.CancellationToken.ThrowIfCancellationRequested();
-
if (heap.IsServer)
{
Console.Write("Heap ");
return totalCommitted;
}
- private static void WriteSegmentHeader(Table gcOutput)
+ private static void WriteSegmentHeader(TableOutput gcOutput)
{
- gcOutput.WriteHeader("segment", "begin", "allocated", "committed", "allocated size", "committed size");
+ gcOutput.WriteRow("segment", "begin", "allocated", "committed", "allocated size", "committed size");
}
- private static void WriteSegment(Table gcOutput, ClrSegment segment)
+ private static void WriteSegment(TableOutput gcOutput, ClrSegment segment)
{
- gcOutput.WriteRow(segment, segment.ObjectRange.Start, segment.ObjectRange.End, segment.CommittedMemory.End,
+ gcOutput.WriteRow(new DmlDumpHeapSegment(segment),
+ segment.ObjectRange.Start, segment.ObjectRange.End, segment.CommittedMemory.End,
FormatMemorySize(segment.ObjectRange.Length), FormatMemorySize(segment.CommittedMemory.Length));
}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-using System;
using System.Collections.Generic;
using System.Linq;
using Microsoft.Diagnostics.Runtime;
return $"{updated:0.00}gb";
}
- public static string ToSignedHexString(this int offset) => offset < 0 ? $"-{Math.Abs(offset):x2}" : offset.ToString("x2");
-
internal static ulong FindMostCommonPointer(this IEnumerable<ulong> enumerable)
=> (from ptr in enumerable
group ptr by ptr into g
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System;
-using System.Collections.Generic;
-using System.Linq;
-using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
-using Microsoft.Diagnostics.Runtime;
-using static Microsoft.Diagnostics.ExtensionCommands.Output.ColumnKind;
-
-namespace Microsoft.Diagnostics.ExtensionCommands
-{
- [Command(Name = "finalizequeue", Help = "Displays all objects registered for finalization.")]
- public class FinalizeQueueCommand : CommandBase
- {
- [Option(Name = "-detail", Help = "Will display extra information on any SyncBlocks that need to be cleaned up, and on any RuntimeCallableWrappers (RCWs) that await cleanup. Both of these data structures are cached and cleaned up by the finalizer thread when it gets a chance to run.")]
- public bool Detail { get; set; }
-
- [Option(Name = "-allReady", Help = "Specifying this argument will allow for the display of all objects that are ready for finalization, whether they are already marked by the GC as such, or whether the next GC will. The objects that are not in the \"Ready for finalization\" list are finalizable objects that are no longer rooted. This option can be very expensive, as it verifies whether all the objects in the finalizable queues are still rooted or not.")]
- public bool AllReady { get; set; }
-
- [Option(Name = "-short", Help = "Limits the output to just the address of each object. If used in conjunction with -allReady it enumerates all objects that have a finalizer that are no longer rooted. If used independently it lists all objects in the finalizable and \"ready for finalization\" queues.")]
- public bool Short { get; set; }
-
- [Option(Name = "-mt", Help = "Limits the search for finalizable objects to only those matching the given MethodTable.")]
- public string MethodTable { get; set; }
-
- [Option(Name = "-stat", Aliases = new string[] { "-summary" }, Help = "Only print object statistics, not the list of all objects.")]
- public bool Stat { get; set; }
-
- [ServiceImport]
- public LiveObjectService LiveObjects { get; set; }
-
- [ServiceImport]
- public RootCacheService RootCache { get; set; }
-
- [ServiceImport]
- public DumpHeapService DumpHeap { get; set; }
-
- [ServiceImport]
- public ClrRuntime Runtime { get; set; }
-
- public override void Invoke()
- {
- ulong mt = 0;
- if (!string.IsNullOrWhiteSpace(MethodTable))
- {
- mt = ParseAddress(MethodTable) ?? throw new ArgumentException($"Could not parse MethodTable: '{MethodTable}'");
- }
-
- if (Short && Stat)
- {
- throw new ArgumentException("Cannot specify both -short and -stat.");
- }
-
- // If we are going to search for only live objects, be sure to print a warning first
- // in the output of the command instead of in between the rest of the output.
- if (AllReady)
- {
- LiveObjects.PrintWarning = true;
- LiveObjects.Initialize();
- }
-
- if (!Short)
- {
- PrintSyncBlockCleanupData();
- PrintRcwCleanupData();
- Console.WriteLine("----------------------------------");
- Console.WriteLine();
-
- PrintGenerationalRanges();
-
- if (AllReady)
- {
- Console.WriteLine("Statistics for all finalizable objects that are no longer rooted:");
- }
- else
- {
- Console.WriteLine("Statistics for all finalizable objects (including all objects ready for finalization):");
- }
- }
-
- IEnumerable<ClrObject> objects = EnumerateFinalizableObjects(AllReady, mt);
- DumpHeapService.DisplayKind displayKind = Short ? DumpHeapService.DisplayKind.Short : DumpHeapService.DisplayKind.Normal;
-
- DumpHeap.PrintHeap(objects, displayKind, Stat, printFragmentation: false);
-
- }
- private IEnumerable<ClrObject> EnumerateFinalizableObjects(bool allReady, ulong mt)
- {
- IEnumerable<ClrObject> result = EnumerateValidFinalizableObjectsWithTypeFilter(mt);
-
- if (allReady)
- {
- HashSet<ulong> rootedByFinalizer = new();
- foreach (ClrRoot root in Runtime.Heap.EnumerateFinalizerRoots())
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- ClrObject obj = root.Object;
- if (obj.IsValid)
- {
- rootedByFinalizer.Add(obj);
- }
- }
-
- // We are trying to find all objects that are ready to be finalized, which is essentially
- // all dead objects. However, objects which were previously collected but waiting on
- // the finalizer thread to process them are considered "live" because they are rooted by
- // the finalizer queue. So our result needs to be either dead objects or directly rooted
- // by the finalizer queue.
- result = result.Where(obj => rootedByFinalizer.Contains(obj) || !LiveObjects.IsLive(obj));
- }
-
- return result;
- }
-
- private IEnumerable<ClrObject> EnumerateValidFinalizableObjectsWithTypeFilter(ulong mt)
- {
- foreach (ClrObject obj in Runtime.Heap.EnumerateFinalizableObjects())
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- if (!obj.IsValid)
- {
- continue;
- }
-
- if (mt != 0 && obj.Type.MethodTable != mt)
- {
- continue;
- }
-
- yield return obj;
- }
- }
-
- private void PrintSyncBlockCleanupData()
- {
- Table output = null;
- int total = 0;
- foreach (ClrSyncBlockCleanupData cleanup in Runtime.EnumerateSyncBlockCleanupData())
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- if (output is null)
- {
- output = new(Console, Pointer, Pointer, Pointer, Pointer);
- output.WriteHeader("SyncBlock", "RCW", "CCW", "ComClassFactory");
- }
-
- output.WriteRow(cleanup.SyncBlock, cleanup.Rcw, cleanup.Ccw, cleanup.ClassFactory);
- total++;
- }
-
- Console.WriteLine($"SyncBlocks to be cleaned up: {total:n0}");
- }
-
- private void PrintRcwCleanupData()
- {
- Table output = null;
- int freeThreadedCount = 0;
- int mtaCount = 0;
- int staCount = 0;
-
- foreach (ClrRcwCleanupData cleanup in Runtime.EnumerateRcwCleanupData())
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- if (output is null)
- {
- output = new(Console, Pointer, Pointer, Thread, Text);
- output.WriteHeader("RCW", "Context", "Thread", "Apartment");
- }
-
- string apartment;
- if (cleanup.IsFreeThreaded)
- {
- freeThreadedCount++;
- apartment = "(FreeThreaded)";
- }
- else if (cleanup.Thread == 0)
- {
- mtaCount++;
- apartment = "(MTA)";
- }
- else
- {
- staCount++;
- apartment = "(STA)";
- }
-
- output.WriteRow(cleanup.Rcw, cleanup.Context, cleanup.Thread, apartment);
- }
-
- Console.WriteLine($"Free-Threaded Interfaces to be released: {freeThreadedCount:n0}");
- Console.WriteLine($"MTA Interfaces to be released: {mtaCount:n0}");
- Console.WriteLine($"STA Interfaces to be released: {staCount:n0}");
- }
-
- private void PrintGenerationalRanges()
- {
- foreach (ClrSubHeap heap in Runtime.Heap.SubHeaps)
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- Console.WriteLine($"Heap {heap.Index}");
-
- WriteGeneration(heap, 0);
- WriteGeneration(heap, 1);
- WriteGeneration(heap, 2);
-
- Console.WriteLine($"Ready for finalization {heap.FinalizerQueueRoots.Length / (uint)IntPtr.Size:n0} objects ({heap.FinalizerQueueRoots.Start:x}->{heap.FinalizerQueueRoots.End:x})");
-
- Console.WriteLine("------------------------------");
- }
- }
-
- private void WriteGeneration(ClrSubHeap heap, int gen)
- {
- MemoryRange range = heap.GenerationalFinalizableObjects[gen];
- Console.WriteLine($"generation {gen} has {range.Length / (uint)IntPtr.Size:n0} objects ({range.Start:x}->{range.End:x})");
- }
- }
-}
using System.Diagnostics;
using System.Linq;
using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
using Microsoft.Diagnostics.Runtime;
-using static Microsoft.Diagnostics.ExtensionCommands.Output.ColumnKind;
+using static Microsoft.Diagnostics.ExtensionCommands.TableOutput;
namespace Microsoft.Diagnostics.ExtensionCommands
{
Console.WriteLineWarning($"Walking {segments:n0} {gcSegKind}, this may take a moment...");
}
- Table output = new(Console, DumpObj, TypeName.WithWidth(64), DumpObj, TypeName);
+ TableOutput output = new(Console, (16, "x12"), (64, ""), (16, "x12"));
// Ephemeral -> Large
List<(ClrObject From, ClrObject To)> ephToLoh = FindEphemeralToLOH().OrderBy(i => i.From.Address).ThenBy(i => i.To.Address).ToList();
{
Console.WriteLine("Ephemeral objects pointing to the Large objects:");
Console.WriteLine();
- output.WriteHeader("Ephemeral", "Ephemeral Type", "Large Object", "Large Object Type");
+ output.WriteRow("Ephemeral", "Ephemeral Type", "Large Object", "Large Object Type");
foreach ((ClrObject from, ClrObject to) in ephToLoh)
{
- output.WriteRow(from, from.Type, to, to.Type);
+ output.WriteRow(new DmlDumpObj(from), from.Type?.Name, new DmlDumpObj(to), to.Type?.Name);
}
Console.WriteLine();
{
Console.WriteLine("Large objects pointing to Ephemeral objects:");
Console.WriteLine();
- output.WriteHeader("Ephemeral", "Ephemeral Type", "Large Object", "Large Object Type");
+ output.WriteRow("Ephemeral", "Ephemeral Type", "Large Object", "Large Object Type");
foreach ((ClrObject from, ClrObject to) in lohToEph)
{
- output.WriteRow(from, from.Type, to, to.Type);
+ output.WriteRow(new DmlDumpObj(from), from.Type?.Name, new DmlDumpObj(to), to.Type?.Name);
}
Console.WriteLine();
{
Console.WriteLine($"Ephemeral objects which point to Large objects which point to Ephemeral objects:");
Console.WriteLine();
- output = new(Console, DumpObj, TypeName.WithWidth(64), DumpObj, TypeName.WithWidth(64), DumpObj, TypeName);
- output.WriteRow(from, from.Type, to, to.Type, ephEnd, ephEnd.Type);
+ output = new(Console, (16, "x12"), (64, ""), (16, "x12"), (64, ""), (16, "x12"));
+ output.WriteRow(new DmlDumpObj(from), from.Type?.Name, new DmlDumpObj(to), to.Type?.Name, new DmlDumpObj(ephEnd), ephEnd.Type?.Name);
}
}
Console.WriteLine();
}
}
+
+ foreach ((ClrObject From, ClrObject To) item in ephToLoh)
+ {
+ if (lohToEph.Any(r => item.To.Address == r.From.Address))
+ {
+                Console.WriteLine($"Error: large object {item.To.Address:x} appears in both the ephemeral->LOH and LOH->ephemeral lists.");
+ }
+ }
}
private IEnumerable<(ClrObject From, ClrObject To)> FindEphemeralToLOH()
using System.IO;
using System.Linq;
using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
using Microsoft.Diagnostics.Runtime;
+using Microsoft.Diagnostics.Runtime.Interfaces;
using static Microsoft.Diagnostics.ExtensionCommands.NativeAddressHelper;
-using static Microsoft.Diagnostics.ExtensionCommands.Output.ColumnKind;
namespace Microsoft.Diagnostics.ExtensionCommands
{
int nameLen = Math.Min(80, maxNameLen);
nameLen = Math.Max(nameLen, truncatedName.Length);
- using BorderedTable table = new(Console, TypeName.WithWidth(nameLen), Integer, Integer, Pointer);
- table.Columns[0] = table.Columns[0].WithAlignment(Align.Center);
- table.WriteHeader(nameColumn, "Unique", "Count", "RndPtr");
- table.Columns[0] = table.Columns[0].WithAlignment(Align.Left);
+ TableOutput table = new(Console, (nameLen, ""), (12, "n0"), (12, "n0"), (12, "x"));
+ table.Divider = " ";
+ table.WriteRowWithSpacing('-', nameColumn, "Unique", "Count", "RndPtr");
IEnumerable<(string Name, int Count, int Unique, IEnumerable<ulong> Pointers)> items = truncate ? resolved.Take(multi) : resolved;
foreach ((string Name, int Count, int Unique, IEnumerable<ulong> Pointers) in items)
table.WriteRow(truncatedName, single, single);
}
- table.Columns[0] = table.Columns[0].WithAlignment(Align.Center);
- table.WriteFooter("TOTALS", resolved.Sum(r => r.Unique), resolved.Sum(r => r.Count));
+ table.WriteRowWithSpacing('-', " [ TOTALS ] ", resolved.Sum(r => r.Unique), resolved.Sum(r => r.Count), "");
}
private static string FixTypeName(string typeName, HashSet<int> offsets)
using System.Collections.Generic;
using System.Linq;
using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
using Microsoft.Diagnostics.Runtime;
-using static Microsoft.Diagnostics.ExtensionCommands.Output.ColumnKind;
+using static Microsoft.Diagnostics.ExtensionCommands.TableOutput;
namespace Microsoft.Diagnostics.ExtensionCommands
{
public override void Invoke()
{
- Table output = new(Console, DumpObj, DumpHeap, ByteCount, Column.ForEnum<Generation>(), Column.ForEnum<Generation>(), ByteCount, Integer, TypeName);
+ TableOutput output = new(Console, (16, "x12"), (16, "x12"), (10, "n0"), (8, ""), (8, ""), (12, "n0"), (12, "n0"));
var generationGroup = from item in FindObjectsWithEphemeralReferences()
group item by (item.ObjectGeneration, item.ReferenceGeneration) into g
Console.WriteLine($"References from {objGen} to {refGen}:");
Console.WriteLine();
- output.WriteHeader("Object", "MethodTable", "Size", "Obj Gen", "Ref Gen", "Obj Count", "Obj Size", "Type");
+ output.WriteRow("Object", "MethodTable", "Size", "Obj Gen", "Ref Gen", "Obj Count", "Obj Size", "Type");
}
foreach (EphemeralRefCount erc in item.Objects)
Console.CancellationToken.ThrowIfCancellationRequested();
objCount++;
- output.WriteRow(erc.Object, erc.Object.Type, erc.Object.Size, erc.ObjectGeneration, erc.ReferenceGeneration, erc.Count, erc.Size, erc.Object.Type);
+ output.WriteRow(new DmlDumpObj(erc.Object), erc.Object.Type.MethodTable, erc.Object.Size, erc.ObjectGeneration, erc.ReferenceGeneration, erc.Count, erc.Size, erc.Object.Type.Name);
}
}
Generation1 = 2,
Generation2 = 3,
LargeObjectHeap = 4,
- PinnedObjectHeap = 5,
- FrozenObjectHeap = 6
+ PinnedObjectHeap = 5
}
}
using System.Collections.Generic;
using System.Linq;
using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
using Microsoft.Diagnostics.Runtime;
-using static Microsoft.Diagnostics.ExtensionCommands.Output.ColumnKind;
namespace Microsoft.Diagnostics.ExtensionCommands
{
public override void Invoke()
{
+
HeapInfo[] heaps = Runtime.Heap.SubHeaps.Select(h => GetHeapInfo(h)).ToArray();
bool printFrozen = heaps.Any(h => h.Frozen.Committed != 0);
- List<Column> formats = new()
+
+ List<(int, string)> formats = new()
{
- Text.WithWidth(8),
- IntegerWithoutCommas,
- IntegerWithoutCommas,
- IntegerWithoutCommas,
- IntegerWithoutCommas,
- IntegerWithoutCommas,
- Text.WithWidth(8),
- Text.WithWidth(8),
- Text.WithWidth(8)
+ (8, "x"), (12, ""), (12, ""), (12, ""), (12, ""), (12, ""), (8, ""), (8, ""), (8, "")
};
if (printFrozen)
{
- formats.Insert(1, IntegerWithoutCommas);
+ formats.Insert(1, (12, ""));
}
- Table output = new(Console, formats.ToArray());
- output.SetAlignment(Align.Left);
+ TableOutput output = new(Console, formats.ToArray())
+ {
+ AlignLeft = true,
+ };
// Write allocated
WriteHeader(output, heaps, printFrozen);
}
total = GetTotal(heaps);
- WriteRow(output, total, (info) => info.Committed, printFrozen, printPercentage: false, footer: true);
+ WriteRow(output, total, (info) => info.Committed, printFrozen);
Console.WriteLine();
}
- private static void WriteHeader(Table output, HeapInfo[] heaps, bool printFrozen)
+ private static void WriteHeader(TableOutput output, HeapInfo[] heaps, bool printFrozen)
{
- List<string> row = new(8) { "Heap", "Gen0", "Gen1", "Gen2", "LOH", "POH" };
+ List<object> row = new(8) { "Heap", "Gen0", "Gen1", "Gen2", "LOH", "POH" };
if (printFrozen)
{
row.Insert(1, "EPH");
}
- output.WriteHeader(row.ToArray());
+ output.WriteRow(row.ToArray());
}
- private static void WriteRow(Table output, HeapInfo heapInfo, Func<GenerationInfo, object> select, bool printFrozen, bool printPercentage = false, bool footer = false)
+ private static void WriteRow(TableOutput output, HeapInfo heapInfo, Func<GenerationInfo, object> select, bool printFrozen, bool printPercentage = false)
{
List<object> row = new(11)
{
}
}
- if (footer)
- {
- output.WriteFooter(row.ToArray());
- }
- else
- {
- output.WriteRow(row.ToArray());
- }
+ output.WriteRow(row.ToArray());
}
private static ulong GetValue(object value)
// The .NET Foundation licenses this file to you under the MIT license.
using System;
+using System.Diagnostics;
using System.IO;
using System.Text;
using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
using Microsoft.Diagnostics.Runtime;
-using static Microsoft.Diagnostics.ExtensionCommands.Output.ColumnKind;
+using static Microsoft.Diagnostics.ExtensionCommands.TableOutput;
namespace Microsoft.Diagnostics.ExtensionCommands
{
[ServiceImport]
public RootCacheService RootCache { get; set; }
- [ServiceImport]
- public StaticVariableService StaticVariables { get; set; }
-
[ServiceImport]
public ManagedFileLineService FileLineService { get; set; }
if (AsGCGeneration.HasValue)
{
int gen = AsGCGeneration.Value;
-
- ClrSegment seg = Runtime.Heap.GetSegmentByAddress(address);
- if (seg is null)
- {
- Console.WriteLineError($"Address {address:x} is not in the managed heap.");
- return;
- }
-
- Generation objectGen = seg.GetGeneration(address);
- if (gen < (int)objectGen)
- {
- Console.WriteLine($"Object {address:x} will survive this collection:");
- Console.WriteLine($" gen({address:x}) = {objectGen} > {gen} = condemned generation.");
- return;
- }
-
if (gen < 0 || gen > 1)
{
// If not gen0 or gen1, treat it as a normal !gcroot
}
Console.WriteLine($" {objAddress:x}");
- PrintPath(Console, RootCache, StaticVariables, Runtime.Heap, path);
+ PrintPath(Console, RootCache, Runtime.Heap, path);
Console.WriteLine();
count++;
private void PrintPath(ClrRoot root, GCRoot.ChainLink link)
{
PrintRoot(root);
- PrintPath(Console, RootCache, StaticVariables, Runtime.Heap, link);
+ PrintPath(Console, RootCache, Runtime.Heap, link);
Console.WriteLine();
}
- public static void PrintPath(IConsoleService console, RootCacheService rootCache, StaticVariableService statics, ClrHeap heap, GCRoot.ChainLink link)
+ public static void PrintPath(IConsoleService console, RootCacheService rootCache, ClrHeap heap, GCRoot.ChainLink link)
{
- Table objectOutput = new(console, Text.WithWidth(2), DumpObj, TypeName, Text)
+ TableOutput objectOutput = new(console, (2, ""), (16, "x16"))
{
+ AlignLeft = true,
Indent = new(' ', 10)
};
- objectOutput.SetAlignment(Align.Left);
-
- bool first = true;
- bool isPossibleStatic = true;
-
- ClrObject firstObj = default;
-
ulong prevObj = 0;
while (link != null)
{
- ClrObject obj = heap.GetObject(link.Object);
-
- // Check whether this link is a dependent handle
- string extraText = "";
bool isDependentHandleLink = rootCache.IsDependentHandleLink(prevObj, link.Object);
- if (isDependentHandleLink)
- {
- extraText = "(dependent handle)";
- }
-
- // Print static variable info. In all versions of the runtime, static variables are stored in
- // a pinned object array. We check if the first link in the chain is an object[], and if so we
- // check if the second object's address is the location of a static variable. We could further
- // narrow this by checking the root type, but that needlessly complicates this code...we can't
- // get false positives or negatives here (as nothing points to static variable object[] other
- // than the root).
- if (first)
- {
- firstObj = obj;
- isPossibleStatic = firstObj.IsValid && firstObj.IsArray && firstObj.Type.Name == "System.Object[]";
- first = false;
- }
- else if (isPossibleStatic)
- {
- if (statics is not null && !isDependentHandleLink)
- {
- foreach (ClrReference reference in firstObj.EnumerateReferencesWithFields(carefully: false, considerDependantHandles: false))
- {
- if (reference.Object == obj)
- {
- ulong address = firstObj + (uint)reference.Offset;
-
- if (statics.TryGetStaticByAddress(address, out ClrStaticField field))
- {
- extraText = $"(static variable: {field.Type?.Name ?? "Unknown"}.{field.Name})";
- break;
- }
- }
- }
- }
-
- // only the first object[] in the chain is possible to be the static array
- isPossibleStatic = false;
- }
+ ClrObject obj = heap.GetObject(link.Object);
- objectOutput.WriteRow("->", obj, obj.Type, extraText);
+ objectOutput.WriteRow("->", obj.IsValid ? new DmlDumpObj(obj) : obj.Address, obj.Type?.Name ?? "<unknown type>", (isDependentHandleLink ? " (dependent handle)" : ""));
prevObj = link.Object;
link = link.Next;
ClrHandleKind.SizedRef => "sized ref handle",
ClrHandleKind.WeakWinRT => "weak WinRT handle",
_ => handleKind.ToString()
            };
}
private string GetFrameOutput(ClrStackFrame currFrame)
using System.Linq;
using System.Text;
using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
using Microsoft.Diagnostics.Runtime;
using static Microsoft.Diagnostics.ExtensionCommands.NativeAddressHelper;
-using static Microsoft.Diagnostics.ExtensionCommands.Output.ColumnKind;
namespace Microsoft.Diagnostics.ExtensionCommands
{
foreach ((ClrSegment Segment, ulong Address, ulong Pointer, DescribedRegion MemoryRange) item in items)
{
- Console.CancellationToken.ThrowIfCancellationRequested();
-
if (!segmentLists.TryGetValue(item.Segment, out List<GCObjectToRange> list))
{
list = segmentLists[item.Segment] = new();
Console.WriteLine("Resolving object names...");
foreach (string type in memoryTypes)
{
- Console.CancellationToken.ThrowIfCancellationRequested();
-
WriteHeader($" {type} Regions ");
List<ulong> addressesNotInObjects = new();
foreach (KeyValuePair<ClrSegment, List<GCObjectToRange>> segEntry in segmentLists)
{
- Console.CancellationToken.ThrowIfCancellationRequested();
-
ClrSegment seg = segEntry.Key;
List<GCObjectToRange> pointers = segEntry.Value;
pointers.Sort((x, y) => x.GCPointer.CompareTo(y.GCPointer));
while (index < pointers.Count && pointers[index].GCPointer < obj.Address)
{
- Console.CancellationToken.ThrowIfCancellationRequested();
-
// If we "missed" the pointer then it's outside of an object range.
addressesNotInObjects.Add(pointers[index].GCPointer);
while (index < pointers.Count && obj.Address <= pointers[index].GCPointer && pointers[index].GCPointer < obj.Address + obj.Size)
{
- Console.CancellationToken.ThrowIfCancellationRequested();
-
string typeName = obj.Type?.Name ?? $"<unknown_type>";
if (obj.IsFree)
{
Console.WriteLine($"All memory pointers:");
- IEnumerable<(ulong Pointer, ulong Size, ClrObject Object, ClrType Type)> allPointers = unknownObjPointers.Select(unknown => (unknown.Pointer, 0ul, unknown.Object, unknown.Object.Type));
- allPointers = allPointers.Concat(knownMemory.Values.Select(k => (k.Pointer, GetSize(sizeHints, k), k.Object, k.Object.Type)));
+ IEnumerable<(ulong Pointer, ulong Size, ulong Object, string Type)> allPointers = unknownObjPointers.Select(unknown => (unknown.Pointer, 0ul, unknown.Object.Address, unknown.Object.Type?.Name ?? "<unknown_type>"));
+ allPointers = allPointers.Concat(knownMemory.Values.Select(k => (k.Pointer, GetSize(sizeHints, k), k.Object.Address, k.Name)));
- using BorderedTable allOut = new(Console, Pointer, ByteCount, DumpObj, TypeName);
-
- allOut.WriteHeader("Pointer", "Size", "Object", "Type");
-
- foreach ((ulong Pointer, ulong Size, ClrObject Object, ClrType Type) entry in allPointers)
+ TableOutput allOut = new(Console, (16, "x"), (16, "x"), (16, "x"))
{
- Console.CancellationToken.ThrowIfCancellationRequested();
+ Divider = " | "
+ };
+ allOut.WriteRowWithSpacing('-', "Pointer", "Size", "Object", "Type");
+ foreach ((ulong Pointer, ulong Size, ulong Object, string Type) entry in allPointers)
+ {
if (entry.Size == 0)
{
- allOut.WriteRow(entry.Pointer, null, entry.Object, entry.Type);
+ allOut.WriteRow(entry.Pointer, "", entry.Object, entry.Type);
}
else
{
// totals
var knownMemorySummary = from known in knownMemory.Values
- group known by known.Object.Type into g
- let Type = g.Key
+ group known by known.Name into g
+ let Name = g.Key
let Count = g.Count()
let TotalSize = g.Sum(k => (long)GetSize(sizeHints, k))
- orderby TotalSize descending, Type.Name ascending
+ orderby TotalSize descending, Name ascending
select new {
- Type,
+ Name,
Count,
TotalSize,
Pointer = g.Select(p => p.Pointer).FindMostCommonPointer()
};
- Column typeNameColumn = TypeName.GetAppropriateWidth(knownMemory.Values.Select(r => r.Object.Type), 16);
- using (BorderedTable summary = new(Console, typeNameColumn, Integer, HumanReadableSize, ByteCount, Pointer))
- {
- summary.WriteHeader("Type", "Count", "Size", "Size (bytes)", "RndPointer");
+ int maxNameLen = Math.Min(80, knownMemory.Values.Max(r => r.Name.Length));
- foreach (var item in knownMemorySummary)
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
+ TableOutput summary = new(Console, (-maxNameLen, ""), (8, "n0"), (12, "n0"), (12, "n0"), (12, "x"))
+ {
+ Divider = " | "
+ };
- summary.WriteRow(item.Type, item.Count, item.TotalSize, item.TotalSize, item.Pointer);
- }
+ summary.WriteRowWithSpacing('-', "Type", "Count", "Size", "Size (bytes)", "RndPointer");
- (int totalRegions, ulong totalBytes) = GetSizes(knownMemory, sizeHints);
- summary.WriteFooter("[TOTAL]", totalRegions, totalBytes, totalBytes);
+ foreach (var item in knownMemorySummary)
+ {
+ summary.WriteRow(item.Name, item.Count, item.TotalSize.ConvertToHumanReadable(), item.TotalSize, item.Pointer);
}
- Console.WriteLine();
+ (int totalRegions, ulong totalBytes) = GetSizes(knownMemory, sizeHints);
+
+ summary.WriteSpacer('-');
+ summary.WriteRow("[TOTAL]", totalRegions, totalBytes.ConvertToHumanReadable(), totalBytes);
+
+            Console.WriteLine();
}
};
var unknownMem = unknownMemQuery.ToArray();
+ int maxNameLen = Math.Min(80, unknownMem.Max(r => r.Name.Length));
+
+ TableOutput summary = new(Console, (-maxNameLen, ""), (8, "n0"), (12, "x"))
+ {
+ Divider = " | "
+ };
- Column typeNameColumn = TypeName.GetAppropriateWidth(unknownMem.Select(r => r.Name));
- using BorderedTable summary = new(Console, typeNameColumn, Integer, Pointer);
- summary.WriteHeader("Type", "Count", "RndPointer");
+ summary.WriteRowWithSpacing('-', "Type", "Count", "RndPointer");
foreach (var item in unknownMem)
{
- Console.CancellationToken.ThrowIfCancellationRequested();
-
summary.WriteRow(item.Name, item.Count, item.Pointer);
}
}
Console.WriteLine(header.PadRight(Width, '='));
}
- private string CollapseGenerics(string typeName)
+ private static string CollapseGenerics(string typeName)
{
StringBuilder result = new(typeName.Length + 16);
int nest = 0;
for (int i = 0; i < typeName.Length; i++)
{
- Console.CancellationToken.ThrowIfCancellationRequested();
-
if (typeName[i] == '<')
{
if (nest++ == 0)
private const string ExternalMemoryBlock = "System.Reflection.Internal.ExternalMemoryBlock";
private const string RuntimeParameterInfo = "System.Reflection.RuntimeParameterInfo";
+ public string Name => Object.Type?.Name ?? "<unknown_type>";
public ClrObject Object { get; }
public ulong Pointer { get; }
public ulong Size { get; }
using System.Collections.Generic;
using System.Linq;
using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
using Microsoft.Diagnostics.Runtime;
-using static Microsoft.Diagnostics.ExtensionCommands.Output.ColumnKind;
+using static Microsoft.Diagnostics.ExtensionCommands.TableOutput;
namespace Microsoft.Diagnostics.ExtensionCommands
{
return;
}
- Column objectRangeColumn = Range.WithDml(Dml.DumpHeap).GetAppropriateWidth(segments.Select(r => r.ObjectRange));
- Column committedColumn = Range.GetAppropriateWidth(segments.Select(r => r.CommittedMemory));
- Column reservedColumn = Range.GetAppropriateWidth(segments.Select(r => r.ReservedMemory));
- Table output = new(Console, Pointer, IntegerWithoutCommas.WithWidth(6).WithDml(Dml.DumpHeap), DumpHeap, Text.WithWidth(6), objectRangeColumn, committedColumn, reservedColumn);
- output.SetAlignment(Align.Left);
- output.WriteHeader("Address", "Heap", "Segment", "Generation", "Allocated", "Committed", "Reserved");
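+ // Size the three range columns to the widest formatted committed/reserved range across all segments.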
+ (int, string) RangeFormat = (segments.Max(seg => RangeSizeForSegment(seg)), "");
+ TableOutput output = new(Console, (16, "x"), (4, ""), (16, "x"), (10, ""), RangeFormat, RangeFormat, RangeFormat)
+ {
+ AlignLeft = true,
+ };
+ output.WriteRow("Address", "Heap", "Segment", "Generation", "Allocated", "Committed", "Reserved");
foreach (ClrSegment segment in segments)
{
string generation;
};
}
- if (segment.ObjectRange.Contains(address))
- {
- output.Columns[0] = output.Columns[0].WithDml(Dml.ListNearObj);
- }
- else
- {
- output.Columns[0] = output.Columns[0].WithDml(null);
- }
+ object addressColumn = segment.ObjectRange.Contains(address) ? new DmlListNearObj(address) : address;
+ output.WriteRow(addressColumn, segment.SubHeap.Index, segment.Address, generation, new DmlDumpHeap(FormatRange(segment.ObjectRange), segment.ObjectRange), FormatRange(segment.CommittedMemory), FormatRange(segment.ReservedMemory));
+ }
+ }
+
+ private static string FormatRange(MemoryRange range) => $"{range.Start:x}-{range.End:x}";
- output.WriteRow(address, segment.SubHeap, segment, generation, segment.ObjectRange, segment.CommittedMemory, segment.ReservedMemory);
+ private static int RangeSizeForSegment(ClrSegment segment)
+ {
+ // segment.ObjectRange should always be shorter than CommittedMemory, so only Committed and Reserved need to be compared.
+ if (segment.CommittedMemory.Length > segment.ReservedMemory.Length)
+ {
+ return FormatRange(segment.CommittedMemory).Length;
+ }
+ else
+ {
+ return FormatRange(segment.ReservedMemory).Length;
}
}
using System.Diagnostics;
using System.Linq;
using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
using Microsoft.Diagnostics.Runtime;
-using static Microsoft.Diagnostics.ExtensionCommands.Output.ColumnKind;
+using static Microsoft.Diagnostics.ExtensionCommands.TableOutput;
namespace Microsoft.Diagnostics.ExtensionCommands
{
MemoryRange[] segAllocContexts = heap.EnumerateAllocationContexts().Where(context => segment.ObjectRange.Contains(context.Start)).ToArray();
int pointerColumnWidth = segAllocContexts.Length > 0 ? Math.Max(segAllocContexts.Max(r => FormatRange(r).Length), 16) : 16;
- Column kindColumn = Text.WithWidth("Expected:".Length).WithAlignment(Align.Left);
-
- Table output = new(Console, kindColumn, DumpObj.WithWidth(pointerColumnWidth), Text.WithWidth(32), TypeName);
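+ // Column specs are (width, format) pairs; a negative width is assumed to left-align that column, and a width of 0 adds no padding.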
+ TableOutput output = new(Console, (-"Expected:".Length, ""), (pointerColumnWidth, "x16"), (20, ""), (0, ""));
// Get current object, but objAddress may not point to an object.
ClrObject curr = heap.GetObject(objAddress);
if (prev.IsValid)
{
- expectedNextObject = AlignObj(prev + prev.Size, segment);
+ expectedNextObject = Align(prev + prev.Size, segment);
}
else
{
localConsistency = VerifyAndPrintObject(output, "Current:", heap, segment, curr) && localConsistency;
// If curr is valid, we need to print and skip the allocation context
- expectedNextObject = AlignObj(curr + curr.Size, segment);
+ expectedNextObject = Align(curr + curr.Size, segment);
MemoryRange allocContextPlusGap = PrintGapIfExists(output, segment, segAllocContexts, new(curr, expectedNextObject));
if (allocContextPlusGap.End != 0)
{
}
}
- private MemoryRange PrintGapIfExists(Table output, ClrSegment segment, MemoryRange[] segAllocContexts, MemoryRange objectDistance)
+ private MemoryRange PrintGapIfExists(TableOutput output, ClrSegment segment, MemoryRange[] segAllocContexts, MemoryRange objectDistance)
{
// Print information about allocation context gaps between objects
MemoryRange range = segAllocContexts.FirstOrDefault(ctx => objectDistance.Overlaps(ctx) || ctx.Contains(objectDistance.End));
if (range.Start != 0)
{
- output.Columns[1] = output.Columns[1].WithDml(null);
output.WriteRow("Gap:", FormatRange(range), FormatSize(range.Length), "GC Allocation Context (expected gap in the heap)");
}
}
uint minObjectSize = (uint)MemoryService.PointerSize * 3;
- return new(range.Start, range.End + AlignObj(minObjectSize, segment));
+ return new(range.Start, range.End + Align(minObjectSize, segment));
}
private static string FormatRange(MemoryRange range) => $"{range.Start:x}-{range.End:x}";
- private ulong AlignObj(ulong size, ClrSegment seg)
+ private ulong Align(ulong size, ClrSegment seg)
{
ulong AlignConst;
ulong AlignLargeConst = 7;
return (size + AlignConst) & ~AlignConst;
}
- private bool VerifyAndPrintObject(Table output, string which, ClrHeap heap, ClrSegment segment, ClrObject obj)
+ private bool VerifyAndPrintObject(TableOutput output, string which, ClrHeap heap, ClrSegment segment, ClrObject obj)
{
bool isObjectValid = !heap.IsObjectCorrupted(obj, out ObjectCorruption corruption) && obj.IsValid;
+ // The object may still be corrupted at this point, but corruption does not always prevent us from reading its type.
+ // If we can resolve the type name, print it.
+ string typeName = obj.Type?.Name ?? GetErrorTypeName(obj);
+
// ClrObject.Size is not available if IsValid returns false
string size = FormatSize(obj.IsValid ? obj.Size : 0);
if (corruption is null)
{
- output.Columns[1] = output.Columns[1].WithDml(Dml.DumpObj);
- output.WriteRow(which, obj, size, obj.Type);
+ output.WriteRow(which, new DmlDumpObj(obj), size, typeName);
}
else
{
- output.Columns[1] = output.Columns[1].WithDml(Dml.ListNearObj);
- output.WriteRow(which, obj, size, obj.Type);
-
+ output.WriteRow(which, new DmlListNearObj(obj), size, typeName);
Console.Write($"Error Detected: {VerifyHeapCommand.GetObjectCorruptionMessage(MemoryService, heap, corruption)} ");
- if (Console.SupportsDml)
- {
- Console.WriteDmlExec("[verify heap]", $"!verifyheap -segment {segment.Address:X}");
- }
-
+ Console.WriteDmlExec("[verify heap]", $"!verifyheap -s {segment.Address:X}");
Console.WriteLine();
}
}
private static string FormatSize(ulong size) => size > 0 ? $"{size:n0} (0x{size:x})" : "";
+
+ private string GetErrorTypeName(ClrObject obj)
+ {
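+ // If we can't even read the method table pointer at the object's address, report the read failure; otherwise the type is simply unknown.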
+ if (!MemoryService.ReadPointer(obj.Address, out _))
+ {
+ return $"[error reading mt at: {obj.Address:x}]";
+ }
+ else
+ {
+ return $"Unknown";
+ }
+ }
}
}
return _liveObjs.Contains(obj);
}
- public void Initialize()
- {
- _liveObjs ??= CreateObjectSet();
- }
-
private HashSet<ulong> CreateObjectSet()
{
ClrHeap heap = Runtime.Heap;
using System.Collections.Generic;
using System.Linq;
using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
using static Microsoft.Diagnostics.ExtensionCommands.NativeAddressHelper;
-using static Microsoft.Diagnostics.ExtensionCommands.Output.ColumnKind;
namespace Microsoft.Diagnostics.ExtensionCommands
{
private const string ReserveFlag = "-reserve";
private const string ReserveHeuristicFlag = "-reserveHeuristic";
private const string ForceHandleTableFlag = "-forceHandleTable";
- private const string ListFlag = "-list";
- private const string BySizeFlag = "-orderBySize";
[Option(Name = SummaryFlag, Aliases = new string[] { "-stat", }, Help = "Only print summary table.")]
public bool Summary { get; set; }
- [Option(Name = ImagesFlag, Help = "Prints a summary table of image memory usage.")]
+ [Option(Name = ImagesFlag, Aliases = new string[] { "-i" }, Help = "Prints a summary table of image memory usage.")]
public bool ShowImageTable { get; set; }
[Option(Name = ReserveFlag, Help = "Include MEM_RESERVE regions in the output.")]
[Option(Name = ForceHandleTableFlag, Help = "We only tag the HandleTable if we can do so efficiently on newer runtimes. This option ensures we always tag HandleTable memory, even if it will take a long time.")]
public bool IncludeHandleTableIfSlow { get; set; }
- [Option(Name = BySizeFlag, Help = "List the raw addresses by size, not by base address.")]
- public bool BySize { get; set; }
-
- [Option(Name = ListFlag, Help = "A separated list of memory regions to list allocations for.")]
- public string List { get; set; }
-
[ServiceImport]
public NativeAddressHelper AddressHelper { get; set; }
DescribedRegion[] ranges = memoryRanges.ToArray();
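+ // Widest region name, used below to size the first column of both the per-region table and the summary table.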
+ int nameSizeMax = ranges.Max(r => r.Name.Length);
+
// Tag reserved memory based on what's adjacent.
if (TagReserveMemoryHeuristically)
{
CollapseReserveRegions(ranges);
}
- if (!Summary && List is null)
+ if (!Summary)
{
- Column nameColumn = Text.GetAppropriateWidth(ranges.Select(r => r.Name));
- Column kindColumn = Column.ForEnum<MemoryRegionType>();
- Column stateColumn = Column.ForEnum<MemoryRegionState>();
-
- // These are flags, so we need a column wide enough for that output instead of ForEnum
- Column protectionColumn = Text.GetAppropriateWidth(ranges.Select(r => r.Protection));
- Column imageColumn = Image.GetAppropriateWidth(ranges.Select(r => r.Image));
- using BorderedTable output = new(Console, nameColumn, Pointer, Pointer, HumanReadableSize, kindColumn, stateColumn, protectionColumn, imageColumn);
-
- output.WriteHeader("Memory Kind", "StartAddr", "EndAddr-1", "Size", "Type", "State", "Protect", "Image");
- IOrderedEnumerable<DescribedRegion> ordered = BySize ? ranges.OrderByDescending(r => r.Size).ThenBy(r => r.Start) : ranges.OrderBy(r => r.Start);
- foreach (DescribedRegion mem in ordered)
+ int kindSize = ranges.Max(r => r.Type.ToString().Length);
+ int stateSize = ranges.Max(r => r.State.ToString().Length);
+ int protectSize = ranges.Max(r => r.Protection.ToString().Length);
+
+ TableOutput output = new(Console, (nameSizeMax, ""), (12, "x"), (12, "x"), (12, ""), (kindSize, ""), (stateSize, ""), (protectSize, ""))
+ {
+ AlignLeft = true,
+ Divider = " | "
+ };
+
+ output.WriteRowWithSpacing('-', "Memory Kind", "StartAddr", "EndAddr-1", "Size", "Type", "State", "Protect", "Image");
+ foreach (DescribedRegion mem in ranges)
{
Console.CancellationToken.ThrowIfCancellationRequested();
- output.WriteRow(mem.Name, mem.Start, mem.End, mem.Size, mem.Type, mem.State, mem.Protection, mem.Image);
+ output.WriteRow(mem.Name, mem.Start, mem.End, mem.Size.ConvertToHumanReadable(), mem.Type, mem.State, mem.Protection, mem.Image);
}
+
+ output.WriteSpacer('-');
}
if (ShowImageTable)
Size
};
- using (BorderedTable output = new(Console, Image.GetAppropriateWidth(ranges.Select(r => r.Image), max: 80), Integer, HumanReadableSize, ByteCount))
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
+ int moduleLen = Math.Max(80, ranges.Max(r => r.Image?.Length ?? 0));
- output.WriteHeader("Image", "Count", "Size", "Size (bytes)");
+ TableOutput output = new(Console, (moduleLen, ""), (8, "n0"), (12, ""), (24, "n0"))
+ {
+ Divider = " | "
+ };
- int count = 0;
- long size = 0;
- foreach (var item in imageGroups)
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
+ output.WriteRowWithSpacing('-', "Image", "Regions", "Size", "Size (bytes)");
- output.WriteRow(item.Image, item.Count, item.Size, item.Size);
- count += item.Count;
- size += item.Size;
- }
+ int count = 0;
+ long size = 0;
+ foreach (var item in imageGroups)
+ {
+ Console.CancellationToken.ThrowIfCancellationRequested();
- output.WriteFooter("[TOTAL]", count, size, size);
+ output.WriteRow(item.Image, item.Count, item.Size.ConvertToHumanReadable(), item.Size);
+ count += item.Count;
+ size += item.Size;
}
- Console.WriteLine();
+ output.WriteSpacer('-');
+ output.WriteRow("[TOTAL]", count, size.ConvertToHumanReadable(), size);
+ WriteLine("");
}
- if (List is not null)
- {
- // Print a list of the specified memory kind, ordered by size descending.
-
- string[] requested = List.Split(new char[] { ',', ' ' }, StringSplitOptions.RemoveEmptyEntries);
- foreach (string kind in requested)
- {
- if (!ranges.Any(r => r.Name.Equals(kind, StringComparison.OrdinalIgnoreCase)))
- {
- Console.WriteLineError($"No memory regions match '{kind}'.");
- }
- else
- {
- Console.WriteLine($"{kind} Memory Regions:");
-
- Table output = new(Console, Pointer, ByteCount, HumanReadableSize, Column.ForEnum<MemoryRegionState>(), Column.ForEnum<MemoryRegionType>(), Column.ForEnum<MemoryRegionProtection>().WithWidth(-1));
- output.WriteHeader("Base Address", "Size (bytes)", "Size", "Mem State", "Mem Type", "Mem Protect");
-
- ulong totalSize = 0;
- int count = 0;
-
- IEnumerable<DescribedRegion> matching = ranges.Where(r => r.Name.Equals(kind, StringComparison.OrdinalIgnoreCase)).OrderByDescending(s => s.Size);
- foreach (DescribedRegion region in matching)
- {
- output.WriteRow(region.Start, region.Size, region.Size, region.State, region.Type, region.Protection);
-
- count++;
- totalSize += region.Size;
- }
-
- Console.WriteLine($"{totalSize:n0} bytes ({totalSize.ConvertToHumanReadable()}) in {count:n0} memory regions");
- Console.WriteLine();
- }
- }
- }
-
- if (List is null || Summary)
+ // Print summary table unconditionally
{
- // Show the summary table in almost every case, unless the user specified -list without -summary.
-
var grouped = from mem in ranges
let name = mem.Name
group mem by name into g
Size
};
- Column nameColumn = Text.GetAppropriateWidth(ranges.Select(r => r.Name));
- using BorderedTable output = new(Console, nameColumn, Integer, HumanReadableSize, ByteCount);
- output.WriteHeader("Memory Type", "Count", "Size", "Size (bytes)");
+ TableOutput output = new(Console, (-nameSizeMax, ""), (8, "n0"), (12, ""), (24, "n0"))
+ {
+ Divider = " | "
+ };
+
+ output.WriteRowWithSpacing('-', "Region Type", "Count", "Size", "Size (bytes)");
int count = 0;
long size = 0;
{
Console.CancellationToken.ThrowIfCancellationRequested();
- output.WriteRow(item.Name, item.Count, item.Size, item.Size);
-
+ output.WriteRow(item.Name, item.Count, item.Size.ConvertToHumanReadable(), item.Size);
count += item.Count;
size += item.Size;
}
- output.WriteFooter("[TOTAL]", count, size, size);
+ output.WriteSpacer('-');
+ output.WriteRow("[TOTAL]", count, size.ConvertToHumanReadable(), size);
}
}
+
[HelpInvoke]
public void HelpInvoke()
{
Flags:
{SummaryFlag}
- Show only a summary table of memory regions and not the list of every memory region.
+ Show only a summary table of memory regions and not the list of every address region.
{ImagesFlag}
Summarizes the memory ranges consumed by images in the process.
that reserve region HeapReserve. Note that this is a heuristic and NOT
intended to be completely accurate. This can be useful to try to figure out
what is creating large amount of MEM_RESERVE regions.
-
- {ListFlag}
- A separated list of memory region types (as maddress defines them) to print the base
- addresses and sizes of. This list may be separated by , or ""in quotes"".
-
- {BySizeFlag}
- Order the list of memory blocks by size (descending) when printing the list
- of all memory blocks instead of by address.
");
}
}
namespace Microsoft.Diagnostics.ExtensionCommands
{
[ServiceExport(Scope = ServiceScope.Target)]
- public sealed class NativeAddressHelper : IDisposable
+ public sealed class NativeAddressHelper
{
- private readonly IDisposable _onFlushEvent;
- private ((bool, bool, bool, bool) Key, DescribedRegion[] Result) _previous;
-
- public NativeAddressHelper(ITarget target)
- {
- Target = target;
- _onFlushEvent = target.OnFlushEvent.Register(() => _previous = default);
- }
-
- public void Dispose() => _onFlushEvent.Dispose();
-
[ServiceImport]
public ITarget Target { get; set; }
/// <exception cref="InvalidOperationException">If !address fails we will throw InvalidOperationException. This is usually
/// because symbols for ntdll couldn't be found.</exception>
/// <returns>An enumerable of memory ranges.</returns>
- public IEnumerable<DescribedRegion> EnumerateAddressSpace(bool tagClrMemoryRanges, bool includeReserveMemory, bool tagReserveMemoryHeuristically, bool includeHandleTableIfSlow)
- {
- (bool, bool, bool, bool) key = (tagClrMemoryRanges, includeReserveMemory, tagReserveMemoryHeuristically, includeHandleTableIfSlow);
-
- if (_previous.Result is not null && _previous.Key == key)
- {
- return _previous.Result;
- }
-
- DescribedRegion[] result = EnumerateAddressSpaceWorker(tagClrMemoryRanges, includeReserveMemory, tagReserveMemoryHeuristically, includeHandleTableIfSlow);
- _previous = (key, result);
-
- // Use AsReadOnly to ensure no modifications to the cached value
- return Array.AsReadOnly(result);
- }
-
- private DescribedRegion[] EnumerateAddressSpaceWorker(bool tagClrMemoryRanges, bool includeReserveMemory, bool tagReserveMemoryHeuristically, bool includeHandleTableIfSlow)
+ internal IEnumerable<DescribedRegion> EnumerateAddressSpace(bool tagClrMemoryRanges, bool includeReserveMemory, bool tagReserveMemoryHeuristically, bool includeHandleTableIfSlow)
{
- Console.WriteLineWarning("Enumerating and tagging the entire address space and caching the result...");
- Console.WriteLineWarning("Subsequent runs of this command should be faster.");
-
bool printedTruncatedWarning = false;
IEnumerable<DescribedRegion> addressResult = from region in MemoryRegionService.EnumerateRegions()
RootCacheService rootCache = runtime.Services.GetService<RootCacheService>();
if (clrRuntime is not null)
{
- foreach ((ulong Address, ulong Size, ClrMemoryKind Kind) mem in EnumerateClrMemoryAddresses(clrRuntime, rootCache, includeHandleTableIfSlow))
+ foreach ((ulong Address, ulong? Size, ClrMemoryKind Kind) mem in EnumerateClrMemoryAddresses(clrRuntime, rootCache, includeHandleTableIfSlow))
{
- // The GCBookkeeping range is a large region of memory that the GC reserved. We'll simply mark every
- // region within it as bookkeeping.
- if (mem.Kind == ClrMemoryKind.GCBookkeeping)
- {
- MemoryRange bookkeepingRange = MemoryRange.CreateFromLength(mem.Address, mem.Size);
- foreach (DescribedRegion region in rangeList)
- {
- if (bookkeepingRange.Contains(region.Start))
- {
- if (region.State == MemoryRegionState.MEM_RESERVE)
- {
- region.ClrMemoryKind = ClrMemoryKind.GCBookkeepingReserve;
- }
- else
- {
- region.ClrMemoryKind = ClrMemoryKind.GCBookkeeping;
- }
- }
- }
-
- continue;
- }
-
DescribedRegion[] found = rangeList.Where(r => r.Start <= mem.Address && mem.Address < r.End).ToArray();
+
if (found.Length == 0 && mem.Kind != ClrMemoryKind.GCHeapReserve)
{
Trace.WriteLine($"Warning: Could not find a memory range for {mem.Address:x} - {mem.Kind}.");
if (!printedTruncatedWarning)
{
- Console.WriteLineWarning($"Warning: Could not find a memory range for {mem.Address:x} - {mem.Kind}.");
- Console.WriteLineWarning($"This crash dump may not be a full dump!");
- Console.WriteLineWarning("");
+ Console.WriteLine($"Warning: Could not find a memory range for {mem.Address:x} - {mem.Kind}.");
+ Console.WriteLine($"This crash dump may not be a full dump!");
+ Console.WriteLine("");
printedTruncatedWarning = true;
}
// Add the memory range if we know its size.
- if (mem.Size > 0)
+ if (mem.Size is ulong size && size > 0)
{
IModule module = ModuleService.GetModuleFromAddress(mem.Address);
rangeList.Add(new DescribedRegion()
{
Start = mem.Address,
- End = mem.Address + mem.Size,
+ End = mem.Address + size,
ClrMemoryKind = mem.Kind,
State = mem.Kind == ClrMemoryKind.GCHeapReserve ? MemoryRegionState.MEM_RESERVE : MemoryRegionState.MEM_COMMIT,
Module = module,
foreach (DescribedRegion region in found)
{
- if (mem.Size == 0)
+ if (!mem.Size.HasValue || mem.Size.Value == 0)
{
// If we don't know the length of memory, just mark the Region with this tag.
SetRegionKindWithWarning(mem, region);
DescribedRegion middleRegion = new(region)
{
Start = mem.Address,
- End = mem.Address + mem.Size,
+ End = mem.Address + mem.Size.Value,
ClrMemoryKind = mem.Kind,
Usage = MemoryRegionUsage.CLR,
};
// Region is now the starting region of this set.
region.End = middleRegion.Start;
}
- else if (region.Size < mem.Size)
+ else if (region.Size < mem.Size.Value)
{
SetRegionKindWithWarning(mem, region);
// If we found no matching regions, expand the current region to be the right length.
if (!foundNext)
{
- region.End = mem.Address + mem.Size;
+ region.End = mem.Address + mem.Size.Value;
}
}
- else if (region.Size > mem.Size)
+ else if (region.Size > mem.Size.Value)
{
// The CLR memory segment is at the beginning of this region.
DescribedRegion newRange = new(region)
{
- End = mem.Address + mem.Size,
+ End = mem.Address + mem.Size.Value,
ClrMemoryKind = mem.Kind
};
region.ClrMemoryKind = mem.Kind;
}
}
- else
- {
- SetRegionKindWithWarning(mem, region);
- }
}
+
}
}
}
/// <summary>
/// Enumerates pointers to various CLR heaps in memory.
/// </summary>
- private static IEnumerable<(ulong Address, ulong Size, ClrMemoryKind Kind)> EnumerateClrMemoryAddresses(ClrRuntime runtime, RootCacheService rootCache, bool includeHandleTableIfSlow)
+ private static IEnumerable<(ulong Address, ulong? Size, ClrMemoryKind Kind)> EnumerateClrMemoryAddresses(ClrRuntime runtime, RootCacheService rootCache, bool includeHandleTableIfSlow)
{
foreach (ClrNativeHeapInfo nativeHeap in runtime.EnumerateClrNativeHeaps())
{
- Debug.Assert((int)NativeHeapKind.GCBookkeeping == (int)ClrMemoryKind.GCBookkeeping);
-
- ClrMemoryKind kind = nativeHeap.Kind switch
- {
- NativeHeapKind.Unknown => ClrMemoryKind.Unknown,
- > NativeHeapKind.Unknown and <= NativeHeapKind.GCBookkeeping => (ClrMemoryKind)nativeHeap.Kind, // enums match for these ranges
- >= NativeHeapKind.GCFreeRegion and <= NativeHeapKind.GCFreeUohSegment => ClrMemoryKind.GCHeapToBeFreed,
- _ => ClrMemoryKind.Unknown
- };
-
- yield return (nativeHeap.MemoryRange.Start, nativeHeap.MemoryRange.Length, kind);
+ yield return (nativeHeap.Address, nativeHeap.Size, nativeHeap.Kind == NativeHeapKind.Unknown ? ClrMemoryKind.None : (ClrMemoryKind)nativeHeap.Kind);
}
- // .Net 8 and beyond has accurate HandleTable memory info.
- bool haveAccurateHandleInfo = runtime.ClrInfo.Flavor == ClrFlavor.Core && runtime.ClrInfo.Version.Major >= 8;
- if (includeHandleTableIfSlow && !haveAccurateHandleInfo)
+ if (includeHandleTableIfSlow)
{
ulong prevHandle = 0;
ulong granularity = 0x100;
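+ // Report at most one representative address per granularity-sized block of the handle table instead of tagging every handle.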
if (handle.Address < prevHandle || handle.Address >= (prevHandle | (granularity - 1)))
{
- yield return (handle.Address, 0, ClrMemoryKind.HandleTable);
+ yield return (handle.Address, null, ClrMemoryKind.HandleTable);
prevHandle = handle.Address;
}
}
}
+ // We don't really have the true bounds of the committed or reserved segments.
+ // Return null for the size so that we will mark the entire region with this type.
foreach (ClrSegment seg in runtime.Heap.Segments)
{
if (seg.CommittedMemory.Length > 0)
{
- yield return (seg.CommittedMemory.Start, seg.CommittedMemory.Length, ClrMemoryKind.GCHeap);
+ yield return (seg.CommittedMemory.Start, null, ClrMemoryKind.GCHeap);
}
if (seg.ReservedMemory.Length > 0)
{
- yield return (seg.ReservedMemory.Start, seg.ReservedMemory.Length, ClrMemoryKind.GCHeapReserve);
+ yield return (seg.ReservedMemory.Start, null, ClrMemoryKind.GCHeapReserve);
}
}
}
- private static void SetRegionKindWithWarning((ulong Address, ulong Size, ClrMemoryKind Kind) mem, DescribedRegion region)
+ private static void SetRegionKindWithWarning((ulong Address, ulong? Size, ClrMemoryKind Kind) mem, DescribedRegion region)
{
if (region.ClrMemoryKind != mem.Kind)
{
if (region.ClrMemoryKind is not ClrMemoryKind.None
and not ClrMemoryKind.HighFrequencyHeap)
{
- Trace.WriteLine($"Warning: Overwriting range [{region.Start:x},{region.End:x}] {region.ClrMemoryKind} -> [{mem.Address:x},{mem.Address + mem.Size:x}] {mem.Kind}.");
+ if (mem.Size is not ulong size)
+ {
+ size = 0;
+ }
+
+ Trace.WriteLine($"Warning: Overwriting range [{region.Start:x},{region.End:x}] {region.ClrMemoryKind} -> [{mem.Address:x},{mem.Address + size:x}] {mem.Kind}.");
}
region.ClrMemoryKind = mem.Kind;
StubHeap,
HighFrequencyHeap,
LowFrequencyHeap,
- ExecutableHeap,
- FixupPrecodeHeap,
- NewStubPrecodeHeap,
- ThunkHeap,
- HandleTable,
- GCBookkeeping,
// Skip ahead so new ClrMD NativeHeapKind values don't break the enum.
Unknown = 100,
GCHeap,
- GCHeapToBeFreed,
GCHeapReserve,
- GCBookkeepingReserve,
+ HandleTable,
}
- public sealed class DescribedRegion : IMemoryRegion
+ internal sealed class DescribedRegion : IMemoryRegion
{
public DescribedRegion()
{
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-namespace Microsoft.Diagnostics.ExtensionCommands.Output
-{
- internal enum Align
- {
- Left,
- Right,
- Center
- }
-}
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System;
-using System.Text;
-using Microsoft.Diagnostics.DebugServices;
-
-namespace Microsoft.Diagnostics.ExtensionCommands.Output
-{
- internal sealed class BorderedTable : Table, IDisposable
- {
- private bool _wroteAtLeastOneSpacer;
-
- public BorderedTable(IConsoleService console, params Column[] columns)
- : base(console, columns)
- {
- _spacing = $" | ";
- }
-
- public void Dispose()
- {
- WriteSpacer();
- }
-
- public override void WriteHeader(params string[] values)
- {
- IncreaseColumnWidth(values);
-
- WriteSpacer();
- WriteHeaderFooter(values, writeSides: true, writeNewline: true);
- WriteSpacer();
- }
-
- public override void WriteFooter(params object[] values)
- {
- WriteSpacer();
- WriteHeaderFooter(values, writeSides: true, writeNewline: true);
- }
-
- public override void WriteRow(params object[] values)
- {
- // Ensure the top of the table is written even if there's no header/footer.
- if (!_wroteAtLeastOneSpacer)
- {
- WriteSpacer();
- }
-
- StringBuilder rowBuilder = _stringBuilderPool.Rent();
- rowBuilder.Append(Indent);
- rowBuilder.Append(_spacing);
-
- WriteRowWorker(values, rowBuilder, _spacing, writeLine: false);
- rowBuilder.Append(_spacing);
-
- FinishColumns(values.Length, rowBuilder);
-
- Console.WriteLine(rowBuilder.ToString());
- _stringBuilderPool.Return(rowBuilder);
- }
-
- protected override void WriteHeaderFooter(object[] values, bool writeSides, bool writeNewline)
- {
- base.WriteHeaderFooter(values, writeSides, writeNewline: false);
-
- StringBuilder rowBuilder = _stringBuilderPool.Rent();
- FinishColumns(values.Length, rowBuilder);
-
- if (writeNewline)
- {
- rowBuilder.AppendLine();
- }
-
- Console.Write(rowBuilder.ToString());
- _stringBuilderPool.Return(rowBuilder);
- }
-
- private void FinishColumns(int start, StringBuilder rowBuilder)
- {
- for (int i = start; i < Columns.Length; i++)
- {
- if (Columns[i].Width < 0)
- {
- break;
- }
-
- rowBuilder.Append(' ', Columns[i].Width);
- rowBuilder.Append(_spacing);
- }
- }
-
- private void WriteSpacer()
- {
- WriteBorder(" +-", '-', "-+ ");
- _wroteAtLeastOneSpacer = true;
- }
-
- private void WriteBorder(string left, char center, string right)
- {
- StringBuilder rowBuilder = _stringBuilderPool.Rent();
- rowBuilder.Append(Indent);
-
- rowBuilder.Append(left);
-
- for (int i = 0; i < Columns.Length; i++)
- {
- if (i != 0)
- {
- rowBuilder.Append(center, _spacing.Length);
- }
-
- if (Columns[i].Width < 0)
- {
- break;
- }
-
- rowBuilder.Append(center, Columns[i].Width);
- }
-
- rowBuilder.Append(right);
- Console.WriteLine(rowBuilder.ToString());
-
- _stringBuilderPool.Return(rowBuilder);
- }
- }
-}
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System;
-using System.Collections.Generic;
-using System.Text;
-
-namespace Microsoft.Diagnostics.ExtensionCommands.Output
-{
- internal readonly struct Column
- {
- private static readonly StringBuilderPool s_stringBuilderPool = new();
- private static readonly Column s_enum = new(Align.Left, -1, new(), null);
-
- public readonly int Width;
- public readonly Format Format;
- public readonly Align Alignment;
- public readonly DmlFormat Dml;
-
- public Column(Align alignment, int width, Format format, DmlFormat dml = null)
- {
- Alignment = alignment;
- Width = width;
- Format = format ?? throw new ArgumentNullException(nameof(format));
- Dml = dml;
- }
-
- public readonly Column WithWidth(int width) => new(Alignment, width, Format, Dml);
- internal readonly Column WithDml(DmlFormat dml) => new(Alignment, Width, Format, dml);
- internal readonly Column WithAlignment(Align align) => new(align, Width, Format, Dml);
-
- public readonly Column GetAppropriateWidth<T>(IEnumerable<T> values, int min = -1, int max = -1)
- {
- int len = 0;
-
- StringBuilder sb = s_stringBuilderPool.Rent();
-
- foreach (T value in values)
- {
- sb.Clear();
- Format.FormatValue(sb, value, -1, false);
- len = Math.Max(len, sb.Length);
- }
-
- s_stringBuilderPool.Return(sb);
-
- if (len < min)
- {
- len = min;
- }
-
- if (max > 0 && len > max)
- {
- len = max;
- }
-
- return WithWidth(len);
- }
-
- internal static Column ForEnum<TEnum>()
- where TEnum : struct
- {
- int len = 0;
- foreach (TEnum t in Enum.GetValues(typeof(TEnum)))
- {
- len = Math.Max(len, t.ToString().Length);
- }
-
- return s_enum.WithWidth(len);
- }
-
- public override string ToString()
- {
- string format = Format?.GetType().Name ?? "null";
- string dml = Dml?.GetType().Name ?? "null";
-
- return $"align:{Alignment} width:{Width} format:{format} dml:{dml}";
- }
- }
-}
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System;
-
-namespace Microsoft.Diagnostics.ExtensionCommands.Output
-{
- internal static class ColumnKind
- {
- private static Column? s_pointer;
- private static Column? s_text;
- private static Column? s_hexOffset;
- private static Column? s_hexValue;
- private static Column? s_dumpObj;
- private static Column? s_integer;
- private static Column? s_dumpHeapMT;
- private static Column? s_listNearObj;
- private static Column? s_dumpDomain;
- private static Column? s_thread;
- private static Column? s_integerWithoutComma;
- private static Column? s_humanReadable;
- private static Column? s_range;
-
- // NOTE/BUGBUG: This assumes IntPtr.Size matches the target process, which it should not do
- private static int PointerLength => IntPtr.Size * 2;
-
- /// <summary>
- /// A pointer, displayed as hex.
- /// </summary>
- public static Column Pointer => s_pointer ??= new(Align.Right, PointerLength, Formats.Pointer);
-
- /// <summary>
- /// Raw text which will not be truncated by default.
- /// </summary>
- public static Column Text => s_text ??= new(Align.Left, -1, Formats.Text);
-
- /// <summary>
- /// A hex value, prefixed with 0x.
- /// </summary>
- public static Column HexValue => s_hexValue ??= new(Align.Right, PointerLength + 2, Formats.HexValue);
-
- /// <summary>
- /// An offset (potentially negative), prefixed with 0x. For example: '0x20' or '-0x20'.
- /// </summary>
- public static Column HexOffset => s_hexOffset ??= new(Align.Right, 10, Formats.HexOffset);
-
- /// <summary>
- /// An integer, with commas. i.e. i.ToString("n0")
- /// </summary>
- public static Column Integer => s_integer ??= new(Align.Right, 14, Formats.Integer);
-
- /// <summary>
- /// An integer, without commas.
- /// </summary>
- public static Column IntegerWithoutCommas => s_integerWithoutComma ??= new(Align.Right, 10, Formats.IntegerWithoutCommas);
-
- /// <summary>
- /// A count of bytes (size).
- /// </summary>
- public static Column ByteCount => Integer;
-
- /// <summary>
- /// A human readable size count. e.g. "1.23mb"
- /// </summary>
- public static Column HumanReadableSize => s_humanReadable ??= new(Align.Right, 12, Formats.HumanReadableSize);
-
- /// <summary>
- /// An object pointer, which we would like to link to !do if Dml is enabled.
- /// </summary>
- public static Column DumpObj => s_dumpObj ??= new(Align.Right, PointerLength, Formats.Pointer, Dml.DumpObj);
-
- /// <summary>
- /// A link to any number of ClrMD objects (ClrSubHeap, ClrSegment, a MethodTable or ClrType, etc) which will
- /// print an appropriate !dumpheap filter for, if dml is enabled.
- /// </summary>
- public static Column DumpHeap => s_dumpHeapMT ??= new(Align.Right, PointerLength, Formats.Pointer, Dml.DumpHeap);
-
- /// <summary>
- /// A link to !dumpdomain for the given domain, if dml is enabled. This also puts the domain's name in the
- /// hover text for the link.
- /// </summary>
- public static Column DumpDomain => s_dumpDomain ??= new(Align.Right, PointerLength, Formats.Pointer, Dml.DumpDomain);
-
- /// <summary>
- /// The ClrThread address with a link to the OSThreadID to change threads (if dml is enabled).
- /// </summary>
- public static Column Thread => s_thread ??= new(Align.Right, PointerLength, Formats.Pointer, Dml.Thread);
-
- /// <summary>
- /// A link to !listnearobj for the given ClrObject or address, if dml is enabled.
- /// </summary>
- public static Column ListNearObj => s_listNearObj ??= new(Align.Right, PointerLength, Formats.Pointer, Dml.ListNearObj);
-
- /// <summary>
- /// The name of a given type. Note that types are always truncated by removing the beginning of the type's
- /// name instead of truncating based on alignment. This ensures the most important part of the name (the
- /// actual type name) is preserved instead of the namespace.
- /// </summary>
- public static Column TypeName => s_text ??= new(Align.Left, -1, Formats.TypeName);
-
- /// <summary>
- /// A path to an image on disk. Note that images are always truncted by removing the beginning of the image's
- /// path instead of the end, preserving the filename.
- /// </summary>
- public static Column Image => s_text ??= new(Align.Left, -1, Formats.Image);
-
- /// <summary>
- /// A MemoryRange printed as "[start-end]".
- /// </summary>
- public static Column Range => s_range ??= new(Align.Left, PointerLength * 2 + 1, Formats.Range);
- }
-}
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System;
-using System.Diagnostics;
-using System.Text;
-using Microsoft.Diagnostics.Runtime;
-
-namespace Microsoft.Diagnostics.ExtensionCommands.Output
-{
- internal static class Dml
- {
- private static DmlDumpObject s_dumpObj;
- private static DmlDumpHeap s_dumpHeap;
- private static DmlBold s_bold;
- private static DmlListNearObj s_listNearObj;
- private static DmlDumpDomain s_dumpDomain;
- private static DmlThread s_thread;
-
- /// <summary>
- /// Runs !dumpobj on the given pointer or ClrObject. If a ClrObject is invalid,
- /// this will instead link to !verifyobj.
- /// </summary>
- public static DmlFormat DumpObj => s_dumpObj ??= new();
-
- /// <summary>
- /// Marks the output in bold.
- /// </summary>
- public static DmlFormat Bold => s_bold ??= new();
-
- /// <summary>
- /// Dumps the heap. If given a ClrSegment, ClrSubHeap, or MemoryRange it will
- /// just dump that particular section of the heap.
- /// </summary>
- public static DmlFormat DumpHeap => s_dumpHeap ??= new();
-
- /// <summary>
- /// Runs ListNearObj on the given address or ClrObject.
- /// </summary>
- public static DmlFormat ListNearObj => s_listNearObj ??= new();
-
- /// <summary>
- /// Runs !dumpdomain on the given doman, additionally it will put the domain
- /// name as the hover text.
- /// </summary>
- public static DmlFormat DumpDomain => s_dumpDomain ??= new();
-
- /// <summary>
- /// Changes the debugger to the given thread.
- /// </summary>
- public static DmlFormat Thread => s_thread ??= new();
-
- private sealed class DmlBold : DmlFormat
- {
- public override void FormatValue(StringBuilder sb, string outputText, object value)
- {
- sb.Append("<b>");
- sb.Append(DmlEscape(outputText));
- sb.Append("</b>");
- }
- }
-
- private abstract class DmlExec : DmlFormat
- {
- public override void FormatValue(StringBuilder sb, string outputText, object value)
- {
- string command = GetCommand(outputText, value);
- if (string.IsNullOrWhiteSpace(command))
- {
- sb.Append(DmlEscape(outputText));
- return;
- }
-
- sb.Append("<exec cmd=\"");
- sb.Append(DmlEscape(command));
- sb.Append('\"');
-
- string altText = GetAltText(outputText, value);
- if (altText is not null)
- {
- sb.Append(" alt=\"");
- sb.Append(DmlEscape(altText));
- sb.Append('"');
- }
-
- sb.Append('>');
- sb.Append(DmlEscape(outputText));
- sb.Append("</exec>");
- }
-
- protected abstract string GetCommand(string outputText, object value);
- protected virtual string GetAltText(string outputText, object value) => null;
-
- protected static bool IsNullOrZeroValue(object obj, out string value)
- {
- if (obj is null)
- {
- value = null;
- return true;
- }
- else if (TryGetPointerValue(obj, out ulong ul) && ul == 0)
- {
- value = "0";
- return true;
- }
-
- value = null;
- return false;
- }
-
- protected static bool TryGetPointerValue(object value, out ulong ulVal)
- {
- if (value is ulong ul)
- {
- ulVal = ul;
- return true;
- }
- else if (value is nint ni)
- {
- unchecked
- {
- ulVal = (ulong)ni;
- }
- return true;
- }
- else if (value is nuint nuint)
- {
- ulVal = nuint;
- return true;
- }
-
- ulVal = 0;
- return false;
- }
- }
-
- private sealed class DmlThread : DmlExec
- {
- protected override string GetCommand(string outputText, object value)
- {
- if (value is uint id)
- {
- return $"~~[{id:x}]s";
- }
-
- if (value is ClrThread thread)
- {
- return $"~~[{thread.OSThreadId:x}]s";
- }
-
- return null;
- }
- }
-
- private class DmlDumpObject : DmlExec
- {
- protected override string GetCommand(string outputText, object value)
- {
- bool isValid = true;
- if (value is ClrObject obj)
- {
- isValid = obj.IsValid;
- }
-
- value = Format.Unwrap(value);
- if (IsNullOrZeroValue(value, out string result))
- {
- return result;
- }
-
- return isValid ? $"!dumpobj /d {value:x}" : $"!verifyobj {value:x}";
- }
-
- protected override string GetAltText(string outputText, object value)
- {
- if (value is ClrObject obj)
- {
- if (obj.IsValid)
- {
- return obj.Type?.Name;
- }
-
- return "Invalid Object";
- }
-
- return null;
- }
- }
-
- private sealed class DmlListNearObj : DmlDumpObject
- {
- protected override string GetCommand(string outputText, object value)
- {
- value = Format.Unwrap(value);
- if (IsNullOrZeroValue(value, out string result))
- {
- return result;
- }
-
- return $"!listnearobj {value:x}";
- }
- }
-
- private sealed class DmlDumpHeap : DmlExec
- {
- protected override string GetCommand(string outputText, object value)
- {
- if (value is null)
- {
- return null;
- }
-
- if (TryGetMethodTableOrTypeHandle(value, out ulong mtOrTh))
- {
- // !dumpheap will only work on a method table
- if ((mtOrTh & 2) == 2)
- {
- // Can't use typehandles
- return null;
- }
- else if ((mtOrTh & 1) == 1)
- {
- // Clear mark bit
- value = mtOrTh & ~1ul;
- }
-
- if (mtOrTh == 0)
- {
- return null;
- }
-
- return $"!dumpheap -mt {value:x}";
- }
-
- if (value is ClrSegment seg)
- {
- return $"!dumpheap -segment {seg.Address:x}";
- }
-
- if (value is MemoryRange range)
- {
- return $"!dumpheap {range.Start:x} {range.End:x}";
- }
-
- if (value is ClrSubHeap subHeap)
- {
- return $"!dumpheap -heap {subHeap.Index}";
- }
-
- Debug.Fail($"Unknown cannot use type {value.GetType().FullName} with DumpObj");
- return null;
- }
-
- private static bool TryGetMethodTableOrTypeHandle(object value, out ulong mtOrTh)
- {
- if (TryGetPointerValue(value, out mtOrTh))
- {
- return true;
- }
-
- if (value is ClrType type)
- {
- mtOrTh = type.MethodTable;
- return true;
- }
-
- mtOrTh = 0;
- return false;
- }
-
- protected override string GetAltText(string outputText, object value)
- {
- if (value is ClrType type)
- {
- return type.Name;
- }
-
- return null;
- }
- }
-
- private sealed class DmlDumpDomain : DmlExec
- {
- protected override string GetCommand(string outputText, object value)
- {
- value = Format.Unwrap(value);
- if (IsNullOrZeroValue(value, out string result))
- {
- return result;
- }
-
- return $"!dumpdomain /d {value:x}";
- }
-
- protected override string GetAltText(string outputText, object value)
- {
- if (value is ClrAppDomain domain)
- {
- return domain.Name;
- }
-
- return null;
- }
- }
- }
-}
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System.Text;
-using System.Xml.Linq;
-
-namespace Microsoft.Diagnostics.ExtensionCommands.Output
-{
- internal abstract class DmlFormat
- {
- // intentionally not shared with Format
- private static readonly StringBuilderPool s_stringBuilderPool = new();
-
- public virtual string FormatValue(string outputText, object value)
- {
- StringBuilder sb = s_stringBuilderPool.Rent();
-
- FormatValue(sb, outputText, value);
- string result = sb.ToString();
- s_stringBuilderPool.Return(sb);
- return result;
- }
-
- public abstract void FormatValue(StringBuilder sb, string outputText, object value);
-
- protected static string DmlEscape(string text)
- {
- if (string.IsNullOrWhiteSpace(text))
- {
- return text;
- }
-
- return new XText(text).ToString();
- }
- }
-}
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System.Diagnostics;
-using System.Text;
-using Microsoft.Diagnostics.Runtime;
-
-namespace Microsoft.Diagnostics.ExtensionCommands.Output
-{
- internal class Format
- {
- private static StringBuilderPool s_stringBuilderPool = new();
-
- /// <summary>
- /// Returns true if a format of this type should never be truncated. If true,
- /// DEBUG builds of SOS Assert.Fail if attempting to truncate the value of the
- /// column. In release builds, we will simply not truncate the value, resulting
- /// in a jagged looking table, but usable output.
- /// </summary>
- public bool CanTruncate { get; protected set; }
-
- public Format() { }
- public Format(bool canTruncate) => CanTruncate = canTruncate;
-
- // Unwraps an object to get at what should be formatted.
- internal static object Unwrap(object value)
- {
- return value switch
- {
- ClrObject obj => obj.Address,
- ClrAppDomain domain => domain.Address,
- ClrType type => type.MethodTable,
- ClrSegment seg => seg.Address,
- ClrThread thread => thread.Address,
- ClrSubHeap subHeap => subHeap.Index,
- _ => value
- };
- }
-
- public virtual string FormatValue(object value, int maxLength, bool truncateBegin)
- {
- StringBuilder sb = s_stringBuilderPool.Rent();
-
- FormatValue(sb, value, maxLength, truncateBegin);
- string result = sb.ToString();
-
- s_stringBuilderPool.Return(sb);
- return TruncateString(result, maxLength, truncateBegin);
- }
-
- public virtual int FormatValue(StringBuilder sb, object value, int maxLength, bool truncateBegin)
- {
- int currLength = sb.Length;
- sb.Append(value);
- TruncateStringBuilder(sb, maxLength, sb.Length - currLength, truncateBegin);
-
- return sb.Length - currLength;
- }
-
- protected string TruncateString(string result, int maxLength, bool truncateBegin)
- {
- if (maxLength >= 0 && result.Length > maxLength)
- {
- if (CanTruncate)
- {
- if (maxLength <= 3)
- {
- result = new string('.', maxLength);
- }
- else if (truncateBegin)
- {
- result = "..." + result.Substring(result.Length - (maxLength - 3));
- }
- else
- {
- result = result.Substring(0, maxLength - 3) + "...";
- }
- }
- else
- {
- Debug.Fail("Tried to truncate a column we should never truncate.");
- }
- }
-
- Debug.Assert(maxLength < 0 || result.Length <= maxLength);
- return result;
- }
-
- protected void TruncateStringBuilder(StringBuilder result, int maxLength, int lengthWritten, bool truncateBegin)
- {
- Debug.Assert(lengthWritten >= 0);
-
- if (maxLength >= 0 && lengthWritten > maxLength)
- {
- if (CanTruncate)
- {
- if (truncateBegin)
- {
- int start = result.Length - lengthWritten;
- int wrote;
- for (wrote = 0; wrote < 3 && wrote < maxLength; wrote++)
- {
- result[start + wrote] = '.';
- }
-
- int gap = lengthWritten - maxLength;
- for (; wrote < maxLength; wrote++)
- {
- result[start + wrote] = result[start + wrote + gap];
- }
-
- result.Length = start + maxLength;
- }
- else
- {
- result.Length = result.Length - lengthWritten + maxLength;
- for (int i = 0; i < maxLength && i < 3; i++)
- {
- result[result.Length - i - 1] = '.';
- }
- }
- }
- else
- {
- Debug.Fail("Tried to truncate a column we should never truncate.");
- }
- }
- }
- }
-}
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System;
-using System.Collections.Generic;
-using System.Linq;
-using System.Text;
-using Microsoft.Diagnostics.Runtime;
-
-namespace Microsoft.Diagnostics.ExtensionCommands.Output
-{
- internal static class Formats
- {
- private static HexValueFormat s_hexOffsetFormat;
- private static HexValueFormat s_hexValueFormat;
- private static Format s_text;
- private static IntegerFormat s_integerFormat;
- private static TypeOrImageFormat s_typeNameFormat;
- private static TypeOrImageFormat s_imageFormat;
- private static IntegerFormat s_integerWithoutCommaFormat;
- private static HumanReadableFormat s_humanReadableFormat;
- private static RangeFormat s_range;
-
- static Formats()
- {
- int pointerSize = IntPtr.Size;
- Pointer = new IntegerFormat(pointerSize == 4 ? "x8" : "x12");
- }
-
- public static Format Pointer { get; }
-
- public static Format HexOffset => s_hexOffsetFormat ??= new(printPrefix: true, signed: true);
- public static Format HexValue => s_hexValueFormat ??= new(printPrefix: true, signed: false);
- public static Format Integer => s_integerFormat ??= new("n0");
- public static Format IntegerWithoutCommas => s_integerWithoutCommaFormat ??= new("");
- public static Format Text => s_text ??= new(true);
- public static Format TypeName => s_typeNameFormat ??= new(type: true);
- public static Format Image => s_imageFormat ??= new(type: false);
- public static Format HumanReadableSize => s_humanReadableFormat ??= new();
- public static Format Range => s_range ??= new();
-
- private sealed class IntegerFormat : Format
- {
- private readonly string _format;
-
- public IntegerFormat(string format)
- {
- _format = "{0:" + format + "}";
- }
-
- public override int FormatValue(StringBuilder result, object value, int maxLength, bool truncateBegin)
- {
- value = Unwrap(value);
-
- int startLength = result.Length;
- switch (value)
- {
- case null:
- break;
-
- case nuint nui:
- result.AppendFormat(_format, (ulong)nui);
- break;
-
- case nint ni:
- unchecked
- {
- result.AppendFormat(_format, (ulong)ni);
- }
- break;
-
- default:
- result.AppendFormat(_format, value);
- break;
- }
-
- TruncateStringBuilder(result, maxLength, result.Length - startLength, truncateBegin);
- return result.Length - startLength;
- }
- }
-
- /// <summary>
- /// Unlike plain text, this Format always truncates the beginning of the type name or image path,
- /// as the most important part is at the end.
- /// </summary>
- private sealed class TypeOrImageFormat : Format
- {
- private const string UnknownTypeName = "Unknown";
- private readonly bool _type;
-
- public TypeOrImageFormat(bool type)
- : base(canTruncate: true)
- {
- _type = type;
- }
-
- public override int FormatValue(StringBuilder sb, object value, int maxLength, bool truncateBegin)
- {
- int startLength = sb.Length;
-
- if (!_type)
- {
- sb.Append(value);
- }
- else
- {
- if (value is null)
- {
- sb.Append(UnknownTypeName);
- }
- else if (value is ClrType type)
- {
- string typeName = type.Name;
- if (!string.IsNullOrWhiteSpace(typeName))
- {
- sb.Append(typeName);
- }
- else
- {
- string module = type.Module?.Name;
- if (!string.IsNullOrWhiteSpace(module))
- {
- try
- {
- module = System.IO.Path.GetFileNameWithoutExtension(module);
- sb.Append(module);
- sb.Append('!');
- }
- catch (ArgumentException)
- {
- }
- }
-
- sb.Append(UnknownTypeName);
- if (type.MethodTable != 0)
- {
- sb.Append($" (MethodTable: ");
- sb.AppendFormat("{0:x12}", type.MethodTable);
- sb.Append(')');
- }
- }
- }
- else
- {
- sb.Append(value);
- }
- }
-
- TruncateStringBuilder(sb, maxLength, sb.Length - startLength, truncateBegin: true);
- return sb.Length - startLength;
- }
- }
-
- private sealed class HumanReadableFormat : Format
- {
- public override int FormatValue(StringBuilder sb, object value, int maxLength, bool truncateBegin)
- {
- string humanReadable = value switch
- {
- null => null,
- int i => ((long)i).ConvertToHumanReadable(),
- uint ui => ((ulong)ui).ConvertToHumanReadable(),
- long l => l.ConvertToHumanReadable(),
- ulong ul => ul.ConvertToHumanReadable(),
- float f => ((double)f).ConvertToHumanReadable(),
- double d => d.ConvertToHumanReadable(),
- nuint nu => ((ulong)nu).ConvertToHumanReadable(),
- nint ni => ((long)ni).ConvertToHumanReadable(),
- string s => s,
- _ => throw new NotSupportedException($"Cannot convert '{value.GetType().FullName}' to a human readable size.")
- };
-
- if (!string.IsNullOrWhiteSpace(humanReadable))
- {
- return base.FormatValue(sb, humanReadable, maxLength, truncateBegin);
- }
-
- return 0;
- }
- }
-
- private sealed class HexValueFormat : Format
- {
- public bool PrintPrefix { get; }
- public bool Signed { get; }
-
- public HexValueFormat(bool printPrefix, bool signed)
- {
- PrintPrefix = printPrefix;
- Signed = signed;
- }
-
- private string GetStringValue(long offset)
- {
- if (Signed)
- {
- if (PrintPrefix)
- {
- return offset < 0 ? $"-0x{Math.Abs(offset):x2}" : $"0x{offset:x2}";
- }
- else
- {
- return offset < 0 ? $"-{Math.Abs(offset):x2}" : $"{offset:x2}";
- }
- }
-
- return PrintPrefix ? $"0x{offset:x2}" : offset.ToString("x2");
- }
-
- private string GetHexOffsetString(object value)
- {
- return value switch
- {
- null => "",
- string s => s,
- nint ni => GetStringValue(ni),
- nuint nui => PrintPrefix ? $"0x{nui:x2}" : "{nui:x2}",
- ulong ul => PrintPrefix ? $"0x{ul:x2}" : ul.ToString("x2"),
- long l => GetStringValue(l),
- int i => GetStringValue(i),
- uint u => PrintPrefix ? $"0x{u:x2}" : u.ToString("x2"),
- IEnumerable<byte> bytes => (PrintPrefix ? "0x" : "") + string.Join("", bytes.Select(b => b.ToString("x2"))),
- _ => throw new InvalidOperationException($"Cannot convert value of type {value.GetType().FullName} to a HexOffset")
- };
- }
-
- public override int FormatValue(StringBuilder sb, object value, int maxLength, bool truncateBegin)
- {
- int startLength = sb.Length;
- sb.Append(GetHexOffsetString(value));
- TruncateStringBuilder(sb, maxLength, sb.Length - startLength, truncateBegin);
-
- return sb.Length - startLength;
- }
- }
-
- private sealed class RangeFormat : Format
- {
- public override int FormatValue(StringBuilder sb, object value, int maxLength, bool truncateBegin)
- {
- int startLength = sb.Length;
- if (value is MemoryRange range)
- {
- sb.AppendFormat("{0:x}", range.Start);
- sb.Append('-');
- sb.AppendFormat("{0:x}", range.End);
-
- return sb.Length - startLength;
- }
-
- return base.FormatValue(sb, value, maxLength, truncateBegin);
- }
- }
- }
-}
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System.Text;
-
-namespace Microsoft.Diagnostics.ExtensionCommands.Output
-{
- internal sealed class StringBuilderPool
- {
- private StringBuilder _stringBuilder;
- private readonly int _initialCapacity;
-
- public StringBuilderPool(int initialCapacity = 64)
- {
- _initialCapacity = initialCapacity > 0 ? initialCapacity : 0;
- }
-
- // This code all assumes SOS runs single threaded. We would want to change this
- // code to use Interlocked.Exchange if that ever changes.
- public StringBuilder Rent()
- {
- StringBuilder sb = _stringBuilder;
- _stringBuilder = null;
-
- if (sb is null)
- {
- sb = new StringBuilder(_initialCapacity);
- }
- else
- {
- sb.Clear();
- }
-
- return sb;
- }
-
- public void Return(StringBuilder sb)
- {
- if (sb.Capacity < 1024)
- {
- _stringBuilder = sb;
- }
- }
- }
-}
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System;
-using System.Diagnostics;
-using System.Linq;
-using System.Text;
-using Microsoft.Diagnostics.DebugServices;
-
-namespace Microsoft.Diagnostics.ExtensionCommands.Output
-{
- internal class Table
- {
- protected readonly StringBuilderPool _stringBuilderPool = new();
- protected string _spacing = " ";
- protected static readonly Column s_headerColumn = new(Align.Center, -1, Formats.Text, Dml.Bold);
-
- public string Indent { get; set; } = "";
-
- public IConsoleService Console { get; }
-
- public int TotalWidth => 1 * (Columns.Length - 1) + Columns.Sum(c => Math.Abs(c.Width));
-
- public Column[] Columns { get; set; }
-
- public Table(IConsoleService console, params Column[] columns)
- {
- Columns = columns.ToArray();
- Console = console;
- }
-
- public void SetAlignment(Align align)
- {
- for (int i = 0; i < Columns.Length; i++)
- {
- Columns[i] = Columns[i].WithAlignment(align);
- }
- }
-
- public virtual void WriteHeader(params string[] values)
- {
- IncreaseColumnWidth(values);
- WriteHeaderFooter(values);
- }
-
- public virtual void WriteFooter(params object[] values)
- {
- WriteHeaderFooter(values);
- }
-
- protected void IncreaseColumnWidth(string[] values)
- {
- // Increase column width if too small
- for (int i = 0; i < Columns.Length && i < values.Length; i++)
- {
- if (Columns.Length >= 0 && values[i].Length > Columns.Length)
- {
- if (Columns[i].Width != -1 && Columns[i].Width < values[i].Length)
- {
- Columns[i] = Columns[i].WithWidth(values[i].Length);
- }
- }
- }
- }
-
- public virtual void WriteRow(params object[] values)
- {
- StringBuilder rowBuilder = _stringBuilderPool.Rent();
- rowBuilder.Append(Indent);
-
- WriteRowWorker(values, rowBuilder, _spacing);
-
- _stringBuilderPool.Return(rowBuilder);
- }
-
- protected void WriteRowWorker(object[] values, StringBuilder rowBuilder, string spacing, bool writeLine = true)
- {
- bool isRowBuilderDml = false;
-
- for (int i = 0; i < values.Length; i++)
- {
- if (i != 0)
- {
- rowBuilder.Append(spacing);
- }
-
- Column column = i < Columns.Length ? Columns[i] : ColumnKind.Text;
-
- bool isColumnDml = Console.SupportsDml && column.Dml is not null;
- if (isRowBuilderDml != isColumnDml)
- {
- WriteAndClearRowBuilder(rowBuilder, isRowBuilderDml);
- isRowBuilderDml = isColumnDml;
- }
-
- Append(column, rowBuilder, values[i]);
- }
-
- if (writeLine)
- {
- rowBuilder.AppendLine();
- }
-
- WriteAndClearRowBuilder(rowBuilder, isRowBuilderDml);
- }
-
- private void WriteAndClearRowBuilder(StringBuilder rowBuilder, bool dml)
- {
- if (rowBuilder.Length != 0)
- {
- if (dml)
- {
- Console.WriteDml(rowBuilder.ToString());
- }
- else
- {
- Console.Write(rowBuilder.ToString());
- }
-
- rowBuilder.Clear();
- }
- }
-
- private void Append(Column column, StringBuilder sb, object value)
- {
- DmlFormat dml = null;
- if (Console.SupportsDml)
- {
- dml = column.Dml;
- }
-
- // Efficient case
- if (dml is null && column.Alignment == Align.Left)
- {
- int written = column.Format.FormatValue(sb, value, column.Width, column.Alignment == Align.Left);
- Debug.Assert(written >= 0);
- if (written < column.Width)
- {
- sb.Append(' ', column.Width - written);
- }
-
- return;
- }
-
- string toWrite = column.Format.FormatValue(value, column.Width, column.Alignment == Align.Left);
- int displayLength = toWrite.Length;
- if (dml is not null)
- {
- toWrite = dml.FormatValue(toWrite, value);
- }
-
- if (column.Width < 0)
- {
- sb.Append(toWrite);
- }
- else
- {
- if (column.Alignment == Align.Left)
- {
- sb.Append(toWrite);
- if (displayLength < column.Width)
- {
- sb.Append(' ', column.Width - displayLength);
- }
-
- return;
- }
- else if (column.Alignment == Align.Right)
- {
- sb.Append(' ', column.Width - displayLength);
- sb.Append(toWrite);
- }
- else
- {
- Debug.Assert(column.Alignment == Align.Center);
-
- int remainder = column.Width - displayLength;
- int right = remainder >> 1;
- int left = right + (remainder % 2);
-
- sb.Append(' ', left);
- sb.Append(toWrite);
- sb.Append(' ', right);
- }
- }
- }
-
- protected virtual void WriteHeaderFooter(object[] values, bool writeSides = false, bool writeNewline = true)
- {
- StringBuilder rowBuilder = _stringBuilderPool.Rent();
- rowBuilder.Append(Indent);
-
- if (writeSides)
- {
- rowBuilder.Append(_spacing);
- }
-
- for (int i = 0; i < values.Length; i++)
- {
- if (i != 0)
- {
- rowBuilder.Append(_spacing);
- }
-
- Column curr = i < Columns.Length ? Columns[i] : s_headerColumn;
- if (Console.SupportsDml)
- {
- curr = curr.WithDml(Dml.Bold);
- }
- else
- {
- curr = curr.WithDml(null);
- }
-
- Append(curr, rowBuilder, values[i]);
- }
-
- if (writeSides)
- {
- rowBuilder.Append(_spacing);
- }
-
- if (writeNewline)
- {
- rowBuilder.AppendLine();
- }
-
- if (Console.SupportsDml)
- {
- Console.WriteDml(rowBuilder.ToString());
- }
- else
- {
- Console.Write(rowBuilder.ToString());
- }
-
- _stringBuilderPool.Return(rowBuilder);
- }
- }
-}
GCRoot.ChainLink path = gcroot.FindPathFrom(sourceObj);
if (path is not null)
{
- GCRootCommand.PrintPath(Console, RootCache, null, heap, path);
+ GCRootCommand.PrintPath(Console, RootCache, heap, path);
}
else
{
using System.Collections.Generic;
using System.Linq;
using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
using Microsoft.Diagnostics.Runtime;
-using static Microsoft.Diagnostics.ExtensionCommands.Output.ColumnKind;
+using static Microsoft.Diagnostics.ExtensionCommands.TableOutput;
namespace Microsoft.Diagnostics.ExtensionCommands
{
}
else
{
- Table output = new(Console, DumpObj, Pointer, HexValue, HexValue, Text);
- output.WriteHeader("Object", "ModifiedAddr", "Old Value", "New Value", "Expected Failure");
+ TableOutput output = new(Console, (12, "x12"), (12, "x12"), (16, "x"), (16, "x"), (0, ""));
+ output.WriteRow("Object", "ModifiedAddr", "Old Value", "New Value", "Expected Failure");
foreach (Change change in _changes)
{
- output.WriteRow(change.Object, change.AddressModified, change.OriginalValue.Reverse(), change.NewValue.Reverse(), change.ExpectedFailure);
+ output.WriteRow(new DmlDumpObj(change.Object), change.AddressModified, change.OriginalValue.Reverse(), change.NewValue.Reverse(), change.ExpectedFailure);
}
}
ClrObject[] withRefs = FindObjectsWithReferences().Take(3).ToArray();
if (withRefs.Length >= 1)
{
- (ClrObject Object, ulong FirstReference) entry = GetFirstReference(withRefs[0]);
+ (ulong Object, ulong FirstReference) entry = GetFirstReference(withRefs[0]);
WriteValue(ObjectCorruptionKind.InvalidObjectReference, entry.Object, entry.FirstReference, 0xcccccccc);
}
if (withRefs.Length >= 2)
ulong free = Runtime.Heap.EnumerateObjects().FirstOrDefault(f => f.IsFree);
if (free != 0)
{
- (ClrObject Object, ulong FirstReference) entry = GetFirstReference(withRefs[1]);
+ (ulong Object, ulong FirstReference) entry = GetFirstReference(withRefs[1]);
WriteValue(ObjectCorruptionKind.FreeObjectReference, entry.Object, entry.FirstReference, free);
}
}
if (withRefs.Length >= 3)
{
- (ClrObject Object, ulong FirstReference) entry = GetFirstReference(withRefs[2]);
+ (ulong Object, ulong FirstReference) entry = GetFirstReference(withRefs[2]);
WriteValue(ObjectCorruptionKind.ObjectReferenceNotPointerAligned, entry.Object, entry.FirstReference, (byte)1);
}
List();
}
- private static (ClrObject Object, ulong FirstReference) GetFirstReference(ClrObject obj)
+ private static (ulong Object, ulong FirstReference) GetFirstReference(ClrObject obj)
{
return (obj, obj.EnumerateReferenceAddresses().First());
}
}
}
- private unsafe void WriteValue<T>(ObjectCorruptionKind kind, ClrObject obj, ulong address, T value)
+ private unsafe void WriteValue<T>(ObjectCorruptionKind kind, ulong obj, ulong address, T value)
where T : unmanaged
{
byte[] old = new byte[sizeof(T)];
private sealed class Change
{
- public ClrObject Object { get; set; }
+ public ulong Object { get; set; }
public ulong AddressModified { get; set; }
public byte[] OriginalValue { get; set; }
public byte[] NewValue { get; set; }
using System.Collections.Generic;
using System.Linq;
using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
using Microsoft.Diagnostics.Runtime;
namespace Microsoft.Diagnostics.ExtensionCommands
Console.WriteLine($"Size Statistics for {requestedGen.ToString().ToLowerInvariant()} {freeStr}objects");
Console.WriteLine();
+ TableOutput output = new(Console, (16, "n0"), (16, "n0"), (16, "n0"), (16, "n0"));
+ output.WriteRow("Size", "Count", "Cumulative Size", "Cumulative Count");
+
IEnumerable<(ulong Size, ulong Count)> sorted = from i in stats
orderby i.Key ascending
select (i.Key, i.Value);
ulong cumulativeSize = 0;
ulong cumulativeCount = 0;
- Table output = null;
foreach ((ulong size, ulong count) in sorted)
{
Console.CancellationToken.ThrowIfCancellationRequested();
- if (output is null)
- {
- output = new(Console, ColumnKind.ByteCount, ColumnKind.Integer, ColumnKind.Integer, ColumnKind.Integer);
- output.WriteHeader("Size", "Count", "Cumulative Size", "Cumulative Count");
- }
-
- output.WriteRow(size, count, cumulativeSize, cumulativeCount);
-
cumulativeSize += size * count;
cumulativeCount += count;
- }
-
- if (output is null)
- {
- Console.WriteLine("(none)");
+ output.WriteRow(size, count, cumulativeSize, cumulativeCount);
}
Console.WriteLine();
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System.Collections.Generic;
-using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.Runtime;
-
-namespace Microsoft.Diagnostics.ExtensionCommands
-{
- [ServiceExport(Scope = ServiceScope.Runtime)]
- public class StaticVariableService
- {
- private Dictionary<ulong, ClrStaticField> _fields;
- private IEnumerator<(ulong Address, ClrStaticField Static)> _enumerator;
-
- [ServiceImport]
- public ClrRuntime Runtime { get; set; }
-
- /// <summary>
- /// Returns the static field at the given address.
- /// </summary>
- /// <param name="address">The address of the static field. Note that this is not a pointer to
- /// an object, but rather a pointer to where the CLR runtime tracks the static variable's
- /// location. In all versions of the runtime, address will live in the middle of a pinned
- /// object[].</param>
- /// <param name="field">The field corresponding to the given address. Non-null if return
- /// is true.</param>
- /// <returns>True if the address corresponded to a static variable, false otherwise.</returns>
- public bool TryGetStaticByAddress(ulong address, out ClrStaticField field)
- {
- if (_fields is null)
- {
- _fields = new();
- _enumerator = EnumerateStatics().GetEnumerator();
- }
-
- if (_fields.TryGetValue(address, out field))
- {
- return true;
- }
-
- // pay for play lookup
- if (_enumerator is not null)
- {
- do
- {
- _fields[_enumerator.Current.Address] = _enumerator.Current.Static;
- if (_enumerator.Current.Address == address)
- {
- field = _enumerator.Current.Static;
- return true;
- }
- } while (_enumerator.MoveNext());
-
- _enumerator = null;
- }
-
- return false;
- }
-
- public IEnumerable<(ulong Address, ClrStaticField Static)> EnumerateStatics()
- {
- ClrAppDomain shared = Runtime.SharedDomain;
-
- foreach (ClrModule module in Runtime.EnumerateModules())
- {
- foreach ((ulong mt, _) in module.EnumerateTypeDefToMethodTableMap())
- {
- ClrType type = Runtime.GetTypeByMethodTable(mt);
- if (type is null)
- {
- continue;
- }
-
- foreach (ClrStaticField stat in type.StaticFields)
- {
- foreach (ClrAppDomain domain in Runtime.AppDomains)
- {
- ulong address = stat.GetAddress(domain);
- if (address != 0)
- {
- yield return (address, stat);
- }
- }
-
- if (shared is not null)
- {
- ulong address = stat.GetAddress(shared);
- if (address != 0)
- {
- yield return (address, stat);
- }
- }
- }
- }
- }
- }
- }
-}
--- /dev/null
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using System.Text;
+using Microsoft.Diagnostics.DebugServices;
+using Microsoft.Diagnostics.Runtime;
+
+namespace Microsoft.Diagnostics.ExtensionCommands
+{
+ internal sealed class TableOutput
+ {
+ private readonly StringBuilder _rowBuilder = new(260);
+ private readonly char _spacing = ' ';
+
+ public string Divider { get; set; } = " ";
+
+ public string Indent { get; set; } = "";
+
+ public bool AlignLeft { get; set; }
+
+ public int ColumnCount => _formats.Length;
+
+ public IConsoleService Console { get; }
+
+ public int TotalWidth => 1 * (_formats.Length - 1) + _formats.Sum(c => Math.Abs(c.width));
+
+ private readonly (int width, string format)[] _formats;
+
+ public TableOutput(IConsoleService console, params (int width, string format)[] columns)
+ {
+ _formats = columns.ToArray();
+ Console = console;
+ }
+
+ public void WriteSpacer(char spacer)
+ {
+ Console.WriteLine(new string(spacer, Divider.Length * (_formats.Length - 1) + _formats.Sum(c => Math.Abs(c.width))));
+ }
+
+ public void WriteRow(params object[] columns)
+ {
+ _rowBuilder.Clear();
+ _rowBuilder.Append(Indent);
+
+ for (int i = 0; i < columns.Length; i++)
+ {
+ if (i != 0)
+ {
+ _rowBuilder.Append(_spacing);
+ }
+
+ (int width, string format) = i < _formats.Length ? _formats[i] : default;
+ FormatColumn(_spacing, columns[i], _rowBuilder, width, format);
+ }
+
+ Console.WriteLine(_rowBuilder.ToString());
+ }
+
+ public void WriteRowWithSpacing(char spacing, params object[] columns)
+ {
+ _rowBuilder.Clear();
+ _rowBuilder.Append(Indent);
+
+ for (int i = 0; i < columns.Length; i++)
+ {
+ if (i != 0)
+ {
+ _rowBuilder.Append(spacing, Divider.Length);
+ }
+
+ (int width, string format) = i < _formats.Length ? _formats[i] : default;
+
+ FormatColumn(spacing, columns[i], _rowBuilder, width, format);
+ }
+
+ Console.WriteLine(_rowBuilder.ToString());
+ }
+
+ private void FormatColumn(char spacing, object value, StringBuilder sb, int width, string format)
+ {
+ string action = null;
+ string text;
+ if (value is DmlExec dml)
+ {
+ value = dml.Text;
+ if (Console.SupportsDml)
+ {
+ action = dml.Action;
+ }
+ }
+
+ if (string.IsNullOrWhiteSpace(format))
+ {
+ text = value?.ToString();
+ }
+ else
+ {
+ text = Format(value, format);
+ }
+
+ AddValue(spacing, sb, width, text ?? "", action);
+ }
+
+ private void AddValue(char spacing, StringBuilder sb, int width, string value, string action)
+ {
+ bool leftAlign = AlignLeft ? width > 0 : width < 0;
+ width = Math.Abs(width);
+
+ if (width == 0)
+ {
+ if (string.IsNullOrWhiteSpace(action))
+ {
+ sb.Append(value);
+ }
+ else
+ {
+ WriteAndClear(sb);
+ Console.WriteDmlExec(value, action);
+ }
+ }
+ else if (value.Length > width)
+ {
+ if (!string.IsNullOrWhiteSpace(action))
+ {
+ WriteAndClear(sb);
+ }
+
+ if (width <= 3)
+ {
+ sb.Append(value, 0, width);
+ }
+ else if (leftAlign)
+ {
+ value = value.Substring(0, width - 3);
+ sb.Append(value);
+ sb.Append("...");
+ }
+ else
+ {
+ value = value.Substring(value.Length - (width - 3));
+ sb.Append("...");
+ sb.Append(value);
+ }
+
+ if (!string.IsNullOrWhiteSpace(action))
+ {
+ WriteDmlExecAndClear(sb, action);
+ }
+ }
+ else if (leftAlign)
+ {
+ if (!string.IsNullOrWhiteSpace(action))
+ {
+ WriteAndClear(sb);
+ Console.WriteDmlExec(value, action);
+ }
+ else
+ {
+ sb.Append(value);
+ }
+
+ int remaining = width - value.Length;
+ if (remaining > 0)
+ {
+ sb.Append(spacing, remaining);
+ }
+ }
+ else
+ {
+ int remaining = width - value.Length;
+ if (remaining > 0)
+ {
+ sb.Append(spacing, remaining);
+ }
+
+ if (!string.IsNullOrWhiteSpace(action))
+ {
+ WriteAndClear(sb);
+ Console.WriteDmlExec(value, action);
+ }
+ else
+ {
+ sb.Append(value);
+ }
+ }
+ }
+
+ private void WriteDmlExecAndClear(StringBuilder sb, string action)
+ {
+ Console.WriteDmlExec(sb.ToString(), action);
+ sb.Clear();
+ }
+
+ private void WriteAndClear(StringBuilder sb)
+ {
+ Console.Write(sb.ToString());
+ sb.Clear();
+ }
+
+ private static string Format(object obj, string format)
+ {
+ if (obj is null)
+ {
+ return null;
+ }
+
+ if (obj is Enum)
+ {
+ return obj.ToString();
+ }
+
+ return obj switch
+ {
+ nint ni => ni.ToString(format),
+ ulong ul => ul.ToString(format),
+ long l => l.ToString(format),
+ uint ui => ui.ToString(format),
+ int i => i.ToString(format),
+ StringBuilder sb => sb.ToString(),
+ IEnumerable<byte> bytes => string.Join("", bytes.Select(b => b.ToString("x2"))),
+ string s => s,
+ _ => throw new NotImplementedException(obj.GetType().ToString()),
+ };
+ }
+
+ public class DmlExec
+ {
+ public object Text { get; }
+ public string Action { get; }
+
+ public DmlExec(object text, string action)
+ {
+ Text = text;
+ Action = action;
+ }
+ }
+
+ public sealed class DmlDumpObj : DmlExec
+ {
+ public DmlDumpObj(ulong address)
+ : base(address, address != 0 ? $"!dumpobj /d {address:x}" : "")
+ {
+ }
+ }
+
+ public sealed class DmlListNearObj : DmlExec
+ {
+ public DmlListNearObj(ulong address)
+ : base(address, address != 0 ? $"!sos listnearobj {address:x}" : "")
+ {
+ }
+ }
+
+ public sealed class DmlVerifyObj : DmlExec
+ {
+ public DmlVerifyObj(ulong address)
+ : base(address, address != 0 ? $"!verifyobj /d {address:x}" : "")
+ {
+ }
+ }
+
+ public sealed class DmlDumpHeap : DmlExec
+ {
+ public DmlDumpHeap(string text, MemoryRange range)
+ : base(text, $"!dumpheap {range.Start:x} {range.End:x}")
+ {
+ }
+
+ public DmlDumpHeap(ulong methodTable)
+ : base (methodTable, methodTable != 0 ? $"!dumpheap -mt {methodTable:x}" : "")
+ {
+ }
+ }
+
+ public sealed class DmlVerifyHeap : DmlExec
+ {
+ public DmlVerifyHeap(string text, ClrSegment what)
+ : base(text, $"!verifyheap -segment {what.Address}")
+ {
+ }
+ }
+
+ public sealed class DmlDumpHeapSegment : DmlExec
+ {
+ public DmlDumpHeapSegment(ClrSegment seg)
+ : base(seg?.Address ?? 0, seg != null ? $"!dumpheap -segment {seg.Address:x}" : "")
+ {
+ }
+ }
+ }
+}
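
For orientation, here is a minimal sketch of how the re-introduced TableOutput is driven by the callers above. It assumes the snippet is compiled into the same assembly (the class is internal) and that an IConsoleService instance is supplied by the command host; TableOutputSample, PrintObjectRow, and the parameter names are illustrative only.

```csharp
using Microsoft.Diagnostics.DebugServices;
using static Microsoft.Diagnostics.ExtensionCommands.TableOutput;

namespace Microsoft.Diagnostics.ExtensionCommands
{
    internal static class TableOutputSample
    {
        // Prints a header row and one data row for a single heap object.
        internal static void PrintObjectRow(IConsoleService console, ulong address, ulong size, string typeName)
        {
            // Each column is a (width, format) tuple. Width 0 means "as wide as the
            // value"; with the default AlignLeft == false, positive widths right-align
            // and negative widths left-align.
            TableOutput table = new(console, (16, "x16"), (12, "n0"), (0, ""));

            table.WriteRow("Object", "Size", "Type");

            // DmlDumpObj renders the address as a "!dumpobj /d <addr>" DML link when
            // the console supports DML; otherwise it falls back to the plain value.
            table.WriteRow(new DmlDumpObj(address), size, typeName);
        }
    }
}
```
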
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System;
-using System.Collections.Generic;
-using System.Linq;
-using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
-using Microsoft.Diagnostics.Runtime;
-using static Microsoft.Diagnostics.ExtensionCommands.Output.ColumnKind;
-
-namespace Microsoft.Diagnostics.ExtensionCommands
-{
- [Command(Name = "threadpool", Help = "Displays info about the runtime thread pool.")]
- public sealed class ThreadPoolCommand : CommandBase
- {
- [ServiceImport]
- public ClrRuntime Runtime { get; set; }
-
- [Option(Name = "-ti", Help = "Print the hill climbing log.", Aliases = new string[] { "-hc" })]
- public bool PrintHillClimbingLog { get; set; }
-
- [Option(Name = "-wi", Help = "Print all work items that are queued.")]
- public bool PrintWorkItems { get; set; }
-
- public override void Invoke()
- {
- // Runtime.ThreadPool shouldn't be null unless there was a problem with the dump.
- ClrThreadPool threadPool = Runtime.ThreadPool;
- if (threadPool is null)
- {
- Console.WriteLineError("Failed to obtain ThreadPool data.");
- }
- else
- {
- Table output = new(Console, Text.WithWidth(17), Text);
- output.WriteRow("CPU utilization:", $"{threadPool.CpuUtilization}%");
- output.WriteRow("Workers Total:", threadPool.ActiveWorkerThreads + threadPool.IdleWorkerThreads + threadPool.RetiredWorkerThreads);
- output.WriteRow("Workers Running:", threadPool.ActiveWorkerThreads);
- output.WriteRow("Workers Idle:", threadPool.IdleWorkerThreads);
- output.WriteRow("Worker Min Limit:", threadPool.MinThreads);
- output.WriteRow("Worker Max Limit:", threadPool.MaxThreads);
- Console.WriteLine();
-
- ClrType threadPoolType = Runtime.BaseClassLibrary.GetTypeByName("System.Threading.ThreadPool");
- ClrStaticField usePortableIOField = threadPoolType?.GetStaticFieldByName("UsePortableThreadPoolForIO");
-
- // Desktop CLR work items.
- if (PrintWorkItems)
- {
- LegacyThreadPoolWorkRequest[] requests = threadPool.EnumerateLegacyWorkRequests().ToArray();
- if (requests.Length > 0)
- {
- Console.WriteLine($"Work Request in Queue: {requests.Length:n0}");
- foreach (LegacyThreadPoolWorkRequest request in requests)
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- if (request.IsAsyncTimerCallback)
- {
- Console.WriteLine($" AsyncTimerCallbackCompletion TimerInfo@{request.Context:x}");
- }
- else
- {
- Console.WriteLine($" Unknown Function: {request.Function:x} Context: {request.Context:x}");
- }
- }
- }
- }
-
- // We will assume that if UsePortableThreadPoolForIO field is deleted from ThreadPool then we are always
- // using C# version.
- bool usingPortableCompletionPorts = threadPool.Portable && (usePortableIOField is null || usePortableIOField.Read<bool>(usePortableIOField.Type.Module.AppDomain));
- if (!usingPortableCompletionPorts)
- {
- output.Columns[0] = output.Columns[0].WithWidth(19);
- output.WriteRow("Completion Total:", threadPool.TotalCompletionPorts);
- output.WriteRow("Completion Free:", threadPool.FreeCompletionPorts);
- output.WriteRow("Completion MaxFree:", threadPool.MaxFreeCompletionPorts);
-
- output.Columns[0] = output.Columns[0].WithWidth(25);
- output.WriteRow("Completion Current Limit:", threadPool.CompletionPortCurrentLimit);
- output.WriteRow("Completion Min Limit:", threadPool.MinCompletionPorts);
- output.WriteRow("Completion Max Limit:", threadPool.MaxCompletionPorts);
- Console.WriteLine();
- }
-
- if (PrintHillClimbingLog)
- {
- HillClimbingLogEntry[] hcl = threadPool.EnumerateHillClimbingLog().ToArray();
- if (hcl.Length > 0)
- {
- output = new(Console, Text.WithWidth(10).WithAlignment(Align.Right), Column.ForEnum<HillClimbingTransition>(), Integer, Integer, Text.WithAlignment(Align.Right));
-
- Console.WriteLine("Hill Climbing Log:");
- output.WriteHeader("Time", "Transition", "#New Threads", "#Samples", "Throughput");
-
- int end = hcl.Last().TickCount;
- foreach (HillClimbingLogEntry entry in hcl)
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
- output.WriteRow($"{(entry.TickCount - end)/1000.0:0.00}", entry.StateOrTransition, entry.NewThreadCount, entry.SampleCount, $"{entry.Throughput:0.00}");
- }
-
- Console.WriteLine();
- }
- }
- }
-
- // We can print managed work items even if we failed to request the ThreadPool.
- if (PrintWorkItems && (threadPool is null || threadPool.Portable))
- {
- DumpWorkItems();
- }
- }
-
- private void DumpWorkItems()
- {
- Table output = null;
-
- ClrType workQueueType = Runtime.BaseClassLibrary.GetTypeByName("System.Threading.ThreadPoolWorkQueue");
- ClrType workStealingQueueType = Runtime.BaseClassLibrary.GetTypeByName("System.Threading.ThreadPoolWorkQueue+WorkStealingQueue");
-
- foreach (ClrObject obj in Runtime.Heap.EnumerateObjects())
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- if (obj.Type == workQueueType)
- {
- if (obj.TryReadObjectField("highPriorityWorkItems", out ClrObject workItems))
- {
- foreach (ClrObject entry in EnumerateConcurrentQueue(workItems))
- {
- WriteEntry(ref output, entry, isHighPri: true);
- }
- }
-
- if (obj.TryReadObjectField("workItems", out workItems))
- {
- foreach (ClrObject entry in EnumerateConcurrentQueue(workItems))
- {
- WriteEntry(ref output, entry, isHighPri: false);
- }
- }
-
- if (obj.Type.Fields.Any(r => r.Name == "_assignableWorkItems"))
- {
- if (obj.TryReadObjectField("_assignableWorkItems", out workItems))
- {
- foreach (ClrObject entry in EnumerateConcurrentQueue(workItems))
- {
- WriteEntry(ref output, entry, isHighPri: false);
- }
- }
- }
- }
- else if (obj.Type == workStealingQueueType)
- {
- if (obj.TryReadObjectField("m_array", out ClrObject m_array) && m_array.IsValid && !m_array.IsNull)
- {
- ClrArray arrayView = m_array.AsArray();
- int len = Math.Min(8192, arrayView.Length); // ensure a sensible max in case we have heap corruption
-
- nuint[] buffer = arrayView.ReadValues<nuint>(0, len);
- if (buffer != null)
- {
- for (int i = 0; i < len; i++)
- {
- if (buffer[i] != 0)
- {
- ClrObject entry = Runtime.Heap.GetObject(buffer[i]);
- if (entry.IsValid && !entry.IsNull)
- {
- WriteEntry(ref output, entry, isHighPri: false);
- }
- }
- }
- }
- }
- }
- }
- }
-
- private void WriteEntry(ref Table output, ClrObject entry, bool isHighPri)
- {
- if (output is null)
- {
- output = new(Console, Text.WithWidth(17), DumpObj, TypeName);
- output.SetAlignment(Align.Left);
- output.WriteHeader("Queue", "Object", "Type");
- }
-
- output.WriteRow(isHighPri ? "[Global high-pri]" : "[Global]", entry, entry.Type);
- if (entry.IsDelegate)
- {
- ClrDelegate del = entry.AsDelegate();
- ClrDelegateTarget target = del.GetDelegateTarget();
- if (target is not null)
- {
- Console.WriteLine($" => {target.TargetObject.Address:x} {target.Method.Name}");
- }
- }
- }
-
- private IEnumerable<ClrObject> EnumerateConcurrentQueue(ClrObject concurrentQueue)
- {
- if (!concurrentQueue.IsValid || concurrentQueue.IsNull)
- {
- yield break;
- }
-
- if (concurrentQueue.TryReadObjectField("_head", out ClrObject curr))
- {
- while (curr.IsValid && !curr.IsNull)
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- if (curr.TryReadObjectField("_slots", out ClrObject slots) && slots.IsValid && slots.IsArray)
- {
- ClrArray slotsArray = slots.AsArray();
- for (int i = 0; i < slotsArray.Length; i++)
- {
- Console.CancellationToken.ThrowIfCancellationRequested();
-
- ClrObject item = slotsArray.GetStructValue(i).ReadObjectField("Item");
- if (item.IsValid && !item.IsNull)
- {
- yield return item;
- }
- }
- }
-
- if (!curr.TryReadObjectField("_nextSegment", out ClrObject next))
- {
- if (curr.Type is not null && curr.Type.GetFieldByName("_nextSegment") == null)
- {
- Console.WriteLineError($"Error: Type '{curr.Type?.Name}' does not contain a '_nextSegment' field.");
- }
-
- break;
- }
-
- curr = next;
- }
- }
- }
- }
-}
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System;
-using System.Collections.Generic;
-using System.IO;
-using System.Linq;
-using System.Text;
-using System.Xml;
-using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.Runtime;
-
-namespace Microsoft.Diagnostics.ExtensionCommands
-{
- [Command(Name = "traverseheap", Help = "Writes out heap information to a file in a format understood by the CLR Profiler.")]
- public class TraverseHeapCommand : CommandBase
- {
- [ServiceImport]
- public ClrRuntime Runtime { get; set; }
-
- [ServiceImport]
- public RootCacheService RootCache { get; set; }
-
- [Option(Name = "-xml")]
- public bool Xml { get; set; }
-
- [Argument(Name = "filename")]
- public string Filename { get; set; }
-
- public override void Invoke()
- {
- if (string.IsNullOrWhiteSpace(Filename))
- {
- throw new ArgumentException($"Output filename cannot be empty.", nameof(Filename));
- }
-
- // create file early in case it throws
- using StreamWriter output = File.CreateText(Filename);
- using (XmlWriter xml = Xml ? XmlWriter.Create(output, new XmlWriterSettings() { Encoding = new UTF8Encoding(true), Indent = true, OmitXmlDeclaration = true }) : null)
- {
- using StreamWriter text = Xml ? null : output;
-
- // must be called first to initialize types
- (MemoryStream rootObjectStream, Dictionary<ClrType, int> types) = WriteRootsAndObjects();
-
- xml?.WriteStartElement("gcheap");
- xml?.WriteStartElement("types");
-
- foreach (KeyValuePair<ClrType, int> kv in types.OrderBy(kv => kv.Value))
- {
- string name = kv.Key?.Name ?? $"error-reading-type-name:{kv.Key.MethodTable:x}";
- int typeId = kv.Value;
-
- xml?.WriteStartElement("type");
- xml?.WriteAttributeString("id", typeId.ToString());
- xml?.WriteAttributeString("name", name);
- xml?.WriteEndElement();
-
- text?.WriteLine($"t {typeId} 0 {name}");
- }
- xml?.WriteEndElement();
-
- xml?.Flush();
- text?.Flush();
-
- output.WriteLine();
- output.Flush();
-
- rootObjectStream.Position = 0;
- rootObjectStream.CopyTo(output.BaseStream);
-
- xml?.WriteEndElement();
- }
- }
-
- private (MemoryStream Stream, Dictionary<ClrType, int> Types) WriteRootsAndObjects()
- {
- Dictionary<ClrType, int> types = new();
- MemoryStream rootObjectStream = new();
-
- using StreamWriter text = Xml ? null : new StreamWriter(rootObjectStream, Encoding.Default, 4096, leaveOpen: true);
- using XmlWriter xml = Xml ? XmlWriter.Create(rootObjectStream, new XmlWriterSettings()
- {
- Encoding = new UTF8Encoding(false),
- CloseOutput = false,
- Indent = true,
- OmitXmlDeclaration = true,
- ConformanceLevel = ConformanceLevel.Fragment
- }) : null;
-
- int currObj = 1;
- int currType = 1;
-
- xml?.WriteStartElement("roots");
- text?.Write("r");
- foreach (ClrRoot root in RootCache.EnumerateRoots())
- {
- string kind = root switch
- {
- ClrStackRoot => "stack",
- ClrHandle => "handle",
- _ => "finalizer"
- };
-
- xml?.WriteStartElement("root");
- xml?.WriteAttributeString("kind", kind);
- xml?.WriteAttributeString("address", FormatHex(root.Address));
- xml?.WriteEndElement();
-
- text?.Write(" ");
- text?.Write(FormatHex(root.Address));
- }
- xml?.WriteEndElement();
- text?.WriteLine();
-
- xml?.WriteStartElement("objects");
- foreach (ClrObject obj in Runtime.Heap.EnumerateObjects())
- {
- if (!obj.IsValid)
- {
- continue;
- }
-
- ulong size = obj.Size;
- int objId = currObj++;
- if (!types.TryGetValue(obj.Type, out int typeId))
- {
- typeId = types[obj.Type] = currType++;
- }
-
- xml?.WriteStartElement("object");
- xml?.WriteAttributeString("address", FormatHex(obj.Address));
- xml?.WriteAttributeString("typeid", typeId.ToString());
- xml?.WriteAttributeString("size", size.ToString());
-
- text?.WriteLine($"n {objId} 1 {typeId} {size}");
- text?.WriteLine($"! 1 {FormatHex(obj.Address)} {objId}");
-
- text?.Write($"o {FormatHex(obj.Address)} {typeId} {size} "); // trailing space intentional
-
- if (obj.ContainsPointers)
- {
- foreach (ClrObject objRef in obj.EnumerateReferences(considerDependantHandles: true))
- {
- xml?.WriteStartElement("member");
- xml?.WriteAttributeString("address", FormatHex(objRef.Address));
- xml?.WriteEndElement();
-
- text?.Write($" ");
- text?.Write(FormatHex(objRef.Address));
- }
- }
-
- text?.WriteLine();
- xml?.WriteEndElement();
- }
- xml?.WriteEndElement();
-
- return (rootObjectStream, types);
- }
-
- private static string FormatHex(ulong address) => $"0x{address:x16}";
- }
-}
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
+using System;
using System.Collections.Generic;
using System.Diagnostics;
+using System.Linq;
using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
using Microsoft.Diagnostics.Runtime;
-using static Microsoft.Diagnostics.ExtensionCommands.Output.ColumnKind;
+using static Microsoft.Diagnostics.ExtensionCommands.TableOutput;
namespace Microsoft.Diagnostics.ExtensionCommands
{
objects = EnumerateWithCount(objects);
int errors = 0;
- Table output = null;
+ TableOutput output = null;
ClrHeap heap = Runtime.Heap;
// Verify heap
}
}
- private void WriteError(ref Table output, ClrHeap heap, ObjectCorruption corruption)
+ private void WriteError(ref TableOutput output, ClrHeap heap, ObjectCorruption corruption)
{
string message = GetObjectCorruptionMessage(MemoryService, heap, corruption);
WriteRow(ref output, heap, corruption, message);
ObjectCorruptionKind.ObjectNotPointerAligned => $"Object {obj.Address:x} is not pointer aligned",
// Object failures
- ObjectCorruptionKind.ObjectTooLarge => $"Object {obj.Address:x} is too large, size={obj.Size:x}, segmentEnd: {heap.GetSegmentByAddress(obj)?.End.ToString("x") ?? "???"}",
+ ObjectCorruptionKind.ObjectTooLarge => $"Object {obj.Address:x} is too large, size={obj.Size:x}, segmentEnd: {ValueWithError(heap.GetSegmentByAddress(obj)?.End)}",
ObjectCorruptionKind.InvalidMethodTable => $"Object {obj.Address:x} has an invalid method table {ReadPointerWithError(memory, obj):x}",
ObjectCorruptionKind.InvalidThinlock => $"Object {obj.Address:x} has an invalid thin lock",
ObjectCorruptionKind.SyncBlockMismatch => GetSyncBlockFailureMessage(corruption),
ObjectCorruptionKind.SyncBlockZero => GetSyncBlockFailureMessage(corruption),
// Object reference failures
- ObjectCorruptionKind.ObjectReferenceNotPointerAligned => $"Object {obj.Address:x} has an unaligned member at offset {corruption.Offset:x}: is not pointer aligned",
+ ObjectCorruptionKind.ObjectReferenceNotPointerAligned => $"Object {obj.Address:x} has an unaligned member at {corruption.Offset:x}: is not pointer aligned",
ObjectCorruptionKind.InvalidObjectReference => $"Object {obj.Address:x} has a bad member at offset {corruption.Offset:x}: {ReadPointerWithError(memory, obj + (uint)corruption.Offset)}",
ObjectCorruptionKind.FreeObjectReference => $"Object {obj.Address:x} contains free object at offset {corruption.Offset:x}: {ReadPointerWithError(memory, obj + (uint)corruption.Offset)}",
return message;
}
- private void WriteRow(ref Table output, ClrHeap heap, ObjectCorruption corruption, string message)
+ private void WriteRow(ref TableOutput output, ClrHeap heap, ObjectCorruption corruption, string message)
{
if (output is null)
{
if (heap.IsServer)
{
- output = new(Console, IntegerWithoutCommas.WithWidth(4), Pointer, ListNearObj, Column.ForEnum<ObjectCorruptionKind>(), Text);
- output.SetAlignment(Align.Left);
+ output = new(Console, (-4, ""), (-12, "x12"), (-12, "x12"), (32, ""), (0, ""))
+ {
+ AlignLeft = true,
+ };
- output.WriteHeader("Heap", "Segment", "Object", "Failure", "Reason");
+ output.WriteRow("Heap", "Segment", "Object", "Failure", "");
}
else
{
- output = new(Console, Pointer, ListNearObj, Column.ForEnum<ObjectCorruptionKind>(), Text);
- output.SetAlignment(Align.Left);
+ output = new(Console, (-12, "x12"), (-12, "x12"), (22, ""), (0, ""))
+ {
+ AlignLeft = true,
+ };
- output.WriteHeader("Segment", "Object", "Failure", "Reason");
+ output.WriteRow("Segment", "Object", "Failure", "");
}
}
+
ClrSegment segment = heap.GetSegmentByAddress(corruption.Object);
- object[] columns = new object[output.Columns.Length];
+ object[] columns = new object[output.ColumnCount];
int i = 0;
if (heap.IsServer)
{
- columns[i++] = (object)segment?.SubHeap.Index ?? "???";
+ columns[i++] = ValueWithError(segment?.SubHeap.Index, format: "", error: "");
}
- columns[i++] = (object)segment ?? "???";
- columns[i++] = corruption.Object;
+ columns[i++] = ValueWithError(segment?.Address, format: "x12", error: "");
+ columns[i++] = new DmlExec(corruption.Object.Address, $"!ListNearObj {corruption.Object.Address:x}");
columns[i++] = corruption.Kind;
columns[i++] = message;
return result;
}
+ private static string ValueWithError(int? value, string format = "x", string error = "???")
+ {
+ if (value.HasValue)
+ {
+ return value.Value.ToString(format);
+ }
+
+ return error;
+ }
+
+ private static string ValueWithError(ulong? value, string format = "x", string error = "???")
+ {
+ if (value.HasValue)
+ {
+ return value.Value.ToString(format);
+ }
+
+ return error;
+ }
+
private static string ReadPointerWithError(IMemoryService memory, ulong address)
{
if (memory.ReadPointer(address, out ulong value))
+++ /dev/null
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-
-using System;
-using System.Collections.Generic;
-using System.Linq;
-using Microsoft.Diagnostics.DebugServices;
-using Microsoft.Diagnostics.ExtensionCommands.Output;
-using Microsoft.Diagnostics.Runtime;
-using static Microsoft.Diagnostics.ExtensionCommands.Output.ColumnKind;
-
-namespace Microsoft.Diagnostics.ExtensionCommands
-{
- [Command(Name = "verifyobj", Help = "Checks the given object for signs of corruption.")]
- public sealed class VerifyObjectCommand : CommandBase
- {
- [ServiceImport]
- public ClrRuntime Runtime { get; set; }
-
- [ServiceImport]
- public IMemoryService Memory { get; set; }
-
- [Argument(Name = "ObjectAddress", Help = "The object to verify.")]
- public string ObjectAddress { get; set; }
-
- public override void Invoke()
- {
- if (!TryParseAddress(ObjectAddress, out ulong objAddress))
- {
- throw new ArgumentException($"Invalid object address: '{ObjectAddress}'", nameof(ObjectAddress));
- }
-
- bool isNotCorrupt = Runtime.Heap.FullyVerifyObject(objAddress, out IEnumerable<ObjectCorruption> corruptionEnum);
- if (isNotCorrupt)
- {
- Console.WriteLine($"object 0x{objAddress:x} is a valid object");
- return;
- }
-
- ObjectCorruption[] corruption = corruptionEnum.OrderBy(r => r.Offset).ToArray();
-
- Column offsetColumn = HexOffset.WithAlignment(Align.Left);
- offsetColumn = offsetColumn.GetAppropriateWidth(corruption.Select(r => r.Offset));
-
- Table output = new(Console, offsetColumn, Column.ForEnum<ObjectCorruptionKind>(), Text);
- output.WriteHeader("Offset", "Issue", "Description");
- foreach (ObjectCorruption oc in corruption)
- {
- output.WriteRow(oc.Offset, oc.Kind, VerifyHeapCommand.GetObjectCorruptionMessage(Memory, Runtime.Heap, oc));
- }
-
- Console.WriteLine();
- Console.WriteLine($"{corruption.Length:n0} error{(corruption.Length == 1 ? "" : "s")} detected.");
- }
- }
-}
}
}
- internal class UpDownCounterPayload : CounterPayload
- {
- public UpDownCounterPayload(string providerName, string name, string displayName, string displayUnits, string metadata, double value, DateTime timestamp) :
- base(providerName, name, metadata, value, timestamp, "Metric", EventType.UpDownCounter)
- {
- // In case these properties are not provided, set them to appropriate values.
- string counterName = string.IsNullOrEmpty(displayName) ? name : displayName;
- DisplayName = !string.IsNullOrEmpty(displayUnits) ? $"{counterName} ({displayUnits})" : counterName;
- }
- }
-
internal class CounterEndedPayload : CounterPayload
{
public CounterEndedPayload(string providerName, string name, DateTime timestamp)
Rate,
Gauge,
Histogram,
- UpDownCounter,
Error,
CounterEnded
}
{
HandleCounterRate(traceEvent, filter, sessionId, out payload);
}
- else if (traceEvent.EventName == "UpDownCounterRateValuePublished")
- {
- HandleUpDownCounterValue(traceEvent, filter, sessionId, out payload);
- }
else if (traceEvent.EventName == "TimeSeriesLimitReached")
{
HandleTimeSeriesLimitReached(traceEvent, sessionId, out payload);
else
{
// for observable instruments we assume the lack of data is meaningful and remove it from the UI
- // this happens when the ObservableCounter callback function throws an exception
- // or when the ObservableCounter doesn't include a measurement for a particular set of tag values.
- payload = new CounterEndedPayload(meterName, instrumentName, traceEvent.TimeStamp);
- }
- }
-
- private static void HandleUpDownCounterValue(TraceEvent traceEvent, CounterFilter filter, string sessionId, out ICounterPayload payload)
- {
- payload = null;
-
- string payloadSessionId = (string)traceEvent.PayloadValue(0);
-
- if (payloadSessionId != sessionId || traceEvent.Version < 1) // Version 1 added the value field.
- {
- return;
- }
-
- string meterName = (string)traceEvent.PayloadValue(1);
- //string meterVersion = (string)obj.PayloadValue(2);
- string instrumentName = (string)traceEvent.PayloadValue(3);
- string unit = (string)traceEvent.PayloadValue(4);
- string tags = (string)traceEvent.PayloadValue(5);
- //string rateText = (string)traceEvent.PayloadValue(6); // Not currently using rate for UpDownCounters.
- string valueText = (string)traceEvent.PayloadValue(7);
-
- if (!filter.IsIncluded(meterName, instrumentName))
- {
- return;
- }
-
- if (double.TryParse(valueText, NumberStyles.Number | NumberStyles.Float, CultureInfo.InvariantCulture, out double value))
- {
- // UpDownCounter reports the value, not the rate - this is different than how Counter behaves.
- payload = new UpDownCounterPayload(meterName, instrumentName, null, unit, tags, value, traceEvent.TimeStamp);
-
- }
- else
- {
- // for observable instruments we assume the lack of data is meaningful and remove it from the UI
- // this happens when the ObservableUpDownCounter callback function throws an exception
- // or when the ObservableUpDownCounter doesn't include a measurement for a particular set of tag values.
+ // this happens when the ObservableCounter callback function throws an exception.
payload = new CounterEndedPayload(meterName, instrumentName, traceEvent.TimeStamp);
}
}
<ItemGroup>
<PackageReference Include="Microsoft.Diagnostics.Tracing.TraceEvent" Version="$(MicrosoftDiagnosticsTracingTraceEventVersion)" />
- <PackageReference Include="Microsoft.Extensions.Logging" Version="$(MicrosoftExtensionsLoggingVersion)" />
+ <PackageReference Include="Microsoft.Extensions.Logging" Version="$(MicrosoftExtensionsLoggingPinnedVersion)" />
<PackageReference Include="System.ComponentModel.Annotations" Version="$(SystemComponentModelAnnotationsVersion)" />
<PackageReference Include="System.Text.Encodings.Web" Version="$(SystemTextEncodingsWebVersion)" />
<PackageReference Include="System.Text.Json" Version="$(SystemTextJsonVersion)" />
using System;
using System.Buffers.Binary;
using System.Collections.Generic;
-using System.Diagnostics;
using System.Globalization;
using System.IO;
using System.Linq;
continue;
}
- try
- {
- Process.GetProcessById(processId);
- }
- catch (ArgumentException)
- {
- continue;
- }
-
yield return processId;
}
}
</PropertyGroup>
<ItemGroup>
- <PackageReference Include="Microsoft.Extensions.Logging" Version="$(MicrosoftExtensionsLoggingVersion)" />
- </ItemGroup>
-
- <ItemGroup Condition="'$(TargetFrameworkIdentifier)' != '.NETCoreApp'" >
- <PackageReference Include="Microsoft.Bcl.AsyncInterfaces" Version="$(MicrosoftBclAsyncInterfacesVersion)" />
- <PackageReference Include="System.Buffers" Version="$(SystemBuffersVersion)" />
+ <PackageReference Condition="'$(TargetFrameworkIdentifier)' != '.NETCoreApp'" Include="Microsoft.Bcl.AsyncInterfaces" Version="$(MicrosoftBclAsyncInterfacesVersion)" />
+ <PackageReference Include="Microsoft.Extensions.Logging" Version="$(MicrosoftExtensionsLoggingPinnedVersion)" />
</ItemGroup>
<ItemGroup>
public void WriteDmlExec(string text, string cmd)
{
- if (!SupportsDml || string.IsNullOrWhiteSpace(cmd))
- {
- Write(text);
- }
- else
- {
- string dml = $"<exec cmd=\"{DmlEscape(cmd)}\">{DmlEscape(text)}</exec>";
- WriteDml(dml);
- }
+ string dml = $"<exec cmd=\"{DmlEscape(cmd)}\">{DmlEscape(text)}</exec>";
+ WriteDml(dml);
}
public bool SupportsDml => _supportsDml ??= _debuggerServices.SupportsDml;
#endregion
- private static string DmlEscape(string text) => string.IsNullOrWhiteSpace(text) ? text : new XText(text).ToString();
+ private static string DmlEscape(string text) => new XText(text).ToString();
}
}
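
As a quick standalone illustration of the simplified WriteDmlExec path above: XText performs the XML escaping, and the command and text values below are made up for the example.

```csharp
using System;
using System.Xml.Linq;

internal static class DmlEscapeSample
{
    // Mirrors the helper above: XText escapes XML-significant characters such as '<', '>' and '&'.
    private static string DmlEscape(string text) => new XText(text).ToString();

    private static void Main()
    {
        string text = "System.Collections.Generic.List<int>";
        string cmd = "!dumpheap -type System.Collections.Generic.List<int>";

        string dml = $"<exec cmd=\"{DmlEscape(cmd)}\">{DmlEscape(text)}</exec>";
        Console.WriteLine(dml);
        // Output:
        // <exec cmd="!dumpheap -type System.Collections.Generic.List&lt;int&gt;">System.Collections.Generic.List&lt;int&gt;</exec>
    }
}
```
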
{
_moduleService = moduleService;
ModuleIndex = moduleIndex;
- FileName = imageName ?? string.Empty;
+ FileName = imageName;
ImageBase = imageBase;
ImageSize = imageSize;
IndexFileSize = indexTimeStamp == InvalidTimeStamp ? null : indexFileSize;
[Command(Name = "dumpmodule", DefaultOptions = "DumpModule", Help = "Displays information about a EE module structure at the specified address.")]
[Command(Name = "dumpmt", DefaultOptions = "DumpMT", Help = "Displays information about a method table at the specified address.")]
[Command(Name = "dumpobj", DefaultOptions = "DumpObj", Aliases = new string[] { "do" }, Help = "Displays info about an object at the specified address.")]
+ [Command(Name = "dumpruntimetypes", DefaultOptions = "DumpRuntimeTypes", Help = "Finds all System.RuntimeType objects in the GC heap and prints the type name and MethodTable they refer too.")]
[Command(Name = "dumpsig", DefaultOptions = "DumpSig", Help = "Dumps the signature of a method or field specified by <sigaddr> <moduleaddr>.")]
[Command(Name = "dumpsigelem", DefaultOptions = "DumpSigElem", Help = "Dumps a single element of a signature object.")]
+ [Command(Name = "dumpstackobjects", DefaultOptions = "DumpStackObjects", Aliases = new string[] { "dso" }, Help = "Displays all managed objects found within the bounds of the current stack.")]
[Command(Name = "dumpvc", DefaultOptions = "DumpVC", Help = "Displays info about the fields of a value class.")]
[Command(Name = "eeversion", DefaultOptions = "EEVersion", Help = "Displays information about the runtime version.")]
[Command(Name = "ehinfo", DefaultOptions = "EHInfo", Help = "Displays the exception handling blocks in a JIT-ed method.")]
[Command(Name = "enummem", DefaultOptions = "enummem", Help = "ICLRDataEnumMemoryRegions.EnumMemoryRegions test command.")]
+ [Command(Name = "finalizequeue", DefaultOptions = "FinalizeQueue", Help = "Displays all objects registered for finalization.")]
[Command(Name = "findappdomain", DefaultOptions = "FindAppDomain", Help = "Attempts to resolve the AppDomain of a GC object.")]
[Command(Name = "gchandles", DefaultOptions = "GCHandles", Help = "Provides statistics about GCHandles in the process.")]
[Command(Name = "gcinfo", DefaultOptions = "GCInfo", Help = "Displays JIT GC encoding for a method.")]
[Command(Name = "printexception", DefaultOptions = "PrintException", Aliases = new string[] { "pe" }, Help = "Displays and formats fields of any object derived from the Exception class at the specified address.")]
[Command(Name = "soshelp", DefaultOptions = "Help", Help = "Displays help for a specific SOS command.")]
[Command(Name = "syncblk", DefaultOptions = "SyncBlk", Help = "Displays the SyncBlock holder info.")]
+ [Command(Name = "threadpool", DefaultOptions = "ThreadPool", Help = "Lists basic information about the thread pool.")]
[Command(Name = "threadstate", DefaultOptions = "ThreadState", Help = "Pretty prints the meaning of a threads state.")]
+ [Command(Name = "traverseheap", DefaultOptions = "TraverseHeap", Help = "Writes out heap information to a file in a format understood by the CLR Profiler.")]
+ [Command(Name = "verifyobj", DefaultOptions = "VerifyObj", Help = "Checks the object for signs of corruption.")]
[Command(Name = "comstate", DefaultOptions = "COMState", Flags = CommandFlags.Windows, Help = "Lists the COM apartment model for each thread.")]
[Command(Name = "dumprcw", DefaultOptions = "DumpRCW", Flags = CommandFlags.Windows, Help = "Displays information about a Runtime Callable Wrapper.")]
[Command(Name = "dumpccw", DefaultOptions = "DumpCCW", Flags = CommandFlags.Windows, Help = "Displays information about a COM Callable Wrapper.")]
IntPtr self,
uint threadId,
uint contextFlags,
- int contextSize,
+ uint contextSize,
IntPtr context)
{
byte[] registerContext;
}
try
{
- Marshal.Copy(registerContext, 0, context, Math.Min(registerContext.Length, contextSize));
+ Marshal.Copy(registerContext, 0, context, (int)contextSize);
}
catch (Exception ex) when (ex is ArgumentOutOfRangeException or ArgumentNullException)
{
[In] IntPtr self,
[In] uint threadId,
[In] uint contextFlags,
- [In] int contextSize,
+ [In] uint contextSize,
[Out] IntPtr context);
#endregion
}
try
{
- Marshal.Copy(registerContext, 0, context, Math.Min(registerContext.Length, contextSize));
+ Marshal.Copy(registerContext, 0, context, (int)contextSize);
}
catch (Exception ex) when (ex is ArgumentOutOfRangeException or ArgumentNullException)
{
private delegate int GetThreadContextDelegate(
[In] IntPtr self,
[In] IntPtr context,
- [In] int contextSize);
+ [In] uint contextSize);
[UnmanagedFunctionPointer(CallingConvention.Winapi)]
private delegate int SetThreadContextDelegate(
[In] IntPtr self,
[In] IntPtr context,
- [In] int contextSize);
+ [In] uint contextSize);
#endregion
}
IntPtr self,
uint threadId,
uint contextFlags,
- int contextSize,
+ uint contextSize,
IntPtr context);
[UnmanagedFunctionPointer(CallingConvention.Winapi)]
internal int GetThreadContext(
IntPtr self,
IntPtr context,
- int contextSize)
+ uint contextSize)
{
IThread thread = ContextService.GetCurrentThread();
if (thread is not null)
IntPtr self,
uint threadId,
uint contextFlags,
- int contextSize,
+ uint contextSize,
IntPtr context)
{
byte[] registerContext;
}
try
{
- Marshal.Copy(registerContext, 0, context, Math.Min(registerContext.Length, contextSize));
+ Marshal.Copy(registerContext, 0, context, (int)contextSize);
}
catch (Exception ex) when (ex is ArgumentOutOfRangeException or ArgumentNullException)
{
internal static int SetThreadContext(
IntPtr self,
IntPtr context,
- int contextSize)
+ uint contextSize)
{
return DebugClient.NotImplemented;
}
<LogDir>$(RootBinDir)/TestResults/$(TargetConfiguration)/sos.unittests_$(Timestamp)</LogDir>
<DumpDir>$(RootBinDir)/tmp/$(TargetConfiguration)\dumps</DumpDir>
- <SetHostExe>true</SetHostExe>
- <SetFxVersion>true</SetFxVersion>
-
<TestWebApp3>true</TestWebApp3>
<TestWebApp3 Condition="'$(InternalReleaseTesting)' == 'true'">false</TestWebApp3>
<Option Condition="'$(AspNetCoreVersionLatest)' != ''">
<BuildProjectFramework>$(BuildProjectFrameworkLatest)</BuildProjectFramework>
<RuntimeFrameworkVersion>$(RuntimeVersionLatest)</RuntimeFrameworkVersion>
- <!-- This turns off the -fx-version to the dotnet host allowing it use the correct runtime version -->
- <SetFxVersion>false</SetFxVersion>
</Option>
<Option Condition="'$(AspNetCoreVersion70)' != ''">
<BuildProjectFramework>net7.0</BuildProjectFramework>
<LLDBHelperScript>$(ScriptRootDir)/lldbhelper.py</LLDBHelperScript>
<!-- Single-file debuggees don't need the host -->
- <SetHostExe Condition="'$(PublishSingleFile)' == 'true'">false</SetHostExe>
- <SetFxVersion Condition="'$(PublishSingleFile)' == 'true'">false</SetFxVersion>
-
- <HostExe Condition="'$(SetHostExe)' == 'true'">$(DotNetRoot)/dotnet</HostExe>
- <HostArgs Condition="'$(SetFxVersion)' == 'true'">--fx-version $(FrameworkVersion)</HostArgs>
+ <HostExe Condition="'$(PublishSingleFile)' != 'true'">$(DotNetRoot)/dotnet</HostExe>
+ <HostArgs Condition="'$(PublishSingleFile)' != 'true'">--fx-version $(FrameworkVersion)</HostArgs>
<Options>
<Option Condition="$(OS) == Linux">
<DumpDir>$(RootBinDir)\tmp\$(TargetConfiguration)\dumps</DumpDir>
<CDBHelperExtension>$(InstallDir)\runcommand.dll</CDBHelperExtension>
- <SetHostExe>true</SetHostExe>
- <SetFxVersion>true</SetFxVersion>
-
<TestWebApp3>true</TestWebApp3>
<TestWebApp3 Condition="'$(InternalReleaseTesting)' == 'true'">false</TestWebApp3>
<Option Condition="'$(AspNetCoreVersionLatest)' != ''">
<BuildProjectFramework>$(BuildProjectFrameworkLatest)</BuildProjectFramework>
<RuntimeFrameworkVersion>$(RuntimeVersionLatest)</RuntimeFrameworkVersion>
- <!-- This turns off the -fx-version to the dotnet host allowing it use the correct runtime version -->
- <SetFxVersion>false</SetFxVersion>
</Option>
<Option Condition="'$(AspNetCoreVersion70)' != ''">
<BuildProjectFramework>net7.0</BuildProjectFramework>
<FrameworkVersion Condition="'$(FrameworkVersion)' == ''">$(RuntimeFrameworkVersion)</FrameworkVersion>
<RuntimeSymbolsPath>$(DotNetRoot)\shared\Microsoft.NETCore.App\$(RuntimeFrameworkVersion)</RuntimeSymbolsPath>
-
+
<!-- Single-file debuggees don't need the host -->
- <SetHostExe Condition="'$(PublishSingleFile)' == 'true'">false</SetHostExe>
- <SetFxVersion Condition="'$(PublishSingleFile)' == 'true'">false</SetFxVersion>
-
- <HostExe Condition="'$(SetHostExe)' == 'true'">$(DotNetRoot)\dotnet.exe</HostExe>
- <HostArgs Condition="'$(SetFxVersion)' == 'true'">--fx-version $(FrameworkVersion)</HostArgs>
+ <HostExe Condition="'$(PublishSingleFile)' != 'true'">$(DotNetRoot)\dotnet.exe</HostExe>
+ <HostArgs Condition="'$(PublishSingleFile)' != 'true'">--fx-version $(FrameworkVersion)</HostArgs>
</Option>
<!--
Desktop Runtime (debuggees cli built)
// This debuggee needs the directory of the exes/dlls to load the SymbolTestDll assembly.
await SOSTestHelpers.RunTest(
- scriptName: "StackAndOtherTests.script", new SOSRunner.TestInformation
+ scriptName: "StackAndOtherTests.script",
+ new SOSRunner.TestInformation
{
TestConfiguration = currentConfig,
TestName = "SOS.StackAndOtherTests",
}
else
{
- program = Environment.GetEnvironmentVariable("PYTHONPATH");
- if (program == null)
- {
- // We should verify what python version this is. 2.7 is out of
- // support for a while now, but we have old OS's.
- program = "/usr/bin/python";
- }
+ // We should verify which python version this is. 2.7 has been out of
+ // support for a while now, but we still have old OSes.
+ program = "/usr/bin/python";
if (!File.Exists(program))
{
throw new ArgumentException($"{program} does not exist");
public string DumpNameSuffix { get; set; }
- public bool EnableSOSLogging { get; set; } = true;
-
public bool TestCrashReport
{
get { return _testCrashReport && DumpGenerator == DumpGenerator.CreateDump && OS.Kind != OSKind.Windows && TestConfiguration.RuntimeFrameworkVersionMajor >= 6; }
{
// Setup the logging from the options in the config file
outputHelper = TestRunner.ConfigureLogging(config, information.OutputHelper, information.TestName);
- string sosLogFile = information.EnableSOSLogging ? Path.Combine(config.LogDirPath, $"{information.TestName}.{config.LogSuffix}.soslog") : null;
// Figure out which native debugger to use
NativeDebugger debugger = GetNativeDebuggerToUse(config, action);
{
throw new ArgumentException("LLDB helper script path not set or does not exist: " + lldbHelperScript);
}
- arguments.Append(@"--no-lldbinit -o ""settings set target.disable-aslr false"" -o ""settings set interpreter.prompt-on-quit false""");
- arguments.AppendFormat(@" -o ""command script import {0}"" -o ""version""", lldbHelperScript);
+ arguments.AppendFormat(@"--no-lldbinit -o ""settings set interpreter.prompt-on-quit false"" -o ""command script import {0}"" -o ""version""", lldbHelperScript);
string debuggeeTarget = config.HostExe;
if (string.IsNullOrWhiteSpace(debuggeeTarget))
break;
}
-
// Create the native debugger process running
ProcessRunner processRunner = new ProcessRunner(debuggerPath, ReplaceVariables(variables, arguments.ToString())).
WithEnvironmentVariable("DOTNET_MULTILEVEL_LOOKUP", "0").
processRunner.WithExpectedExitCode(0);
}
- if (sosLogFile != null)
- {
- processRunner.WithEnvironmentVariable("DOTNET_ENABLED_SOS_LOGGING", sosLogFile);
- }
-
// Disable W^X so that the bpmd command and the tests pass
// Issue: https://github.com/dotnet/diagnostics/issues/3126
processRunner.WithRuntimeConfiguration("EnableWriteXorExecute", "0");
{
defines.Add("MAJOR_RUNTIME_VERSION_GE_7");
}
- if (major >= 8)
- {
- defines.Add("MAJOR_RUNTIME_VERSION_GE_8");
- }
}
catch (SkipTestException)
{
defines.Add("UNIX_SINGLE_FILE_APP");
}
}
- string setHostRuntime = _config.SetHostRuntime();
- if (!string.IsNullOrEmpty(setHostRuntime) && setHostRuntime == "-none")
- {
- defines.Add("HOST_RUNTIME_NONE");
- }
return defines;
}
# Checks on ConcurrentDictionary<int, string[]>
SOSCOMMAND: DumpHeap -stat -type System.Collections.Concurrent.ConcurrentDictionary<
SOSCOMMAND: DumpHeap -mt <POUT>^(<HEXVAL>) .*System.Collections.Concurrent.ConcurrentDictionary<System.Int32, System.String\[\]>[^+]<POUT>
-EXTCOMMAND: dcd <POUT>\n\s*(<HEXVAL>)\s+<HEXVAL>\s+\d+<POUT>
+EXTCOMMAND: dcd <POUT>^(<HEXVAL>)\s+<HEXVAL>\s+\d+<POUT>
VERIFY: 2 items
VERIFY: Key: 2
EXTCOMMAND: dumparray <POUT>Key: 1\s+Value: dumparray (<HEXVAL>)<POUT>
# Checks on ConcurrentDictionary<int, int>
SOSCOMMAND: DumpHeap -stat -type System.Collections.Concurrent.ConcurrentDictionary<
SOSCOMMAND: DumpHeap -mt <POUT>^(<HEXVAL>) .*System.Collections.Concurrent.ConcurrentDictionary<System.Int32, System.Int32>[^+]<POUT>
-EXTCOMMAND: dcd <POUT>\n\s*(<HEXVAL>)\s+<HEXVAL>\s+\d+<POUT>
+EXTCOMMAND: dcd <POUT>^(<HEXVAL>)\s+<HEXVAL>\s+\d+<POUT>
VERIFY: 3 items
VERIFY: Key: 0\s+Value: 1
VERIFY: Key: 31\s+Value: 17
# Checks on ConcurrentDictionary<string, bool>
SOSCOMMAND: DumpHeap -stat -type System.Collections.Concurrent.ConcurrentDictionary<
SOSCOMMAND: DumpHeap -mt <POUT>^(<HEXVAL>) .*System.Collections.Concurrent.ConcurrentDictionary<System.String, System.Boolean>[^+]<POUT>
-EXTCOMMAND: dcd <POUT>\n\s*(<HEXVAL>)\s+<HEXVAL>\s+\d+<POUT>
+EXTCOMMAND: dcd <POUT>^(<HEXVAL>)\s+<HEXVAL>\s+\d+<POUT>
VERIFY: 3 items
VERIFY: Key: "String true"\s+Value: True
VERIFY: Key: "String false"\s+Value: False
# Checks on ConcurrentDictionary<DumpSampleStruct, DumpSampleClass>
SOSCOMMAND: DumpHeap -stat -type System.Collections.Concurrent.ConcurrentDictionary<
SOSCOMMAND: DumpHeap -mt <POUT>^(<HEXVAL>) .*System.Collections.Concurrent.ConcurrentDictionary<DotnetDumpCommands\.Program\+DumpSampleStruct, DotnetDumpCommands\.Program\+DumpSampleClass>[^+]<POUT>
-EXTCOMMAND: dcd <POUT>\n\s*(<HEXVAL>)\s+<HEXVAL>\s+\d+<POUT>
+EXTCOMMAND: dcd <POUT>^(<HEXVAL>)\s+<HEXVAL>\s+\d+<POUT>
VERIFY: 2 items
VERIFY: Key: dumpvc <HEXVAL> <HEXVAL>\s+Value: null
VERIFY: Key: dumpvc <HEXVAL> <HEXVAL>\s+Value: dumpobj <HEXVAL>
# Checks on ConcurrentDictionary<int, DumpSampleStruct>
SOSCOMMAND: DumpHeap -stat -type System.Collections.Concurrent.ConcurrentDictionary<
SOSCOMMAND: DumpHeap -mt <POUT>^(<HEXVAL>) .*System.Collections.Concurrent.ConcurrentDictionary<System.Int32, DotnetDumpCommands\.Program\+DumpSampleStruct>[^+]<POUT>
-EXTCOMMAND: dcd <POUT>\n\s*(<HEXVAL>)\s+<HEXVAL>\s+\d+<POUT>
+EXTCOMMAND: dcd <POUT>^(<HEXVAL>)\s+<HEXVAL>\s+\d+<POUT>
VERIFY: 1 items
VERIFY: Key: 0\s+Value: dumpvc <HEXVAL> <HEXVAL>
SOSCOMMAND: dumpvc <POUT>dumpvc (<HEXVAL> <HEXVAL>)<POUT>
SOSCOMMAND: dumpheap -stat
-ENDIF:NETCORE_OR_DOTNETDUMP
+ENDIF:NETCORE_OR_DOTNETDUMP
\ No newline at end of file
LOADSOS
SOSCOMMAND:DumpStackObjects
-VERIFY:\s*<HEXVAL>\s+<HEXVAL>\s+System.Byte\[\]\s+
+VERIFY:<HEXVAL>\s+<HEXVAL>\s+System.Byte\[\]\s+
SOSCOMMAND:DumpObj <POUT>\w+\s+(<HEXVAL>)\s+(System.Byte\[\]!\$0_)*System.Byte\[\]\s+<POUT>
VERIFY:\s+Name: System.Byte\[\]\s+
LOADSOS
SOSCOMMAND:DumpStackObjects
-VERIFY:\s*<HEXVAL>\s+<HEXVAL>\s+System.IO.StringWriter\s+
+VERIFY:<HEXVAL>\s+<HEXVAL>\s+System.IO.StringWriter\s+
SOSCOMMAND:DumpObj <POUT>\w+\s+(<HEXVAL>)\s+(System.IO.StringWriter!\$0_)*System.IO.StringWriter\s+<POUT>
IFDEF:MAJOR_RUNTIME_VERSION_2
ENDIF:MAJOR_RUNTIME_VERSION_GE_3
SOSCOMMAND:DumpStackObjects
-VERIFY:\s*<HEXVAL>\s+<HEXVAL>\s+([Gg][Cc]where!\$0_)*GCWhere\s+
+VERIFY:<HEXVAL>\s<HEXVAL>\s([Gg][Cc]where!\$0_)*GCWhere\s+
SOSCOMMAND:GCWhere <POUT>\w+\s+(<HEXVAL>)\s+([Gg][Cc]where!\$0_)*GCWhere\s+<POUT>
# we care that the Gen is 0
VERIFY:\s+<HEXVAL>\s+<HEXVAL>\s+<HEXVAL>\s+System\.String.*_string\s+
VERIFY:\s+<HEXVAL>\s+<HEXVAL>\s+<HEXVAL>\s+System\.UInt64.*52704621242434 _static\s+
VERIFY:\s+GC Refs:\s+
-VERIFY:\s+Field\s+Offset\s+Object\s+Type\s+
-VERIFY:.*\s+<HEXVAL>\s+<HEXVAL>.*
+VERIFY:\s+offset\s+object\s+
+VERIFY:\s+<DECVAL>\s+<HEXVAL>\s+
SOSCOMMAND:DumpHeap
VERIFY:\s+<HEXVAL>\s+<HEXVAL>\s+<DECVAL>\s+
VERIFY:\s+<HEXVAL>\s+<HEXVAL>\s+<HEXVAL>\s+System\.String.*_string\s+
VERIFY:\s+<HEXVAL>\s+<HEXVAL>\s+<HEXVAL>\s+System\.UInt64.*52704621242434 _static\s+
VERIFY:\s+GC Refs:\s+
-VERIFY:\s+Field\s+Offset\s+Object\s+Type\s+
-VERIFY:.*\s+<HEXVAL>\s+<HEXVAL>.*
+VERIFY:\s+offset\s+object\s+
+VERIFY:\s+<DECVAL>\s+<HEXVAL>\s+
# Continue to the next DebugBreak
CONTINUE
VERIFY:\s*SyncBlocks to be cleaned up:\s+<DECVAL>\s+
VERIFY:(\s*Free-Threaded Interfaces to be released:\s+<DECVAL>\s+)?
VERIFY:\s*Statistics for all finalizable objects.*:\s+
-VERIFY:\s+Address\s+MT\s+Size\s+
+VERIFY:\s+MT\s+Count\s+TotalSize\s+Class Name\s+
VERIFY:(\s*<HEXVAL>\s+<DECVAL>\s+<DECVAL>\s+.*)?
-VERIFY:\s*Total\s+<DECVAL>\s+objects.*<DECVAL>\s+bytes\s*
+VERIFY:\s*Total\s+<DECVAL>\s+objects\s+
EXTCOMMAND:logopen %LOG_PATH%/%TEST_NAME%.%LOG_SUFFIX%.consolelog
EXTCOMMAND:logging %LOG_PATH%/%TEST_NAME%.%LOG_SUFFIX%.diaglog
VERIFY:.*\s+<HEXVAL>\s+<HEXVAL>\s+\[DEFAULT\] Void SymbolTestApp\.Program\.Main\(.*\)\s+\(.*\)\s+
VERIFY:.*\s+Stack walk complete.\s+
-SOSCOMMAND: runtimes
-
-!IFDEF:HOST_RUNTIME_NONE
-
# Verify DumpStackObjects works
SOSCOMMAND:DumpStackObjects
VERIFY:.*OS Thread Id:\s+0x<HEXVAL>\s+.*
-VERIFY:\s*SP/REG\s+Object\s+Name\s+
-VERIFY:.*\s*<HEXVAL>\s+<HEXVAL>\s+System\.String.*
-VERIFY:.*\s*<HEXVAL>\s+<HEXVAL>\s+System\.String\[\].*
+VERIFY:\s+([R|E])*SP/REG\s+Object\s+Name\s+
+VERIFY:.*\s+<HEXVAL>\s+<HEXVAL>\s+System\.String.*
+VERIFY:.*\s+<HEXVAL>\s+<HEXVAL>\s+System\.String\[\].*
# Verify DumpStackObjects -verify works
SOSCOMMAND:DumpStackObjects -verify
VERIFY:.*OS Thread Id:\s+0x<HEXVAL>\s+.*
-VERIFY:\s*SP/REG\s+Object\s+Name\s+
-VERIFY:.*\s*<HEXVAL>\s+<HEXVAL>\s+System\.String.*
-VERIFY:.*\s*<HEXVAL>\s+<HEXVAL>\s+System\.String\[\].*
-
-ENDIF:HOST_RUNTIME_NONE
+VERIFY:\s+([R|E])*SP/REG\s+Object\s+Name\s+
+VERIFY:.*\s+<HEXVAL>\s+<HEXVAL>\s+System\.String.*
+VERIFY:.*\s+<HEXVAL>\s+<HEXVAL>\s+System\.String\[\].*
ENDIF:NETCORE_OR_DOTNETDUMP
# 7) Verify DumpStackObjects works
SOSCOMMAND:DumpStackObjects
VERIFY:.*OS Thread Id:\s+0x<HEXVAL>\s+.*
-VERIFY:\s*SP/REG\s+Object\s+Name\s+
+VERIFY:\s+([R|E])*SP/REG\s+Object\s+Name\s+
VERIFY:.*\s+<HEXVAL>\s+<HEXVAL>\s+System\.FormatException\s+
VERIFY:.*\s+<HEXVAL>\s+<HEXVAL>\s+System\.InvalidOperationException\s+
VERIFY:.*\s+<HEXVAL>\s+<HEXVAL>\s+System\.String.*
# 8) Verify DumpStackObjects -verify works
SOSCOMMAND:DumpStackObjects -verify
VERIFY:.*OS Thread Id:\s+0x<HEXVAL>\s+.*
-VERIFY:\s*SP/REG\s+Object\s+Name\s+
+VERIFY:\s+([R|E])*SP/REG\s+Object\s+Name\s+
VERIFY:.*\s+<HEXVAL>\s+<HEXVAL>\s+System\.FormatException\s+
VERIFY:.*\s+<HEXVAL>\s+<HEXVAL>\s+System\.InvalidOperationException\s+
VERIFY:.*\s+<HEXVAL>\s+<HEXVAL>\s+System\.String.*
/// X86Machine implementation
///
LPCSTR X86Machine::s_DumpStackHeading = "ChildEBP RetAddr Caller, Callee\n";
+LPCSTR X86Machine::s_DSOHeading = "ESP/REG Object Name\n";
LPCSTR X86Machine::s_GCRegs[7] = {"eax", "ebx", "ecx", "edx", "esi", "edi", "ebp"};
LPCSTR X86Machine::s_SPName = "ESP";
/// ARMMachine implementation
///
LPCSTR ARMMachine::s_DumpStackHeading = "ChildFP RetAddr Caller, Callee\n";
+LPCSTR ARMMachine::s_DSOHeading = "SP/REG Object Name\n";
LPCSTR ARMMachine::s_GCRegs[14] = {"r0", "r1", "r2", "r3", "r4", "r5", "r6",
"r7", "r8", "r9", "r10", "r11", "r12", "lr"};
LPCSTR ARMMachine::s_SPName = "sp";
/// AMD64Machine implementation
///
LPCSTR AMD64Machine::s_DumpStackHeading = "Child-SP RetAddr Caller, Callee\n";
+LPCSTR AMD64Machine::s_DSOHeading = "RSP/REG Object Name\n";
LPCSTR AMD64Machine::s_GCRegs[15] = {"rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp",
"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"};
LPCSTR AMD64Machine::s_SPName = "RSP";
/// ARM64Machine implementation
///
LPCSTR ARM64Machine::s_DumpStackHeading = "ChildFP RetAddr Caller, Callee\n";
+LPCSTR ARM64Machine::s_DSOHeading = "SP/REG Object Name\n";
// excluding x18, fp & lr as these will not contain object references
LPCSTR ARM64Machine::s_GCRegs[28] = {"x0", "x1", "x2", "x3", "x4", "x5", "x6",
"x7", "x8", "x9", "x10", "x11", "x12", "x13",
virtual void FillTargetContext(LPVOID destCtx, LPVOID srcCtx, int idx = 0) const;
virtual LPCSTR GetDumpStackHeading() const { return s_DumpStackHeading; }
+ virtual LPCSTR GetDumpStackObjectsHeading() const { return s_DSOHeading; }
virtual LPCSTR GetSPName() const { return s_SPName; }
virtual void GetGCRegisters(LPCSTR** regNames, unsigned int* cntRegs) const
{ _ASSERTE(cntRegs != NULL); *regNames = s_GCRegs; *cntRegs = ARRAY_SIZE(s_GCRegs); }
private:
static LPCSTR s_DumpStackHeading;
+ static LPCSTR s_DSOHeading;
static LPCSTR s_GCRegs[7];
static LPCSTR s_SPName;
}; // class X86Machine
virtual void FillTargetContext(LPVOID destCtx, LPVOID srcCtx, int idx = 0) const;
virtual LPCSTR GetDumpStackHeading() const { return s_DumpStackHeading; }
+ virtual LPCSTR GetDumpStackObjectsHeading() const { return s_DSOHeading; }
virtual LPCSTR GetSPName() const { return s_SPName; }
virtual void GetGCRegisters(LPCSTR** regNames, unsigned int* cntRegs) const
{ _ASSERTE(cntRegs != NULL); *regNames = s_GCRegs; *cntRegs = ARRAY_SIZE(s_GCRegs); }
private:
static LPCSTR s_DumpStackHeading;
+ static LPCSTR s_DSOHeading;
static LPCSTR s_GCRegs[14];
static LPCSTR s_SPName;
static ARMMachine s_ARMMachineInstance;
virtual void FillTargetContext(LPVOID destCtx, LPVOID srcCtx, int idx = 0) const;
virtual LPCSTR GetDumpStackHeading() const { return s_DumpStackHeading; }
+ virtual LPCSTR GetDumpStackObjectsHeading() const { return s_DSOHeading; }
virtual LPCSTR GetSPName() const { return s_SPName; }
virtual void GetGCRegisters(LPCSTR** regNames, unsigned int* cntRegs) const
{ _ASSERTE(cntRegs != NULL); *regNames = s_GCRegs; *cntRegs = ARRAY_SIZE(s_GCRegs); }
private:
static LPCSTR s_DumpStackHeading;
+ static LPCSTR s_DSOHeading;
static LPCSTR s_GCRegs[15];
static LPCSTR s_SPName;
}; // class AMD64Machine
virtual void FillTargetContext(LPVOID destCtx, LPVOID srcCtx, int idx = 0) const;
virtual LPCSTR GetDumpStackHeading() const { return s_DumpStackHeading; }
+ virtual LPCSTR GetDumpStackObjectsHeading() const { return s_DSOHeading; }
virtual LPCSTR GetSPName() const { return s_SPName; }
virtual void GetGCRegisters(LPCSTR** regNames, unsigned int* cntRegs) const
{ _ASSERTE(cntRegs != NULL); *regNames = s_GCRegs; *cntRegs = ARRAY_SIZE(s_GCRegs);}
ARM64Machine & operator=(const ARM64Machine&); // undefined
static LPCSTR s_DumpStackHeading;
+ static LPCSTR s_DSOHeading;
static LPCSTR s_GCRegs[28];
static LPCSTR s_SPName;
return TRUE;
}
+
+// This function expects 'stat' to be valid and ready to gather statistics.
+void GatherOneHeapFinalization(DacpGcHeapDetails& heapDetails, HeapStat *stat, BOOL bAllReady, BOOL bShort)
+{
+ DWORD_PTR dwAddr=0;
+ UINT m;
+
+ if (!bShort)
+ {
+ for (m = 0; m <= GetMaxGeneration(); m ++)
+ {
+ if (IsInterrupt())
+ return;
+
+ ExtOut("generation %d has %d finalizable objects ", m,
+ (SegQueueLimit(heapDetails,gen_segment(m)) - SegQueue(heapDetails,gen_segment(m))) / sizeof(size_t));
+
+ ExtOut ("(%p->%p)\n",
+ SOS_PTR(SegQueue(heapDetails,gen_segment(m))),
+ SOS_PTR(SegQueueLimit(heapDetails,gen_segment(m))));
+ }
+ }
+
+ if (bAllReady)
+ {
+ if (!bShort)
+ {
+ ExtOut ("Finalizable but not rooted: ");
+ }
+
+ TADDR rngStart = (TADDR)SegQueue(heapDetails, gen_segment(GetMaxGeneration()));
+ TADDR rngEnd = (TADDR)SegQueueLimit(heapDetails, gen_segment(0));
+
+ std::stringstream argsBuilder;
+ argsBuilder << std::hex << rngStart << " ";
+ argsBuilder << std::hex << rngEnd << " ";
+ argsBuilder << "-nofinalizer ";
+ if (bShort)
+ argsBuilder << "-short";
+
+ ExecuteCommand("notreachableinrange", argsBuilder.str().c_str());
+ }
+
+ if (!bShort)
+ {
+ ExtOut ("Ready for finalization %d objects ",
+ (SegQueueLimit(heapDetails,FinalizerListSeg)-SegQueue(heapDetails,CriticalFinalizerListSeg)) / sizeof(size_t));
+ ExtOut ("(%p->%p)\n",
+ SOS_PTR(SegQueue(heapDetails,CriticalFinalizerListSeg)),
+ SOS_PTR(SegQueueLimit(heapDetails,FinalizerListSeg)));
+ }
+
+ // if bAllReady we only count objects that are ready for finalization,
+ // otherwise we count all finalizable objects.
+ TADDR taddrLowerLimit = (bAllReady ? (TADDR)SegQueue(heapDetails, CriticalFinalizerListSeg) :
+ (DWORD_PTR)SegQueue(heapDetails, gen_segment(GetMaxGeneration())));
+ for (dwAddr = taddrLowerLimit;
+ dwAddr < (DWORD_PTR)SegQueueLimit(heapDetails, FinalizerListSeg);
+ dwAddr += sizeof (dwAddr))
+ {
+ if (IsInterrupt())
+ {
+ return;
+ }
+
+ DWORD_PTR objAddr = NULL,
+ MTAddr = NULL;
+
+ if (SUCCEEDED(MOVE(objAddr, dwAddr)) && SUCCEEDED(GetMTOfObject(objAddr, &MTAddr)) && MTAddr)
+ {
+ if (bShort)
+ {
+ DMLOut("%s\n", DMLObject(objAddr));
+ }
+ else
+ {
+ size_t s = ObjectSize(objAddr);
+ stat->Add(MTAddr, (DWORD)s);
+ }
+ }
+ }
+}
+
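+// Walks every object in a single GC heap, calling pFunc(objAddr, size, methodTable, token)
+// for each object found. Both heap layouts are handled: with regions, each generation owns
+// its own segment list; otherwise the small object heap chain is walked first, followed by
+// the large object heap and, when present, the pinned object heap. Returns FALSE (after
+// reporting the last good object) if the walk runs into an inconsistency.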
+BOOL GCHeapTraverse(const GCHeapDetails &heap, AllocInfo* pallocInfo, VISITGCHEAPFUNC pFunc, LPVOID token, BOOL verify)
+{
+ DWORD_PTR dwAddrSeg = 0;
+ DWORD_PTR dwAddr = 0;
+ DWORD_PTR dwAddrCurrObj = 0;
+ DWORD_PTR dwAddrPrevObj = 0;
+ size_t s, sPrev = 0;
+
+ DacpHeapSegmentData segment;
+ if (heap.has_regions)
+ {
+ BOOL bPrevFree = FALSE;
+ for (UINT n = 0; n <= GetMaxGeneration(); n++)
+ {
+ dwAddrSeg = (DWORD_PTR)heap.generation_table[n].start_segment;
+ while (dwAddrSeg != 0)
+ {
+ if (IsInterrupt())
+ {
+ ExtOut("<heap walk interrupted>\n");
+ return FALSE;
+ }
+ if (segment.Request(g_sos, dwAddrSeg, heap.original_heap_details) != S_OK)
+ {
+ ExtOut("Error requesting heap segment %p\n", SOS_PTR(dwAddrSeg));
+ return FALSE;
+ }
+ dwAddrCurrObj = (DWORD_PTR)segment.mem;
+ DWORD_PTR end_of_segment = (DWORD_PTR)segment.highAllocMark;
+
+ while (true)
+ {
+ if (dwAddrCurrObj - SIZEOF_OBJHEADER == end_of_segment - Align(min_obj_size))
+ break;
+
+ if (dwAddrCurrObj >= (DWORD_PTR)end_of_segment)
+ {
+ if (dwAddrCurrObj > (DWORD_PTR)end_of_segment)
+ {
+ ExtOut("curr_object: %p > heap_segment_allocated (seg: %p)\n",
+ SOS_PTR(dwAddrCurrObj), SOS_PTR(dwAddrSeg));
+ if (dwAddrPrevObj)
+ {
+ ExtOut("Last good object: %p\n", SOS_PTR(dwAddrPrevObj));
+ }
+ return FALSE;
+ }
+ // done with this segment
+ break;
+ }
+
+ if (dwAddrSeg == (DWORD_PTR)heap.ephemeral_heap_segment
+ && dwAddrCurrObj >= end_of_segment)
+ {
+ if (dwAddrCurrObj > end_of_segment)
+ {
+ // prev_object length is too long
+ ExtOut("curr_object: %p > end_of_segment: %p\n",
+ SOS_PTR(dwAddrCurrObj), SOS_PTR(end_of_segment));
+ if (dwAddrPrevObj)
+ {
+ DMLOut("Last good object: %s\n", DMLObject(dwAddrPrevObj));
+ }
+ return FALSE;
+ }
+ return FALSE;
+ }
+
+ DWORD_PTR dwAddrMethTable = 0;
+ if (FAILED(GetMTOfObject(dwAddrCurrObj, &dwAddrMethTable)))
+ {
+ return FALSE;
+ }
+
+ dwAddrMethTable = dwAddrMethTable & ~sos::Object::METHODTABLE_PTR_LOW_BITMASK;
+ if (dwAddrMethTable == 0)
+ {
+ // Is this the beginning of an allocation context?
+ int i;
+ for (i = 0; i < pallocInfo->num; i++)
+ {
+ if (dwAddrCurrObj == (DWORD_PTR)pallocInfo->array[i].alloc_ptr)
+ {
+ dwAddrCurrObj =
+ (DWORD_PTR)pallocInfo->array[i].alloc_limit + Align(min_obj_size);
+ break;
+ }
+ }
+ if (i < pallocInfo->num)
+ continue;
+
+ // We also need to look at the gen0 alloc context.
+ if (dwAddrCurrObj == (DWORD_PTR)heap.generation_table[0].allocContextPtr)
+ {
+ dwAddrCurrObj = (DWORD_PTR)heap.generation_table[0].allocContextLimit + Align(min_obj_size);
+ continue;
+ }
+ }
+
+ BOOL bContainsPointers;
+ size_t s;
+ BOOL bMTOk = GetSizeEfficient(dwAddrCurrObj, dwAddrMethTable, FALSE, s, bContainsPointers);
+ if (verify && bMTOk)
+ bMTOk = VerifyObject(heap, dwAddrCurrObj, dwAddrMethTable, s, TRUE);
+ if (!bMTOk)
+ {
+ DMLOut("curr_object: %s\n", DMLListNearObj(dwAddrCurrObj));
+ if (dwAddrPrevObj)
+ DMLOut("Last good object: %s\n", DMLObject(dwAddrPrevObj));
+
+ ExtOut("----------------\n");
+ return FALSE;
+ }
+
+ pFunc(dwAddrCurrObj, s, dwAddrMethTable, token);
+
+ // We believe we did this alignment in ObjectSize above.
+ assert((s & ALIGNCONST) == 0);
+ dwAddrPrevObj = dwAddrCurrObj;
+ sPrev = s;
+ bPrevFree = IsMTForFreeObj(dwAddrMethTable);
+
+ dwAddrCurrObj += s;
+ }
+ dwAddrSeg = (DWORD_PTR)segment.next;
+ }
+ }
+ }
+ else
+ {
+ DWORD_PTR begin_youngest;
+ DWORD_PTR end_youngest;
+
+ begin_youngest = (DWORD_PTR)heap.generation_table[0].allocation_start;
+ dwAddr = (DWORD_PTR)heap.ephemeral_heap_segment;
+
+ end_youngest = (DWORD_PTR)heap.alloc_allocated;
+
+ dwAddrSeg = (DWORD_PTR)heap.generation_table[GetMaxGeneration()].start_segment;
+ dwAddr = dwAddrSeg;
+
+ if (segment.Request(g_sos, dwAddr, heap.original_heap_details) != S_OK)
+ {
+ ExtOut("Error requesting heap segment %p\n", SOS_PTR(dwAddr));
+ return FALSE;
+ }
+
+ // DWORD_PTR dwAddrCurrObj = (DWORD_PTR)heap.generation_table[GetMaxGeneration()].allocation_start;
+ dwAddrCurrObj = (DWORD_PTR)segment.mem;
+
+ BOOL bPrevFree = FALSE;
+
+ while (1)
+ {
+ if (IsInterrupt())
+ {
+ ExtOut("<heap walk interrupted>\n");
+ return FALSE;
+ }
+ DWORD_PTR end_of_segment = (DWORD_PTR)segment.allocated;
+ if (dwAddrSeg == (DWORD_PTR)heap.ephemeral_heap_segment)
+ {
+ end_of_segment = end_youngest;
+ if (dwAddrCurrObj - SIZEOF_OBJHEADER == end_youngest - Align(min_obj_size))
+ break;
+ }
+ if (dwAddrCurrObj >= (DWORD_PTR)end_of_segment)
+ {
+ if (dwAddrCurrObj > (DWORD_PTR)end_of_segment)
+ {
+ ExtOut ("curr_object: %p > heap_segment_allocated (seg: %p)\n",
+ SOS_PTR(dwAddrCurrObj), SOS_PTR(dwAddrSeg));
+ if (dwAddrPrevObj) {
+ ExtOut ("Last good object: %p\n", SOS_PTR(dwAddrPrevObj));
+ }
+ return FALSE;
+ }
+ dwAddrSeg = (DWORD_PTR)segment.next;
+ if (dwAddrSeg)
+ {
+ dwAddr = dwAddrSeg;
+ if (segment.Request(g_sos, dwAddr, heap.original_heap_details) != S_OK)
+ {
+ ExtOut("Error requesting heap segment %p\n", SOS_PTR(dwAddr));
+ return FALSE;
+ }
+ dwAddrCurrObj = (DWORD_PTR)segment.mem;
+ continue;
+ }
+ else
+ {
+ break; // Done Verifying Heap
+ }
+ }
+
+ if (dwAddrSeg == (DWORD_PTR)heap.ephemeral_heap_segment
+ && dwAddrCurrObj >= end_youngest)
+ {
+ if (dwAddrCurrObj > end_youngest)
+ {
+ // prev_object length is too long
+ ExtOut("curr_object: %p > end_youngest: %p\n",
+ SOS_PTR(dwAddrCurrObj), SOS_PTR(end_youngest));
+ if (dwAddrPrevObj) {
+ DMLOut("Last good object: %s\n", DMLObject(dwAddrPrevObj));
+ }
+ return FALSE;
+ }
+ return FALSE;
+ }
+
+ DWORD_PTR dwAddrMethTable = 0;
+ if (FAILED(GetMTOfObject(dwAddrCurrObj, &dwAddrMethTable)))
+ {
+ return FALSE;
+ }
+
+ dwAddrMethTable = dwAddrMethTable & ~sos::Object::METHODTABLE_PTR_LOW_BITMASK;
+ if (dwAddrMethTable == 0)
+ {
+ // Is this the beginning of an allocation context?
+ int i;
+ for (i = 0; i < pallocInfo->num; i ++)
+ {
+ if (dwAddrCurrObj == (DWORD_PTR)pallocInfo->array[i].alloc_ptr)
+ {
+ dwAddrCurrObj =
+ (DWORD_PTR)pallocInfo->array[i].alloc_limit + Align(min_obj_size);
+ break;
+ }
+ }
+ if (i < pallocInfo->num)
+ continue;
+
+ // We also need to look at the gen0 alloc context.
+ if (dwAddrCurrObj == (DWORD_PTR) heap.generation_table[0].allocContextPtr)
+ {
+ dwAddrCurrObj = (DWORD_PTR) heap.generation_table[0].allocContextLimit + Align(min_obj_size);
+ continue;
+ }
+ }
+
+ BOOL bContainsPointers;
+ BOOL bMTOk = GetSizeEfficient(dwAddrCurrObj, dwAddrMethTable, FALSE, s, bContainsPointers);
+ if (verify && bMTOk)
+ bMTOk = VerifyObject (heap, dwAddrCurrObj, dwAddrMethTable, s, TRUE);
+ if (!bMTOk)
+ {
+ DMLOut("curr_object: %s\n", DMLListNearObj(dwAddrCurrObj));
+ if (dwAddrPrevObj)
+ DMLOut("Last good object: %s\n", DMLObject(dwAddrPrevObj));
+
+ ExtOut ("----------------\n");
+ return FALSE;
+ }
+
+ pFunc (dwAddrCurrObj, s, dwAddrMethTable, token);
+
+ // We believe we did this alignment in ObjectSize above.
+ assert((s & ALIGNCONST) == 0);
+ dwAddrPrevObj = dwAddrCurrObj;
+ sPrev = s;
+ bPrevFree = IsMTForFreeObj(dwAddrMethTable);
+
+ dwAddrCurrObj += s;
+ }
+ }
+
+ // Now for the large object and pinned object generations:
+
+ BOOL bPinnedDone = FALSE;
+
+ dwAddrSeg = (DWORD_PTR)heap.generation_table[GetMaxGeneration()+1].start_segment;
+ dwAddr = dwAddrSeg;
+
+ if (segment.Request(g_sos, dwAddr, heap.original_heap_details) != S_OK)
+ {
+ ExtOut("Error requesting heap segment %p\n", SOS_PTR(dwAddr));
+ return FALSE;
+ }
+
+ // dwAddrCurrObj = (DWORD_PTR)heap.generation_table[GetMaxGeneration()+1].allocation_start;
+ dwAddrCurrObj = (DWORD_PTR)segment.mem;
+
+ dwAddrPrevObj=0;
+
+ while(1)
+ {
+ if (IsInterrupt())
+ {
+ ExtOut("<heap traverse interrupted>\n");
+ return FALSE;
+ }
+
+ DWORD_PTR end_of_segment = (DWORD_PTR)segment.allocated;
+
+ if (dwAddrCurrObj >= (DWORD_PTR)end_of_segment)
+ {
+ if (dwAddrCurrObj > (DWORD_PTR)end_of_segment)
+ {
+ ExtOut("curr_object: %p > heap_segment_allocated (seg: %p)\n",
+ SOS_PTR(dwAddrCurrObj), SOS_PTR(dwAddrSeg));
+ if (dwAddrPrevObj) {
+ ExtOut("Last good object: %p\n", SOS_PTR(dwAddrPrevObj));
+ }
+ return FALSE;
+ }
+
+ dwAddrSeg = (DWORD_PTR)segment.next;
+ if (dwAddrSeg)
+ {
+ dwAddr = dwAddrSeg;
+ if (segment.Request(g_sos, dwAddr, heap.original_heap_details) != S_OK)
+ {
+ ExtOut("Error requesting heap segment %p\n", SOS_PTR(dwAddr));
+ return FALSE;
+ }
+ dwAddrCurrObj = (DWORD_PTR)segment.mem;
+ continue;
+ }
+ else if (heap.has_poh && !bPinnedDone)
+ {
+ bPinnedDone = TRUE;
+ dwAddrSeg = (DWORD_PTR)heap.generation_table[GetMaxGeneration() + 2].start_segment;
+ dwAddr = dwAddrSeg;
+
+ if (segment.Request(g_sos, dwAddr, heap.original_heap_details) != S_OK)
+ {
+ ExtOut("Error requesting heap segment %p\n", SOS_PTR(dwAddr));
+ return FALSE;
+ }
+
+ dwAddrCurrObj = (DWORD_PTR)segment.mem;
+ continue;
+ }
+ else
+ {
+ break; // Done Verifying Heap
+ }
+ }
+
+ DWORD_PTR dwAddrMethTable = 0;
+ if (FAILED(GetMTOfObject(dwAddrCurrObj, &dwAddrMethTable)))
+ {
+ return FALSE;
+ }
+
+ dwAddrMethTable = dwAddrMethTable & ~sos::Object::METHODTABLE_PTR_LOW_BITMASK;
+ BOOL bContainsPointers;
+ BOOL bMTOk = GetSizeEfficient(dwAddrCurrObj, dwAddrMethTable, TRUE, s, bContainsPointers);
+ if (verify && bMTOk)
+ bMTOk = VerifyObject (heap, dwAddrCurrObj, dwAddrMethTable, s, TRUE);
+ if (!bMTOk)
+ {
+ DMLOut("curr_object: %s\n", DMLListNearObj(dwAddrCurrObj));
+
+ if (dwAddrPrevObj)
+                DMLOut("Last good object: %s\n", DMLObject(dwAddrPrevObj));
+
+ ExtOut ("----------------\n");
+ return FALSE;
+ }
+
+ pFunc (dwAddrCurrObj, s, dwAddrMethTable, token);
+
+ // We believe we did this alignment in ObjectSize above.
+ assert((s & ALIGNCONSTLARGE) == 0);
+ dwAddrPrevObj = dwAddrCurrObj;
+ dwAddrCurrObj += s;
+ }
+
+ return TRUE;
+}
+
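+// Walks all GC heaps in the target: the single heap in workstation mode, or every heap
+// returned by GetGCHeapList in server mode, forwarding each one to GCHeapTraverse.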
+BOOL GCHeapsTraverse(VISITGCHEAPFUNC pFunc, LPVOID token, BOOL verify)
+{
+ // Obtain allocation context for each managed thread.
+ AllocInfo allocInfo;
+ allocInfo.Init();
+
+ if (!IsServerBuild())
+ {
+ DacpGcHeapDetails dacHeapDetails;
+ if (dacHeapDetails.Request(g_sos) != S_OK)
+ {
+ ExtOut("Error requesting gc heap details\n");
+ return FALSE;
+ }
+
+ GCHeapDetails heapDetails(dacHeapDetails);
+ return GCHeapTraverse (heapDetails, &allocInfo, pFunc, token, verify);
+ }
+ else
+ {
+ DacpGcHeapData gcheap;
+ if (gcheap.Request(g_sos) != S_OK)
+ {
+ ExtOut("Error requesting GC Heap data\n");
+ return FALSE;
+ }
+
+ DWORD dwAllocSize;
+ DWORD dwNHeaps = gcheap.HeapCount;
+ if (!ClrSafeInt<DWORD>::multiply(sizeof(CLRDATA_ADDRESS), dwNHeaps, dwAllocSize))
+ {
+ ExtOut("Failed to get GCHeaps: integer overflow error\n");
+ return FALSE;
+ }
+ CLRDATA_ADDRESS *heapAddrs = (CLRDATA_ADDRESS*)alloca(dwAllocSize);
+ if (g_sos->GetGCHeapList(dwNHeaps, heapAddrs, NULL) != S_OK)
+ {
+ ExtOut("Failed to get GCHeaps\n");
+ return FALSE;
+ }
+
+ DWORD n;
+ for (n = 0; n < dwNHeaps; n ++)
+ {
+ DacpGcHeapDetails dacHeapDetails;
+ if (dacHeapDetails.Request(g_sos, heapAddrs[n]) != S_OK)
+ {
+ ExtOut("Error requesting details\n");
+ return FALSE;
+ }
+
+ GCHeapDetails heapDetails(dacHeapDetails, heapAddrs[n]);
+ if (!GCHeapTraverse (heapDetails, &allocInfo, pFunc, token, verify))
+ {
+ ExtOut("Traversing a gc heap failed\n");
+ return FALSE;
+ }
+ }
+ }
+
+ return TRUE;
+}
+
+GCHeapSnapshot::GCHeapSnapshot()
+{
+ m_isBuilt = FALSE;
+ m_heapDetails = NULL;
+}
+
+///////////////////////////////////////////////////////////
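+// SegmentLookup keeps a flat, growable array of the heap segments discovered while building
+// a GCHeapSnapshot, so GetHeap() can later map an object address back to the owning gc_heap.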
+SegmentLookup::SegmentLookup()
+{
+ m_iSegmentsSize = m_iSegmentCount = 0;
+
+ m_segments = new DacpHeapSegmentData[nSegLookupStgIncrement];
+ if (m_segments == NULL)
+ {
+ ReportOOM();
+ }
+ else
+ {
+ m_iSegmentsSize = nSegLookupStgIncrement;
+ }
+}
+
+BOOL SegmentLookup::AddSegment(DacpHeapSegmentData *pData)
+{
+ // appends the address of a new (initialized) instance of DacpHeapSegmentData to the list of segments
+ // (m_segments) adding space for a segment when necessary.
+    // @todo Microsoft: The field name m_iSegmentsSize is a little misleading. It's not the size in bytes,
+ // but the number of elements allocated for the array. It probably should have been named something like
+ // m_iMaxSegments instead.
+ if (m_iSegmentCount >= m_iSegmentsSize)
+ {
+ // expand buffer--allocate enough space to hold the elements we already have plus nSegLookupStgIncrement
+ // more elements
+ DacpHeapSegmentData *pNewBuffer = new DacpHeapSegmentData[m_iSegmentsSize+nSegLookupStgIncrement];
+ if (pNewBuffer==NULL)
+ return FALSE;
+
+ // copy the old elements into the new array
+ memcpy(pNewBuffer, m_segments, sizeof(DacpHeapSegmentData)*m_iSegmentsSize);
+
+ // record the new number of elements available
+ m_iSegmentsSize+=nSegLookupStgIncrement;
+
+ // delete the old array
+ delete [] m_segments;
+
+ // set m_segments to point to the new array
+ m_segments = pNewBuffer;
+ }
+
+ // add pData to the array
+ m_segments[m_iSegmentCount++] = *pData;
+
+ return TRUE;
+}
+
+SegmentLookup::~SegmentLookup()
+{
+ if (m_segments)
+ {
+ delete [] m_segments;
+ m_segments = NULL;
+ }
+}
+
+void SegmentLookup::Clear()
+{
+ m_iSegmentCount = 0;
+}
+
+CLRDATA_ADDRESS SegmentLookup::GetHeap(CLRDATA_ADDRESS object, BOOL& bFound)
+{
+ CLRDATA_ADDRESS ret = NULL;
+ bFound = FALSE;
+
+ // Visit our segments
+ for (int i=0; i<m_iSegmentCount; i++)
+ {
+ if (TO_TADDR(m_segments[i].mem) <= TO_TADDR(object) &&
+ TO_TADDR(m_segments[i].highAllocMark) > TO_TADDR(object))
+ {
+ ret = m_segments[i].gc_heap;
+ bFound = TRUE;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+///////////////////////////////////////////////////////////////////////////
+
+BOOL GCHeapSnapshot::Build()
+{
+ Clear();
+
+ m_isBuilt = FALSE;
+
+ ///- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ /// 1. Get some basic information such as the heap type (SVR or WKS), how many heaps there are, mode and max generation
+ /// (See code:ClrDataAccess::RequestGCHeapData)
+ ///- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ if (m_gcheap.Request(g_sos) != S_OK)
+ {
+ ExtOut("Error requesting GC Heap data\n");
+ return FALSE;
+ }
+
+ ArrayHolder<CLRDATA_ADDRESS> heapAddrs = NULL;
+
+ ///- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ /// 2. Get a list of the addresses of the heaps when we have multiple heaps in server mode
+ ///- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ if (m_gcheap.bServerMode)
+ {
+ UINT AllocSize;
+ // allocate an array to hold the starting addresses of each heap when we're in server mode
+ if (!ClrSafeInt<UINT>::multiply(sizeof(CLRDATA_ADDRESS), m_gcheap.HeapCount, AllocSize) ||
+ (heapAddrs = new CLRDATA_ADDRESS [m_gcheap.HeapCount]) == NULL)
+ {
+ ReportOOM();
+ return FALSE;
+ }
+
+ // and initialize it with their addresses (see code:ClrDataAccess::RequestGCHeapList
+ // for details)
+ if (g_sos->GetGCHeapList(m_gcheap.HeapCount, heapAddrs, NULL) != S_OK)
+ {
+ ExtOut("Failed to get GCHeaps\n");
+ return FALSE;
+ }
+ }
+
+ ///- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ /// 3. Get some necessary information about each heap, such as the card table location, the generation
+ /// table, the heap bounds, etc., and retrieve the heap segments
+ ///- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+
+ // allocate an array to hold the information
+ m_heapDetails = new GCHeapDetails[m_gcheap.HeapCount];
+
+ if (m_heapDetails == NULL)
+ {
+ ReportOOM();
+ return FALSE;
+ }
+
+ // get the heap information for each heap
+ // See code:ClrDataAccess::RequestGCHeapDetails for details
+ for (UINT n = 0; n < m_gcheap.HeapCount; n ++)
+ {
+ if (m_gcheap.bServerMode)
+ {
+ DacpGcHeapDetails dacHeapDetails;
+ if (dacHeapDetails.Request(g_sos, heapAddrs[n]) != S_OK)
+ {
+ ExtOut("Error requesting details\n");
+ return FALSE;
+ }
+
+ m_heapDetails[n].Set(dacHeapDetails, heapAddrs[n]);
+ }
+ else
+ {
+ DacpGcHeapDetails dacHeapDetails;
+ if (dacHeapDetails.Request(g_sos) != S_OK)
+ {
+ ExtOut("Error requesting details\n");
+ return FALSE;
+ }
+
+ m_heapDetails[n].Set(dacHeapDetails);
+ }
+
+ // now get information about the heap segments for this heap
+ if (!AddSegments(m_heapDetails[n]))
+ {
+ ExtOut("Failed to retrieve segments for gc heap\n");
+ return FALSE;
+ }
+ }
+
+ m_isBuilt = TRUE;
+ return TRUE;
+}
+
+BOOL GCHeapSnapshot::AddSegments(const GCHeapDetails& details)
+{
+ int n = 0;
+ DacpHeapSegmentData segment;
+
+ // This array of addresses gives us access to all the segments.
+ CLRDATA_ADDRESS AddrSegs[5];
+ if (details.has_regions)
+ {
+ // with regions, each generation has its own list of segments
+ for (unsigned gen = 0; gen <= GetMaxGeneration() + 1; gen++)
+ {
+ AddrSegs[gen] = details.generation_table[gen].start_segment;
+ }
+        AddrSegs[4] = NULL;
+        if (details.has_poh)
+        {
+            AddrSegs[4] = details.generation_table[GetMaxGeneration() + 2].start_segment; // pinned object heap
+        }
+ }
+ else
+ {
+ // The generation segments are linked to each other, starting with the maxGeneration segment.
+ // The second address gives us the large object heap, the third the pinned object heap
+
+ AddrSegs[0] = details.generation_table[GetMaxGeneration()].start_segment;
+ AddrSegs[1] = details.generation_table[GetMaxGeneration() + 1].start_segment;
+ AddrSegs[2] = NULL;
+ if (details.has_poh)
+ {
+ AddrSegs[2] = details.generation_table[GetMaxGeneration() + 2].start_segment; // pinned object heap
+ }
+ AddrSegs[3] = NULL;
+ AddrSegs[4] = NULL;
+ }
+
+    // This loop gets information for all the heap segments in this heap. The outer loop iterates once
+    // per entry in AddrSegs (the generation segments, the large object heap, and the pinned object heap
+    // when present). The inner loop follows the chain of segments rooted at AddrSegs[i].
+ for (unsigned int i = 0; i < ARRAY_SIZE(AddrSegs); ++i)
+ {
+ if (AddrSegs[i] == NULL)
+ {
+ continue;
+ }
+
+ CLRDATA_ADDRESS AddrSeg = AddrSegs[i];
+
+ while (AddrSeg != NULL)
+ {
+ if (IsInterrupt())
+ {
+ return FALSE;
+ }
+ // Initialize segment by copying fields from the target's heap segment at AddrSeg.
+ // See code:ClrDataAccess::RequestGCHeapSegment for details.
+ if (segment.Request(g_sos, AddrSeg, details.original_heap_details) != S_OK)
+ {
+ ExtOut("Error requesting heap segment %p\n", SOS_PTR(AddrSeg));
+ return FALSE;
+ }
+ // add the new segment to the array of segments. This will expand the array if necessary
+ if (!m_segments.AddSegment(&segment))
+ {
+ ExtOut("strike: Failed to store segment\n");
+ return FALSE;
+ }
+ // get the next segment in the chain
+ AddrSeg = segment.next;
+ }
+ }
+
+ return TRUE;
+}
+
+void GCHeapSnapshot::Clear()
+{
+ if (m_heapDetails != NULL)
+ {
+ delete [] m_heapDetails;
+ m_heapDetails = NULL;
+ }
+
+ m_segments.Clear();
+
+ m_isBuilt = FALSE;
+}
+
+GCHeapSnapshot g_snapshot;
+
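+// Maps an object address to the GCHeapDetails of the heap that owns it, using the segment
+// list collected by Build(); returns NULL when the address is not in any known segment.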
+GCHeapDetails *GCHeapSnapshot::GetHeap(CLRDATA_ADDRESS objectPointer)
+{
+    // We need bFound because heap will be NULL if we are in Workstation Mode.
+ // We still need a way to know if the address was found in our segment
+ // list.
+ BOOL bFound = FALSE;
+ CLRDATA_ADDRESS heap = m_segments.GetHeap(objectPointer, bFound);
+ if (heap)
+ {
+ for (UINT i=0; i<m_gcheap.HeapCount; i++)
+ {
+ if (m_heapDetails[i].heapAddr == heap)
+ return m_heapDetails + i;
+ }
+ }
+ else if (!m_gcheap.bServerMode)
+ {
+ if (bFound)
+ {
+ return m_heapDetails;
+ }
+ }
+
+ // Not found
+ return NULL;
+}
+
+// TODO: Do we need to handle the LOH here?
+int GCHeapSnapshot::GetGeneration(CLRDATA_ADDRESS objectPointer)
+{
+ GCHeapDetails *pDetails = GetHeap(objectPointer);
+ if (pDetails == NULL)
+ {
+ ExtOut("Object %p has no generation\n", SOS_PTR(objectPointer));
+ return 0;
+ }
+
+ TADDR taObj = TO_TADDR(objectPointer);
+ if (pDetails->has_regions)
+ {
+ for (int gen_num = 0; gen_num <= 1; gen_num++)
+ {
+ CLRDATA_ADDRESS dwAddrSeg = pDetails->generation_table[gen_num].start_segment;
+ while (dwAddrSeg != 0)
+ {
+ DacpHeapSegmentData segment;
+ if (segment.Request(g_sos, dwAddrSeg, pDetails->original_heap_details) != S_OK)
+ {
+ ExtOut("Error requesting heap segment %p\n", SOS_PTR(dwAddrSeg));
+ return 0;
+ }
+ // The DAC doesn't fill the generation table with true CLRDATA_ADDRESS values
+ // but rather with ULONG64 values (i.e. non-sign-extended 64-bit values)
+ // We use the TO_TADDR below to ensure we won't break if this will ever
+ // be fixed in the DAC.
+ if (TO_TADDR(segment.mem) <= taObj && taObj < TO_TADDR(segment.highAllocMark))
+ {
+ return gen_num;
+ }
+ dwAddrSeg = segment.next;
+ }
+ }
+ }
+ else
+ {
+ // The DAC doesn't fill the generation table with true CLRDATA_ADDRESS values
+ // but rather with ULONG64 values (i.e. non-sign-extended 64-bit values)
+ // We use the TO_TADDR below to ensure we won't break if this will ever
+ // be fixed in the DAC.
+ if (taObj >= TO_TADDR(pDetails->generation_table[0].allocation_start) &&
+ taObj <= TO_TADDR(pDetails->alloc_allocated))
+ return 0;
+
+ if (taObj >= TO_TADDR(pDetails->generation_table[1].allocation_start) &&
+ taObj <= TO_TADDR(pDetails->generation_table[0].allocation_start))
+ return 1;
+ }
+ return 2;
+}
#endif
return target->GetRuntime(ppRuntime);
}
-
-void FlushCheck()
-{
-#ifndef FEATURE_PAL
- SOSExtensions* extensions = (SOSExtensions*)Extensions::GetInstance();
- if (extensions != nullptr)
- {
- extensions->FlushCheck();
- }
-#endif // !FEATURE_PAL
-}
};
extern HRESULT GetRuntime(IRuntime** ppRuntime);
-extern void FlushCheck();
#ifndef MINIDUMP
ControlC = FALSE; \
g_bDacBroken = TRUE; \
g_clrData = NULL; \
- g_sos = NULL; \
- FlushCheck();
+ g_sos = NULL;
// Also initializes the target machine
#define INIT_API_NOEE() \
// Retrieve some target specific output strings
virtual LPCSTR GetDumpStackHeading() const = 0;
+ virtual LPCSTR GetDumpStackObjectsHeading() const = 0;
virtual LPCSTR GetSPName() const = 0;
// Retrieves the non-volatile registers reported to the GC
virtual void GetGCRegisters(LPCSTR** regNames, unsigned int* cntRegs) const = 0;
}
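+// Maps the numeric handle type reported by the handle enumerator to a display name. The
+// values are assumed to mirror the runtime's HNDTYPE_* ordering (weak short = 0 through
+// weak WinRT = 9); anything unrecognized, including type 4, is reported as "unknown".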
+static const char *NameForHandle(unsigned int type)
+{
+ switch (type)
+ {
+ case 0:
+ return "weak short";
+ case 1:
+ return "weak long";
+ case 2:
+ return "strong";
+ case 3:
+ return "pinned";
+ case 5:
+ return "ref counted";
+ case 6:
+ return "dependent";
+ case 7:
+ return "async pinned";
+ case 8:
+ return "sized ref";
+ case 9:
+ return "weak WinRT";
+ }
+
+ return "unknown";
+}
+
///////////////////////////////////////////////////////////////////////////////
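+// Builds a map from the primary object of every dependent handle to the secondary objects
+// it keeps alive, so heap walks can treat each secondary as an ordinary reference from its
+// primary when tracing roots.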
+void GetDependentHandleMap(std::unordered_map<TADDR, std::list<TADDR>>& map)
+{
+ unsigned int type = HNDTYPE_DEPENDENT;
+ ToRelease<ISOSHandleEnum> handles;
+
+ HRESULT hr = g_sos->GetHandleEnumForTypes(&type, 1, &handles);
+
+ if (FAILED(hr))
+ {
+ ExtOut("Failed to walk dependent handles. GCRoot may miss paths.\n");
+ return;
+ }
+
+ SOSHandleData data[4];
+ unsigned int fetched = 0;
+
+ do
+ {
+ hr = handles->Next(ARRAY_SIZE(data), data, &fetched);
+
+ if (FAILED(hr))
+ {
+ ExtOut("Error walking dependent handles. GCRoot may miss paths.\n");
+ return;
+ }
+
+ for (unsigned int i = 0; i < fetched; ++i)
+ {
+ if (data[i].Secondary != 0)
+ {
+ TADDR obj = 0;
+ TADDR target = TO_TADDR(data[i].Secondary);
+
+ MOVE(obj, TO_TADDR(data[i].Handle));
+
+ map[obj].push_back(target);
+ }
+ }
+ } while (fetched == ARRAY_SIZE(data));
+}
+
UINT FindAllPinnedAndStrong(DWORD_PTR handlearray[], UINT arraySize)
{
unsigned int fetched = 0;
return pos;
}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// Some defines for cards taken from gc code
+//
+#define card_word_width ((size_t)32)
+
+//
+// The value of card_size is determined empirically according to the average size of an object
+// In the code we also rely on the assumption that one card_table entry (DWORD) covers an entire os page
+//
+#if defined (_TARGET_WIN64_)
+#define card_size ((size_t)(2*DT_GC_PAGE_SIZE/card_word_width))
+#else
+#define card_size ((size_t)(DT_GC_PAGE_SIZE/card_word_width))
+#endif //_TARGET_WIN64_
+
+// so card_size = 128 on x86, 256 on x64
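+// (assuming the usual DT_GC_PAGE_SIZE of 0x1000: 4096/32 = 128 and 2*4096/32 = 256 bytes
+// of heap covered per card)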
+
+inline
+size_t card_word (size_t card)
+{
+ return card / card_word_width;
+}
+
+inline
+unsigned card_bit (size_t card)
+{
+ return (unsigned)(card % card_word_width);
+}
+
+inline
+size_t card_of ( BYTE* object)
+{
+ return (size_t)(object) / card_size;
+}
+
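+// Returns TRUE if a card covering objAddr is set in any of the target's card tables. After
+// heap growth there can be a chain of card tables, so the loop below follows the link to
+// the next table (stored just before the translated table pointer) until a set card is
+// found or the chain ends.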
+BOOL CardIsSet(const GCHeapDetails &heap, TADDR objAddr)
+{
+ // The card table has to be translated to look at the refcount, etc.
+ // g_card_table[card_word(card_of(g_lowest_address))].
+
+ TADDR card_table = TO_TADDR(heap.card_table);
+ card_table = card_table + card_word(card_of((BYTE *)heap.lowest_address))*sizeof(DWORD);
+
+ do
+ {
+ TADDR card_table_lowest_addr;
+ TADDR card_table_next;
+
+ if (MOVE(card_table_lowest_addr, ALIGN_DOWN(card_table, 0x1000) + sizeof(PVOID)) != S_OK)
+ {
+ ExtErr("Error getting card table lowest address\n");
+ return FALSE;
+ }
+
+ if (MOVE(card_table_next, card_table - sizeof(PVOID)) != S_OK)
+ {
+ ExtErr("Error getting next card table\n");
+ return FALSE;
+ }
+
+ size_t card = (objAddr - card_table_lowest_addr) / card_size;
+ TADDR card_addr = card_table + (card_word(card) * sizeof(DWORD));
+ DWORD value;
+ if (MOVE(value, card_addr) != S_OK)
+ {
+ ExtErr("Error reading card bits - obj %p card %08x card_addr %p card_table %p\n", objAddr, card, card_addr, card_table);
+ return FALSE;
+ }
+
+ if (value & 1<<card_bit(card))
+ return TRUE;
+
+ card_table = card_table_next;
+ }
+ while(card_table);
+
+ return FALSE;
+}
+
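+// A card only has to be set when an older-generation object holds a reference into a
+// younger generation, so a missing card is only an error when the child's generation is
+// lower than the parent's.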
+BOOL NeedCard(TADDR parent, TADDR child)
+{
+ int iChildGen = g_snapshot.GetGeneration(child);
+
+ if (iChildGen == 2)
+ return FALSE;
+
+ int iParentGen = g_snapshot.GetGeneration(parent);
+
+ return (iChildGen < iParentGen);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// Some defines for mark_array taken from gc code
+//
+
+#define mark_bit_pitch 8
+#define mark_word_width 32
+#define mark_word_size (mark_word_width * mark_bit_pitch)
+#define heap_segment_flags_swept 16
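+// One mark-array DWORD therefore covers mark_word_size = 8 * 32 = 256 bytes of heap;
+// heap_segment_flags_swept mirrors the GC's segment flag of the same name.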
+
+inline
+size_t mark_bit_bit_of(CLRDATA_ADDRESS add)
+{
+ return (size_t)((add / mark_bit_pitch) % mark_word_width);
+}
+
+inline
+size_t mark_word_of(CLRDATA_ADDRESS add)
+{
+ return (size_t)(add / mark_word_size);
+}
+
+inline BOOL mark_array_marked(const GCHeapDetails &heap, CLRDATA_ADDRESS add)
+{
+
+ DWORD entry = 0;
+ HRESULT hr = MOVE(entry, heap.mark_array + sizeof(DWORD) * mark_word_of(add));
+
+ if (FAILED(hr))
+ ExtOut("Failed to read card table entry.\n");
+
+ return entry & (1 << mark_bit_bit_of(add));
+}
+
+BOOL background_object_marked(const GCHeapDetails &heap, CLRDATA_ADDRESS o)
+{
+ BOOL m = TRUE;
+
+ if ((o >= heap.background_saved_lowest_address) && (o < heap.background_saved_highest_address))
+ m = mark_array_marked(heap, o);
+
+ return m;
+}
+
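+// Decides whether an object's members can be verified while a background GC is in progress:
+// objects the concurrent sweep has already passed, or that were allocated after the
+// background mark, are accepted as-is; everything else must be marked in the mark array.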
+BOOL fgc_should_consider_object(const GCHeapDetails &heap,
+ CLRDATA_ADDRESS o,
+ const DacpHeapSegmentData &seg,
+ BOOL consider_bgc_mark_p,
+ BOOL check_current_sweep_p,
+ BOOL check_saved_sweep_p)
+{
+ // the logic for this function must be kept in sync with the analogous function in gc.cpp
+ BOOL no_bgc_mark_p = FALSE;
+
+ if (consider_bgc_mark_p)
+ {
+ if (check_current_sweep_p && (o < heap.next_sweep_obj))
+ {
+ no_bgc_mark_p = TRUE;
+ }
+
+ if (!no_bgc_mark_p)
+ {
+ if(check_saved_sweep_p && (o >= heap.saved_sweep_ephemeral_start))
+ {
+ no_bgc_mark_p = TRUE;
+ }
+
+ if (!check_saved_sweep_p)
+ {
+ CLRDATA_ADDRESS background_allocated = seg.background_allocated;
+ if (o >= background_allocated)
+ {
+ no_bgc_mark_p = TRUE;
+ }
+ }
+ }
+ }
+ else
+ {
+ no_bgc_mark_p = TRUE;
+ }
+
+ return no_bgc_mark_p ? TRUE : background_object_marked(heap, o);
+}
+
+enum c_gc_state
+{
+ c_gc_state_marking,
+ c_gc_state_planning,
+ c_gc_state_free
+};
+
+inline BOOL in_range_for_segment(const DacpHeapSegmentData &seg, CLRDATA_ADDRESS addr)
+{
+ return (addr >= seg.mem) && (addr < seg.reserved);
+}
+
+void should_check_bgc_mark(const GCHeapDetails &heap,
+ const DacpHeapSegmentData &seg,
+ BOOL* consider_bgc_mark_p,
+ BOOL* check_current_sweep_p,
+ BOOL* check_saved_sweep_p)
+{
+ // the logic for this function must be kept in sync with the analogous function in gc.cpp
+ *consider_bgc_mark_p = FALSE;
+ *check_current_sweep_p = FALSE;
+ *check_saved_sweep_p = FALSE;
+
+ if (heap.current_c_gc_state == c_gc_state_planning)
+ {
+ // We are doing the next_sweep_obj comparison here because we have yet to
+ // turn on the swept flag for the segment but in_range_for_segment will return
+ // FALSE if the address is the same as reserved.
+ if ((seg.flags & heap_segment_flags_swept) || (heap.next_sweep_obj == seg.reserved))
+ {
+ // this seg was already swept.
+ }
+ else
+ {
+ *consider_bgc_mark_p = TRUE;
+
+ if ((heap.saved_sweep_ephemeral_seg != -1) && (seg.segmentAddr == heap.saved_sweep_ephemeral_seg))
+ {
+ *check_saved_sweep_p = TRUE;
+ }
+
+ if (in_range_for_segment(seg, heap.next_sweep_obj))
+ {
+ *check_current_sweep_p = TRUE;
+ }
+ }
+ }
+}
+
+// TODO: FACTOR TOGETHER THE OBJECT MEMBER WALKING CODE FROM
+// TODO: VerifyObjectMember(), GetListOfRefs(), HeapTraverser::PrintRefs()
+BOOL VerifyObjectMember(const GCHeapDetails &heap, DWORD_PTR objAddr)
+{
+ BOOL ret = TRUE;
+ BOOL bCheckCard = TRUE;
+    // The card-table prefilter below needs the object's actual size.
+    size_t size = ObjectSize(objAddr);
+ {
+ DWORD_PTR dwAddrCard = objAddr;
+ while (dwAddrCard < objAddr + size)
+ {
+ if (CardIsSet(heap, dwAddrCard))
+ {
+ bCheckCard = FALSE;
+ break;
+ }
+ dwAddrCard += card_size;
+ }
+
+ if (bCheckCard)
+ {
+ dwAddrCard = objAddr + size - 2*sizeof(PVOID);
+ if (CardIsSet(heap, dwAddrCard))
+ {
+ bCheckCard = FALSE;
+ }
+ }
+ }
+
+ for (sos::RefIterator itr(TO_TADDR(objAddr)); itr; ++itr)
+ {
+ TADDR dwAddr1 = (DWORD_PTR)*itr;
+ if (dwAddr1)
+ {
+ TADDR dwChild = dwAddr1;
+ // Try something more efficient than IsObject here. Is the methodtable valid?
+ size_t s;
+ BOOL bPointers;
+ TADDR dwAddrMethTable;
+ if (FAILED(GetMTOfObject(dwAddr1, &dwAddrMethTable)) ||
+ (GetSizeEfficient(dwAddr1, dwAddrMethTable, FALSE, s, bPointers) == FALSE))
+ {
+ DMLOut("object %s: bad member %p at %p\n", DMLObject(objAddr), SOS_PTR(dwAddr1), SOS_PTR(itr.GetOffset()));
+ ret = FALSE;
+ }
+
+ if (IsMTForFreeObj(dwAddrMethTable))
+ {
+ DMLOut("object %s contains free object %p at %p\n", DMLObject(objAddr),
+ SOS_PTR(dwAddr1), SOS_PTR(objAddr+itr.GetOffset()));
+ ret = FALSE;
+ }
+
+ // verify card table
+ if (bCheckCard && NeedCard(objAddr+itr.GetOffset(), dwAddr1))
+ {
+ DMLOut("object %s:%s missing card_table entry for %p\n",
+ DMLObject(objAddr), (dwChild == dwAddr1) ? "" : " maybe",
+ SOS_PTR(objAddr+itr.GetOffset()));
+ ret = FALSE;
+ }
+ }
+ }
+
+ return ret;
+}
+
+// search for can_verify_deep in gc.cpp for examples of how these functions are used.
+BOOL VerifyObject(const GCHeapDetails &heap, const DacpHeapSegmentData &seg, DWORD_PTR objAddr, DWORD_PTR MTAddr, size_t objSize,
+ BOOL bVerifyMember)
+{
+ if (IsMTForFreeObj(MTAddr))
+ {
+ return TRUE;
+ }
+
+ if (objSize < min_obj_size)
+ {
+ DMLOut("object %s: size %d too small\n", DMLObject(objAddr), objSize);
+ return FALSE;
+ }
+
+ // If we requested to verify the object's members, the GC may be in a state where that's not possible.
+ // Here we check to see if the object in question needs to have its members updated. If so, we turn off
+ // verification for the object.
+ if (bVerifyMember)
+ {
+ BOOL consider_bgc_mark = FALSE, check_current_sweep = FALSE, check_saved_sweep = FALSE;
+ should_check_bgc_mark(heap, seg, &consider_bgc_mark, &check_current_sweep, &check_saved_sweep);
+ bVerifyMember = fgc_should_consider_object(heap, objAddr, seg, consider_bgc_mark, check_current_sweep, check_saved_sweep);
+ }
+
+ return bVerifyMember ? VerifyObjectMember(heap, objAddr) : TRUE;
+}
+
+
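+// Finds the heap segment that contains 'addr' and fills 'seg' with its data; returns FALSE
+// if the address does not fall inside any segment of this heap.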
+BOOL FindSegment(const GCHeapDetails &heap, DacpHeapSegmentData &seg, CLRDATA_ADDRESS addr)
+{
+ if (heap.has_regions)
+ {
+ CLRDATA_ADDRESS dwAddrSeg;
+ for (UINT n = 0; n <= GetMaxGeneration(); n++)
+ {
+ dwAddrSeg = (DWORD_PTR)heap.generation_table[n].start_segment;
+ while (dwAddrSeg != 0)
+ {
+ if (IsInterrupt())
+ return FALSE;
+ if (seg.Request(g_sos, dwAddrSeg, heap.original_heap_details) != S_OK)
+ {
+ ExtOut("Error requesting heap segment %p\n", SOS_PTR(dwAddrSeg));
+ return FALSE;
+ }
+ if (addr >= TO_TADDR(seg.mem) && addr < seg.highAllocMark)
+ {
+ return TRUE;
+ }
+ dwAddrSeg = (DWORD_PTR)seg.next;
+ }
+ }
+ return FALSE;
+ }
+ else
+ {
+ CLRDATA_ADDRESS dwAddrSeg = heap.generation_table[GetMaxGeneration()].start_segment;
+
+ // Request the initial segment.
+ if (seg.Request(g_sos, dwAddrSeg, heap.original_heap_details) != S_OK)
+ {
+ ExtOut("Error requesting heap segment %p.\n", SOS_PTR(dwAddrSeg));
+ return FALSE;
+ }
+
+ // Loop while the object is not in range of the segment.
+ while (addr < TO_TADDR(seg.mem) ||
+ addr >= (dwAddrSeg == heap.ephemeral_heap_segment ? heap.alloc_allocated : TO_TADDR(seg.allocated)))
+ {
+ // get the next segment
+ dwAddrSeg = seg.next;
+
+ // We reached the last segment without finding the object.
+ if (dwAddrSeg == NULL)
+ return FALSE;
+
+ if (seg.Request(g_sos, dwAddrSeg, heap.original_heap_details) != S_OK)
+ {
+ ExtOut("Error requesting heap segment %p.\n", SOS_PTR(dwAddrSeg));
+ return FALSE;
+ }
+ }
+ }
+
+ return TRUE;
+}
+
+BOOL VerifyObject(const GCHeapDetails &heap, DWORD_PTR objAddr, DWORD_PTR MTAddr, size_t objSize, BOOL bVerifyMember)
+{
+ // This is only used by the other VerifyObject function if bVerifyMember is true,
+ // so we only initialize it if we need it for verifying object members.
+ DacpHeapSegmentData seg;
+
+ if (bVerifyMember)
+ {
+ // if we fail to find the segment, we cannot verify the object's members
+ bVerifyMember = FindSegment(heap, seg, objAddr);
+ }
+
+ return VerifyObject(heap, seg, objAddr, MTAddr, objSize, bVerifyMember);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+typedef void (*TYPETREEVISIT)(size_t methodTable, size_t ID, LPVOID token);
+
+// TODO remove this. MethodTableCache already maps method tables to
+// various information. We don't need TypeTree to do this too.
+// Straightforward to do, but low priority.
+class TypeTree
+{
+private:
+ size_t methodTable;
+ size_t ID;
+ TypeTree *pLeft;
+ TypeTree *pRight;
+
+public:
+ TypeTree(size_t MT) : methodTable(MT),ID(0),pLeft(NULL),pRight(NULL) { }
+
+ BOOL isIn(size_t MT, size_t *pID)
+ {
+ TypeTree *pCur = this;
+
+ while (pCur)
+ {
+ if (MT == pCur->methodTable)
+ {
+ if (pID)
+ *pID = pCur->ID;
+ return TRUE;
+ }
+ else if (MT < pCur->methodTable)
+ pCur = pCur->pLeft;
+ else
+ pCur = pCur->pRight;
+ }
+
+ return FALSE;
+ }
+
+ BOOL insert(size_t MT)
+ {
+ TypeTree *pCur = this;
+
+ while (pCur)
+ {
+ if (MT == pCur->methodTable)
+ return TRUE;
+ else if ((MT < pCur->methodTable))
+ {
+ if (pCur->pLeft)
+ pCur = pCur->pLeft;
+ else
+ break;
+ }
+ else if (pCur->pRight)
+ pCur = pCur->pRight;
+ else
+ break;
+ }
+
+ // If we got here, we need to append at the current node.
+ TypeTree *pNewNode = new TypeTree(MT);
+ if (pNewNode == NULL)
+ return FALSE;
+
+ if (MT < pCur->methodTable)
+ pCur->pLeft = pNewNode;
+ else
+ pCur->pRight = pNewNode;
+
+ return TRUE;
+ }
+
+ static void destroy(TypeTree *pStart)
+ {
+ TypeTree *pCur = pStart;
+
+ if (pCur)
+ {
+ destroy(pCur->pLeft);
+ destroy(pCur->pRight);
+            delete pCur;   // nodes are allocated with scalar new, so use scalar delete
+ }
+ }
+
+ static void visit_inorder(TypeTree *pStart, TYPETREEVISIT pFunc, LPVOID token)
+ {
+ TypeTree *pCur = pStart;
+
+ if (pCur)
+ {
+ visit_inorder(pCur->pLeft, pFunc, token);
+ pFunc (pCur->methodTable, pCur->ID, token);
+ visit_inorder(pCur->pRight, pFunc, token);
+ }
+ }
+
+ static void setTypeIDs(TypeTree *pStart, size_t *pCurID)
+ {
+ TypeTree *pCur = pStart;
+
+ if (pCur)
+ {
+ setTypeIDs(pCur->pLeft, pCurID);
+ pCur->ID = *pCurID;
+ (*pCurID)++;
+ setTypeIDs(pCur->pRight, pCurID);
+ }
+ }
+
+};
+
+///////////////////////////////////////////////////////////////////////////////
+//
+
+HeapTraverser::HeapTraverser(bool verify)
+{
+ m_format = 0;
+ m_file = NULL;
+ m_objVisited = 0;
+ m_pTypeTree = NULL;
+ m_curNID = 1;
+ m_verify = verify;
+}
+
+HeapTraverser::~HeapTraverser()
+{
+ if (m_pTypeTree) {
+ TypeTree::destroy(m_pTypeTree);
+ m_pTypeTree = NULL;
+ }
+}
+
+BOOL HeapTraverser::Initialize()
+{
+ if (!GCHeapsTraverse (HeapTraverser::GatherTypes, this, m_verify))
+ {
+ ExtOut("Error during heap traverse\n");
+ return FALSE;
+ }
+
+ GetDependentHandleMap(mDependentHandleMap);
+
+ size_t startID = 1;
+ TypeTree::setTypeIDs(m_pTypeTree, &startID);
+
+ return TRUE;
+}
+
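+// Writes the report using the data gathered by Initialize(): first the type table and the
+// roots (GC handles plus stack references), then a second walk of the heap that emits every
+// non-free object together with its outgoing references.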
+BOOL HeapTraverser::CreateReport (FILE *fp, int format)
+{
+ if (fp == NULL || (format!=FORMAT_XML && format != FORMAT_CLRPROFILER))
+ {
+ return FALSE;
+ }
+
+ m_file = fp;
+ m_format = format;
+
+ PrintSection(TYPE_START,TRUE);
+
+ PrintSection(TYPE_TYPES,TRUE);
+ TypeTree::visit_inorder(m_pTypeTree, HeapTraverser::PrintOutTree, this);
+ PrintSection(TYPE_TYPES,FALSE);
+
+ ExtOut("tracing roots...\n");
+ PrintSection(TYPE_ROOTS,TRUE);
+ PrintRootHead();
+
+ TraceHandles();
+ FindGCRootOnStacks();
+
+ PrintRootTail();
+ PrintSection(TYPE_ROOTS,FALSE);
+
+ // now print type tree
+ PrintSection(TYPE_OBJECTS,TRUE);
+ ExtOut("\nWalking heap...\n");
+ m_objVisited = 0; // for UI updates
+ GCHeapsTraverse (HeapTraverser::PrintHeap, this, FALSE); // Never verify on the second pass
+ PrintSection(TYPE_OBJECTS,FALSE);
+
+ PrintSection(TYPE_START,FALSE);
+
+ m_file = NULL;
+ return TRUE;
+}
+
+void HeapTraverser::insert(size_t mTable)
+{
+ if (m_pTypeTree == NULL)
+ {
+ m_pTypeTree = new TypeTree(mTable);
+ if (m_pTypeTree == NULL)
+ {
+ ReportOOM();
+ return;
+ }
+ }
+ else
+ {
+ m_pTypeTree->insert(mTable);
+ }
+}
+
+size_t HeapTraverser::getID(size_t mTable)
+{
+ if (m_pTypeTree == NULL)
+ {
+ return 0;
+ }
+ // IDs start at 1, so we can return 0 if not found.
+ size_t ret;
+ if (m_pTypeTree->isIn(mTable,&ret))
+ {
+ return ret;
+ }
+
+ return 0;
+}
+
+void replace(std::string &str, const char* toReplace, const char* replaceWith)
+{
+ const size_t replaceLen = strlen(toReplace);
+ const size_t replaceWithLen = strlen(replaceWith);
+
+ size_t i = str.find(toReplace);
+    while (i != std::string::npos)
+ {
+ str.replace(i, replaceLen, replaceWith);
+ i = str.find(toReplace, i + replaceWithLen);
+ }
+}
+
+void HeapTraverser::PrintType(size_t ID, LPCWSTR wname)
+{
+ if (m_format==FORMAT_XML)
+ {
+ int len = (int)_wcslen(wname);
+ int size = WideCharToMultiByte(CP_ACP, 0, wname, len, NULL, 0, NULL, NULL);
+ char *buffer = (char*)_alloca(size + 1);
+ WideCharToMultiByte(CP_ACP, 0, wname, len, buffer, size, NULL, NULL);
+ buffer[size] = '\0';
+
+ // Sanitize name based on XML spec.
+ std::string name(buffer);
+        replace(name, "&", "&amp;");
+        replace(name, "\"", "&quot;");
+        replace(name, "'", "&apos;");
+        replace(name, "<", "&lt;");
+        replace(name, ">", "&gt;");
+
+ fprintf(m_file,
+ "<type id=\"%d\" name=\"%s\"/>\n",
+ ID, name.c_str());
+ }
+ else if (m_format==FORMAT_CLRPROFILER)
+ {
+ fprintf(m_file,
+ "t %d 0 %S\n",
+ ID, wname);
+ }
+}
+
+void HeapTraverser::PrintObjectHead(size_t objAddr,size_t typeID,size_t Size)
+{
+ if (m_format==FORMAT_XML)
+ {
+ fprintf(m_file,
+ "<object address=\"0x%p\" typeid=\"%d\" size=\"%d\">\n",
+ (PBYTE)objAddr,typeID, Size);
+ }
+ else if (m_format==FORMAT_CLRPROFILER)
+ {
+ fprintf(m_file,
+ "n %d 1 %d %d\n",
+ m_curNID,typeID,Size);
+
+ fprintf(m_file,
+ "! 1 0x%p %d\n",
+ (PBYTE)objAddr,m_curNID);
+
+ m_curNID++;
+
+ fprintf(m_file,
+ "o 0x%p %d %d ",
+ (PBYTE)objAddr,typeID,Size);
+ }
+}
+
+void HeapTraverser::PrintLoaderAllocator(size_t memberValue)
+{
+ if (m_format == FORMAT_XML)
+ {
+ fprintf(m_file,
+ " <loaderallocator address=\"0x%p\"/>\n",
+ (PBYTE)memberValue);
+ }
+ else if (m_format == FORMAT_CLRPROFILER)
+ {
+ fprintf(m_file,
+ " 0x%p",
+ (PBYTE)memberValue);
+ }
+}
+
+void HeapTraverser::PrintObjectMember(size_t memberValue, bool dependentHandle)
+{
+ if (m_format==FORMAT_XML)
+ {
+ fprintf(m_file,
+ " <member address=\"0x%p\"%s/>\n",
+ (PBYTE)memberValue, dependentHandle ? " dependentHandle=\"1\"" : "");
+ }
+ else if (m_format==FORMAT_CLRPROFILER)
+ {
+ fprintf(m_file,
+ " 0x%p",
+ (PBYTE)memberValue);
+ }
+}
+
+void HeapTraverser::PrintObjectTail()
+{
+ if (m_format==FORMAT_XML)
+ {
+ fprintf(m_file,
+ "</object>\n");
+ }
+ else if (m_format==FORMAT_CLRPROFILER)
+ {
+ fprintf(m_file,
+ "\n");
+ }
+}
+
+void HeapTraverser::PrintRootHead()
+{
+ if (m_format==FORMAT_CLRPROFILER)
+ {
+ fprintf(m_file,
+ "r ");
+ }
+}
+
+void HeapTraverser::PrintRoot(LPCWSTR kind,size_t Value)
+{
+ if (m_format==FORMAT_XML)
+ {
+ fprintf(m_file,
+ "<root kind=\"%S\" address=\"0x%p\"/>\n",
+ kind,
+ (PBYTE)Value);
+ }
+ else if (m_format==FORMAT_CLRPROFILER)
+ {
+ fprintf(m_file,
+ "0x%p ",
+ (PBYTE)Value);
+ }
+}
+
+void HeapTraverser::PrintRootTail()
+{
+ if (m_format==FORMAT_CLRPROFILER)
+ {
+ fprintf(m_file,
+ "\n");
+ }
+}
+
+void HeapTraverser::PrintSection(int Type,BOOL bOpening)
+{
+ const char *const pTypes[] = {"<gcheap>","<types>","<roots>","<objects>"};
+ const char *const pTypeEnds[] = {"</gcheap>","</types>","</roots>","</objects>"};
+
+ if (m_format==FORMAT_XML)
+ {
+ if ((Type >= 0) && (Type < TYPE_HIGHEST))
+ {
+ fprintf(m_file,"%s\n",bOpening ? pTypes[Type] : pTypeEnds[Type]);
+ }
+ else
+ {
+ ExtOut ("INVALID TYPE %d\n", Type);
+ }
+ }
+ else if (m_format==FORMAT_CLRPROFILER)
+ {
+ if ((Type == TYPE_START) && !bOpening) // a final newline is needed
+ {
+ fprintf(m_file,"\n");
+ }
+ }
+}
+
+void HeapTraverser::FindGCRootOnStacks()
+{
+ ArrayHolder<DWORD_PTR> threadList = NULL;
+ int numThreads = 0;
+
+ // GetThreadList calls ReportOOM so we don't need to do that here.
+ HRESULT hr = GetThreadList(&threadList, &numThreads);
+ if (FAILED(hr) || !threadList)
+ {
+ ExtOut("Failed to enumerate threads in the process.\n");
+ return;
+ }
+
+ int total = 0;
+ DacpThreadData vThread;
+ for (int i = 0; i < numThreads; i++)
+ {
+ if (FAILED(vThread.Request(g_sos, threadList[i])))
+ continue;
+
+ if (vThread.osThreadId)
+ {
+ unsigned int refCount = 0;
+ ArrayHolder<SOSStackRefData> refs = NULL;
+
+ if (FAILED(::GetGCRefs(vThread.osThreadId, &refs, &refCount, NULL, NULL)))
+ {
+ ExtOut("Failed to walk thread %x\n", vThread.osThreadId);
+ continue;
+ }
+
+ for (unsigned int i = 0; i < refCount; ++i)
+ if (refs[i].Object)
+ PrintRoot(W("stack"), TO_TADDR(refs[i].Object));
+ }
+ }
+
+}
+
+
+/* static */ void HeapTraverser::PrintOutTree(size_t methodTable, size_t ID,
+ LPVOID token)
+{
+ HeapTraverser *pHolder = (HeapTraverser *) token;
+ NameForMT_s(methodTable, g_mdName, mdNameLen);
+ pHolder->PrintType(ID,g_mdName);
+}
+
+
+/* static */ void HeapTraverser::PrintHeap(DWORD_PTR objAddr,size_t Size,
+ DWORD_PTR methodTable, LPVOID token)
+{
+ if (!IsMTForFreeObj (methodTable))
+ {
+ HeapTraverser *pHolder = (HeapTraverser *) token;
+ pHolder->m_objVisited++;
+ size_t ID = pHolder->getID(methodTable);
+
+ pHolder->PrintObjectHead(objAddr, ID, Size);
+ pHolder->PrintRefs(objAddr, methodTable, Size);
+ pHolder->PrintObjectTail();
+
+ if (pHolder->m_objVisited % 1024 == 0) {
+ ExtOut(".");
+ if (pHolder->m_objVisited % (1024*64) == 0)
+ ExtOut("\r\n");
+ }
+ }
+}
+
+void HeapTraverser::TraceHandles()
+{
+ unsigned int fetched = 0;
+ SOSHandleData data[64];
+
+ ToRelease<ISOSHandleEnum> handles;
+ HRESULT hr = g_sos->GetHandleEnum(&handles);
+ if (FAILED(hr))
+ return;
+
+ do
+ {
+ hr = handles->Next(ARRAY_SIZE(data), data, &fetched);
+
+ if (FAILED(hr))
+ break;
+
+ for (unsigned int i = 0; i < fetched; ++i)
+ PrintRoot(W("handle"), (size_t)data[i].Handle);
+ } while (fetched == ARRAY_SIZE(data));
+}
+
+/* static */ void HeapTraverser::GatherTypes(DWORD_PTR objAddr,size_t Size,
+ DWORD_PTR methodTable, LPVOID token)
+{
+ if (!IsMTForFreeObj (methodTable))
+ {
+ HeapTraverser *pHolder = (HeapTraverser *) token;
+ pHolder->insert(methodTable);
+ }
+}
+
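+// Emits the outgoing references of 'obj': pointers found through the type's GCDesc, the
+// loader allocator handle for collectible types, and any secondaries recorded for the
+// object in the dependent handle map.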
+void HeapTraverser::PrintRefs(size_t obj, size_t methodTable, size_t size)
+{
+ DWORD_PTR dwAddr = methodTable;
+
+    // TODO: pass info to the callback to avoid having to look up the MethodTableInfo again
+ MethodTableInfo* info = g_special_mtCache.Lookup((DWORD_PTR)methodTable);
+ _ASSERTE(info->IsInitialized()); // This is the second pass, so we should be initialized
+
+ if (!info->bContainsPointers && !info->bCollectible)
+ return;
+
+ if (info->bContainsPointers)
+ {
+ // Fetch the GCInfo from the other process
+ CGCDesc *map = info->GCInfo;
+ if (map == NULL)
+ {
+ INT_PTR nEntries;
+ move_xp (nEntries, dwAddr-sizeof(PVOID));
+ bool arrayOfVC = false;
+ if (nEntries<0)
+ {
+ arrayOfVC = true;
+ nEntries = -nEntries;
+ }
+
+ size_t nSlots = 1+nEntries*sizeof(CGCDescSeries)/sizeof(DWORD_PTR);
+ info->GCInfoBuffer = new DWORD_PTR[nSlots];
+ if (info->GCInfoBuffer == NULL)
+ {
+ ReportOOM();
+ return;
+ }
+
+ if (FAILED(rvCache->Read(TO_CDADDR(dwAddr - nSlots*sizeof(DWORD_PTR)),
+ info->GCInfoBuffer, (ULONG) (nSlots*sizeof(DWORD_PTR)), NULL)))
+ return;
+
+ map = info->GCInfo = (CGCDesc*)(info->GCInfoBuffer+nSlots);
+ info->ArrayOfVC = arrayOfVC;
+ }
+ }
+
+ mCache.EnsureRangeInCache((TADDR)obj, (unsigned int)size);
+ for (sos::RefIterator itr(obj, info->GCInfo, info->ArrayOfVC, &mCache); itr; ++itr)
+ {
+ if (*itr && (!m_verify || sos::IsObject(*itr)))
+ {
+ if (itr.IsLoaderAllocator())
+ {
+ PrintLoaderAllocator(*itr);
+ }
+ else
+ {
+ PrintObjectMember(*itr, false);
+ }
+ }
+ }
+
+ std::unordered_map<TADDR, std::list<TADDR>>::iterator itr = mDependentHandleMap.find((TADDR)obj);
+ if (itr != mDependentHandleMap.end())
+ {
+ for (std::list<TADDR>::iterator litr = itr->second.begin(); litr != itr->second.end(); ++litr)
+ {
+ PrintObjectMember(*litr, true);
+ }
+ }
+}
+
+void sos::ObjectIterator::BuildError(char *out, size_t count, const char *format, ...) const
+{
+ if (out == NULL || count == 0)
+ return;
+
+ va_list args;
+ va_start(args, format);
+
+ int written = vsprintf_s(out, count, format, args);
+ if (written > 0 && mLastObj)
+ sprintf_s(out+written, count-written, "\nLast good object: %p.\n", (int*)mLastObj);
+
+ va_end(args);
+}
+
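+// Checks the references inside the current object using the same rules as VerifyObjectMember
+// above: every member must have a readable method table, must not point at a free object,
+// and cross-generation references must be covered by a set card.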
+bool sos::ObjectIterator::VerifyObjectMembers(char *reason, size_t count) const
+{
+ if (!mCurrObj.HasPointers())
+ return true;
+
+ size_t size = mCurrObj.GetSize();
+ size_t objAddr = (size_t)mCurrObj.GetAddress();
+ TADDR mt = mCurrObj.GetMT();
+
+ INT_PTR nEntries;
+ MOVE(nEntries, mt-sizeof(PVOID));
+ if (nEntries < 0)
+ nEntries = -nEntries;
+
+ size_t nSlots = 1 + nEntries * sizeof(CGCDescSeries)/sizeof(DWORD_PTR);
+ ArrayHolder<DWORD_PTR> buffer = new DWORD_PTR[nSlots];
+
+ if (FAILED(g_ExtData->ReadVirtual(TO_CDADDR(mt - nSlots*sizeof(DWORD_PTR)),
+ buffer, (ULONG) (nSlots*sizeof(DWORD_PTR)), NULL)))
+ {
+ BuildError(reason, count, "Object %s has a bad GCDesc.", DMLObject(objAddr));
+ return false;
+ }
+
+ CGCDesc *map = (CGCDesc *)(buffer+nSlots);
+ CGCDescSeries* cur = map->GetHighestSeries();
+ CGCDescSeries* last = map->GetLowestSeries();
+
+ const size_t bufferSize = sizeof(size_t)*128;
+ size_t objBuffer[bufferSize/sizeof(size_t)];
+ size_t dwBeginAddr = (size_t)objAddr;
+ size_t bytesInBuffer = bufferSize;
+ if (size < bytesInBuffer)
+ bytesInBuffer = size;
+
+
+ if (FAILED(g_ExtData->ReadVirtual(TO_CDADDR(dwBeginAddr), objBuffer, (ULONG) bytesInBuffer,NULL)))
+ {
+ BuildError(reason, count, "Object %s: Failed to read members.", DMLObject(objAddr));
+ return false;
+ }
+
+ BOOL bCheckCard = TRUE;
+ {
+ DWORD_PTR dwAddrCard = (DWORD_PTR)objAddr;
+ while (dwAddrCard < objAddr + size)
+ {
+ if (CardIsSet (mHeaps[mCurrHeap], dwAddrCard))
+ {
+ bCheckCard = FALSE;
+ break;
+ }
+ dwAddrCard += card_size;
+ }
+ if (bCheckCard)
+ {
+ dwAddrCard = objAddr + size - 2*sizeof(PVOID);
+ if (CardIsSet (mHeaps[mCurrHeap], dwAddrCard))
+ {
+ bCheckCard = FALSE;
+ }
+ }
+ }
+
+ if (cur >= last)
+ {
+ do
+ {
+ BYTE** parm = (BYTE**)((objAddr) + cur->GetSeriesOffset());
+ BYTE** ppstop =
+ (BYTE**)((BYTE*)parm + cur->GetSeriesSize() + (size));
+ while (parm < ppstop)
+ {
+ CheckInterrupt();
+ size_t dwAddr1;
+
+ // Do we run out of cache?
+ if ((size_t)parm >= dwBeginAddr+bytesInBuffer)
+ {
+ // dwBeginAddr += bytesInBuffer;
+ dwBeginAddr = (size_t)parm;
+ if (dwBeginAddr >= objAddr + size)
+ {
+ return true;
+ }
+ bytesInBuffer = bufferSize;
+ if (objAddr+size-dwBeginAddr < bytesInBuffer)
+ {
+ bytesInBuffer = objAddr+size-dwBeginAddr;
+ }
+ if (FAILED(g_ExtData->ReadVirtual(TO_CDADDR(dwBeginAddr), objBuffer, (ULONG) bytesInBuffer, NULL)))
+ {
+ BuildError(reason, count, "Object %s: Failed to read members.", DMLObject(objAddr));
+ return false;
+ }
+ }
+ dwAddr1 = objBuffer[((size_t)parm-dwBeginAddr)/sizeof(size_t)];
+ if (dwAddr1) {
+ DWORD_PTR dwChild = dwAddr1;
+ // Try something more efficient than IsObject here. Is the methodtable valid?
+ size_t s;
+ BOOL bPointers;
+ DWORD_PTR dwAddrMethTable;
+ if (FAILED(GetMTOfObject(dwAddr1, &dwAddrMethTable)) ||
+ (GetSizeEfficient(dwAddr1, dwAddrMethTable, FALSE, s, bPointers) == FALSE))
+ {
+ BuildError(reason, count, "object %s: bad member %p at %p", DMLObject(objAddr),
+ SOS_PTR(dwAddr1), SOS_PTR(objAddr+(size_t)parm-objAddr));
+
+ return false;
+ }
+
+ if (IsMTForFreeObj(dwAddrMethTable))
+ {
+ sos::Throw<HeapCorruption>("object %s contains free object %p at %p", DMLObject(objAddr),
+ SOS_PTR(dwAddr1), SOS_PTR(objAddr+(size_t)parm-objAddr));
+ }
+
+ // verify card table
+ if (bCheckCard &&
+ NeedCard(objAddr+(size_t)parm-objAddr,dwChild))
+ {
+ BuildError(reason, count, "Object %s: %s missing card_table entry for %p",
+ DMLObject(objAddr), (dwChild == dwAddr1)? "" : " maybe",
+ SOS_PTR(objAddr+(size_t)parm-objAddr));
+
+ return false;
+ }
+ }
+ parm++;
+ }
+ cur--;
+ CheckInterrupt();
+
+ } while (cur >= last);
+ }
+ else
+ {
+ int cnt = (int) map->GetNumSeries();
+ BYTE** parm = (BYTE**)((objAddr) + cur->startoffset);
+ while ((BYTE*)parm < (BYTE*)((objAddr)+(size)-plug_skew))
+ {
+ for (int __i = 0; __i > cnt; __i--)
+ {
+ CheckInterrupt();
+
+ unsigned skip = cur->val_serie[__i].skip;
+ unsigned nptrs = cur->val_serie[__i].nptrs;
+ BYTE** ppstop = parm + nptrs;
+ do
+ {
+ size_t dwAddr1;
+ // Do we run out of cache?
+ if ((size_t)parm >= dwBeginAddr+bytesInBuffer)
+ {
+ // dwBeginAddr += bytesInBuffer;
+ dwBeginAddr = (size_t)parm;
+ if (dwBeginAddr >= objAddr + size)
+ return true;
+
+ bytesInBuffer = bufferSize;
+ if (objAddr+size-dwBeginAddr < bytesInBuffer)
+ bytesInBuffer = objAddr+size-dwBeginAddr;
+
+ if (FAILED(g_ExtData->ReadVirtual(TO_CDADDR(dwBeginAddr), objBuffer, (ULONG) bytesInBuffer, NULL)))
+ {
+ BuildError(reason, count, "Object %s: Failed to read members.", DMLObject(objAddr));
+ return false;
+ }
+ }
+ dwAddr1 = objBuffer[((size_t)parm-dwBeginAddr)/sizeof(size_t)];
+ {
+ if (dwAddr1)
+ {
+ DWORD_PTR dwChild = dwAddr1;
+ // Try something more efficient than IsObject here. Is the methodtable valid?
+ size_t s;
+ BOOL bPointers;
+ DWORD_PTR dwAddrMethTable;
+ if (FAILED(GetMTOfObject(dwAddr1, &dwAddrMethTable)) ||
+ (GetSizeEfficient(dwAddr1, dwAddrMethTable, FALSE, s, bPointers) == FALSE))
+ {
+ BuildError(reason, count, "Object %s: Bad member %p at %p.\n", DMLObject(objAddr),
+ SOS_PTR(dwAddr1), SOS_PTR(objAddr+(size_t)parm-objAddr));
+
+ return false;
+ }
+
+ if (IsMTForFreeObj(dwAddrMethTable))
+ {
+ BuildError(reason, count, "Object %s contains free object %p at %p.", DMLObject(objAddr),
+ SOS_PTR(dwAddr1), SOS_PTR(objAddr+(size_t)parm-objAddr));
+ return false;
+ }
+
+ // verify card table
+ if (bCheckCard &&
+ NeedCard (objAddr+(size_t)parm-objAddr,dwAddr1))
+ {
+ BuildError(reason, count, "Object %s:%s missing card_table entry for %p",
+ DMLObject(objAddr), (dwChild == dwAddr1) ? "" : " maybe",
+ SOS_PTR(objAddr+(size_t)parm-objAddr));
+
+ return false;
+ }
+ }
+ }
+ parm++;
+ CheckInterrupt();
+ } while (parm < ppstop);
+ parm = (BYTE**)((BYTE*)parm + skip);
+ }
+ }
+ }
+
+ return true;
+}
+
+bool sos::ObjectIterator::Verify(char *reason, size_t count) const
+{
+ try
+ {
+ TADDR mt = mCurrObj.GetMT();
+
+ if (MethodTable::GetFreeMT() == mt)
+ {
+ return true;
+ }
+
+ size_t size = mCurrObj.GetSize();
+ if (size < min_obj_size)
+ {
+ BuildError(reason, count, "Object %s: Size %d is too small.", DMLObject(mCurrObj.GetAddress()), size);
+ return false;
+ }
+
+ if (mCurrObj.GetAddress() + mCurrObj.GetSize() > mSegmentEnd)
+ {
+ BuildError(reason, count, "Object %s is too large. End of segment at %p.", DMLObject(mCurrObj), mSegmentEnd);
+ return false;
+ }
+
+ BOOL bVerifyMember = TRUE;
+
+ // If we requested to verify the object's members, the GC may be in a state where that's not possible.
+ // Here we check to see if the object in question needs to have its members updated. If so, we turn off
+ // verification for the object.
+ BOOL consider_bgc_mark = FALSE, check_current_sweep = FALSE, check_saved_sweep = FALSE;
+ should_check_bgc_mark(mHeaps[mCurrHeap], mSegment, &consider_bgc_mark, &check_current_sweep, &check_saved_sweep);
+ bVerifyMember = fgc_should_consider_object(mHeaps[mCurrHeap], mCurrObj.GetAddress(), mSegment,
+ consider_bgc_mark, check_current_sweep, check_saved_sweep);
+
+ if (bVerifyMember)
+ return VerifyObjectMembers(reason, count);
+ }
+ catch(const sos::Exception &e)
+ {
+ BuildError(reason, count, e.GetMesssage());
+ return false;
+ }
+
+ return true;
+}
+
+bool sos::ObjectIterator::Verify() const
+{
+ char *c = NULL;
+ return Verify(c, 0);
+}
return (size_t)stInfo.m_StringLength;
}
+
+ RefIterator::RefIterator(TADDR obj, LinearReadCache *cache)
+ : mCache(cache), mGCDesc(0), mArrayOfVC(false), mDone(false), mBuffer(0), mCurrSeries(0), mLoaderAllocatorObjectHandle(0),
+ i(0), mCount(0), mCurr(0), mStop(0), mObject(obj), mObjSize(0)
+ {
+ Init();
+ }
+
+ RefIterator::RefIterator(TADDR obj, CGCDesc *desc, bool arrayOfVC, LinearReadCache *cache)
+ : mCache(cache), mGCDesc(desc), mArrayOfVC(arrayOfVC), mDone(false), mBuffer(0), mCurrSeries(0), mLoaderAllocatorObjectHandle(0),
+ i(0), mCount(0), mCurr(0), mStop(0), mObject(obj), mObjSize(0)
+ {
+ Init();
+ }
+
+ RefIterator::~RefIterator()
+ {
+ if (mBuffer)
+ delete [] mBuffer;
+ }
+
+ const RefIterator &RefIterator::operator++()
+ {
+ if (mDone)
+ Throw<Exception>("Attempt to move past the end of the iterator.");
+
+ if (mCurr == mLoaderAllocatorObjectHandle)
+ {
+ // The mLoaderAllocatorObjectHandle is always the last reference returned
+ mDone = true;
+ return *this;
+ }
+
+ if (!mArrayOfVC)
+ {
+ mCurr += sizeof(TADDR);
+ if (mCurr >= mStop)
+ {
+ mCurrSeries--;
+ if (mCurrSeries < mGCDesc->GetLowestSeries())
+ {
+ mDone = true;
+ }
+ else
+ {
+ mCurr = mObject + mCurrSeries->GetSeriesOffset();
+ mStop = mCurr + mCurrSeries->GetSeriesSize() + mObjSize;
+ }
+ }
+ }
+ else
+ {
+ mCurr += sizeof(TADDR);
+ if (mCurr >= mStop)
+ {
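+                // Note: for an array of value classes mCount (the raw series count) is
+                // negative and val_serie is indexed with non-positive values, so the
+                // index walks downward and wraps back to 0 once it reaches mCount.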
+ int i_last = i;
+ i--;
+
+ if (i == mCount)
+ i = 0;
+
+ mCurr += mCurrSeries->val_serie[i_last].skip;
+ mStop = mCurr + mCurrSeries->val_serie[i].nptrs * sizeof(TADDR);
+ }
+
+ if (mCurr >= mObject + mObjSize - plug_skew)
+ mDone = true;
+ }
+
+ if (mDone && mLoaderAllocatorObjectHandle != NULL)
+ {
+ // The iteration over all regular object references is done, but there is one more
+ // reference for collectible types - the LoaderAllocator for GC
+ mCurr = mLoaderAllocatorObjectHandle;
+ mDone = false;
+ }
+
+ return *this;
+ }
+
+ TADDR RefIterator::operator*() const
+ {
+ return ReadPointer(mCurr);
+ }
+
+ TADDR RefIterator::GetOffset() const
+ {
+ return mCurr - mObject;
+ }
+
+ void RefIterator::Init()
+ {
+ TADDR mt = ReadPointer(mObject);
+ BOOL bContainsPointers = FALSE;
+ BOOL bCollectible = FALSE;
+ TADDR loaderAllocatorObjectHandle;
+
+ if (!GetSizeEfficient(mObject, mt, FALSE, mObjSize, bContainsPointers))
+ Throw<DataRead>("Failed to get size of object.");
+
+ if (!GetCollectibleDataEfficient(mt, bCollectible, loaderAllocatorObjectHandle))
+ Throw<DataRead>("Failed to get collectible info of object.");
+
+ if (!bContainsPointers && !bCollectible)
+ {
+ mDone = true;
+ return;
+ }
+
+ if (bContainsPointers)
+ {
+ if (!mGCDesc)
+ {
+ int entries = 0;
+
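+                // The GCDesc is stored at negative offsets from the method table: the
+                // series count sits in the slot immediately below the MT pointer, and a
+                // negative count marks an array of value classes (handled below).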
+ if (FAILED(MOVE(entries, mt-sizeof(TADDR))))
+ Throw<DataRead>("Failed to request number of entries for %p MT %p", mObject, mt);
+
+ // array of vc?
+ if (entries < 0)
+ {
+ entries = -entries;
+ mArrayOfVC = true;
+ }
+ else
+ {
+ mArrayOfVC = false;
+ }
+
+ size_t slots = 1 + entries * sizeof(CGCDescSeries)/sizeof(TADDR);
+
+ ArrayHolder<TADDR> buffer = new TADDR[slots];
+
+ ULONG fetched = 0;
+ CLRDATA_ADDRESS address = TO_CDADDR(mt - slots*sizeof(TADDR));
+ if (FAILED(g_ExtData->ReadVirtual(address, buffer, (ULONG)(slots*sizeof(TADDR)), &fetched)))
+ Throw<DataRead>("Failed to request GCDesc.");
+
+ mBuffer = buffer.Detach();
+ mGCDesc = (CGCDesc*)(mBuffer + slots);
+ }
+
+ mCurrSeries = mGCDesc->GetHighestSeries();
+
+ if (!mArrayOfVC)
+ {
+ mCurr = mObject + mCurrSeries->GetSeriesOffset();
+ mStop = mCurr + mCurrSeries->GetSeriesSize() + mObjSize;
+ }
+ else
+ {
+ i = 0;
+ mCurr = mObject + mCurrSeries->startoffset;
+ mStop = mCurr + mCurrSeries->val_serie[i].nptrs * sizeof(TADDR);
+ mCount = (int)mGCDesc->GetNumSeries();
+ }
+
+ if (mCurr == mStop)
+ operator++();
+ else if (mCurr >= mObject + mObjSize - plug_skew)
+ mDone = true;
+ }
+ else
+ {
+ mDone = true;
+ }
+
+ if (bCollectible)
+ {
+ mLoaderAllocatorObjectHandle = loaderAllocatorObjectHandle;
+ if (mDone)
+ {
+ // There are no object references, but there is still a reference for
+ // collectible types - the LoaderAllocator for GC
+ mCurr = mLoaderAllocatorObjectHandle;
+ mDone = false;
+ }
+ }
+ }
+
+
+ const TADDR GCHeap::HeapStart = 0;
+ const TADDR GCHeap::HeapEnd = ~0;
+
+ ObjectIterator::ObjectIterator(const GCHeapDetails *heap, int numHeaps, TADDR start, TADDR stop)
+ : bLarge(false), bPinned(false), mCurrObj(0), mLastObj(0), mStart(start), mEnd(stop), mSegmentEnd(0), mHeaps(heap),
+ mNumHeaps(numHeaps), mCurrHeap(0), mCurrRegionGen(0)
+ {
+ mAllocInfo.Init();
+ SOS_Assert(numHeaps > 0);
+
+ TADDR segStart;
+ if (heap->has_regions)
+ {
+ // with regions, we have a null terminated list for each generation
+ segStart = TO_TADDR(mHeaps[0].generation_table[mCurrRegionGen].start_segment);
+ }
+ else
+ {
+ segStart = TO_TADDR(mHeaps[0].generation_table[GetMaxGeneration()].start_segment);
+ }
+ if (FAILED(mSegment.Request(g_sos, segStart, mHeaps[0].original_heap_details)))
+ {
+ sos::Throw<DataRead>("Could not request segment data at %p.", segStart);
+ }
+
+ mCurrObj = mStart < TO_TADDR(mSegment.mem) ? TO_TADDR(mSegment.mem) : mStart;
+ mSegmentEnd = TO_TADDR(mSegment.highAllocMark);
+
+ TryAlignToObjectInRange();
+ }
+
+ bool ObjectIterator::TryMoveNextSegment()
+ {
+ CheckInterrupt();
+
+ if (mCurrHeap >= mNumHeaps)
+ {
+ return false;
+ }
+
+ TADDR next = TO_TADDR(mSegment.next);
+ if (next == NULL)
+ {
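+            // End of the current segment/region list. For regions, advance through the
+            // per-generation lists (gen0 through gen max, then LOH, then POH when present);
+            // for classic segments, walk SOH first, then LOH (gen max+1), then POH
+            // (gen max+2) when present, before moving on to the next GC heap.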
+ if (mHeaps[mCurrHeap].has_regions)
+ {
+ mCurrRegionGen++;
+ if ((mCurrRegionGen > GetMaxGeneration() + 2) ||
+ (mCurrRegionGen > GetMaxGeneration() + 1 && !mHeaps[mCurrHeap].has_poh))
+ {
+ mCurrHeap++;
+ if (mCurrHeap == mNumHeaps)
+ {
+ return false;
+ }
+ mCurrRegionGen = 0;
+ }
+ next = TO_TADDR(mHeaps[mCurrHeap].generation_table[mCurrRegionGen].start_segment);
+ }
+ else if (bPinned || (bLarge && !mHeaps[mCurrHeap].has_poh))
+ {
+ mCurrHeap++;
+ if (mCurrHeap == mNumHeaps)
+ {
+ return false;
+ }
+
+ bPinned = false;
+ bLarge = false;
+ next = TO_TADDR(mHeaps[mCurrHeap].generation_table[GetMaxGeneration()].start_segment);
+ }
+ else if (bLarge)
+ {
+ bLarge = false;
+ bPinned = true;
+ next = TO_TADDR(mHeaps[mCurrHeap].generation_table[GetMaxGeneration() + 2].start_segment);
+ }
+ else
+ {
+ bLarge = true;
+ next = TO_TADDR(mHeaps[mCurrHeap].generation_table[GetMaxGeneration() + 1].start_segment);
+ }
+ }
+
+ SOS_Assert(next != NULL);
+ if (FAILED(mSegment.Request(g_sos, next, mHeaps[mCurrHeap].original_heap_details)))
+ {
+ sos::Throw<DataRead>("Failed to request segment data at %p.", next);
+ }
+
+ mLastObj = 0;
+ mCurrObj = mStart < TO_TADDR(mSegment.mem) ? TO_TADDR(mSegment.mem) : mStart;
+ mSegmentEnd = TO_TADDR(mSegment.highAllocMark);
+ return true;
+ }
+
+ bool ObjectIterator::TryMoveToObjectInNextSegmentInRange()
+ {
+ if (TryMoveNextSegment())
+ {
+ return TryAlignToObjectInRange();
+ }
+
+ return false;
+ }
+
+ bool ObjectIterator::TryAlignToObjectInRange()
+ {
+ CheckInterrupt();
+ while (!MemOverlap(mStart, mEnd, TO_TADDR(mSegment.mem), mSegmentEnd))
+ {
+ CheckInterrupt();
+ if (!TryMoveNextSegment())
+ {
+ return false;
+ }
+ }
+
+ // At this point we know that the current segment contains objects in
+ // the correct range. However, there's no telling if the user gave us
+ // a starting address that corresponds to an object. If mStart is a
+ // valid object, then we'll just start there. If it's not we'll need
+ // to walk the segment from the beginning to find the first aligned
+ // object on or after mStart.
+ if (mCurrObj == mStart && !Object::IsValid(mStart))
+ {
+ // It's possible mCurrObj will equal mStart after this. That's fine.
+            // It means that the starting object is corrupt (and we'll figure
+            // that out when the user calls GetNext), or IsValid was wrong.
+ mLastObj = 0;
+ mCurrObj = TO_TADDR(mSegment.mem);
+ while (mCurrObj < mStart)
+ MoveToNextObject();
+ }
+
+ return true;
+ }
+
+
+
+ const Object &ObjectIterator::operator*() const
+ {
+ AssertSanity();
+ return mCurrObj;
+ }
+
+
+ const Object *ObjectIterator::operator->() const
+ {
+ AssertSanity();
+ return &mCurrObj;
+ }
+
+ //Object ObjectIterator::GetNext()
+ const ObjectIterator &ObjectIterator::operator++()
+ {
+ CheckInterrupt();
+
+ // Assert we aren't done walking the heap.
+ SOS_Assert(*this);
+ AssertSanity();
+
+ MoveToNextObject();
+ return *this;
+ }
+
+ void ObjectIterator::MoveToNextObjectCarefully()
+ {
+ CheckInterrupt();
+
+ SOS_Assert(*this);
+ AssertSanity();
+
+        // MoveToNextObject won't generally throw unless it fails to request the
+ // MethodTable of the object. At which point we won't know how large the
+ // current object is, nor how to move past it. In this case we'll simply
+ // move to the next segment if possible to continue iterating from there.
+ try
+ {
+ MoveToNextObject();
+ }
+ catch(const sos::Exception &)
+ {
+ TryMoveToObjectInNextSegmentInRange();
+ }
+ }
+
+ void ObjectIterator::AssertSanity() const
+ {
+        // Assert that we are in a sane state. Functions which call this assume two things:
+ // 1. That the current object is within the segment bounds.
+ // 2. That the current object is within the requested memory range.
+ SOS_Assert(mCurrObj >= TO_TADDR(mSegment.mem));
+ SOS_Assert(mCurrObj <= TO_TADDR(mSegmentEnd - Align(min_obj_size)));
+
+ SOS_Assert(mCurrObj >= mStart);
+ SOS_Assert(mCurrObj <= mEnd);
+ }
+
+ void ObjectIterator::MoveToNextObject()
+ {
+ CheckInterrupt();
+
+ // Object::GetSize can be unaligned, so we must align it ourselves.
+ size_t size = (bLarge || bPinned) ? AlignLarge(mCurrObj.GetSize()) : Align(mCurrObj.GetSize());
+
+ mLastObj = mCurrObj;
+ mCurrObj = mCurrObj.GetAddress() + size;
+
+ if (!bLarge)
+ {
+ // Is this the end of an allocation context? We need to know this because there can be
+ // allocated memory at the end of an allocation context that doesn't yet contain any objects.
+ // This happens because we actually allocate a minimum amount of memory (the allocation quantum)
+ // whenever we need to get more memory. Typically, a single allocation request won't fill this
+ // block, so we'll fulfill subsequent requests out of the remainder of the block until it's
+ // depleted.
+ int i;
+ for (i = 0; i < mAllocInfo.num; i ++)
+ {
+ if (mCurrObj == TO_TADDR(mAllocInfo.array[i].alloc_ptr)) // end of objects in this context
+ {
+ // Set mCurrObj to point after the context (alloc_limit is the end of the allocation context).
+ mCurrObj = TO_TADDR(mAllocInfo.array[i].alloc_limit) + Align(min_obj_size);
+ break;
+ }
+ }
+
+ // We also need to look at the gen0 alloc context.
+ if (mCurrObj == TO_TADDR(mHeaps[mCurrHeap].generation_table[0].allocContextPtr))
+ mCurrObj = TO_TADDR(mHeaps[mCurrHeap].generation_table[0].allocContextLimit) + Align(min_obj_size);
+ }
+
+ if (mCurrObj > mEnd || mCurrObj >= mSegmentEnd)
+ {
+ TryMoveToObjectInNextSegmentInRange();
+ }
+ }
+
+ SyncBlkIterator::SyncBlkIterator()
+ : mCurr(1), mTotal(0)
+ {
+ // If DacpSyncBlockData::Request fails with the call "1", then it means
+ // there are no SyncBlocks in the process.
+ DacpSyncBlockData syncBlockData;
+ if (SUCCEEDED(syncBlockData.Request(g_sos, 1)))
+ {
+ mTotal = syncBlockData.SyncBlockCount;
+ mSyncBlk = mCurr;
+ }
+ }
+
+ GCHeap::GCHeap()
+ {
+ if (FAILED(mHeapData.Request(g_sos)))
+ {
+ sos::Throw<DataRead>("Failed to request GC heap data.");
+ }
+
+ if (mHeapData.bServerMode)
+ {
+ mNumHeaps = mHeapData.HeapCount;
+ DWORD dwAllocSize = 0;
+ if (!ClrSafeInt<DWORD>::multiply(sizeof(CLRDATA_ADDRESS), mNumHeaps, dwAllocSize))
+ {
+ sos::Throw<Exception>("Failed to get GCHeaps: Integer overflow.");
+ }
+
+ CLRDATA_ADDRESS *heapAddrs = (CLRDATA_ADDRESS*)alloca(dwAllocSize);
+ if (FAILED(g_sos->GetGCHeapList(mNumHeaps, heapAddrs, NULL)))
+ {
+ sos::Throw<DataRead>("Failed to get GCHeaps.");
+ }
+
+ mHeaps = new GCHeapDetails[mNumHeaps];
+
+ for (int i = 0; i < mNumHeaps; i++)
+ {
+ DacpGcHeapDetails dacHeapDetails;
+ if (FAILED(dacHeapDetails.Request(g_sos, heapAddrs[i])))
+ {
+ sos::Throw<DataRead>("Failed to get GC heap details at %p.", heapAddrs[i]);
+ }
+
+ mHeaps[i].Set(dacHeapDetails, heapAddrs[i]);
+ }
+ }
+ else
+ {
+ mHeaps = new GCHeapDetails[1];
+ mNumHeaps = 1;
+
+ DacpGcHeapDetails dacGCDetails;
+ if (FAILED(dacGCDetails.Request(g_sos)))
+ {
+ sos::Throw<DataRead>("Failed to request GC details data.");
+ }
+
+ mHeaps[0].Set(dacGCDetails);
+ }
+ }
+
+ GCHeap::~GCHeap()
+ {
+ delete [] mHeaps;
+ }
+
+ ObjectIterator GCHeap::WalkHeap(TADDR start, TADDR stop) const
+ {
+ return ObjectIterator(mHeaps, mNumHeaps, start, stop);
+ }
+
+ bool GCHeap::AreGCStructuresValid() const
+ {
+ return mHeapData.bGcStructuresValid != FALSE;
+ }
+
// SyncBlk class
SyncBlk::SyncBlk()
: mIndex(0)
namespace sos
{
+ class GCHeap;
+
/* The base SOS Exception. Note that most commands should not attempt to be
* resilient to exceptions thrown by most functions here. Instead a top level
* try/catch at the beginning of the command which prints out the exception's
mutable WCHAR *mTypeName;
};
+ /* Enumerates all the GC references (objects) contained in an object. This uses the GCDesc
+ * map exactly as the GC does.
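+     *
+     * A minimal usage sketch (the 'objAddr' name and the output call are illustrative
+     * placeholders, not part of this API):
+     *     for (RefIterator ref(objAddr); ref; ++ref)
+     *         ExtOut("offset %p -> %p\n", SOS_PTR(ref.GetOffset()), SOS_PTR(*ref));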
+ */
+ class RefIterator
+ {
+ public:
+ RefIterator(TADDR obj, LinearReadCache *cache = NULL);
+ RefIterator(TADDR obj, CGCDesc *desc, bool arrayOfVC, LinearReadCache *cache = NULL);
+ ~RefIterator();
+
+ /* Moves to the next reference in the object.
+ */
+ const RefIterator &operator++();
+
+ /* Returns the address of the current reference.
+ */
+ TADDR operator*() const;
+
+ /* Gets the offset into the object where the current reference comes from.
+ */
+ TADDR GetOffset() const;
+
+        /* Returns true if there are more references in the iteration, false otherwise.
+ * Used as:
+ * if (itr)
+ * ...
+ */
+ inline operator void *() const
+ {
+ return (void*)!mDone;
+ }
+
+ bool IsLoaderAllocator() const
+ {
+ return mLoaderAllocatorObjectHandle == mCurr;
+ }
+
+ private:
+ void Init();
+ inline TADDR ReadPointer(TADDR addr) const
+ {
+ if (mCache)
+ {
+ if (!mCache->Read(addr, &addr, false))
+ Throw<DataRead>("Could not read address %p.", addr);
+ }
+ else
+ {
+ MOVE(addr, addr);
+ }
+
+ return addr;
+ }
+
+ private:
+ LinearReadCache *mCache;
+ CGCDesc *mGCDesc;
+ bool mArrayOfVC, mDone;
+
+ TADDR *mBuffer;
+ CGCDescSeries *mCurrSeries;
+
+ TADDR mLoaderAllocatorObjectHandle;
+
+ int i, mCount;
+
+ TADDR mCurr, mStop, mObject;
+ size_t mObjSize;
+ };
+
+
+ /* The Iterator used to walk the managed objects on the GC heap.
+ * The general usage pattern for this class is:
+ * for (ObjectIterator itr = gcheap.WalkHeap(); itr; ++itr)
+ * itr->SomeObjectMethod();
+ */
+ class ObjectIterator
+ {
+ friend class GCHeap;
+ public:
+
+ /* Returns the next object in the GCHeap. Note that you must ensure
+ * that there are more objects to walk before calling this function by
+ * checking "if (iterator)". If this function throws an exception,
+         * the iterator is invalid and should no longer be used to walk
+ * the heap. This should generally only happen if we cannot read the
+ * MethodTable of the object to move to the next object.
+ * Throws:
+ * DataRead
+ */
+ const ObjectIterator &operator++();
+
+ /* Dereference operator. This allows you to take a reference to the
+ * current object. Note the lifetime of this reference is valid for
+ * either the lifetime of the iterator or until you call operator++,
+ * whichever is shorter. For example.
+         *     void Foo(const Object &param);
+ * void Bar(const ObjectIterator &itr)
+ * {
+ * Foo(*itr);
+ * }
+ */
+ const Object &operator*() const;
+
+ /* Returns a pointer to the current Object to call members on it.
+ * The usage pattern for the iterator is to simply use operator->
+ * to call methods on the Object it points to without taking a
+ * direct reference to the underlying Object if at all possible.
+ */
+ const Object *operator->() const;
+
+ /* Returns false when the iterator has reached the end of the managed
+ * heap.
+ */
+ inline operator void *() const
+ {
+ return (void*)(SIZE_T)(mCurrHeap == mNumHeaps ? 0 : 1);
+ }
+
+ /* Do not use.
+ * TODO: Replace this functionality with int Object::GetGeneration().
+ */
+ bool IsCurrObjectOnLOH() const
+ {
+ SOS_Assert(*this);
+ return bLarge;
+ }
+
+ /* Verifies the current object. Returns true if the current object is valid.
+ * Returns false and fills 'buffer' with the reason the object is corrupted.
+ * This is a deeper validation than Object::IsValid as it checks the card
+         * table entries for the object in addition to the rest of the references.
+ * This function does not throw exceptions.
+ * Params:
+ * buffer - out buffer that is filled if and only if this function returns
+ * false.
+ * size - the total size of the buffer
+ * Returns:
+ * True if the object is valid, false otherwise.
+ */
+ bool Verify(__out_ecount(size) char *buffer, size_t size) const;
+
+ /* The same as Verify(char*, size_t), except it does not write out the failure
+ * reason to a provided buffer.
+ * See:
+ * ObjectIterator::Verify(char *, size_t)
+ */
+ bool Verify() const;
+
+ /* Attempts to move to the next object (similar to ObjectIterator++), but
+ * attempts to recover from any heap corruption by skipping to the next
+ * segment. If Verify returns false, meaning it detected heap corruption
+ * at the current object, you can use MoveToNextObjectCarefully instead of
+ * ObjectIterator++ to attempt to keep reading from the heap. If possible,
+ * this function attempts to move to the next object in the same segment,
+ * but if that's not possible then it skips to the next segment and
+ * continues from there.
+ * Note:
+ * This function can throw, and if it does then the iterator is no longer
+ * in a valid state. No further attempts to move to the next object will
+ * be possible.
+ * Throws:
+ * DataRead - if the heap is corrupted and it's not possible to continue
+ * walking the heap
+ */
+ void MoveToNextObjectCarefully();
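+
+        /* A sketch of the careful-walk pattern the two methods above are meant for.
+         * The 'gcheap' and 'reason' names are illustrative only, and
+         * MoveToNextObjectCarefully may still throw if the heap cannot be walked further:
+         *
+         *     char reason[256];
+         *     for (ObjectIterator itr = gcheap.WalkHeap(); itr; )
+         *     {
+         *         if (itr.Verify(reason, ARRAY_SIZE(reason)))
+         *         {
+         *             ++itr;
+         *         }
+         *         else
+         *         {
+         *             ExtOut("%s\n", reason);
+         *             itr.MoveToNextObjectCarefully();
+         *         }
+         *     }
+         */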
+
+ private:
+ ObjectIterator(const GCHeapDetails *heap, int numHeaps, TADDR start, TADDR stop);
+
+ bool VerifyObjectMembers(__out_ecount(size) char *buffer, size_t size) const;
+ void BuildError(__out_ecount(count) char *out, size_t count, const char *format, ...) const;
+
+ void AssertSanity() const;
+
+ /*
+ This function moves to the next segment/region without checking any restrictions
+ on the range. Returns true if it was able to move to a new segment/region.
+ */
+ bool TryMoveNextSegment();
+
+ /*
+ Aligns the iterator to the object that falls in the requested range, moving to
+ the next segment/region as necessary. The iterator state doesn't change if the
+ current object already lies in the requested range. Returns true if aligning
+ to such an object was possible.
+ */
+ bool TryAlignToObjectInRange();
+
+ /*
+ Moves to the next segment/region that contains an object in the requested
+            range and aligns it to such an object. This operation always moves the iterator.
+ Returns false if no such move was possible.
+ */
+ bool TryMoveToObjectInNextSegmentInRange();
+ void MoveToNextObject();
+
+ private:
+ DacpHeapSegmentData mSegment;
+ bool bLarge;
+ bool bPinned;
+ Object mCurrObj;
+ TADDR mLastObj, mStart, mEnd, mSegmentEnd;
+ AllocInfo mAllocInfo;
+ const GCHeapDetails *mHeaps;
+ int mNumHeaps;
+ int mCurrHeap;
+ unsigned mCurrRegionGen;
+ };
+
    /* Represents an entry in the sync block table.
*/
class SyncBlk
SyncBlk mSyncBlk;
};
+    /* A class which contains information about the GCHeap.
+ */
+ class GCHeap
+ {
+ public:
+ static const TADDR HeapStart; // A constant signifying the start of the GC heap.
+ static const TADDR HeapEnd; // A constant signifying the end of the GC heap.
+
+ public:
+ /* Constructor.
+ * Throws:
+ * DataRead
+ */
+ GCHeap();
+
+ ~GCHeap();
+
+ /* Returns an ObjectIterator which allows you to walk the objects on the managed heap.
+ * This ObjectIterator is valid for the duration of the GCHeap's lifetime. Note that
+ * if you specify an address at which you wish to start walking the heap it need
+ * not point directly to a managed object. However, if it does not, WalkHeap
+ * will need to walk the segment that address resides in to find the first object
+ * after that address, and if it encounters any heap corruption along the way,
+ * it may be impossible to walk the heap from the address specified.
+ *
+ * Params:
+ * start - The starting address at which you want to start walking the heap.
+ * This need not point directly to an object on the heap.
+     *   stop - The ending address at which you want to stop walking the heap. This
+     *          need not point directly to an object on the heap.
+ * Throws:
+ * DataRead
+ */
+ ObjectIterator WalkHeap(TADDR start = HeapStart, TADDR stop = HeapEnd) const;
+
+ /* Returns true if the GC Heap structures are in a valid state for traversal.
+ * Returns false if not (e.g. if we are in the middle of a relocation).
+ */
+ bool AreGCStructuresValid() const;
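+
+        /* A minimal sketch of how WalkHeap and AreGCStructuresValid are meant to be
+         * used together (the 'gcheap' and 'itr' names are illustrative only):
+         *
+         *     sos::GCHeap gcheap;
+         *     if (!gcheap.AreGCStructuresValid())
+         *         ExtOut("GC structures are not valid; the walk may be incomplete.\n");
+         *     for (sos::ObjectIterator itr = gcheap.WalkHeap(); itr; ++itr)
+         *         ExtOut("%p %S\n", SOS_PTR(itr->GetAddress()), itr->GetTypeName());
+         */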
+
+ private:
+ GCHeapDetails *mHeaps;
+ DacpGcHeapData mHeapData;
+ int mNumHeaps;
+ };
+
// convenience functions
/* A temporary wrapper function for Object::IsValid. There are too many locations
* in SOS which need to use IsObject but have a wide variety of internal
return hostServices->DispatchCommand(commandName, args);
}
}
- ExtErr("Unrecognized command %s\n", commandName);
return E_NOTIMPL;
}
return Status;
}
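+// Shared worker for DumpStackObjects: resolves the stack range to scan (from the
+// supplied expressions, or from the current thread's stack pointer and, on Windows
+// targets, the TEB stack bounds), builds the GC heap snapshot, and dumps the managed
+// objects found in that range via DumpStackObjectsHelper.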
+HRESULT DumpStackObjectsRaw(size_t nArg, __in_z LPSTR exprBottom, __in_z LPSTR exprTop, BOOL bVerify)
+{
+ size_t StackTop = 0;
+ size_t StackBottom = 0;
+ if (nArg==0)
+ {
+ ULONG64 StackOffset;
+ g_ExtRegisters->GetStackOffset(&StackOffset);
+
+ StackTop = TO_TADDR(StackOffset);
+ }
+ else
+ {
+ StackTop = GetExpression(exprTop);
+ if (StackTop == 0)
+ {
+ ExtOut("wrong option: %s\n", exprTop);
+ return E_FAIL;
+ }
+
+ if (nArg==2)
+ {
+ StackBottom = GetExpression(exprBottom);
+ if (StackBottom == 0)
+ {
+ ExtOut("wrong option: %s\n", exprBottom);
+ return E_FAIL;
+ }
+ }
+ }
+
+#ifndef FEATURE_PAL
+ if (IsWindowsTarget())
+ {
+ NT_TIB teb;
+ ULONG64 dwTebAddr = 0;
+ HRESULT hr = g_ExtSystem->GetCurrentThreadTeb(&dwTebAddr);
+ if (SUCCEEDED(hr) && SafeReadMemory(TO_TADDR(dwTebAddr), &teb, sizeof(NT_TIB), NULL))
+ {
+ if (StackTop > TO_TADDR(teb.StackLimit) && StackTop <= TO_TADDR(teb.StackBase))
+ {
+ if (StackBottom == 0 || StackBottom > TO_TADDR(teb.StackBase))
+ StackBottom = TO_TADDR(teb.StackBase);
+ }
+ }
+ }
+#endif
+
+ if (StackBottom == 0)
+ StackBottom = StackTop + 0xFFFF;
+
+ if (StackBottom < StackTop)
+ {
+        ExtOut("Wrong option: stack bottom is below stack top\n");
+ return E_FAIL;
+ }
+
+ // We can use the gc snapshot to eliminate object addresses that are
+ // not on the gc heap.
+ if (!g_snapshot.Build())
+ {
+ ExtOut("Unable to determine bounds of gc heap\n");
+ return E_FAIL;
+ }
+
+ // Print thread ID.
+ ULONG id = 0;
+ g_ExtSystem->GetCurrentThreadSystemId (&id);
+ ExtOut("OS Thread Id: 0x%x ", id);
+ g_ExtSystem->GetCurrentThreadId (&id);
+ ExtOut("(%d)\n", id);
+
+ DumpStackObjectsHelper(StackTop, StackBottom, bVerify);
+ return S_OK;
+}
+
/**********************************************************************\
* Routine Description: *
* *
\**********************************************************************/
DECLARE_API(DumpStackObjects)
{
- INIT_API_EXT();
+ INIT_API();
MINIDUMP_NOT_SUPPORTED();
+ StringHolder exprTop, exprBottom;
- return ExecuteCommand("dumpstackobjects", args);
+ BOOL bVerify = FALSE;
+ BOOL dml = FALSE;
+ CMDOption option[] =
+ { // name, vptr, type, hasValue
+ {"-verify", &bVerify, COBOOL, FALSE},
+ {"/d", &dml, COBOOL, FALSE}
+ };
+ CMDValue arg[] =
+ { // vptr, type
+ {&exprTop.data, COSTRING},
+ {&exprBottom.data, COSTRING}
+ };
+ size_t nArg;
+
+ if (!GetCMDOption(args, option, ARRAY_SIZE(option), arg, ARRAY_SIZE(arg), &nArg))
+ {
+ return Status;
+ }
+
+ EnableDMLHolder enableDML(dml);
+
+ return DumpStackObjectsRaw(nArg, exprBottom.data, exprTop.data, bVerify);
}
/**********************************************************************\
return DecodeILFromAddress(NULL, dwStartAddr);
}
- if (sos::IsObject(dwStartAddr))
+ if (!g_snapshot.Build())
+ {
+ ExtOut("Unable to build snapshot of the garbage collector state\n");
+ return Status;
+ }
+
+ if (g_snapshot.GetHeap(dwStartAddr) != NULL)
{
dwDynamicMethodObj = dwStartAddr;
}
if (SUCCEEDED(Status) && bRefs)
{
- std::stringstream argsBuilder;
- argsBuilder << std::hex << p_Object << " ";
- return ExecuteCommand("dumpobjgcrefs", argsBuilder.str().c_str());
+ ExtOut("GC Refs:\n");
+ TableOutput out(2, POINTERSIZE_HEX, AlignRight, 4);
+ out.WriteRow("offset", "object");
+ for (sos::RefIterator itr(TO_TADDR(p_Object)); itr; ++itr)
+ out.WriteRow(Hex(itr.GetOffset()), ObjectPtr(*itr));
}
}
catch(const sos::Exception &e)
DECLARE_API(TraverseHeap)
{
- INIT_API_EXT();
+ INIT_API();
MINIDUMP_NOT_SUPPORTED();
- return ExecuteCommand("traverseheap", args);
+ ONLY_SUPPORTED_ON_WINDOWS_TARGET();
+
+ BOOL bXmlFormat = FALSE;
+ BOOL bVerify = FALSE;
+ StringHolder Filename;
+
+ CMDOption option[] =
+ { // name, vptr, type, hasValue
+ {"-xml", &bXmlFormat, COBOOL, FALSE},
+ {"-verify", &bVerify, COBOOL, FALSE},
+ };
+ CMDValue arg[] =
+ { // vptr, type
+ {&Filename.data, COSTRING},
+ };
+ size_t nArg;
+ if (!GetCMDOption(args, option, ARRAY_SIZE(option), arg, ARRAY_SIZE(arg), &nArg))
+ {
+ return Status;
+ }
+
+ if (nArg != 1)
+ {
+        ExtOut("usage: %straverseheap [-xml] [-verify] filename\n", SOSPrefix);
+ return Status;
+ }
+
+ if (!g_snapshot.Build())
+ {
+ ExtOut("Unable to build snapshot of the garbage collector state\n");
+ return Status;
+ }
+
+ FILE* file = fopen(Filename.data, "w");
+ if (file == nullptr) {
+        ExtOut("Unable to open file %s: %s (%d)\n", Filename.data, strerror(errno), errno);
+ return Status;
+ }
+
+ if (!bVerify)
+        ExtOut("Assuming an uncorrupted GC heap. If this is a crash dump, consider the -verify option\n");
+
+ HeapTraverser traverser(bVerify != FALSE);
+
+ ExtOut("Writing %s format to file %s\n", bXmlFormat ? "Xml" : "CLRProfiler", Filename.data);
+ ExtOut("Gathering types...\n");
+
+ // TODO: there may be a canonical list of methodtables in the runtime that we can
+ // traverse instead of exploring the gc heap for that list. We could then simplify the
+ // tree structure to a sorted list of methodtables, and the index is the ID.
+
+ // TODO: "Traversing object members" code should be generalized and shared between
+ // gcroot and traverseheap. Also dumpheap can begin using GCHeapsTraverse.
+
+ if (!traverser.Initialize())
+ {
+ ExtOut("Error initializing heap traversal\n");
+ fclose(file);
+ return Status;
+ }
+
+ if (!traverser.CreateReport (file, bXmlFormat ? FORMAT_XML : FORMAT_CLRPROFILER))
+ {
+ ExtOut("Unable to write heap report\n");
+ fclose(file);
+ return Status;
+ }
+
+ fclose(file);
+ ExtOut("\nfile %s saved\n", Filename.data);
+
+ return Status;
+}
+
+struct PrintRuntimeTypeArgs
+{
+ DWORD_PTR mtOfRuntimeType;
+ int handleFieldOffset;
+ DacpAppDomainStoreData adstore;
+};
+
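+// GCHeapsTraverse callback: lazily resolves the System.RuntimeType method table and
+// its m_handle field offset on first sight, then prints the address, owning domain,
+// and type name for every System.RuntimeType instance encountered on the GC heap.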
+void PrintRuntimeTypes(DWORD_PTR objAddr,size_t Size,DWORD_PTR methodTable,LPVOID token)
+{
+ PrintRuntimeTypeArgs *pArgs = (PrintRuntimeTypeArgs *)token;
+
+ if (pArgs->mtOfRuntimeType == NULL)
+ {
+ NameForMT_s(methodTable, g_mdName, mdNameLen);
+
+ if (_wcscmp(g_mdName, W("System.RuntimeType")) == 0)
+ {
+ pArgs->mtOfRuntimeType = methodTable;
+ pArgs->handleFieldOffset = GetObjFieldOffset(TO_CDADDR(objAddr), TO_CDADDR(methodTable), W("m_handle"));
+ if (pArgs->handleFieldOffset <= 0)
+ ExtOut("Error getting System.RuntimeType.m_handle offset\n");
+
+ pArgs->adstore.Request(g_sos);
+ }
+ }
+
+ if ((methodTable == pArgs->mtOfRuntimeType) && (pArgs->handleFieldOffset > 0))
+ {
+ // Get the method table and display the information.
+ DWORD_PTR mtPtr;
+ if (MOVE(mtPtr, objAddr + pArgs->handleFieldOffset) == S_OK)
+ {
+ DMLOut(DMLObject(objAddr));
+
+ // Check if TypeDesc
+ if ((mtPtr & RUNTIMETYPE_HANDLE_IS_TYPEDESC) != 0)
+ {
+ ExtOut(" %p\n", mtPtr & ~RUNTIMETYPE_HANDLE_IS_TYPEDESC);
+ }
+ else
+ {
+ CLRDATA_ADDRESS appDomain = GetAppDomainForMT(mtPtr);
+ if (appDomain != NULL)
+ {
+ if (appDomain == pArgs->adstore.sharedDomain)
+ ExtOut(" %" POINTERSIZE "s", "Shared");
+
+ else if (appDomain == pArgs->adstore.systemDomain)
+ ExtOut(" %" POINTERSIZE "s", "System");
+ else
+ DMLOut(" %s", DMLDomain(appDomain));
+ }
+ else
+ {
+ ExtOut(" %" POINTERSIZE "s", "?");
+ }
+
+ if (NameForMT_s(mtPtr, g_mdName, mdNameLen))
+ {
+ DMLOut(" %s %S\n", DMLMethodTable(mtPtr), g_mdName);
+ }
+ }
+ }
+ }
}
+
DECLARE_API(DumpRuntimeTypes)
{
- INIT_API_EXT();
+ INIT_API();
MINIDUMP_NOT_SUPPORTED();
- return ExecuteCommand("dumpruntimetypes", args);
+
+ BOOL dml = FALSE;
+
+ CMDOption option[] =
+ { // name, vptr, type, hasValue
+ {"/d", &dml, COBOOL, FALSE},
+ };
+
+ if (!GetCMDOption(args, option, ARRAY_SIZE(option), NULL, 0, NULL))
+ return Status;
+
+ EnableDMLHolder dmlHolder(dml);
+
+ ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s Type Name \n",
+ "Address", "Domain", "MT");
+ ExtOut("------------------------------------------------------------------------------\n");
+
+ if (!g_snapshot.Build())
+ {
+ ExtOut("Unable to build snapshot of the garbage collector state\n");
+ return E_FAIL;
+ }
+
+ PrintRuntimeTypeArgs pargs;
+ ZeroMemory(&pargs, sizeof(PrintRuntimeTypeArgs));
+
+ try
+ {
+ GCHeapsTraverse(PrintRuntimeTypes, (LPVOID)&pargs);
+ }
+ catch(const sos::Exception &e)
+ {
+ ExtOut("%s\n", e.what());
+ return E_FAIL;
+ }
+
+ return Status;
}
namespace sos
DECLARE_API(VerifyObj)
{
- INIT_API_EXT();
+ INIT_API();
MINIDUMP_NOT_SUPPORTED();
- return ExecuteCommand("verifyobj", args);
+ TADDR taddrObj = 0;
+ TADDR taddrMT;
+ size_t objSize;
+
+ BOOL bValid = FALSE;
+ BOOL dml = FALSE;
+
+ CMDOption option[] =
+ { // name, vptr, type, hasValue
+ {"/d", &dml, COBOOL, FALSE},
+ };
+ CMDValue arg[] =
+ { // vptr, type
+ {&taddrObj, COHEX}
+ };
+ size_t nArg;
+ if (!GetCMDOption(args, option, ARRAY_SIZE(option), arg, ARRAY_SIZE(arg), &nArg))
+ {
+ return Status;
+ }
+
+ EnableDMLHolder dmlHolder(dml);
+ BOOL bContainsPointers;
+
+ if (FAILED(GetMTOfObject(taddrObj, &taddrMT)) ||
+ !GetSizeEfficient(taddrObj, taddrMT, FALSE, objSize, bContainsPointers))
+ {
+ ExtOut("object %#p does not have valid method table\n", SOS_PTR(taddrObj));
+ goto Exit;
+ }
+
+ // we need to build g_snapshot as it is later used in GetGeneration
+ if (!g_snapshot.Build())
+ {
+ ExtOut("Unable to build snapshot of the garbage collector state\n");
+ goto Exit;
+ }
+
+ try
+ {
+ GCHeapDetails *pheapDetails = g_snapshot.GetHeap(taddrObj);
+ bValid = VerifyObject(*pheapDetails, taddrObj, taddrMT, objSize, TRUE);
+ }
+ catch(const sos::Exception &e)
+ {
+ ExtOut("%s\n", e.what());
+ return E_FAIL;
+ }
+
+Exit:
+ if (bValid)
+ {
+ ExtOut("object %#p is a valid object\n", SOS_PTR(taddrObj));
+ }
+
+ return Status;
}
DECLARE_API(ListNearObj)
\**********************************************************************/
DECLARE_API(FinalizeQueue)
{
- INIT_API_EXT();
+ INIT_API();
MINIDUMP_NOT_SUPPORTED();
- return ExecuteCommand("finalizequeue", args);
+ BOOL bDetail = FALSE;
+ BOOL bAllReady = FALSE;
+ BOOL bShort = FALSE;
+ BOOL dml = FALSE;
+ TADDR taddrMT = 0;
+
+ CMDOption option[] =
+ { // name, vptr, type, hasValue
+ {"-detail", &bDetail, COBOOL, FALSE},
+ {"-allReady", &bAllReady, COBOOL, FALSE},
+ {"-short", &bShort, COBOOL, FALSE},
+ {"/d", &dml, COBOOL, FALSE},
+ {"-mt", &taddrMT, COHEX, TRUE},
+ };
+
+ if (!GetCMDOption(args, option, ARRAY_SIZE(option), NULL, 0, NULL))
+ {
+ return Status;
+ }
+
+ EnableDMLHolder dmlHolder(dml);
+ if (!bShort)
+ {
+ DacpSyncBlockCleanupData dsbcd;
+ CLRDATA_ADDRESS sbCurrent = NULL;
+ ULONG cleanCount = 0;
+ while ((dsbcd.Request(g_sos,sbCurrent) == S_OK) && dsbcd.SyncBlockPointer)
+ {
+ if (bDetail)
+ {
+ if (cleanCount == 0) // print first time only
+ {
+ ExtOut("SyncBlocks to be cleaned by the finalizer thread:\n");
+ ExtOut("%" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s %" POINTERSIZE "s\n",
+ "SyncBlock", "RCW", "CCW", "ComClassFactory");
+ }
+
+ ExtOut("%" POINTERSIZE "p %" POINTERSIZE "p %" POINTERSIZE "p %" POINTERSIZE "p\n",
+ (ULONG64) dsbcd.SyncBlockPointer,
+ (ULONG64) dsbcd.blockRCW,
+ (ULONG64) dsbcd.blockCCW,
+ (ULONG64) dsbcd.blockClassFactory);
+ }
+
+ cleanCount++;
+ sbCurrent = dsbcd.nextSyncBlock;
+ if (sbCurrent == NULL)
+ {
+ break;
+ }
+ }
+
+ ExtOut("SyncBlocks to be cleaned up: %d\n", cleanCount);
+
+#ifdef FEATURE_COMINTEROP
+ VisitRcwArgs travArgs;
+ ZeroMemory(&travArgs,sizeof(VisitRcwArgs));
+ travArgs.bDetail = bDetail;
+ g_sos->TraverseRCWCleanupList(0, (VISITRCWFORCLEANUP) VisitRcw, &travArgs);
+ ExtOut("Free-Threaded Interfaces to be released: %d\n", travArgs.FTMCount);
+ ExtOut("MTA Interfaces to be released: %d\n", travArgs.MTACount);
+ ExtOut("STA Interfaces to be released: %d\n", travArgs.STACount);
+#endif // FEATURE_COMINTEROP
+
+// noRCW:
+ ExtOut("----------------------------------\n");
+ }
+
+ // GC Heap
+ DWORD dwNHeaps = GetGcHeapCount();
+
+ HeapStat hpStat;
+
+ if (!IsServerBuild())
+ {
+ DacpGcHeapDetails heapDetails;
+ if (heapDetails.Request(g_sos) != S_OK)
+ {
+ ExtOut("Error requesting details\n");
+ return Status;
+ }
+
+ GatherOneHeapFinalization(heapDetails, &hpStat, bAllReady, bShort);
+ }
+ else
+ {
+ DWORD dwAllocSize;
+ if (!ClrSafeInt<DWORD>::multiply(sizeof(CLRDATA_ADDRESS), dwNHeaps, dwAllocSize))
+ {
+ ExtOut("Failed to get GCHeaps: integer overflow\n");
+ return Status;
+ }
+
+ CLRDATA_ADDRESS *heapAddrs = (CLRDATA_ADDRESS*)alloca(dwAllocSize);
+ if (g_sos->GetGCHeapList(dwNHeaps, heapAddrs, NULL) != S_OK)
+ {
+ ExtOut("Failed to get GCHeaps\n");
+ return Status;
+ }
+
+ for (DWORD n = 0; n < dwNHeaps; n ++)
+ {
+ DacpGcHeapDetails heapDetails;
+ if (heapDetails.Request(g_sos, heapAddrs[n]) != S_OK)
+ {
+ ExtOut("Error requesting details\n");
+ return Status;
+ }
+
+ ExtOut("------------------------------\n");
+ ExtOut("Heap %d\n", n);
+
+ GatherOneHeapFinalization(heapDetails, &hpStat, bAllReady, bShort);
+ }
+ }
+
+ if (!bShort)
+ {
+ if (bAllReady)
+ {
+ PrintGCStat(&hpStat, "Statistics for all finalizable objects that are no longer rooted:\n");
+ }
+ else
+ {
+ PrintGCStat(&hpStat, "Statistics for all finalizable objects (including all objects ready for finalization):\n");
+ }
+ }
+
+ return Status;
}
enum {
\**********************************************************************/
DECLARE_API(ThreadPool)
{
- INIT_API_EXT();
+ INIT_API();
MINIDUMP_NOT_SUPPORTED();
- return ExecuteCommand("threadpool", args);
+ BOOL doHCDump = FALSE, doWorkItemDump = FALSE, dml = FALSE;
+ BOOL mustBePortableThreadPool = FALSE;
+
+ CMDOption option[] =
+ { // name, vptr, type, hasValue
+ {"-ti", &doHCDump, COBOOL, FALSE},
+ {"-wi", &doWorkItemDump, COBOOL, FALSE},
+ {"/d", &dml, COBOOL, FALSE},
+ };
+
+ if (!GetCMDOption(args, option, ARRAY_SIZE(option), NULL, 0, NULL))
+ {
+ return E_FAIL;
+ }
+
+ EnableDMLHolder dmlHolder(dml);
+
+ DacpThreadpoolData threadpool;
+ Status = threadpool.Request(g_sos);
+ if (Status == E_NOTIMPL)
+ {
+ mustBePortableThreadPool = TRUE;
+ }
+ else if (Status != S_OK)
+ {
+ ExtOut(" %s\n", "Failed to request ThreadpoolMgr information");
+ return FAILED(Status) ? Status : E_FAIL;
+ }
+
+ DWORD_PTR corelibModule;
+ {
+ int numModule;
+ ArrayHolder<DWORD_PTR> moduleList = ModuleFromName(const_cast<LPSTR>("System.Private.CoreLib.dll"), &numModule);
+ if (moduleList == NULL || numModule != 1)
+ {
+ ExtOut(" %s\n", "Failed to find System.Private.CoreLib.dll");
+ return E_FAIL;
+ }
+ corelibModule = moduleList[0];
+ }
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+ // Check whether the portable thread pool is being used and fill in the thread pool data
+
+ UINT64 ui64Value = 0;
+ DacpObjectData vPortableTpHcLogArray;
+ int portableTpHcLogEntry_tickCountOffset = 0;
+ int portableTpHcLogEntry_stateOrTransitionOffset = 0;
+ int portableTpHcLogEntry_newControlSettingOffset = 0;
+ int portableTpHcLogEntry_lastHistoryCountOffset = 0;
+ int portableTpHcLogEntry_lastHistoryMeanOffset = 0;
+ do // while (false)
+ {
+ if (!mustBePortableThreadPool)
+ {
+ // Determine if the portable thread pool is enabled
+ if (FAILED(
+ GetNonSharedStaticFieldValueFromName(
+ &ui64Value,
+ corelibModule,
+ "System.Threading.ThreadPool",
+ W("UsePortableThreadPool"),
+ ELEMENT_TYPE_BOOLEAN)) ||
+ ui64Value == 0)
+ {
+ // The type was not loaded yet, or the static field was not found, etc. For now assume that the portable thread pool
+ // is not being used.
+ break;
+ }
+ }
+
+ // Get the thread pool instance
+ if (FAILED(
+ GetNonSharedStaticFieldValueFromName(
+ &ui64Value,
+ corelibModule,
+ "System.Threading.PortableThreadPool",
+ W("ThreadPoolInstance"),
+ ELEMENT_TYPE_CLASS)) ||
+ ui64Value == 0)
+ {
+ // The type was not loaded yet, or the static field was not found, etc. For now assume that the portable thread pool
+ // is not being used.
+ break;
+ }
+ CLRDATA_ADDRESS cdaTpInstance = TO_CDADDR(ui64Value);
+
+ // Get the thread pool method table
+ CLRDATA_ADDRESS cdaTpMethodTable;
+ {
+ TADDR tpMethodTableAddr = NULL;
+ if (FAILED(GetMTOfObject(TO_TADDR(cdaTpInstance), &tpMethodTableAddr)))
+ {
+ break;
+ }
+ cdaTpMethodTable = TO_CDADDR(tpMethodTableAddr);
+ }
+
+ DWORD_PTR ptrValue = 0;
+ INT32 i32Value = 0;
+ INT16 i16Value = 0;
+ int offset = 0;
+
+ // Populate fields of the thread pool with simple types
+ {
+ offset = GetObjFieldOffset(cdaTpInstance, cdaTpMethodTable, W("_cpuUtilization"));
+ if (offset <= 0 || FAILED(MOVE(i32Value, cdaTpInstance + offset)))
+ {
+ ExtOut(" %s\n", "Failed to read PortableThreadPool._cpuUtilization");
+ break;
+ }
+ threadpool.cpuUtilization = i32Value;
+
+ offset = GetObjFieldOffset(cdaTpInstance, cdaTpMethodTable, W("_minThreads"));
+ if (offset <= 0 || FAILED(MOVE(i16Value, cdaTpInstance + offset)))
+ {
+ ExtOut(" %s\n", "Failed to read PortableThreadPool._minThreads");
+ break;
+ }
+ threadpool.MinLimitTotalWorkerThreads = i16Value;
+
+ offset = GetObjFieldOffset(cdaTpInstance, cdaTpMethodTable, W("_maxThreads"));
+ if (offset <= 0 || FAILED(MOVE(i16Value, cdaTpInstance + offset)))
+ {
+ ExtOut(" %s\n", "Failed to read PortableThreadPool._maxThreads");
+ break;
+ }
+ threadpool.MaxLimitTotalWorkerThreads = i16Value;
+ }
+
+ // Populate thread counts
+ {
+ DacpFieldDescData vSeparatedField;
+ offset = GetObjFieldOffset(cdaTpInstance, cdaTpMethodTable, W("_separated"), TRUE, &vSeparatedField);
+ if (offset <= 0)
+ {
+ ExtOut(" %s\n", "Failed to read PortableThreadPool._separated");
+ break;
+ }
+ int accumulatedOffset = offset;
+
+ DacpFieldDescData vCountsField;
+ offset = GetValueFieldOffset(vSeparatedField.MTOfType, W("counts"), &vCountsField);
+ if (offset < 0)
+ {
+ ExtOut(" %s\n", "Failed to read PortableThreadPool._separated.counts");
+ break;
+ }
+ accumulatedOffset += offset;
+
+ offset = GetValueFieldOffset(vCountsField.MTOfType, W("_data"));
+ if (offset < 0 || FAILED(MOVE(ui64Value, cdaTpInstance + accumulatedOffset + offset)))
+ {
+ ExtOut(" %s\n", "Failed to read PortableThreadPool._separated.counts._data");
+ break;
+ }
+ UINT64 data = ui64Value;
+
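+            // counts._data packs several 16-bit counters into one 64-bit value; the
+            // shifts below pull out the "processing work" and "existing threads" counts
+            // used to derive the working/idle worker thread numbers.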
+ const UINT8 NumProcessingWorkShift = 0;
+ const UINT8 NumExistingThreadsShift = 16;
+
+ INT16 numProcessingWork = (INT16)(data >> NumProcessingWorkShift);
+ INT16 numExistingThreads = (INT16)(data >> NumExistingThreadsShift);
+
+ threadpool.NumIdleWorkerThreads = numExistingThreads - numProcessingWork;
+ threadpool.NumWorkingWorkerThreads = numProcessingWork;
+ threadpool.NumRetiredWorkerThreads = 0;
+ }
+
+ // Populate hill climbing log info
+ {
+ threadpool.HillClimbingLog = 0; // this indicates that the portable thread pool's hill climbing data should be used
+ threadpool.HillClimbingLogFirstIndex = 0;
+ threadpool.HillClimbingLogSize = 0;
+
+ // Get the hill climbing instance
+ if (FAILED(
+ GetNonSharedStaticFieldValueFromName(
+ &ui64Value,
+ corelibModule,
+ "System.Threading.PortableThreadPool+HillClimbing",
+ W("ThreadPoolHillClimber"),
+ ELEMENT_TYPE_CLASS)) ||
+ ui64Value == 0)
+ {
+ // The type was not loaded yet, or the static field was not found, etc. For now assume that the hill climber has
+ // not been used yet.
+ break;
+ }
+ CLRDATA_ADDRESS cdaTpHcInstance = TO_CDADDR(ui64Value);
+
+ // Get the thread pool method table
+ CLRDATA_ADDRESS cdaTpHcMethodTable;
+ {
+ TADDR tpHcMethodTableAddr = NULL;
+ if (FAILED(GetMTOfObject(TO_TADDR(cdaTpHcInstance), &tpHcMethodTableAddr)))
+ {
+ ExtOut(" %s\n", "Failed to get method table for PortableThreadPool.HillClimbing");
+ break;
+ }
+ cdaTpHcMethodTable = TO_CDADDR(tpHcMethodTableAddr);
+ }
+
+ offset = GetObjFieldOffset(cdaTpHcInstance, cdaTpHcMethodTable, W("_logStart"));
+ if (offset <= 0 || FAILED(MOVE(i32Value, cdaTpHcInstance + offset)))
+ {
+ ExtOut(" %s\n", "Failed to read PortableThreadPool.HillClimbing._logStart");
+ break;
+ }
+ int logStart = i32Value;
+
+ offset = GetObjFieldOffset(cdaTpHcInstance, cdaTpHcMethodTable, W("_logSize"));
+ if (offset <= 0 || FAILED(MOVE(i32Value, cdaTpHcInstance + offset)))
+ {
+ ExtOut(" %s\n", "Failed to read PortableThreadPool.HillClimbing._logSize");
+ break;
+ }
+ int logSize = i32Value;
+
+ offset = GetObjFieldOffset(cdaTpHcInstance, cdaTpHcMethodTable, W("_log"));
+ if (offset <= 0 || FAILED(MOVE(ptrValue, cdaTpHcInstance + offset)) || ptrValue == 0)
+ {
+ ExtOut(" %s\n", "Failed to read PortableThreadPool.HillClimbing._log");
+ break;
+ }
+ CLRDATA_ADDRESS cdaTpHcLog = TO_CDADDR(ptrValue);
+
+ // Validate the log array
+ if (!sos::IsObject(cdaTpHcLog, false) ||
+ vPortableTpHcLogArray.Request(g_sos, cdaTpHcLog) != S_OK ||
+ vPortableTpHcLogArray.ObjectType != OBJ_ARRAY ||
+ vPortableTpHcLogArray.ArrayDataPtr == 0 ||
+ vPortableTpHcLogArray.dwComponentSize != sizeof(HillClimbingLogEntry) ||
+ vPortableTpHcLogArray.ElementTypeHandle == 0)
+ {
+ ExtOut(" %s\n", "Failed to validate PortableThreadPool.HillClimbing._log");
+ break;
+ }
+
+ // Get the log entry field offsets
+ portableTpHcLogEntry_tickCountOffset =
+ GetValueFieldOffset(vPortableTpHcLogArray.ElementTypeHandle, W("tickCount"));
+ portableTpHcLogEntry_stateOrTransitionOffset =
+ GetValueFieldOffset(vPortableTpHcLogArray.ElementTypeHandle, W("stateOrTransition"));
+ portableTpHcLogEntry_newControlSettingOffset =
+ GetValueFieldOffset(vPortableTpHcLogArray.ElementTypeHandle, W("newControlSetting"));
+ portableTpHcLogEntry_lastHistoryCountOffset =
+ GetValueFieldOffset(vPortableTpHcLogArray.ElementTypeHandle, W("lastHistoryCount"));
+ portableTpHcLogEntry_lastHistoryMeanOffset =
+ GetValueFieldOffset(vPortableTpHcLogArray.ElementTypeHandle, W("lastHistoryMean"));
+ if (portableTpHcLogEntry_tickCountOffset < 0 ||
+ portableTpHcLogEntry_stateOrTransitionOffset < 0 ||
+ portableTpHcLogEntry_newControlSettingOffset < 0 ||
+ portableTpHcLogEntry_lastHistoryCountOffset < 0 ||
+ portableTpHcLogEntry_lastHistoryMeanOffset < 0)
+ {
+ ExtOut(" %s\n", "Failed to get a field offset in PortableThreadPool.HillClimbing.LogEntry");
+ break;
+ }
+
+ ExtOut("logStart: %d\n", logStart);
+ ExtOut("logSize: %d\n", logSize);
+ threadpool.HillClimbingLogFirstIndex = logStart;
+ threadpool.HillClimbingLogSize = logSize;
+ }
+ } while (false);
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ ExtOut ("CPU utilization: %d %s\n", threadpool.cpuUtilization, "%");
+ ExtOut ("Worker Thread:");
+ ExtOut (" Total: %d", threadpool.NumWorkingWorkerThreads + threadpool.NumIdleWorkerThreads + threadpool.NumRetiredWorkerThreads);
+ ExtOut (" Running: %d", threadpool.NumWorkingWorkerThreads);
+ ExtOut (" Idle: %d", threadpool.NumIdleWorkerThreads);
+ ExtOut (" MaxLimit: %d", threadpool.MaxLimitTotalWorkerThreads);
+ ExtOut (" MinLimit: %d", threadpool.MinLimitTotalWorkerThreads);
+ ExtOut ("\n");
+
+ int numWorkRequests = 0;
+ CLRDATA_ADDRESS workRequestPtr = threadpool.FirstUnmanagedWorkRequest;
+ DacpWorkRequestData workRequestData;
+ while (workRequestPtr)
+ {
+ if ((Status = workRequestData.Request(g_sos,workRequestPtr))!=S_OK)
+ {
+ ExtOut(" Failed to examine a WorkRequest\n");
+ return Status;
+ }
+ numWorkRequests++;
+ workRequestPtr = workRequestData.NextWorkRequest;
+ }
+
+ ExtOut ("Work Request in Queue: %d\n", numWorkRequests);
+ workRequestPtr = threadpool.FirstUnmanagedWorkRequest;
+ while (workRequestPtr)
+ {
+ if ((Status = workRequestData.Request(g_sos,workRequestPtr))!=S_OK)
+ {
+ ExtOut(" Failed to examine a WorkRequest\n");
+ return Status;
+ }
+
+ if (workRequestData.Function == threadpool.AsyncTimerCallbackCompletionFPtr)
+ ExtOut (" AsyncTimerCallbackCompletion TimerInfo@%p\n", SOS_PTR(workRequestData.Context));
+ else
+ ExtOut (" Unknown Function: %p Context: %p\n", SOS_PTR(workRequestData.Function),
+ SOS_PTR(workRequestData.Context));
+
+ workRequestPtr = workRequestData.NextWorkRequest;
+ }
+
+ if (doWorkItemDump && g_snapshot.Build())
+ {
+ // Display a message if the heap isn't verified.
+ sos::GCHeap gcheap;
+ if (!gcheap.AreGCStructuresValid())
+ {
+ DisplayInvalidStructuresMessage();
+ }
+
+ mdTypeDef threadPoolWorkQueueMd, threadPoolWorkStealingQueueMd;
+ GetInfoFromName(corelibModule, "System.Threading.ThreadPoolWorkQueue", &threadPoolWorkQueueMd);
+ GetInfoFromName(corelibModule, "System.Threading.ThreadPoolWorkQueue+WorkStealingQueue", &threadPoolWorkStealingQueueMd);
+
+ // Walk every heap item looking for the global queue and local queues.
+ ExtOut("\nQueued work items:\n%" THREAD_POOL_WORK_ITEM_TABLE_QUEUE_WIDTH "s %" POINTERSIZE "s %s\n", "Queue", "Address", "Work Item");
+ HeapStat stats;
+ for (sos::ObjectIterator itr = gcheap.WalkHeap(); !IsInterrupt() && itr != NULL; ++itr)
+ {
+ DacpMethodTableData mtdata;
+ if (mtdata.Request(g_sos, TO_TADDR(itr->GetMT())) != S_OK ||
+ mtdata.Module != corelibModule)
+ {
+ continue;
+ }
+
+ if (mtdata.cl == threadPoolWorkQueueMd)
+ {
+ // We found a ThreadPoolWorkQueue (there should be only one, given one AppDomain).
+
+ // Enumerate high-priority work items.
+ int offset = GetObjFieldOffset(itr->GetAddress(), itr->GetMT(), W("highPriorityWorkItems"));
+ if (offset > 0)
+ {
+ DWORD_PTR workItemsConcurrentQueuePtr;
+ MOVE(workItemsConcurrentQueuePtr, itr->GetAddress() + offset);
+ if (sos::IsObject(workItemsConcurrentQueuePtr, false))
+ {
+ // We got the ConcurrentQueue. Enumerate it.
+ EnumerateThreadPoolGlobalWorkItemConcurrentQueue(workItemsConcurrentQueuePtr, "[Global high-pri]", &stats);
+ }
+ }
+
+ // Enumerate assignable normal-priority work items.
+ offset = GetObjFieldOffset(itr->GetAddress(), itr->GetMT(), W("_assignableWorkItemQueues"));
+ if (offset > 0)
+ {
+ DWORD_PTR workItemsConcurrentQueueArrayPtr;
+ MOVE(workItemsConcurrentQueueArrayPtr, itr->GetAddress() + offset);
+ DacpObjectData workItemsConcurrentQueueArray;
+ if (workItemsConcurrentQueueArray.Request(g_sos, TO_CDADDR(workItemsConcurrentQueueArrayPtr)) == S_OK &&
+ workItemsConcurrentQueueArray.ObjectType == OBJ_ARRAY)
+ {
+ for (int i = 0; i < workItemsConcurrentQueueArray.dwNumComponents; i++)
+ {
+ DWORD_PTR workItemsConcurrentQueuePtr;
+ MOVE(workItemsConcurrentQueuePtr, workItemsConcurrentQueueArray.ArrayDataPtr + (i * workItemsConcurrentQueueArray.dwComponentSize));
+ if (workItemsConcurrentQueuePtr != NULL && sos::IsObject(TO_CDADDR(workItemsConcurrentQueuePtr), false))
+ {
+ // We got the ConcurrentQueue. Enumerate it.
+ EnumerateThreadPoolGlobalWorkItemConcurrentQueue(workItemsConcurrentQueuePtr, "[Global]", &stats);
+ }
+ }
+ }
+ }
+
+ // Enumerate normal-priority work items.
+ offset = GetObjFieldOffset(itr->GetAddress(), itr->GetMT(), W("workItems"));
+ if (offset > 0)
+ {
+ DWORD_PTR workItemsConcurrentQueuePtr;
+ MOVE(workItemsConcurrentQueuePtr, itr->GetAddress() + offset);
+ if (sos::IsObject(workItemsConcurrentQueuePtr, false))
+ {
+ // We got the ConcurrentQueue. Enumerate it.
+ EnumerateThreadPoolGlobalWorkItemConcurrentQueue(workItemsConcurrentQueuePtr, "[Global]", &stats);
+ }
+ }
+ }
+ else if (mtdata.cl == threadPoolWorkStealingQueueMd)
+ {
+ // We found a local queue. Get its work items array.
+ int offset = GetObjFieldOffset(itr->GetAddress(), itr->GetMT(), W("m_array"));
+ if (offset > 0)
+ {
+ // Walk every element in the array, outputting details on non-null work items.
+ DWORD_PTR workItemArrayPtr;
+ MOVE(workItemArrayPtr, itr->GetAddress() + offset);
+ DacpObjectData workItemArray;
+ if (workItemArray.Request(g_sos, TO_CDADDR(workItemArrayPtr)) == S_OK && workItemArray.ObjectType == OBJ_ARRAY)
+ {
+ for (int i = 0; i < workItemArray.dwNumComponents; i++)
+ {
+ DWORD_PTR workItemPtr;
+ MOVE(workItemPtr, workItemArray.ArrayDataPtr + (i * workItemArray.dwComponentSize));
+ if (workItemPtr != NULL && sos::IsObject(TO_CDADDR(workItemPtr), false))
+ {
+ sos::Object workItem = TO_TADDR(workItemPtr);
+ stats.Add((DWORD_PTR)workItem.GetMT(), (DWORD)workItem.GetSize());
+ DMLOut("%" THREAD_POOL_WORK_ITEM_TABLE_QUEUE_WIDTH "s %s %S", DMLObject(itr->GetAddress()), DMLObject(workItem.GetAddress()), workItem.GetTypeName());
+ if ((offset = GetObjFieldOffset(workItem.GetAddress(), workItem.GetMT(), W("_callback"))) > 0 ||
+ (offset = GetObjFieldOffset(workItem.GetAddress(), workItem.GetMT(), W("m_action"))) > 0)
+ {
+ DWORD_PTR delegatePtr;
+ MOVE(delegatePtr, workItem.GetAddress() + offset);
+ CLRDATA_ADDRESS md;
+ if (TryGetMethodDescriptorForDelegate(TO_CDADDR(delegatePtr), &md))
+ {
+ NameForMD_s((DWORD_PTR)md, g_mdName, mdNameLen);
+ ExtOut(" => %S", g_mdName);
+ }
+ }
+ ExtOut("\n");
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Output a summary.
+ stats.Sort();
+ stats.Print();
+ ExtOut("\n");
+ }
+
+ if (doHCDump)
+ {
+ ExtOut ("--------------------------------------\n");
+ ExtOut ("\nThread Injection History\n");
+ if (threadpool.HillClimbingLogSize > 0)
+ {
+ static char const * const TransitionNames[] =
+ {
+ "Warmup",
+ "Initializing",
+ "RandomMove",
+ "ClimbingMove",
+ "ChangePoint",
+ "Stabilizing",
+ "Starvation",
+ "ThreadTimedOut",
+ "CooperativeBlocking",
+ "Undefined"
+ };
+
+ bool usePortableThreadPoolHillClimbingData = threadpool.HillClimbingLog == 0;
+ int logCapacity =
+ usePortableThreadPoolHillClimbingData
+ ? (int)vPortableTpHcLogArray.dwNumComponents
+ : HillClimbingLogCapacity;
+
+ ExtOut("\n Time Transition New #Threads #Samples Throughput\n");
+ DacpHillClimbingLogEntry entry;
+
+ // Get the most recent entry first, so we can calculate time offsets
+ DWORD endTime;
+ int index = (threadpool.HillClimbingLogFirstIndex + threadpool.HillClimbingLogSize - 1) % logCapacity;
+ if (usePortableThreadPoolHillClimbingData)
+ {
+ CLRDATA_ADDRESS entryPtr =
+ TO_CDADDR(vPortableTpHcLogArray.ArrayDataPtr + index * sizeof(HillClimbingLogEntry));
+ INT32 i32Value = 0;
+
+ if (FAILED(Status = MOVE(i32Value, entryPtr + portableTpHcLogEntry_tickCountOffset)))
+ {
+ ExtOut(" Failed to examine a HillClimbing log entry\n");
+ return Status;
+ }
+
+ endTime = i32Value;
+ }
+ else
+ {
+ CLRDATA_ADDRESS entryPtr = threadpool.HillClimbingLog + (index * sizeof(HillClimbingLogEntry));
+ if ((Status = entry.Request(g_sos, entryPtr)) != S_OK)
+ {
+ ExtOut(" Failed to examine a HillClimbing log entry\n");
+ return Status;
+ }
+
+ endTime = entry.TickCount;
+ }
+
+ for (int i = 0; i < threadpool.HillClimbingLogSize; i++)
+ {
+ index = (i + threadpool.HillClimbingLogFirstIndex) % logCapacity;
+ if (usePortableThreadPoolHillClimbingData)
+ {
+ CLRDATA_ADDRESS entryPtr =
+ TO_CDADDR(vPortableTpHcLogArray.ArrayDataPtr + (index * sizeof(HillClimbingLogEntry)));
+ INT32 i32Value = 0;
+ float f32Value = 0;
+
+ if (FAILED(Status = MOVE(i32Value, entryPtr + portableTpHcLogEntry_tickCountOffset)))
+ {
+ ExtOut(" Failed to examine a HillClimbing log entry\n");
+ return Status;
+ }
+ entry.TickCount = i32Value;
+
+ if (FAILED(Status = MOVE(i32Value, entryPtr + portableTpHcLogEntry_stateOrTransitionOffset)))
+ {
+ ExtOut(" Failed to examine a HillClimbing log entry\n");
+ return Status;
+ }
+ entry.Transition = i32Value;
+
+ if (FAILED(Status = MOVE(i32Value, entryPtr + portableTpHcLogEntry_newControlSettingOffset)))
+ {
+ ExtOut(" Failed to examine a HillClimbing log entry\n");
+ return Status;
+ }
+ entry.NewControlSetting = i32Value;
+
+ if (FAILED(Status = MOVE(i32Value, entryPtr + portableTpHcLogEntry_lastHistoryCountOffset)))
+ {
+ ExtOut(" Failed to examine a HillClimbing log entry\n");
+ return Status;
+ }
+ entry.LastHistoryCount = i32Value;
+
+ if (FAILED(Status = MOVE(f32Value, entryPtr + portableTpHcLogEntry_lastHistoryMeanOffset)))
+ {
+ ExtOut(" Failed to examine a HillClimbing log entry\n");
+ return Status;
+ }
+ entry.LastHistoryMean = f32Value;
+ }
+ else
+ {
+ CLRDATA_ADDRESS entryPtr = threadpool.HillClimbingLog + (index * sizeof(HillClimbingLogEntry));
+
+ if ((Status = entry.Request(g_sos, entryPtr)) != S_OK)
+ {
+ ExtOut(" Failed to examine a HillClimbing log entry\n");
+ return Status;
+ }
+ }
+
+ ExtOut("%8.2lf %-14s %12d %12d %11.2lf\n",
+ (double)(int)(entry.TickCount - endTime) / 1000.0,
+ TransitionNames[entry.Transition],
+ entry.NewControlSetting,
+ entry.LastHistoryCount,
+ entry.LastHistoryMean);
+ }
+ }
+ }
+
+ ExtOut ("--------------------------------------\n");
+ ExtOut ("Number of Timers: %d\n", threadpool.NumTimers);
+ ExtOut ("--------------------------------------\n");
+
+ // Determine if the portable thread pool is being used for IO. The portable thread pool does not use a separate set of
+ // threads for processing IO completions.
+ if (FAILED(
+ GetNonSharedStaticFieldValueFromName(
+ &ui64Value,
+ corelibModule,
+ "System.Threading.ThreadPool",
+ W("UsePortableThreadPoolForIO"),
+ ELEMENT_TYPE_BOOLEAN)) ||
+ ui64Value == 0)
+ {
+ ExtOut ("Completion Port Thread:");
+ ExtOut ("Total: %d", threadpool.NumCPThreads);
+ ExtOut (" Free: %d", threadpool.NumFreeCPThreads);
+ ExtOut (" MaxFree: %d", threadpool.MaxFreeCPThreads);
+ ExtOut (" CurrentLimit: %d", threadpool.CurrentLimitTotalCPThreads);
+ ExtOut (" MaxLimit: %d", threadpool.MaxLimitTotalCPThreads);
+ ExtOut (" MinLimit: %d", threadpool.MinLimitTotalCPThreads);
+ ExtOut ("\n");
+ }
+
+ return S_OK;
}
DECLARE_API(FindAppDomain)
ExtOut("At this time %sgcroot should be used instead.\n", SOSPrefix);
return Status;
}
+ // validate argument
+ if (!g_snapshot.Build())
+ {
+ ExtOut("Unable to build snapshot of the garbage collector state\n");
+ return Status;
+ }
+
+ if (g_snapshot.GetHeap(taObj) == NULL)
+ {
+ ExtOut("Address %#p is not in the managed heap.\n", SOS_PTR(taObj));
+ return Status;
+ }
+
+ int ogen = g_snapshot.GetGeneration(taObj);
+ if (ogen > CNotification::GetCondemnedGen())
+ {
+ DMLOut("Object %s will survive this collection:\n\tgen(%#p) = %d > %d = condemned generation.\n",
+ DMLObject(taObj), SOS_PTR(taObj), ogen, CNotification::GetCondemnedGen());
+ return Status;
+ }
std::stringstream argsBuilder;
argsBuilder << "-gcgen " << CNotification::GetCondemnedGen() << " " << std::hex << taObj;
return;
}
+HRESULT GetNonSharedStaticFieldValueFromName(
+ UINT64* pValue,
+ DWORD_PTR moduleAddr,
+ const char *typeName,
+ __in_z LPCWSTR wszFieldName,
+ CorElementType fieldType)
+{
+ HRESULT hr = S_OK;
+
+ mdTypeDef mdType = 0;
+ GetInfoFromName(moduleAddr, typeName, &mdType);
+ if (mdType == 0)
+ {
+ return E_FAIL; // Failed to find type token
+ }
+
+ CLRDATA_ADDRESS cdaMethodTable = 0;
+ if (FAILED(hr = g_sos->GetMethodDescFromToken(moduleAddr, mdType, &cdaMethodTable)) ||
+ !IsValidToken(moduleAddr, mdType) ||
+ cdaMethodTable == 0)
+ {
+ return FAILED(hr) ? hr : E_FAIL; // Invalid type token or type is not loaded yet
+ }
+
+ DacpMethodTableData vMethodTable;
+ if ((hr = vMethodTable.Request(g_sos, cdaMethodTable)) != S_OK)
+ {
+ return FAILED(hr) ? hr : E_FAIL; // Failed to get method table data
+ }
+ if (vMethodTable.bIsShared)
+ {
+ ExtOut(" %s: %s\n", "Method table is shared (not implemented)", typeName);
+ return E_NOTIMPL;
+ }
+
+ DacpMethodTableFieldData vMethodTableFields;
+ if (FAILED(hr = vMethodTableFields.Request(g_sos, cdaMethodTable)))
+ {
+ return hr; // Failed to get field data
+ }
+
+ DacpModuleData vModule;
+ if ((hr = vModule.Request(g_sos, vMethodTable.Module)) != S_OK)
+ {
+ return FAILED(hr) ? hr : E_FAIL; // Failed to get module data
+ }
+
+ DacpDomainLocalModuleData vDomainLocalModule;
+ if ((hr = g_sos->GetDomainLocalModuleDataFromModule(vMethodTable.Module, &vDomainLocalModule)) != S_OK)
+ {
+ return FAILED(hr) ? hr : E_FAIL; // Failed to get domain local module data
+ }
+
+ ToRelease<IMetaDataImport> pImport = MDImportForModule(&vModule);
+ CLRDATA_ADDRESS cdaField = vMethodTableFields.FirstField;
+ DacpFieldDescData vFieldDesc;
+ bool found = false;
+ for (DWORD staticFieldIndex = 0; staticFieldIndex < vMethodTableFields.wNumStaticFields; )
+ {
+ if ((hr = vFieldDesc.Request(g_sos, cdaField)) != S_OK || vFieldDesc.Type >= ELEMENT_TYPE_MAX)
+ {
+ return FAILED(hr) ? hr : E_FAIL; // Failed to get member field desc
+ }
+ cdaField = vFieldDesc.NextField;
+
+ if (!vFieldDesc.bIsStatic)
+ {
+ continue;
+ }
+
+ ++staticFieldIndex;
+
+ if (vFieldDesc.Type != fieldType)
+ {
+ continue;
+ }
+
+ if (FAILED(hr = NameForToken_s(TokenFromRid(vFieldDesc.mb, mdtFieldDef), pImport, g_mdName, mdNameLen, false)))
+ {
+ return hr; // Failed to get member field name
+ }
+
+ if (_wcscmp(g_mdName, wszFieldName) != 0)
+ {
+ continue;
+ }
+
+ if (vFieldDesc.bIsThreadLocal || vFieldDesc.bIsContextLocal)
+ {
+ ExtOut(" %s: %s.%S\n", "Static field is thread-local or context-local (not implemented)", typeName, wszFieldName);
+ return E_NOTIMPL;
+ }
+
+ found = true;
+ break;
+ }
+
+ if (!found)
+ {
+ return E_FAIL; // Static field not found
+ }
+
+ DWORD_PTR pValueAddr = 0;
+ GetStaticFieldPTR(&pValueAddr, &vDomainLocalModule, &vMethodTable, &vFieldDesc);
+ if (pValueAddr == 0)
+ {
+ return E_FAIL; // Failed to get static field address
+ }
+
+ UINT64 value = 0;
+ if (FAILED(MOVEBLOCK(value, pValueAddr, gElementTypeInfo[fieldType])))
+ {
+ return E_FAIL; // Failed to read static field
+ }
+
+ *pValue = value;
+ return S_OK;
+}
+
// Return value: -1 = error,
// 0 = field not found,
// > 0 = offset to field from objAddr
}
+// Return value: -1 = error
+// -2 = not found
+// >= 0 = offset to field from cdaValue
+int GetValueFieldOffset(CLRDATA_ADDRESS cdaMT, __in_z LPCWSTR wszFieldName, DacpFieldDescData* pDacpFieldDescData)
+{
+#define EXITPOINT(EXPR) do { if(!(EXPR)) { return -1; } } while (0)
+
+ const int NOT_FOUND = -2;
+ DacpMethodTableData dmtd;
+ DacpMethodTableFieldData vMethodTableFields;
+ DacpFieldDescData vFieldDesc;
+ DacpModuleData module;
+ static DWORD numInstanceFields = 0; // Static due to recursion visiting parents
+ numInstanceFields = 0;
+
+ EXITPOINT(vMethodTableFields.Request(g_sos, cdaMT) == S_OK);
+
+ EXITPOINT(dmtd.Request(g_sos, cdaMT) == S_OK);
+ EXITPOINT(module.Request(g_sos, dmtd.Module) == S_OK);
+ if (dmtd.ParentMethodTable)
+ {
+ DWORD retVal = GetValueFieldOffset(dmtd.ParentMethodTable, wszFieldName, pDacpFieldDescData);
+ if (retVal != (DWORD)NOT_FOUND)
+ {
+ // Return in case of error or success. Fall through for field-not-found.
+ return retVal;
+ }
+ }
+
+ CLRDATA_ADDRESS dwAddr = vMethodTableFields.FirstField;
+ ToRelease<IMetaDataImport> pImport = MDImportForModule(&module);
+
+ while (numInstanceFields < vMethodTableFields.wNumInstanceFields)
+ {
+ EXITPOINT(vFieldDesc.Request(g_sos, dwAddr) == S_OK);
+
+ if (!vFieldDesc.bIsStatic)
+ {
+ NameForToken_s(TokenFromRid(vFieldDesc.mb, mdtFieldDef), pImport, g_mdName, mdNameLen, false);
+ if (_wcscmp(wszFieldName, g_mdName) == 0)
+ {
+ if (pDacpFieldDescData != NULL)
+ {
+ *pDacpFieldDescData = vFieldDesc;
+ }
+ return vFieldDesc.dwOffset;
+ }
+ numInstanceFields++;
+ }
+
+ dwAddr = vFieldDesc.NextField;
+ }
+
+ // Field name not found...
+ return NOT_FOUND;
+
+#undef EXITPOINT
+}
+
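// Editor's sketch (illustrative, not part of this change): hypothetical use of
// GetValueFieldOffset to read a field out of a value-type instance whose method table and
// start address are already known. The field name "_value" is made up for this example.
static HRESULT ExampleReadValueField(CLRDATA_ADDRESS cdaMT, DWORD_PTR valueStartAddr, INT32 *pResult)
{
    DacpFieldDescData fieldDesc;
    int offset = GetValueFieldOffset(cdaMT, W("_value"), &fieldDesc);
    if (offset < 0)
    {
        return E_FAIL;   // -1 = error, -2 = field not found (see the comment above)
    }

    INT32 value = 0;
    HRESULT Status;
    if (FAILED(Status = MOVE(value, valueStartAddr + offset)))
    {
        return Status;   // failed to read the field from the target
    }

    *pResult = value;
    return S_OK;
}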
// Returns an AppDomain address if AssemblyPtr is loaded into that domain only. Otherwise
// returns NULL
CLRDATA_ADDRESS IsInOneDomainOnly(CLRDATA_ADDRESS AssemblyPtr)
return FALSE;
}
+void DumpStackObjectsOutput(const char *location, DWORD_PTR objAddr, BOOL verifyFields)
+{
+ // rule out pointers that are outside of the gc heap.
+ if (g_snapshot.GetHeap(objAddr) == NULL)
+ return;
+
+ DacpObjectData objectData;
+ if (objectData.Request(g_sos, TO_CDADDR(objAddr)) != S_OK)
+ return;
+
+ if (sos::IsObject(objAddr, verifyFields != FALSE)
+ && !sos::MethodTable::IsFreeMT(TO_TADDR(objectData.MethodTable)))
+ {
+ DMLOut("%-" POINTERSIZE "s %s ", location, DMLObject(objAddr));
+ if (g_sos->GetObjectClassName(TO_CDADDR(objAddr), mdNameLen, g_mdName, NULL)==S_OK)
+ {
+ ExtOut("%S", g_mdName);
+
+ if (IsStringObject(objAddr))
+ {
+ ExtOut(" ");
+ StringObjectContent(objAddr, FALSE, 40);
+ }
+ else if (IsObjectArray(objAddr) &&
+ (g_sos->GetMethodTableName(objectData.ElementTypeHandle, mdNameLen, g_mdName, NULL) == S_OK))
+ {
+ ExtOut(" ");
+ ExtOut("(%S[])", g_mdName);
+ }
+ }
+ else
+ {
+ ExtOut("<unknown type>");
+ }
+ ExtOut("\n");
+ }
+}
+
+void DumpStackObjectsOutput(DWORD_PTR ptr, DWORD_PTR objAddr, BOOL verifyFields)
+{
+ char location[64];
+ sprintf_s(location, 64, "%p", (DWORD_PTR *)ptr);
+
+ DumpStackObjectsOutput(location, objAddr, verifyFields);
+}
+
+void DumpStackObjectsInternal(size_t StackTop, size_t StackBottom, BOOL verifyFields)
+{
+ for (DWORD_PTR ptr = StackTop; ptr <= StackBottom; ptr += sizeof(DWORD_PTR))
+ {
+ if (IsInterrupt())
+ return;
+
+ DWORD_PTR objAddr;
+ move_xp(objAddr, ptr);
+
+ DumpStackObjectsOutput(ptr, objAddr, verifyFields);
+ }
+}
+
+void DumpRegObjectHelper(const char *regName, BOOL verifyFields)
+{
+ DWORD_PTR reg;
+#ifdef FEATURE_PAL
+ if (FAILED(g_ExtRegisters->GetValueByName(regName, &reg)))
+ return;
+#else
+ DEBUG_VALUE value;
+ ULONG IREG;
+ if (FAILED(g_ExtRegisters->GetIndexByName(regName, &IREG)) ||
+ FAILED(g_ExtRegisters->GetValue(IREG, &value)))
+ return;
+
+#if defined(SOS_TARGET_X86) || defined(SOS_TARGET_ARM)
+ reg = (DWORD_PTR) value.I32;
+#elif defined(SOS_TARGET_AMD64) || defined(SOS_TARGET_ARM64)
+ reg = (DWORD_PTR) value.I64;
+#else
+#error Unsupported target
+#endif
+#endif // FEATURE_PAL
+
+ DumpStackObjectsOutput(regName, reg, verifyFields);
+}
+
+void DumpStackObjectsHelper (
+ TADDR StackTop,
+ TADDR StackBottom,
+ BOOL verifyFields)
+{
+ ExtOut(g_targetMachine->GetDumpStackObjectsHeading());
+
+ LPCSTR* regs;
+ unsigned int cnt;
+ g_targetMachine->GetGCRegisters(&regs, &cnt);
+
+ for (size_t i = 0; i < cnt; ++i)
+ DumpRegObjectHelper(regs[i], verifyFields);
+
+ // Make certain StackTop is dword aligned:
+ DumpStackObjectsInternal(StackTop & ~ALIGNCONST, StackBottom, verifyFields);
+}
+
void AddToModuleList(DWORD_PTR * &moduleList, int &numModule, int &maxList,
DWORD_PTR dwModuleAddr)
{
case DacpTieredVersionData::OptimizationTier_ReadyToRun:
descriptor = "ReadyToRun";
break;
- case DacpTieredVersionData::OptimizationTier_QuickJittedInstrumented:
- descriptor = "QuickJitted + Instrumented";
- break;
- case DacpTieredVersionData::OptimizationTier_OptimizedTier1Instrumented:
- descriptor = "OptimizedTier1 + Instrumented";
- break;
}
DMLOut(" CodeAddr: %s (%s)\n", DMLIP(pTieredVersionData[i].NativeCodeAddr), descriptor);
ExtOut(" NativeCodeVersion: %p\n", SOS_PTR(pTieredVersionData[i].NativeCodeVersionNodePtr));
#endif // !FEATURE_PAL
+size_t ObjectSize(DWORD_PTR obj,BOOL fIsLargeObject)
+{
+ DWORD_PTR dwMT;
+ MOVE(dwMT, obj);
+ return ObjectSize(obj, dwMT, FALSE, fIsLargeObject);
+}
+
+size_t ObjectSize(DWORD_PTR obj, DWORD_PTR mt, BOOL fIsValueClass, BOOL fIsLargeObject)
+{
+ BOOL bContainsPointers;
+ size_t size = 0;
+ if (!GetSizeEfficient(obj, mt, fIsLargeObject, size, bContainsPointers))
+ {
+ return 0;
+ }
+ return size;
+}
+
// This takes an array of values and sets every non-printable character
// to be a period.
void Flatten(__out_ecount(len) char *data, unsigned int len)
return heapData.bGcStructuresValid;
}
+void GetAllocContextPtrs(AllocInfo *pallocInfo)
+{
+ // gets the allocation contexts for all threads. This provides information about how much of
+ // the current allocation quantum has been allocated and the heap to which the quantum belongs.
+ // The allocation quantum is a fixed size chunk of zeroed memory from which allocations will come
+ // until it's filled. Each managed thread has its own allocation context.
+
+ pallocInfo->num = 0;
+ pallocInfo->array = NULL;
+
+ // get the thread store (See code:ClrDataAccess::RequestThreadStoreData for details)
+ DacpThreadStoreData ThreadStore;
+ if ( ThreadStore.Request(g_sos) != S_OK)
+ {
+ return;
+ }
+
+ int numThread = ThreadStore.threadCount;
+ if (numThread)
+ {
+ pallocInfo->array = new needed_alloc_context[numThread + 1];
+ if (pallocInfo->array == NULL)
+ {
+ return;
+ }
+ }
+
+ // get details for each thread in the thread store
+ CLRDATA_ADDRESS CurThread = ThreadStore.firstThread;
+ while (CurThread != NULL)
+ {
+ if (IsInterrupt())
+ return;
+
+ DacpThreadData Thread;
+ // Get information about the thread (we're getting the values of several of the
+ // fields of the Thread instance from the target) See code:ClrDataAccess::RequestThreadData for
+ // details
+ if (Thread.Request(g_sos, CurThread) != S_OK)
+ {
+ return;
+ }
+
+ if (Thread.allocContextPtr != 0)
+ {
+ // get a list of all the allocation contexts
+ int j;
+ for (j = 0; j < pallocInfo->num; j ++)
+ {
+ if (pallocInfo->array[j].alloc_ptr == (BYTE *) Thread.allocContextPtr)
+ break;
+ }
+ if (j == pallocInfo->num)
+ {
+ pallocInfo->num ++;
+ pallocInfo->array[j].alloc_ptr = (BYTE *) Thread.allocContextPtr;
+ pallocInfo->array[j].alloc_limit = (BYTE *) Thread.allocContextLimit;
+ }
+ }
+
+ CurThread = Thread.nextThread;
+ }
+
+ CLRDATA_ADDRESS allocPtr;
+ CLRDATA_ADDRESS allocLimit;
+
+ ReleaseHolder<ISOSDacInterface12> sos12;
+ if (SUCCEEDED(g_sos->QueryInterface(__uuidof(ISOSDacInterface12), &sos12)) &&
+ SUCCEEDED(sos12->GetGlobalAllocationContext(&allocPtr, &allocLimit)) &&
+ allocPtr != 0)
+ {
+ int j = pallocInfo->num ++;
+ pallocInfo->array[j].alloc_ptr = (BYTE *) allocPtr;
+ pallocInfo->array[j].alloc_limit = (BYTE *) allocLimit;
+ }
+}
+
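// Editor's sketch (illustrative, not part of this change): a minimal, hypothetical helper
// showing how the allocation-context list gathered above is typically consulted. Addresses
// between a context's alloc_ptr and alloc_limit are the unused remainder of that thread's
// allocation quantum, so a heap walker treats them as free space rather than objects.
static bool IsInsideAllocContext(const AllocInfo &allocInfo, BYTE *addr)
{
    for (int i = 0; i < allocInfo.num; i++)
    {
        // alloc_ptr..alloc_limit brackets the zeroed, not-yet-handed-out tail of the quantum.
        if (addr >= allocInfo.array[i].alloc_ptr && addr < allocInfo.array[i].alloc_limit)
        {
            return true;
        }
    }
    return false;
}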
HRESULT ReadVirtualCache::Read(TADDR address, PVOID buffer, ULONG bufferSize, PULONG lpcbBytesRead)
{
// address can be any random ULONG64, as it can come from VerifyObjectMember(), and this
}
#endif // FEATURE_PAL
+
+///////////////////////////////////////////////////////////////////////////////////////////
+//
+// Miscellaneous helper methods
+//
+
+void EnumerateThreadPoolGlobalWorkItemConcurrentQueue(
+ DWORD_PTR workItemsConcurrentQueuePtr,
+ const char *queueName,
+ HeapStat *stats)
+{
+ // Get its head segment.
+ sos::Object workItemsConcurrentQueue = TO_TADDR(workItemsConcurrentQueuePtr);
+ int offset = GetObjFieldOffset(workItemsConcurrentQueue.GetAddress(), workItemsConcurrentQueue.GetMT(), W("_head"));
+ if (offset <= 0)
+ {
+ return;
+ }
+
+ // Now, walk from segment to segment, each of which contains an array of work items.
+ DWORD_PTR segmentPtr;
+ MOVE(segmentPtr, workItemsConcurrentQueue.GetAddress() + offset);
+ while (sos::IsObject(segmentPtr, false))
+ {
+ sos::Object segment = TO_TADDR(segmentPtr);
+
+ // Get the work items array. It's an array of Slot structs, each of which starts with the item (the T).
+ offset = GetObjFieldOffset(segment.GetAddress(), segment.GetMT(), W("_slots"));
+ if (offset <= 0)
+ {
+ break;
+ }
+
+ DWORD_PTR slotsPtr;
+ MOVE(slotsPtr, segment.GetAddress() + offset);
+ if (!sos::IsObject(slotsPtr, false))
+ {
+ break;
+ }
+
+ // Walk every element in the array, outputting details on non-null work items.
+ DacpObjectData slotsArray;
+ if (slotsArray.Request(g_sos, TO_CDADDR(slotsPtr)) == S_OK && slotsArray.ObjectType == OBJ_ARRAY)
+ {
+ for (int i = 0; i < slotsArray.dwNumComponents; i++)
+ {
+ DWORD_PTR workItemPtr;
+ MOVE(workItemPtr, slotsArray.ArrayDataPtr + (i * slotsArray.dwComponentSize)); // the item object reference is at the beginning of the Slot
+ if (workItemPtr != NULL && sos::IsObject(TO_CDADDR(workItemPtr), false))
+ {
+ sos::Object workItem = TO_TADDR(workItemPtr);
+ stats->Add((DWORD_PTR)workItem.GetMT(), (DWORD)workItem.GetSize());
+ DMLOut("%" THREAD_POOL_WORK_ITEM_TABLE_QUEUE_WIDTH "s %s %S", queueName, DMLObject(workItem.GetAddress()), workItem.GetTypeName());
+ if ((offset = GetObjFieldOffset(workItem.GetAddress(), workItem.GetMT(), W("_callback"))) > 0 ||
+ (offset = GetObjFieldOffset(workItem.GetAddress(), workItem.GetMT(), W("m_action"))) > 0)
+ {
+ DWORD_PTR delegatePtr;
+ MOVE(delegatePtr, workItem.GetAddress() + offset);
+ CLRDATA_ADDRESS md;
+ if (TryGetMethodDescriptorForDelegate(TO_CDADDR(delegatePtr), &md))
+ {
+ NameForMD_s((DWORD_PTR)md, g_mdName, mdNameLen);
+ ExtOut(" => %S", g_mdName);
+ }
+ }
+ ExtOut("\n");
+ }
+ }
+ }
+
+ // Move to the next segment.
+ DacpFieldDescData segmentField;
+ offset = GetObjFieldOffset(segment.GetAddress(), segment.GetMT(), W("_nextSegment"), TRUE, &segmentField);
+ if (offset <= 0)
+ {
+ break;
+ }
+
+ MOVE(segmentPtr, segment.GetAddress() + offset);
+ if (segmentPtr == NULL)
+ {
+ break;
+ }
+ }
+}
static StaticData<char, 4, 1024> cache;
};
+class GCHeapDetails
+{
+private:
+ void GetGenerationTableSize(CLRDATA_ADDRESS svrHeapAddr, unsigned int *count)
+ {
+ HRESULT hr = S_OK;
+ bool success = false;
+ ReleaseHolder<ISOSDacInterface8> sos8;
+ if (!SUCCEEDED(hr = g_sos->QueryInterface(__uuidof(ISOSDacInterface8), &sos8))
+ || !SUCCEEDED(hr = sos8->GetNumberGenerations(count)))
+ {
+ // Either the runtime predates ISOSDacInterface8 and has the original 4 generations (assumed
+ // here), or it implements it and the call above already populated count.
+ *count = DAC_NUMBERGENERATIONS;
+ }
+ }
+
+ // Fill the target array with either the details from heap or if this is a newer runtime that supports
+ // the pinned object heap (or potentially future GC generations), get that data too. This abstraction is
+ // necessary because the original GC heap APIs are hardcoded to 4 generations.
+ void FillGenerationTable(CLRDATA_ADDRESS svrHeapAddr, const DacpGcHeapDetails &heap, unsigned int count, DacpGenerationData *data)
+ {
+ HRESULT hr = S_OK;
+ bool success = false;
+ unsigned int generationCount;
+ ReleaseHolder<ISOSDacInterface8> sos8;
+ if (SUCCEEDED(hr = g_sos->QueryInterface(__uuidof(ISOSDacInterface8), &sos8)))
+ {
+ if (svrHeapAddr == NULL)
+ {
+ if (SUCCEEDED(hr = sos8->GetGenerationTable(count, data, &generationCount))
+ && hr != S_FALSE)
+ {
+ success = true;
+ // Nothing else to do, data is already populated
+ }
+ }
+ else
+ {
+ if (SUCCEEDED(hr = sos8->GetGenerationTableSvr(svrHeapAddr, count, data, &generationCount))
+ && hr != S_FALSE)
+ {
+ success = true;
+ // Nothing else to do, data is already populated
+ }
+ }
+
+ _ASSERTE(generationCount == count || !success);
+ }
+
+ if (!success)
+ {
+ // This would mean that there are additional, unaccounted for, generations
+ _ASSERTE(hr != S_FALSE);
+
+ // We couldn't get any data from the newer APIs, so fall back to the original data
+ memcpy(data, &(heap.generation_table), sizeof(DacpGenerationData) * DAC_NUMBERGENERATIONS);
+ }
+ }
+
+ // Fill the target array with either the details from heap or if this is a newer runtime that supports
+ // the pinned object heap (or potentially future GC generations), get that data too. This abstraction is
+ // necessary because the original GC heap APIs are hardcoded to 4 generations.
+ void FillFinalizationPointers(CLRDATA_ADDRESS svrHeapAddr, const DacpGcHeapDetails &heap, unsigned int count, CLRDATA_ADDRESS *data)
+ {
+ HRESULT hr = S_OK;
+ bool success = false;
+ unsigned int fillPointersCount;
+ ReleaseHolder<ISOSDacInterface8> sos8;
+ if (SUCCEEDED(hr = g_sos->QueryInterface(__uuidof(ISOSDacInterface8), &sos8)))
+ {
+ if (svrHeapAddr == NULL)
+ {
+ if (SUCCEEDED(hr = sos8->GetFinalizationFillPointers(count, data, &fillPointersCount))
+ && hr != S_FALSE)
+ {
+ success = true;
+ // Nothing else to do, data is already populated
+ }
+ }
+ else
+ {
+ if (SUCCEEDED(hr = sos8->GetFinalizationFillPointersSvr(svrHeapAddr, count, data, &fillPointersCount))
+ && hr != S_FALSE)
+ {
+ success = true;
+ // Nothing else to do, data is already populated
+ }
+ }
+
+ _ASSERTE(fillPointersCount == count);
+ }
+
+ if (!success)
+ {
+ // This would mean that there are additional, unaccounted for, generations
+ _ASSERTE(hr != S_FALSE);
+
+ // We couldn't get any data from the newer APIs, so fall back to the original data
+ memcpy(data, &(heap.finalization_fill_pointers), sizeof(CLRDATA_ADDRESS) * (DAC_NUMBERGENERATIONS + 2));
+ }
+ }
+
+public:
+ GCHeapDetails()
+ {
+ generation_table = NULL;
+ finalization_fill_pointers = NULL;
+ }
+
+ GCHeapDetails(const DacpGcHeapDetails &dacGCDetails, CLRDATA_ADDRESS svrHeapAddr = NULL)
+ {
+ generation_table = NULL;
+ finalization_fill_pointers = NULL;
+
+ Set(dacGCDetails, svrHeapAddr);
+ }
+
+ ~GCHeapDetails()
+ {
+ if (generation_table != NULL)
+ {
+ delete[] generation_table;
+ generation_table = NULL;
+ }
+
+ if (finalization_fill_pointers != NULL)
+ {
+ delete[] finalization_fill_pointers;
+ finalization_fill_pointers = NULL;
+ }
+ }
+
+ // Due to the raw pointers, we are not a POD and have to be careful about lifetime
+ GCHeapDetails(const GCHeapDetails& other) = delete;
+ GCHeapDetails(GCHeapDetails&& other) = delete;
+ GCHeapDetails& operator=(const GCHeapDetails& other) = delete;
+ GCHeapDetails& operator=(GCHeapDetails&& other) = delete;
+
+ void Set(const DacpGcHeapDetails dacGCDetails, CLRDATA_ADDRESS svrHeapAddr = NULL)
+ {
+ original_heap_details = dacGCDetails;
+
+ GetGenerationTableSize(svrHeapAddr, &num_generations);
+ // Either we're pre POH and have 4, or post and have 5. If there's a different
+ // number it's either a bug or we need to update SOS.
+ _ASSERTE(num_generations == 4 || num_generations == 5);
+ has_poh = num_generations > 4;
+
+ if (generation_table != NULL)
+ {
+ delete[] generation_table;
+ }
+ generation_table = new DacpGenerationData[num_generations];
+ FillGenerationTable(svrHeapAddr, dacGCDetails, num_generations, generation_table);
+
+ if (finalization_fill_pointers != NULL)
+ {
+ delete[] finalization_fill_pointers;
+ }
+
+ unsigned int num_fill_pointers = num_generations + 2;
+ finalization_fill_pointers = new CLRDATA_ADDRESS[num_fill_pointers];
+ FillFinalizationPointers(svrHeapAddr, dacGCDetails, num_fill_pointers, finalization_fill_pointers);
+
+ heapAddr = svrHeapAddr;
+ alloc_allocated = dacGCDetails.alloc_allocated;
+ mark_array = dacGCDetails.mark_array;
+ current_c_gc_state = dacGCDetails.current_c_gc_state;
+ next_sweep_obj = dacGCDetails.next_sweep_obj;
+ saved_sweep_ephemeral_seg = dacGCDetails.saved_sweep_ephemeral_seg;
+ saved_sweep_ephemeral_start = dacGCDetails.saved_sweep_ephemeral_start;
+ background_saved_lowest_address = dacGCDetails.background_saved_lowest_address;
+ background_saved_highest_address = dacGCDetails.background_saved_highest_address;
+ ephemeral_heap_segment = dacGCDetails.ephemeral_heap_segment;
+ lowest_address = dacGCDetails.lowest_address;
+ highest_address = dacGCDetails.highest_address;
+ card_table = dacGCDetails.card_table;
+ has_regions = generation_table[0].start_segment != generation_table[1].start_segment;
+ has_background_gc = dacGCDetails.mark_array != -1;
+ }
+
+ DacpGcHeapDetails original_heap_details;
+ bool has_poh;
+ bool has_regions;
+ bool has_background_gc;
+ CLRDATA_ADDRESS heapAddr; // Only filled in in server mode, otherwise NULL
+ CLRDATA_ADDRESS alloc_allocated;
+
+ CLRDATA_ADDRESS mark_array;
+ CLRDATA_ADDRESS current_c_gc_state;
+ CLRDATA_ADDRESS next_sweep_obj;
+ CLRDATA_ADDRESS saved_sweep_ephemeral_seg;
+ CLRDATA_ADDRESS saved_sweep_ephemeral_start;
+ CLRDATA_ADDRESS background_saved_lowest_address;
+ CLRDATA_ADDRESS background_saved_highest_address;
+
+ // There are num_generations entries in generation_table and num_generations + 2 entries
+ // in finalization_fill_pointers (see Set above).
+ unsigned int num_generations;
+ DacpGenerationData *generation_table;
+ CLRDATA_ADDRESS ephemeral_heap_segment;
+ CLRDATA_ADDRESS *finalization_fill_pointers;
+ CLRDATA_ADDRESS lowest_address;
+ CLRDATA_ADDRESS highest_address;
+ CLRDATA_ADDRESS card_table;
+
+};
+
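// Editor's sketch (illustrative, not part of this change): the expected call pattern for
// GCHeapDetails, inferred from the interface above. This assumes the workstation-GC form of
// DacpGcHeapDetails::Request; for server GC the per-heap address would be passed as the
// second constructor argument instead of defaulting to NULL.
inline void ExampleDumpGenerationStarts()
{
    DacpGcHeapDetails dacDetails;
    if (dacDetails.Request(g_sos) == S_OK)
    {
        GCHeapDetails heap(dacDetails);   // svrHeapAddr defaults to NULL (workstation)
        for (unsigned int gen = 0; gen < heap.num_generations; gen++)
        {
            ExtOut("generation %u start_segment = %#p\n",
                   gen, SOS_PTR(heap.generation_table[gen].start_segment));
        }
    }
}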
// Things in this namespace should not be directly accessed/called outside of
// the output-related functions.
namespace Output
const char *ElementTypeName (unsigned type);
void DisplayFields (CLRDATA_ADDRESS cdaMT, DacpMethodTableData *pMTD, DacpMethodTableFieldData *pMTFD,
DWORD_PTR dwStartAddr = 0, BOOL bFirst=TRUE, BOOL bValueClass=FALSE);
+HRESULT GetNonSharedStaticFieldValueFromName(UINT64* pValue, DWORD_PTR moduleAddr, const char *typeName, __in_z LPCWSTR wszFieldName, CorElementType fieldType);
int GetObjFieldOffset(CLRDATA_ADDRESS cdaObj, __in_z LPCWSTR wszFieldName, BOOL bFirst=TRUE);
int GetObjFieldOffset(CLRDATA_ADDRESS cdaObj, CLRDATA_ADDRESS cdaMT, __in_z LPCWSTR wszFieldName, BOOL bFirst=TRUE, DacpFieldDescData* pDacpFieldDescData=NULL);
int GetValueFieldOffset(CLRDATA_ADDRESS cdaMT, __in_z LPCWSTR wszFieldName, DacpFieldDescData* pDacpFieldDescData=NULL);
HRESULT GetMTOfObject(TADDR obj, TADDR *mt);
+struct needed_alloc_context
+{
+ BYTE* alloc_ptr; // starting point for next allocation
+ BYTE* alloc_limit; // ending point for allocation region/quantum
+};
+
+struct AllocInfo
+{
+ needed_alloc_context *array;
+ int num; // number of allocation contexts in array
+
+ AllocInfo()
+ : array(NULL)
+ , num(0)
+ {}
+ void Init()
+ {
+ extern void GetAllocContextPtrs(AllocInfo *pallocInfo);
+ GetAllocContextPtrs(this);
+ }
+ ~AllocInfo()
+ {
+ if (array != NULL)
+ delete[] array;
+ }
+};
+
struct GCHandleStatistics
{
HeapStat hs;
}
};
+struct SegmentLookup
+{
+ DacpHeapSegmentData *m_segments;
+ int m_iSegmentsSize;
+ int m_iSegmentCount;
+
+ SegmentLookup();
+ ~SegmentLookup();
+
+ void Clear();
+ BOOL AddSegment(DacpHeapSegmentData *pData);
+ CLRDATA_ADDRESS GetHeap(CLRDATA_ADDRESS object, BOOL& bFound);
+};
+
+class GCHeapSnapshot
+{
+private:
+ BOOL m_isBuilt;
+ GCHeapDetails *m_heapDetails;
+ DacpGcHeapData m_gcheap;
+ SegmentLookup m_segments;
+
+ BOOL AddSegments(const GCHeapDetails& details);
+public:
+ GCHeapSnapshot();
+
+ BOOL Build();
+ void Clear();
+ BOOL IsBuilt() { return m_isBuilt; }
+
+ DacpGcHeapData *GetHeapData() { return &m_gcheap; }
+
+ int GetHeapCount() { return m_gcheap.HeapCount; }
+
+ GCHeapDetails *GetHeap(CLRDATA_ADDRESS objectPointer);
+ int GetGeneration(CLRDATA_ADDRESS objectPointer);
+
+
+};
+extern GCHeapSnapshot g_snapshot;
+
BOOL IsSameModuleName (const char *str1, const char *str2);
BOOL IsModule (DWORD_PTR moduleAddr);
BOOL IsMethodDesc (DWORD_PTR value);
void GetInfoFromName(DWORD_PTR ModuleAddr, const char* name, mdTypeDef* retMdTypeDef=NULL);
void GetInfoFromModule (DWORD_PTR ModuleAddr, ULONG token, DWORD_PTR *ret=NULL);
+
+typedef void (*VISITGCHEAPFUNC)(DWORD_PTR objAddr,size_t Size,DWORD_PTR methodTable,LPVOID token);
+BOOL GCHeapsTraverse(VISITGCHEAPFUNC pFunc, LPVOID token, BOOL verify=true);
+
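// Editor's sketch (illustrative, not part of this change): a hypothetical VISITGCHEAPFUNC
// callback matching the typedef above. It tallies per-method-table statistics while
// GCHeapsTraverse visits each object on the GC heap; HeapStat::Add is used the same way
// elsewhere in this change.
inline void CountObjectsCallback(DWORD_PTR objAddr, size_t size, DWORD_PTR methodTable, LPVOID token)
{
    (void)objAddr;   // unused in this simple tally
    HeapStat *stats = (HeapStat *)token;
    stats->Add(methodTable, (DWORD)size);
}
// Typical invocation: GCHeapsTraverse(CountObjectsCallback, &stats, /* verify */ FALSE);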
/////////////////////////////////////////////////////////////////////////////////////////////////////////
struct strobjInfo
DWORD m_StringLength;
};
+// Just to make figuring out which fill pointer element matches a generation
+// a bit less confusing. This gen_segment function is ported from gc.cpp.
+inline unsigned int gen_segment (int gen)
+{
+ return (DAC_NUMBERGENERATIONS - gen - 1);
+}
+
+inline CLRDATA_ADDRESS SegQueue(DacpGcHeapDetails& heapDetails, int seg)
+{
+ return heapDetails.finalization_fill_pointers[seg - 1];
+}
+
+inline CLRDATA_ADDRESS SegQueueLimit(DacpGcHeapDetails& heapDetails, int seg)
+{
+ return heapDetails.finalization_fill_pointers[seg];
+}
+
+#define FinalizerListSeg (DAC_NUMBERGENERATIONS+1)
+#define CriticalFinalizerListSeg (DAC_NUMBERGENERATIONS)
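// Editor's note (worked illustration, not from the original source): with DAC_NUMBERGENERATIONS == 4,
// gen_segment(0) == 3 and gen_segment(2) == 1, so the objects registered for finalization in
// generation gen are bracketed by SegQueue(heap, gen_segment(gen)) and
// SegQueueLimit(heap, gen_segment(gen)), while CriticalFinalizerListSeg (4) and
// FinalizerListSeg (5) index the critical-finalizer and finalizer-ready lists that follow the
// per-generation segments.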
+
+void GatherOneHeapFinalization(DacpGcHeapDetails& heapDetails, HeapStat *stat, BOOL bAllReady, BOOL bShort);
+
CLRDATA_ADDRESS GetAppDomainForMT(CLRDATA_ADDRESS mtPtr);
CLRDATA_ADDRESS GetAppDomain(CLRDATA_ADDRESS objPtr);
+BOOL VerifyObject(const GCHeapDetails &heap, const DacpHeapSegmentData &seg, DWORD_PTR objAddr, DWORD_PTR MTAddr, size_t objSize,
+ BOOL bVerifyMember);
+BOOL VerifyObject(const GCHeapDetails &heap, DWORD_PTR objAddr, DWORD_PTR MTAddr, size_t objSize,
+ BOOL bVerifyMember);
+
BOOL IsMTForFreeObj(DWORD_PTR pMT);
+void DumpStackObjectsHelper (TADDR StackTop, TADDR StackBottom, BOOL verifyFields);
HRESULT ExecuteCommand(PCSTR commandName, PCSTR args);
void GetDomainList(DWORD_PTR *&domainList, int &numDomain);
HRESULT GetThreadList(DWORD_PTR **threadList, int *numThread);
CLRDATA_ADDRESS GetCurrentManagedThread(); // returns current managed thread if any
+void GetAllocContextPtrs(AllocInfo *pallocInfo);
void ReloadSymbolWithLineInfo();
BOOL GetCollectibleDataEfficient(DWORD_PTR dwAddrMethTable, BOOL& bCollectible, TADDR& loaderAllocatorObjectHandle);
+// ObjSize now uses the methodtable cache for its work too.
+size_t ObjectSize (DWORD_PTR obj, BOOL fIsLargeObject=FALSE);
+size_t ObjectSize(DWORD_PTR obj, DWORD_PTR mt, BOOL fIsValueClass, BOOL fIsLargeObject=FALSE);
+
void CharArrayContent(TADDR pos, ULONG num, bool widechar);
void StringObjectContent (size_t obj, BOOL fLiteral=FALSE, const int length=-1); // length=-1: dump everything in the string object.
int mMisses, mReads, mMisaligned;
};
+
+///////////////////////////////////////////////////////////////////////////////////////////
+//
+// Methods for creating a database out of the GC heap and its roots, in XML or CLRProfiler format
+//
+
+#include <unordered_map>
+#include <unordered_set>
+#include <list>
+
+class TypeTree;
+enum { FORMAT_XML=0, FORMAT_CLRPROFILER=1 };
+enum { TYPE_START=0,TYPE_TYPES=1,TYPE_ROOTS=2,TYPE_OBJECTS=3,TYPE_HIGHEST=4};
+class HeapTraverser
+{
+private:
+ TypeTree *m_pTypeTree;
+ size_t m_curNID;
+ FILE *m_file;
+ int m_format; // from the enum above
+ size_t m_objVisited; // for UI updates
+ bool m_verify;
+ LinearReadCache mCache;
+
+ std::unordered_map<TADDR, std::list<TADDR>> mDependentHandleMap;
+
+public:
+ HeapTraverser(bool verify);
+ ~HeapTraverser();
+
+ FILE *getFile() { return m_file; }
+
+ BOOL Initialize();
+ BOOL CreateReport (FILE *fp, int format);
+
+private:
+ // First all types are added to a tree
+ void insert(size_t mTable);
+ size_t getID(size_t mTable);
+
+ // Functions for writing to the output file.
+ void PrintType(size_t ID,LPCWSTR name);
+
+ void PrintObjectHead(size_t objAddr,size_t typeID,size_t Size);
+ void PrintObjectMember(size_t memberValue, bool dependentHandle);
+ void PrintLoaderAllocator(size_t memberValue);
+ void PrintObjectTail();
+
+ void PrintRootHead();
+ void PrintRoot(LPCWSTR kind,size_t Value);
+ void PrintRootTail();
+
+ void PrintSection(int Type,BOOL bOpening);
+
+ // Root and object member helper functions
+ void FindGCRootOnStacks();
+ void PrintRefs(size_t obj, size_t methodTable, size_t size);
+
+ // Callback functions used during traversals
+ static void GatherTypes(DWORD_PTR objAddr,size_t Size,DWORD_PTR methodTable, LPVOID token);
+ static void PrintHeap(DWORD_PTR objAddr,size_t Size,DWORD_PTR methodTable, LPVOID token);
+ static void PrintOutTree(size_t methodTable, size_t ID, LPVOID token);
+ void TraceHandles();
+};
+
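// Editor's sketch (illustrative, not part of this change): the call pattern suggested by
// HeapTraverser's public interface above: construct, Initialize, then CreateReport with one
// of the FORMAT_* values. The output file name is hypothetical.
inline void ExampleTraverseHeapToXml()
{
    HeapTraverser traverser(/* verify */ false);
    if (traverser.Initialize())
    {
        FILE *fp = fopen("gcheap.xml", "w");
        if (fp != NULL)
        {
            traverser.CreateReport(fp, FORMAT_XML);   // or FORMAT_CLRPROFILER
            fclose(fp);
        }
    }
}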
+//
+// Helper class used for type-safe bitflags
+// T - the underlying/storage type
+// U - the enum type specifying the individual bit flags
+// Requirement:
+// sizeof(U) <= sizeof(T)
+//
+template <typename T, typename U>
+struct Flags
+{
+ typedef T UnderlyingType;
+ typedef U BitFlagEnumType;
+
+ static_assert_no_msg(sizeof(BitFlagEnumType) <= sizeof(UnderlyingType));
+
+ Flags(UnderlyingType v)
+ : m_val(v)
+ { }
+
+ Flags(BitFlagEnumType v)
+ : m_val(v)
+ { }
+
+ Flags(const Flags& other)
+ : m_val(other.m_val)
+ { }
+
+ Flags& operator = (const Flags& other)
+ { m_val = other.m_val; return *this; }
+
+ Flags operator | (Flags other) const
+ { return Flags<T, U>(m_val | other.m_val); }
+
+ void operator |= (Flags other)
+ { m_val |= other.m_val; }
+
+ Flags operator & (Flags other) const
+ { return Flags<T, U>(m_val & other.m_val); }
+
+ void operator &= (Flags other)
+ { m_val &= other.m_val; }
+
+ Flags operator ^ (Flags other) const
+ { return Flags<T, U>(m_val ^ other.m_val); }
+
+ void operator ^= (Flags other)
+ { m_val ^= other.m_val; }
+
+ BOOL operator == (Flags other) const
+ { return m_val == other.m_val; }
+
+ BOOL operator != (Flags other) const
+ { return m_val != other.m_val; }
+
+
+private:
+ UnderlyingType m_val;
+};
+
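// Editor's sketch (illustrative, not part of this change): hypothetical usage of the Flags
// helper above. The enum and its values are made up for this example. Note that, as the
// typedefs are written, the first template argument is the storage type and the second is
// the flag enum.
enum ExampleDumpOption : unsigned int
{
    EXAMPLE_DUMP_NONE    = 0x0,
    EXAMPLE_DUMP_FIELDS  = 0x1,
    EXAMPLE_DUMP_METHODS = 0x2,
};

typedef Flags<unsigned int, ExampleDumpOption> ExampleDumpFlags;

inline void ExampleUseFlags()
{
    ExampleDumpFlags flags(EXAMPLE_DUMP_FIELDS);
    flags |= ExampleDumpFlags(EXAMPLE_DUMP_METHODS);   // set an additional bit
    if ((flags & ExampleDumpFlags(EXAMPLE_DUMP_FIELDS)) != ExampleDumpFlags(EXAMPLE_DUMP_NONE))
    {
        // field dumping was requested
    }
}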
// Helper class used in ClrStackFromPublicInterface() to keep track of explicit EE Frames
// (i.e., "internal frames") on the stack. Call Init() with the appropriate
// ICorDebugThread3, and this class will initialize itself with the set of internal
};
#include "sigparser.h"
+///////////////////////////////////////////////////////////////////////////////////////////
+//
+// Miscellaneous helper methods
+//
+
+#define THREAD_POOL_WORK_ITEM_TABLE_QUEUE_WIDTH "17"
+void EnumerateThreadPoolGlobalWorkItemConcurrentQueue(
+ DWORD_PTR workItemsConcurrentQueuePtr,
+ const char *queueName,
+ HeapStat *stats);
+
#endif // __util_h__
# If the file in a directory is found the result is stored in the variable and the search will not be repeated unless the variable is cleared.
find_path(LLDB_H "lldb/API/LLDB.h" PATHS "${WITH_LLDB_INCLUDES}" NO_DEFAULT_PATH)
find_path(LLDB_H "lldb/API/LLDB.h")
- find_path(LLDB_H "lldb/API/LLDB.h" PATHS "/usr/lib/llvm-14/include")
- find_path(LLDB_H "lldb/API/LLDB.h" PATHS "/usr/lib/llvm-13/include")
- find_path(LLDB_H "lldb/API/LLDB.h" PATHS "/usr/lib/llvm-12/include")
- find_path(LLDB_H "lldb/API/LLDB.h" PATHS "/usr/lib/llvm-11/include")
- find_path(LLDB_H "lldb/API/LLDB.h" PATHS "/usr/lib/llvm-10/include")
- find_path(LLDB_H "lldb/API/LLDB.h" PATHS "/usr/lib/llvm-9/include")
find_path(LLDB_H "lldb/API/LLDB.h" PATHS "/usr/lib/llvm-6.0/include")
find_path(LLDB_H "lldb/API/LLDB.h" PATHS "/usr/lib/llvm-5.0/include")
find_path(LLDB_H "lldb/API/LLDB.h" PATHS "/usr/lib/llvm-4.0/include")
find_path(LLDB_H "lldb/API/LLDB.h" PATHS "/usr/lib/llvm-3.9/include")
+ find_path(LLDB_H "lldb/API/LLDB.h" PATHS "/usr/lib/llvm-3.8/include")
+ find_path(LLDB_H "lldb/API/LLDB.h" PATHS "/usr/lib/llvm-3.7/include")
+ find_path(LLDB_H "lldb/API/LLDB.h" PATHS "/usr/lib/llvm-3.6/include")
+ find_path(LLDB_H "lldb/API/LLDB.h" PATHS "/usr/lib/llvm-3.5/include")
#FreeBSD
find_path(LLDB_H "lldb/API/LLDB.h" PATHS "/usr/local/llvm39/include")
find_path(LLDB_H "lldb/API/LLDB.h" PATHS "/usr/local/llvm38/include")
{
g_services->AddCommand("sos", new sosCommand(nullptr), "Executes various coreclr debugging commands. Use the syntax 'sos <command-name> <args>'. For more information, see 'soshelp'.");
g_services->AddCommand("ext", new sosCommand(nullptr), "Executes various coreclr debugging commands. Use the syntax 'sos <command-name> <args>'. For more information, see 'soshelp'.");
- g_services->AddManagedCommand("analyzeoom", "Provides a stack trace of managed code only.");
+ g_services->AddCommand("analyzeoom", new sosCommand("AnalyzeOOM"), "Displays the info of the last OOM that occurred on an allocation request to the GC heap.");
g_services->AddCommand("bpmd", new sosCommand("bpmd"), "Creates a breakpoint at the specified managed method in the specified module.");
g_services->AddManagedCommand("clrmodules", "Lists the managed modules in the process.");
g_services->AddCommand("clrstack", new sosCommand("ClrStack"), "Provides a stack trace of managed code only.");
g_services->AddCommand("dumpdelegate", new sosCommand("DumpDelegate"), "Displays information about a delegate.");
g_services->AddCommand("dumpdomain", new sosCommand("DumpDomain"), "Displays information about all assemblies within all the AppDomains, or the specified one.");
g_services->AddCommand("dumpgcdata", new sosCommand("DumpGCData"), "Displays information about the GC data.");
- g_services->AddManagedCommand("dumpheap", "Displays info about the garbage-collected heap and collection statistics about objects.");
+ g_services->AddCommand("dumpheap", new sosCommand("DumpHeap"), "Displays info about the garbage-collected heap and collection statistics about objects.");
g_services->AddCommand("dumpil", new sosCommand("DumpIL"), "Displays the Microsoft intermediate language (MSIL) that's associated with a managed method.");
g_services->AddCommand("dumplog", new sosCommand("DumpLog"), "Writes the contents of an in-memory stress log to the specified file.");
g_services->AddCommand("dumpmd", new sosCommand("DumpMD"), "Displays information about a MethodDesc structure at the specified address.");
g_services->AddCommand("dumpmodule", new sosCommand("DumpModule"), "Displays information about an EE module structure at the specified address.");
g_services->AddCommand("dumpmt", new sosCommand("DumpMT"), "Displays information about a method table at the specified address.");
g_services->AddCommand("dumpobj", new sosCommand("DumpObj"), "Displays info about an object at the specified address.");
- g_services->AddManagedCommand("dumpruntimetypes", "Finds all System.RuntimeType objects in the GC heap and prints the type name and MethodTable they refer too.");
+ g_services->AddCommand("dumpruntimetypes", new sosCommand("DumpRuntimeTypes"), "Finds all System.RuntimeType objects in the GC heap and prints the type name and MethodTable they refer to.");
g_services->AddCommand("dumpsig", new sosCommand("DumpSig"), "Dumps the signature of a method or field specified by '<sigaddr> <moduleaddr>'.");
g_services->AddCommand("dumpsigelem", new sosCommand("DumpSigElem"), "Dumps a single element of a signature object.");
g_services->AddCommand("dumpstack", new sosCommand("DumpStack"), "Displays a native and managed stack trace.");
- g_services->AddManagedCommand("dumpstackobjects", "Displays all managed objects found within the bounds of the current stack.");
- g_services->AddManagedCommand("dso", "Displays all managed objects found within the bounds of the current stack.");
+ g_services->AddCommand("dumpstackobjects", new sosCommand("DumpStackObjects"), "Displays all managed objects found within the bounds of the current stack.");
+ g_services->AddCommand("dso", new sosCommand("DumpStackObjects"), "Displays all managed objects found within the bounds of the current stack.");
g_services->AddCommand("dumpvc", new sosCommand("DumpVC"), "Displays info about the fields of a value class.");
- g_services->AddManagedCommand("eeheap", "Displays info about process memory consumed by internal runtime data structures.");
+ g_services->AddCommand("eeheap", new sosCommand("EEHeap"), "Displays info about process memory consumed by internal runtime data structures.");
g_services->AddCommand("eestack", new sosCommand("EEStack"), "Runs dumpstack on all threads in the process.");
g_services->AddCommand("eeversion", new sosCommand("EEVersion"), "Displays information about the runtime and SOS versions.");
g_services->AddCommand("ehinfo", new sosCommand("EHInfo"), "Displays the exception handling blocks in a JIT-ed method.");
- g_services->AddManagedCommand("finalizequeue", "Displays all objects registered for finalization.");
+ g_services->AddCommand("finalizequeue", new sosCommand("FinalizeQueue"), "Displays all objects registered for finalization.");
g_services->AddCommand("findappdomain", new sosCommand("FindAppDomain"), "Attempts to resolve the AppDomain of a GC object.");
g_services->AddCommand("findroots", new sosCommand("FindRoots"), "Finds and displays object roots across GC collections.");
g_services->AddCommand("gchandles", new sosCommand("GCHandles"), "Displays statistics about garbage collector handles in the process.");
- g_services->AddManagedCommand("gcheapstat", "Displays statistics about garbage collector.");
+ g_services->AddCommand("gcheapstat", new sosCommand("GCHeapStat"), "Displays statistics about the garbage collector.");
g_services->AddCommand("gcinfo", new sosCommand("GCInfo"), "Displays info about the JIT GC encoding for a method.");
- g_services->AddManagedCommand("gcroot", "Displays info about references (or roots) to an object at the specified address.");
- g_services->AddManagedCommand("gcwhere", "Displays the location in the GC heap of the specified address.");
+ g_services->AddCommand("gcroot", new sosCommand("GCRoot"), "Displays info about references (or roots) to an object at the specified address.");
+ g_services->AddCommand("gcwhere", new sosCommand("GCWhere"), "Displays the location in the GC heap of the specified address.");
g_services->AddCommand("histclear", new sosCommand("HistClear"), "Releases any resources used by the family of Hist commands.");
g_services->AddCommand("histinit", new sosCommand("HistInit"), "Initializes the SOS structures from the stress log saved in the debuggee.");
g_services->AddCommand("histobj", new sosCommand("HistObj"), "Examines all stress log relocation records and displays the chain of garbage collection relocations that may have led to the address passed in as an argument.");
g_services->AddManagedCommand("loadsymbols", "Loads the .NET Core native module symbols.");
g_services->AddManagedCommand("logging", "Enables/disables internal SOS logging.");
g_services->AddCommand("name2ee", new sosCommand("Name2EE"), "Displays the MethodTable structure and EEClass structure for the specified type or method in the specified module.");
- g_services->AddManagedCommand("objsize", "Displays the size of the specified object.");
+ g_services->AddCommand("objsize", new sosCommand("ObjSize"), "Displays the size of the specified object.");
g_services->AddCommand("pathto", new sosCommand("PathTo"), "Displays the GC path from <root> to <target>.");
g_services->AddCommand("pe", new sosCommand("PrintException"), "Displays and formats fields of any object derived from the Exception class at the specified address.");
g_services->AddCommand("printexception", new sosCommand("PrintException"), "Displays and formats fields of any object derived from the Exception class at the specified address.");
g_services->AddCommand("sosstatus", new sosCommand("SOSStatus"), "Displays the global SOS status.");
g_services->AddCommand("sosflush", new sosCommand("SOSFlush"), "Resets the internal cached state.");
g_services->AddCommand("syncblk", new sosCommand("SyncBlk"), "Displays the SyncBlock holder info.");
- g_services->AddManagedCommand("threadpool", "Displays info about the runtime thread pool.");
+ g_services->AddCommand("threadpool", new sosCommand("ThreadPool"), "Displays info about the runtime thread pool.");
g_services->AddCommand("threadstate", new sosCommand("ThreadState"), "Pretty prints the meaning of a thread's state.");
g_services->AddCommand("token2ee", new sosCommand("token2ee"), "Displays the MethodTable structure and MethodDesc structure for the specified token and module.");
- g_services->AddManagedCommand("verifyheap", "Checks the GC heap for signs of corruption.");
- g_services->AddManagedCommand("verifyobj", "Checks the object that is passed as an argument for signs of corruption.");
+ g_services->AddCommand("traverseheap", new sosCommand("TraverseHeap"), "Writes out heap information to a file in a format understood by the CLR Profiler.");
+ g_services->AddCommand("verifyheap", new sosCommand("VerifyHeap"), "Checks the GC heap for signs of corruption.");
+ g_services->AddCommand("verifyobj", new sosCommand("VerifyObj"), "Checks the object that is passed as an argument for signs of corruption.");
return true;
}
{
HandleCounterRate(obj);
}
- else if (obj.EventName == "UpDownCounterRateValuePublished")
- {
- HandleUpDownCounterValue(obj);
- }
else if (obj.EventName == "TimeSeriesLimitReached")
{
HandleTimeSeriesLimitReached(obj);
}
}
- private void HandleUpDownCounterValue(TraceEvent obj)
- {
- if (obj.Version < 1) // Version 1 added the value field.
- {
- return;
- }
-
- string sessionId = (string)obj.PayloadValue(0);
- string meterName = (string)obj.PayloadValue(1);
- //string meterVersion = (string)obj.PayloadValue(2);
- string instrumentName = (string)obj.PayloadValue(3);
- string unit = (string)obj.PayloadValue(4);
- string tags = (string)obj.PayloadValue(5);
- //string rateText = (string)obj.PayloadValue(6); // Not currently using rate for UpDownCounters.
- string valueText = (string)obj.PayloadValue(7);
- if (sessionId != _metricsEventSourceSessionId)
- {
- return;
- }
- MeterInstrumentEventObserved(meterName, obj.TimeStamp);
-
- // the value might be an empty string indicating no measurement was provided this collection interval
- if (double.TryParse(valueText, NumberStyles.Number | NumberStyles.Float, CultureInfo.InvariantCulture, out double value))
- {
- // UpDownCounter reports the value, not the rate - this is different than how Counter behaves, and is thus treated as a gauge.
- CounterPayload payload = new GaugePayload(meterName, instrumentName, null, unit, tags, value, obj.TimeStamp);
- _renderer.CounterPayloadReceived(payload, _pauseCmdSet);
- }
- else
- {
- // for observable instruments we assume the lack of data is meaningful and remove it from the UI
- CounterPayload payload = new RatePayload(meterName, instrumentName, null, unit, tags, 0, _interval, obj.TimeStamp);
- _renderer.CounterStopped(payload);
- }
- }
-
private void HandleHistogram(TraceEvent obj)
{
string sessionId = (string)obj.PayloadValue(0);
public readonly CounterProvider KnownProvider;
}
- private interface ICounterRow
- {
- int Row { get; set; }
- }
-
/// <summary>Information about an observed counter.</summary>
- private class ObservedCounter : ICounterRow
+ private class ObservedCounter
{
public ObservedCounter(string displayName) => DisplayName = displayName;
public string DisplayName { get; } // Display name for this counter.
public double LastValue { get; set; }
}
- private class ObservedTagSet : ICounterRow
+ private class ObservedTagSet
{
public ObservedTagSet(string tags)
{
{
Clear();
- // clear row data on all counters
- foreach (ObservedProvider provider in _providers.Values)
- {
- foreach (ObservedCounter counter in provider.Counters.Values)
- {
- counter.Row = -1;
- foreach (ObservedTagSet tagSet in counter.TagSets.Values)
- {
- tagSet.Row = -1;
- }
- }
- }
-
_consoleWidth = Console.WindowWidth;
_consoleHeight = Console.WindowHeight;
_maxNameLength = Math.Max(Math.Min(80, _consoleWidth) - (CounterValueLength + Indent + 1), 0); // Truncate the name to prevent line wrapping as long as the console width is >= CounterValueLength + Indent + 1 characters
Console.WriteLine(_errorText);
row += GetLineWrappedLines(_errorText);
}
+ Console.WriteLine(); row++; // Blank line.
- bool RenderRow(ref int row, string lineOutput = null, ICounterRow counterRow = null)
+ foreach (ObservedProvider provider in _providers.Values.OrderBy(p => p.KnownProvider == null).ThenBy(p => p.Name)) // Known providers first.
{
- if (row >= _consoleHeight + _topRow) // prevents from displaying more counters than vertical space available
- {
- return false;
- }
-
- if (lineOutput != null)
- {
- Console.Write(lineOutput);
- }
+ Console.WriteLine($"[{provider.Name}]"); row++;
- if (row < _consoleHeight + _topRow - 1) // prevents screen from scrolling due to newline on last line of console
+ foreach (ObservedCounter counter in provider.Counters.Values.OrderBy(c => c.DisplayName))
{
- Console.WriteLine();
- }
-
- if (counterRow != null)
- {
- counterRow.Row = row;
- }
-
- row++;
- return true;
- }
-
- if (RenderRow(ref row)) // Blank line.
- {
- foreach (ObservedProvider provider in _providers.Values.OrderBy(p => p.KnownProvider == null).ThenBy(p => p.Name)) // Known providers first.
- {
- if (!RenderRow(ref row, $"[{provider.Name}]"))
- {
- break;
- }
-
- foreach (ObservedCounter counter in provider.Counters.Values.OrderBy(c => c.DisplayName))
+ string name = MakeFixedWidth($"{new string(' ', Indent)}{counter.DisplayName}", Indent + _maxNameLength);
+ counter.Row = row++;
+ if (counter.RenderValueInline)
{
- string name = MakeFixedWidth($"{new string(' ', Indent)}{counter.DisplayName}", Indent + _maxNameLength);
- if (counter.RenderValueInline)
+ if (row >= _consoleHeight) // prevents from displaying more counters than vertical space available
{
- if (!RenderRow(ref row, $"{name} {FormatValue(counter.LastValue)}", counter))
- {
- break;
- }
+ break;
}
- else
+ Console.WriteLine($"{name} {FormatValue(counter.LastValue)}");
+ }
+ else
+ {
+ Console.WriteLine(name);
+ foreach (ObservedTagSet tagSet in counter.TagSets.Values.OrderBy(t => t.Tags))
{
- if (!RenderRow(ref row, name, counter))
+ if (row >= _consoleHeight)
{
break;
}
- foreach (ObservedTagSet tagSet in counter.TagSets.Values.OrderBy(t => t.Tags))
- {
- string tagName = MakeFixedWidth($"{new string(' ', 2 * Indent)}{tagSet.Tags}", Indent + _maxNameLength);
- if (!RenderRow(ref row, $"{tagName} {FormatValue(tagSet.LastValue)}", tagSet))
- {
- break;
- }
- }
+
+ string tagName = MakeFixedWidth($"{new string(' ', 2 * Indent)}{tagSet.Tags}", Indent + _maxNameLength);
+ Console.WriteLine($"{tagName} {FormatValue(tagSet.LastValue)}");
+ tagSet.Row = row++;
}
}
}
}
int row = counter.RenderValueInline ? counter.Row : tagSet.Row;
- if (row < 0)
- {
- return;
- }
SetCursorPosition(Indent + _maxNameLength + 1, row);
Console.Write(FormatValue(payload.Value));
}
break;
}
- int loopEnd = 10;
// Retry the write dump on ERROR_PARTIAL_COPY
- for (int i = 0; i <= loopEnd; i++)
+ for (int i = 0; i < 5; i++)
{
// Dump the process!
if (NativeMethods.MiniDumpWriteDump(processHandle.DangerousGetHandle(), (uint)processId, stream.SafeFileHandle, dumpType, IntPtr.Zero, IntPtr.Zero, IntPtr.Zero))
else
{
int err = Marshal.GetHRForLastWin32Error();
- if (err != NativeMethods.HR_ERROR_PARTIAL_COPY || i == loopEnd)
+ if (err != NativeMethods.HR_ERROR_PARTIAL_COPY)
{
Marshal.ThrowExceptionForHR(err);
}
- else
- {
- Console.WriteLine($"retrying due to PARTIAL_COPY #{i}");
- }
}
}
}
// Thread id is in the frame name as "Thread (<ID>)"
string template = "Thread (";
string threadFrame = stackSource.GetFrameName(stackSource.GetFrameIndex(stackIndex), false);
-
- // we are looking for the first index of ) because
- // we need to handle a thread name like: Thread (4008) (.NET IO ThreadPool Worker)
- int firstIndex = threadFrame.IndexOf(')');
- int threadId = int.Parse(threadFrame.AsSpan(template.Length, firstIndex - template.Length));
+ int threadId = int.Parse(threadFrame.Substring(template.Length, threadFrame.Length - (template.Length + 1)));
if (samplesForThread.TryGetValue(threadId, out List<StackSourceSample> samples))
{
HMODULE hmodTargetCLR,
IUnknown **ppCordb);
-typedef HRESULT (STDAPICALLTYPE *FPCreateRemoteCordbObject)(
- DWORD port,
- LPCSTR assemblyBasePath,
- IUnknown **ppCordb);
-
HRESULT CreateCoreDbg(
HMODULE hCLRModule,
DWORD processId,
return pDebuggingImpl->QueryInterface(riid, ppInterface);
}
-
-HRESULT CreateCoreDbgRemotePort(HMODULE hDBIModule, DWORD portId, LPCSTR assemblyBasePath, IUnknown **ppCordb)
-{
- PUBLIC_CONTRACT;
- HRESULT hr = S_OK;
-
- FPCreateRemoteCordbObject fpCreate =
- (FPCreateRemoteCordbObject)GetProcAddress(hDBIModule, "CreateRemoteCordbObject");
- if (fpCreate == NULL)
- {
- return CORDBG_E_INCOMPATIBLE_PROTOCOL;
- }
-
- return fpCreate(portId, assemblyBasePath, ppCordb);
-
- return hr;
-}
-
-DLLEXPORT
-HRESULT
-RegisterForRuntimeStartupRemotePort(
- _In_ DWORD dwRemotePortId,
- _In_ LPCSTR mscordbiPath,
- _In_ LPCSTR assemblyBasePath,
- _Out_ IUnknown ** ppCordb)
-{
- PUBLIC_CONTRACT;
- HRESULT hr = S_OK;
- HMODULE hMod = NULL;
-
- hMod = LoadLibraryA(mscordbiPath);
- if (hMod == NULL)
- {
- hr = CORDBG_E_DEBUG_COMPONENT_MISSING;
- return hr;
- }
-
- hr = CreateCoreDbgRemotePort(hMod, dwRemotePortId, assemblyBasePath, ppCordb);
- return S_OK;
-}
_In_ LPCWSTR szApplicationGroupId,
_In_ ICLRDebuggingLibraryProvider3* pLibraryProvider,
_Out_ IUnknown ** ppCordb);
-
-EXTERN_C HRESULT
-RegisterForRuntimeStartupRemotePort(
- _In_ DWORD dwRemotePortId,
- _In_ LPCSTR mscordbiPath,
- _In_ LPCSTR assemblyBasePath,
- _Out_ IUnknown ** ppCordb);
CreateDebuggingInterfaceFromVersion2
CreateDebuggingInterfaceFromVersion3
CLRCreateInstance
- RegisterForRuntimeStartupRemotePort
CreateDebuggingInterfaceFromVersion2
CreateDebuggingInterfaceFromVersion3
CLRCreateInstance
-RegisterForRuntimeStartupRemotePort
OptimizationTier_OptimizedTier1,
OptimizationTier_ReadyToRun,
OptimizationTier_OptimizedTier1OSR,
- OptimizationTier_QuickJittedInstrumented,
- OptimizationTier_OptimizedTier1Instrumented
};
CLRDATA_ADDRESS NativeCodeAddr;