if (!moduleData.IsDynamic && moduleData.LoadedPEAddress != 0)
{
- ArrayHolder<WCHAR> wszUnicodeName = new WCHAR[MAX_LONGPATH + 1];
+ ArrayHolder<WCHAR> wszUnicodeName = new (std::nothrow) WCHAR[MAX_LONGPATH + 1];
+ if (wszUnicodeName == nullptr)
+ {
+ fprintf(stderr, "Allocating unicode module name FAILED\n");
+ result = false;
+ break;
+ }
if (SUCCEEDED(hr = pClrDataModule->GetFileName(MAX_LONGPATH, nullptr, wszUnicodeName)))
{
- // If the module file name isn't empty
- if (wszUnicodeName[0] != 0) {
- ArrayHolder<char> pszName = new (std::nothrow) char[MAX_LONGPATH + 1];
- if (pszName == nullptr) {
- fprintf(stderr, "Allocating module name FAILED\n");
- result = false;
- break;
- }
- sprintf_s(pszName.GetPtr(), MAX_LONGPATH, "%S", (WCHAR*)wszUnicodeName);
- TRACE(" %s\n", pszName.GetPtr());
-
- // Change the module mapping name
- ReplaceModuleMapping(moduleData.LoadedPEAddress, moduleData.LoadedPESize, std::string(pszName.GetPtr()));
+ ArrayHolder<char> pszName = new (std::nothrow) char[MAX_LONGPATH + 1];
+ if (pszName == nullptr)
+ {
+ fprintf(stderr, "Allocating ascii module name FAILED\n");
+ result = false;
+ break;
}
+ sprintf_s(pszName.GetPtr(), MAX_LONGPATH, "%S", (WCHAR*)wszUnicodeName);
+ TRACE(" %s\n", pszName.GetPtr());
+
+ // Change the module mapping name
+ ReplaceModuleMapping(moduleData.LoadedPEAddress, moduleData.LoadedPESize, std::string(pszName.GetPtr()));
}
else {
TRACE("\nModule.GetFileName FAILED %08x\n", hr);
{
uint64_t start = (uint64_t)baseAddress;
uint64_t end = ((baseAddress + size) + (PAGE_SIZE - 1)) & PAGE_MASK;
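+ // Round the end of the image up to the next page boundary (assuming PAGE_MASK is ~(PAGE_SIZE - 1)).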
+ uint32_t flags = GetMemoryRegionFlags(start);
+
+ // Make sure that the page containing the PE header for the managed assemblies is in the dump,
+ // especially on MacOS where they are added artificially.
+ MemoryRegion header(flags | MEMORY_REGION_FLAG_MEMORY_BACKED, start, start + PAGE_SIZE);
+ InsertMemoryRegion(header);
// Add or change the module mapping for this PE image. The managed assembly images may already
// be in the module mappings list but they may not have the full assembly name (like in .NET 2.0
// they have the name "/dev/zero"). On MacOS, the managed assembly modules have not been added.
- MemoryRegion search(0, start, start + PAGE_SIZE);
- const auto& found = m_moduleMappings.find(search);
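+ // Use the one-page header region as the key when searching the existing module mappings.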
+ const auto& found = m_moduleMappings.find(header);
if (found == m_moduleMappings.end())
{
// On MacOS the assemblies are always added.
- MemoryRegion newRegion(GetMemoryRegionFlags(start), start, end, 0, pszName);
+ MemoryRegion newRegion(flags, start, end, 0, pszName);
m_moduleMappings.insert(newRegion);
if (g_diagnostics) {
if (size > sizeof(mach_header_64))
{
- mach_header_64* header = nullptr;
- mach_msg_type_number_t read = 0;
- kern_return_t kresult = ::vm_read(Task(), address, sizeof(mach_header_64), (vm_offset_t*)&header, &read);
- if (kresult == KERN_SUCCESS)
- {
- if (header->magic == MH_MAGIC_64)
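+ // Read the Mach-O header out of the target process and check whether this image is the dynamic linker (dyld).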
+ mach_header_64 header;
+ size_t read = 0;
+ if (ReadProcessMemory((void*)address, &header, sizeof(mach_header_64), &read))
+ {
+ if (header.magic == MH_MAGIC_64)
{
TRACE("TryFindDyLinker: found module header at %016llx %08llx ncmds %d sizeofcmds %08x type %02x\n",
address,
size,
- header->ncmds,
- header->sizeofcmds,
- header->filetype);
+ header.ncmds,
+ header.sizeofcmds,
+ header.filetype);
- if (header->filetype == MH_DYLINKER)
+ if (header.filetype == MH_DYLINKER)
{
TRACE("TryFindDyLinker: found dylinker\n");
*found = true;
// Enumerate all the modules in dyld's image cache. VisitModule is called for every module found.
- result = EnumerateModules(address, header);
+ result = EnumerateModules(address, &header);
}
}
}
- if (header != nullptr)
+ else
{
- ::vm_deallocate(Task(), (vm_address_t)header, sizeof(mach_header_64));
+ TRACE("TryFindDyLinker: ReadProcessMemory header at %p %d FAILED\n", address, read);
}
}
// and the size be a multiple of the page size. We can't differentiate
// between the cases in which that's required and those in which it
// isn't, so we do it all the time.
- int* addressAligned = (int*)((SIZE_T)address & ~(PAGE_SIZE - 1));
- ssize_t offset = ((SIZE_T)address & (PAGE_SIZE - 1));
+ vm_address_t addressAligned = (vm_address_t)address & ~(PAGE_SIZE - 1);
+ ssize_t offset = (ssize_t)address & (PAGE_SIZE - 1);
char *data = (char*)alloca(PAGE_SIZE);
ssize_t numberOfBytesRead = 0;
- ssize_t bytesToRead;
+ ssize_t bytesLeft = size;
- while (size > 0)
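+ // Copy the requested range out one page at a time through the scratch buffer. Only the bytes at
+ // or after 'offset' in the first page belong to the caller's range; for example, with a 4 KiB page,
+ // a 10 byte read starting 8 bytes before a page boundary copies 8 bytes from the first page and
+ // the remaining 2 from the next one.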
+ while (bytesLeft > 0)
{
- vm_size_t bytesRead;
-
- bytesToRead = PAGE_SIZE - offset;
- if (bytesToRead > size)
+ vm_size_t bytesRead = PAGE_SIZE;
+ kern_return_t result = ::vm_read_overwrite(Task(), addressAligned, PAGE_SIZE, (vm_address_t)data, &bytesRead);
+ if (result != KERN_SUCCESS || bytesRead != PAGE_SIZE)
{
- bytesToRead = size;
+ TRACE_VERBOSE("ReadProcessMemory(%p %d): vm_read_overwrite failed bytesLeft %d bytesRead %d from %p: %x %s\n",
+ address, size, bytesLeft, bytesRead, (void*)addressAligned, result, mach_error_string(result));
+ break;
}
- bytesRead = PAGE_SIZE;
- kern_return_t result = ::vm_read_overwrite(Task(), (vm_address_t)addressAligned, PAGE_SIZE, (vm_address_t)data, &bytesRead);
- if (result != KERN_SUCCESS || bytesRead != PAGE_SIZE)
+ ssize_t bytesToCopy = PAGE_SIZE - offset;
+ if (bytesToCopy > bytesLeft)
{
- TRACE_VERBOSE("vm_read_overwrite failed for %d bytes from %p: %x %s\n", PAGE_SIZE, (char *)addressAligned, result, mach_error_string(result));
- *read = 0;
- return false;
+ bytesToCopy = bytesLeft;
}
- memcpy((LPSTR)buffer + numberOfBytesRead, data + offset, bytesToRead);
- addressAligned = (int*)((char*)addressAligned + PAGE_SIZE);
- numberOfBytesRead += bytesToRead;
- size -= bytesToRead;
+ memcpy((LPSTR)buffer + numberOfBytesRead, data + offset, bytesToCopy);
+ addressAligned = addressAligned + PAGE_SIZE;
+ numberOfBytesRead += bytesToCopy;
+ bytesLeft -= bytesToCopy;
offset = 0;
}
*read = numberOfBytesRead;
- return true;
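+ // A partial read still counts as success as long as at least one byte (or a zero-length request)
+ // was copied; *read reports how many bytes were actually transferred.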
+ return size == 0 || numberOfBytesRead > 0;
}
// For src/inc/llvm/ELF.h
size_t read = 0;
if (!m_crashInfo.ReadProcessMemory((void*)(ULONG_PTR)address, buffer, size, &read))
{
+ TRACE("DumpDataTarget::ReadVirtual %p %d FAILED\n", (void*)address, size);
*done = 0;
return E_FAIL;
}
ThreadInfo::UnwindNativeFrames(CONTEXT* pContext)
{
uint64_t previousSp = 0;
+ uint64_t previousIp = 0;
+ int ipMatchCount = 0;
- // For each native frame
+ // For each native frame, add to the dump a page around the IP and any unwind info that wasn't
+ // already added in VisitProgramHeader (Linux) or VisitSection (MacOS).
while (true)
{
uint64_t ip = 0, sp = 0;
if (ip == 0 || sp <= previousSp) {
break;
}
+ // Break out of the endless loop if the IP repeats more than 1000 times. This is a fallback
+ // behavior of libunwind when the module containing the IP has no unwind info, and it also
+ // happens on simple stack overflows. The stack memory is added to the dump in GetThreadStack,
+ // so there is no need to add the same IP page over and over again. The one case this check
+ // doesn't catch is a stack overflow that repeats a sequence of different IPs over and over.
+ if (ip == previousIp)
+ {
+ if (ipMatchCount++ > 1000)
+ {
+ TRACE("Unwind: same ip %" PRIA PRIx64 " over 1000 times\n", ip);
+ break;
+ }
+ }
+ else
+ {
+ ipMatchCount = 0;
+ }
// Add two pages around the instruction pointer to the core dump
m_crashInfo.InsertMemoryRegion(ip - PAGE_SIZE, PAGE_SIZE * 2);
break;
}
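+ // Remember this frame's SP and IP so the next iteration can detect a stack that isn't advancing
+ // or an IP that keeps repeating.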
previousSp = sp;
+ previousIp = ip;
}
}
// isn't, so we do it all the time.
const size_t pageSize = GetVirtualPageSize();
vm_address_t addressAligned = ALIGN_DOWN(address, pageSize);
- size_t offset = OffsetWithinPage(address);
- size_t bytesToRead;
+ ssize_t offset = OffsetWithinPage(address);
+ ssize_t bytesLeft = size;
char *data = (char*)malloc(pageSize);
- if (data == nullptr)
- {
- ERROR("malloc(%d) FAILED\n", pageSize);
- result = FALSE;
- goto exit;
- }
-
- while (size > 0)
+ if (data != nullptr)
{
- vm_size_t bytesRead;
-
- bytesToRead = pageSize - offset;
- if (bytesToRead > size)
- {
- bytesToRead = size;
- }
- bytesRead = pageSize;
- kern_return_t result = ::vm_read_overwrite(task, addressAligned, pageSize, (vm_address_t)data, &bytesRead);
- if (result != KERN_SUCCESS || bytesRead != pageSize)
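+ // Read the target memory one whole page at a time and copy out just the bytes the caller asked for.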
+ while (bytesLeft > 0)
{
- ERROR("vm_read_overwrite failed for %d bytes from %p: %x %s\n", pageSize, (void*)addressAligned, result, mach_error_string(result));
- result = FALSE;
- goto exit;
+ vm_size_t bytesRead = pageSize;
+ kern_return_t result = ::vm_read_overwrite(task, addressAligned, pageSize, (vm_address_t)data, &bytesRead);
+ if (result != KERN_SUCCESS || bytesRead != pageSize)
+ {
+ TRACE("PAL_ReadProcessMemory(%p %d): vm_read_overwrite failed bytesLeft %d bytesRead %d from %p: %x %s\n",
+ (void*)address, size, bytesLeft, bytesRead, (void*)addressAligned, result, mach_error_string(result));
+ break;
+ }
+ ssize_t bytesToCopy = pageSize - offset;
+ if (bytesToCopy > bytesLeft)
+ {
+ bytesToCopy = bytesLeft;
+ }
+ memcpy((LPSTR)buffer + read, data + offset, bytesToCopy);
+ addressAligned = addressAligned + pageSize;
+ read += bytesToCopy;
+ bytesLeft -= bytesToCopy;
+ offset = 0;
}
- memcpy((LPSTR)buffer + read , data + offset, bytesToRead);
- addressAligned = addressAligned + pageSize;
- read += bytesToRead;
- size -= bytesToRead;
- offset = 0;
+ result = size == 0 || read > 0;
+ }
+ else
+ {
+ ERROR("malloc(%d) FAILED\n", pageSize);
+ result = FALSE;
}
-exit:
if (data != nullptr)
{
free(data);