UnixSignals m_unix_signals; /// This is the current signal set for this process.
lldb::ABISP m_abi_sp;
lldb::InputReaderSP m_process_input_reader;
- Communication m_stdio_communication;
- Mutex m_stdio_communication_mutex;
+ Communication m_stdio_communication;
+ Mutex m_stdio_communication_mutex;
std::string m_stdout_data;
std::string m_stderr_data;
Mutex m_profile_data_comm_mutex;
MemoryCache m_memory_cache;
AllocatedMemoryCache m_allocated_memory_cache;
bool m_should_detach; /// Should we detach if the process object goes away with an explicit call to Kill or Detach?
- LanguageRuntimeCollection m_language_runtimes;
+ LanguageRuntimeCollection m_language_runtimes;
std::auto_ptr<NextEventAction> m_next_event_action_ap;
std::vector<PreResumeCallbackAndBaton> m_pre_resume_actions;
ReadWriteLock m_run_lock;
// The simplest thing to do is to spin up a temporary thread to handle private state thread events while
// we are fielding public events here.
if (log)
- log->Printf ("Running thread plan on private state thread, spinning up another state thread to handle the events.");
+ log->Printf ("Running thread plan on private state thread, spinning up another state thread to handle the events.");
backup_private_state_thread = m_private_state_thread;
}
}
- // if no_stdio or std paths not supplied, then route to "/dev/null".
+ // if no_stdio or std paths not supplied, then route to "/dev/null".
if (no_stdio || stdin_path == NULL || stdin_path[0] == '\0')
stdin_path = "/dev/null";
if (no_stdio || stdout_path == NULL || stdout_path[0] == '\0')
{
struct task_basic_info task_info;
task_t task = TaskPort();
- if (task == TASK_NULL)
- return KERN_INVALID_ARGUMENT;
+ if (task == TASK_NULL)
+ return KERN_INVALID_ARGUMENT;
DNBError err;
err = BasicInfo(task, &task_info);
if (err.Success())
{
- // task_resume isn't counted like task_suspend calls are, are, so if the
- // task is not suspended, don't try and resume it since it is already
- // running
- if (task_info.suspend_count > 0)
+ // task_resume isn't counted like task_suspend calls are, are, so if the
+ // task is not suspended, don't try and resume it since it is already
+ // running
+ if (task_info.suspend_count > 0)
{
err = ::task_resume (task);
if (DNBLogCheckLogBit(LOG_TASK) || err.Fail())
return ret;
}
// Convert a Mach time_value_t (*a) into a struct timeval (*r).
// Wrapped in do/while(0) so the macro behaves as a single statement.
#define TIME_VALUE_TO_TIMEVAL(a, r) do {    \
    (r)->tv_sec = (a)->seconds;             \
    (r)->tv_usec = (a)->microseconds;       \
} while (0)
// todo: make use of existing MachThread, if there is already one?
// Collect per-thread CPU usage for every thread in 'task'.
// On return: num_threads is the thread count, *threads_id and
// *threads_used_usec are malloc'ed parallel arrays (caller frees) holding
// each non-idle thread's id and user+system time in microseconds, and
// current_used_time is incremented by the task-wide user+system time.
// NOTE(review): entries for idle threads or failed thread_info calls are
// left uninitialized in the output arrays — callers must tolerate that.
static void update_used_time(task_t task, int &num_threads, uint64_t **threads_id, uint64_t **threads_used_usec, struct timeval &current_used_time)
{
    kern_return_t kr;
    thread_act_array_t threads;
    mach_msg_type_number_t tcnt;

    kr = task_threads(task, &threads, &tcnt);
    if (kr != KERN_SUCCESS)
        return;
    num_threads = tcnt;
    *threads_id = (uint64_t *)malloc(num_threads * sizeof(uint64_t));
    *threads_used_usec = (uint64_t *)malloc(num_threads * sizeof(uint64_t));
    for (int i = 0; i < tcnt; i++) {
        thread_identifier_info_data_t identifier_info;
        mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
        kr = thread_info(threads[i], THREAD_IDENTIFIER_INFO, (thread_info_t)&identifier_info, &count);
        if (kr != KERN_SUCCESS) continue;

        thread_basic_info_data_t basic_info;
        count = THREAD_BASIC_INFO_COUNT;
        kr = thread_info(threads[i], THREAD_BASIC_INFO, (thread_info_t)&basic_info, &count);
        if (kr != KERN_SUCCESS) continue;

        if ((basic_info.flags & TH_FLAGS_IDLE) == 0) {
            (*threads_id)[i] = identifier_info.thread_id;
            struct timeval tv;
            struct timeval thread_tv;
            // thread_tv accumulates user + system time for this thread;
            // current_used_time accumulates both across all threads.
            TIME_VALUE_TO_TIMEVAL(&basic_info.user_time, &tv);
            TIME_VALUE_TO_TIMEVAL(&basic_info.user_time, &thread_tv);
            timeradd(&current_used_time, &tv, &current_used_time);
            TIME_VALUE_TO_TIMEVAL(&basic_info.system_time, &tv);
            timeradd(&thread_tv, &tv, &thread_tv);
            timeradd(&current_used_time, &tv, &current_used_time);
            uint64_t used_usec = thread_tv.tv_sec * 1000000ULL + thread_tv.tv_usec;
            (*threads_used_usec)[i] = used_usec;
        }
        // Release the thread port reference task_threads gave us.
        kr = mach_port_deallocate(mach_task_self(), threads[i]);
    }
    // Release the thread array allocated in our address space by task_threads.
    kr = mach_vm_deallocate(mach_task_self(), (mach_vm_address_t)(uintptr_t)threads, tcnt * sizeof(*threads));
}
const char *
MachTask::GetProfileDataAsCString ()
{
task_t task = TaskPort();
- if (task == TASK_NULL)
- return NULL;
+ if (task == TASK_NULL)
+ return NULL;
struct task_basic_info task_info;
DNBError err;
task_t
MachTask::TaskPortForProcessID (pid_t pid, DNBError &err, uint32_t num_retries, uint32_t usec_interval)
{
    // Try to acquire the task port for 'pid', retrying up to num_retries
    // times with usec_interval microseconds between attempts. Returns the
    // task port on success, TASK_NULL on failure; 'err' reports the last
    // task_for_pid result to the caller.
    if (pid != INVALID_NUB_PROCESS)
    {
        mach_port_t task_self = mach_task_self ();
        task_t task = TASK_NULL;
        for (uint32_t i=0; i<num_retries; i++)
        {
            // Assign directly into the caller's err reference — do NOT
            // shadow it with a local, or failures would never be reported.
            err = ::task_for_pid ( task_self, pid, &task);
            if (DNBLogCheckLogBit(LOG_TASK) || err.Fail())
            {
                char str[1024];
                ::snprintf (str, sizeof(str),
                            "::task_for_pid ( target_tport = 0x%4.4x, pid = %d, &task ) => err = 0x%8.8x",
                            task_self, pid, err.Error());
                err.LogThreaded(str);
            }
            if (err.Success())
                return task;

            // Sleep a bit and try again
            ::usleep (usec_interval);
        }
    }
    return TASK_NULL;
}
while (1)
{
- mach_msg_type_number_t count;
+ mach_msg_type_number_t count;
struct vm_region_submap_info_64 info;
count = VM_REGION_SUBMAP_INFO_COUNT_64;
// Test whether the virtual address is within the architecture's shared region.
// Unknown CPU types leave base/size at 0, so the function returns false.
static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
{
    mach_vm_address_t base = 0, size = 0;

    switch(type) {
        case CPU_TYPE_ARM:
            base = SHARED_REGION_BASE_ARM;
            size = SHARED_REGION_SIZE_ARM;
            break;
        case CPU_TYPE_X86_64:
            base = SHARED_REGION_BASE_X86_64;
            size = SHARED_REGION_SIZE_X86_64;
            break;
        case CPU_TYPE_I386:
            base = SHARED_REGION_BASE_I386;
            size = SHARED_REGION_SIZE_I386;
            break;
        default: {
            // Log error about unknown CPU type
            break;
        }
    }

    return(addr >= base && addr < (base + size));
}
// Walk the task's VM regions and accumulate resident-private (rprvt) and
// virtual-private (vprvt) memory sizes, mirroring top(1)'s accounting.
// 'pid' == 0 selects the special kernel_task accounting path.
static void GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Collecting some other info cheaply but not reporting for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;
    mach_vm_size_t aliased = 0;
    mach_vm_size_t pagesize = vm_page_size;
    bool global_shared_text_data_mapped = false;

    // Iterate regions: mach_vm_region updates addr/size each pass; the loop
    // ends when the kernel reports no further regions.
    for (mach_vm_address_t addr=0, size=0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS) break;

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data regions mapped in. If so, set global_shared_text_data_mapped to TRUE and avoid checking again.
            if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS) break;

                if (b_info.reserved) {
                    global_shared_text_data_mapped = TRUE;
                }
            }

            // Short circuit the loop if this isn't a shared private region, since that's the only region type we care about within the current address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single reference SM_COW as SM_PRIVATE
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                if (info.share_mode == SM_COW)
                {
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }
            default:
                // log that something is really bad.
                break;
        }
    }

    rprvt += aliased;
}
nub_bool_t