Allow the ThreadPlanStackMap to hold the thread plans for threads
author    Jim Ingham <jingham@apple.com>
          Wed, 18 Mar 2020 19:05:08 +0000 (12:05 -0700)
committer Jim Ingham <jingham@apple.com>
          Fri, 3 Apr 2020 21:56:28 +0000 (14:56 -0700)
that were not reported by the OS plugin.  To facilitate this, move
adding/updating the ThreadPlans for a Thread to the ThreadPlanStackMap.
Also move the dumping of thread plans there.

Added some tests for "thread plan list" and "thread plan discard" since
I didn't seem to have written any originally.

Differential Revision: https://reviews.llvm.org/D76814
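
This change adds -t <tid> and -u options to "thread plan list" and a new
"thread plan prune" subcommand (see the CommandObjectThread.cpp and
Options.td hunks).  As a rough sketch, the new list options can be driven
from the Python SB API in the same way the tests added here do it; the
helper name and the TID value are illustrative only, and a debugger with a
stopped process is assumed:

    import lldb

    def show_plans_for_tid(debugger, tid, include_unreported=False):
        """Run "thread plan list" for one TID and return its output, or None."""
        interp = debugger.GetCommandInterpreter()
        result = lldb.SBCommandReturnObject()
        # -t selects the thread by TID; -u also shows plan stacks whose thread
        # is not currently reported by the OS plugin (skipped by default).
        flags = "-u " if include_unreported else ""
        interp.HandleCommand("thread plan list %s-t 0x%x" % (flags, tid), result)
        return result.GetOutput() if result.Succeeded() else None

Passing -u is what lets the command show plan stacks whose thread the OS
plugin did not report at the current stop.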

22 files changed:
lldb/include/lldb/Target/Process.h
lldb/include/lldb/Target/Target.h
lldb/include/lldb/Target/Thread.h
lldb/include/lldb/Target/ThreadPlan.h
lldb/include/lldb/Target/ThreadPlanStack.h
lldb/source/Commands/CommandObjectThread.cpp
lldb/source/Commands/Options.td
lldb/source/Target/Process.cpp
lldb/source/Target/Target.cpp
lldb/source/Target/TargetProperties.td
lldb/source/Target/Thread.cpp
lldb/source/Target/ThreadList.cpp
lldb/source/Target/ThreadPlan.cpp
lldb/source/Target/ThreadPlanStack.cpp
lldb/source/Target/ThreadPlanStepOut.cpp
lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/Makefile [new file with mode: 0644]
lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/TestOSPluginStepping.py [new file with mode: 0644]
lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/main.cpp [new file with mode: 0644]
lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/operating_system.py [new file with mode: 0644]
lldb/test/API/functionalities/thread_plan/Makefile [new file with mode: 0644]
lldb/test/API/functionalities/thread_plan/TestThreadPlanCommands.py [new file with mode: 0644]
lldb/test/API/functionalities/thread_plan/main.c [new file with mode: 0644]

index 02987f9..82c302c 100644 (file)
@@ -2198,19 +2198,75 @@ public:
   }
 
   void SetDynamicCheckers(DynamicCheckerFunctions *dynamic_checkers);
-  
+
+  /// Prune ThreadPlanStacks for unreported threads.
+  ///
+  /// \param[in] tid
+  ///     The tid whose Plan Stack we are seeking to prune.
+  ///
+  /// \return
+  ///     \b true if the TID is found or \b false if not.
+  bool PruneThreadPlansForTID(lldb::tid_t tid);
+
+  /// Prune ThreadPlanStacks for all unreported threads.
+  void PruneThreadPlans();
+
   /// Find the thread plan stack associated with thread with \a tid.
   ///
   /// \param[in] tid
-  ///     The tid whose Plan Stack we are seeking..
+  ///     The tid whose Plan Stack we are seeking.
   ///
   /// \return
   ///     Returns a ThreadPlan if the TID is found or nullptr if not.
   ThreadPlanStack *FindThreadPlans(lldb::tid_t tid);
-  
-  void AddThreadPlansForThread(Thread &thread);
-  
-  void RemoveThreadPlansForTID(lldb::tid_t tid);
+
+  /// Dump the thread plans associated with thread with \a tid.
+  ///
+  /// \param[in,out] strm
+  ///     The stream to which to dump the output.
+  ///
+  /// \param[in] tid
+  ///     The tid whose Plan Stack we are dumping.
+  ///
+  /// \param[in] desc_level
+  ///     How much detail to dump.
+  ///
+  /// \param[in] internal
+  ///     If \b true dump all plans, if \b false only user-initiated plans.
+  ///
+  /// \param[in] condense_trivial
+  ///     If true, only dump a header if the plan stack is just the base plan.
+  ///
+  /// \param[in] skip_unreported_plans
+  ///     If true, only dump a plan if it is currently backed by an
+  ///     lldb_private::Thread *.
+  ///
+  /// \return
+  ///     Returns \b true if TID was found, \b false otherwise.
+  bool DumpThreadPlansForTID(Stream &strm, lldb::tid_t tid,
+                             lldb::DescriptionLevel desc_level, bool internal,
+                             bool condense_trivial, bool skip_unreported_plans);
+
+  /// Dump all the thread plans for this process.
+  ///
+  /// \param[in,out] strm
+  ///     The stream to which to dump the output.
+  ///
+  /// \param[in] desc_level
+  ///     How much detail to dump.
+  ///
+  /// \param[in] internal
+  ///     If \b true dump all plans, if \b false only user-initiated plans.
+  ///
+  /// \param[in] condense_trivial
+  ///     If true, only dump a header if the plan stack is just the base plan.
+  ///
+  /// \param[in] skip_unreported_plans
+  ///     If true, skip printing all thread plan stacks that don't currently
+  ///     have a backing lldb_private::Thread *.
+  void DumpThreadPlans(Stream &strm, lldb::DescriptionLevel desc_level,
+                       bool internal, bool condense_trivial,
+                       bool skip_unreported_plans);
 
   /// Call this to set the lldb in the mode where it breaks on new thread
   /// creations, and then auto-restarts.  This is useful when you are trying
@@ -2547,7 +2603,7 @@ protected:
     virtual EventActionResult HandleBeingInterrupted() = 0;
     virtual const char *GetExitString() = 0;
     void RequestResume() { m_process->m_resume_requested = true; }
-    
+
   protected:
     Process *m_process;
   };
index cc74fe0..f0a57b8 100644 (file)
@@ -204,6 +204,10 @@ public:
   bool GetInjectLocalVariables(ExecutionContext *exe_ctx) const;
 
   void SetInjectLocalVariables(ExecutionContext *exe_ctx, bool b);
+  
+  bool GetOSPluginReportsAllThreads() const;
+
+  void SetOSPluginReportsAllThreads(bool does_report);
 
   void SetRequireHardwareBreakpoints(bool b);
 
@@ -213,6 +217,7 @@ public:
 
   void UpdateLaunchInfoFromProperties();
 
+
 private:
   // Callbacks for m_launch_info.
   void Arg0ValueChangedCallback();
index dda303f..7422ef0 100644 (file)
@@ -1019,16 +1019,6 @@ public:
   ///    otherwise.
   bool DiscardUserThreadPlansUpToIndex(uint32_t thread_index);
 
-  /// Prints the current plan stack.
-  ///
-  /// \param[in] s
-  ///    The stream to which to dump the plan stack info.
-  ///
-  void DumpThreadPlans(
-      Stream *s,
-      lldb::DescriptionLevel desc_level = lldb::eDescriptionLevelVerbose,
-      bool include_internal = true, bool ignore_boring = false) const;
-
   virtual bool CheckpointThreadState(ThreadStateCheckpoint &saved_state);
 
   virtual bool
@@ -1186,7 +1176,7 @@ protected:
   // thread is still in good shape to call virtual thread methods.  This must
   // be called by classes that derive from Thread in their destructor.
   virtual void DestroyThread();
-  
+
   ThreadPlanStack &GetPlans() const;
 
   void PushPlan(lldb::ThreadPlanSP plan_sp);
@@ -1260,6 +1250,7 @@ protected:
   bool m_destroy_called; // This is used internally to make sure derived Thread
                          // classes call DestroyThread.
   LazyBool m_override_should_notify;
+  mutable std::unique_ptr<ThreadPlanStack> m_null_plan_stack_up;
 
 private:
   bool m_extended_info_fetched; // Have we tried to retrieve the m_extended_info
@@ -1267,7 +1258,6 @@ private:
   StructuredData::ObjectSP m_extended_info; // The extended info for this thread
 
 private:
-  
   void BroadcastSelectedFrameChange(StackID &new_frame_id);
 
   DISALLOW_COPY_AND_ASSIGN(Thread);
index ef1e5f2..5b7bea2 100644 (file)
@@ -376,7 +376,9 @@ public:
   const Target &GetTarget() const;
 
   /// Print a description of this thread to the stream \a s.
-  /// \a thread.
+  /// \a thread.  Don't expect the result of GetThread to be valid in
+  /// the description method.  This might get called when the underlying
+  /// Thread has not been reported, so we only know the TID and not the thread.
   ///
   /// \param[in] s
   ///    The stream to which to print the description.
@@ -598,7 +600,9 @@ private:
   // For ThreadPlan only
   static lldb::user_id_t GetNextID();
 
-  Thread *m_thread;
+  Thread *m_thread; // Stores a cached value of the thread, which is set to
+                    // nullptr when the thread resumes.  Don't use this anywhere
+                    // but ThreadPlan::GetThread().
   ThreadPlanKind m_kind;
   std::string m_name;
   std::recursive_mutex m_plan_complete_mutex;
index be7791a..de52ee3 100644 (file)
@@ -32,14 +32,14 @@ class ThreadPlanStack {
   friend class lldb_private::Thread;
 
 public:
-  ThreadPlanStack(Thread &thread) {}
+  ThreadPlanStack(const Thread &thread, bool make_null = false);
   ~ThreadPlanStack() {}
 
   enum StackKind { ePlans, eCompletedPlans, eDiscardedPlans };
 
   using PlanStack = std::vector<lldb::ThreadPlanSP>;
 
-  void DumpThreadPlans(Stream *s, lldb::DescriptionLevel desc_level,
+  void DumpThreadPlans(Stream &s, lldb::DescriptionLevel desc_level,
                        bool include_internal) const;
 
   size_t CheckpointCompletedPlans();
@@ -98,6 +98,10 @@ public:
 private:
   const PlanStack &GetStackOfKind(ThreadPlanStack::StackKind kind) const;
 
+  void PrintOneStack(Stream &s, llvm::StringRef stack_name,
+                     const PlanStack &stack, lldb::DescriptionLevel desc_level,
+                     bool include_internal) const;
+
   PlanStack m_plans;           ///< The stack of plans this thread is executing.
   PlanStack m_completed_plans; ///< Plans that have been completed by this
                                /// stop.  They get deleted when the thread
@@ -112,9 +116,13 @@ private:
 
 class ThreadPlanStackMap {
 public:
-  ThreadPlanStackMap() {}
+  ThreadPlanStackMap(Process &process) : m_process(process) {}
   ~ThreadPlanStackMap() {}
 
+  // Prune the map using the current_threads list.
+  void Update(ThreadList &current_threads, bool delete_missing,
+              bool check_for_new = true);
+
   void AddThread(Thread &thread) {
     lldb::tid_t tid = thread.GetID();
     auto result = m_plans_list.emplace(tid, thread);
@@ -143,7 +151,19 @@ public:
     m_plans_list.clear();
   }
 
+  // Implements Process::DumpThreadPlans
+  void DumpPlans(Stream &strm, lldb::DescriptionLevel desc_level, bool internal,
+                 bool ignore_boring, bool skip_unreported);
+
+  // Implements Process::DumpThreadPlansForTID
+  bool DumpPlansForTID(Stream &strm, lldb::tid_t tid,
+                       lldb::DescriptionLevel desc_level, bool internal,
+                       bool ignore_boring, bool skip_unreported);
+                       
+  bool PrunePlansForTID(lldb::tid_t tid);
+
 private:
+  Process &m_process;
   using PlansList = std::unordered_map<lldb::tid_t, ThreadPlanStack>;
   PlansList m_plans_list;
 };
index 3117c96..579f336 100644 (file)
@@ -1833,25 +1833,36 @@ public:
 
     Status SetOptionValue(uint32_t option_idx, llvm::StringRef option_arg,
                           ExecutionContext *execution_context) override {
-      Status error;
       const int short_option = m_getopt_table[option_idx].val;
 
       switch (short_option) {
       case 'i':
         m_internal = true;
         break;
+      case 't':
+        lldb::tid_t tid;
+        if (option_arg.getAsInteger(0, tid))
+          return Status("invalid tid: '%s'.", option_arg.str().c_str());
+        m_tids.push_back(tid);
+        break;
+      case 'u':
+        m_unreported = false;
+        break;
       case 'v':
         m_verbose = true;
         break;
       default:
         llvm_unreachable("Unimplemented option");
       }
-      return error;
+      return {};
     }
 
     void OptionParsingStarting(ExecutionContext *execution_context) override {
       m_verbose = false;
       m_internal = false;
+      m_unreported = true; // The variable is "skip unreported" and we want to
+                           // skip unreported by default.
+      m_tids.clear();
     }
 
     llvm::ArrayRef<OptionDefinition> GetDefinitions() override {
@@ -1861,6 +1872,8 @@ public:
     // Instance variables to hold the values for command options.
     bool m_verbose;
     bool m_internal;
+    bool m_unreported;
+    std::vector<lldb::tid_t> m_tids;
   };
 
   CommandObjectThreadPlanList(CommandInterpreter &interpreter)
@@ -1879,25 +1892,59 @@ public:
 
   Options *GetOptions() override { return &m_options; }
 
+  bool DoExecute(Args &command, CommandReturnObject &result) override {
+    // If we are reporting all threads, dispatch to the Process to do that:
+    if (command.GetArgumentCount() == 0 && m_options.m_tids.empty()) {
+      Stream &strm = result.GetOutputStream();
+      DescriptionLevel desc_level = m_options.m_verbose
+                                        ? eDescriptionLevelVerbose
+                                        : eDescriptionLevelFull;
+      m_exe_ctx.GetProcessPtr()->DumpThreadPlans(
+          strm, desc_level, m_options.m_internal, true, m_options.m_unreported);
+      result.SetStatus(eReturnStatusSuccessFinishResult);
+      return true;
+    } else {
+      // Handle any TIDs the user specified with -t first, then fall through
+      // to any thread indexes...
+      if (!m_options.m_tids.empty()) {
+        Process *process = m_exe_ctx.GetProcessPtr();
+        StreamString tmp_strm;
+        for (lldb::tid_t tid : m_options.m_tids) {
+          bool success = process->DumpThreadPlansForTID(
+              tmp_strm, tid, eDescriptionLevelFull, m_options.m_internal,
+              true /* condense_trivial */, m_options.m_unreported);
+          // If we didn't find a TID, stop here and return an error.
+          if (!success) {
+            result.SetError("Error dumping plans:");
+            result.AppendError(tmp_strm.GetString());
+            result.SetStatus(eReturnStatusFailed);
+            return false;
+          }
+          // Otherwise, add our data to the output:
+          result.GetOutputStream() << tmp_strm.GetString();
+        }
+      }
+      return CommandObjectIterateOverThreads::DoExecute(command, result);
+    }
+  }
+
 protected:
   bool HandleOneThread(lldb::tid_t tid, CommandReturnObject &result) override {
-    ThreadSP thread_sp =
-        m_exe_ctx.GetProcessPtr()->GetThreadList().FindThreadByID(tid);
-    if (!thread_sp) {
-      result.AppendErrorWithFormat("thread no longer exists: 0x%" PRIx64 "\n",
-                                   tid);
-      result.SetStatus(eReturnStatusFailed);
-      return false;
-    }
+    // If we have already handled this from a -t option, skip it here.
+    if (std::find(m_options.m_tids.begin(), m_options.m_tids.end(), tid) !=
+        m_options.m_tids.end())
+      return true;
 
-    Thread *thread = thread_sp.get();
+    Process *process = m_exe_ctx.GetProcessPtr();
 
     Stream &strm = result.GetOutputStream();
     DescriptionLevel desc_level = eDescriptionLevelFull;
     if (m_options.m_verbose)
       desc_level = eDescriptionLevelVerbose;
 
-    thread->DumpThreadPlans(&strm, desc_level, m_options.m_internal, true);
+    process->DumpThreadPlansForTID(strm, tid, desc_level, m_options.m_internal,
+                                   true /* condense_trivial */,
+                                   m_options.m_unreported);
     return true;
   }
 
@@ -1974,6 +2021,75 @@ public:
   }
 };
 
+class CommandObjectThreadPlanPrune : public CommandObjectParsed {
+public:
+  CommandObjectThreadPlanPrune(CommandInterpreter &interpreter)
+      : CommandObjectParsed(interpreter, "thread plan prune",
+                            "Removes any thread plans associated with "
+                            "currently unreported threads.  "
+                            "Specify one or more TID's to remove, or if no "
+                            "TID's are provides, remove threads for all "
+                            "unreported threads",
+                            nullptr,
+                            eCommandRequiresProcess |
+                                eCommandTryTargetAPILock |
+                                eCommandProcessMustBeLaunched |
+                                eCommandProcessMustBePaused) {
+    CommandArgumentEntry arg;
+    CommandArgumentData tid_arg;
+
+    // Define the first (and only) variant of this arg.
+    tid_arg.arg_type = eArgTypeThreadID;
+    tid_arg.arg_repetition = eArgRepeatStar;
+
+    // There is only one variant this argument could be; put it into the
+    // argument entry.
+    arg.push_back(tid_arg);
+
+    // Push the data for the first argument into the m_arguments vector.
+    m_arguments.push_back(arg);
+  }
+
+  ~CommandObjectThreadPlanPrune() override = default;
+
+  bool DoExecute(Args &args, CommandReturnObject &result) override {
+    Process *process = m_exe_ctx.GetProcessPtr();
+    
+    if (args.GetArgumentCount() == 0) {
+      process->PruneThreadPlans();
+      result.SetStatus(eReturnStatusSuccessFinishNoResult);
+      return true;  
+    }
+
+    bool success;
+    const size_t num_args = args.GetArgumentCount();
+
+    std::lock_guard<std::recursive_mutex> guard(
+        process->GetThreadList().GetMutex());
+
+    for (size_t i = 0; i < num_args; i++) {
+      bool success;
+
+      lldb::tid_t tid = StringConvert::ToUInt64(
+          args.GetArgumentAtIndex(i), 0, 0, &success);
+      if (!success) {
+        result.AppendErrorWithFormat("invalid thread specification: \"%s\"\n",
+                                     args.GetArgumentAtIndex(i));
+        result.SetStatus(eReturnStatusFailed);
+        return false;
+      }
+      if (!process->PruneThreadPlansForTID(tid)) {
+        result.AppendErrorWithFormat("Could not find unreported tid: \"%s\"\n",
+                                     args.GetArgumentAtIndex(i));
+        result.SetStatus(eReturnStatusFailed);
+        return false;
+      }
+    }
+    result.SetStatus(eReturnStatusSuccessFinishNoResult);
+    return true;
+  }
+};
+
 // CommandObjectMultiwordThreadPlan
 
 class CommandObjectMultiwordThreadPlan : public CommandObjectMultiword {
@@ -1988,6 +2104,9 @@ public:
     LoadSubCommand(
         "discard",
         CommandObjectSP(new CommandObjectThreadPlanDiscard(interpreter)));
+    LoadSubCommand(
+        "prune",
+        CommandObjectSP(new CommandObjectThreadPlanPrune(interpreter)));
   }
 
   ~CommandObjectMultiwordThreadPlan() override = default;
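
A similar sketch for the prune path implemented above, assuming a stopped
process; the helper is hypothetical, and the call only succeeds for a TID
that is not currently backed by a reported thread, matching the error path
in DoExecute:

    import lldb

    def prune_plans_for_tid(debugger, tid):
        """Drop the preserved plan stack for an unreported TID."""
        interp = debugger.GetCommandInterpreter()
        result = lldb.SBCommandReturnObject()
        interp.HandleCommand("thread plan prune 0x%x" % tid, result)
        # Fails if the TID cannot be parsed or if the thread is still being
        # reported, in which case its plan stack is left in place.
        return result.Succeeded()
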
index b8a1426..b0cf97e 100644 (file)
@@ -969,6 +969,11 @@ let Command = "thread plan list" in {
     Desc<"Display more information about the thread plans">;
   def thread_plan_list_internal : Option<"internal", "i">, Group<1>,
     Desc<"Display internal as well as user thread plans">;
+  def thread_plan_list_thread_id : Option<"thread-id", "t">, Group<1>,
+    Arg<"ThreadID">, Desc<"List the thread plans for this TID; the option can "
+    "be specified more than once.">;
+  def thread_plan_list_unreported : Option<"unreported", "u">, Group<1>,
+    Desc<"Display thread plans for unreported threads">;
 }
 
 let Command = "type summary add" in {
index 411160d..a399997 100644 (file)
@@ -478,7 +478,7 @@ Process::Process(lldb::TargetSP target_sp, ListenerSP listener_sp,
       m_mod_id(), m_process_unique_id(0), m_thread_index_id(0),
       m_thread_id_to_index_id_map(), m_exit_status(-1), m_exit_string(),
       m_exit_status_mutex(), m_thread_mutex(), m_thread_list_real(this),
-      m_thread_list(this), m_extended_thread_list(this),
+      m_thread_list(this), m_thread_plans(*this), m_extended_thread_list(this),
       m_extended_thread_stop_id(0), m_queue_list(this), m_queue_list_stop_id(0),
       m_notifications(), m_image_tokens(), m_listener_sp(listener_sp),
       m_breakpoint_site_list(), m_dynamic_checkers_up(),
@@ -1184,9 +1184,12 @@ void Process::UpdateThreadListIfNeeded() {
   const uint32_t stop_id = GetStopID();
   if (m_thread_list.GetSize(false) == 0 ||
       stop_id != m_thread_list.GetStopID()) {
+    bool clear_unused_threads = true;
     const StateType state = GetPrivateState();
     if (StateIsStoppedState(state, true)) {
       std::lock_guard<std::recursive_mutex> guard(m_thread_list.GetMutex());
+      m_thread_list.SetStopID(stop_id);
+
       // m_thread_list does have its own mutex, but we need to hold onto the
       // mutex between the call to UpdateThreadList(...) and the
       // os->UpdateThreadList(...) so it doesn't change on us
@@ -1207,6 +1210,10 @@ void Process::UpdateThreadListIfNeeded() {
           size_t num_old_threads = old_thread_list.GetSize(false);
           for (size_t i = 0; i < num_old_threads; ++i)
             old_thread_list.GetThreadAtIndex(i, false)->ClearBackingThread();
+          // See if the OS plugin reports all threads.  If it does, then
+          // it is safe to clear unseen threads' plans here.  Otherwise we
+          // should preserve them in case they show up again:
+          clear_unused_threads = GetTarget().GetOSPluginReportsAllThreads();
 
           // Turn off dynamic types to ensure we don't run any expressions.
           // Objective-C can run an expression to determine if a SBValue is a
@@ -1233,7 +1240,7 @@ void Process::UpdateThreadListIfNeeded() {
             target.SetPreferDynamicValue(saved_prefer_dynamic);
         } else {
           // No OS plug-in, the new thread list is the same as the real thread
-          // list
+          // list.
           new_thread_list = real_thread_list;
         }
 
@@ -1250,6 +1257,12 @@ void Process::UpdateThreadListIfNeeded() {
           m_queue_list_stop_id = GetLastNaturalStopID();
         }
       }
+      // Now update the plan stack map.
+      // If we do have an OS plugin, any absent real threads in the
+      // m_thread_list have already been removed from the ThreadPlanStackMap.
+      // So any remaining threads are OS Plugin threads, and those we want to
+      // preserve in case they show up again.
+      m_thread_plans.Update(m_thread_list, clear_unused_threads);
     }
   }
 }
@@ -1258,14 +1271,26 @@ ThreadPlanStack *Process::FindThreadPlans(lldb::tid_t tid) {
   return m_thread_plans.Find(tid);
 }
 
-void Process::AddThreadPlansForThread(Thread &thread) {
-  if (m_thread_plans.Find(thread.GetID()))
-    return;
-  m_thread_plans.AddThread(thread);
+bool Process::PruneThreadPlansForTID(lldb::tid_t tid) {
+  return m_thread_plans.PrunePlansForTID(tid);
 }
 
-void Process::RemoveThreadPlansForTID(lldb::tid_t tid) {
-  m_thread_plans.RemoveTID(tid);
+void Process::PruneThreadPlans() {
+  m_thread_plans.Update(GetThreadList(), true, false);
+}
+
+bool Process::DumpThreadPlansForTID(Stream &strm, lldb::tid_t tid,
+                                    lldb::DescriptionLevel desc_level,
+                                    bool internal, bool condense_trivial,
+                                    bool skip_unreported_plans) {
+  return m_thread_plans.DumpPlansForTID(
+      strm, tid, desc_level, internal, condense_trivial, skip_unreported_plans);
+}
+void Process::DumpThreadPlans(Stream &strm, lldb::DescriptionLevel desc_level,
+                              bool internal, bool condense_trivial,
+                              bool skip_unreported_plans) {
+  m_thread_plans.DumpPlans(strm, desc_level, internal, condense_trivial,
+                           skip_unreported_plans);
 }
 
 void Process::UpdateQueueListIfNeeded() {
index e2c8081..bdfd314 100644 (file)
@@ -3487,6 +3487,34 @@ void TargetProperties::SetInjectLocalVariables(ExecutionContext *exe_ctx,
                                             true);
 }
 
+bool TargetProperties::GetOSPluginReportsAllThreads() const {
+  const bool fail_value = true;
+  const Property *exp_property =
+      m_collection_sp->GetPropertyAtIndex(nullptr, true, ePropertyExperimental);
+  OptionValueProperties *exp_values =
+      exp_property->GetValue()->GetAsProperties();
+  if (!exp_values)
+    return fail_value;
+    
+  return 
+      exp_values->GetPropertyAtIndexAsBoolean(nullptr, 
+                                              ePropertyOSPluginReportsAllThreads,
+                                              fail_value);
+}
+
+void TargetProperties::SetOSPluginReportsAllThreads(bool does_report) {
+  const Property *exp_property =
+      m_collection_sp->GetPropertyAtIndex(nullptr, true, ePropertyExperimental);
+  OptionValueProperties *exp_values =
+      exp_property->GetValue()->GetAsProperties();
+  if (exp_values)
+    exp_values->SetPropertyAtIndexAsBoolean(nullptr, 
+                                            ePropertyOSPluginReportsAllThreads,
+                                            does_report);
+}
+
+
+
 ArchSpec TargetProperties::GetDefaultArchitecture() const {
   OptionValueArch *value = m_collection_sp->GetPropertyAtIndexAsOptionValueArch(
       nullptr, ePropertyDefaultArch);
index c8dd0a1..fa4f4bd 100644 (file)
@@ -4,6 +4,10 @@ let Definition = "experimental" in {
   def InjectLocalVars : Property<"inject-local-vars", "Boolean">,
     Global, DefaultTrue,
     Desc<"If true, inject local variables explicitly into the expression text. This will fix symbol resolution when there are name collisions between ivars and local variables. But it can make expressions run much more slowly.">;
+  def OSPluginReportsAllThreads: Property<"os-plugin-reports-all-threads", "Boolean">,
+    Global,
+    DefaultTrue,
+    Desc<"Set to False if your OS Plugins doesn't report all threads on each stop.">;
 }
 
 let Definition = "target" in {
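
The os-plugin-reports-all-threads property defined above is what the
stepping test in this patch toggles before loading its OS plugin.  A
minimal sketch of doing the same from the SB API, assuming an existing
lldb.SBDebugger; the function name is illustrative:

    import lldb

    def allow_partial_os_plugin_thread_lists(debugger):
        # With the property set to false, lldb preserves the plan stacks of
        # threads an OS plugin omits at a stop instead of discarding them.
        debugger.HandleCommand(
            "settings set target.experimental.os-plugin-reports-all-threads false")
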
index 6b8edfb..4af4abd 100644 (file)
@@ -240,10 +240,6 @@ Thread::Thread(Process &process, lldb::tid_t tid, bool use_invalid_index_id)
             static_cast<void *>(this), GetID());
 
   CheckInWithManager();
-  
-  process.AddThreadPlansForThread(*this);
-  
-  QueueFundamentalPlan(true);
 }
 
 Thread::~Thread() {
@@ -781,7 +777,9 @@ bool Thread::ShouldStop(Event *event_ptr) {
     LLDB_LOGF(log, "^^^^^^^^ Thread::ShouldStop Begin ^^^^^^^^");
     StreamString s;
     s.IndentMore();
-    DumpThreadPlans(&s);
+    GetProcess()->DumpThreadPlansForTID(
+        s, GetID(), eDescriptionLevelVerbose, true /* internal */,
+        false /* condense_trivial */, true /* skip_unreported */);
     LLDB_LOGF(log, "Plan stack initial state:\n%s", s.GetData());
   }
 
@@ -945,7 +943,9 @@ bool Thread::ShouldStop(Event *event_ptr) {
   if (log) {
     StreamString s;
     s.IndentMore();
-    DumpThreadPlans(&s);
+    GetProcess()->DumpThreadPlansForTID(
+        s, GetID(), eDescriptionLevelVerbose, true /* internal */,
+        false /* condense_trivial */, true /* skip_unreported */);
     LLDB_LOGF(log, "Plan stack final state:\n%s", s.GetData());
     LLDB_LOGF(log, "vvvvvvvv Thread::ShouldStop End (returning %i) vvvvvvvv",
               should_stop);
@@ -1051,8 +1051,18 @@ bool Thread::MatchesSpec(const ThreadSpec *spec) {
 
 ThreadPlanStack &Thread::GetPlans() const {
   ThreadPlanStack *plans = GetProcess()->FindThreadPlans(GetID());
-  assert(plans && "Can't have a thread with no plans");
-  return *plans;
+  if (plans)
+    return *plans;
+
+  // History threads don't have a thread plan, but they do get asked to
+  // describe themselves, which usually involves pulling out the stop reason.
+  // That in turn will check for a completed plan on the ThreadPlanStack.
+  // Instead of special-casing at that point, we return a Stack with a
+  // ThreadPlanNull as its base plan.  That will give the right answers to the
+  // queries GetDescription makes, and only assert if you try to run the thread.
+  if (!m_null_plan_stack_up)
+    m_null_plan_stack_up.reset(new ThreadPlanStack(*this, true));
+  return *(m_null_plan_stack_up.get());
 }
 
 void Thread::PushPlan(ThreadPlanSP thread_plan_sp) {
@@ -1372,26 +1382,6 @@ lldb::ThreadPlanSP Thread::QueueThreadPlanForStepScripted(
 
 uint32_t Thread::GetIndexID() const { return m_index_id; }
 
-void Thread::DumpThreadPlans(Stream *s, lldb::DescriptionLevel desc_level,
-                             bool include_internal,
-                             bool ignore_boring_threads) const {
-  if (ignore_boring_threads) {
-    if (!GetPlans().AnyPlans() && !GetPlans().AnyCompletedPlans()
-        && !GetPlans().AnyDiscardedPlans()) {
-      s->Printf("thread #%u: tid = 0x%4.4" PRIx64 "\n", GetIndexID(), GetID());
-      s->IndentMore();
-      s->Indent();
-      s->Printf("No active thread plans\n");
-      s->IndentLess();
-      return;
-    }
-  }
-  
-  s->Indent();
-  s->Printf("thread #%u: tid = 0x%4.4" PRIx64 ":\n", GetIndexID(), GetID());
-  GetPlans().DumpThreadPlans(s, desc_level, include_internal);
-}
-
 TargetSP Thread::CalculateTarget() {
   TargetSP target_sp;
   ProcessSP process_sp(GetProcess());
index 327ff97..032dcc9 100644 (file)
@@ -715,6 +715,11 @@ void ThreadList::Update(ThreadList &rhs) {
     // to work around the issue
     collection::iterator rhs_pos, rhs_end = rhs.m_threads.end();
     for (rhs_pos = rhs.m_threads.begin(); rhs_pos != rhs_end; ++rhs_pos) {
+      // If this thread has already been destroyed, we don't need to look for
+      // it to destroy it again.
+      if (!(*rhs_pos)->IsValid())
+        continue;
+
       const lldb::tid_t tid = (*rhs_pos)->GetID();
       bool thread_is_alive = false;
       const uint32_t num_threads = m_threads.size();
@@ -728,7 +733,6 @@ void ThreadList::Update(ThreadList &rhs) {
       }
       if (!thread_is_alive) {
         (*rhs_pos)->DestroyThread();
-        m_process->RemoveThreadPlansForTID((*rhs_pos)->GetID());
       }
     }
   }
index d0da8e1..52b1a8b 100644 (file)
@@ -21,7 +21,7 @@ using namespace lldb_private;
 // ThreadPlan constructor
 ThreadPlan::ThreadPlan(ThreadPlanKind kind, const char *name, Thread &thread,
                        Vote stop_vote, Vote run_vote)
-    : m_process(*thread.GetProcess().get()), m_tid(thread.GetID()), 
+    : m_process(*thread.GetProcess().get()), m_tid(thread.GetID()),
       m_stop_vote(stop_vote), m_run_vote(run_vote),
       m_takes_iteration_count(false), m_could_not_resolve_hw_bp(false),
       m_kind(kind), m_thread(&thread), m_name(name), m_plan_complete_mutex(),
@@ -41,7 +41,7 @@ const Target &ThreadPlan::GetTarget() const { return m_process.GetTarget(); }
 Thread &ThreadPlan::GetThread() {
   if (m_thread)
     return *m_thread;
-    
+
   ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(m_tid);
   m_thread = thread_sp.get();
   return *m_thread;
@@ -127,13 +127,17 @@ bool ThreadPlan::WillResume(StateType resume_state, bool current_plan) {
           "%s Thread #%u (0x%p): tid = 0x%4.4" PRIx64 ", pc = 0x%8.8" PRIx64
           ", sp = 0x%8.8" PRIx64 ", fp = 0x%8.8" PRIx64 ", "
           "plan = '%s', state = %s, stop others = %d",
-          __FUNCTION__, GetThread().GetIndexID(), 
+          __FUNCTION__, GetThread().GetIndexID(),
           static_cast<void *>(&GetThread()), m_tid, static_cast<uint64_t>(pc),
           static_cast<uint64_t>(sp), static_cast<uint64_t>(fp), m_name.c_str(),
           StateAsCString(resume_state), StopOthers());
     }
   }
-  return DoWillResume(resume_state, current_plan);
+  bool success = DoWillResume(resume_state, current_plan);
+  m_thread = nullptr; // We don't cache the thread pointer over resumes.  This
+                      // Thread might go away, and another Thread might
+                      // represent the same underlying object on a later stop.
+  return success;
 }
 
 lldb::user_id_t ThreadPlan::GetNextID() {
index bd11718..6120d0c 100644 (file)
 using namespace lldb;
 using namespace lldb_private;
 
-static void PrintPlanElement(Stream *s, const ThreadPlanSP &plan,
+static void PrintPlanElement(Stream &s, const ThreadPlanSP &plan,
                              lldb::DescriptionLevel desc_level,
                              int32_t elem_idx) {
-  s->IndentMore();
-  s->Indent();
-  s->Printf("Element %d: ", elem_idx);
-  plan->GetDescription(s, desc_level);
-  s->EOL();
-  s->IndentLess();
+  s.IndentMore();
+  s.Indent();
+  s.Printf("Element %d: ", elem_idx);
+  plan->GetDescription(&s, desc_level);
+  s.EOL();
+  s.IndentLess();
 }
 
-void ThreadPlanStack::DumpThreadPlans(Stream *s,
+ThreadPlanStack::ThreadPlanStack(const Thread &thread, bool make_null) {
+  if (make_null) {
+    // The ThreadPlanNull doesn't do anything to the Thread, so this is actually
+    // still a const operation.
+    m_plans.push_back(
+        ThreadPlanSP(new ThreadPlanNull(const_cast<Thread &>(thread))));
+  }
+}
+
+void ThreadPlanStack::DumpThreadPlans(Stream &s,
                                       lldb::DescriptionLevel desc_level,
                                       bool include_internal) const {
 
   uint32_t stack_size;
 
-  s->IndentMore();
-  s->Indent();
-  s->Printf("Active plan stack:\n");
-  int32_t print_idx = 0;
-  for (auto plan : m_plans) {
-    PrintPlanElement(s, plan, desc_level, print_idx++);
-  }
+  s.IndentMore();
+  PrintOneStack(s, "Active plan stack", m_plans, desc_level, include_internal);
+  PrintOneStack(s, "Completed plan stack", m_completed_plans, desc_level,
+                include_internal);
+  PrintOneStack(s, "Discarded plan stack", m_discarded_plans, desc_level,
+                include_internal);
+  s.IndentLess();
+}
 
-  if (AnyCompletedPlans()) {
-    print_idx = 0;
-    s->Indent();
-    s->Printf("Completed Plan Stack:\n");
-    for (auto plan : m_completed_plans)
-      PrintPlanElement(s, plan, desc_level, print_idx++);
+void ThreadPlanStack::PrintOneStack(Stream &s, llvm::StringRef stack_name,
+                                    const PlanStack &stack,
+                                    lldb::DescriptionLevel desc_level,
+                                    bool include_internal) const {
+  // If the stack is empty, just exit:
+  if (stack.empty())
+    return;
+
+  // Make sure there are public plans to print:
+  bool any_public = false;
+  if (!include_internal) {
+    for (auto plan : stack) {
+      if (!plan->GetPrivate()) {
+        any_public = true;
+        break;
+      }
+    }
   }
 
-  if (AnyDiscardedPlans()) {
-    print_idx = 0;
-    s->Indent();
-    s->Printf("Discarded Plan Stack:\n");
-    for (auto plan : m_discarded_plans)
+  if (include_internal || any_public) {
+    int print_idx = 0;
+    s.Indent();
+    s.Printf("%s:\n", stack_name);
+    for (auto plan : stack) {
+      if (!include_internal && plan->GetPrivate())
+        continue;
       PrintPlanElement(s, plan, desc_level, print_idx++);
+    }
   }
-
-  s->IndentLess();
 }
 
 size_t ThreadPlanStack::CheckpointCompletedPlans() {
@@ -368,3 +390,123 @@ ThreadPlanStack::GetStackOfKind(ThreadPlanStack::StackKind kind) const {
   }
   llvm_unreachable("Invalid StackKind value");
 }
+
+void ThreadPlanStackMap::Update(ThreadList &current_threads,
+                                bool delete_missing,
+                                bool check_for_new) {
+
+  // Now find all the new threads and add them to the map:
+  if (check_for_new) {
+    for (auto thread : current_threads.Threads()) {
+      lldb::tid_t cur_tid = thread->GetID();
+      if (!Find(cur_tid)) {
+        AddThread(*thread.get());
+        thread->QueueFundamentalPlan(true);
+      }
+    }
+  }
+
+  // If we aren't reaping missing threads at this point,
+  // we are done.
+  if (!delete_missing)
+    return;
+  // Otherwise we are going to delete plan stacks from the map, so scan for
+  // TIDs that are absent from the current thread list:
+  std::vector<lldb::tid_t> missing_threads;
+
+  for (auto thread_plans : m_plans_list) {
+    lldb::tid_t cur_tid = thread_plans.first;
+    ThreadSP thread_sp = current_threads.FindThreadByID(cur_tid);
+    if (!thread_sp)
+      missing_threads.push_back(cur_tid);
+  }
+  for (lldb::tid_t tid : missing_threads) {
+    RemoveTID(tid);
+  }
+}
+
+void ThreadPlanStackMap::DumpPlans(Stream &strm,
+                                   lldb::DescriptionLevel desc_level,
+                                   bool internal, bool condense_if_trivial,
+                                   bool skip_unreported) {
+  for (auto elem : m_plans_list) {
+    lldb::tid_t tid = elem.first;
+    uint32_t index_id = 0;
+    ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);
+
+    if (skip_unreported) {
+      if (!thread_sp)
+        continue;
+    }
+    if (thread_sp)
+      index_id = thread_sp->GetIndexID();
+
+    if (condense_if_trivial) {
+      if (!elem.second.AnyPlans() && !elem.second.AnyCompletedPlans() &&
+          !elem.second.AnyDiscardedPlans()) {
+        strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 "\n", index_id, tid);
+        strm.IndentMore();
+        strm.Indent();
+        strm.Printf("No active thread plans\n");
+        strm.IndentLess();
+        continue;
+      }
+    }
+
+    strm.Indent();
+    strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 ":\n", index_id, tid);
+
+    elem.second.DumpThreadPlans(strm, desc_level, internal);
+  }
+}
+
+bool ThreadPlanStackMap::DumpPlansForTID(Stream &strm, lldb::tid_t tid,
+                                         lldb::DescriptionLevel desc_level,
+                                         bool internal,
+                                         bool condense_if_trivial,
+                                         bool skip_unreported) {
+  uint32_t index_id = 0;
+  ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);
+
+  if (skip_unreported) {
+    if (!thread_sp) {
+      strm.Format("Unknown TID: {0}", tid);
+      return false;
+    }
+  }
+
+  if (thread_sp)
+    index_id = thread_sp->GetIndexID();
+  ThreadPlanStack *stack = Find(tid);
+  if (!stack) {
+    strm.Format("Unknown TID: {0}\n", tid);
+    return false;
+  }
+
+  if (condense_if_trivial) {
+    if (!stack->AnyPlans() && !stack->AnyCompletedPlans() &&
+        !stack->AnyDiscardedPlans()) {
+      strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 "\n", index_id, tid);
+      strm.IndentMore();
+      strm.Indent();
+      strm.Printf("No active thread plans\n");
+      strm.IndentLess();
+      return true;
+    }
+  }
+
+  strm.Indent();
+  strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 ":\n", index_id, tid);
+
+  stack->DumpThreadPlans(strm, desc_level, internal);
+  return true;
+}
+
+bool ThreadPlanStackMap::PrunePlansForTID(lldb::tid_t tid) {
+  // We only remove the plans for unreported TID's.
+  ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);
+  if (thread_sp)
+    return false;
+
+  return RemoveTID(tid);
+}
index 29d3fd5..9f0749c 100644 (file)
@@ -188,7 +188,7 @@ void ThreadPlanStepOut::DidPush() {
 
 ThreadPlanStepOut::~ThreadPlanStepOut() {
   if (m_return_bp_id != LLDB_INVALID_BREAK_ID)
-    GetThread().CalculateTarget()->RemoveBreakpointByID(m_return_bp_id);
+    GetTarget().RemoveBreakpointByID(m_return_bp_id);
 }
 
 void ThreadPlanStepOut::GetDescription(Stream *s,
@@ -204,7 +204,7 @@ void ThreadPlanStepOut::GetDescription(Stream *s,
       s->Printf("Stepping out from ");
       Address tmp_address;
       if (tmp_address.SetLoadAddress(m_step_from_insn, &GetTarget())) {
-        tmp_address.Dump(s, &GetThread(), Address::DumpStyleResolvedDescription,
+        tmp_address.Dump(s, &m_process, Address::DumpStyleResolvedDescription,
                          Address::DumpStyleLoadAddress);
       } else {
         s->Printf("address 0x%" PRIx64 "", (uint64_t)m_step_from_insn);
@@ -216,7 +216,7 @@ void ThreadPlanStepOut::GetDescription(Stream *s,
 
       s->Printf(" returning to frame at ");
       if (tmp_address.SetLoadAddress(m_return_addr, &GetTarget())) {
-        tmp_address.Dump(s, &GetThread(), Address::DumpStyleResolvedDescription,
+        tmp_address.Dump(s, &m_process, Address::DumpStyleResolvedDescription,
                          Address::DumpStyleLoadAddress);
       } else {
         s->Printf("address 0x%" PRIx64 "", (uint64_t)m_return_addr);
@@ -227,6 +227,9 @@ void ThreadPlanStepOut::GetDescription(Stream *s,
     }
   }
 
+  if (m_stepped_past_frames.empty())
+    return;
+
   s->Printf("\n");
   for (StackFrameSP frame_sp : m_stepped_past_frames) {
     s->Printf("Stepped out past: ");
diff --git a/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/Makefile b/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/Makefile
new file mode 100644 (file)
index 0000000..c46619c
--- /dev/null
@@ -0,0 +1,4 @@
+CXX_SOURCES := main.cpp
+ENABLE_THREADS := YES
+
+include Makefile.rules
diff --git a/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/TestOSPluginStepping.py b/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/TestOSPluginStepping.py
new file mode 100644 (file)
index 0000000..5bba483
--- /dev/null
@@ -0,0 +1,113 @@
+"""
+Test that stepping works even when the OS Plugin doesn't report
+all threads at every stop.
+"""
+
+from __future__ import print_function
+
+
+import os
+import lldb
+from lldbsuite.test.lldbtest import *
+import lldbsuite.test.lldbutil as lldbutil
+
+
+class TestOSPluginStepping(TestBase):
+
+    mydir = TestBase.compute_mydir(__file__)
+    NO_DEBUG_INFO_TESTCASE = True
+
+    def test_python_os_plugin(self):
+        """Test that stepping works when the OS Plugin doesn't report all
+           threads at every stop"""
+        self.build()
+        self.main_file = lldb.SBFileSpec('main.cpp')
+        self.run_python_os_step_missing_thread(False)
+
+    def test_python_os_plugin_prune(self):
+        """Test that pruning the unreported PlanStacks works"""
+        self.build()
+        self.main_file = lldb.SBFileSpec('main.cpp')
+        self.run_python_os_step_missing_thread(True)
+
+    def get_os_thread(self):
+        return self.process.GetThreadByID(0x111111111)
+
+    def is_os_thread(self, thread):
+        id = thread.GetID()
+        return id == 0x111111111
+    
+    def run_python_os_step_missing_thread(self, do_prune):
+        """Test that the Python operating system plugin works correctly"""
+
+        # Our OS plugin does NOT report all threads:
+        result = self.dbg.HandleCommand("settings set target.experimental.os-plugin-reports-all-threads false")
+
+        python_os_plugin_path = os.path.join(self.getSourceDir(),
+                                             "operating_system.py")
+        (target, self.process, thread, thread_bkpt) = lldbutil.run_to_source_breakpoint(
+            self, "first stop in thread - do a step out", self.main_file)
+
+        main_bkpt = target.BreakpointCreateBySourceRegex('Stop here and do not make a memory thread for thread_1',
+                                                         self.main_file)
+        self.assertEqual(main_bkpt.GetNumLocations(), 1, "Main breakpoint has one location")
+
+        # There should not be an os thread before we load the plugin:
+        self.assertFalse(self.get_os_thread().IsValid(), "No OS thread before loading plugin")
+        
+        # Now load the python OS plug-in which should update the thread list and we should have
+        # an OS plug-in thread overlaying thread_1 with id 0x111111111
+        command = "settings set target.process.python-os-plugin-path '%s'" % python_os_plugin_path
+        self.dbg.HandleCommand(command)
+
+        # Verify our OS plug-in threads showed up
+        os_thread = self.get_os_thread()
+        self.assertTrue(
+            os_thread.IsValid(),
+            "Make sure we added the thread 0x111111111 after we load the python OS plug-in")
+        
+        # Now we are going to step-out.  This should get interrupted by main_bkpt.  We've
+        # set up the OS plugin so at this stop, we have lost the OS thread 0x111111111.
+        # Make sure both of these are true:
+        os_thread.StepOut()
+        
+        stopped_threads = lldbutil.get_threads_stopped_at_breakpoint(self.process, main_bkpt)
+        self.assertEqual(len(stopped_threads), 1, "Stopped at main_bkpt")
+        thread = self.process.GetThreadByID(0x111111111)
+        self.assertFalse(thread.IsValid(), "No thread 0x111111111 on second stop.")
+        
+        # Make sure we still have the thread plans for this thread:
+        # First, don't show unreported threads, that should fail:
+        command = "thread plan list -t 0x111111111"
+        result = lldb.SBCommandReturnObject()
+        interp = self.dbg.GetCommandInterpreter() 
+        interp.HandleCommand(command, result)
+        self.assertFalse(result.Succeeded(), "We found no plans for the unreported thread.")
+        # Now do it again but with the -u flag:
+        command = "thread plan list -u -t 0x111111111"
+        result = lldb.SBCommandReturnObject()
+        interp.HandleCommand(command, result)
+        self.assertTrue(result.Succeeded(), "We found plans for the unreported thread.")
+        
+        if do_prune:
+            # Prune the thread plan and continue, and we will run to exit.
+            interp.HandleCommand("thread plan prune 0x111111111", result)
+            self.assertTrue(result.Succeeded(), "Found the plan for 0x111111111 and pruned it")
+
+            # List again, make sure it doesn't work:
+            command = "thread plan list -u -t 0x111111111"
+            interp.HandleCommand(command, result)
+            self.assertFalse(result.Succeeded(), "We still found plans for the unreported thread.")
+            
+            self.process.Continue()
+            self.assertEqual(self.process.GetState(), lldb.eStateExited, "We exited.")
+        else:
+            # Now we are going to continue, and when we hit the step-out breakpoint, we will
+            # put the OS plugin thread back, lldb will recover its ThreadPlanStack, and
+            # we will stop with a "step-out" reason.
+            self.process.Continue()
+            os_thread = self.get_os_thread()
+            self.assertTrue(os_thread.IsValid(), "The OS thread is back after continue")
+            self.assertTrue("step out" in os_thread.GetStopDescription(100), "Completed step out plan")
+        
+        
diff --git a/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/main.cpp b/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/main.cpp
new file mode 100644 (file)
index 0000000..6fd6b1c
--- /dev/null
@@ -0,0 +1,55 @@
+// This test will present lldb with two threads one of which the test will
+// overlay with an OSPlugin thread.  Then we'll do a step out on the thread_1,
+// but arrange to hit a breakpoint in main before the step out completes. At
+// that point we will not report an OS plugin thread for thread_1. Then we'll
+// run again and hit the step out breakpoint.  Make sure we haven't deleted
+// that, and recognize it.
+
+#include <condition_variable>
+#include <mutex>
+#include <stdio.h>
+#include <thread>
+
+static int g_value = 0; // I don't have access to the real threads in the
+                        // OS Plugin, and I don't want to have to count
+                        // StopID's. So I'm using this value to tell me which
+                        // stop point the program has reached.
+std::mutex g_mutex;
+std::condition_variable g_cv;
+static int g_condition = 0; // Using this as the variable backing g_cv
+                            // to prevent spurious wakeups.
+
+void step_out_of_here() {
+  std::unique_lock<std::mutex> func_lock(g_mutex);
+  // Set a breakpoint:first stop in thread - do a step out.
+  g_condition = 1;
+  g_cv.notify_one();
+  g_cv.wait(func_lock, [&] { return g_condition == 2; });
+}
+
+void *thread_func() {
+  // Do something
+  step_out_of_here();
+
+  // Return
+  return NULL;
+}
+
+int main() {
+  // Lock the mutex so we can block the thread:
+  std::unique_lock<std::mutex> main_lock(g_mutex);
+  // Create the thread
+  std::thread thread_1(thread_func);
+  g_cv.wait(main_lock, [&] { return g_condition == 1; });
+  g_value = 1;
+  g_condition = 2;
+  // Stop here and do not make a memory thread for thread_1.
+  g_cv.notify_one();
+  g_value = 2;
+  main_lock.unlock();
+
+  // Wait for the threads to finish
+  thread_1.join();
+
+  return 0;
+}
diff --git a/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/operating_system.py b/lldb/test/API/functionalities/plugins/python_os_plugin/stepping_plugin_threads/operating_system.py
new file mode 100644 (file)
index 0000000..ff9a573
--- /dev/null
@@ -0,0 +1,62 @@
+#!/usr/bin/python
+
+import lldb
+import struct
+
+
+class OperatingSystemPlugIn(object):
+    """Class that provides a OS plugin that along with the particular code in main.cpp
+       emulates the following scenario:
+             a) We stop in an OS Plugin created thread - which should be thread index 1
+             b) We step-out from that thread
+             c) We hit a breakpoint in another thread, and DON'T produce the OS Plugin thread.
+             d) We continue, and when we hit the step out breakpoint, we again produce the same
+                OS Plugin thread.
+             main.cpp sets values into the global variable g_value, which we use to tell the OS
+             plugin whether to produce the OS plugin thread or not.
+             Since we are always producing an OS plugin thread with a backing thread, we don't
+             need to implement get_register_info or get_register_data.
+    """
+
+    def __init__(self, process):
+        '''Initialization needs a valid lldb.SBProcess object.
+
+        This plug-in will get created after a live process is valid and has stopped for the
+        first time.'''
+        print("Plugin initialized.")
+        self.process = None
+        self.start_stop_id = 0
+        self.g_value = lldb.SBValue()
+        
+        if isinstance(process, lldb.SBProcess) and process.IsValid():
+            self.process = process
+            self.g_value = process.GetTarget().FindFirstGlobalVariable("g_value")
+            if not self.g_value.IsValid():
+                print("Could not find g_value")
+            
+    def create_thread(self, tid, context):
+        print("Called create thread with tid: ", tid)
+        return None
+
+    def get_thread_info(self):
+        g_value = self.g_value.GetValueAsUnsigned()
+        print("Called get_thread_info: g_value: %d"%(g_value))
+        if g_value == 0 or g_value == 2:
+            return [{'tid': 0x111111111,
+                             'name': 'one',
+                             'queue': 'queue1',
+                             'state': 'stopped',
+                             'stop_reason': 'breakpoint',
+                             'core' : 1 }]
+        else:
+            return []
+
+    def get_register_info(self):
+        print ("called get_register_info")
+        return None
+
+    
+    def get_register_data(self, tid):
+        print("Get register data called for tid: %d"%(tid))
+        return None
+
diff --git a/lldb/test/API/functionalities/thread_plan/Makefile b/lldb/test/API/functionalities/thread_plan/Makefile
new file mode 100644 (file)
index 0000000..695335e
--- /dev/null
@@ -0,0 +1,4 @@
+C_SOURCES := main.c
+CFLAGS_EXTRAS := -std=c99
+
+include Makefile.rules
diff --git a/lldb/test/API/functionalities/thread_plan/TestThreadPlanCommands.py b/lldb/test/API/functionalities/thread_plan/TestThreadPlanCommands.py
new file mode 100644 (file)
index 0000000..30fdb21
--- /dev/null
@@ -0,0 +1,164 @@
+"""
+Test that thread plan listing and discarding work.
+"""
+
+
+
+import lldb
+import lldbsuite.test.lldbutil as lldbutil
+from lldbsuite.test.lldbtest import *
+
+
+class TestThreadPlanCommands(TestBase):
+
+    mydir = TestBase.compute_mydir(__file__)
+
+    NO_DEBUG_INFO_TESTCASE = True
+
+    def test_thread_plan_actions(self):
+        self.build()
+        self.main_source_file = lldb.SBFileSpec("main.c")
+        self.thread_plan_test()
+
+    def check_list_output(self, command, active_plans = [], completed_plans = [], discarded_plans = []):
+        # Check the "thread plan list" output against lists of active, completed, and discarded plans.
+        # If all three check arrays are empty, that means the command is expected to fail.
+
+        interp = self.dbg.GetCommandInterpreter()
+        result = lldb.SBCommandReturnObject()
+
+        num_active = len(active_plans)
+        num_completed = len(completed_plans)
+        num_discarded = len(discarded_plans)
+
+        interp.HandleCommand(command, result)
+        if self.TraceOn():
+            print("Command: %s"%(command))
+            print(result.GetOutput())
+
+        if num_active == 0 and num_completed == 0 and num_discarded == 0:
+            self.assertFalse(result.Succeeded(), "command: '%s' succeeded when it should have failed: '%s'"%
+                             (command, result.GetError()))
+            return
+
+        self.assertTrue(result.Succeeded(), "command: '%s' failed: '%s'"%(command, result.GetError()))
+        result_arr = result.GetOutput().splitlines()
+        num_results = len(result_arr)
+
+        # Match the expected number of elements.
+        # Adjust the count for the number of header lines we aren't matching:
+        fudge = 0
+        
+        if num_completed == 0 and num_discarded == 0:
+            # The fudge is 3: Thread header, Active Plan header and base plan
+            fudge = 3
+        elif num_completed == 0 or num_discarded == 0:
+            # The fudge is 4: The above plus either the Completed or Discarded Plan header:
+            fudge = 4
+        else:
+            # The fudge is 5 since we have both headers:
+            fudge = 5
+
+        self.assertEqual(num_results, num_active + num_completed + num_discarded + fudge,
+                             "Too many elements in match arrays")
+            
+        # Now iterate through the results array and pick out the results.
+        result_idx = 0
+        self.assertIn("thread #", result_arr[result_idx], "Found thread header") ; result_idx += 1
+        self.assertIn("Active plan stack", result_arr[result_idx], "Found active header") ; result_idx += 1
+        self.assertIn("Element 0: Base thread plan", result_arr[result_idx], "Found base plan") ; result_idx += 1
+
+        for text in active_plans:
+            self.assertFalse("Completed plan stack" in result_arr[result_idx], "Found Completed header too early.")
+            self.assertIn(text, result_arr[result_idx], "Didn't find active plan: %s"%(text)) ; result_idx += 1
+
+        if len(completed_plans) > 0:
+            self.assertIn("Completed plan stack:", result_arr[result_idx], "Found completed plan stack header") ; result_idx += 1
+            for text in completed_plans:
+                self.assertIn(text, result_arr[result_idx], "Didn't find completed plan: %s"%(text)) ; result_idx += 1
+
+        if len(discarded_plans) > 0:
+            self.assertIn("Discarded plan stack:", result_arr[result_idx], "Found discarded plan stack header") ; result_idx += 1
+            for text in discarded_plans:
+                self.assertIn(text, result_arr[result_idx], "Didn't find discarded plan: %s"%(text)) ; result_idx += 1
+
+
+    def thread_plan_test(self):
+        (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(self,
+                                   "Set a breakpoint here", self.main_source_file)
+
+        # Now set a breakpoint in call_me and step over.  We should have
+        # two public thread plans
+        call_me_bkpt = target.BreakpointCreateBySourceRegex("Set another here", self.main_source_file)
+        self.assertTrue(call_me_bkpt.GetNumLocations() > 0, "Set the breakpoint successfully")
+        thread.StepOver()
+        threads = lldbutil.get_threads_stopped_at_breakpoint(process, call_me_bkpt)
+        self.assertEqual(len(threads), 1, "Hit my breakpoint while stepping over")
+
+        current_id = threads[0].GetIndexID()
+        current_tid = threads[0].GetThreadID()
+        # Run thread plan list without the -i flag:
+        command = "thread plan list %d"%(current_id)
+        self.check_list_output (command, ["Stepping over line main.c"], [])
+
+        # Run thread plan list with the -i flag:
+        command = "thread plan list -i %d"%(current_id)
+        self.check_list_output(command, ["Stepping over line main.c", "Stepping out from"])
+
+        # Run thread plan list providing TID, output should be the same:
+        command = "thread plan list -t %d"%(current_tid)
+        self.check_list_output(command, ["Stepping over line main.c"])
+
+        # Provide both index & tid, and make sure we only print once:
+        command = "thread plan list -t %d %d"%(current_tid, current_id)
+        self.check_list_output(command, ["Stepping over line main.c"])
+
+        # Try a fake TID, and make sure that fails:
+        fake_tid = 0
+        for i in range(100, 10000, 100):
+            fake_tid = current_tid + i
+            thread = process.GetThreadByID(fake_tid)
+            if not thread:
+                break
+        
+        command = "thread plan list -t %d"%(fake_tid)
+        self.check_list_output(command)
+
+        # Now continue, and make sure we printed the completed plan:
+        process.Continue()
+        threads = lldbutil.get_stopped_threads(process, lldb.eStopReasonPlanComplete)
+        self.assertEqual(len(threads), 1, "One thread completed a step")
+        
+        # Run thread plan list - there aren't any private plans at this point:
+        command = "thread plan list %d"%(current_id)
+        self.check_list_output(command, [], ["Stepping over line main.c"])
+
+        # Set another breakpoint that we can run to, to try deleting thread plans.
+        second_step_bkpt = target.BreakpointCreateBySourceRegex("Run here to step over again",
+                                                                self.main_source_file)
+        self.assertTrue(second_step_bkpt.GetNumLocations() > 0, "Set the breakpoint successfully")
+        final_bkpt = target.BreakpointCreateBySourceRegex("Make sure we get here on last continue",
+                                                          self.main_source_file)
+        self.assertTrue(final_bkpt.GetNumLocations() > 0, "Set the breakpoint successfully")
+
+        threads = lldbutil.continue_to_breakpoint(process, second_step_bkpt)
+        self.assertEqual(len(threads), 1, "Hit the second step breakpoint")
+
+        threads[0].StepOver()
+        threads = lldbutil.get_threads_stopped_at_breakpoint(process, call_me_bkpt)
+
+        result = lldb.SBCommandReturnObject()
+        interp = self.dbg.GetCommandInterpreter()
+        interp.HandleCommand("thread plan discard 1", result)
+        self.assertTrue(result.Succeeded(), "Deleted the step over plan: %s"%(result.GetOutput()))
+
+        # Make sure the plan gets listed in the discarded plans:
+        command = "thread plan list %d"%(current_id)
+        self.check_list_output(command, [], [], ["Stepping over line main.c:"])
+
+        process.Continue()
+        threads = lldbutil.get_threads_stopped_at_breakpoint(process, final_bkpt)
+        self.assertEqual(len(threads), 1, "Ran to final breakpoint")
+        threads = lldbutil.get_stopped_threads(process, lldb.eStopReasonPlanComplete)
+        self.assertEqual(len(threads), 0, "Did NOT complete the step over plan")
+
diff --git a/lldb/test/API/functionalities/thread_plan/main.c b/lldb/test/API/functionalities/thread_plan/main.c
new file mode 100644 (file)
index 0000000..74654cb
--- /dev/null
@@ -0,0 +1,16 @@
+#include <stdio.h>
+
+void
+call_me(int value) {
+  printf("called with %d\n", value); // Set another here.
+}
+
+int
+main(int argc, char **argv)
+{
+  call_me(argc); // Set a breakpoint here.
+  printf("This just spaces the two calls\n");
+  call_me(argc); // Run here to step over again.
+  printf("More spacing\n");
+  return 0; // Make sure we get here on last continue
+}