/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "thread_list.h"

#include <backtrace/BacktraceMap.h>
#include <dirent.h>
#include <ScopedLocalRef.h>
#include <ScopedUtfChars.h>
#include <sys/types.h>
#include <unistd.h>

#include <sstream>

#include "android-base/stringprintf.h"

#include "base/histogram-inl.h"
#include "base/mutex-inl.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "debugger.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/reference_processor.h"
#include "jni_internal.h"
#include "lock_word.h"
#include "monitor.h"
#include "native_stack_dump.h"
#include "scoped_thread_state_change-inl.h"
#include "trace.h"
#include "well_known_classes.h"

#if ART_USE_FUTEXES
#include "linux/futex.h"
#include "sys/syscall.h"
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
#endif  // ART_USE_FUTEXES

namespace art {

using android::base::StringPrintf;

static constexpr uint64_t kLongThreadSuspendThreshold = MsToNs(5);
// Use 0 since we want to yield to prevent blocking for an unpredictable amount of time.
static constexpr useconds_t kThreadSuspendInitialSleepUs = 0;
static constexpr useconds_t kThreadSuspendMaxYieldUs = 3000;
static constexpr useconds_t kThreadSuspendMaxSleepUs = 5000;

// Whether we should try to dump the native stack of unattached threads. See commit ed8b723 for
// some history.
// Turned off again. b/29248079
static constexpr bool kDumpUnattachedThreadNativeStackForSigQuit = false;

ThreadList::ThreadList(uint64_t thread_suspend_timeout_ns)
    : suspend_all_count_(0),
      debug_suspend_all_count_(0),
      unregistering_count_(0),
      suspend_all_historam_("suspend all histogram", 16, 64),
      long_suspend_(false),
      thread_suspend_timeout_ns_(thread_suspend_timeout_ns),
      empty_checkpoint_barrier_(new Barrier(0)) {
  // Check that the largest thread id still yields a valid thin-lock word.
  CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1, 0U)));
}

ThreadList::~ThreadList() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  // Detach the current thread if necessary. If we failed to start, there might not be any threads.
  // We need to detach the current thread here in case there's another thread waiting to join with
  // us.
  bool contains = false;
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    contains = Contains(self);
  }
  if (contains) {
    Runtime::Current()->DetachCurrentThread();
  }
  WaitForOtherNonDaemonThreadsToExit();
  // Disable GC and wait for GC to complete in case there are still daemon threads doing
  // allocations.
  gc::Heap* const heap = Runtime::Current()->GetHeap();
  heap->DisableGCForShutdown();
  // In case a GC is in progress, wait for it to finish.
  heap->WaitForGcToComplete(gc::kGcCauseBackground, Thread::Current());
  // TODO: there's an unaddressed race here where a thread may attach during shutdown, see
  //       Thread::Init.
  SuspendAllDaemonThreadsForShutdown();
}

bool ThreadList::Contains(Thread* thread) {
  return find(list_.begin(), list_.end(), thread) != list_.end();
}
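
// Like the Thread* overload above, this is a linear scan of the thread list;
// callers are expected to hold thread_list_lock_.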
bool ThreadList::Contains(pid_t tid) {
  for (const auto& thread : list_) {
    if (thread->GetTid() == tid) {
      return true;
    }
  }
  return false;
}

pid_t ThreadList::GetLockOwner() {
  return Locks::thread_list_lock_->GetExclusiveOwnerTid();
}

void ThreadList::DumpNativeStacks(std::ostream& os) {
  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid()));
  for (const auto& thread : list_) {
    os << "DUMPING THREAD " << thread->GetTid() << "\n";
    DumpNativeStack(os, thread->GetTid(), map.get(), "\t");
    os << "\n";
  }
}

void ThreadList::DumpForSigQuit(std::ostream& os) {
  {
    ScopedObjectAccess soa(Thread::Current());
    // Only print if we have samples.
    if (suspend_all_historam_.SampleSize() > 0) {
      Histogram<uint64_t>::CumulativeData data;
      suspend_all_historam_.CreateHistogram(&data);
      suspend_all_historam_.PrintConfidenceIntervals(os, 0.99, data);  // Dump time to suspend.
    }
  }
  bool dump_native_stack = Runtime::Current()->GetDumpNativeStackOnSigQuit();
  Dump(os, dump_native_stack);
  DumpUnattachedThreads(os, dump_native_stack && kDumpUnattachedThreadNativeStackForSigQuit);
}

static void DumpUnattachedThread(std::ostream& os, pid_t tid, bool dump_native_stack)
    NO_THREAD_SAFETY_ANALYSIS {
  // TODO: No thread safety analysis as DumpState with a null thread won't access fields, should
  // refactor DumpState to avoid skipping analysis.
  Thread::DumpState(os, nullptr, tid);
  DumpKernelStack(os, tid, "  kernel: ", false);
  if (dump_native_stack) {
    DumpNativeStack(os, tid, nullptr, "  native: ");
  }
  os << std::endl;
}

void ThreadList::DumpUnattachedThreads(std::ostream& os, bool dump_native_stack) {
  DIR* d = opendir("/proc/self/task");
  if (!d) {
    return;
  }

  Thread* self = Thread::Current();
  dirent* e;
  while ((e = readdir(d)) != nullptr) {
    char* end;
    pid_t tid = strtol(e->d_name, &end, 10);
    if (!*end) {
      bool contains;
      {
        MutexLock mu(self, *Locks::thread_list_lock_);
        contains = Contains(tid);
      }
      if (!contains) {
        DumpUnattachedThread(os, tid, dump_native_stack);
      }
    }
  }
  closedir(d);
}

// Dump checkpoint timeout in milliseconds. Larger amount on the target, since the device could be
// overloaded with ANR dumps.
static constexpr uint32_t kDumpWaitTimeout = kIsTargetBuild ? 100000 : 20000;

// A closure used by Thread::Dump.
class DumpCheckpoint FINAL : public Closure {
 public:
  DumpCheckpoint(std::ostream* os, bool dump_native_stack)
      : os_(os),
        barrier_(0),
        backtrace_map_(dump_native_stack ? BacktraceMap::Create(getpid()) : nullptr),
        dump_native_stack_(dump_native_stack) {}

  void Run(Thread* thread) OVERRIDE {
    // Note thread and self may not be equal if thread was already suspended at the point of the
    // request.
    Thread* self = Thread::Current();
    CHECK(self != nullptr);
    std::ostringstream local_os;
    {
      ScopedObjectAccess soa(self);
      thread->Dump(local_os, dump_native_stack_, backtrace_map_.get());
    }
    {
      // Use the logging lock to ensure serialization when writing to the common ostream.
      MutexLock mu(self, *Locks::logging_lock_);
      *os_ << local_os.str();
    }
    barrier_.Pass(self);
  }

  void WaitForThreadsToRunThroughCheckpoint(size_t threads_running_checkpoint) {
    Thread* self = Thread::Current();
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    bool timed_out = barrier_.Increment(self, threads_running_checkpoint, kDumpWaitTimeout);
    if (timed_out) {
      // Avoid a recursive abort.
      LOG((kIsDebugBuild && (gAborting == 0)) ? ::android::base::FATAL : ::android::base::ERROR)
          << "Unexpected time out during dump checkpoint.";
    }
  }

 private:
  // The common stream that will accumulate all the dumps.
  std::ostream* const os_;
  // The barrier to be passed through and for the requestor to wait upon.
  Barrier barrier_;
  // A backtrace map, so that all threads use a shared info and don't reacquire/parse separately.
  std::unique_ptr<BacktraceMap> backtrace_map_;
  // Whether we should dump the native stack.
  const bool dump_native_stack_;
};

void ThreadList::Dump(std::ostream& os, bool dump_native_stack) {
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    os << "DALVIK THREADS (" << list_.size() << "):\n";
  }
  if (self != nullptr) {
    DumpCheckpoint checkpoint(&os, dump_native_stack);
    size_t threads_running_checkpoint;
    {
      // Use SOA to prevent deadlocks if multiple threads are calling Dump() at the same time.
      ScopedObjectAccess soa(self);
      threads_running_checkpoint = RunCheckpoint(&checkpoint);
    }
    if (threads_running_checkpoint != 0) {
      checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint);
    }
  } else {
    DumpUnattachedThreads(os, dump_native_stack);
  }
}

void ThreadList::AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2) {
  MutexLock mu(self, *Locks::thread_list_lock_);
  MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
  for (const auto& thread : list_) {
    if (thread != ignore1 && thread != ignore2) {
      CHECK(thread->IsSuspended())
            << "\nUnsuspended thread: <<" << *thread << "\n"
            << "self: <<" << *Thread::Current();
    }
  }
}

#if HAVE_TIMED_RWLOCK
// Attempt to rectify locks so that we dump thread list with required locks before exiting.
NO_RETURN static void UnsafeLogFatalForThreadSuspendAllTimeout() {
  Runtime* runtime = Runtime::Current();
  std::ostringstream ss;
  ss << "Thread suspend timeout\n";
  Locks::mutator_lock_->Dump(ss);
  ss << "\n";
  runtime->GetThreadList()->Dump(ss);
  LOG(FATAL) << ss.str();
  exit(0);
}
#endif

// Unlike suspending all threads where we can wait to acquire the mutator_lock_, suspending an
// individual thread requires polling. delay_us is the requested sleep wait. If delay_us is 0 then
// we use sched_yield instead of calling usleep.
static void ThreadSuspendSleep(useconds_t delay_us) {
  if (delay_us == 0) {
    sched_yield();
  } else {
    usleep(delay_us);
  }
}
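
// Runs the given checkpoint closure once for every attached thread. Runnable threads
// run it themselves at their next suspend point; threads that are already suspended
// have it run on their behalf here, under a temporarily raised suspend count. The
// return value is the size of the thread list, i.e. how many checkpoint executions
// the caller can expect in total (including its own).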
size_t ThreadList::RunCheckpoint(Closure* checkpoint_function, Closure* callback) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);

  std::vector<Thread*> suspended_count_modified_threads;
  size_t count = 0;
  {
    // Call a checkpoint function for each thread, threads which are suspended get their checkpoint
    // manually run.
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    count = list_.size();
    for (const auto& thread : list_) {
      if (thread != self) {
        while (true) {
          if (thread->RequestCheckpoint(checkpoint_function)) {
            // This thread will run its checkpoint some time in the near future.
            break;
          } else {
            // We are probably suspended, try to make sure that we stay suspended.
            // The thread switched back to runnable.
            if (thread->GetState() == kRunnable) {
              // Spurious fail, try again.
              continue;
            }
            thread->ModifySuspendCount(self, +1, nullptr, false);
            suspended_count_modified_threads.push_back(thread);
            break;
          }
        }
      }
    }
    // Run the callback to be called inside this critical section.
    if (callback != nullptr) {
      callback->Run(self);
    }
  }

  // Run the checkpoint on ourself while we wait for threads to suspend.
  checkpoint_function->Run(self);

  // Run the checkpoint on the suspended threads.
  for (const auto& thread : suspended_count_modified_threads) {
    if (!thread->IsSuspended()) {
      if (ATRACE_ENABLED()) {
        std::ostringstream oss;
        thread->ShortDump(oss);
        ATRACE_BEGIN((std::string("Waiting for suspension of thread ") + oss.str()).c_str());
      }
      // Busy wait until the thread is suspended.
      const uint64_t start_time = NanoTime();
      do {
        ThreadSuspendSleep(kThreadSuspendInitialSleepUs);
      } while (!thread->IsSuspended());
      const uint64_t total_delay = NanoTime() - start_time;
      // Shouldn't need to wait for longer than 1000 microseconds.
      constexpr uint64_t kLongWaitThreshold = MsToNs(1);
      ATRACE_END();
      if (UNLIKELY(total_delay > kLongWaitThreshold)) {
        LOG(WARNING) << "Long wait of " << PrettyDuration(total_delay) << " for "
            << *thread << " suspension!";
      }
    }
    // We know for sure that the thread is suspended at this point.
    checkpoint_function->Run(thread);
    {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
      thread->ModifySuspendCount(self, -1, nullptr, false);
    }
  }

  {
    // Imitate ResumeAll, threads may be waiting on Thread::resume_cond_ since we raised their
    // suspend count. Now the suspend_count_ is lowered so we must do the broadcast.
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  return count;
}
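
// An empty checkpoint carries no work of its own; it is purely a synchronization
// point. Once every thread that was runnable at request time has crossed it (by
// decrementing the barrier), the caller knows none of them is still inside a mutator
// heap access that began before the request. Threads seen in a suspended state are
// already outside any such region and do not need to participate.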
void ThreadList::RunEmptyCheckpoint() {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  std::vector<uint32_t> runnable_thread_ids;
  size_t count = 0;
  Barrier* barrier = empty_checkpoint_barrier_.get();
  barrier->Init(self, 0);
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (Thread* thread : list_) {
      if (thread != self) {
        while (true) {
          if (thread->RequestEmptyCheckpoint()) {
            // This thread will run an empty checkpoint (decrement the empty checkpoint barrier)
            // some time in the near future.
            ++count;
            if (kIsDebugBuild) {
              runnable_thread_ids.push_back(thread->GetThreadId());
            }
            break;
          }
          if (thread->GetState() != kRunnable) {
            // It's seen suspended, we are done because it must not be in the middle of a mutator
            // heap access.
            break;
          }
        }
      }
    }
  }

  // Wake up the threads blocking for weak ref access so that they will respond to the empty
  // checkpoint request. Otherwise we will hang as they are blocking in the kRunnable state.
  Runtime::Current()->GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint*/true);
  {
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    uint64_t total_wait_time = 0;
    bool first_iter = true;
    while (true) {
      // Wake up the runnable threads blocked on the mutexes that another thread, which is blocked
      // on a weak ref access, holds (indirectly blocking for weak ref access through another thread
      // and a mutex.) This needs to be done periodically because the thread may be preempted
      // between the CheckEmptyCheckpointFromMutex call and the subsequent futex wait in
      // Mutex::ExclusiveLock, etc. when the wakeup via WakeupToRespondToEmptyCheckpoint
      // arrives. This could cause a *very rare* deadlock, if not repeated. Most of the cases are
      // handled in the first iteration.
      for (BaseMutex* mutex : Locks::expected_mutexes_on_weak_ref_access_) {
        mutex->WakeupToRespondToEmptyCheckpoint();
      }
      static constexpr uint64_t kEmptyCheckpointPeriodicTimeoutMs = 100;  // 100ms
      static constexpr uint64_t kEmptyCheckpointTotalTimeoutMs = 600 * 1000;  // 10 minutes.
      size_t barrier_count = first_iter ? count : 0;
      first_iter = false;  // Don't add to the barrier count from the second iteration on.
      bool timed_out = barrier->Increment(self, barrier_count, kEmptyCheckpointPeriodicTimeoutMs);
      if (!timed_out) {
        break;  // Success.
      }
      // This is a very rare case.
      total_wait_time += kEmptyCheckpointPeriodicTimeoutMs;
      if (kIsDebugBuild && total_wait_time > kEmptyCheckpointTotalTimeoutMs) {
        std::ostringstream ss;
        ss << "Empty checkpoint timeout\n";
        ss << "Barrier count " << barrier->GetCount(self) << "\n";
        ss << "Runnable thread IDs";
        for (uint32_t tid : runnable_thread_ids) {
          ss << " " << tid;
        }
        ss << "\n";
        Locks::mutator_lock_->Dump(ss);
        ss << "\n";
        LOG(FATAL_WITHOUT_ABORT) << ss.str();
        // Some threads in 'runnable_thread_ids' are probably stuck. Try to dump their stacks.
        // Avoid using ThreadList::Dump() initially because it is likely to get stuck as well.
        {
          ScopedObjectAccess soa(self);
          MutexLock mu1(self, *Locks::thread_list_lock_);
          for (Thread* thread : GetList()) {
            uint32_t tid = thread->GetThreadId();
            bool is_in_runnable_thread_ids =
                std::find(runnable_thread_ids.begin(), runnable_thread_ids.end(), tid) !=
                runnable_thread_ids.end();
            if (is_in_runnable_thread_ids &&
                thread->ReadFlag(kEmptyCheckpointRequest)) {
              // Found a runnable thread that hasn't responded to the empty checkpoint request.
              // Assume it's stuck and safe to dump its stack.
              thread->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT),
                           /*dump_native_stack*/ true,
                           /*backtrace_map*/ nullptr,
                           /*force_dump_stack*/ true);
            }
          }
        }
        LOG(FATAL_WITHOUT_ABORT)
            << "Dumped runnable threads that haven't responded to empty checkpoint.";
        // Now use ThreadList::Dump() to dump more threads, noting it may get stuck.
        Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
        LOG(FATAL) << "Dumped all threads.";
      }
    }
  }
}

// Request that a checkpoint function be run on all active (non-suspended)
// threads. Returns the number of successful requests.
size_t ThreadList::RunCheckpointOnRunnableThreads(Closure* checkpoint_function) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  CHECK_NE(self->GetState(), kRunnable);

  size_t count = 0;
  {
    // Call a checkpoint function for each non-suspended thread.
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : list_) {
      if (thread != self) {
        if (thread->RequestCheckpoint(checkpoint_function)) {
          // This thread will run its checkpoint some time in the near future.
          ++count;
        }
      }
    }
  }

  // Return the number of threads that will run the checkpoint function.
  return count;
}

// A checkpoint/suspend-all hybrid to switch thread roots from
// from-space to to-space refs. Used to synchronize threads at a point
// to mark the initiation of marking while maintaining the to-space
// invariant.
size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor,
                                   Closure* flip_callback,
                                   gc::collector::GarbageCollector* collector) {
  TimingLogger::ScopedTiming split("ThreadListFlip", collector->GetTimings());
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  CHECK_NE(self->GetState(), kRunnable);

  collector->GetHeap()->ThreadFlipBegin(self);  // Sync with JNI critical calls.

  // ThreadFlipBegin happens before we suspend all the threads, so it does not count towards the
  // pause.
  const uint64_t suspend_start_time = NanoTime();
  SuspendAllInternal(self, self, nullptr);

  // Run the flip callback for the collector.
  Locks::mutator_lock_->ExclusiveLock(self);
  suspend_all_historam_.AdjustAndAddValue(NanoTime() - suspend_start_time);
  flip_callback->Run(self);
  Locks::mutator_lock_->ExclusiveUnlock(self);
  collector->RegisterPause(NanoTime() - suspend_start_time);

  // Resume runnable threads.
  size_t runnable_thread_count = 0;
  std::vector<Thread*> other_threads;
  {
    TimingLogger::ScopedTiming split2("ResumeRunnableThreads", collector->GetTimings());
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    --suspend_all_count_;
    for (const auto& thread : list_) {
      // Set the flip function for all threads because Thread::DumpState/DumpJavaStack() (invoked
      // by a checkpoint) may cause the flip function to be run for a runnable/suspended thread
      // before a runnable thread runs it for itself or we run it for a suspended thread below.
      thread->SetFlipFunction(thread_flip_visitor);
      if (thread == self) {
        continue;
      }
      // Resume early the threads that were runnable but are suspended just for this thread flip or
      // about to transition from non-runnable (eg. kNative at the SOA entry in a JNI function) to
      // runnable (both cases waiting inside Thread::TransitionFromSuspendedToRunnable), or waiting
      // for the thread flip to end at the JNI critical section entry (kWaitingForGcThreadFlip).
      ThreadState state = thread->GetState();
      if ((state == kWaitingForGcThreadFlip || thread->IsTransitioningToRunnable()) &&
          thread->GetSuspendCount() == 1) {
        // The thread will resume right after the broadcast.
        thread->ModifySuspendCount(self, -1, nullptr, false);
        ++runnable_thread_count;
      } else {
        other_threads.push_back(thread);
      }
    }
    Thread::resume_cond_->Broadcast(self);
  }

  collector->GetHeap()->ThreadFlipEnd(self);

  // Run the closure on the other threads and let them resume.
  {
    TimingLogger::ScopedTiming split3("FlipOtherThreads", collector->GetTimings());
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    for (const auto& thread : other_threads) {
      Closure* flip_func = thread->GetFlipFunction();
      if (flip_func != nullptr) {
        flip_func->Run(thread);
      }
    }
    // Run it for self.
    Closure* flip_func = self->GetFlipFunction();
    if (flip_func != nullptr) {
      flip_func->Run(self);
    }
  }

  // Resume other threads.
  {
    TimingLogger::ScopedTiming split4("ResumeOtherThreads", collector->GetTimings());
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : other_threads) {
      thread->ModifySuspendCount(self, -1, nullptr, false);
    }
    Thread::resume_cond_->Broadcast(self);
  }

  return runnable_thread_count + other_threads.size() + 1;  // +1 for self.
}
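
// Suspends all threads and acquires exclusive ownership of the mutator lock. On
// builds with timed rwlocks, a lock timeout is fatal unless long_suspend_ indicates
// that a long suspension is in effect, in which case the acquisition is retried.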
void ThreadList::SuspendAll(const char* cause, bool long_suspend) {
  Thread* self = Thread::Current();

  if (self != nullptr) {
    VLOG(threads) << *self << " SuspendAll for " << cause << " starting...";
  } else {
    VLOG(threads) << "Thread[null] SuspendAll for " << cause << " starting...";
  }
  {
    ScopedTrace trace("Suspending mutator threads");
    const uint64_t start_time = NanoTime();

    SuspendAllInternal(self, self);
    // All threads are known to have suspended (but a thread may still own the mutator lock).
    // Make sure this thread grabs exclusive access to the mutator lock and its protected data.
#if HAVE_TIMED_RWLOCK
    while (true) {
      if (Locks::mutator_lock_->ExclusiveLockWithTimeout(self,
                                                         NsToMs(thread_suspend_timeout_ns_),
                                                         0)) {
        break;
      } else if (!long_suspend_) {
        // Reading long_suspend without the mutator lock is slightly racy, in some rare cases, this
        // could result in a thread suspend timeout.
        // Timeout if we wait more than thread_suspend_timeout_ns_ nanoseconds.
        UnsafeLogFatalForThreadSuspendAllTimeout();
      }
    }
#else
    Locks::mutator_lock_->ExclusiveLock(self);
#endif

    long_suspend_ = long_suspend;

    const uint64_t end_time = NanoTime();
    const uint64_t suspend_time = end_time - start_time;
    suspend_all_historam_.AdjustAndAddValue(suspend_time);
    if (suspend_time > kLongThreadSuspendThreshold) {
      LOG(WARNING) << "Suspending all threads took: " << PrettyDuration(suspend_time);
    }

    if (kDebugLocking) {
      // Debug check that all threads are suspended.
      AssertThreadsAreSuspended(self, self);
    }
  }
  ATRACE_BEGIN((std::string("Mutator threads suspended for ") + cause).c_str());

  if (self != nullptr) {
    VLOG(threads) << *self << " SuspendAll complete";
  } else {
    VLOG(threads) << "Thread[null] SuspendAll complete";
  }
}

// Ensures all threads running Java suspend and that those not running Java don't start.
// Debugger thread might be set to kRunnable for a short period of time after the
// SuspendAllInternal. This is safe because it will be set back to suspended state before
// the SuspendAll returns.
void ThreadList::SuspendAllInternal(Thread* self,
                                    Thread* ignore1,
                                    Thread* ignore2,
                                    bool debug_suspend) {
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  if (kDebugLocking && self != nullptr) {
    CHECK_NE(self->GetState(), kRunnable);
  }

  // First request that all threads suspend, then wait for them to suspend before
  // returning. This suspension scheme also relies on other behaviour:
  // 1. Threads cannot be deleted while they are suspended or have a suspend-
  //    request flag set - (see Unregister() below).
  // 2. When threads are created, they are created in a suspended state (actually
  //    kNative) and will never begin executing Java code without first checking
  //    the suspend-request flag.

  // The atomic counter for number of threads that need to pass the barrier.
  AtomicInteger pending_threads;
  uint32_t num_ignored = 0;
  if (ignore1 != nullptr) {
    ++num_ignored;
  }
  if (ignore2 != nullptr && ignore1 != ignore2) {
    ++num_ignored;
  }
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    ++suspend_all_count_;
    if (debug_suspend) {
      ++debug_suspend_all_count_;
    }
    pending_threads.StoreRelaxed(list_.size() - num_ignored);
    // Increment everybody's suspend count (except those that should be ignored).
    for (const auto& thread : list_) {
      if (thread == ignore1 || thread == ignore2) {
        continue;
      }
      VLOG(threads) << "requesting thread suspend: " << *thread;
      thread->ModifySuspendCount(self, +1, &pending_threads, debug_suspend);

      // Must install the pending_threads counter first, then check thread->IsSuspend() and clear
      // the counter. Otherwise there's a race with Thread::TransitionFromRunnableToSuspended()
      // that can lead a thread to miss a call to PassActiveSuspendBarriers().
      if (thread->IsSuspended()) {
        // Only clear the counter for the current thread.
        thread->ClearSuspendBarrier(&pending_threads);
        pending_threads.FetchAndSubSequentiallyConsistent(1);
      }
    }
  }

  // Wait for the barrier to be passed by all runnable threads. This wait
  // is done with a timeout so that we can detect problems.
#if ART_USE_FUTEXES
  timespec wait_timeout;
  InitTimeSpec(false, CLOCK_MONOTONIC, NsToMs(thread_suspend_timeout_ns_), 0, &wait_timeout);
#endif
  const uint64_t start_time = NanoTime();
  while (true) {
    int32_t cur_val = pending_threads.LoadRelaxed();
    if (LIKELY(cur_val > 0)) {
#if ART_USE_FUTEXES
      if (futex(pending_threads.Address(), FUTEX_WAIT, cur_val, &wait_timeout, nullptr, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          if (errno == ETIMEDOUT) {
            LOG(::android::base::FATAL)
                << "Timed out waiting for threads to suspend, waited for "
                << PrettyDuration(NanoTime() - start_time);
          } else {
            PLOG(FATAL) << "futex wait failed for SuspendAllInternal()";
          }
        }
      }  // else re-check pending_threads in the next iteration (this may be a spurious wake-up).
#else
      // Spin wait. This is likely to be slow, but on most architecture ART_USE_FUTEXES is set.
      UNUSED(start_time);
#endif
    } else {
      CHECK_EQ(cur_val, 0);
      break;
    }
  }
}
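
// Undoes SuspendAll: drops exclusive ownership of the mutator lock, removes the
// suspend count contribution added by SuspendAll from every thread, and wakes
// everyone waiting on resume_cond_.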
void ThreadList::ResumeAll() {
  Thread* self = Thread::Current();

  if (self != nullptr) {
    VLOG(threads) << *self << " ResumeAll starting";
  } else {
    VLOG(threads) << "Thread[null] ResumeAll starting";
  }

  ATRACE_END();

  ScopedTrace trace("Resuming mutator threads");

  if (kDebugLocking) {
    // Debug check that all threads are suspended.
    AssertThreadsAreSuspended(self, self);
  }

  long_suspend_ = false;

  Locks::mutator_lock_->ExclusiveUnlock(self);
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    --suspend_all_count_;
    // Decrement the suspend counts for all threads.
    for (const auto& thread : list_) {
      if (thread == self) {
        continue;
      }
      thread->ModifySuspendCount(self, -1, nullptr, false);
    }

    // Broadcast a notification to all suspended threads, some or all of
    // which may choose to wake up. No need to wait for them.
    if (self != nullptr) {
      VLOG(threads) << *self << " ResumeAll waking others";
    } else {
      VLOG(threads) << "Thread[null] ResumeAll waking others";
    }
    Thread::resume_cond_->Broadcast(self);
  }

  if (self != nullptr) {
    VLOG(threads) << *self << " ResumeAll complete";
  } else {
    VLOG(threads) << "Thread[null] ResumeAll complete";
  }
}

void ThreadList::Resume(Thread* thread, bool for_debugger) {
  // This assumes there was an ATRACE_BEGIN when we suspended the thread.
  ATRACE_END();

  Thread* self = Thread::Current();
  DCHECK_NE(thread, self);
  VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") starting..."
      << (for_debugger ? " (debugger)" : "");

  {
    // To check Contains.
    MutexLock mu(self, *Locks::thread_list_lock_);
    // To check IsSuspended.
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    DCHECK(thread->IsSuspended());
    if (!Contains(thread)) {
      // We only expect threads within the thread-list to have been suspended otherwise we can't
      // stop such threads from delete-ing themselves.
      LOG(ERROR) << "Resume(" << reinterpret_cast<void*>(thread)
          << ") thread not within thread list";
      return;
    }
    thread->ModifySuspendCount(self, -1, nullptr, for_debugger);
  }

  {
    VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") waking others";
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") complete";
}

static void ThreadSuspendByPeerWarning(Thread* self,
                                       LogSeverity severity,
                                       const char* message,
                                       jobject peer) {
  JNIEnvExt* env = self->GetJniEnv();
  ScopedLocalRef<jstring>
      scoped_name_string(env, static_cast<jstring>(env->GetObjectField(
          peer, WellKnownClasses::java_lang_Thread_name)));
  ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
  if (scoped_name_chars.c_str() == nullptr) {
    LOG(severity) << message << ": " << peer;
    env->ExceptionClear();
  } else {
    LOG(severity) << message << ": " << peer << ":" << scoped_name_chars.c_str();
  }
}

Thread* ThreadList::SuspendThreadByPeer(jobject peer,
                                        bool request_suspension,
                                        bool debug_suspension,
                                        bool* timed_out) {
  const uint64_t start_time = NanoTime();
  useconds_t sleep_us = kThreadSuspendInitialSleepUs;
  *timed_out = false;
  Thread* const self = Thread::Current();
  Thread* suspended_thread = nullptr;
  VLOG(threads) << "SuspendThreadByPeer starting";
  while (true) {
    Thread* thread;
    {
      // Note: this will transition to runnable and potentially suspend. We ensure only one thread
      // is requesting another suspend, to avoid deadlock, by requiring this function be called
      // holding Locks::thread_list_suspend_thread_lock_. Its important this thread suspend rather
      // than request thread suspension, to avoid potential cycles in threads requesting each other
      // suspend.
      ScopedObjectAccess soa(self);
      MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
      thread = Thread::FromManagedThread(soa, peer);
      if (thread == nullptr) {
        if (suspended_thread != nullptr) {
          MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
          // If we incremented the suspend count but the thread reset its peer, we need to
          // re-decrement it since it is shutting down and may deadlock the runtime in
          // ThreadList::WaitForOtherNonDaemonThreadsToExit.
          suspended_thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
        }
        ThreadSuspendByPeerWarning(self,
                                   ::android::base::WARNING,
                                   "No such thread for suspend",
                                   peer);
        return nullptr;
      }
      if (!Contains(thread)) {
        CHECK(suspended_thread == nullptr);
        VLOG(threads) << "SuspendThreadByPeer failed for unattached thread: "
            << reinterpret_cast<void*>(thread);
        return nullptr;
      }
      VLOG(threads) << "SuspendThreadByPeer found thread: " << *thread;
      {
        MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
        if (request_suspension) {
          if (self->GetSuspendCount() > 0) {
            // We hold the suspend count lock but another thread is trying to suspend us. Its not
            // safe to try to suspend another thread in case we get a cycle. Start the loop again
            // which will allow this thread to be suspended.
            continue;
          }
          CHECK(suspended_thread == nullptr);
          suspended_thread = thread;
          suspended_thread->ModifySuspendCount(self, +1, nullptr, debug_suspension);
          request_suspension = false;
        } else {
          // If the caller isn't requesting suspension, a suspension should have already occurred.
          CHECK_GT(thread->GetSuspendCount(), 0);
        }
        // IsSuspended on the current thread will fail as the current thread is changed into
        // Runnable above. As the suspend count is now raised if this is the current thread
        // it will self suspend on transition to Runnable, making it hard to work with. It's simpler
        // to just explicitly handle the current thread in the callers to this code.
        CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
        // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
        // count, or else we've waited and it has self suspended) or is the current thread, we're
        // done.
        if (thread->IsSuspended()) {
          VLOG(threads) << "SuspendThreadByPeer thread suspended: " << *thread;
          if (ATRACE_ENABLED()) {
            std::string name;
            thread->GetThreadName(name);
            ATRACE_BEGIN(StringPrintf("SuspendThreadByPeer suspended %s for peer=%p", name.c_str(),
                                      peer).c_str());
          }
          return thread;
        }
        const uint64_t total_delay = NanoTime() - start_time;
        if (total_delay >= thread_suspend_timeout_ns_) {
          ThreadSuspendByPeerWarning(self,
                                     ::android::base::FATAL,
                                     "Thread suspension timed out",
                                     peer);
          if (suspended_thread != nullptr) {
            CHECK_EQ(suspended_thread, thread);
            suspended_thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
          }
          *timed_out = true;
          return nullptr;
        } else if (sleep_us == 0 &&
            total_delay > static_cast<uint64_t>(kThreadSuspendMaxYieldUs) * 1000) {
          // We have spun for kThreadSuspendMaxYieldUs time, switch to sleeps to prevent
          // excessive CPU usage.
          sleep_us = kThreadSuspendMaxYieldUs / 2;
        }
      }
      // Release locks and come out of runnable state.
    }
    VLOG(threads) << "SuspendThreadByPeer waiting to allow thread chance to suspend";
    ThreadSuspendSleep(sleep_us);
    // This may stay at 0 if sleep_us == 0, but this is WAI since we want to avoid using usleep at
    // all if possible. This shouldn't be an issue since time to suspend should always be small.
    sleep_us = std::min(sleep_us * 2, kThreadSuspendMaxSleepUs);
  }
}

static void ThreadSuspendByThreadIdWarning(LogSeverity severity,
                                           const char* message,
                                           uint32_t thread_id) {
  LOG(severity) << StringPrintf("%s: %d", message, thread_id);
}

Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id,
                                            bool debug_suspension,
                                            bool* timed_out) {
  const uint64_t start_time = NanoTime();
  useconds_t sleep_us = kThreadSuspendInitialSleepUs;
  *timed_out = false;
  Thread* suspended_thread = nullptr;
  Thread* const self = Thread::Current();
  CHECK_NE(thread_id, kInvalidThreadId);
  VLOG(threads) << "SuspendThreadByThreadId starting";
  while (true) {
    {
      // Note: this will transition to runnable and potentially suspend. We ensure only one thread
      // is requesting another suspend, to avoid deadlock, by requiring this function be called
      // holding Locks::thread_list_suspend_thread_lock_. Its important this thread suspend rather
      // than request thread suspension, to avoid potential cycles in threads requesting each other
      // suspend.
      ScopedObjectAccess soa(self);
      MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
      Thread* thread = nullptr;
      for (const auto& it : list_) {
        if (it->GetThreadId() == thread_id) {
          thread = it;
          break;
        }
      }
      if (thread == nullptr) {
        CHECK(suspended_thread == nullptr) << "Suspended thread " << suspended_thread
            << " no longer in thread list";
        // There's a race in inflating a lock and the owner giving up ownership and then dying.
        ThreadSuspendByThreadIdWarning(::android::base::WARNING,
                                       "No such thread id for suspend",
                                       thread_id);
        return nullptr;
      }
      VLOG(threads) << "SuspendThreadByThreadId found thread: " << *thread;
      DCHECK(Contains(thread));
      {
        MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
        if (suspended_thread == nullptr) {
          if (self->GetSuspendCount() > 0) {
            // We hold the suspend count lock but another thread is trying to suspend us. Its not
            // safe to try to suspend another thread in case we get a cycle. Start the loop again
            // which will allow this thread to be suspended.
            continue;
          }
          thread->ModifySuspendCount(self, +1, nullptr, debug_suspension);
          suspended_thread = thread;
        } else {
          CHECK_EQ(suspended_thread, thread);
          // If the caller isn't requesting suspension, a suspension should have already occurred.
          CHECK_GT(thread->GetSuspendCount(), 0);
        }
        // IsSuspended on the current thread will fail as the current thread is changed into
        // Runnable above. As the suspend count is now raised if this is the current thread
        // it will self suspend on transition to Runnable, making it hard to work with. It's simpler
        // to just explicitly handle the current thread in the callers to this code.
        CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
        // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
        // count, or else we've waited and it has self suspended) or is the current thread, we're
        // done.
        if (thread->IsSuspended()) {
          if (ATRACE_ENABLED()) {
            std::string name;
            thread->GetThreadName(name);
            ATRACE_BEGIN(StringPrintf("SuspendThreadByThreadId suspended %s id=%d",
                                      name.c_str(), thread_id).c_str());
          }
          VLOG(threads) << "SuspendThreadByThreadId thread suspended: " << *thread;
          return thread;
        }
        const uint64_t total_delay = NanoTime() - start_time;
        if (total_delay >= thread_suspend_timeout_ns_) {
          ThreadSuspendByThreadIdWarning(::android::base::WARNING,
                                         "Thread suspension timed out",
                                         thread_id);
          if (suspended_thread != nullptr) {
            thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
          }
          *timed_out = true;
          return nullptr;
        } else if (sleep_us == 0 &&
            total_delay > static_cast<uint64_t>(kThreadSuspendMaxYieldUs) * 1000) {
          // We have spun for kThreadSuspendMaxYieldUs time, switch to sleeps to prevent
          // excessive CPU usage.
          sleep_us = kThreadSuspendMaxYieldUs / 2;
        }
      }
      // Release locks and come out of runnable state.
    }
    VLOG(threads) << "SuspendThreadByThreadId waiting to allow thread chance to suspend";
    ThreadSuspendSleep(sleep_us);
    sleep_us = std::min(sleep_us * 2, kThreadSuspendMaxSleepUs);
  }
}
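
// Linear scan of the thread list; callers are expected to hold thread_list_lock_.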
Thread* ThreadList::FindThreadByThreadId(uint32_t thread_id) {
  for (const auto& thread : list_) {
    if (thread->GetThreadId() == thread_id) {
      return thread;
    }
  }
  return nullptr;
}

void ThreadList::SuspendAllForDebugger() {
  Thread* self = Thread::Current();
  Thread* debug_thread = Dbg::GetDebugThread();

  VLOG(threads) << *self << " SuspendAllForDebugger starting...";

  SuspendAllInternal(self, self, debug_thread, true);
  // Block on the mutator lock until all Runnable threads release their share of access then
  // immediately unlock again.
#if HAVE_TIMED_RWLOCK
  // Timeout if we wait more than 30 seconds.
  if (!Locks::mutator_lock_->ExclusiveLockWithTimeout(self, 30 * 1000, 0)) {
    UnsafeLogFatalForThreadSuspendAllTimeout();
  } else {
    Locks::mutator_lock_->ExclusiveUnlock(self);
  }
#else
  Locks::mutator_lock_->ExclusiveLock(self);
  Locks::mutator_lock_->ExclusiveUnlock(self);
#endif
  // Disabled for the following race condition:
  // Thread 1 calls SuspendAllForDebugger, gets preempted after pulsing the mutator lock.
  // Thread 2 calls SuspendAll and SetStateUnsafe (perhaps from Dbg::Disconnected).
  // Thread 1 fails assertion that all threads are suspended due to thread 2 being in a runnable
  // state (from SetStateUnsafe).
  // AssertThreadsAreSuspended(self, self, debug_thread);

  VLOG(threads) << *self << " SuspendAllForDebugger complete";
}

void ThreadList::SuspendSelfForDebugger() {
  Thread* const self = Thread::Current();
  self->SetReadyForDebugInvoke(true);

  // The debugger thread must not suspend itself due to debugger activity!
  Thread* debug_thread = Dbg::GetDebugThread();
  CHECK(self != debug_thread);
  CHECK_NE(self->GetState(), kRunnable);
  Locks::mutator_lock_->AssertNotHeld(self);

  // The debugger may have detached while we were executing an invoke request. In that case, we
  // must not suspend ourself.
  DebugInvokeReq* pReq = self->GetInvokeReq();
  const bool skip_thread_suspension = (pReq != nullptr && !Dbg::IsDebuggerActive());
  if (!skip_thread_suspension) {
    // Collisions with other suspends aren't really interesting. We want
    // to ensure that we're the only one fiddling with the suspend count
    // though.
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    self->ModifySuspendCount(self, +1, nullptr, true);
    CHECK_GT(self->GetSuspendCount(), 0);

    VLOG(threads) << *self << " self-suspending (debugger)";
  } else {
    // We must no longer be subject to debugger suspension.
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    CHECK_EQ(self->GetDebugSuspendCount(), 0) << "Debugger detached without resuming us";

    VLOG(threads) << *self << " not self-suspending because debugger detached during invoke";
  }

  // If the debugger requested an invoke, we need to send the reply and clear the request.
  if (pReq != nullptr) {
    Dbg::FinishInvokeMethod(pReq);
    self->ClearDebugInvokeReq();
    pReq = nullptr;  // object has been deleted, clear it for safety.
  }

  // Tell JDWP that we've completed suspension. The JDWP thread can't
  // tell us to resume before we're fully asleep because we hold the
  // suspend count lock.
  Dbg::ClearWaitForEventThread();

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    while (self->GetSuspendCount() != 0) {
      Thread::resume_cond_->Wait(self);
      if (self->GetSuspendCount() != 0) {
        // The condition was signaled but we're still suspended. This
        // can happen when we suspend then resume all threads to
        // update instrumentation or compute monitor info. This can
        // also happen if the debugger lets go while a SIGQUIT thread
        // dump event is pending (assuming SignalCatcher was resumed for
        // just long enough to try to grab the thread-suspend lock).
        VLOG(jdwp) << *self << " still suspended after undo "
                   << "(suspend count=" << self->GetSuspendCount() << ", "
                   << "debug suspend count=" << self->GetDebugSuspendCount() << ")";
      }
    }
    CHECK_EQ(self->GetSuspendCount(), 0);
  }

  self->SetReadyForDebugInvoke(false);
  VLOG(threads) << *self << " self-reviving (debugger)";
}

void ThreadList::ResumeAllForDebugger() {
  Thread* self = Thread::Current();
  Thread* debug_thread = Dbg::GetDebugThread();

  VLOG(threads) << *self << " ResumeAllForDebugger starting...";

  // Threads can't resume if we exclusively hold the mutator lock.
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);

  {
    MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
    {
      MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
      // Update global suspend all state for attaching threads.
      DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
      if (debug_suspend_all_count_ > 0) {
        --suspend_all_count_;
        --debug_suspend_all_count_;
      } else {
        // We've been asked to resume all threads without being asked to
        // suspend them all before. That may happen if a debugger tries
        // to resume some suspended threads (with suspend count == 1)
        // at once with a VirtualMachine.Resume command. Let's print a
        // warning to report this behavior.
        LOG(WARNING) << "Debugger attempted to resume all threads without "
                     << "having suspended them all before.";
      }
      // Decrement everybody's suspend count (except our own).
      for (const auto& thread : list_) {
        if (thread == self || thread == debug_thread) {
          continue;
        }
        if (thread->GetDebugSuspendCount() == 0) {
          // This thread may have been individually resumed with ThreadReference.Resume.
          continue;
        }
        VLOG(threads) << "requesting thread resume: " << *thread;
        thread->ModifySuspendCount(self, -1, nullptr, true);
      }
    }
  }

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << *self << " ResumeAllForDebugger complete";
}

void ThreadList::UndoDebuggerSuspensions() {
  Thread* self = Thread::Current();

  VLOG(threads) << *self << " UndoDebuggerSuspensions starting";

  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    suspend_all_count_ -= debug_suspend_all_count_;
    debug_suspend_all_count_ = 0;
    // Update running threads.
    for (const auto& thread : list_) {
      if (thread == self || thread->GetDebugSuspendCount() == 0) {
        continue;
      }
      thread->ModifySuspendCount(self, -thread->GetDebugSuspendCount(), nullptr, true);
    }
  }

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << "UndoDebuggerSuspensions(" << *self << ") complete";
}

void ThreadList::WaitForOtherNonDaemonThreadsToExit() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  while (true) {
    {
      // No more threads can be born after we start to shutdown.
      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
      CHECK(Runtime::Current()->IsShuttingDownLocked());
      CHECK_EQ(Runtime::Current()->NumberOfThreadsBeingBorn(), 0U);
    }
    MutexLock mu(self, *Locks::thread_list_lock_);
    // Also wait for any threads that are unregistering to finish. This is required so that no
    // threads access the thread list after it is deleted. TODO: This may not work for user daemon
    // threads since they could unregister at the wrong time.
    bool done = unregistering_count_ == 0;
    if (done) {
      for (const auto& thread : list_) {
        if (thread != self && !thread->IsDaemon()) {
          done = false;
          break;
        }
      }
    }
    if (done) {
      break;
    }
    // Wait for another thread to exit before re-checking.
    Locks::thread_exit_cond_->Wait(self);
  }
}
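
// Suspends any daemon threads that are still attached and swaps their JNI function
// tables for sleep-forever stubs, so they cannot re-enter runtime code once shutdown
// starts tearing the runtime down.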
void ThreadList::SuspendAllDaemonThreadsForShutdown() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  Thread* self = Thread::Current();
  size_t daemons_left = 0;
  {
    // Tell all the daemons it's time to suspend.
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : list_) {
      // This is only run after all non-daemon threads have exited, so the remainder should all be
      // daemons.
      CHECK(thread->IsDaemon()) << *thread;
      if (thread != self) {
        thread->ModifySuspendCount(self, +1, nullptr, false);
        ++daemons_left;
      }
      // We are shutting down the runtime, set the JNI functions of all the JNIEnvs to be
      // the sleep forever one.
      thread->GetJniEnv()->SetFunctionsToRuntimeShutdownFunctions();
    }
  }
  // If we have any daemons left, wait 200ms to ensure they are not stuck in a place where they
  // are about to access runtime state and are not in a runnable state. Examples: Monitor code
  // or waking up from a condition variable. TODO: Try and see if there is a better way to wait
  // for daemon threads to be in a blocked state.
  if (daemons_left > 0) {
    static constexpr size_t kDaemonSleepTime = 200 * 1000;
    usleep(kDaemonSleepTime);
  }
  // Give the threads a chance to suspend, complaining if they're slow.
  bool have_complained = false;
  static constexpr size_t kTimeoutMicroseconds = 2000 * 1000;
  static constexpr size_t kSleepMicroseconds = 1000;
  for (size_t i = 0; i < kTimeoutMicroseconds / kSleepMicroseconds; ++i) {
    bool all_suspended = true;
    {
      MutexLock mu(self, *Locks::thread_list_lock_);
      for (const auto& thread : list_) {
        if (thread != self && thread->GetState() == kRunnable) {
          if (!have_complained) {
            LOG(WARNING) << "daemon thread not yet suspended: " << *thread;
            have_complained = true;
          }
          all_suspended = false;
        }
      }
    }
    if (all_suspended) {
      return;
    }
    usleep(kSleepMicroseconds);
  }
  LOG(WARNING) << "timed out suspending all daemon threads";
}
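
// Adds the calling thread to the thread list, first replaying any SuspendAll and
// debugger suspend-all requests already in flight so the new thread's suspend count
// matches the global state.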
void ThreadList::Register(Thread* self) {
  DCHECK_EQ(self, Thread::Current());

  if (VLOG_IS_ON(threads)) {
    std::ostringstream oss;
    self->ShortDump(oss);  // We don't hold the mutator_lock_ yet and so cannot call Dump.
    LOG(INFO) << "ThreadList::Register() " << *self << "\n" << oss.str();
  }

  // Atomically add self to the thread list and make its thread_suspend_count_ reflect ongoing
  // SuspendAll requests.
  MutexLock mu(self, *Locks::thread_list_lock_);
  MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
  CHECK_GE(suspend_all_count_, debug_suspend_all_count_);
  // Modify suspend count in increments of 1 to maintain invariants in ModifySuspendCount. While
  // this isn't particularly efficient the suspend counts are most commonly 0 or 1.
  for (int delta = debug_suspend_all_count_; delta > 0; delta--) {
    self->ModifySuspendCount(self, +1, nullptr, true);
  }
  for (int delta = suspend_all_count_ - debug_suspend_all_count_; delta > 0; delta--) {
    self->ModifySuspendCount(self, +1, nullptr, false);
  }
  CHECK(!Contains(self));
  list_.push_back(self);
  if (kUseReadBarrier) {
    // Initialize according to the state of the CC collector.
    bool is_gc_marking =
        Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking();
    self->SetIsGcMarkingAndUpdateEntrypoints(is_gc_marking);
    bool weak_ref_access_enabled =
        Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsWeakRefAccessEnabled();
    self->SetWeakRefAccessEnabled(weak_ref_access_enabled);
  }
}

void ThreadList::Unregister(Thread* self) {
  DCHECK_EQ(self, Thread::Current());
  CHECK_NE(self->GetState(), kRunnable);
  Locks::mutator_lock_->AssertNotHeld(self);

  VLOG(threads) << "ThreadList::Unregister() " << *self;

  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    ++unregistering_count_;
  }

  // Any time-consuming destruction, plus anything that can call back into managed code or
  // suspend and so on, must happen at this point, and not in ~Thread. The self->Destroy is what
  // causes the threads to join. It is important to do this after incrementing unregistering_count_
  // since we want the runtime to wait for the daemon threads to exit before deleting the thread
  // list.
  self->Destroy();

  // If tracing, remember thread id and name before thread exits.
  Trace::StoreExitingThreadInfo(self);

  uint32_t thin_lock_id = self->GetThreadId();
  while (true) {
    // Remove and delete the Thread* while holding the thread_list_lock_ and
    // thread_suspend_count_lock_ so that the unregistering thread cannot be suspended.
    // Note: deliberately not using MutexLock that could hold a stale self pointer.
    MutexLock mu(self, *Locks::thread_list_lock_);
    if (!Contains(self)) {
      std::string thread_name;
      self->GetThreadName(thread_name);
      std::ostringstream os;
      DumpNativeStack(os, GetTid(), nullptr, "  native: ", nullptr);
      LOG(ERROR) << "Request to unregister unattached thread " << thread_name << "\n" << os.str();
      break;
    } else {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
      if (!self->IsSuspended()) {
        list_.remove(self);
        break;
      }
    }
    // We failed to remove the thread due to a suspend request, loop and try again.
  }
  delete self;

  // Release the thread ID after the thread is finished and deleted to avoid cases where we can
  // temporarily have multiple threads with the same thread id. When this occurs, it causes
  // problems in FindThreadByThreadId / SuspendThreadByThreadId.
  ReleaseThreadId(nullptr, thin_lock_id);

  // Clear the TLS data, so that the underlying native thread is recognizably detached.
  // (It may wish to reattach later.)
#ifdef ART_TARGET_ANDROID
  __get_tls()[TLS_SLOT_ART_THREAD_SELF] = nullptr;
#else
  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, nullptr), "detach self");
#endif

  // Signal that a thread just detached.
  MutexLock mu(nullptr, *Locks::thread_list_lock_);
  --unregistering_count_;
  Locks::thread_exit_cond_->Broadcast(nullptr);
}

void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
  for (const auto& thread : list_) {
    callback(thread, context);
  }
}
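
// Visits roots only for threads that are already suspended (or the calling thread
// itself). Each candidate's suspend count is raised while it is on the visit list so
// it cannot resume mid-visit, and is restored afterwards.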
void ThreadList::VisitRootsForSuspendedThreads(RootVisitor* visitor) {
  Thread* const self = Thread::Current();
  std::vector<Thread*> threads_to_visit;

  // Tell threads to suspend and copy them into list.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (Thread* thread : list_) {
      thread->ModifySuspendCount(self, +1, nullptr, false);
      if (thread == self || thread->IsSuspended()) {
        threads_to_visit.push_back(thread);
      } else {
        thread->ModifySuspendCount(self, -1, nullptr, false);
      }
    }
  }

  // Visit roots without holding thread_list_lock_ and thread_suspend_count_lock_ to prevent lock
  // order violations.
  for (Thread* thread : threads_to_visit) {
    thread->VisitRoots(visitor);
  }

  // Restore suspend counts.
  {
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (Thread* thread : threads_to_visit) {
      thread->ModifySuspendCount(self, -1, nullptr, false);
    }
  }
}

void ThreadList::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) const {
  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
  for (const auto& thread : list_) {
    thread->VisitRoots(visitor, flags);
  }
}

uint32_t ThreadList::AllocThreadId(Thread* self) {
  MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
  for (size_t i = 0; i < allocated_ids_.size(); ++i) {
    if (!allocated_ids_[i]) {
      allocated_ids_.set(i);
      return i + 1;  // Zero is reserved to mean "invalid".
    }
  }
  LOG(FATAL) << "Out of internal thread ids";
  UNREACHABLE();
}

void ThreadList::ReleaseThreadId(Thread* self, uint32_t id) {
  MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
  --id;  // Zero is reserved to mean "invalid".
  DCHECK(allocated_ids_[id]) << id;
  allocated_ids_.reset(id);
}

ScopedSuspendAll::ScopedSuspendAll(const char* cause, bool long_suspend) {
  Runtime::Current()->GetThreadList()->SuspendAll(cause, long_suspend);
}

ScopedSuspendAll::~ScopedSuspendAll() {
  Runtime::Current()->GetThreadList()->ResumeAll();
}

}  // namespace art