author     Mythri Alle <mythria@google.com>  2024-04-23 10:09:35 +0000
committer  Treehugger Robot <android-test-infra-autosubmit@system.gserviceaccount.com>  2024-04-25 09:21:37 +0000
commit     23d8f2c4a951ee5e3b41efdfed05d606a0da19ad (patch)
tree       2b54b236007bb97b26f28fcfc726af8de91e55e2
parent     3b6f3b2d025c9bf2dac4de61ea92ca0df077b171 (diff)
Remove listeners installed for tracing when there is an overflow
In non-streaming method tracing we no longer record methods when there is
an overflow, so remove the listeners that were installed to get trace
events.

Bug: 259258187, 329498538
Test: art/test.py --trace
Change-Id: If5b13c041b674a277b77fbf5c3f66372dc06a875
-rw-r--r--  runtime/trace.cc  63
-rw-r--r--  runtime/trace.h    5
2 files changed, 43 insertions(+), 25 deletions(-)
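
For context, this change factors the clock-source check that chooses between the fast (JIT-supported) and slow trace listeners into a shared helper and reuses it wherever the listeners are installed or removed. A condensed C++ sketch of the helper and one call site, simplified from the diff below rather than copied verbatim:

    bool UseFastTraceListeners(TraceClockSource clock_source) {
      // Thread cpu clocks need a kernel call, so JITed code cannot record them directly.
      bool is_fast_trace = !UseThreadCpuClock(clock_source);
    #if defined(__arm__)
      // On 32-bit ARM the timestamp counters are not always readable from user space.
      is_fast_trace = false;
    #endif
      return is_fast_trace;
    }

    // Shared by Trace::Start(), Trace::StopTracing() and the new Trace::RemoveListeners():
    runtime->GetInstrumentation()->AddListener(
        the_trace_,
        instrumentation::Instrumentation::kMethodEntered |
            instrumentation::Instrumentation::kMethodExited |
            instrumentation::Instrumentation::kMethodUnwind,
        UseFastTraceListeners(the_trace_->GetClockSource()));
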
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 1f97f155dd..e2fbd65bbe 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -367,6 +367,17 @@ bool UseWallClock(TraceClockSource clock_source) {
return (clock_source == TraceClockSource::kWall) || (clock_source == TraceClockSource::kDual);
}
+bool UseFastTraceListeners(TraceClockSource clock_source) {
+ // Thread cpu clocks need a kernel call, so we don't directly support them in JITed code.
+ bool is_fast_trace = !UseThreadCpuClock(clock_source);
+#if defined(__arm__)
+ // On ARM 32 bit, we don't always have access to the timestamp counters from
+ // user space. See comment in GetTimestamp for more details.
+ is_fast_trace = false;
+#endif
+ return is_fast_trace;
+}
+
void Trace::MeasureClockOverhead() {
if (UseThreadCpuClock(clock_source_)) {
Thread::Current()->GetCpuMicroTime();
@@ -695,14 +706,6 @@ void Trace::Start(std::unique_ptr<File>&& trace_file_in,
runtime->GetInstrumentation()->UpdateEntrypointsForDebuggable();
runtime->DeoptimizeBootImage();
}
- // For thread cpu clocks, we need to make a kernel call and hence we call into c++ to
- // support them.
- bool is_fast_trace = !UseThreadCpuClock(the_trace_->GetClockSource());
-#if defined(__arm__)
- // On ARM 32 bit, we don't always have access to the timestamp counters from
- // user space. Seem comment in GetTimestamp for more details.
- is_fast_trace = false;
-#endif
// Add ClassLoadCallback to record methods on class load.
runtime->GetRuntimeCallbacks()->AddClassLoadCallback(the_trace_);
runtime->GetInstrumentation()->AddListener(
@@ -710,7 +713,7 @@ void Trace::Start(std::unique_ptr<File>&& trace_file_in,
instrumentation::Instrumentation::kMethodEntered |
instrumentation::Instrumentation::kMethodExited |
instrumentation::Instrumentation::kMethodUnwind,
- is_fast_trace);
+ UseFastTraceListeners(the_trace_->GetClockSource()));
runtime->GetInstrumentation()->EnableMethodTracing(kTracerInstrumentationKey,
the_trace_,
/*needs_interpreter=*/false);
@@ -762,21 +765,13 @@ void Trace::StopTracing(bool flush_entries) {
MutexLock mu(self, *Locks::thread_list_lock_);
runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr);
} else {
- // For thread cpu clocks, we need to make a kernel call and hence we call into c++ to support
- // them.
- bool is_fast_trace = !UseThreadCpuClock(the_trace_->GetClockSource());
-#if defined(__arm__)
- // On ARM 32 bit, we don't always have access to the timestamp counters from
- // user space. Seem comment in GetTimestamp for more details.
- is_fast_trace = false;
-#endif
runtime->GetRuntimeCallbacks()->RemoveClassLoadCallback(the_trace_);
runtime->GetInstrumentation()->RemoveListener(
the_trace,
instrumentation::Instrumentation::kMethodEntered |
instrumentation::Instrumentation::kMethodExited |
instrumentation::Instrumentation::kMethodUnwind,
- is_fast_trace);
+ UseFastTraceListeners(the_trace_->GetClockSource()));
runtime->GetInstrumentation()->DisableMethodTracing(kTracerInstrumentationKey);
}
@@ -816,6 +811,21 @@ void Trace::StopTracing(bool flush_entries) {
}
}
+void Trace::RemoveListeners() {
+ Thread* self = Thread::Current();
+ // This is expected to be called in SuspendAll scope.
+ DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self));
+ MutexLock mu(self, *Locks::trace_lock_);
+ Runtime* runtime = Runtime::Current();
+ runtime->GetRuntimeCallbacks()->RemoveClassLoadCallback(the_trace_);
+ runtime->GetInstrumentation()->RemoveListener(
+ the_trace_,
+ instrumentation::Instrumentation::kMethodEntered |
+ instrumentation::Instrumentation::kMethodExited |
+ instrumentation::Instrumentation::kMethodUnwind,
+ UseFastTraceListeners(the_trace_->GetClockSource()));
+}
+
void Trace::FlushThreadBuffer(Thread* self) {
MutexLock mu(self, *Locks::trace_lock_);
// Check if we still need to flush inside the trace_lock_. If we are stopping tracing it is
@@ -1346,16 +1356,19 @@ void TraceWriter::RecordMethodInfo(const std::string& method_info_line, uint64_t
void TraceWriter::FlushAllThreadBuffers() {
ScopedThreadStateChange stsc(Thread::Current(), ThreadState::kSuspended);
ScopedSuspendAll ssa(__FUNCTION__);
- MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
- for (Thread* thread : Runtime::Current()->GetThreadList()->GetList()) {
- if (thread->GetMethodTraceBuffer() != nullptr) {
- FlushBuffer(thread, /* is_sync= */ true, /* free_buffer= */ false);
- // We cannot flush anynore data, so just return.
- if (overflow_) {
- return;
+ {
+ MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+ for (Thread* thread : Runtime::Current()->GetThreadList()->GetList()) {
+ if (thread->GetMethodTraceBuffer() != nullptr) {
+ FlushBuffer(thread, /* is_sync= */ true, /* free_buffer= */ false);
+ // We cannot flush any more data, so just break.
+ if (overflow_) {
+ break;
+ }
}
}
}
+ Trace::RemoveListeners();
return;
}
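
The trace.cc hunk above reworks the overflow handling in FlushAllThreadBuffers: the synchronous flush stops early once the trace overflows, and afterwards, while every thread is still suspended, the method-tracing listeners are removed so no further events are recorded. A condensed view of that control flow (a sketch of the new behavior, not a verbatim copy of the tree):

    void TraceWriter::FlushAllThreadBuffers() {
      ScopedThreadStateChange stsc(Thread::Current(), ThreadState::kSuspended);
      ScopedSuspendAll ssa(__FUNCTION__);  // Exclusive mutator_lock_ is held from here on.
      {
        MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
        for (Thread* thread : Runtime::Current()->GetThreadList()->GetList()) {
          if (thread->GetMethodTraceBuffer() != nullptr) {
            FlushBuffer(thread, /* is_sync= */ true, /* free_buffer= */ false);
            if (overflow_) {
              break;  // Nothing more fits in the trace; stop flushing.
            }
          }
        }
      }  // thread_list_lock_ released; still inside the SuspendAll scope.
      Trace::RemoveListeners();  // Satisfies the exclusive mutator_lock_ DCHECK.
    }
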
diff --git a/runtime/trace.h b/runtime/trace.h
index baa8e20259..b2012ed8e6 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -422,6 +422,11 @@ class Trace final : public instrumentation::InstrumentationListener, public Clas
static void FlushThreadBuffer(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!Locks::trace_lock_) NO_THREAD_SAFETY_ANALYSIS;
+ // Removes any listeners installed for method tracing. This is used in the non-streaming case
+ // when we no longer record any events once the buffer is full. In other cases listeners are
+ // removed only when tracing stops. This is expected to be called in SuspendAll scope.
+ static void RemoveListeners() REQUIRES(Locks::mutator_lock_);
+
void MeasureClockOverhead();
uint32_t GetClockOverheadNanoSeconds();
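
Correspondingly, the trace.h declaration documents the caller contract: RemoveListeners() must run with the mutator lock held exclusively, i.e. inside a SuspendAll scope. A minimal sketch of a conforming call site; the function name is hypothetical and only the SuspendAll requirement comes from the comment and the DCHECK in trace.cc above:

    // Hypothetical call site, for illustration of the locking contract only.
    void StopRecordingTraceEvents() {
      ScopedThreadStateChange stsc(Thread::Current(), ThreadState::kSuspended);
      ScopedSuspendAll ssa(__FUNCTION__);  // Provides the exclusive mutator_lock_ that
                                           // REQUIRES(Locks::mutator_lock_) expects.
      Trace::RemoveListeners();
    }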