// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/threading/hang_watcher.h"

#include <atomic>
#include <utility>

#include "base/containers/flat_map.h"
#include "base/debug/alias.h"
#include "base/debug/crash_logging.h"
#include "base/debug/dump_without_crashing.h"
#include "base/debug/leak_annotations.h"
#include "base/feature_list.h"
#include "base/functional/bind.h"
#include "base/functional/callback_helpers.h"
#include "base/metrics/field_trial_params.h"
#include "base/metrics/histogram_macros.h"
#include "base/power_monitor/power_monitor.h"
#include "base/ranges/algorithm.h"
#include "base/strings/string_number_conversions.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_checker.h"
#include "base/threading/thread_restrictions.h"
#include "base/threading/threading_features.h"
#include "base/time/default_tick_clock.h"
#include "base/time/time.h"
#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
#include "third_party/abseil-cpp/absl/base/attributes.h"

namespace base {

namespace {

// Defines how much logging happens when the HangWatcher monitors the threads.
// Logging levels are set per thread type through Finch. It's important that
// the order of the enum members stay the same and that their numerical
// values be in increasing order. The implementation of
// ThreadTypeLoggingLevelGreaterOrEqual() depends on it.
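// For example, a thread configured for kUmaAndCrash (2) also satisfies a
// check against kUmaOnly (1) because 2 >= 1.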
enum class LoggingLevel { kNone = 0, kUmaOnly = 1, kUmaAndCrash = 2 };

HangWatcher* g_instance = nullptr;
ABSL_CONST_INIT thread_local internal::HangWatchState* hang_watch_state =
    nullptr;
std::atomic<bool> g_use_hang_watcher{false};
std::atomic<HangWatcher::ProcessType> g_hang_watcher_process_type{
    HangWatcher::ProcessType::kBrowserProcess};

std::atomic<LoggingLevel> g_threadpool_log_level{LoggingLevel::kNone};
std::atomic<LoggingLevel> g_io_thread_log_level{LoggingLevel::kNone};
std::atomic<LoggingLevel> g_main_thread_log_level{LoggingLevel::kNone};

// Whether HangWatcher::Run() should keep monitoring. When set to false, Run()
// returns after the current monitoring pass.
std::atomic<bool> g_keep_monitoring{true};

// Emits the hung thread count histogram. |count| is the number of threads
// of type |thread_type| that were hung or became hung during the last
// monitoring window. This function should be invoked for each thread type
// encountered on each call to Monitor().
void LogHungThreadCountHistogram(HangWatcher::ThreadType thread_type,
                                 int count) {
  // In the case of unique threads like the IO or UI/Main thread, a count does
  // not make sense.
  const bool any_thread_hung = count >= 1;

  const HangWatcher::ProcessType process_type =
      g_hang_watcher_process_type.load(std::memory_order_relaxed);
  switch (process_type) {
    case HangWatcher::ProcessType::kUnknownProcess:
      break;

    case HangWatcher::ProcessType::kBrowserProcess:
      switch (thread_type) {
        case HangWatcher::ThreadType::kIOThread:
          UMA_HISTOGRAM_BOOLEAN(
              "HangWatcher.IsThreadHung.BrowserProcess."
              "IOThread",
              any_thread_hung);
          break;
        case HangWatcher::ThreadType::kMainThread:
          UMA_HISTOGRAM_BOOLEAN(
              "HangWatcher.IsThreadHung.BrowserProcess."
              "UIThread",
              any_thread_hung);
          break;
        case HangWatcher::ThreadType::kThreadPoolThread:
          // Not recorded for now.
          break;
      }
      break;

    case HangWatcher::ProcessType::kGPUProcess:
      // Not recorded for now.
      break;

    case HangWatcher::ProcessType::kRendererProcess:
      switch (thread_type) {
        case HangWatcher::ThreadType::kIOThread:
          UMA_HISTOGRAM_BOOLEAN(
              "HangWatcher.IsThreadHung.RendererProcess."
              "IOThread",
              any_thread_hung);
          break;
        case HangWatcher::ThreadType::kMainThread:
          UMA_HISTOGRAM_BOOLEAN(
              "HangWatcher.IsThreadHung.RendererProcess."
              "MainThread",
              any_thread_hung);
          break;
        case HangWatcher::ThreadType::kThreadPoolThread:
          // Not recorded for now.
          break;
      }
      break;

    case HangWatcher::ProcessType::kUtilityProcess:
      switch (thread_type) {
        case HangWatcher::ThreadType::kIOThread:
          UMA_HISTOGRAM_BOOLEAN(
              "HangWatcher.IsThreadHung.UtilityProcess."
              "IOThread",
              any_thread_hung);
          break;
        case HangWatcher::ThreadType::kMainThread:
          UMA_HISTOGRAM_BOOLEAN(
              "HangWatcher.IsThreadHung.UtilityProcess."
              "MainThread",
              any_thread_hung);
          break;
        case HangWatcher::ThreadType::kThreadPoolThread:
          // Not recorded for now.
          break;
      }
      break;
  }
}

// Returns true if |thread_type| was configured through Finch to have a logging
// level that is equal to or exceeds |logging_level|.
bool ThreadTypeLoggingLevelGreaterOrEqual(HangWatcher::ThreadType thread_type,
                                          LoggingLevel logging_level) {
  switch (thread_type) {
    case HangWatcher::ThreadType::kIOThread:
      return g_io_thread_log_level.load(std::memory_order_relaxed) >=
             logging_level;
    case HangWatcher::ThreadType::kMainThread:
      return g_main_thread_log_level.load(std::memory_order_relaxed) >=
             logging_level;
    case HangWatcher::ThreadType::kThreadPoolThread:
      return g_threadpool_log_level.load(std::memory_order_relaxed) >=
             logging_level;
  }
}

}  // namespace

// Determines if the HangWatcher is activated. When false, the HangWatcher
// thread is never started.
BASE_FEATURE(kEnableHangWatcher,
             "EnableHangWatcher",
             FEATURE_ENABLED_BY_DEFAULT);

BASE_FEATURE(kEnableHangWatcherInZygoteChildren,
             "EnableHangWatcherInZygoteChildren",
             FEATURE_ENABLED_BY_DEFAULT);

// Browser process.
constexpr base::FeatureParam<int> kIOThreadLogLevel{
    &kEnableHangWatcher, "io_thread_log_level",
    static_cast<int>(LoggingLevel::kUmaOnly)};
constexpr base::FeatureParam<int> kUIThreadLogLevel{
    &kEnableHangWatcher, "ui_thread_log_level",
    static_cast<int>(LoggingLevel::kUmaOnly)};
constexpr base::FeatureParam<int> kThreadPoolLogLevel{
    &kEnableHangWatcher, "threadpool_log_level",
    static_cast<int>(LoggingLevel::kUmaOnly)};

// GPU process.
constexpr base::FeatureParam<int> kGPUProcessIOThreadLogLevel{
    &kEnableHangWatcher, "gpu_process_io_thread_log_level",
    static_cast<int>(LoggingLevel::kNone)};
constexpr base::FeatureParam<int> kGPUProcessMainThreadLogLevel{
    &kEnableHangWatcher, "gpu_process_main_thread_log_level",
    static_cast<int>(LoggingLevel::kNone)};
constexpr base::FeatureParam<int> kGPUProcessThreadPoolLogLevel{
    &kEnableHangWatcher, "gpu_process_threadpool_log_level",
    static_cast<int>(LoggingLevel::kNone)};

// Renderer process.
constexpr base::FeatureParam<int> kRendererProcessIOThreadLogLevel{
    &kEnableHangWatcher, "renderer_process_io_thread_log_level",
    static_cast<int>(LoggingLevel::kUmaOnly)};
constexpr base::FeatureParam<int> kRendererProcessMainThreadLogLevel{
    &kEnableHangWatcher, "renderer_process_main_thread_log_level",
    static_cast<int>(LoggingLevel::kUmaOnly)};
constexpr base::FeatureParam<int> kRendererProcessThreadPoolLogLevel{
    &kEnableHangWatcher, "renderer_process_threadpool_log_level",
    static_cast<int>(LoggingLevel::kUmaOnly)};

// Utility process.
constexpr base::FeatureParam<int> kUtilityProcessIOThreadLogLevel{
    &kEnableHangWatcher, "utility_process_io_thread_log_level",
    static_cast<int>(LoggingLevel::kUmaOnly)};
constexpr base::FeatureParam<int> kUtilityProcessMainThreadLogLevel{
    &kEnableHangWatcher, "utility_process_main_thread_log_level",
    static_cast<int>(LoggingLevel::kUmaOnly)};
constexpr base::FeatureParam<int> kUtilityProcessThreadPoolLogLevel{
    &kEnableHangWatcher, "utility_process_threadpool_log_level",
    static_cast<int>(LoggingLevel::kUmaOnly)};

constexpr const char* kThreadName = "HangWatcher";

// The time that the HangWatcher thread will sleep for between calls to
// Monitor(). Increasing or decreasing this does not modify the type of hangs
// that can be detected. It instead affects the probability that a call to
// Monitor() will happen at the right time to catch a hang. This has to be
// balanced against power/CPU use concerns, as busy looping would catch almost
// all hangs but present unacceptable overhead. NOTE: If this period is ever
// changed then all metrics that depend on it, like HangWatcher.IsThreadHung,
// need to be updated.
constexpr auto kMonitoringPeriod = base::Seconds(10);
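
// Illustrative usage sketch (not part of this file; DoWatchedWork() and
// DoWork() are hypothetical): a registered thread wraps a unit of work in a
// WatchHangsInScope so that exceeding the timeout can be reported as a hang
// by a later Monitor() pass.
//
//   void DoWatchedWork() {
//     WatchHangsInScope scope(base::Seconds(10));
//     DoWork();  // A hang may be recorded if this outlives the deadline.
//   }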

WatchHangsInScope::WatchHangsInScope(TimeDelta timeout) {
  internal::HangWatchState* current_hang_watch_state =
      HangWatcher::IsEnabled()
          ? internal::HangWatchState::GetHangWatchStateForCurrentThread()
          : nullptr;

  DCHECK(timeout >= base::TimeDelta()) << "Negative timeouts are invalid.";

  // Thread is not monitored, noop.
  if (!current_hang_watch_state) {
    took_effect_ = false;
    return;
  }

#if DCHECK_IS_ON()
  previous_watch_hangs_in_scope_ =
      current_hang_watch_state->GetCurrentWatchHangsInScope();
  current_hang_watch_state->SetCurrentWatchHangsInScope(this);
#endif

  auto [old_flags, old_deadline] =
      current_hang_watch_state->GetFlagsAndDeadline();

  // TODO(crbug.com/1034046): Check whether we are already over the deadline for
  // the previous WatchHangsInScope here by issuing only one TimeTicks::Now()
  // and reusing the value.

  previous_deadline_ = old_deadline;
  TimeTicks deadline = TimeTicks::Now() + timeout;
  current_hang_watch_state->SetDeadline(deadline);
  current_hang_watch_state->IncrementNestingLevel();

  const bool hangs_ignored_for_current_scope =
      internal::HangWatchDeadline::IsFlagSet(
          internal::HangWatchDeadline::Flag::kIgnoreCurrentWatchHangsInScope,
          old_flags);

  // If the current WatchHangsInScope is ignored, temporarily reactivate hang
  // watching for newly created WatchHangsInScopes. On exit, hang watching is
  // suspended again to return to the original state.
  if (hangs_ignored_for_current_scope) {
    current_hang_watch_state->UnsetIgnoreCurrentWatchHangsInScope();
    set_hangs_ignored_on_exit_ = true;
  }
}

WatchHangsInScope::~WatchHangsInScope() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  // If hang watching was not enabled at construction time there is nothing to
  // validate or undo.
  if (!took_effect_) {
    return;
  }

  // If the thread was unregistered since construction there is also nothing to
  // do.
  auto* const state =
      internal::HangWatchState::GetHangWatchStateForCurrentThread();
  if (!state) {
    return;
  }

  // If a hang is currently being captured we should block here so execution
  // stops and we avoid recording unrelated stack frames in the crash.
  if (state->IsFlagSet(internal::HangWatchDeadline::Flag::kShouldBlockOnHang)) {
    base::HangWatcher::GetInstance()->BlockIfCaptureInProgress();
  }

#if DCHECK_IS_ON()
  // Verify that no Scope was destructed out of order.
  DCHECK_EQ(this, state->GetCurrentWatchHangsInScope());
  state->SetCurrentWatchHangsInScope(previous_watch_hangs_in_scope_);
#endif

  if (state->nesting_level() == 1) {
    // If a call to InvalidateActiveExpectations() suspended hang watching
    // during the lifetime of this or any nested WatchHangsInScope it can now
    // safely be reactivated by clearing the ignore bit since this is the
    // outer-most scope.
    state->UnsetIgnoreCurrentWatchHangsInScope();
  } else if (set_hangs_ignored_on_exit_) {
    // Return to ignoring hangs since this was the previous state before hang
    // watching was temporarily enabled for this WatchHangsInScope only in the
    // constructor.
    state->SetIgnoreCurrentWatchHangsInScope();
  }

  // Reset the deadline to the value it had before entering this
  // WatchHangsInScope.
  state->SetDeadline(previous_deadline_);
  // TODO(crbug.com/1034046): Log when a WatchHangsInScope exits after its
  // deadline and that went undetected by the HangWatcher.

  state->DecrementNestingLevel();
}

// static
void HangWatcher::InitializeOnMainThread(ProcessType process_type,
                                         bool is_zygote_child,
                                         bool emit_crashes) {
  DCHECK(!g_use_hang_watcher);
  DCHECK(g_io_thread_log_level == LoggingLevel::kNone);
  DCHECK(g_main_thread_log_level == LoggingLevel::kNone);
  DCHECK(g_threadpool_log_level == LoggingLevel::kNone);

  bool enable_hang_watcher = base::FeatureList::IsEnabled(kEnableHangWatcher);

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
  if (is_zygote_child) {
    enable_hang_watcher =
        enable_hang_watcher &&
        base::FeatureList::IsEnabled(kEnableHangWatcherInZygoteChildren);
  }
#endif

  // Do not start HangWatcher in the GPU process until the issue related to
  // invalid magic signature in the GPU WatchDog is fixed
  // (https://crbug.com/1297760).
  if (process_type == ProcessType::kGPUProcess)
    enable_hang_watcher = false;

  g_use_hang_watcher.store(enable_hang_watcher, std::memory_order_relaxed);

  // Keep the process type.
  g_hang_watcher_process_type.store(process_type, std::memory_order_relaxed);

  // If hang watching is disabled as a whole there is no need to read the
  // params.
  if (!enable_hang_watcher)
    return;

  // Retrieve thread-specific config for hang watching.
  if (process_type == HangWatcher::ProcessType::kBrowserProcess) {
    // Crashes are always emitted when requested, overriding any feature params.
    if (emit_crashes) {
      g_io_thread_log_level.store(LoggingLevel::kUmaAndCrash,
                                  std::memory_order_relaxed);
      g_main_thread_log_level.store(LoggingLevel::kUmaAndCrash,
                                    std::memory_order_relaxed);
    } else {
      g_io_thread_log_level.store(
          static_cast<LoggingLevel>(kIOThreadLogLevel.Get()),
          std::memory_order_relaxed);
      g_main_thread_log_level.store(
          static_cast<LoggingLevel>(kUIThreadLogLevel.Get()),
          std::memory_order_relaxed);
    }

    g_threadpool_log_level.store(
        static_cast<LoggingLevel>(kThreadPoolLogLevel.Get()),
        std::memory_order_relaxed);
  } else if (process_type == HangWatcher::ProcessType::kGPUProcess) {
    g_threadpool_log_level.store(
        static_cast<LoggingLevel>(kGPUProcessThreadPoolLogLevel.Get()),
        std::memory_order_relaxed);
    g_io_thread_log_level.store(
        static_cast<LoggingLevel>(kGPUProcessIOThreadLogLevel.Get()),
        std::memory_order_relaxed);
    g_main_thread_log_level.store(
        static_cast<LoggingLevel>(kGPUProcessMainThreadLogLevel.Get()),
        std::memory_order_relaxed);
  } else if (process_type == HangWatcher::ProcessType::kRendererProcess) {
    g_threadpool_log_level.store(
        static_cast<LoggingLevel>(kRendererProcessThreadPoolLogLevel.Get()),
        std::memory_order_relaxed);
    g_io_thread_log_level.store(
        static_cast<LoggingLevel>(kRendererProcessIOThreadLogLevel.Get()),
        std::memory_order_relaxed);
    g_main_thread_log_level.store(
        static_cast<LoggingLevel>(kRendererProcessMainThreadLogLevel.Get()),
        std::memory_order_relaxed);
  } else if (process_type == HangWatcher::ProcessType::kUtilityProcess) {
    g_threadpool_log_level.store(
        static_cast<LoggingLevel>(kUtilityProcessThreadPoolLogLevel.Get()),
        std::memory_order_relaxed);
    g_io_thread_log_level.store(
        static_cast<LoggingLevel>(kUtilityProcessIOThreadLogLevel.Get()),
        std::memory_order_relaxed);
    g_main_thread_log_level.store(
        static_cast<LoggingLevel>(kUtilityProcessMainThreadLogLevel.Get()),
        std::memory_order_relaxed);
  }
}

void HangWatcher::UnitializeOnMainThreadForTesting() {
  g_use_hang_watcher.store(false, std::memory_order_relaxed);
  g_threadpool_log_level.store(LoggingLevel::kNone, std::memory_order_relaxed);
  g_io_thread_log_level.store(LoggingLevel::kNone, std::memory_order_relaxed);
  g_main_thread_log_level.store(LoggingLevel::kNone, std::memory_order_relaxed);
}

// static
bool HangWatcher::IsEnabled() {
  return g_use_hang_watcher.load(std::memory_order_relaxed);
}

// static
bool HangWatcher::IsThreadPoolHangWatchingEnabled() {
  return g_threadpool_log_level.load(std::memory_order_relaxed) !=
         LoggingLevel::kNone;
}

// static
bool HangWatcher::IsIOThreadHangWatchingEnabled() {
  return g_io_thread_log_level.load(std::memory_order_relaxed) !=
         LoggingLevel::kNone;
}

// static
bool HangWatcher::IsCrashReportingEnabled() {
  if (g_main_thread_log_level.load(std::memory_order_relaxed) ==
      LoggingLevel::kUmaAndCrash) {
    return true;
  }
  if (g_io_thread_log_level.load(std::memory_order_relaxed) ==
      LoggingLevel::kUmaAndCrash) {
    return true;
  }
  if (g_threadpool_log_level.load(std::memory_order_relaxed) ==
      LoggingLevel::kUmaAndCrash) {
    return true;
  }
  return false;
}

// static
void HangWatcher::InvalidateActiveExpectations() {
  auto* const state =
      internal::HangWatchState::GetHangWatchStateForCurrentThread();
  if (!state) {
    // If the current thread is not under watch there is nothing to invalidate.
    return;
  }
  state->SetIgnoreCurrentWatchHangsInScope();
}

HangWatcher::HangWatcher()
    : monitor_period_(kMonitoringPeriod),
      should_monitor_(WaitableEvent::ResetPolicy::AUTOMATIC),
      thread_(this, kThreadName),
      tick_clock_(base::DefaultTickClock::GetInstance()),
      memory_pressure_listener_(
          FROM_HERE,
          base::BindRepeating(&HangWatcher::OnMemoryPressure,
                              base::Unretained(this))) {
  // |thread_checker_| should not be bound to the constructing thread.
  DETACH_FROM_THREAD(hang_watcher_thread_checker_);

  should_monitor_.declare_only_used_while_idle();

  DCHECK(!g_instance);
  g_instance = this;
}

// static
void HangWatcher::CreateHangWatcherInstance() {
  DCHECK(!g_instance);
  g_instance = new base::HangWatcher();
  // The hang watcher is leaked to make sure it survives all watched threads.
  ANNOTATE_LEAKING_OBJECT_PTR(g_instance);
}
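
// Illustrative startup sequence (assumed; the embedder decides the exact
// order, process type and arguments):
//
//   HangWatcher::InitializeOnMainThread(
//       HangWatcher::ProcessType::kBrowserProcess,
//       /*is_zygote_child=*/false, /*emit_crashes=*/true);
//   HangWatcher::CreateHangWatcherInstance();
//   HangWatcher::GetInstance()->Start();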

#if !BUILDFLAG(IS_NACL)
debug::ScopedCrashKeyString
HangWatcher::GetTimeSinceLastCriticalMemoryPressureCrashKey() {
  DCHECK_CALLED_ON_VALID_THREAD(hang_watcher_thread_checker_);

  // The crash key size is large enough to hold the biggest possible return
  // value from base::TimeDelta::InSeconds().
  constexpr debug::CrashKeySize kCrashKeyContentSize =
      debug::CrashKeySize::Size32;
  DCHECK_GE(static_cast<uint64_t>(kCrashKeyContentSize),
            base::NumberToString(std::numeric_limits<int64_t>::max()).size());

  static debug::CrashKeyString* crash_key = AllocateCrashKeyString(
      "seconds-since-last-memory-pressure", kCrashKeyContentSize);

  const base::TimeTicks last_critical_memory_pressure_time =
      last_critical_memory_pressure_.load(std::memory_order_relaxed);
  if (last_critical_memory_pressure_time.is_null()) {
    constexpr char kNoMemoryPressureMsg[] = "No critical memory pressure";
    static_assert(
        std::size(kNoMemoryPressureMsg) <=
            static_cast<uint64_t>(kCrashKeyContentSize),
        "The crash key is too small to hold \"No critical memory pressure\".");
    return debug::ScopedCrashKeyString(crash_key, kNoMemoryPressureMsg);
  } else {
    base::TimeDelta time_since_last_critical_memory_pressure =
        base::TimeTicks::Now() - last_critical_memory_pressure_time;
    return debug::ScopedCrashKeyString(
        crash_key, base::NumberToString(
                       time_since_last_critical_memory_pressure.InSeconds()));
  }
}
#endif

std::string HangWatcher::GetTimeSinceLastSystemPowerResumeCrashKeyValue()
    const {
  DCHECK_CALLED_ON_VALID_THREAD(hang_watcher_thread_checker_);

  const TimeTicks last_system_power_resume_time =
      PowerMonitor::GetLastSystemResumeTime();
  if (last_system_power_resume_time.is_null())
    return "Never suspended";
  if (last_system_power_resume_time == TimeTicks::Max())
    return "Power suspended";

  const TimeDelta time_since_last_system_resume =
      TimeTicks::Now() - last_system_power_resume_time;
  return NumberToString(time_since_last_system_resume.InSeconds());
}

void HangWatcher::OnMemoryPressure(
    base::MemoryPressureListener::MemoryPressureLevel memory_pressure_level) {
  if (memory_pressure_level ==
      base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL) {
    last_critical_memory_pressure_.store(base::TimeTicks::Now(),
                                         std::memory_order_relaxed);
  }
}

HangWatcher::~HangWatcher() {
  DCHECK_CALLED_ON_VALID_THREAD(constructing_thread_checker_);
  DCHECK_EQ(g_instance, this);
  DCHECK(watch_states_.empty());
  g_instance = nullptr;
  Stop();
}

void HangWatcher::Start() {
  thread_.Start();
  thread_started_ = true;
}

void HangWatcher::Stop() {
  g_keep_monitoring.store(false, std::memory_order_relaxed);
  should_monitor_.Signal();
  thread_.Join();
  thread_started_ = false;

  // In production HangWatcher is always leaked, but during testing it may be
  // stopped and restarted using a new instance. This makes sure the next call
  // to Start() will actually monitor in that case.
  g_keep_monitoring.store(true, std::memory_order_relaxed);
}

bool HangWatcher::IsWatchListEmpty() {
  AutoLock auto_lock(watch_state_lock_);
  return watch_states_.empty();
}

void HangWatcher::Wait() {
  while (true) {
    // Amount by which the actual time spent sleeping can deviate from
    // the target time and still be considered timely.
    constexpr base::TimeDelta kWaitDriftTolerance = base::Milliseconds(100);

    const base::TimeTicks time_before_wait = tick_clock_->NowTicks();

    // Sleep until next scheduled monitoring or until signaled.
    const bool was_signaled = should_monitor_.TimedWait(monitor_period_);

    if (after_wait_callback_)
      after_wait_callback_.Run(time_before_wait);

    const base::TimeTicks time_after_wait = tick_clock_->NowTicks();
    const base::TimeDelta wait_time = time_after_wait - time_before_wait;
    const bool wait_was_normal =
        wait_time <= (monitor_period_ + kWaitDriftTolerance);

    UMA_HISTOGRAM_TIMES("HangWatcher.SleepDrift.BrowserProcess",
                        wait_time - monitor_period_);

    if (!wait_was_normal) {
      // If the time spent waiting was too high it might indicate the machine is
      // very slow or that it went to sleep. In any case we can't trust the
      // WatchHangsInScopes that are currently live. Update the ignore
      // threshold to make sure they don't trigger a hang on subsequent monitors
      // then keep waiting.

      base::AutoLock auto_lock(watch_state_lock_);

      // Find the latest deadline among the live watch states. They might change
      // atomically while iterating, but that's fine: if they do, it means the
      // new WatchHangsInScope was constructed very soon after the abnormal
      // sleep happened and might still be affected by the root cause.
      // Ignoring it is cautious and harmless.
      base::TimeTicks latest_deadline;
      for (const auto& state : watch_states_) {
        base::TimeTicks deadline = state->GetDeadline();
        if (deadline > latest_deadline) {
          latest_deadline = deadline;
        }
      }

      deadline_ignore_threshold_ = latest_deadline;
    }

    // Stop waiting.
    if (wait_was_normal || was_signaled)
      return;
  }
}

void HangWatcher::Run() {
  // Monitor() should only run on |thread_|. Bind |thread_checker_| here to make
  // sure of that.
  DCHECK_CALLED_ON_VALID_THREAD(hang_watcher_thread_checker_);

  while (g_keep_monitoring.load(std::memory_order_relaxed)) {
    Wait();

    if (!IsWatchListEmpty() &&
        g_keep_monitoring.load(std::memory_order_relaxed)) {
      Monitor();
      if (after_monitor_closure_for_testing_) {
        after_monitor_closure_for_testing_.Run();
      }
    }
  }
}

// static
HangWatcher* HangWatcher::GetInstance() {
  return g_instance;
}

// static
void HangWatcher::RecordHang() {
  base::debug::DumpWithoutCrashing();
  NO_CODE_FOLDING();
}

ScopedClosureRunner HangWatcher::RegisterThreadInternal(
    ThreadType thread_type) {
  AutoLock auto_lock(watch_state_lock_);
  CHECK(base::FeatureList::GetInstance());

  // Do not install a WatchState if the results would never be observable.
  if (!ThreadTypeLoggingLevelGreaterOrEqual(thread_type,
                                            LoggingLevel::kUmaOnly)) {
    return ScopedClosureRunner(base::DoNothing());
  }

  watch_states_.push_back(
      internal::HangWatchState::CreateHangWatchStateForCurrentThread(
          thread_type));
  return ScopedClosureRunner(BindOnce(&HangWatcher::UnregisterThread,
                                      Unretained(HangWatcher::GetInstance())));
}

// static
ScopedClosureRunner HangWatcher::RegisterThread(ThreadType thread_type) {
  if (!GetInstance()) {
    return ScopedClosureRunner();
  }

  return GetInstance()->RegisterThreadInternal(thread_type);
}
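
// Illustrative usage sketch (assumed, based on the API above): a watched
// thread registers itself and keeps the returned ScopedClosureRunner alive;
// letting the runner go out of scope unregisters the thread via
// UnregisterThread().
//
//   ScopedClosureRunner unregister_thread_closure =
//       HangWatcher::RegisterThread(HangWatcher::ThreadType::kMainThread);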

base::TimeTicks HangWatcher::WatchStateSnapShot::GetHighestDeadline() const {
  DCHECK(IsActionable());

  // Since entries are sorted in increasing order the last entry is the largest
  // one.
  return hung_watch_state_copies_.back().deadline;
}

HangWatcher::WatchStateSnapShot::WatchStateSnapShot() = default;

void HangWatcher::WatchStateSnapShot::Init(
    const HangWatchStates& watch_states,
    base::TimeTicks deadline_ignore_threshold) {
  DCHECK(!initialized_);

  // Whether or not the snapshot ends up actionable, it will have been
  // initialized after this function.
  initialized_ = true;

  const base::TimeTicks now = base::TimeTicks::Now();
  bool all_threads_marked = true;
  bool found_deadline_before_ignore_threshold = false;

  // Use a std::array to store the hang counts to avoid allocations. The
  // numerical values of the HangWatcher::ThreadType enum are used to index
  // into the array. |kInvalidHangCount| is used to signify that no threads of
  // the type were found.
  constexpr size_t kHangCountArraySize =
      static_cast<std::size_t>(base::HangWatcher::ThreadType::kMax) + 1;
  std::array<int, kHangCountArraySize> hung_counts_per_thread_type;

  constexpr int kInvalidHangCount = -1;
  hung_counts_per_thread_type.fill(kInvalidHangCount);

  // Will be true if any of the hung threads has a logging level high enough,
  // as defined through Finch params, to warrant dumping a crash.
  bool any_hung_thread_has_dumping_enabled = false;

  // Copy hung thread information.
  for (const auto& watch_state : watch_states) {
    uint64_t flags;
    TimeTicks deadline;
    std::tie(flags, deadline) = watch_state->GetFlagsAndDeadline();

    if (deadline <= deadline_ignore_threshold) {
      found_deadline_before_ignore_threshold = true;
    }

    if (internal::HangWatchDeadline::IsFlagSet(
            internal::HangWatchDeadline::Flag::kIgnoreCurrentWatchHangsInScope,
            flags)) {
      continue;
    }

    // If a thread type is monitored and did not hang, it still needs to be
    // logged as a zero count.
    const size_t hang_count_index =
        static_cast<size_t>(watch_state.get()->thread_type());
    if (hung_counts_per_thread_type[hang_count_index] == kInvalidHangCount) {
      hung_counts_per_thread_type[hang_count_index] = 0;
    }

    // Only copy hung threads.
    if (deadline <= now) {
      ++hung_counts_per_thread_type[hang_count_index];

      if (ThreadTypeLoggingLevelGreaterOrEqual(watch_state.get()->thread_type(),
                                               LoggingLevel::kUmaAndCrash)) {
        any_hung_thread_has_dumping_enabled = true;
      }

#if BUILDFLAG(ENABLE_BASE_TRACING)
      // Emit trace events for monitored threads.
      if (ThreadTypeLoggingLevelGreaterOrEqual(watch_state.get()->thread_type(),
                                               LoggingLevel::kUmaOnly)) {
        const PlatformThreadId thread_id = watch_state.get()->GetThreadID();
        const auto track = perfetto::Track::FromPointer(
            this, perfetto::ThreadTrack::ForThread(thread_id));
        TRACE_EVENT_BEGIN("base", "HangWatcher::ThreadHung", track, deadline);
        TRACE_EVENT_END("base", track, now);
        // TODO(crbug.com/1021571): Remove this once fixed.
        PERFETTO_INTERNAL_ADD_EMPTY_EVENT();
      }
#endif

      // Attempt to mark the thread as needing to stay within its current
      // WatchHangsInScope until capture is complete.
      bool thread_marked = watch_state->SetShouldBlockOnHang(flags, deadline);

      // If marking some threads already failed the snapshot won't be kept so
      // there is no need to keep adding to it. The loop doesn't abort though
      // to keep marking the other threads. If these threads remain hung until
      // the next capture then they'll already be marked and will be included
      // in the capture at that time.
      if (thread_marked && all_threads_marked) {
        hung_watch_state_copies_.push_back(
            WatchStateCopy{deadline, watch_state.get()->GetThreadID()});
      } else {
        all_threads_marked = false;
      }
    }
  }

  // Log the hung thread counts to histograms for each thread type if any
  // threads of that type were found.
  for (size_t i = 0; i < kHangCountArraySize; ++i) {
    const int hang_count = hung_counts_per_thread_type[i];
    const HangWatcher::ThreadType thread_type =
        static_cast<HangWatcher::ThreadType>(i);
    if (hang_count != kInvalidHangCount &&
        ThreadTypeLoggingLevelGreaterOrEqual(thread_type,
                                             LoggingLevel::kUmaOnly)) {
      LogHungThreadCountHistogram(thread_type, hang_count);
    }
  }

  // Three cases can invalidate this snapshot and prevent the capture of the
  // hang.
  //
  // 1. Some threads could not be marked for blocking so this snapshot isn't
  // actionable since marked threads could be hung because of unmarked ones.
  // If only the marked threads were captured the information would be
  // incomplete.
  //
  // 2. Any of the threads have a deadline before |deadline_ignore_threshold|.
  // If any thread is ignored it reduces the confidence in the whole state and
  // it's better to avoid capturing misleading data.
  //
  // 3. The hung threads found were all of types that are not configured through
  // Finch to trigger a crash dump.
  //
  if (!all_threads_marked || found_deadline_before_ignore_threshold ||
      !any_hung_thread_has_dumping_enabled) {
    hung_watch_state_copies_.clear();
    return;
  }

  // Sort |hung_watch_state_copies_| by order of decreasing hang severity so the
  // most severe hang is first in the list.
  ranges::sort(hung_watch_state_copies_,
               [](const WatchStateCopy& lhs, const WatchStateCopy& rhs) {
                 return lhs.deadline < rhs.deadline;
               });
}

void HangWatcher::WatchStateSnapShot::Clear() {
  hung_watch_state_copies_.clear();
  initialized_ = false;
}

HangWatcher::WatchStateSnapShot::WatchStateSnapShot(
    const WatchStateSnapShot& other) = default;

HangWatcher::WatchStateSnapShot::~WatchStateSnapShot() = default;

std::string HangWatcher::WatchStateSnapShot::PrepareHungThreadListCrashKey()
    const {
  DCHECK(IsActionable());

  // Build a crash key string that contains the ids of the hung threads.
  constexpr char kSeparator{'|'};
  std::string list_of_hung_thread_ids;

  // Add as many thread ids to the crash key as possible.
  for (const WatchStateCopy& copy : hung_watch_state_copies_) {
    std::string fragment = base::NumberToString(copy.thread_id) + kSeparator;
    if (list_of_hung_thread_ids.size() + fragment.size() <
        static_cast<std::size_t>(debug::CrashKeySize::Size256)) {
      list_of_hung_thread_ids += fragment;
    } else {
      // Respect the by-priority ordering of thread ids in the crash key by
      // stopping construction as soon as one does not fit. This avoids
      // including lower-priority ids while omitting more important ones.
      break;
    }
  }

  return list_of_hung_thread_ids;
}

bool HangWatcher::WatchStateSnapShot::IsActionable() const {
  DCHECK(initialized_);
  return !hung_watch_state_copies_.empty();
}

HangWatcher::WatchStateSnapShot HangWatcher::GrabWatchStateSnapshotForTesting()
    const {
  WatchStateSnapShot snapshot;
  snapshot.Init(watch_states_, deadline_ignore_threshold_);
  return snapshot;
}

void HangWatcher::Monitor() {
  DCHECK_CALLED_ON_VALID_THREAD(hang_watcher_thread_checker_);
  AutoLock auto_lock(watch_state_lock_);

  // If all threads unregistered since this function was invoked there's
  // nothing to do anymore.
  if (watch_states_.empty())
    return;

  watch_state_snapshot_.Init(watch_states_, deadline_ignore_threshold_);

  if (watch_state_snapshot_.IsActionable()) {
    DoDumpWithoutCrashing(watch_state_snapshot_);
  }

  watch_state_snapshot_.Clear();
}

void HangWatcher::DoDumpWithoutCrashing(
    const WatchStateSnapShot& watch_state_snapshot) {
  TRACE_EVENT("base", "HangWatcher::DoDumpWithoutCrashing");

  capture_in_progress_.store(true, std::memory_order_relaxed);
  base::AutoLock scope_lock(capture_lock_);

#if !BUILDFLAG(IS_NACL)
  const std::string list_of_hung_thread_ids =
      watch_state_snapshot.PrepareHungThreadListCrashKey();

  static debug::CrashKeyString* crash_key = AllocateCrashKeyString(
      "list-of-hung-threads", debug::CrashKeySize::Size256);

  const debug::ScopedCrashKeyString list_of_hung_threads_crash_key_string(
      crash_key, list_of_hung_thread_ids);

  const debug::ScopedCrashKeyString
      time_since_last_critical_memory_pressure_crash_key_string =
          GetTimeSinceLastCriticalMemoryPressureCrashKey();

  SCOPED_CRASH_KEY_STRING32("HangWatcher", "seconds-since-last-resume",
                            GetTimeSinceLastSystemPowerResumeCrashKeyValue());
#endif

  // To avoid capturing more than one hang that blames a subset of the same
  // threads, it's necessary to keep track of the furthest deadline that
  // contributed to declaring a hang. Only once all threads have deadlines past
  // this point can we be sure that a newly discovered hang is not directly
  // related.
  // Example:
  // **********************************************************************
  // Timeline A : L------1-------2----------3-------4----------N-----------
  // Timeline B : -------2----------3-------4----------L----5------N-------
  // Timeline C : L----------------------------5------6----7---8------9---N
  // **********************************************************************
  // In the example when a Monitor() happens during timeline A
  // |deadline_ignore_threshold_| (L) is at time zero and deadlines (1-4)
  // are before Now() (N) . A hang is captured and L is updated. During
  // the next Monitor() (timeline B) a new deadline is over but we can't
  // capture a hang because deadlines 2-4 are still live and already counted
  // toward a hang. During a third monitor (timeline C) all live deadlines
  // are now after L and a second hang can be recorded.
  base::TimeTicks latest_expired_deadline =
      watch_state_snapshot.GetHighestDeadline();

  if (on_hang_closure_for_testing_)
    on_hang_closure_for_testing_.Run();
  else
    RecordHang();

  // Update after running the actual capture.
  deadline_ignore_threshold_ = latest_expired_deadline;

  capture_in_progress_.store(false, std::memory_order_relaxed);
}

void HangWatcher::SetAfterMonitorClosureForTesting(
    base::RepeatingClosure closure) {
  DCHECK_CALLED_ON_VALID_THREAD(constructing_thread_checker_);
  after_monitor_closure_for_testing_ = std::move(closure);
}

void HangWatcher::SetOnHangClosureForTesting(base::RepeatingClosure closure) {
  DCHECK_CALLED_ON_VALID_THREAD(constructing_thread_checker_);
  on_hang_closure_for_testing_ = std::move(closure);
}

void HangWatcher::SetMonitoringPeriodForTesting(base::TimeDelta period) {
  DCHECK_CALLED_ON_VALID_THREAD(constructing_thread_checker_);
  monitor_period_ = period;
}

void HangWatcher::SetAfterWaitCallbackForTesting(
    RepeatingCallback<void(TimeTicks)> callback) {
  DCHECK_CALLED_ON_VALID_THREAD(constructing_thread_checker_);
  after_wait_callback_ = callback;
}

void HangWatcher::SignalMonitorEventForTesting() {
  DCHECK_CALLED_ON_VALID_THREAD(constructing_thread_checker_);
  should_monitor_.Signal();
}

// static
void HangWatcher::StopMonitoringForTesting() {
  g_keep_monitoring.store(false, std::memory_order_relaxed);
}

void HangWatcher::SetTickClockForTesting(const base::TickClock* tick_clock) {
  tick_clock_ = tick_clock;
}

void HangWatcher::BlockIfCaptureInProgress() {
  // Makes a best-effort attempt to block execution if a hang is currently being
  // captured. Only block on |capture_lock| if |capture_in_progress_| hints that
  // it's already held to avoid serializing all threads on this function when no
  // hang capture is in-progress.
  if (capture_in_progress_.load(std::memory_order_relaxed))
    base::AutoLock hang_lock(capture_lock_);
}

void HangWatcher::UnregisterThread() {
  AutoLock auto_lock(watch_state_lock_);

  auto it = ranges::find(
      watch_states_,
      internal::HangWatchState::GetHangWatchStateForCurrentThread(),
      &std::unique_ptr<internal::HangWatchState>::get);

  // Thread should be registered to get unregistered.
  CHECK(it != watch_states_.end(), base::NotFatalUntil::M125);

  watch_states_.erase(it);
}

namespace internal {
namespace {

constexpr uint64_t kOnlyDeadlineMask = 0x00FF'FFFF'FFFF'FFFFu;
constexpr uint64_t kOnlyFlagsMask = ~kOnlyDeadlineMask;
constexpr uint64_t kMaximumFlag = 0x8000'0000'0000'0000u;

// Use as a mask to keep persistent flags and the deadline.
constexpr uint64_t kPersistentFlagsAndDeadlineMask =
    kOnlyDeadlineMask |
    static_cast<uint64_t>(
        HangWatchDeadline::Flag::kIgnoreCurrentWatchHangsInScope);
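
// Taken together, the masks above define the packing of
// HangWatchDeadline::bits_: the top 8 bits hold the flags and the low 56 bits
// hold the deadline's TimeTicks internal value.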
}  // namespace

// Flag binary representation assertions.
static_assert(
    static_cast<uint64_t>(HangWatchDeadline::Flag::kMinValue) >
        kOnlyDeadlineMask,
    "Invalid numerical value for flag. Would interfere with bits of data.");
static_assert(static_cast<uint64_t>(HangWatchDeadline::Flag::kMaxValue) <=
                  kMaximumFlag,
              "A flag can only set a single bit.");

HangWatchDeadline::HangWatchDeadline() = default;
HangWatchDeadline::~HangWatchDeadline() = default;

std::pair<uint64_t, TimeTicks> HangWatchDeadline::GetFlagsAndDeadline() const {
  uint64_t bits = bits_.load(std::memory_order_relaxed);
  return std::make_pair(ExtractFlags(bits),
                        DeadlineFromBits(ExtractDeadline(bits)));
}

TimeTicks HangWatchDeadline::GetDeadline() const {
  return DeadlineFromBits(
      ExtractDeadline(bits_.load(std::memory_order_relaxed)));
}

// static
TimeTicks HangWatchDeadline::Max() {
  // |kOnlyDeadlineMask| has all the bits reserved for the TimeTicks value
  // set. This means it also represents the highest representable value.
  return DeadlineFromBits(kOnlyDeadlineMask);
}

// static
bool HangWatchDeadline::IsFlagSet(Flag flag, uint64_t flags) {
  return static_cast<uint64_t>(flag) & flags;
}

void HangWatchDeadline::SetDeadline(TimeTicks new_deadline) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  DCHECK(new_deadline <= Max()) << "Value too high to be represented.";
  DCHECK(new_deadline >= TimeTicks{}) << "Value cannot be negative.";

  if (switch_bits_callback_for_testing_) {
    const uint64_t switched_in_bits = SwitchBitsForTesting();
    // If a concurrent deadline change is tested it cannot have a deadline or
    // persistent flag change since those always happen on the same thread.
    DCHECK((switched_in_bits & kPersistentFlagsAndDeadlineMask) == 0u);
  }

  // Discard all non-persistent flags and apply deadline change.
  const uint64_t old_bits = bits_.load(std::memory_order_relaxed);
  const uint64_t new_flags =
      ExtractFlags(old_bits & kPersistentFlagsAndDeadlineMask);
  bits_.store(new_flags | ExtractDeadline(static_cast<uint64_t>(
                              new_deadline.ToInternalValue())),
              std::memory_order_relaxed);
}

// TODO(crbug.com/1087026): Add flag DCHECKs here.
bool HangWatchDeadline::SetShouldBlockOnHang(uint64_t old_flags,
                                             TimeTicks old_deadline) {
  DCHECK(old_deadline <= Max()) << "Value too high to be represented.";
  DCHECK(old_deadline >= TimeTicks{}) << "Value cannot be negative.";

  // Set the kShouldBlockOnHang flag only if |bits_| did not change since it was
  // read. kShouldBlockOnHang is the only non-persistent flag and should never
  // be set twice. Persistent flags and deadline changes are done from the same
  // thread so there is no risk of losing concurrently added information.
  uint64_t old_bits =
      old_flags | static_cast<uint64_t>(old_deadline.ToInternalValue());
  const uint64_t desired_bits =
      old_bits | static_cast<uint64_t>(Flag::kShouldBlockOnHang);

  // If a test needs to simulate |bits_| changing since it was read by the
  // caller, that change happens now.
  if (switch_bits_callback_for_testing_) {
    const uint64_t switched_in_bits = SwitchBitsForTesting();

    // Injecting the flag being tested is invalid.
    DCHECK(!IsFlagSet(Flag::kShouldBlockOnHang, switched_in_bits));
  }

  return bits_.compare_exchange_weak(old_bits, desired_bits,
                                     std::memory_order_relaxed,
                                     std::memory_order_relaxed);
}

void HangWatchDeadline::SetIgnoreCurrentWatchHangsInScope() {
  SetPersistentFlag(Flag::kIgnoreCurrentWatchHangsInScope);
}

void HangWatchDeadline::UnsetIgnoreCurrentWatchHangsInScope() {
  ClearPersistentFlag(Flag::kIgnoreCurrentWatchHangsInScope);
}

void HangWatchDeadline::SetPersistentFlag(Flag flag) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  if (switch_bits_callback_for_testing_)
    SwitchBitsForTesting();
  bits_.fetch_or(static_cast<uint64_t>(flag), std::memory_order_relaxed);
}

void HangWatchDeadline::ClearPersistentFlag(Flag flag) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  if (switch_bits_callback_for_testing_)
    SwitchBitsForTesting();
  bits_.fetch_and(~(static_cast<uint64_t>(flag)), std::memory_order_relaxed);
}

// static
uint64_t HangWatchDeadline::ExtractFlags(uint64_t bits) {
  return bits & kOnlyFlagsMask;
}

// static
uint64_t HangWatchDeadline::ExtractDeadline(uint64_t bits) {
  return bits & kOnlyDeadlineMask;
}
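
// Worked example with illustrative values: for bits = 0x8000'0000'0000'002A,
// ExtractFlags() returns 0x8000'0000'0000'0000 (a single flag bit set) and
// ExtractDeadline() returns 0x2A, i.e. a deadline of 42 in TimeTicks internal
// units.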

// static
TimeTicks HangWatchDeadline::DeadlineFromBits(uint64_t bits) {
  // |kOnlyDeadlineMask| has all the deadline bits set to 1 so is the largest
  // representable value.
  DCHECK(bits <= kOnlyDeadlineMask)
      << "Flags bits are set. Remove them before returning deadline.";
  static_assert(kOnlyDeadlineMask <= std::numeric_limits<int64_t>::max());
  return TimeTicks::FromInternalValue(static_cast<int64_t>(bits));
}

bool HangWatchDeadline::IsFlagSet(Flag flag) const {
  return bits_.load(std::memory_order_relaxed) & static_cast<uint64_t>(flag);
}

void HangWatchDeadline::SetSwitchBitsClosureForTesting(
    RepeatingCallback<uint64_t(void)> closure) {
  switch_bits_callback_for_testing_ = closure;
}

void HangWatchDeadline::ResetSwitchBitsClosureForTesting() {
  DCHECK(switch_bits_callback_for_testing_);
  switch_bits_callback_for_testing_.Reset();
}

uint64_t HangWatchDeadline::SwitchBitsForTesting() {
  DCHECK(switch_bits_callback_for_testing_);

  const uint64_t old_bits = bits_.load(std::memory_order_relaxed);
  const uint64_t new_bits = switch_bits_callback_for_testing_.Run();
  const uint64_t old_flags = ExtractFlags(old_bits);

  const uint64_t switched_in_bits = old_flags | new_bits;
  bits_.store(switched_in_bits, std::memory_order_relaxed);
  return switched_in_bits;
}

HangWatchState::HangWatchState(HangWatcher::ThreadType thread_type)
    : resetter_(&hang_watch_state, this, nullptr), thread_type_(thread_type) {
// TODO(crbug.com/1223033): Remove this once macOS uses system-wide ids.
// On macOS the thread ids used by CrashPad are not the same as the ones
// provided by PlatformThread. Make sure to use the same ids for correct
// attribution.
#if BUILDFLAG(IS_MAC)
  uint64_t thread_id;
  pthread_threadid_np(pthread_self(), &thread_id);
  thread_id_ = checked_cast<PlatformThreadId>(thread_id);
#else
  thread_id_ = PlatformThread::CurrentId();
#endif
}

HangWatchState::~HangWatchState() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);

  DCHECK_EQ(GetHangWatchStateForCurrentThread(), this);

#if DCHECK_IS_ON()
  // Destroying the HangWatchState should not be done if there are live
  // WatchHangsInScopes.
  DCHECK(!current_watch_hangs_in_scope_);
#endif
}

// static
std::unique_ptr<HangWatchState>
HangWatchState::CreateHangWatchStateForCurrentThread(
    HangWatcher::ThreadType thread_type) {
  // Allocate a watch state object for this thread.
  std::unique_ptr<HangWatchState> hang_state =
      std::make_unique<HangWatchState>(thread_type);

  // Setting the thread local worked.
  DCHECK_EQ(GetHangWatchStateForCurrentThread(), hang_state.get());

  // Transfer ownership to caller.
  return hang_state;
}

TimeTicks HangWatchState::GetDeadline() const {
  return deadline_.GetDeadline();
}

std::pair<uint64_t, TimeTicks> HangWatchState::GetFlagsAndDeadline() const {
  return deadline_.GetFlagsAndDeadline();
}

void HangWatchState::SetDeadline(TimeTicks deadline) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  deadline_.SetDeadline(deadline);
}

bool HangWatchState::IsOverDeadline() const {
  return TimeTicks::Now() > deadline_.GetDeadline();
}

void HangWatchState::SetIgnoreCurrentWatchHangsInScope() {
  deadline_.SetIgnoreCurrentWatchHangsInScope();
}

void HangWatchState::UnsetIgnoreCurrentWatchHangsInScope() {
  deadline_.UnsetIgnoreCurrentWatchHangsInScope();
}

bool HangWatchState::SetShouldBlockOnHang(uint64_t old_flags,
                                          TimeTicks old_deadline) {
  return deadline_.SetShouldBlockOnHang(old_flags, old_deadline);
}

bool HangWatchState::IsFlagSet(HangWatchDeadline::Flag flag) {
  return deadline_.IsFlagSet(flag);
}

#if DCHECK_IS_ON()
void HangWatchState::SetCurrentWatchHangsInScope(
    WatchHangsInScope* current_hang_watch_scope_enable) {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  current_watch_hangs_in_scope_ = current_hang_watch_scope_enable;
}

WatchHangsInScope* HangWatchState::GetCurrentWatchHangsInScope() {
  DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
  return current_watch_hangs_in_scope_;
}
#endif

HangWatchDeadline* HangWatchState::GetHangWatchDeadlineForTesting() {
  return &deadline_;
}

void HangWatchState::IncrementNestingLevel() {
  ++nesting_level_;
}

void HangWatchState::DecrementNestingLevel() {
  --nesting_level_;
}

// static
HangWatchState* HangWatchState::GetHangWatchStateForCurrentThread() {
  // Workaround false-positive MSAN use-of-uninitialized-value on
  // thread_local storage for loaded libraries:
  // https://github.com/google/sanitizers/issues/1265
  MSAN_UNPOISON(&hang_watch_state, sizeof(internal::HangWatchState*));

  return hang_watch_state;
}

PlatformThreadId HangWatchState::GetThreadID() const {
  return thread_id_;
}

}  // namespace internal

}  // namespace base