path: root/base/memory/discardable_shared_memory.cc
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/memory/discardable_shared_memory.h"

#if defined(OS_POSIX)
#include <unistd.h>
#endif

#include <algorithm>

#include "base/atomicops.h"
#include "base/logging.h"
#include "base/numerics/safe_math.h"
#include "base/process/process_metrics.h"

#if defined(OS_ANDROID)
#include "third_party/ashmem/ashmem.h"
#endif

namespace base {
namespace {

// Use a machine-sized pointer as the atomic type. It will use the Atomic32 or
// Atomic64 routines, depending on the architecture.
typedef intptr_t AtomicType;
typedef uintptr_t UAtomicType;

// Template specialization for timestamp serialization/deserialization. This
// is used to serialize timestamps using Unix time on systems where AtomicType
// does not have enough precision to contain a timestamp in the standard
// serialized format.
template <int>
Time TimeFromWireFormat(int64 value);
template <int>
int64 TimeToWireFormat(Time time);

// Serialize to Unix time when using 4-byte wire format.
// Note: this will cease to work on 19 January 2038, when the seconds value no
// longer fits in the bits left after the lock bit.
template <>
Time ALLOW_UNUSED_TYPE TimeFromWireFormat<4>(int64 value) {
  return value ? Time::UnixEpoch() + TimeDelta::FromSeconds(value) : Time();
}
template <>
int64 ALLOW_UNUSED_TYPE TimeToWireFormat<4>(Time time) {
  return time > Time::UnixEpoch() ? (time - Time::UnixEpoch()).InSeconds() : 0;
}
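// Example: with the 4-byte format, a Time 90.5 seconds past the Unix epoch
// serializes to 90 and deserializes to exactly 90 seconds past the epoch;
// sub-second precision is intentionally dropped.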

// Standard serialization format when using 8-byte wire format.
template <>
Time ALLOW_UNUSED_TYPE TimeFromWireFormat<8>(int64 value) {
  return Time::FromInternalValue(value);
}
template <>
int64 ALLOW_UNUSED_TYPE TimeToWireFormat<8>(Time time) {
  return time.ToInternalValue();
}

struct SharedState {
  enum LockState { UNLOCKED = 0, LOCKED = 1 };

  explicit SharedState(AtomicType ivalue) { value.i = ivalue; }
  SharedState(LockState lock_state, Time timestamp) {
    int64 wire_timestamp = TimeToWireFormat<sizeof(AtomicType)>(timestamp);
    DCHECK_GE(wire_timestamp, 0);
    DCHECK_EQ(lock_state & ~1, 0);
    value.u = (static_cast<UAtomicType>(wire_timestamp) << 1) | lock_state;
  }

  LockState GetLockState() const { return static_cast<LockState>(value.u & 1); }

  Time GetTimestamp() const {
    return TimeFromWireFormat<sizeof(AtomicType)>(value.u >> 1);
  }

  // Bit 1: Lock state. Bit is set when locked.
  // Bit 2..sizeof(AtomicType)*8: Usage timestamp. NULL time when locked or
  // purged.
  union {
    AtomicType i;
    UAtomicType u;
  } value;
};
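// Example: SharedState(LOCKED, Time()) packs to the value 1 (lock bit set,
// zero timestamp), while SharedState(UNLOCKED, t) packs to
// TimeToWireFormat<sizeof(AtomicType)>(t) << 1 with the lock bit clear.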

// Shared state is stored at offset 0 in shared memory segments.
SharedState* SharedStateFromSharedMemory(const SharedMemory& shared_memory) {
  DCHECK(shared_memory.memory());
  return static_cast<SharedState*>(shared_memory.memory());
}

// Round up |size| to a multiple of alignment, which must be a power of two.
size_t Align(size_t alignment, size_t size) {
  DCHECK_EQ(alignment & (alignment - 1), 0u);
  return (size + alignment - 1) & ~(alignment - 1);
}
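// Example: with a 4096-byte alignment, Align(4096, 1) and Align(4096, 4096)
// both return 4096, while Align(4096, 4097) returns 8192.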

// Round up |size| to a multiple of page size.
size_t AlignToPageSize(size_t size) {
  return Align(base::GetPageSize(), size);
}

}  // namespace

DiscardableSharedMemory::DiscardableSharedMemory()
    : mapped_size_(0), locked_page_count_(0) {
}

DiscardableSharedMemory::DiscardableSharedMemory(
    SharedMemoryHandle shared_memory_handle)
    : shared_memory_(shared_memory_handle, false),
      mapped_size_(0),
      locked_page_count_(0) {
}

DiscardableSharedMemory::~DiscardableSharedMemory() {
}

bool DiscardableSharedMemory::CreateAndMap(size_t size) {
  CheckedNumeric<size_t> checked_size = size;
  checked_size += AlignToPageSize(sizeof(SharedState));
  if (!checked_size.IsValid())
    return false;

  if (!shared_memory_.CreateAndMapAnonymous(checked_size.ValueOrDie()))
    return false;

  mapped_size_ =
      shared_memory_.mapped_size() - AlignToPageSize(sizeof(SharedState));

  locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
#if DCHECK_IS_ON()
  for (size_t page = 0; page < locked_page_count_; ++page)
    locked_pages_.insert(page);
#endif

  DCHECK(last_known_usage_.is_null());
  SharedState new_state(SharedState::LOCKED, Time());
  subtle::Release_Store(&SharedStateFromSharedMemory(shared_memory_)->value.i,
                        new_state.value.i);
  return true;
}
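
// Note: the resulting segment layout is a SharedState header padded out to a
// full page, followed by the caller-visible data; memory() below returns a
// pointer just past the header.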

bool DiscardableSharedMemory::Map(size_t size) {
  if (!shared_memory_.Map(AlignToPageSize(sizeof(SharedState)) + size))
    return false;

  mapped_size_ =
      shared_memory_.mapped_size() - AlignToPageSize(sizeof(SharedState));

  locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
#if DCHECK_IS_ON()
  for (size_t page = 0; page < locked_page_count_; ++page)
    locked_pages_.insert(page);
#endif

  return true;
}

bool DiscardableSharedMemory::Unmap() {
  if (!shared_memory_.Unmap())
    return false;

  mapped_size_ = 0;
  return true;
}

DiscardableSharedMemory::LockResult DiscardableSharedMemory::Lock(
    size_t offset, size_t length) {
  DCHECK_EQ(AlignToPageSize(offset), offset);
  DCHECK_EQ(AlignToPageSize(length), length);

  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);

  DCHECK(shared_memory_.memory());

  // The platform-independent lock must be acquired before individual pages can
  // be locked.
  if (!locked_page_count_) {
    // Return FAILED when the instance has been purged or was not initialized
    // properly, which is detected by |last_known_usage_| being NULL.
    if (last_known_usage_.is_null())
      return FAILED;

    SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
    SharedState new_state(SharedState::LOCKED, Time());
    SharedState result(subtle::Acquire_CompareAndSwap(
        &SharedStateFromSharedMemory(shared_memory_)->value.i,
        old_state.value.i,
        new_state.value.i));
    if (result.value.u != old_state.value.u) {
      // Update |last_known_usage_| in case the above CAS failed because of
      // an incorrect timestamp.
      last_known_usage_ = result.GetTimestamp();
      return FAILED;
    }
  }

  // Zero for length means "everything onward".
  if (!length)
    length = AlignToPageSize(mapped_size_) - offset;

  size_t start = offset / base::GetPageSize();
  size_t end = start + length / base::GetPageSize();
  DCHECK_LT(start, end);
  DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());
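  // Example: with 4 KiB pages, offset 8192 and length 12288 give start = 2 and
  // end = 5, so pages 2, 3 and 4 are locked below.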

  // Add pages to |locked_page_count_|.
  // Note: Locking a page that is already locked is an error.
  locked_page_count_ += end - start;
#if DCHECK_IS_ON()
  // Detect incorrect usage by keeping track of exactly what pages are locked.
  for (auto page = start; page < end; ++page) {
    auto result = locked_pages_.insert(page);
    DCHECK(result.second);
  }
  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
#endif

#if defined(OS_ANDROID)
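  // Pin the pages in ashmem so the kernel will not discard them while locked.
  // ashmem_pin_region() is expected to return a non-zero value if the region
  // was purged while unpinned, in which case the previous contents are gone.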
  SharedMemoryHandle handle = shared_memory_.handle();
  if (SharedMemory::IsHandleValid(handle)) {
    if (ashmem_pin_region(
            handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) {
      return PURGED;
    }
  }
#endif

  return SUCCESS;
}
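
// Illustrative usage sketch (not part of the original implementation): the
// owning process typically calls CreateAndMap(), writes through memory() while
// the segment starts out locked, and calls Unlock(0, 0) once the data is not
// in active use. A later Lock(0, 0) returns SUCCESS if the previous contents
// are still resident, PURGED if the pages were discarded and must be
// repopulated, and FAILED if the lock could not be taken at all, e.g. after a
// successful Purge().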

void DiscardableSharedMemory::Unlock(size_t offset, size_t length) {
  DCHECK_EQ(AlignToPageSize(offset), offset);
  DCHECK_EQ(AlignToPageSize(length), length);

  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);

  // Zero for length means "everything onward".
  if (!length)
    length = AlignToPageSize(mapped_size_) - offset;

  DCHECK(shared_memory_.memory());

#if defined(OS_ANDROID)
  SharedMemoryHandle handle = shared_memory_.handle();
  if (SharedMemory::IsHandleValid(handle)) {
    if (ashmem_unpin_region(
            handle.fd, AlignToPageSize(sizeof(SharedState)) + offset, length)) {
      DPLOG(ERROR) << "ashmem_unpin_region() failed";
    }
  }
#endif

  size_t start = offset / base::GetPageSize();
  size_t end = start + length / base::GetPageSize();
  DCHECK_LT(start, end);
  DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());

  // Remove pages from |locked_page_count_|.
  // Note: Unlocking a page that is not locked is an error.
  DCHECK_GE(locked_page_count_, end - start);
  locked_page_count_ -= end - start;
#if DCHECK_IS_ON()
  // Detect incorrect usage by keeping track of exactly what pages are locked.
  for (auto page = start; page < end; ++page) {
    auto erased_count = locked_pages_.erase(page);
    DCHECK_EQ(1u, erased_count);
  }
  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
#endif

  // Early out and avoid releasing the platform-independent lock if some pages
  // are still locked.
  if (locked_page_count_)
    return;

  Time current_time = Now();
  DCHECK(!current_time.is_null());

  SharedState old_state(SharedState::LOCKED, Time());
  SharedState new_state(SharedState::UNLOCKED, current_time);
  // Note: the timestamp cannot be NULL, as a NULL time is reserved for the
  // locked and purged states.
  DCHECK(!new_state.GetTimestamp().is_null());
  // Timestamp precision should at least be accurate to the second.
  DCHECK_EQ((new_state.GetTimestamp() - Time::UnixEpoch()).InSeconds(),
            (current_time - Time::UnixEpoch()).InSeconds());
  SharedState result(subtle::Release_CompareAndSwap(
      &SharedStateFromSharedMemory(shared_memory_)->value.i,
      old_state.value.i,
      new_state.value.i));

  DCHECK_EQ(old_state.value.u, result.value.u);

  last_known_usage_ = current_time;
}

void* DiscardableSharedMemory::memory() const {
  return reinterpret_cast<uint8*>(shared_memory_.memory()) +
         AlignToPageSize(sizeof(SharedState));
}

bool DiscardableSharedMemory::Purge(Time current_time) {
  // Calls to this function must be synchronized properly.
  DFAKE_SCOPED_LOCK(thread_collision_warner_);

  // Early out if not mapped. This can happen if the segment was previously
  // unmapped using a call to Close().
  if (!shared_memory_.memory())
    return true;

  SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
  SharedState new_state(SharedState::UNLOCKED, Time());
  SharedState result(subtle::Acquire_CompareAndSwap(
      &SharedStateFromSharedMemory(shared_memory_)->value.i,
      old_state.value.i,
      new_state.value.i));

  // Update |last_known_usage_| to |current_time| if the memory is locked. This
  // allows the caller to determine whether purging failed because the last
  // known usage was incorrect or because the memory was locked. In the second
  // case, the caller should most likely wait for some time before attempting
  // to purge the memory again.
  if (result.value.u != old_state.value.u) {
    last_known_usage_ = result.GetLockState() == SharedState::LOCKED
                            ? current_time
                            : result.GetTimestamp();
    return false;
  }

  last_known_usage_ = Time();
  return true;
}

bool DiscardableSharedMemory::IsMemoryResident() const {
  DCHECK(shared_memory_.memory());

  SharedState result(subtle::NoBarrier_Load(
      &SharedStateFromSharedMemory(shared_memory_)->value.i));

  return result.GetLockState() == SharedState::LOCKED ||
         !result.GetTimestamp().is_null();
}

void DiscardableSharedMemory::Close() {
  shared_memory_.Close();
}

#if defined(DISCARDABLE_SHARED_MEMORY_SHRINKING)
void DiscardableSharedMemory::Shrink() {
#if defined(OS_POSIX)
  SharedMemoryHandle handle = shared_memory_.handle();
  if (!SharedMemory::IsHandleValid(handle))
    return;

  // Truncate shared memory to size of SharedState.
  if (HANDLE_EINTR(ftruncate(SharedMemory::GetFdFromSharedMemoryHandle(handle),
                             AlignToPageSize(sizeof(SharedState)))) != 0) {
    DPLOG(ERROR) << "ftruncate() failed";
    return;
  }
  mapped_size_ = 0;
#else
  NOTIMPLEMENTED();
#endif
}
#endif

Time DiscardableSharedMemory::Now() const {
  return Time::Now();
}

}  // namespace base