// tests/test_bytes_vec_alloc.rs
use std::alloc::{GlobalAlloc, Layout, System};
use std::ptr::null_mut;
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};

use bytes::{Buf, Bytes};
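
// This test file installs a bookkeeping global allocator: every allocation
// is recorded as a (pointer, size) pair, and every deallocation is checked
// against that record, so a `Bytes` that frees its buffer with a mismatched
// `Layout` panics instead of silently corrupting the heap.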

#[global_allocator]
static LEDGER: Ledger = Ledger::new();

const LEDGER_LENGTH: usize = 2048;

struct Ledger {
    alloc_table: [(AtomicPtr<u8>, AtomicUsize); LEDGER_LENGTH],
}

impl Ledger {
    const fn new() -> Self {
        const ELEM: (AtomicPtr<u8>, AtomicUsize) =
            (AtomicPtr::new(null_mut()), AtomicUsize::new(0));
        let alloc_table = [ELEM; LEDGER_LENGTH];

        Self { alloc_table }
    }

    /// Iterate over the table until we find an open entry, then claim that
    /// entry for this allocation. If the table is full, the allocation goes
    /// untracked, and a later `remove` for its pointer will panic.
    fn insert(&self, ptr: *mut u8, size: usize) {
        for (entry_ptr, entry_size) in self.alloc_table.iter() {
            // SeqCst is more than strong enough here; we don't care about perf,
            // we just want to be correct!
            if entry_ptr
                .compare_exchange(null_mut(), ptr, Ordering::SeqCst, Ordering::SeqCst)
                .is_ok()
            {
                entry_size.store(size, Ordering::SeqCst);
                break;
            }
        }
    }

    fn remove(&self, ptr: *mut u8) -> usize {
        for (entry_ptr, entry_size) in self.alloc_table.iter() {
            // Swap in a sentinel address that no real allocation will ever match,
            // so the entry can never be claimed twice and there is no race window.
            //
            // Entries are never reclaimed; LEDGER_LENGTH is sized generously to
            // compensate for that.
            if entry_ptr
                .compare_exchange(
                    ptr,
                    invalid_ptr(usize::MAX),
                    Ordering::SeqCst,
                    Ordering::SeqCst,
                )
                .is_ok()
            {
                return entry_size.load(Ordering::SeqCst);
            }
        }

        panic!("Couldn't find a matching entry for {:x?}", ptr);
    }
}

unsafe impl GlobalAlloc for Ledger {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let size = layout.size();
        let ptr = System.alloc(layout);
        // A null return means the allocation failed; there is nothing to
        // track (and recording null would permanently burn a table entry).
        if !ptr.is_null() {
            self.insert(ptr, size);
        }
        ptr
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        let orig_size = self.remove(ptr);

        if orig_size != layout.size() {
            panic!(
                "bad dealloc: alloc size was {}, dealloc size is {}",
                orig_size,
                layout.size()
            );
        } else {
            System.dealloc(ptr, layout);
        }
    }
}
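
// A minimal sanity check for the ledger itself (our own addition, not part
// of the upstream suite): an ordinary heap allocation should be recorded by
// `alloc` and validated by `dealloc` with no size mismatch.
#[test]
fn test_ledger_tracks_plain_vec() {
    // The Vec's buffer goes through Ledger::alloc on creation and
    // Ledger::dealloc when dropped at the end of this scope.
    let v = vec![0u8; 128];
    assert_eq!(v.len(), 128);
}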

#[test]
fn test_bytes_advance() {
    let mut bytes = Bytes::from(vec![10, 20, 30]);
    bytes.advance(1);
    drop(bytes);
}

#[test]
fn test_bytes_truncate() {
    let mut bytes = Bytes::from(vec![10, 20, 30]);
    bytes.truncate(2);
    drop(bytes);
}

#[test]
fn test_bytes_truncate_and_advance() {
    let mut bytes = Bytes::from(vec![10, 20, 30]);
    bytes.truncate(2);
    bytes.advance(1);
    drop(bytes);
}
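
// An illustrative extra case (our addition): `split_off` promotes the Bytes
// to its shared, reference-counted representation, so dropping both halves
// exercises the shared-vtable deallocation path under the ledger.
#[test]
fn test_bytes_split_off_and_drop() {
    let mut bytes = Bytes::from(vec![10, 20, 30, 40]);
    let tail = bytes.split_off(2);
    drop(bytes);
    drop(tail);
}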

/// Returns a dangling pointer with the given address. This is used to store
/// integer data in pointer fields. Building the pointer with `wrapping_add`
/// from null, rather than casting `addr as *mut T`, keeps the sentinel
/// well-defined under pointer-provenance checkers such as Miri.
#[inline]
fn invalid_ptr<T>(addr: usize) -> *mut T {
    let ptr = std::ptr::null_mut::<u8>().wrapping_add(addr);
    debug_assert_eq!(ptr as usize, addr);
    ptr.cast::<T>()
}
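
// A small self-check for `invalid_ptr` (our own addition): the sentinel
// address used by `Ledger::remove` should survive the cast unchanged.
#[test]
fn test_invalid_ptr_preserves_address() {
    let ptr = invalid_ptr::<u8>(usize::MAX);
    assert_eq!(ptr as usize, usize::MAX);
}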

#[test]
fn test_bytes_into_vec() {
    let vec = vec![33u8; 1024];

    // Test case where kind == KIND_VEC
    let b1 = Bytes::from(vec.clone());
    assert_eq!(Vec::from(b1), vec);

    // Test case where kind == KIND_ARC, ref_cnt == 1
    let b1 = Bytes::from(vec.clone());
    drop(b1.clone());
    assert_eq!(Vec::from(b1), vec);

    // Test case where kind == KIND_ARC, ref_cnt == 2
    let b1 = Bytes::from(vec.clone());
    let b2 = b1.clone();
    assert_eq!(Vec::from(b1), vec);

    // Test case where vtable == SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1
    assert_eq!(Vec::from(b2), vec);

    // Test case where offset != 0
    let mut b1 = Bytes::from(vec.clone());
    let b2 = b1.split_off(20);

    assert_eq!(Vec::from(b2), vec[20..]);
    assert_eq!(Vec::from(b1), vec[..20]);
}
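
// One more offset case, sketched as an addition to the suite above: `advance`
// also produces offset != 0, and converting back to a Vec must still free the
// original buffer with the correct layout.
#[test]
fn test_bytes_advance_into_vec() {
    let vec = vec![33u8; 64];
    let mut b1 = Bytes::from(vec.clone());
    b1.advance(10);
    assert_eq!(Vec::from(b1), vec[10..]);
}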