Commit a8ed490

scx_rustland_core/alloc.rs: Sync with upstream
Sync with the latest upstream updates and remove unused code.

Ref: https://github.com/jjyr/buddy-alloc

Signed-off-by: Cheng-Yang Chou <[email protected]>
1 parent 050776c commit a8ed490

1 file changed: +95 −91 lines changed
rust/scx_rustland_core/src/alloc.rs

@@ -107,21 +107,25 @@ struct Node {
 impl Node {
     fn init(list: *mut Node) {
         unsafe {
-            (*list).next = list;
-            (*list).prev = list;
+            list.write(Node {
+                next: list,
+                prev: list,
+            });
         }
     }
 
     fn remove(list: *mut Node) {
         unsafe {
-            (*(*list).prev).next = (*list).next;
-            (*(*list).next).prev = (*list).prev;
+            // To prevent the compiler from optimizing aliased pointers;
+            // details: https://github.com/jjyr/buddy-alloc/issues/16
+            core::ptr::write_volatile(&mut (*(*list).prev).next, (*list).next);
+            core::ptr::write_volatile(&mut (*(*list).next).prev, (*list).prev);
         }
     }
 
     fn pop(list: *mut Node) -> *mut Node {
         debug_assert!(!Self::is_empty(list));
-        let n_list = unsafe { (*list).next };
+        let n_list: *mut Node = unsafe { (*list).next };
         Self::remove(n_list);
         n_list
     }
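
Note: the switch to core::ptr::write_volatile follows the upstream fix for https://github.com/jjyr/buddy-alloc/issues/16: the optimizer could fold the two pointer stores in remove() by assuming prev, next, and the node itself never alias, yet in a one-element circular list they are all the same node. Below is a minimal, self-contained sketch of the pattern; the Node and unlink here are local stand-ins for illustration, not this crate's API.

use core::ptr;

struct Node {
    next: *mut Node,
    prev: *mut Node,
}

// Unlink `node` from a circular doubly-linked list. The volatile
// stores force both writes to happen exactly as written, even when
// `prev`, `next`, and `node` all point at the same memory.
unsafe fn unlink(node: *mut Node) {
    ptr::write_volatile(&mut (*(*node).prev).next, (*node).next);
    ptr::write_volatile(&mut (*(*node).next).prev, (*node).prev);
}

fn main() {
    let mut sentinel = Node { next: ptr::null_mut(), prev: ptr::null_mut() };
    let s: *mut Node = &mut sentinel;
    unsafe {
        // Self-linked sentinel: an empty list, as in Node::init above.
        s.write(Node { next: s, prev: s });
        unlink(s); // unlinking the sole node leaves it self-linked
        assert_eq!((*s).next, s);
        assert_eq!((*s).prev, s);
    }
}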
@@ -135,8 +139,10 @@ impl Node {
         };
         // pointer aligned to 16 bytes (MIN_LEAF_SIZE_ALIGN), so it's safe to use write
         p.write(n_list);
-        (*(*list).next).prev = p;
-        (*list).next = p;
+        // To prevent the compiler from optimizing aliased pointers;
+        // details: https://github.com/jjyr/buddy-alloc/issues/16
+        core::ptr::write_volatile(&mut (*(*list).next).prev, p);
+        core::ptr::write_volatile(&mut (*list).next, p);
     }
 }
 
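For context, the hunk above is the tail of Node's insertion routine: p is first written with the node value n_list, then spliced in as the new first element using the same volatile stores as in remove(). A hypothetical push in that style, showing the invariant the splice maintains (an illustrative sketch, not the crate's code):

use core::ptr;

struct Node {
    next: *mut Node,
    prev: *mut Node,
}

// Hypothetical head-insertion after the sentinel `list`, mirroring
// the two volatile stores shown in the diff above.
unsafe fn push(list: *mut Node, p: *mut Node) {
    p.write(Node { next: (*list).next, prev: list });
    ptr::write_volatile(&mut (*(*list).next).prev, p);
    ptr::write_volatile(&mut (*list).next, p);
}

fn main() {
    let mut sentinel = Node { next: ptr::null_mut(), prev: ptr::null_mut() };
    let mut elem = Node { next: ptr::null_mut(), prev: ptr::null_mut() };
    let list: *mut Node = &mut sentinel;
    let p: *mut Node = &mut elem;
    unsafe {
        list.write(Node { next: list, prev: list }); // empty list
        push(list, p);
        assert_eq!((*list).next, p); // p is now the first element
        assert_eq!((*p).next, list); // and the list stays circular
        assert_eq!((*list).prev, p);
    }
}
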
@@ -210,96 +216,94 @@ impl BuddyAlloc {
     /// and must guarantee no others write to the memory range, to avoid undefined behavior.
     /// The new function panics if the memory space is not enough to initialize BuddyAlloc.
     pub unsafe fn new(param: BuddyAllocParam) -> Self {
-        unsafe {
-            let BuddyAllocParam {
-                base_addr,
-                len,
-                leaf_size,
-                zero_filled,
-            } = param;
-            let mut base_addr = base_addr as usize;
-            let end_addr = base_addr + len;
-            assert!(
-                leaf_size % MIN_LEAF_SIZE_ALIGN == 0 && leaf_size != 0,
-                "{}",
-                LEAF_ALIGN_ERROR_MSG
-            );
-            let leaf2base = log2(leaf_size);
-            base_addr = roundup(base_addr, leaf2base);
-            // we use (k + 1)-th entry's split flag to test existence of k-th entry's blocks;
-            // according to this convention, we make a dummy (entries_size - 1)-th entry,
-            // so we add 2 to entries_size.
-            let entries_size = log2((end_addr - base_addr) >> leaf2base) + 2;
-
-            // alloc buddy allocator memory
-            let used_bytes = core::mem::size_of::<Entry>() * entries_size;
-            debug_assert!(end_addr >= base_addr + used_bytes, "{}", OOM_MSG);
-            let entries = base_addr as *mut Entry;
-            base_addr += used_bytes;
-
-            let buddy_list_size = core::mem::size_of::<Node>();
-            // init entries free
-            for k in 0..entries_size {
-                // use one bit per memory block
-                debug_assert!(end_addr >= base_addr + buddy_list_size, "{}", OOM_MSG);
-                let entry = entries.add(k).as_mut().expect("entry");
-                entry.free = base_addr as *mut Node;
-                if !zero_filled {
-                    core::ptr::write_bytes(entry.free, 0, buddy_list_size);
-                }
-                Node::init(entry.free);
-                base_addr += buddy_list_size;
+        let BuddyAllocParam {
+            base_addr,
+            len,
+            leaf_size,
+            zero_filled,
+        } = param;
+        let mut base_addr = base_addr as usize;
+        let end_addr = base_addr + len;
+        assert!(
+            leaf_size % MIN_LEAF_SIZE_ALIGN == 0 && leaf_size != 0,
+            "{}",
+            LEAF_ALIGN_ERROR_MSG
+        );
+        let leaf2base = log2(leaf_size);
+        base_addr = roundup(base_addr, leaf2base);
+        // we use (k + 1)-th entry's split flag to test existence of k-th entry's blocks;
+        // according to this convention, we make a dummy (entries_size - 1)-th entry,
+        // so we add 2 to entries_size.
+        let entries_size = log2((end_addr - base_addr) >> leaf2base) + 2;
+
+        // alloc buddy allocator memory
+        let used_bytes = core::mem::size_of::<Entry>() * entries_size;
+        debug_assert!(end_addr >= base_addr + used_bytes, "{}", OOM_MSG);
+        let entries = base_addr as *mut Entry;
+        base_addr += used_bytes;
+
+        let buddy_list_size = core::mem::size_of::<Node>();
+        // init entries free
+        for k in 0..entries_size {
+            // use one bit per memory block
+            debug_assert!(end_addr >= base_addr + buddy_list_size, "{}", OOM_MSG);
+            let entry = entries.add(k).as_mut().expect("entry");
+            entry.free = base_addr as *mut Node;
+            if !zero_filled {
+                core::ptr::write_bytes(entry.free, 0, buddy_list_size);
             }
+            Node::init(entry.free);
+            base_addr += buddy_list_size;
+        }
 
-            // init alloc
-            for k in 0..entries_size {
-                // use one bit per memory block
-                // use shift instead of `/`; 8 == 1 << 3
-                let used_bytes = roundup(nblock(k, entries_size), 3) >> 3;
-                debug_assert!(end_addr >= base_addr + used_bytes, "{}", OOM_MSG);
-                let entry = entries.add(k).as_mut().expect("entry");
-                entry.alloc = base_addr as *mut u8;
-                // mark all blocks as allocated
-                if !zero_filled {
-                    core::ptr::write_bytes(entry.alloc, 0, used_bytes);
-                }
-                base_addr += used_bytes;
+        // init alloc
+        for k in 0..entries_size {
+            // use one bit per memory block
+            // use shift instead of `/`; 8 == 1 << 3
+            let used_bytes = roundup(nblock(k, entries_size), 3) >> 3;
+            debug_assert!(end_addr >= base_addr + used_bytes, "{}", OOM_MSG);
+            let entry = entries.add(k).as_mut().expect("entry");
+            entry.alloc = base_addr as *mut u8;
+            // mark all blocks as allocated
+            if !zero_filled {
+                core::ptr::write_bytes(entry.alloc, 0, used_bytes);
             }
+            base_addr += used_bytes;
+        }
 
-            // init split
-            for k in 1..entries_size {
-                // use one bit per memory block
-                // use shift instead of `/`; 8 == 1 << 3
-                let used_bytes = roundup(nblock(k, entries_size), 3) >> 3;
-                debug_assert!(end_addr >= base_addr + used_bytes, "{}", OOM_MSG);
-                let entry = entries.add(k).as_mut().expect("entry");
-                entry.split = base_addr as *mut u8;
-                if !zero_filled {
-                    core::ptr::write_bytes(entry.split, 0, used_bytes);
-                }
-                base_addr += used_bytes;
+        // init split
+        for k in 1..entries_size {
+            // use one bit per memory block
+            // use shift instead of `/`; 8 == 1 << 3
+            let used_bytes = roundup(nblock(k, entries_size), 3) >> 3;
+            debug_assert!(end_addr >= base_addr + used_bytes, "{}", OOM_MSG);
+            let entry = entries.add(k).as_mut().expect("entry");
+            entry.split = base_addr as *mut u8;
+            if !zero_filled {
+                core::ptr::write_bytes(entry.split, 0, used_bytes);
             }
-
-            // align base_addr to leaf size
-            base_addr = roundup(base_addr, leaf2base);
-            assert!(end_addr >= base_addr, "{}", OOM_MSG);
-            debug_assert_eq!(
-                (base_addr >> leaf2base) << leaf2base,
-                base_addr,
-                "misalignment"
-            );
-
-            let mut allocator = BuddyAlloc {
-                base_addr,
-                end_addr,
-                entries,
-                entries_size,
-                leaf2base,
-                unavailable: 0,
-            };
-            allocator.init_free_list();
-            allocator
+            base_addr += used_bytes;
         }
+
+        // align base_addr to leaf size
+        base_addr = roundup(base_addr, leaf2base);
+        assert!(end_addr >= base_addr, "{}", OOM_MSG);
+        debug_assert_eq!(
+            (base_addr >> leaf2base) << leaf2base,
+            base_addr,
+            "misalignment"
+        );
+
+        let mut allocator = BuddyAlloc {
+            base_addr,
+            end_addr,
+            entries,
+            entries_size,
+            leaf2base,
+            unavailable: 0,
+        };
+        allocator.init_free_list();
+        allocator
     }
 
     fn init_free_list(&mut self) {
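
For orientation, a hypothetical caller of BuddyAlloc::new as shown above. The four BuddyAllocParam fields come from the diff; building the param as a struct literal, the base_addr field type, and the surrounding setup are assumptions for illustration only.

// Assumes BuddyAlloc and BuddyAllocParam are in scope (this module's types).
use std::alloc::{alloc, Layout};

fn main() {
    const HEAP_LEN: usize = 1 << 20; // 1 MiB region for the allocator to manage
    const LEAF_SIZE: usize = 16;     // must be a nonzero multiple of MIN_LEAF_SIZE_ALIGN

    // Reserve a raw backing region.
    let layout = Layout::from_size_align(HEAP_LEN, LEAF_SIZE).unwrap();
    let base = unsafe { alloc(layout) };
    assert!(!base.is_null());

    // Safety: we own base..base+HEAP_LEN and nothing else writes to it,
    // per the function's documented contract.
    let _buddy = unsafe {
        BuddyAlloc::new(BuddyAllocParam {
            base_addr: base as *const u8, // assumed type, per `base_addr as usize` in new()
            len: HEAP_LEN,
            leaf_size: LEAF_SIZE,
            zero_filled: false, // alloc() does not zero the region
        })
    };
    // _buddy now manages the region; the backing memory is leaked here for brevity.
}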
