22 commits
6ce5a74
clean up clamping of memory regions to after kernel
m-mueller678 Oct 16, 2025
e328615
respect FDT reserved memory regions
m-mueller678 Oct 16, 2025
aa68758
do not use memory below kernel
m-mueller678 Oct 17, 2025
d2eb426
extract physical memory free list inline capacity into global
m-mueller678 Oct 17, 2025
4f2e046
use memory below kernel.
m-mueller678 Oct 17, 2025
40e7fdc
use memory below kernel only on uefi systems
m-mueller678 Oct 17, 2025
8911cfd
improve logging
m-mueller678 Oct 17, 2025
612ffde
use `Fdt::total_size`
m-mueller678 Oct 17, 2025
b78d410
Merge branch 'main' into main
m-mueller678 Oct 20, 2025
d393cca
abstract physical free list access
m-mueller678 Oct 18, 2025
6ba5dc2
clean up stack deallocation
m-mueller678 Oct 19, 2025
dcc67f1
abstract virtual free list access
m-mueller678 Oct 19, 2025
dc51431
use physical allocation abstractions in more places
m-mueller678 Oct 19, 2025
3972846
add assert_physical_unavailable
m-mueller678 Oct 19, 2025
5a95d0e
remove unused imports
m-mueller678 Oct 19, 2025
35ec530
readd mistakenly removed try_deallocate_physical
m-mueller678 Oct 19, 2025
f331cfd
fix mman
m-mueller678 Oct 19, 2025
b6f1998
introduce size_with_guards variable in aarch scheduler
m-mueller678 Oct 19, 2025
a4afa92
remove unnecessary conversion in DeviceAlloc
m-mueller678 Oct 19, 2025
0011e33
add tracing to free lists
m-mueller678 Oct 19, 2025
3484492
fix PageRange creation in allocation functions
m-mueller678 Oct 19, 2025
7cc0128
remove unused imports
m-mueller678 Oct 20, 2025
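
Taken together, the series replaces ad-hoc KERNEL_FREE_LIST / PHYSICAL_FREE_LIST locking at every call site with small helpers in mm::virtualmem and mm::physicalmem. The diffs below only show the call sites, so here is a rough sketch of what the virtual-memory helpers presumably look like; the exact signatures, the free_list::AllocError error type, and the safety contract are assumptions inferred from the calls, not the actual implementation.

// Hedged sketch of the new helpers (names from the diff, bodies assumed).
use free_list::{PageLayout, PageRange};
use memory_addresses::VirtAddr;

use crate::mm::virtualmem::KERNEL_FREE_LIST;

pub fn allocate_virtual(size: usize, align: usize) -> Result<VirtAddr, free_list::AllocError> {
    // Same steps the old call sites performed inline: build a layout, lock, allocate.
    let layout = PageLayout::from_size_align(size, align).unwrap();
    let range = KERNEL_FREE_LIST.lock().allocate(layout)?;
    Ok(VirtAddr::from(range.start()))
}

/// # Safety
/// `addr`/`size` must describe a range previously returned by `allocate_virtual`.
pub unsafe fn deallocate_virtual(addr: VirtAddr, size: usize) {
    let range = PageRange::from_start_len(addr.as_usize(), size).unwrap();
    unsafe { KERNEL_FREE_LIST.lock().deallocate(range).unwrap() }
}

allocate_physical / deallocate_physical would presumably wrap PHYSICAL_FREE_LIST the same way, returning a PhysAddr instead of a VirtAddr.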
12 changes: 3 additions & 9 deletions src/arch/aarch64/kernel/interrupts.rs
@@ -7,10 +7,8 @@ use ahash::RandomState;
use arm_gic::gicv3::{GicV3, InterruptGroup, SgiTarget, SgiTargetGroup};
use arm_gic::{IntId, Trigger};
use fdt::standard_nodes::Compatible;
-use free_list::PageLayout;
use hashbrown::HashMap;
use hermit_sync::{InterruptSpinMutex, InterruptTicketMutex, OnceCell, SpinMutex};
-use memory_addresses::VirtAddr;
use memory_addresses::arch::aarch64::PhysAddr;

use crate::arch::aarch64::kernel::core_local::increment_irq_counter;
@@ -22,7 +20,7 @@ use crate::drivers::mmio::get_interrupt_handlers;
use crate::drivers::pci::get_interrupt_handlers;
use crate::drivers::{InterruptHandlerQueue, InterruptLine};
use crate::kernel::serial::handle_uart_interrupt;
-use crate::mm::virtualmem::KERNEL_FREE_LIST;
+use crate::mm::virtualmem::allocate_virtual;
use crate::scheduler::{self, CoreId};
use crate::{core_id, core_scheduler, env};

@@ -312,9 +310,7 @@ pub(crate) fn init() {
"Found generic interrupt controller redistributor at {gicr_start:p} (size {gicr_size:#X})"
);

-let layout = PageLayout::from_size_align(gicd_size.try_into().unwrap(), 0x10000).unwrap();
-let page_range = KERNEL_FREE_LIST.lock().allocate(layout).unwrap();
-let gicd_address = VirtAddr::from(page_range.start());
+let gicd_address = allocate_virtual(gicd_size.try_into().unwrap(), 0x10000).unwrap();
debug!("Mapping GIC Distributor interface to virtual address {gicd_address:p}");

let mut flags = PageTableEntryFlags::empty();
@@ -326,9 +322,7 @@
flags,
);

-let layout = PageLayout::from_size_align(gicr_size.try_into().unwrap(), 0x10000).unwrap();
-let page_range = KERNEL_FREE_LIST.lock().allocate(layout).unwrap();
-let gicr_address = VirtAddr::from(page_range.start());
+let gicr_address = allocate_virtual(gicr_size.try_into().unwrap(), 0x10000).unwrap();
debug!("Mapping generic interrupt controller to virtual address {gicr_address:p}");
paging::map::<BasePageSize>(
gicr_address,
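
The call sites in this file all follow the same allocate-then-map pattern: reserve an aligned virtual window with the new helper, then map the device's physical registers into it. A condensed, hypothetical example of that flow (the base address, size, and the flag setup are placeholders, not values from the PR):

// Hypothetical MMIO mapping flow; addresses and sizes are illustrative only.
let gicd_start = PhysAddr::new(0x0800_0000);     // physical base reported by the FDT
let gicd_size: usize = 0x1_0000;                 // region size reported by the FDT

// Reserve a 64 KiB-aligned virtual window, then map the device registers into it.
let gicd_address = allocate_virtual(gicd_size, 0x10000).unwrap();
let mut flags = PageTableEntryFlags::empty();
// (set the device-memory / writable attributes here, as the surrounding code does)
paging::map::<BasePageSize>(
    gicd_address,
    gicd_start,
    gicd_size / BasePageSize::SIZE as usize,
    flags,
);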
7 changes: 2 additions & 5 deletions src/arch/aarch64/kernel/pci.rs
@@ -2,7 +2,6 @@ use arm_gic::{IntId, Trigger};
use bit_field::BitField;
use fdt::Fdt;
use fdt::node::FdtNode;
-use free_list::PageLayout;
use memory_addresses::arch::aarch64::{PhysAddr, VirtAddr};
use pci_types::{
Bar, CommandRegister, ConfigRegionAccess, InterruptLine, InterruptPin, MAX_BARS, PciAddress,
@@ -12,7 +11,7 @@ use pci_types::{
use crate::arch::aarch64::kernel::interrupts::GIC;
use crate::arch::aarch64::mm::paging::{self, BasePageSize, PageSize, PageTableEntryFlags};
use crate::drivers::pci::{PCI_DEVICES, PciDevice};
-use crate::mm::virtualmem::KERNEL_FREE_LIST;
+use crate::mm::virtualmem::allocate_virtual;
use crate::{core_id, env};

const PCI_MAX_DEVICE_NUMBER: u8 = 32;
@@ -224,9 +223,7 @@ pub fn init() {
let addr = PhysAddr::from(reg.starting_address.addr());
let size = u64::try_from(reg.size.unwrap()).unwrap();

-let layout = PageLayout::from_size_align(size.try_into().unwrap(), 0x1000_0000).unwrap();
-let page_range = KERNEL_FREE_LIST.lock().allocate(layout).unwrap();
-let pci_address = VirtAddr::from(page_range.start());
+let pci_address = allocate_virtual(size.try_into().unwrap(), 0x1000_0000).unwrap();
info!(
"Mapping PCI Enhanced Configuration Space interface to virtual address {pci_address:p} (size {size:#X})"
);
32 changes: 8 additions & 24 deletions src/arch/aarch64/kernel/scheduler.rs
@@ -5,14 +5,13 @@ use core::sync::atomic::Ordering;
use core::{mem, ptr};

use align_address::Align;
-use free_list::{PageLayout, PageRange};
use memory_addresses::arch::aarch64::{PhysAddr, VirtAddr};

use crate::arch::aarch64::kernel::CURRENT_STACK_ADDRESS;
use crate::arch::aarch64::kernel::core_local::core_scheduler;
use crate::arch::aarch64::mm::paging::{BasePageSize, PageSize, PageTableEntryFlags};
-use crate::mm::physicalmem::PHYSICAL_FREE_LIST;
-use crate::mm::virtualmem::KERNEL_FREE_LIST;
+use crate::mm::physicalmem::{allocate_physical, deallocate_physical};
+use crate::mm::virtualmem::{allocate_virtual, deallocate_virtual};
#[cfg(target_os = "none")]
use crate::scheduler::PerCoreSchedulerExt;
use crate::scheduler::task::{Task, TaskFrame};
@@ -127,15 +126,10 @@ impl TaskStacks {
size.align_up(BasePageSize::SIZE as usize)
};
let total_size = user_stack_size + DEFAULT_STACK_SIZE;
-let layout = PageLayout::from_size(total_size + 3 * BasePageSize::SIZE as usize).unwrap();
-let page_range = KERNEL_FREE_LIST.lock().allocate(layout).unwrap();
-let virt_addr = VirtAddr::from(page_range.start());
-let frame_layout = PageLayout::from_size(total_size).unwrap();
-let frame_range = PHYSICAL_FREE_LIST
-.lock()
-.allocate(frame_layout)
+let size_with_guards = total_size + 3 * BasePageSize::SIZE as usize;
+let virt_addr = allocate_virtual(size_with_guards, free_list::PAGE_SIZE).unwrap();
+let phys_addr = allocate_physical(total_size, free_list::PAGE_SIZE)
.expect("Failed to allocate Physical Memory for TaskStacks");
-let phys_addr = PhysAddr::from(frame_range.start());

debug!(
"Create stacks at {:p} with a size of {} KB",
@@ -232,20 +226,10 @@ impl Drop for TaskStacks {
stacks.virt_addr,
stacks.total_size / BasePageSize::SIZE as usize + 3,
);
-let range = PageRange::from_start_len(
-stacks.virt_addr.as_usize(),
-stacks.total_size + 3 * BasePageSize::SIZE as usize,
-)
-.unwrap();
+let size_with_guards = stacks.total_size + 3 * BasePageSize::SIZE as usize;
unsafe {
-KERNEL_FREE_LIST.lock().deallocate(range).unwrap();
-}
-
-let range =
-PageRange::from_start_len(stacks.phys_addr.as_usize(), stacks.total_size)
-.unwrap();
-unsafe {
-PHYSICAL_FREE_LIST.lock().deallocate(range).unwrap();
+deallocate_virtual(stacks.virt_addr, size_with_guards);
+deallocate_physical(stacks.phys_addr, stacks.total_size);
}
}
}
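
The new size_with_guards binding makes the existing stack layout explicit: the virtual reservation is total_size plus three guard pages, while only total_size bytes of physical memory back it, so running off a stack faults on an unmapped page instead of silently corrupting a neighbour. A small illustrative calculation (the constants below are placeholders, not Hermit's actual configuration):

// Illustrative arithmetic only; the real values come from DEFAULT_STACK_SIZE and BasePageSize.
let page_size: usize = 0x1000;                 // stand-in for BasePageSize::SIZE
let default_stack_size: usize = 32 * 1024;     // stand-in for DEFAULT_STACK_SIZE
let user_stack_size: usize = 64 * 1024;

let total_size = user_stack_size + default_stack_size;    // physically backed bytes
let size_with_guards = total_size + 3 * page_size;         // virtual reservation

// Only the virtual range includes the three unmapped guard pages.
assert_eq!(size_with_guards - total_size, 3 * page_size);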
8 changes: 3 additions & 5 deletions src/arch/aarch64/kernel/systemtime.rs
@@ -12,8 +12,7 @@ use time::OffsetDateTime;

use crate::arch::aarch64::mm::paging::{self, BasePageSize, PageSize, PageTableEntryFlags};
use crate::env;
-use crate::mm::virtualmem;
-use crate::mm::virtualmem::KERNEL_FREE_LIST;
+use crate::mm::virtualmem::{self, allocate_virtual};

static PL031_ADDRESS: OnceCell<VirtAddr> = OnceCell::new();
static BOOT_TIME: OnceCell<u64> = OnceCell::new();
@@ -65,9 +64,8 @@ pub fn init() {

debug!("Found RTC at {addr:p} (size {size:#X})");

-let layout = PageLayout::from_size(size.try_into().unwrap()).unwrap();
-let page_range = KERNEL_FREE_LIST.lock().allocate(layout).unwrap();
-let pl031_address = VirtAddr::from(page_range.start());
+let pl031_address =
+allocate_virtual(size.try_into().unwrap(), free_list::PAGE_SIZE).unwrap();
PL031_ADDRESS.set(pl031_address).unwrap();
debug!("Mapping RTC to virtual address {pl031_address:p}");

20 changes: 7 additions & 13 deletions src/arch/aarch64/mm/paging.rs
@@ -4,14 +4,14 @@ use core::arch::asm;
use core::marker::PhantomData;
use core::{fmt, mem, ptr};

use aarch64::paging::BASE_PAGE_SIZE;
use align_address::Align;
-use free_list::PageLayout;
use memory_addresses::{PhysAddr, VirtAddr};

use crate::arch::aarch64::kernel::{get_base_address, get_image_size, get_ram_address, processor};
use crate::env::is_uhyve;
use crate::mm::physicalmem;
use crate::mm::physicalmem::PHYSICAL_FREE_LIST;
+use crate::mm::physicalmem::{self, allocate_physical};
use crate::{KERNEL_STACK_SIZE, mm, scheduler};

/// Pointer to the root page table (called "Level 0" in ARM terminology).
@@ -496,12 +496,10 @@
// Does the table exist yet?
if !self.entries[index].is_present() {
// Allocate a single 4 KiB page for the new entry and mark it as a valid, writable subtable.
-let frame_layout = PageLayout::from_size(BasePageSize::SIZE as usize).unwrap();
-let frame_range = PHYSICAL_FREE_LIST
-.lock()
-.allocate(frame_layout)
-.expect("Unable to allocate physical memory");
-let physical_address = PhysAddr::from(frame_range.start());
+let physical_address =
+allocate_physical(BasePageSize::SIZE as usize, BasePageSize::SIZE as usize)
+.expect("Unable to allocate physical memory");

self.entries[index].set(
physical_address,
PageTableEntryFlags::NORMAL | PageTableEntryFlags::TABLE_OR_4KIB_PAGE,
@@ -668,11 +666,7 @@ pub fn map_heap<S: PageSize>(virt_addr: VirtAddr, nr_pages: usize) -> Result<(),
while map_counter < nr_pages {
let size = (nr_pages - map_counter) * S::SIZE as usize;
for i in (S::SIZE as usize..=size).rev().step_by(S::SIZE as usize) {
-let layout = PageLayout::from_size_align(i, S::SIZE as usize).unwrap();
-let frame_range = PHYSICAL_FREE_LIST.lock().allocate(layout);
-
-if let Ok(frame_range) = frame_range {
-let phys_addr = PhysAddr::from(frame_range.start());
+if let Ok(phys_addr) = allocate_physical(i, S::SIZE as usize) {
map::<S>(
virt_addr + map_counter * S::SIZE as usize,
phys_addr,
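
map_heap keeps its previous strategy, now expressed through allocate_physical: try to grab the largest remaining contiguous block and, if that fails, retry with progressively smaller requests before giving up. A self-contained sketch of that shrink-and-retry loop under the assumed allocate_physical(size, align) signature (map_pages stands in for paging::map, and the error handling is this sketch's own, not necessarily the kernel's):

// Hedged sketch of the shrink-and-retry heap mapping; not the kernel's literal code.
fn map_heap_sketch(virt_addr: VirtAddr, nr_pages: usize, page_size: usize) -> Result<(), usize> {
    let mut map_counter = 0;
    while map_counter < nr_pages {
        let remaining = (nr_pages - map_counter) * page_size;
        let mut progress = 0;
        // Largest request first, shrinking one page at a time until an allocation succeeds.
        for size in (page_size..=remaining).rev().step_by(page_size) {
            if let Ok(phys_addr) = allocate_physical(size, page_size) {
                map_pages(virt_addr + map_counter * page_size, phys_addr, size / page_size);
                progress = size / page_size;
                break;
            }
        }
        if progress == 0 {
            // Physical memory is exhausted; report how many pages were mapped.
            return Err(map_counter);
        }
        map_counter += progress;
    }
    Ok(())
}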
16 changes: 4 additions & 12 deletions src/arch/riscv64/kernel/devicetree.rs
@@ -216,18 +216,10 @@
let id = mmio.as_ptr().device_id().read();

if cfg!(debug_assertions) {
-use free_list::PageRange;
-
-use crate::mm::physicalmem::PHYSICAL_FREE_LIST;
-
-let start = virtio_region.starting_address.addr();
-let len = virtio_region.size.unwrap();
-let frame_range = PageRange::from_start_len(start, len).unwrap();
-
-PHYSICAL_FREE_LIST
-.lock()
-.allocate_at(frame_range)
-.unwrap_err();
+crate::mm::physicalmem::assert_physical_unavailable(
+PhysAddr::new(virtio_region.starting_address.addr() as u64),
+virtio_region.size.unwrap(),
+);
}

match id {
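
assert_physical_unavailable replaces the open-coded debug check above: instead of attempting an allocate_at inline and requiring it to fail, driver code now asserts that the MMIO range was already carved out of the physical free list. One plausible shape for the helper, assuming it keeps the same allocate_at-must-fail trick (the real implementation in mm::physicalmem may differ):

// Hedged sketch: panic if the given physical range is still handed out by the allocator.
pub fn assert_physical_unavailable(addr: PhysAddr, len: usize) {
    let range = PageRange::from_start_len(addr.as_usize(), len).unwrap();
    // Reserving a region that was properly excluded (kernel image, FDT reservations,
    // MMIO holes) must fail; success would mean the free list overlaps the device.
    assert!(
        PHYSICAL_FREE_LIST.lock().allocate_at(range).is_err(),
        "physical region is unexpectedly available in the free list"
    );
}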
9 changes: 2 additions & 7 deletions src/arch/riscv64/kernel/mod.rs
@@ -19,7 +19,6 @@ use core::ptr;
use core::sync::atomic::{AtomicPtr, AtomicU32, AtomicU64, Ordering};

use fdt::Fdt;
-use free_list::PageLayout;
use memory_addresses::{PhysAddr, VirtAddr};
use riscv::register::sstatus;

@@ -29,7 +28,7 @@ use crate::arch::riscv64::kernel::processor::lsb;
use crate::config::KERNEL_STACK_SIZE;
use crate::env;
use crate::init_cell::InitCell;
-use crate::mm::physicalmem::PHYSICAL_FREE_LIST;
+use crate::mm::physicalmem::allocate_physical;

// Used to store information about available harts. The index of the hart in the vector
// represents its CpuId and does not need to match its hart_id
@@ -161,12 +160,8 @@ pub fn boot_next_processor() {
if let Some(next_hart_id) = next_hart_index {
{
debug!("Allocating stack for hard_id {next_hart_id}");
-let frame_layout = PageLayout::from_size(KERNEL_STACK_SIZE).unwrap();
-let frame_range = PHYSICAL_FREE_LIST
-.lock()
-.allocate(frame_layout)
+let stack = allocate_physical(KERNEL_STACK_SIZE, free_list::PAGE_SIZE)
.expect("Failed to allocate boot stack for new core");
-let stack = PhysAddr::from(frame_range.start());
CURRENT_STACK_ADDRESS.store(stack.as_usize() as _, Ordering::Relaxed);
}

32 changes: 8 additions & 24 deletions src/arch/riscv64/kernel/scheduler.rs
@@ -1,14 +1,13 @@
use core::{mem, ptr};

use align_address::Align;
-use free_list::{PageLayout, PageRange};
use memory_addresses::{PhysAddr, VirtAddr};

use crate::arch::riscv64::kernel::core_local::core_scheduler;
use crate::arch::riscv64::kernel::processor::set_oneshot_timer;
use crate::arch::riscv64::mm::paging::{BasePageSize, PageSize, PageTableEntryFlags};
-use crate::mm::physicalmem::PHYSICAL_FREE_LIST;
-use crate::mm::virtualmem::KERNEL_FREE_LIST;
+use crate::mm::physicalmem::{allocate_physical, deallocate_physical};
+use crate::mm::virtualmem::{allocate_virtual, deallocate_virtual};
use crate::scheduler::task::{Task, TaskFrame};
use crate::{DEFAULT_STACK_SIZE, KERNEL_STACK_SIZE};

@@ -111,15 +110,10 @@ impl TaskStacks {
size.align_up(BasePageSize::SIZE as usize)
};
let total_size = user_stack_size + DEFAULT_STACK_SIZE + KERNEL_STACK_SIZE;
-let layout = PageLayout::from_size(total_size + 4 * BasePageSize::SIZE as usize).unwrap();
-let page_range = KERNEL_FREE_LIST.lock().allocate(layout).unwrap();
-let virt_addr = VirtAddr::from(page_range.start());
-let frame_layout = PageLayout::from_size(total_size).unwrap();
-let frame_range = PHYSICAL_FREE_LIST
-.lock()
-.allocate(frame_layout)
+let size_with_guards = total_size + 4 * BasePageSize::SIZE as usize;
+let virt_addr = allocate_virtual(size_with_guards, free_list::PAGE_SIZE).unwrap();
+let phys_addr = allocate_physical(total_size, free_list::PAGE_SIZE)
.expect("Failed to allocate Physical Memory for TaskStacks");
-let phys_addr = PhysAddr::from(frame_range.start());

debug!(
"Create stacks at {:#X} with a size of {} KB",
@@ -249,20 +243,10 @@ impl Drop for TaskStacks {
stacks.total_size / BasePageSize::SIZE as usize + 4,
//stacks.total_size / BasePageSize::SIZE as usize,
);
-let range = PageRange::from_start_len(
-stacks.virt_addr.as_usize(),
-stacks.total_size + 4 * BasePageSize::SIZE as usize,
-)
-.unwrap();
+let size_with_guards = stacks.total_size + 4 * BasePageSize::SIZE as usize;
unsafe {
-KERNEL_FREE_LIST.lock().deallocate(range).unwrap();
-}
-
-let range =
-PageRange::from_start_len(stacks.phys_addr.as_usize(), stacks.total_size)
-.unwrap();
-unsafe {
-PHYSICAL_FREE_LIST.lock().deallocate(range).unwrap();
+deallocate_virtual(stacks.virt_addr, size_with_guards);
+deallocate_physical(stacks.phys_addr, stacks.total_size);
}
}
}
16 changes: 5 additions & 11 deletions src/arch/riscv64/mm/paging.rs
@@ -2,14 +2,13 @@ use core::marker::PhantomData;
use core::ptr;

use align_address::Align;
-use free_list::PageLayout;
use hermit_sync::SpinMutex;
use memory_addresses::{AddrRange, PhysAddr, VirtAddr};
use riscv::asm::sfence_vma;
use riscv::register::satp;
use riscv::register::satp::Satp;

-use crate::mm::physicalmem::PHYSICAL_FREE_LIST;
+use crate::mm::physicalmem::allocate_physical;

static ROOT_PAGETABLE: SpinMutex<PageTable<L2Table>> = SpinMutex::new(PageTable::new());

@@ -434,9 +433,8 @@
// Does the table exist yet?
if !self.entries[index].is_present() {
// Allocate a single 4 KiB page for the new entry and mark it as a valid, writable subtable.
-let frame_layout = PageLayout::from_size(BasePageSize::SIZE as usize).unwrap();
-let frame_range = PHYSICAL_FREE_LIST.lock().allocate(frame_layout).unwrap();
-let new_entry = PhysAddr::from(frame_range.start());
+let new_entry =
+allocate_physical(BasePageSize::SIZE as usize, free_list::PAGE_SIZE).unwrap();
self.entries[index].set(new_entry, PageTableEntryFlags::BLANK);

// trace!("new_entry {:#X}", new_entry);
@@ -613,12 +611,8 @@ pub fn map_heap<S: PageSize>(virt_addr: VirtAddr, count: usize) -> Result<(), us
let virt_addrs = (0..count as u64).map(|n| virt_addr + n * S::SIZE);

for (map_counter, virt_addr) in virt_addrs.enumerate() {
-let layout = PageLayout::from_size_align(S::SIZE as usize, S::SIZE as usize).unwrap();
-let frame_range = PHYSICAL_FREE_LIST
-.lock()
-.allocate(layout)
-.map_err(|_| map_counter)?;
-let phys_addr = PhysAddr::from(frame_range.start());
+let phys_addr =
+allocate_physical(S::SIZE as usize, S::SIZE as usize).map_err(|_| map_counter)?;
map::<S>(virt_addr, phys_addr, 1, flags);
}
