From d612e67f526e2c2ce933b1df309db6ea0a605563 Mon Sep 17 00:00:00 2001
From: Dan Cross
Date: Mon, 6 Feb 2023 15:49:07 +0000
Subject: [PATCH 1/2] global: prototype transfer vectors

Add a prototype transfer vector into `global`.

Signed-off-by: Dan Cross
---
 Cargo.lock                 |  1 +
 global/Cargo.toml          |  1 +
 global/src/link.ld         | 10 +++++++++-
 global/src/main.rs         |  5 ++++-
 global/src/x86_64/mod.rs   |  8 ++++++++
 global/src/x86_64/swtch.rs |  0
 global/src/x86_64/xferv.rs | 29 +++++++++++++++++++++++++++++
 7 files changed, 52 insertions(+), 2 deletions(-)
 create mode 100644 global/src/x86_64/mod.rs
 create mode 100644 global/src/x86_64/swtch.rs
 create mode 100644 global/src/x86_64/xferv.rs

diff --git a/Cargo.lock b/Cargo.lock
index 1620ac0..0fe6af7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -154,6 +154,7 @@ name = "global"
 version = "0.1.0"
 dependencies = [
  "hypatia",
+ "uart",
  "x86_64",
 ]

diff --git a/global/Cargo.toml b/global/Cargo.toml
index e45c89d..acb936f 100644
--- a/global/Cargo.toml
+++ b/global/Cargo.toml
@@ -15,3 +15,4 @@ edition = "2024"
 [dependencies]
 arch = { package = "x86_64", path = "../x86_64" }
 hypatia = { path = "../hypatia" }
+uart = { path = "../uart" }
diff --git a/global/src/link.ld b/global/src/link.ld
index a825abb..18f2c70 100644
--- a/global/src/link.ld
+++ b/global/src/link.ld
@@ -1,5 +1,5 @@
 /*
- * Copyright 2021 The Hypatia Authors
+ * Copyright 2023 The Hypatia Authors
  * All rights reserved
  *
  * Use of this source code is governed by an MIT-style
@@ -9,9 +9,17 @@
 
 ENTRY(init)
 
+EXTERN(xferv);
+
 SECTIONS {
     . = 0xFFFFFC0000000000;
 
+    .xferv . :
+    {
+        KEEP(*(.xferv*))
+    }
+    . = ALIGN(4096);
+
     .text . :
     {
         *(.text*)
diff --git a/global/src/main.rs b/global/src/main.rs
index 6b6c7cb..de49e6f 100644
--- a/global/src/main.rs
+++ b/global/src/main.rs
@@ -1,4 +1,4 @@
-// Copyright 2021 The Hypatia Authors
+// Copyright 2023 The Hypatia Authors
 // All rights reserved
 //
 // Use of this source code is governed by an MIT-style
@@ -11,6 +11,8 @@
 #![forbid(elided_lifetimes_in_paths)]
 #![forbid(unsafe_op_in_unsafe_fn)]
 
+mod x86_64;
+
 use arch::Page4K;
 
 /// Returns a static reference to the global zero page.
@@ -23,6 +25,7 @@ pub fn zero_page() -> &'static Page4K {
 #[unsafe(no_mangle)]
 pub extern "C" fn init() {
     zero_page();
+    uart::panic_println!("Hello from global");
 }
 
 hypatia::runtime!();
diff --git a/global/src/x86_64/mod.rs b/global/src/x86_64/mod.rs
new file mode 100644
index 0000000..d09b490
--- /dev/null
+++ b/global/src/x86_64/mod.rs
@@ -0,0 +1,8 @@
+// Copyright 2023 The Hypatia Authors
+// All rights reserved
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file or at
+// https://opensource.org/licenses/MIT.
+
+mod xferv;
diff --git a/global/src/x86_64/swtch.rs b/global/src/x86_64/swtch.rs
new file mode 100644
index 0000000..e69de29
diff --git a/global/src/x86_64/xferv.rs b/global/src/x86_64/xferv.rs
new file mode 100644
index 0000000..9e2344d
--- /dev/null
+++ b/global/src/x86_64/xferv.rs
@@ -0,0 +1,29 @@
+// Copyright 2023 The Hypatia Authors
+// All rights reserved
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file or at
+// https://opensource.org/licenses/MIT.
+
+use core::arch::naked_asm;
+
+#[unsafe(export_name = "xferv")]
+#[unsafe(link_section = ".xferv")]
+#[unsafe(naked)]
+unsafe extern "C" fn xferv() {
+    naked_asm!(r#"
+        .balign 8; jmp {hi};
+        .balign 8; jmp {bye};
+        "#,
+        hi = sym hi,
+        bye = sym bye,
+        options(att_syntax));
+}
+
+extern "C" fn hi() {
+    uart::panic_println!("Hi!");
+}
+
+extern "C" fn bye() {
+    uart::panic_println!("Bye!");
+}

From 36e23848dc63da2bbd37fe760e0510cb7ff66589 Mon Sep 17 00:00:00 2001
From: Dan Cross
Date: Mon, 6 Feb 2023 15:53:28 +0000
Subject: [PATCH 2/2] node: per-node/CPU segments

Signed-off-by: Dan Cross
---
 Cargo.lock               |  10 ++
 Cargo.toml               |   1 +
 global/src/main.rs       |   9 --
 node/Cargo.toml          |  18 ++++
 node/build.rs            |  15 +++
 node/src/link.ld         |  56 +++++++++++
 node/src/main.rs         |  31 ++++++
 node/src/x86_64/gdt.rs   |  36 +++++++
 node/src/x86_64/idt.rs   |  23 +++++
 node/src/x86_64/mod.rs   |  18 ++++
 node/src/x86_64/tss.rs   |  14 +++
 node/src/x86_64/xferv.rs |  29 ++++++
 rustfmt.toml             |   2 +-
 theon/src/main.rs        |  33 ++++---
 x86_64/Cargo.toml        |   1 +
 x86_64/src/gdt.rs        |   9 +-
 x86_64/src/lib.rs        |   2 +-
 x86_64/src/vm.rs         | 203 +++++++++++++++++++++++++++++++--------
 xtask/src/main.rs        |   1 +
 19 files changed, 442 insertions(+), 69 deletions(-)
 create mode 100644 node/Cargo.toml
 create mode 100644 node/build.rs
 create mode 100644 node/src/link.ld
 create mode 100644 node/src/main.rs
 create mode 100644 node/src/x86_64/gdt.rs
 create mode 100644 node/src/x86_64/idt.rs
 create mode 100644 node/src/x86_64/mod.rs
 create mode 100644 node/src/x86_64/tss.rs
 create mode 100644 node/src/x86_64/xferv.rs

diff --git a/Cargo.lock b/Cargo.lock
index 0fe6af7..6b8dde4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -219,6 +219,15 @@ dependencies = [
  "paste",
 ]
 
+[[package]]
+name = "node"
+version = "0.1.0"
+dependencies = [
+ "hypatia",
+ "uart",
+ "x86_64",
+]
+
 [[package]]
 name = "once_cell_polyfill"
 version = "1.70.1"
@@ -495,6 +504,7 @@ dependencies = [
  "bitflags 2.9.1",
  "bitstruct",
  "seq-macro",
+ "static_assertions",
  "x86",
  "zerocopy",
 ]
diff --git a/Cargo.toml b/Cargo.toml
index 69765d8..c3434c1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,6 +12,7 @@ members = [
     "hypatia",
     "memory",
     "monitor",
+    "node",
     "scheduler",
     "supervisor",
     "system",
diff --git a/global/src/main.rs b/global/src/main.rs
index de49e6f..2a1c4c4 100644
--- a/global/src/main.rs
+++ b/global/src/main.rs
@@ -13,18 +13,9 @@
 
 mod x86_64;
 
-use arch::Page4K;
-
-/// Returns a static reference to the global zero page.
-pub fn zero_page() -> &'static Page4K {
-    const ZERO_PAGE: Page4K = Page4K::new();
-    &ZERO_PAGE
-}
-
 /// Initialize the system.
 #[unsafe(no_mangle)]
 pub extern "C" fn init() {
-    zero_page();
     uart::panic_println!("Hello from global");
 }
 
diff --git a/node/Cargo.toml b/node/Cargo.toml
new file mode 100644
index 0000000..ef34eb8
--- /dev/null
+++ b/node/Cargo.toml
@@ -0,0 +1,18 @@
+# Copyright 2023 The Hypatia Authors
+# All rights reserved
+#
+# Use of this source code is governed by an MIT-style
+# license that can be found in the LICENSE file or at
+# https://opensource.org/licenses/MIT.
+
+[package]
+name = "node"
+version = "0.1.0"
+edition = "2024"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+arch = { package = "x86_64", path = "../x86_64" }
+hypatia = { path = "../hypatia" }
+uart = { path = "../uart" }
diff --git a/node/build.rs b/node/build.rs
new file mode 100644
index 0000000..e5c61c6
--- /dev/null
+++ b/node/build.rs
@@ -0,0 +1,15 @@
+// Copyright 2023 The Hypatia Authors
+// All rights reserved
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file or at
+// https://opensource.org/licenses/MIT.
+
+use std::env;
+
+fn main() {
+    let target = env::var("TARGET").unwrap();
+    if target.as_str() == "x86_64-unknown-none-elf" {
+        println!("cargo:rustc-link-arg-bins=-Tnode/src/link.ld")
+    }
+}
diff --git a/node/src/link.ld b/node/src/link.ld
new file mode 100644
index 0000000..875647e
--- /dev/null
+++ b/node/src/link.ld
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2023 The Hypatia Authors
+ * All rights reserved
+ *
+ * Use of this source code is governed by an MIT-style
+ * license that can be found in the LICENSE file or at
+ * https://opensource.org/licenses/MIT.
+ */
+
+ENTRY(init)
+
+SECTIONS {
+    . = 0xFFFFFB4000000000;
+
+    .xferv . :
+    {
+        KEEP(*(.xferv*))
+    }
+    . = ALIGN(4096);
+
+    .text . :
+    {
+        *(.text*)
+    }
+    . = ALIGN(4096);
+    PROVIDE(etext = .);
+
+    .rodata . :
+    {
+        *(.rodata*)
+    }
+    . = ALIGN(4096);
+    PROVIDE(erodata = .);
+
+    .got . :
+    {
+        *(.got*)
+    }
+    . = ALIGN(4096);
+
+    .data . :
+    {
+        *(.data*)
+    }
+    . = ALIGN(4096);
+    PROVIDE(edata = .);
+
+    .bss . :
+    {
+        *(.bss*)
+        *(COMMON)
+    }
+    . = ALIGN(4096);
+
+    PROVIDE(end = .);
+}
diff --git a/node/src/main.rs b/node/src/main.rs
new file mode 100644
index 0000000..a9dca1e
--- /dev/null
+++ b/node/src/main.rs
@@ -0,0 +1,31 @@
+// Copyright 2023 The Hypatia Authors
+// All rights reserved
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file or at
+// https://opensource.org/licenses/MIT.
+
+#![feature(sync_unsafe_cell)]
+#![cfg_attr(not(test), no_main)]
+#![cfg_attr(not(test), no_std)]
+#![forbid(absolute_paths_not_starting_with_crate)]
+#![forbid(elided_lifetimes_in_paths)]
+#![forbid(unsafe_op_in_unsafe_fn)]
+
+use arch::Page4K;
+
+mod x86_64;
+
+/// Returns a static reference to the global zero page.
+pub fn zero_page() -> &'static Page4K {
+    const ZERO_PAGE: Page4K = Page4K::new();
+    &ZERO_PAGE
+}
+
+/// Initialize the system.
+#[unsafe(no_mangle)]
+pub extern "C" fn init() {
+    x86_64::init();
+}
+
+hypatia::runtime!();
diff --git a/node/src/x86_64/gdt.rs b/node/src/x86_64/gdt.rs
new file mode 100644
index 0000000..c1991aa
--- /dev/null
+++ b/node/src/x86_64/gdt.rs
@@ -0,0 +1,36 @@
+// Copyright 2023 The Hypatia Authors
+// All rights reserved
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file or at
+// https://opensource.org/licenses/MIT.
+
+use arch::{Page, V4KA, VPageAddr, gdt};
+use core::sync::atomic::{AtomicBool, Ordering};
+
+#[unsafe(link_section = ".gdt")]
+static mut GDT: gdt::GDT = gdt::GDT::empty();
+static INITED: AtomicBool = AtomicBool::new(false);
+
+pub(crate) fn map() {
+    let zeros = crate::zero_page();
+    let va = V4KA::new((&raw const GDT).addr());
+    const R: bool = true;
+    const NW: bool = false;
+    const NX: bool = false;
+    for k in 1..16 {
+        let zva = V4KA::new(va.addr() + k * 4096);
+        arch::vm::map_leaf(zeros.frame(), zva, R, NW, NX).expect("mapped zero page in GDT");
+    }
+}
+
+pub(crate) fn init(task_state: &arch::tss::TSS) {
+    if !INITED.swap(true, Ordering::AcqRel) {
+        let gdtp = &raw mut GDT;
+        let gdt = unsafe { &mut *gdtp };
+        gdt.init(task_state);
+        unsafe {
+            arch::gdt::load(gdt);
+        }
+    }
+}
diff --git a/node/src/x86_64/idt.rs b/node/src/x86_64/idt.rs
new file mode 100644
index 0000000..2eb61d6
--- /dev/null
+++ b/node/src/x86_64/idt.rs
@@ -0,0 +1,23 @@
+// Copyright 2023 The Hypatia Authors
+// All rights reserved
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file or at
+// https://opensource.org/licenses/MIT.
+
+use core::cell::SyncUnsafeCell;
+use core::sync::atomic::{AtomicBool, Ordering};
+
+static IDT: SyncUnsafeCell<arch::idt::IDT> = SyncUnsafeCell::new(arch::idt::IDT::empty());
+static INITED: AtomicBool = AtomicBool::new(false);
+
+pub(crate) fn init() {
+    if INITED.swap(true, Ordering::AcqRel) {
+        panic!("double init node IDT");
+    }
+    let idt = unsafe { &mut *IDT.get() };
+    idt.init(arch::trap::stubs());
+    unsafe {
+        arch::idt::load(idt);
+    }
+}
diff --git a/node/src/x86_64/mod.rs b/node/src/x86_64/mod.rs
new file mode 100644
index 0000000..5ffddbd
--- /dev/null
+++ b/node/src/x86_64/mod.rs
@@ -0,0 +1,18 @@
+// Copyright 2023 The Hypatia Authors
+// All rights reserved
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file or at
+// https://opensource.org/licenses/MIT.
+
+pub(crate) mod gdt;
+pub(crate) mod idt;
+pub(crate) mod tss;
+mod xferv;
+
+pub(crate) fn init() {
+    idt::init();
+    gdt::map();
+    let tss = tss::init();
+    gdt::init(tss);
+}
diff --git a/node/src/x86_64/tss.rs b/node/src/x86_64/tss.rs
new file mode 100644
index 0000000..4b84972
--- /dev/null
+++ b/node/src/x86_64/tss.rs
@@ -0,0 +1,14 @@
+// Copyright 2023 The Hypatia Authors
+// All rights reserved
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file or at
+// https://opensource.org/licenses/MIT.
+
+use core::cell::SyncUnsafeCell;
+
+static TSS: SyncUnsafeCell<arch::tss::TSS> = SyncUnsafeCell::new(arch::tss::TSS::empty());
+
+pub(crate) fn init() -> &'static arch::tss::TSS {
+    unsafe { &*TSS.get() }
+}
diff --git a/node/src/x86_64/xferv.rs b/node/src/x86_64/xferv.rs
new file mode 100644
index 0000000..2936c31
--- /dev/null
+++ b/node/src/x86_64/xferv.rs
@@ -0,0 +1,29 @@
+// Copyright 2023 The Hypatia Authors
+// All rights reserved
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file or at
+// https://opensource.org/licenses/MIT.
+
+use core::arch::naked_asm;
+
+#[unsafe(export_name = "xferv")]
+#[unsafe(link_section = ".xferv")]
+#[unsafe(naked)]
+pub unsafe extern "C" fn xferv() {
+    naked_asm!(r#"
+        .balign 8; jmp {hi};
+        .balign 8; jmp {bye};
+        "#,
+        hi = sym hi,
+        bye = sym bye,
+        options(att_syntax));
+}
+
+pub extern "C" fn hi() {
+    uart::panic_println!("Hi!");
+}
+
+pub extern "C" fn bye() {
+    uart::panic_println!("Bye!");
+}
diff --git a/rustfmt.toml b/rustfmt.toml
index a70acca..0c214eb 100644
--- a/rustfmt.toml
+++ b/rustfmt.toml
@@ -1,3 +1,3 @@
-edition = "2021"
+edition = "2024"
 use_small_heuristics = "Max"
 newline_style = "Unix"
diff --git a/theon/src/main.rs b/theon/src/main.rs
index 2c0ec89..27e54b2 100644
--- a/theon/src/main.rs
+++ b/theon/src/main.rs
@@ -41,10 +41,10 @@
 //! execution. Theon will locate them, and load them into
 //! physical memory.
 //!
-//! Each binary is allocated a 16MiB region of physical RAM for
-//! its various pages; these regions begin at 64MiB and are
-//! aligned on 32MiB boundaries, giving us room for loading new
-//! images into the second 16MiBs of each binary's region for
+//! Each binary is allocated an 8MiB region of physical RAM for
+//! its various pages; these regions begin at 64MiB and are
+//! aligned on 16MiB boundaries, giving us room for loading new
+//! images into the second 8MiB of each binary's region for
 //! hitless update.
 //!
 //! Binaries represent either tasks or segments; see HDP 0002
@@ -100,13 +100,13 @@ enum BinaryType {
 /// or a task).
 type BinaryMeta = (&'static str, HPA, BinaryType);
 
-/// Binaries are loaded in 16MiB regions of physical memory
-/// that are aligned on 32MiB boundaries, starting at 64MiB.
+/// Binaries are loaded in 8MiB regions of physical memory
+/// that are aligned on 16MiB boundaries, starting at 64MiB.
 const fn load_addr(offset: usize) -> HPA {
-    let addr = (64 + offset * 32) * MIB;
+    let addr = (64 + offset * 16) * MIB;
     HPA::new(addr as u64)
 }
-const BINARY_IMAGE_MEMORY_SIZE: usize = 16 * MIB;
+const BINARY_IMAGE_MEMORY_SIZE: usize = 8 * MIB;
 
 /// A table describing all the binaries that are loaded by
 /// theon, where to load them in physical memory, and their
@@ -116,12 +116,13 @@ const BINARY_TABLE: &[BinaryMeta] = &[
     ("global", load_addr(1), BinaryType::Segment),
     ("memory", load_addr(2), BinaryType::Segment),
     ("monitor", load_addr(3), BinaryType::Segment),
-    ("scheduler", load_addr(4), BinaryType::Segment),
-    ("supervisor", load_addr(5), BinaryType::Segment),
-    ("trace", load_addr(6), BinaryType::Segment),
-    ("system", load_addr(7), BinaryType::Task),
-    ("vcpu", load_addr(8), BinaryType::Task),
-    ("vm", load_addr(9), BinaryType::Task),
+    ("node", load_addr(4), BinaryType::Segment),
+    ("scheduler", load_addr(5), BinaryType::Segment),
+    ("supervisor", load_addr(6), BinaryType::Segment),
+    ("trace", load_addr(7), BinaryType::Segment),
+    ("system", load_addr(8), BinaryType::Task),
+    ("vcpu", load_addr(9), BinaryType::Task),
+    ("vm", load_addr(10), BinaryType::Task),
 ];
 const BINARY_LOAD_REGION_START: HPA = load_addr(0);
 const BINARY_LOAD_REGION_END: HPA = load_addr(BINARY_TABLE.len());
@@ -153,10 +154,12 @@ pub extern "C" fn main(mbinfo_phys: u64) -> ! {
     let archive = goblin::archive::Archive::parse(bins.bytes).expect("cannot parse bin.a");
     uart::panic_println!("Binary archive: {:#x?}", archive);
     clear_binary_load_region();
+    let mut roots = alloc::vec::Vec::with_capacity(BINARY_TABLE.len());
     for &(name, addr, typ) in BINARY_TABLE {
         let bytes = archive.extract(name, bins.bytes).expect("cannot extract elf");
         let region_end = addr.offset(BINARY_IMAGE_MEMORY_SIZE);
-        load(name, typ, bytes, addr..region_end).expect("loaded binary");
+        let root = load(name, typ, bytes, addr..region_end).expect("loaded binary");
+        roots.push((name, root));
     }
     unsafe { core::arch::asm!("int3") };
     // Start other CPUs.
diff --git a/x86_64/Cargo.toml b/x86_64/Cargo.toml
index 03d4a44..173a607 100644
--- a/x86_64/Cargo.toml
+++ b/x86_64/Cargo.toml
@@ -17,5 +17,6 @@ bitflags = "*"
 bit_field = "*"
 bitstruct = "*"
 seq-macro = "*"
+static_assertions = "*"
 x86 = "*"
 zerocopy = { version = "*", features = [ "derive" ] }
diff --git a/x86_64/src/gdt.rs b/x86_64/src/gdt.rs
index 54857f1..deec255 100644
--- a/x86_64/src/gdt.rs
+++ b/x86_64/src/gdt.rs
@@ -12,12 +12,13 @@
 use crate::segment;
 use crate::tss::TSS;
 use core::arch::asm;
+use static_assertions::const_assert_eq;
 
 /// Support the x86_64 64-bit Global Descriptor Table.
 ///
 /// We waste a few bytes per CPU by allocating a 4KiB page for
 /// the GDT, then we map that at the known GDT location in the
-/// per-CPU virtual memory segment, but we pad that out to 64KiB
+/// per-node virtual memory segment. We pad that out to 64KiB
 /// by mapping the zero page repeatedly beyond the end of the
 /// GDT proper.
 ///
@@ -34,7 +35,9 @@ pub struct GDT {
     _usertext: segment::Descriptor,
     _unused: segment::Descriptor, // For alignment.
     task: segment::TaskStateDescriptor,
+    _empty: [u64; 512 - 8],
 }
+const_assert_eq!(core::mem::size_of::<GDT>(), 4096);
 
 impl GDT {
     pub const fn empty() -> GDT {
@@ -46,6 +49,7 @@
             _usertext: segment::Descriptor::empty(),
             _unused: segment::Descriptor::empty(),
             task: segment::TaskStateDescriptor::empty(),
+            _empty: [0; 512 - 8],
         }
     }
 
@@ -71,12 +75,11 @@
     /// descriptor.
     ///
     /// # Safety
-    ///
     /// Called on a valid GDT.
     unsafe fn lgdt(&self) {
         let ptr: *const Self = self;
         let base = u64::try_from(ptr.addr()).unwrap();
-        const LIMIT: u16 = core::mem::size_of::<Self>() as u16 - 1;
+        const LIMIT: u16 = 65535;
         unsafe {
             asm!(r#"
                 subq $16, %rsp;
diff --git a/x86_64/src/lib.rs b/x86_64/src/lib.rs
index 0f74e1d..d83784e 100644
--- a/x86_64/src/lib.rs
+++ b/x86_64/src/lib.rs
@@ -115,7 +115,7 @@
 
     fn frame(&self) -> Self::FrameType {
         let addr = self.vaddr().addr();
-        let pfa = vm::translate(addr);
+        let pfa = vm::translate(addr).expect("Page is mapped");
         Self::FrameType::new(pfa)
     }
 }
diff --git a/x86_64/src/vm.rs b/x86_64/src/vm.rs
index c0d1e2d..26ae8e6 100644
--- a/x86_64/src/vm.rs
+++ b/x86_64/src/vm.rs
@@ -242,6 +242,14 @@ trait Level {
         let entry = unsafe { Self::side_pte_ref(va) };
         entry.assign(pte);
     }
+
+    /// # Safety
+    ///
+    /// This is not safe. It requires that some address space is side-loaded
+    /// before calling.
+    unsafe fn make_side_level<A>(va: V4KA, allocator: &mut A) -> Result<()>
+    where
+        A: FnMut() -> Result<PF4K>;
 }
 
 enum Level4 {}
@@ -265,6 +273,18 @@ impl Level for Level4 {
     fn decode(pte: PTE) -> Option<L4E> {
         if pte.is_present() { Some(L4E::Next(pte)) } else { None }
     }
+
+    unsafe fn make_side_level<A>(va: V4KA, allocator: &mut A) -> Result<()>
+    where
+        A: FnMut() -> Result<PF4K>,
+    {
+        unsafe {
+            if Level4::side_entry(va.addr()).is_none() {
+                Level4::set_side_entry(va.addr(), alloc_inner(allocator)?);
+            }
+        }
+        Ok(())
+    }
 }
 
 impl Level for Level3 {
@@ -283,6 +303,19 @@
             Some(L3E::Next(pte))
         }
     }
+
+    unsafe fn make_side_level<A>(va: V4KA, allocator: &mut A) -> Result<()>
+    where
+        A: FnMut() -> Result<PF4K>,
+    {
+        unsafe {
+            Level4::make_side_level(va, allocator)?;
+            if Level3::side_entry(va.addr()).is_none() {
+                Level3::set_side_entry(va.addr(), alloc_inner(allocator)?);
+            }
+        }
+        Ok(())
+    }
 }
 
 impl Level for Level2 {
@@ -301,6 +334,19 @@
             Some(L2E::Next(pte))
         }
     }
+
+    unsafe fn make_side_level<A>(va: V4KA, allocator: &mut A) -> Result<()>
+    where
+        A: FnMut() -> Result<PF4K>,
+    {
+        unsafe {
+            Level3::make_side_level(va, allocator)?;
+            if Level2::side_entry(va.addr()).is_none() {
+                Level2::set_side_entry(va.addr(), alloc_inner(allocator)?);
+            }
+        }
+        Ok(())
+    }
 }
 
 impl Level for Level1 {
@@ -313,6 +359,19 @@
     fn decode(pte: PTE) -> Option<L1E> {
         if !pte.is_present() { None } else { Some(L1E::Page(PF4K(pte.pfa()))) }
     }
+
+    unsafe fn make_side_level<A>(va: V4KA, allocator: &mut A) -> Result<()>
+    where
+        A: FnMut() -> Result<PF4K>,
+    {
+        unsafe {
+            Level2::make_side_level(va, allocator)?;
+            if Level1::side_entry(va.addr()).is_none() {
+                Level1::set_side_entry(va.addr(), alloc_inner(allocator)?);
+            }
+        }
+        Ok(())
+    }
 }
 
 #[repr(C, align(4096))]
@@ -326,7 +385,7 @@ impl PageTable {
     }
 
     pub fn root_addr(&self) -> HPA {
-        translate_ptr(self)
+        translate_ptr(self).expect("mapped object is mapped")
     }
 
     pub const fn proto_ptr() -> *const PageTable {
@@ -371,22 +430,26 @@ fn walk(va: usize) -> Walk {
 
 /// Translates the virtual address of the given pointer in the current
 /// address space to a host physical address.
-pub fn translate_ptr<T>(p: *const T) -> HPA {
+pub fn translate_ptr<T>(p: *const T) -> Option<HPA> {
     translate(p.addr())
 }
 
-pub fn translate(va: usize) -> HPA {
-    match walk(va) {
+pub fn translate(va: usize) -> Option<HPA> {
+    translate_walk(va, walk(va))
+}
+
+fn translate_walk(va: usize, w: Walk) -> Option<HPA> {
+    match w {
         Walk(Some(_), Some(L3E::Next(_)), Some(L2E::Next(_)), Some(L1E::Page(PF4K(hpa)))) => {
-            hpa.offset(va & <Level1 as Level>::PageType::MASK)
+            Some(hpa.offset(va & <Level1 as Level>::PageType::MASK))
         }
         Walk(Some(_), Some(L3E::Next(_)), Some(L2E::Page(PF2M(hpa))), _) => {
-            hpa.offset(va & <Level2 as Level>::PageType::MASK)
+            Some(hpa.offset(va & <Level2 as Level>::PageType::MASK))
        }
         Walk(Some(_), Some(L3E::Page(PF1G(hpa))), _, _) => {
-            hpa.offset(va & <Level3 as Level>::PageType::MASK)
+            Some(hpa.offset(va & <Level3 as Level>::PageType::MASK))
         }
-        Walk(_, _, _, _) => HPA::new(0),
+        Walk(_, _, _, _) => None,
     }
 }
 
@@ -398,20 +461,16 @@ where
 {
     let va = va.addr();
     assert!(va < Level1::SIDE_BASE_ADDRESS, "attempting to map in the recursive region");
-    let inner_flags = PTEFlags::PRESENT | PTEFlags::WRITE;
 
     let w = walk(va);
     if let Walk(None, _, _, _) = w {
-        let pml4e = allocator()?;
-        Level4::set_entry(va, PTE::new(pml4e.pfa(), inner_flags));
+        Level4::set_entry(va, alloc_inner(allocator)?);
     }
     if let Walk(_, None, _, _) = w {
-        let pml3e = allocator()?;
-        Level3::set_entry(va, PTE::new(pml3e.pfa(), inner_flags));
+        Level3::set_entry(va, alloc_inner(allocator)?);
     }
     if let Walk(_, _, None, _) = w {
-        let pml2e = allocator()?;
-        Level2::set_entry(va, PTE::new(pml2e.pfa(), inner_flags));
+        Level2::set_entry(va, alloc_inner(allocator)?);
     }
     if let Walk(_, _, _, None) = w {
         Level1::set_entry(va, PTE::new(hpf.pfa(), flags));
@@ -421,6 +480,9 @@
     }
 }
 
+/// Maps a leaf node into the address space. Requires that the
+/// intermediate paging structures for the mapping already
+/// exist.
 pub fn map_leaf(hpf: PF4K, va: V4KA, r: bool, w: bool, x: bool) -> Result<()> {
     let flags = page_perm_flags(r, w, x);
     let mut allocator = || Err("not a leaf");
@@ -451,6 +513,17 @@ fn page_perm_flags(r: bool, w: bool, x: bool) -> PTEFlags {
     flags
 }
 
+// Allocate an "inner" node in the radix tree; that is, make a
+// new interior node that is, itself, a page table.
+fn alloc_inner<A>(allocator: &mut A) -> Result<PTE>
+where
+    A: FnMut() -> Result<PF4K>,
+{
+    let inner_flags = PTEFlags::PRESENT | PTEFlags::WRITE;
+    let p = allocator()?;
+    Ok(PTE::new(p.pfa(), inner_flags))
+}
+
 // Makes the paging structures at a given level for the
 // specified regions and page permissions.
 fn make_ranges_level<L: Level, F>(ranges: &[Range<V4KA>], allocator: &mut F) -> Result<()>
@@ -526,14 +599,62 @@ where
 {
         }
         Ok(())
     }
-    let _tlb = TLBFlushGuard::new();
     unsafe {
         side_load(side)?;
     }
     make_shared_ranges_level4::<_>(ranges, allocator)?;
     make_ranges_level::<Level3, _>(ranges, allocator)?;
     make_ranges_level::<Level2, _>(ranges, allocator)?;
-    unsafe { unload_side() }
+    unload_side()
 }
 
+/// Shares some subtree of an address space into a side-loaded
+/// space.
+pub fn share_range<A>(range: Range<V4KA>, side: PF4K, allocator: &mut A) -> Result<PF4K>
+where
+    A: FnMut() -> Result<PF4K>,
+{
+    const SIZE_512G: usize = <Level4 as Level>::PageType::SIZE;
+    const SIZE_1G: usize = <Level3 as Level>::PageType::SIZE;
+    const SIZE_2M: usize = <Level2 as Level>::PageType::SIZE;
+    const SIZE_4K: usize = <Level1 as Level>::PageType::SIZE;
+
+    let mut va = range.start.addr();
+    let end = range.end.addr();
+    assert!(end <= Level1::SIDE_BASE_ADDRESS, "attempting to map in the recursive region");
+    unsafe {
+        side_load(side)?;
+    }
+    while va != end {
+        let len = if end.wrapping_sub(va) >= SIZE_512G && va % SIZE_512G == 0 {
+            unsafe {
+                Level4::set_side_entry(va, Level4::pte_ref(va).clone());
+            }
+            SIZE_512G
+        } else if end.wrapping_sub(va) >= SIZE_1G && va % SIZE_1G == 0 {
+            unsafe {
+                Level4::make_side_level(V4KA::new(va), allocator)?;
+                Level3::set_side_entry(va, Level3::pte_ref(va).clone());
+            }
+            SIZE_1G
+        } else if end.wrapping_sub(va) >= SIZE_2M && va % SIZE_2M == 0 {
+            unsafe {
+                Level3::make_side_level(V4KA::new(va), allocator)?;
+                Level2::set_side_entry(va, Level2::pte_ref(va).clone());
+            }
+            SIZE_2M
+        } else if end.wrapping_sub(va) >= SIZE_4K && va % SIZE_4K == 0 {
+            unsafe {
+                Level2::make_side_level(V4KA::new(va), allocator)?;
+                Level1::set_side_entry(va, Level1::pte_ref(va).clone());
+            }
+            SIZE_4K
+        } else {
+            panic!("impossible page size");
+        };
+        va += len;
+    }
+    unload_side()
+}
 
 /// unmaps a region by clearing its root level PTEs. Only
@@ -550,6 +671,24 @@ pub fn unmap_root_ranges(ranges: &[Range<V4KA>]) {
     }
 }
 
+/// unmaps a side region by clearing its root level PTEs. Only
+/// useful for segments and tasks.
+///
+/// # Safety
+/// This is not safe. The side-loaded address space may not
+/// be loaded.
+pub unsafe fn unmap_side_root_ranges(ranges: &[Range<V4KA>]) {
+    let _tlb = TLBFlushGuard::new();
+    for range in ranges {
+        let start = V512GA::new_round_down(range.start.addr());
+        let end = V512GA::new_round_up(range.end.addr());
+        for addr in start..end {
+            let entry = unsafe { Level4::side_pte_ref(addr.addr()) };
+            entry.clear();
+        }
+    }
+}
+
 /// Maps an address space in the side-load slot.
 ///
 /// # Safety
@@ -568,8 +707,8 @@ pub unsafe fn side_load(pf: PF4K) -> Result<()> {
 /// # Safety
 ///
 /// This is not safe. The side-loaded address space may not
-/// loaded.
-pub unsafe fn unload_side() -> Result<PF4K> {
+/// be loaded.
+pub fn unload_side() -> Result<PF4K> {
     let _tlb = TLBFlushGuard::new();
     let table = unsafe { &mut *PageTable::proto_ptr().with_addr(Level4::BASE_ADDRESS).cast_mut() };
     let entry = table.entries[Level4::SIDE_INDEX].pfa();
@@ -630,19 +769,8 @@ unsafe fn side_walk(va: usize) -> Walk {
 ///
 /// XXX(cross): We should figure out some way to at least improve
 /// safety here.
-pub unsafe fn side_translate(va: usize) -> HPA {
-    match unsafe { side_walk(va) } {
-        Walk(Some(_), Some(L3E::Next(_)), Some(L2E::Next(_)), Some(L1E::Page(PF4K(hpa)))) => {
-            hpa.offset(va & <Level1 as Level>::PageType::MASK)
-        }
-        Walk(Some(_), Some(L3E::Next(_)), Some(L2E::Page(PF2M(hpa))), _) => {
-            hpa.offset(va & <Level2 as Level>::PageType::MASK)
-        }
-        Walk(Some(_), Some(L3E::Page(PF1G(hpa))), _, _) => {
-            hpa.offset(va & <Level3 as Level>::PageType::MASK)
-        }
-        Walk(_, _, _, _) => HPA::new(0),
-    }
+pub unsafe fn side_translate(va: usize) -> Option<HPA> {
+    translate_walk(va, unsafe { side_walk(va) })
 }
 
 /// Maps the given PF4K to the given virtual address in the currently
@@ -657,25 +785,20 @@ where
     F: FnMut() -> Result<PF4K>,
 {
     let va = va.addr();
-    let inner_flags = PTEFlags::PRESENT | PTEFlags::WRITE;
-
     let w = unsafe { side_walk(va) };
     if let Walk(None, _, _, _) = w {
-        let pml4e = allocator()?;
         unsafe {
-            Level4::set_side_entry(va, PTE::new(pml4e.pfa(), inner_flags));
+            Level4::set_side_entry(va, alloc_inner(allocator)?);
         }
     }
     if let Walk(_, None, _, _) = w {
-        let pml3e = allocator()?;
         unsafe {
-            Level3::set_side_entry(va, PTE::new(pml3e.pfa(), inner_flags));
+            Level3::set_side_entry(va, alloc_inner(allocator)?);
         }
     }
     if let Walk(_, _, None, _) = w {
-        let pml2e = allocator()?;
         unsafe {
-            Level2::set_side_entry(va, PTE::new(pml2e.pfa(), inner_flags));
+            Level2::set_side_entry(va, alloc_inner(allocator)?);
         }
     }
     if let Walk(_, _, _, None) = w {
diff --git a/xtask/src/main.rs b/xtask/src/main.rs
index 3034d5d..941b0dd 100644
--- a/xtask/src/main.rs
+++ b/xtask/src/main.rs
@@ -205,6 +205,7 @@ fn archive(profile: Profile, locked: Locked) -> Result<()> {
         "global",
         "memory",
         "monitor",
+        "node",
         "scheduler",
         "supervisor",
         "system",