From 5db9381eb0c630b44da4c7dc9f6ed967514fb976 Mon Sep 17 00:00:00 2001 From: Mazunki Hoksaas Date: Tue, 21 Oct 2025 10:52:25 +0200 Subject: [PATCH 1/5] add explicit types for mmap() flags --- api/sys/mman.hpp | 30 ++++++++++++++++++++++++++++++ api/util/bitops.hpp | 19 +++++++++++++++++-- src/musl/mmap.cpp | 19 +++++++++++-------- 3 files changed, 58 insertions(+), 10 deletions(-) create mode 100644 api/sys/mman.hpp diff --git a/api/sys/mman.hpp b/api/sys/mman.hpp new file mode 100644 index 000000000..e8053fdec --- /dev/null +++ b/api/sys/mman.hpp @@ -0,0 +1,30 @@ +/* + * provides namespaced types for `sys_mman(0p)` values + */ +#ifndef _SYS_MMAN_HPP +#define _SYS_MMAN_HPP + +#include +#include +#include +#include + +namespace os::mem { + enum class Flags : uint8_t { + None = 0, + Shared = MAP_SHARED, + Private = MAP_PRIVATE, + Fixed = MAP_FIXED, + Anonymous = MAP_ANONYMOUS, + }; +} // os::mem + + +namespace util { + inline namespace bitops { + template<> struct enable_bitmask_ops { + using type = std::underlying_type::type; + static constexpr bool enable = true; + }; + } + diff --git a/api/util/bitops.hpp b/api/util/bitops.hpp index 4c9c0e617..580ec9273 100644 --- a/api/util/bitops.hpp +++ b/api/util/bitops.hpp @@ -115,7 +115,7 @@ constexpr operator~(E flag){ // bool has_flag(flag) template -constexpr typename std::enable_if::enable, bool>::type +[[nodiscard]] constexpr typename std::enable_if::enable, bool>::type has_flag(E flag){ using base_type = typename std::underlying_type::type; return static_cast(flag); @@ -123,11 +123,26 @@ has_flag(E flag){ // bool has_flag(field, flags) template -constexpr typename std::enable_if::enable, bool>::type +[[nodiscard]] constexpr typename std::enable_if::enable, bool>::type has_flag(E field, E flags){ return (field & flags) == flags ; } +// bool missing_flag(flag) +template +[[nodiscard]] constexpr typename std::enable_if::enable, bool>::type +missing_flag(E flag){ + using base_type = typename std::underlying_type::type; + return static_cast(flag) == 0; +} + +// bool missing_flag(field, flags) +template +[[nodiscard]] constexpr typename std::enable_if::enable, bool>::type +missing_flag(E field, E flags) noexcept { + return (field & flags) != flags; +} + // Enable for uint8_t template<> diff --git a/src/musl/mmap.cpp b/src/musl/mmap.cpp index 1f452dde0..29364bd26 100644 --- a/src/musl/mmap.cpp +++ b/src/musl/mmap.cpp @@ -1,6 +1,6 @@ #include "common.hpp" #include -#include +#include #include #include #include @@ -54,9 +54,11 @@ uintptr_t mmap_allocation_end() { return alloc->highest_used(); } -static void* sys_mmap(void * addr, size_t length, int /*prot*/, int flags, +static void* sys_mmap(void * addr, size_t length, int /*prot*/, int _flags, int fd, off_t /*offset*/) { + using os::mmap::Flags; + const Flags flags = static_cast(_flags); // TODO: Implement minimal functionality to be POSIX compliant // https://pubs.opengroup.org/onlinepubs/009695399/functions/mmap.html @@ -68,37 +70,38 @@ static void* sys_mmap(void * addr, size_t length, int /*prot*/, int flags, return MAP_FAILED; } - if ((flags & MAP_ANONYMOUS) == 0) { + if (util::missing_flag(flags, Flags::Anonymous)) { Expects(false && "We only support MAP_ANONYMOUS calls to mmap()"); errno = ENOTSUP; return MAP_FAILED; } - if ((flags & MAP_FIXED) > 0) { + if (util::has_flag(flags, Flags::Fixed)) { Expects(false && "MAP_FIXED not supported."); errno = ENOTSUP; return MAP_FAILED; } - if (((flags & MAP_PRIVATE) > 0) && ((flags & MAP_ANONYMOUS) == 0)) { - Expects(false && "MAP_PRIVATE only supported 
for MAP_ANONYMOUS"); + if (util::has_flag(flags, Flags::Private) && util::missing_flag(flags, Flags::Anonymous)) { + Expects(false && "MAP_PRIVATE only supported for MAP_ANONYMOS"); errno = ENOTSUP; return MAP_FAILED; } - if (((flags & MAP_PRIVATE) > 0) && (addr != 0)) { + if (util::has_flag(flags, Flags::Private) && (addr != 0)) { Expects(false && "MAP_PRIVATE only supported for new allocations (address=0)."); errno = ENOTSUP; return MAP_FAILED; } - if (((flags & MAP_SHARED) == 0) && ((flags & MAP_PRIVATE) == 0)) { + if (util::missing_flag(flags, Flags::Shared) && util::missing_flag(flags, Flags::Private)) { Expects(false && "MAP_SHARED or MAP_PRIVATE must be set."); errno = ENOTSUP; return MAP_FAILED; } // If we get here, the following should be true: + // // MAP_ANONYMOUS set + MAP_SHARED or MAP_PRIVATE // fd should be 0, address should be 0 for MAP_PRIVATE // (address is in any case ignored) From b5648fa960af5236b5db87b02386045190724e8c Mon Sep 17 00:00:00 2001 From: Mazunki Hoksaas Date: Tue, 21 Oct 2025 16:16:16 +0200 Subject: [PATCH 2/5] add missing include --- api/arch/x86/paging_utils.hpp | 1 + 1 file changed, 1 insertion(+) diff --git a/api/arch/x86/paging_utils.hpp b/api/arch/x86/paging_utils.hpp index c9e683b78..cedfe660b 100644 --- a/api/arch/x86/paging_utils.hpp +++ b/api/arch/x86/paging_utils.hpp @@ -19,6 +19,7 @@ #ifndef X86_PAGING_UTILS #define X86_PAGING_UTILS +#include "arch/x86/paging.hpp" #include #include From 325e014f364746e13af05ab2bcf87d3de6dd4896 Mon Sep 17 00:00:00 2001 From: Mazunki Hoksaas Date: Tue, 21 Oct 2025 16:19:59 +0200 Subject: [PATCH 3/5] prefer explicit namespaces --- src/arch/x86_64/paging.cpp | 43 ++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 23 deletions(-) diff --git a/src/arch/x86_64/paging.cpp b/src/arch/x86_64/paging.cpp index cd3c78918..caaabd3e0 100644 --- a/src/arch/x86_64/paging.cpp +++ b/src/arch/x86_64/paging.cpp @@ -33,12 +33,8 @@ const size_t x86::paging::Map::any_size { supported_page_sizes() }; template<> const size_t os::mem::Map::any_size { supported_page_sizes() }; -using namespace os::mem; using namespace util; -using Flags = x86::paging::Flags; -using Pml4 = x86::paging::Pml4; - static void allow_executable(); // TODO: -Wunused-function @@ -84,13 +80,14 @@ extern uintptr_t __exec_end; **/ // The main page directory pointer -Pml4* __pml4; +x86::paging::Pml4* __pml4; __attribute__((weak)) void __arch_init_paging() { INFO("x86_64", "Initializing paging"); + using Flags = x86::paging::Flags; auto default_fl = Flags::present | Flags::writable | Flags::huge | Flags::no_exec; - __pml4 = new Pml4(0); + __pml4 = new x86::paging::Pml4(0); Expects(__pml4 != nullptr); Expects(!__pml4->has_flag(0, Flags::present)); @@ -137,23 +134,23 @@ void __arch_init_paging() { namespace x86 { namespace paging { -Access to_memflags(Flags f) +os::mem::Access to_memflags(Flags f) { - Access prot = Access::none; + os::mem::Access prot = os::mem::Access::none; if (! has_flag(f, Flags::present)) { - prot |= Access::none; + prot |= os::mem::Access::none; return prot; } - prot |= Access::read; + prot |= os::mem::Access::read; if (has_flag(f, Flags::writable)) { - prot |= Access::write; + prot |= os::mem::Access::write; } if (! 
has_flag(f, Flags::no_exec)) { - prot |= Access::execute; + prot |= os::mem::Access::execute; } return prot; @@ -162,17 +159,17 @@ Access to_memflags(Flags f) Flags to_x86(os::mem::Access prot) { Flags flags = Flags::none; - if (prot != Access::none) { + if (prot != os::mem::Access::none) { flags |= Flags::present; } else { return Flags::none; } - if (has_flag(prot, Access::write)) { + if (has_flag(prot, os::mem::Access::write)) { flags |= Flags::writable; } - if (not has_flag(prot, Access::execute)) { + if (not has_flag(prot, os::mem::Access::execute)) { flags |= Flags::no_exec; } @@ -225,11 +222,11 @@ bool mem::supported_page_size(uintptr_t size) return bits::is_pow2(size) and (size & supported_page_sizes()) != 0; } -Map to_mmap(Map_x86 map){ +os::mem::Map to_mmap(os::mem::Map_x86 map){ return {map.lin, map.phys, to_memflags(map.flags), map.size, map.page_sizes}; } -Map_x86 to_x86(Map map){ +os::mem::Map_x86 to_x86(os::mem::Map map){ return {map.lin, map.phys, x86::paging::to_x86(map.flags), map.size, map.page_sizes}; } @@ -240,7 +237,7 @@ uintptr_t mem::virt_to_phys(uintptr_t linear) return __pml4->addr_of(*ent); } -Access mem::protect_page(uintptr_t linear, Access flags) +os::mem::Access mem::protect_page(uintptr_t linear, Access flags) { MEM_PRINT("::protect_page 0x%lx\n", linear); x86::paging::Flags xflags = x86::paging::to_x86(flags); @@ -249,7 +246,7 @@ Access mem::protect_page(uintptr_t linear, Access flags) return to_memflags(f); }; -Access mem::protect_range(uintptr_t linear, Access flags) +os::mem::Access mem::protect_range(uintptr_t linear, Access flags) { MEM_PRINT("::protect 0x%lx \n", linear); x86::paging::Flags xflags = x86::paging::to_x86(flags); @@ -276,7 +273,7 @@ Access mem::protect_range(uintptr_t linear, Access flags) return to_memflags(fl); }; -Map mem::protect(uintptr_t linear, size_t len, Access flags) +os::mem::Map mem::protect(uintptr_t linear, size_t len, Access flags) { if (UNLIKELY(len < min_psize())) mem_fail_fast("Can't map less than a page\n"); @@ -300,13 +297,13 @@ Map mem::protect(uintptr_t linear, size_t len, Access flags) return to_mmap(res); } -Access mem::flags(uintptr_t addr) +os::mem::Access mem::flags(uintptr_t addr) { return to_memflags(__pml4->flags_r(addr)); } __attribute__((weak)) -Map mem::map(Map m, const char* name) +os::mem::Map mem::map(Map m, const char* name) { using namespace x86::paging; using namespace util; @@ -349,7 +346,7 @@ Map mem::map(Map m, const char* name) return to_mmap(new_map); }; -Map mem::unmap(uintptr_t lin){ +os::mem::Map mem::unmap(uintptr_t lin){ auto key = os::mem::vmmap().in_range(lin); Map_x86 m; if (key) { From 3aef58d441a7f6692fc64c69fa0df4002ca51262 Mon Sep 17 00:00:00 2001 From: Mazunki Hoksaas Date: Tue, 21 Oct 2025 16:38:33 +0200 Subject: [PATCH 4/5] migrate os::mem::Access to os::mem::Permission --- api/arch/x86/paging.hpp | 8 +-- api/kernel/memory.hpp | 38 ++++--------- api/sys/mman.hpp | 23 ++++++++ src/arch/i686/paging.cpp | 2 +- src/arch/x86_64/ist.cpp | 4 +- src/arch/x86_64/paging.cpp | 44 +++++++++------ src/kernel/elf.cpp | 2 +- src/kernel/multiboot.cpp | 4 +- src/musl/mmap.cpp | 2 +- src/platform/x86_pc/os.cpp | 2 +- test/kernel/integration/memmap/service.cpp | 2 +- test/kernel/integration/paging/service.cpp | 18 +++---- test/kernel/unit/test_memory.cpp | 62 +++++++++++----------- test/kernel/unit/x86_paging.cpp | 60 ++++++++++----------- 14 files changed, 142 insertions(+), 129 deletions(-) diff --git a/api/arch/x86/paging.hpp b/api/arch/x86/paging.hpp index 47cfa9e15..00c653e7f 100644 --- 
a/api/arch/x86/paging.hpp +++ b/api/arch/x86/paging.hpp @@ -127,11 +127,11 @@ namespace paging { using namespace util::literals; using namespace util::bitops; -/** Conversion from x86 paging flags to mem::Accessflags **/ -os::mem::Access to_memflags(Flags f); +/** Conversion from x86 paging flags to mem::Permission flags **/ +os::mem::Permission to_memflags(Flags f); -/** Conversion from mem::Access flags to x86 paging flags **/ -Flags to_x86(os::mem::Access prot); +/** Conversion from mem::Permission flags to x86 paging flags **/ +Flags to_x86(os::mem::Permission prot); /** Summary of currently mapped page- and page directories **/ struct Summary { diff --git a/api/kernel/memory.hpp b/api/kernel/memory.hpp index c19480b4c..e9e4c0f20 100644 --- a/api/kernel/memory.hpp +++ b/api/kernel/memory.hpp @@ -26,17 +26,10 @@ #include #include #include +#include namespace os::mem { - /** POSIX mprotect compliant access bits **/ - enum class Access : uint8_t { - none = 0, - read = 1, - write = 2, - execute = 4 - }; - using Raw_allocator = buddy::Alloc; /** Get default allocator for untyped allocations */ @@ -68,7 +61,7 @@ namespace os::mem { * Virtual to physical memory mapping. * For interfacing with the virtual memory API, e.g. mem::map / mem::protect. **/ - template + template struct Mapping { static const size_t any_size; @@ -126,7 +119,7 @@ namespace os::mem { Map unmap(uintptr_t addr); /** Get protection flags for page enclosing a given address */ - Access flags(uintptr_t addr); + Permission flags(uintptr_t addr); /** Determine active page size of a given linear address **/ uintptr_t active_page_size(uintptr_t addr); @@ -142,20 +135,20 @@ namespace os::mem { * might result in 513 4KiB pages or 1 2MiB page and 1 4KiB page getting * protected. **/ - Map protect(uintptr_t linear, size_t len, Access flags = Access::read); + Map protect(uintptr_t linear, size_t len, Permission flags = Permission::Read); // TODO(mazunki): consider whether we should default to Read here /** * Set and return access flags for a given linear address range * The range is expected to be mapped by a previous call to map. **/ - Access protect_range(uintptr_t linear, Access flags = Access::read); + Permission protect_range(uintptr_t linear, Permission flags = Permission::Read); // TODO(mazunki): consider whether we should default to Read here /** * Set and return access flags for a page starting at linear. * @note : the page size can be any of the supported sizes and * protection will apply for that whole page. **/ - Access protect_page(uintptr_t linear, Access flags = Access::read); + Permission protect_page(uintptr_t linear, Permission flags = Permission::Read); // TODO(mazunki): consider whether we should default to Read here /** Get the physical address to which linear address is mapped **/ @@ -176,20 +169,6 @@ namespace os::mem { - - -// Enable bitwise ops on access flags -namespace util { -inline namespace bitops { - template<> - struct enable_bitmask_ops { - using type = typename std::underlying_type::type; - static constexpr bool enable = true; - }; -} -} - - namespace os::mem { // @@ -333,11 +312,12 @@ namespace os::mem { virtual_move(uintptr_t src, size_t size, uintptr_t dst, const char* label) { using namespace util::bitops; - const auto flags = os::mem::Access::read | os::mem::Access::write; + const auto flags = os::mem::Permission::Data; // TODO(mazunki): shouldn't this inherit flags from @src? 
// setup @dst as new virt area for @src os::mem::map({dst, src, flags, size}, label); + // unpresent @src - os::mem::protect(src, size, os::mem::Access::none); + os::mem::protect(src, size, os::mem::Permission::Any); // TODO(mazunki): change to Permission::None when introduced } } diff --git a/api/sys/mman.hpp b/api/sys/mman.hpp index e8053fdec..43edb4571 100644 --- a/api/sys/mman.hpp +++ b/api/sys/mman.hpp @@ -17,6 +17,20 @@ namespace os::mem { Fixed = MAP_FIXED, Anonymous = MAP_ANONYMOUS, }; + + enum class Permission : uint8_t { // TODO(mazunki): consider making Permission::{Read,Write,Execute} private or standalone class + Read = PROT_READ, + Write = PROT_WRITE, + Execute = PROT_EXEC, + + Data = Read | Write, + Code = Read | Execute, + + Any = 0, // TODO(mazunki): this should really be R|W|X; but requires some refactoring + RWX = Read|Write|Execute, // TODO(mazunki): temporary, remove me. references should use Permission::Any + + // None = 0, // TODO(mazunki): implement this after Any is properly implemented (to avoid confusion with old Access::none which had a different meaning). should block all access (best used for unmapped stuff, potentially tests) + }; } // os::mem @@ -28,3 +42,12 @@ namespace util { inline namespace bitops { template<> struct enable_bitmask_ops { }; } + inline namespace bitops { + template<> + struct enable_bitmask_ops { + using type = typename std::underlying_type::type; + static constexpr bool enable = true; + }; + } +} +#endif // _SYS_MMAN_HPP diff --git a/src/arch/i686/paging.cpp b/src/arch/i686/paging.cpp index f014cdfc2..e1c8799b3 100644 --- a/src/arch/i686/paging.cpp +++ b/src/arch/i686/paging.cpp @@ -33,6 +33,6 @@ namespace mem { } template <> - const size_t Mapping::any_size = 4096; + const size_t Mapping::any_size = 4096; } } diff --git a/src/arch/x86_64/ist.cpp b/src/arch/x86_64/ist.cpp index 059757e44..64a85fde6 100644 --- a/src/arch/x86_64/ist.cpp +++ b/src/arch/x86_64/ist.cpp @@ -38,7 +38,7 @@ static stack create_stack_virt(size_t size, const char* name) // TODO randomize location / ask virtual memory allocator const uintptr_t stack_area = 1ull << 46; - const mem::Access flags = mem::Access::read | mem::Access::write; + const mem::Permission flags = mem::Permission::Data; // Virtual area // Adds a guard page between each new stack @@ -53,7 +53,7 @@ static stack create_stack_virt(size_t size, const char* name) Expects(map); Expects(mem::active_page_size(map.lin) == 4096); - Expects(mem::flags(map.lin - 1) == mem::Access::none + Expects(mem::flags(map.lin - 1) == mem::Permission::Any // TODO(mazunki): should this be Permission::None? && "Guard page should not present"); // Next stack starts after next page diff --git a/src/arch/x86_64/paging.cpp b/src/arch/x86_64/paging.cpp index caaabd3e0..fee5b829e 100644 --- a/src/arch/x86_64/paging.cpp +++ b/src/arch/x86_64/paging.cpp @@ -134,42 +134,44 @@ void __arch_init_paging() { namespace x86 { namespace paging { -os::mem::Access to_memflags(Flags f) +os::mem::Permission to_memflags(Flags f) { - os::mem::Access prot = os::mem::Access::none; + using Permission = os::mem::Permission; + Permission prot = Permission::Any; // TODO(mazunki): should probably be 0 (or introduce Permission::Empty) if (! has_flag(f, Flags::present)) { - prot |= os::mem::Access::none; + prot |= Permission::Any; // TODO(mazunki): should probably be Permission::None return prot; } - prot |= os::mem::Access::read; + prot |= Permission::Read; if (has_flag(f, Flags::writable)) { - prot |= os::mem::Access::write; + prot |= Permission::Write; } if (! 
has_flag(f, Flags::no_exec)) { - prot |= os::mem::Access::execute; + prot |= Permission::Execute; } return prot; } -Flags to_x86(os::mem::Access prot) +Flags to_x86(os::mem::Permission prot) // TODO(mazunki): probably implement Any, RWX, None here { + using Permission = os::mem::Permission; Flags flags = Flags::none; - if (prot != os::mem::Access::none) { + if (prot != Permission::Any) { flags |= Flags::present; } else { return Flags::none; } - if (has_flag(prot, os::mem::Access::write)) { + if (has_flag(prot, Permission::Write)) { flags |= Flags::writable; } - if (not has_flag(prot, os::mem::Access::execute)) { + if (not has_flag(prot, Permission::Execute)) { flags |= Flags::no_exec; } @@ -183,7 +185,7 @@ void invalidate(void *pageaddr){ }} // x86::paging -namespace os { +namespace os { // TODO(mazunki): could it be worth moving this into `x86::paging::` instead? namespace mem { using Map_x86 = Mapping; @@ -237,7 +239,15 @@ uintptr_t mem::virt_to_phys(uintptr_t linear) return __pml4->addr_of(*ent); } -os::mem::Access mem::protect_page(uintptr_t linear, Access flags) +/* + * TODO(mazunki): + * might be better to rename this to set_protection(linear, flags), + * and introduce permit_page() and prohibit_page() to add/remove permissions + * + * mprotect/protect_page() are misleading as we can use it to remove + * protections of pages too + */ +os::mem::Permission mem::protect_page(uintptr_t linear, Permission flags) { MEM_PRINT("::protect_page 0x%lx\n", linear); x86::paging::Flags xflags = x86::paging::to_x86(flags); @@ -246,7 +256,7 @@ os::mem::Access mem::protect_page(uintptr_t linear, Access flags) return to_memflags(f); }; -os::mem::Access mem::protect_range(uintptr_t linear, Access flags) +os::mem::Permission mem::protect_range(uintptr_t linear, Permission flags) { MEM_PRINT("::protect 0x%lx \n", linear); x86::paging::Flags xflags = x86::paging::to_x86(flags); @@ -273,7 +283,7 @@ os::mem::Access mem::protect_range(uintptr_t linear, Access flags) return to_memflags(fl); }; -os::mem::Map mem::protect(uintptr_t linear, size_t len, Access flags) +os::mem::Map mem::protect(uintptr_t linear, size_t len, Permission flags) { if (UNLIKELY(len < min_psize())) mem_fail_fast("Can't map less than a page\n"); @@ -297,7 +307,7 @@ os::mem::Map mem::protect(uintptr_t linear, size_t len, Access flags) return to_mmap(res); } -os::mem::Access mem::flags(uintptr_t addr) +os::mem::Permission mem::flags(uintptr_t addr) { return to_memflags(__pml4->flags_r(addr)); } @@ -356,7 +366,7 @@ os::mem::Map mem::unmap(uintptr_t lin){ m.phys = 0; m.size = map_ent.size(); - m = __pml4->map_r({key, 0, x86::paging::to_x86(Access::none), (size_t)map_ent.size()}); + m = __pml4->map_r({key, 0, x86::paging::to_x86(Permission::Any), (size_t)map_ent.size()}); // TODO(mazunki): this should maybe be Permission::None Ensures(m.size == util::bits::roundto<4_KiB>(map_ent.size())); os::mem::vmmap().erase(key); @@ -386,7 +396,7 @@ void allow_executable() m.phys = __exec_begin; m.size = exec_size; m.page_sizes = os::mem::Map::any_size; - m.flags = os::mem::Access::execute | os::mem::Access::read; + m.flags = os::mem::Permission::Code; os::mem::map(m, "ELF .text"); } diff --git a/src/kernel/elf.cpp b/src/kernel/elf.cpp index 20b1c5ff2..c592044c2 100644 --- a/src/kernel/elf.cpp +++ b/src/kernel/elf.cpp @@ -502,6 +502,6 @@ void elf_protect_symbol_areas() {(uintptr_t) src, (uintptr_t) src + size-1, "Symbols & strings"}); INFO2("* Protecting syms %p to %p (size %#zx)", src, &src[size], size); - os::mem::protect((uintptr_t) src, size, 
os::mem::Access::read); + os::mem::protect((uintptr_t) src, size, os::mem::Permission::Read); } #endif diff --git a/src/kernel/multiboot.cpp b/src/kernel/multiboot.cpp index f4a99bf7d..28ad940a9 100644 --- a/src/kernel/multiboot.cpp +++ b/src/kernel/multiboot.cpp @@ -164,7 +164,7 @@ void kernel::multiboot(uint32_t boot_addr) if (not (map.type & MULTIBOOT_MEMORY_AVAILABLE)) { if (util::bits::is_aligned<4_KiB>(map.addr)) { - os::mem::map({addr, addr, os::mem::Access::read | os::mem::Access::write, size}, + os::mem::map({addr, addr, os::mem::Permission::Data, size}, "Reserved (Multiboot)"); continue; } @@ -175,7 +175,7 @@ void kernel::multiboot(uint32_t boot_addr) else { // Map as free memory - //os::mem::map_avail({map.addr, map.addr, {os::mem::Access::read | os::mem::Access::write}, map.len}, "Reserved (Multiboot)"); + //os::mem::map_avail({map.addr, map.addr, {os::mem::Permission::Data}, map.len}, "Reserved (Multiboot)"); } } INFO2(""); diff --git a/src/musl/mmap.cpp b/src/musl/mmap.cpp index 29364bd26..d61288e3f 100644 --- a/src/musl/mmap.cpp +++ b/src/musl/mmap.cpp @@ -57,7 +57,7 @@ uintptr_t mmap_allocation_end() { static void* sys_mmap(void * addr, size_t length, int /*prot*/, int _flags, int fd, off_t /*offset*/) { - using os::mmap::Flags; + using os::mem::Flags; const Flags flags = static_cast(_flags); // TODO: Implement minimal functionality to be POSIX compliant diff --git a/src/platform/x86_pc/os.cpp b/src/platform/x86_pc/os.cpp index 9b2097889..0d62d8fb7 100644 --- a/src/platform/x86_pc/os.cpp +++ b/src/platform/x86_pc/os.cpp @@ -139,7 +139,7 @@ void kernel::start(uint32_t boot_magic, uint32_t boot_addr) #if defined(ARCH_x86_64) // protect the basic pagetable used by LiveUpdate and any other // systems that need to exit long/protected mode - os::mem::map({0x1000, 0x1000, os::mem::Access::read, 0x7000}, "Page tables"); + os::mem::map({0x1000, 0x1000, os::mem::Permission::Read, 0x7000}, "Page tables"); memmap.assign_range({0x10000, 0x9d3ff, "Stack"}); #elif defined(ARCH_i686) memmap.assign_range({0x10000, 0x9d3ff, "Stack"}); diff --git a/test/kernel/integration/memmap/service.cpp b/test/kernel/integration/memmap/service.cpp index 017838fef..0880935a9 100644 --- a/test/kernel/integration/memmap/service.cpp +++ b/test/kernel/integration/memmap/service.cpp @@ -52,7 +52,7 @@ void Service::start(const std::string&) // mem::map is using memory_map to keep track of virutal memory // TODO: we might consider consolidating ranges with mappings. 
- auto m = os::mem::map({42_GiB, 42_GiB, os::mem::Access::read, 1_GiB}, + auto m = os::mem::map({42_GiB, 42_GiB, os::mem::Permission::Read, 1_GiB}, "Test range"); Expects(m); Expects(map.size() == s + 1); diff --git a/test/kernel/integration/paging/service.cpp b/test/kernel/integration/paging/service.cpp index 595ee71a6..1b65845eb 100644 --- a/test/kernel/integration/paging/service.cpp +++ b/test/kernel/integration/paging/service.cpp @@ -203,7 +203,7 @@ void verify_integrity(){ mem::Map far; far.lin = near + far_distance; far.phys = near; - far.flags = mem::Access::read | mem::Access::write; + far.flags = mem::Permission::Data; // TODO(mazunki): consider whether R|W is more semantic here far.size = 100_MiB; far.page_sizes = mem::Map::any_size; @@ -356,7 +356,7 @@ void map_non_aligned(){ << Byte_r(near_addr1) << ", no page size restrictions \n"; // OK - we don't supply page size, only size - auto res = mem::map({far_addr1, near_addr1, mem::Access::read | mem::Access::write, psize}); + auto res = mem::map({far_addr1, near_addr1, mem::Permission::Data, psize}); // TODO(mazunki): consider whether R|W is more semantic here Expects(res); Expects(res.size == psize); Expects(res.page_sizes & 4_KiB); @@ -369,7 +369,7 @@ void map_non_aligned(){ std::cout << "* Mapping a " << util::Byte_r(psize) << " page to " << Byte_r(near_addr2) << ", requiring page size " << Byte_r(psize) << "\n"; try { - mem::map({far_addr2, near_addr2, mem::Access::read | mem::Access::write, psize, psize}); + mem::map({far_addr2, near_addr2, mem::Permission::Data, psize, psize}); // TODO(mazunki): consider whether R|W is more semantic here } catch (mem::Memory_exception& e) { Expects(std::string(e.what()).find(std::string("linear and physical must be aligned to requested page size"))); std::cout << "* Exception caught as expected\n"; @@ -385,7 +385,7 @@ int main() void(*heap_code)() = (void(*)()) malloc(42); Expects(Byte_r{std::numeric_limits::max()}.to_string() == "2.000_GiB"); - Expects(Byte_r{std::numeric_limits::max()}.to_string() == "16777216.000_TiB"); + Expects(Byte_r{std::numeric_limits::max()}.to_string() == "16777216.000_TiB"); // 16777216 = 2²⁴ verify_magic(); verify_integrity(); @@ -399,7 +399,7 @@ int main() prot.phys = (uintptr_t) protected_page_phys; prot.size = 4_KiB; prot.page_sizes = 4_KiB; - prot.flags = mem::Access::read | mem::Access::write; + prot.flags = mem::Permission::Data; // TODO(mazunki): consider whether R|W is more semantic here mem::Map mapped; int expected_reboots = 4; @@ -407,7 +407,7 @@ int main() std::cout << "Protection fault test setup\n"; std::cout << "* Mapping protected page @ " << prot << "\n"; mapped = mem::map(prot, "Protected test page"); - mem::protect_range((uint64_t)protected_page, mem::Access::read | mem::Access::write); + mem::protect_range((uint64_t)protected_page, mem::Permission::Data); Expects(mapped && mapped == prot); } @@ -423,7 +423,7 @@ int main() pml1 = pml2->page_dir(pml2->entry(mapped.lin)); protected_page[magic->i] = 'a'; - mem::protect_range((uint64_t)protected_page, mem::Access::read); + mem::protect_range((uint64_t)protected_page, mem::Permission::Read); Expects(protected_page[magic->i] == 'a'); std::cout << "* Writing to write-protected page, expecting page write fail\n\n"; protected_page[magic->i] = 'b'; @@ -440,7 +440,7 @@ int main() // Read-protect (e.g. 
not present) std::cout << "* Reading non-present page, expecting page read fail\n\n"; - mem::protect_range((uint64_t)protected_page, mem::Access::none); + mem::protect_range((uint64_t)protected_page, mem::Permission::Any); // TODO(mazunki): change to Permission::None when introduced Expects(protected_page[magic->i] == 'b'); } @@ -454,7 +454,7 @@ int main() // Execute protected page std::cout << "* Executing code from execute-protected page, expecting instruction fetch fail\n\n"; - mem::protect_range((uint64_t)protected_page, mem::Access::read); + mem::protect_range((uint64_t)protected_page, mem::Permission::Read); ((void(*)())(&protected_page[magic->i]))(); } diff --git a/test/kernel/unit/test_memory.cpp b/test/kernel/unit/test_memory.cpp index 3be60dccb..7611e4f01 100644 --- a/test/kernel/unit/test_memory.cpp +++ b/test/kernel/unit/test_memory.cpp @@ -97,7 +97,7 @@ CASE ("os::mem - Trying to map the 0 page") m.lin = 0; m.phys = 4_GiB; m.size = 42_KiB; - m.flags = mem::Access::read; + m.flags = mem::Permission::Read; // Throw due to assert fail in map EXPECT_THROWS(mem::map(m, "Fail")); @@ -153,7 +153,7 @@ CASE ("os::mem Using map and unmap") m.lin = 5_GiB; m.phys = 4_GiB; m.size = 42_MiB; - m.flags = mem::Access::read; + m.flags = mem::Permission::Read; m.page_sizes = 4_KiB | 2_MiB; // It shouldn't exist in the memory map @@ -202,7 +202,7 @@ CASE ("os::mem Using map and unmap") auto un = mem::unmap(m.lin); EXPECT(un.lin == mapping.lin); EXPECT(un.phys == 0); - EXPECT(un.flags == mem::Access::none); + EXPECT(un.flags == mem::Permission::Any); // TODO(mazunki): change this to Permission::None when introduced EXPECT(un.size == mapping.size); key = os::mem::vmmap().in_range(m.lin); EXPECT(key == 0); @@ -227,21 +227,21 @@ CASE ("os::mem using protect_range and flags") Default_paging p{}; EXPECT(__pml4 != nullptr); - mem::Map req = {6_GiB, 3_GiB, mem::Access::read, 15 * 4_KiB, 4_KiB}; + mem::Map req = {6_GiB, 3_GiB, mem::Permission::Read, 15 * 4_KiB, 4_KiB}; auto previous_flags = mem::flags(req.lin); mem::Map res = mem::map(req); EXPECT(req == res); - EXPECT(mem::flags(req.lin) == mem::Access::read); + EXPECT(mem::flags(req.lin) == mem::Permission::Read); EXPECT(mem::active_page_size(req.lin) == 4_KiB); auto page_below = req.lin - 4_KiB; auto page_above = req.lin + 4_KiB; - mem::protect_page(page_below, mem::Access::none); - EXPECT(mem::flags(page_below) == mem::Access::none); + mem::protect_page(page_below, mem::Permission::Any); + EXPECT(mem::flags(page_below) == mem::Permission::Any); EXPECT(mem::active_page_size(page_below) >= 2_MiB); - mem::protect_page(page_above, mem::Access::none); - EXPECT(mem::flags(page_above) == mem::Access::none); + mem::protect_page(page_above, mem::Permission::Any); + EXPECT(mem::flags(page_above) == mem::Permission::Any); EXPECT(mem::active_page_size(page_above) == 4_KiB); // The original page is untouched @@ -249,21 +249,21 @@ CASE ("os::mem using protect_range and flags") // Can't protect a range that isn't mapped auto unmapped = 590_GiB; - EXPECT(mem::flags(unmapped) == mem::Access::none); - EXPECT_THROWS(mem::protect_range(unmapped, mem::Access::write | mem::Access::read)); - EXPECT(mem::flags(unmapped) == mem::Access::none); + EXPECT(mem::flags(unmapped) == mem::Permission::Any); + EXPECT_THROWS(mem::protect_range(unmapped, mem::Permission::Data)); // TODO(mazunki): consider whether R|W is more semantic here + EXPECT(mem::flags(unmapped) == mem::Permission::Any); // You can still protect page EXPECT(mem::active_page_size(unmapped) == 512_GiB); - auto rw = 
mem::Access::write | mem::Access::read; + auto rw = mem::Permission::Write | mem::Permission::Read; // But a 512 GiB page can't be present without being mapped - EXPECT(mem::protect_page(unmapped, rw) == mem::Access::none); + EXPECT(mem::protect_page(unmapped, rw) == mem::Permission::Any); - mem::protect_range(req.lin, mem::Access::execute); + mem::protect_range(req.lin, mem::Permission::Execute); for (auto p = req.lin; p < req.lin + req.size; p += 4_KiB){ EXPECT(mem::active_page_size(p) == 4_KiB); - EXPECT(mem::flags(p) == (mem::Access::execute | mem::Access::read)); + EXPECT(mem::flags(p) == mem::Permission::Code); // TODO(mazunki): consider whether W|X is more semantic here } EXPECT(mem::flags(req.lin + req.size) == previous_flags); @@ -325,18 +325,18 @@ SETUP ("Assuming a default page table setup") mem::active_page_size(7_GiB)}; // You can't protect an unmapped range - EXPECT_THROWS(mem::protect(6_GiB + 900_MiB, 300_MiB, mem::Access::read)); + EXPECT_THROWS(mem::protect(6_GiB + 900_MiB, 300_MiB, mem::Permission::Read)); // Map something (a lot will be mapped by default in IncludeOS) - mem::Map req {6_GiB, 3_GiB, mem::Access::read | mem::Access::write, 300_MiB}; + mem::Map req {6_GiB, 3_GiB, mem::Permission::Data, 300_MiB}; // TODO(mazunki): consider whether R|W is more semantic here auto res = mem::map(req); EXPECT(res); - EXPECT(res.flags == (mem::Access::write | mem::Access::read)); + EXPECT(res.flags == mem::Permission::Data); // TODO(mazunki): consider whether R|W is more semantic here EXPECT(res.lin == req.lin); EXPECT(res.phys == req.phys); // You can't protect a partially mapped range - EXPECT_THROWS(mem::protect(5_GiB + 900_MiB, 300_MiB, mem::Access::read | mem::Access::write)); + EXPECT_THROWS(mem::protect(5_GiB + 900_MiB, 300_MiB, mem::Permission::Data)); // TODO(mazunki): consider whether R|W is more semantic here auto prot_offs = 100_MiB; auto prot_begin = req.lin + prot_offs; auto prot_size = 12_KiB; @@ -348,7 +348,7 @@ SETUP ("Assuming a default page table setup") auto pres = mem::protect(prot_begin, prot_size); EXPECT(pres); - EXPECT(pres.flags == mem::Access::read); + EXPECT(pres.flags == mem::Permission::Read); EXPECT(pres.lin == prot_begin); EXPECT(pres.size == prot_size); EXPECT(pres.page_sizes == 4_KiB); @@ -378,7 +378,7 @@ SETUP ("Assuming a default page table setup") CASE("os::mem::protect try to break stuff"){ using namespace util::literals; - auto init_access = mem::Access::none; + auto init_access = mem::Permission::Any; Default_paging::clear_paging(); EXPECT(__pml4 == nullptr); @@ -395,16 +395,16 @@ CASE("os::mem::protect try to break stuff"){ mem::Map req; req.lin = util::bits::roundto<4_KiB>(lin); req.phys = util::bits::roundto<4_KiB>(phys); - req.flags = mem::Access::none; + req.flags = mem::Permission::Any; req.size = util::bits::roundto<4_KiB>(size); req.page_sizes = mem::Map::any_size; if (r % 3 == 0) - req.flags |= mem::Access::read; + req.flags |= mem::Permission::Read; if (r % 3 == 1) - req.flags |= mem::Access::write; + req.flags |= mem::Permission::Write; if (r % 3 == 2) - req.flags |= mem::Access::execute; + req.flags |= mem::Permission::Execute; auto m = mem::map(req); EXPECT(m); @@ -437,7 +437,7 @@ CASE("os::mem::protect try to break stuff"){ CASE("os::mem::protect verify consistency"){ using namespace util::literals; - auto init_access = mem::Access::none; + auto init_access = mem::Permission::Any; if (__pml4 != nullptr) { printf("NOT NULL\n"); @@ -451,10 +451,10 @@ CASE("os::mem::protect verify consistency"){ MYINFO("Initial memory use: %zi 
\n", initial_use); - mem::Map req {6_GiB, 3_GiB, mem::Access::read | mem::Access::write, 300_MiB}; + mem::Map req {6_GiB, 3_GiB, mem::Permission::Data, 300_MiB}; // TODO(mazunki): consider whether R|W is more semantic here auto res = mem::map(req); EXPECT(res); - EXPECT(res.flags == (mem::Access::write | mem::Access::read)); + EXPECT(res.flags == mem::Permission::Data); // TODO(mazunki): consider whether R|W is more semantic here EXPECT(res.lin == req.lin); EXPECT(res.phys == req.phys); @@ -462,7 +462,7 @@ CASE("os::mem::protect verify consistency"){ auto prot_begin = 6_GiB + prot_offs; auto prot_size = 1043_KiB; auto diff_phys = req.lin - req.phys; - auto new_flags = mem::Access::read; + auto new_flags = mem::Permission::Read; // Write-protect auto prot = mem::protect(prot_begin, prot_size, new_flags); @@ -480,7 +480,7 @@ CASE("os::mem::protect verify consistency"){ EXPECT(__pml4->bytes_allocated() > initial_use); // Protect with different flags - new_flags = mem::Access::read | mem::Access::write | mem::Access::execute; + new_flags = mem::Permission::RWX; // TODO(mazunki): RWX should be deprecated auto prot2 = mem::protect(prot_begin, prot_size, new_flags); EXPECT(prot2); diff --git a/test/kernel/unit/x86_paging.cpp b/test/kernel/unit/x86_paging.cpp index 214e376bb..be94b8007 100644 --- a/test/kernel/unit/x86_paging.cpp +++ b/test/kernel/unit/x86_paging.cpp @@ -346,20 +346,20 @@ void init_default_paging(uintptr_t exec_beg = 0xa00000, uintptr_t exec_end = 0xb CASE ("x86::paging Verify execute protection") { using namespace util; - using Access = os::mem::Access; + using Permission = os::mem::Permission; init_default_paging(0xa00000, 0xc00000); // 4KiB 0-page has no access EXPECT(__pml4->active_page_size(0LU) == 4_KiB); EXPECT(os::mem::active_page_size(0LU) == 4_KiB); - EXPECT(os::mem::flags(0) == Access::none); + EXPECT(os::mem::flags(0) == Permission::Any); auto flags = os::mem::flags(__exec_begin); // .text segment has execute + read access up to next 4kb page - EXPECT(os::mem::flags(__exec_begin) == (Access::execute | Access::read)); - EXPECT(os::mem::flags(__exec_end - 1) == (Access::execute | Access::read)); - EXPECT(os::mem::flags(__exec_end + 4_KiB) == (Access::read | Access::write)); + EXPECT(os::mem::flags(__exec_begin) == Permission::Code); + EXPECT(os::mem::flags(__exec_end - 1) == Permission::Code); + EXPECT(os::mem::flags(__exec_end + 4_KiB) == Permission::Data); for (int i = 0; i < 10; i++ ) { auto exec_start = (rand() & ~0xfff); @@ -369,13 +369,13 @@ CASE ("x86::paging Verify execute protection") // 4KiB 0-page has no access EXPECT(os::mem::active_page_size(0LU) == 4_KiB); - EXPECT(os::mem::flags(0) == Access::none); + EXPECT(os::mem::flags(0) == Permission::Any); // .text segment has execute + read access up to next 4kb page - EXPECT(os::mem::flags(__exec_begin) == (Access::execute | Access::read)); + EXPECT(os::mem::flags(__exec_begin) == Permission::Code); - EXPECT(os::mem::flags(__exec_end - 1) == (Access::execute | Access::read)); - EXPECT(os::mem::flags(__exec_end + 4_KiB) == (Access::read | Access::write)); + EXPECT(os::mem::flags(__exec_end - 1) == Permission::Code); + EXPECT(os::mem::flags(__exec_end + 4_KiB) == Permission::Data); } } @@ -447,7 +447,7 @@ CASE ("x86::paging Verify default paging setup") using namespace util; using Flags = x86::paging::Flags; - using Access = os::mem::Access; + using Permission = os::mem::Permission; init_default_paging(); @@ -455,31 +455,31 @@ CASE ("x86::paging Verify default paging setup") { // 4KiB 0-page has no access 
EXPECT(os::mem::active_page_size(0LU) == 4_KiB); - EXPECT(os::mem::flags(0) == Access::none); + EXPECT(os::mem::flags(0) == Permission::Any); // .text segment has execute + read access up to next 4kb page EXPECT(os::mem::active_page_size(__exec_begin) == 4_KiB); - EXPECT(os::mem::flags(__exec_begin) == (Access::execute | Access::read)); - EXPECT(os::mem::flags(__exec_end) == (Access::execute | Access::read)); - EXPECT(os::mem::flags(__exec_end + 4_KiB) == (Access::read | Access::write)); + EXPECT(os::mem::flags(__exec_begin) == Permission::Code); + EXPECT(os::mem::flags(__exec_end) == Permission::Code); + EXPECT(os::mem::flags(__exec_end + 4_KiB) == Permission::Data); // Remaining address space is either read + write or not present - EXPECT(os::mem::flags(100_MiB) == (Access::read | Access::write)); - EXPECT(os::mem::flags(1_GiB) == (Access::read | Access::write)); - EXPECT(os::mem::flags(2_GiB) == (Access::read | Access::write)); - EXPECT(os::mem::flags(4_GiB) == (Access::read | Access::write)); - EXPECT(os::mem::flags(8_GiB) == (Access::read | Access::write)); - EXPECT(os::mem::flags(16_GiB) == (Access::read | Access::write)); - EXPECT(os::mem::flags(32_GiB) == (Access::read | Access::write)); - EXPECT(os::mem::flags(64_GiB) == (Access::read | Access::write)); - EXPECT(os::mem::flags(128_GiB) == (Access::read | Access::write)); - EXPECT(os::mem::flags(256_GiB) == (Access::read | Access::write)); - EXPECT(os::mem::flags(512_GiB) == (Access::none)); - EXPECT(os::mem::flags(1_TiB) == (Access::none)); - EXPECT(os::mem::flags(128_TiB) == (Access::none)); - EXPECT(os::mem::flags(256_TiB) == (Access::none)); - EXPECT(os::mem::flags(512_TiB) == (Access::none)); - EXPECT(os::mem::flags(1024_TiB) == (Access::none)); + EXPECT(os::mem::flags(100_MiB) == Permission::Data); + EXPECT(os::mem::flags(1_GiB) == Permission::Data); + EXPECT(os::mem::flags(2_GiB) == Permission::Data); + EXPECT(os::mem::flags(4_GiB) == Permission::Data); + EXPECT(os::mem::flags(8_GiB) == Permission::Data); + EXPECT(os::mem::flags(16_GiB) == Permission::Data); + EXPECT(os::mem::flags(32_GiB) == Permission::Data); + EXPECT(os::mem::flags(64_GiB) == Permission::Data); + EXPECT(os::mem::flags(128_GiB) == Permission::Data); + EXPECT(os::mem::flags(256_GiB) == Permission::Data); + EXPECT(os::mem::flags(512_GiB) == Permission::Any); + EXPECT(os::mem::flags(1_TiB) == Permission::Any); + EXPECT(os::mem::flags(128_TiB) == Permission::Any); + EXPECT(os::mem::flags(256_TiB) == Permission::Any); + EXPECT(os::mem::flags(512_TiB) == Permission::Any); + EXPECT(os::mem::flags(1024_TiB) == Permission::Any); } From 328ff928ac5d755aefc8c754e19cff730b994696 Mon Sep 17 00:00:00 2001 From: Mazunki Hoksaas Date: Tue, 28 Oct 2025 21:40:48 +0100 Subject: [PATCH 5/5] override __mmap --- deps/musl/default.nix | 1 + deps/musl/patches/mmap.patch | 21 +++++++++++++++++++++ 2 files changed, 22 insertions(+) create mode 100644 deps/musl/patches/mmap.patch diff --git a/deps/musl/default.nix b/deps/musl/default.nix index c3070d334..2eb03aa15 100644 --- a/deps/musl/default.nix +++ b/deps/musl/default.nix @@ -17,6 +17,7 @@ stdenv.mkDerivation rec { patches = [ ./patches/musl.patch ./patches/endian.patch + ./patches/mmap.patch ]; passthru.linuxHeaders = linuxHeaders; diff --git a/deps/musl/patches/mmap.patch b/deps/musl/patches/mmap.patch new file mode 100644 index 000000000..5bda61600 --- /dev/null +++ b/deps/musl/patches/mmap.patch @@ -0,0 +1,21 @@ +diff --git a/src/mman/mmap.c b/src/mman/mmap.c +index 43e5e029..43307692 100644 +--- a/src/mman/mmap.c ++++ 
b/src/mman/mmap.c +@@ -36,4 +36,15 @@ void *__mmap(void *start, size_t len, int prot, int flags, int fd, off_t off) + return (void *)__syscall_ret(ret); + } + +-weak_alias(__mmap, mmap); ++void *__includeos_mmap(void *start, size_t len, int prot, int flags, int fd, off_t off) ++{ ++ long ret; ++ if (flags & MAP_FIXED) { ++ __vm_wait(); ++ } ++ ret = __syscall(SYS_mmap, start, len, prot, flags, fd, off); ++ ++ return (void *) ret; ++} ++ ++weak_alias(__includeos_mmap, mmap);