@@ -81,13 +81,44 @@ uint32_t ToPosixProtectFlags(PageAccess access) {
 
 bool IsWritableExecutableMemorySupported() { return true; }
 
+struct MappedFileRange {
+  size_t region_begin;
+  size_t region_end;
+};
+
+std::vector<struct MappedFileRange> mapped_file_ranges;
+
 void* AllocFixed(void* base_address, size_t length,
                  AllocationType allocation_type, PageAccess access) {
   // mmap does not support reserve / commit, so ignore allocation_type.
   uint32_t prot = ToPosixProtectFlags(access);
-  void* result = mmap(base_address, length, prot,
-                      MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
+
+  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+  if (base_address != nullptr) {
+    flags |= MAP_FIXED_NOREPLACE;
+  }
+  void* result = mmap(base_address, length, prot, flags, -1, 0);
+
   if (result == MAP_FAILED) {
+    // If the address is within an already-tracked range, the mmap failed
+    // because we have already mapped this memory.
+    size_t region_begin = (size_t)base_address;
+    size_t region_end = (size_t)base_address + length;
+    for (const auto mapped_range : mapped_file_ranges) {
+      // Check if the allocation is within this range...
+      if (region_begin >= mapped_range.region_begin &&
+          region_end <= mapped_range.region_end) {
+        bool should_protect = (((uint8_t)allocation_type & 2) == 2);
+
+        if (should_protect) {
+          if (Protect(base_address, length, access)) {
+            return base_address;
+          }
+        } else if (((uint8_t)allocation_type & 1) == 1) {
+          return base_address;
+        }
+      }
+    }
     return nullptr;
   } else {
     return result;
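
Aside on this hunk: the switch from MAP_FIXED to MAP_FIXED_NOREPLACE is the
key safety change. MAP_FIXED silently replaces whatever is already mapped at
the requested address, while MAP_FIXED_NOREPLACE makes mmap fail with EEXIST,
which is what lets AllocFixed detect a collision with an existing file
mapping. A minimal standalone sketch of that behavior (not part of the PR;
assumes Linux 4.17+, where the flag was introduced, with _GNU_SOURCE defined):

#define _GNU_SOURCE
#include <cerrno>
#include <cstdio>
#include <sys/mman.h>

int main() {
  // Map one page anywhere, then try to map over it at a fixed address.
  void* first = mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  void* second = mmap(first, 4096, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
                      -1, 0);
  if (second == MAP_FAILED && errno == EEXIST) {
    // Plain MAP_FIXED would have silently replaced `first`; the
    // _NOREPLACE variant fails instead and leaves it intact.
    std::printf("range already mapped; existing mapping preserved\n");
  }
  munmap(first, 4096);
  return 0;
}
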
@@ -96,6 +127,15 @@ void* AllocFixed(void* base_address, size_t length,
 
 bool DeallocFixed(void* base_address, size_t length,
                   DeallocationType deallocation_type) {
+  size_t region_begin = (size_t)base_address;
+  size_t region_end = (size_t)base_address + length;
+  for (const auto mapped_range : mapped_file_ranges) {
+    if (region_begin >= mapped_range.region_begin &&
+        region_end <= mapped_range.region_end) {
+      return Protect(base_address, length, PageAccess::kNoAccess);
+    }
+  }
+
   return munmap(base_address, length) == 0;
 }
 
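
The DeallocFixed change above avoids munmap for ranges that fall inside a
file-backed view, since unmapping would punch a permanent hole in the view;
instead it drops page access, giving a decommit-style deallocation while the
mapping itself stays alive. A sketch of the underlying POSIX call
(hypothetical helper name, not the PR's code):

#include <cstddef>
#include <sys/mman.h>

// PROT_NONE keeps the mapping in place but faults on any access, which is
// the closest POSIX analogue to decommitting the pages.
bool DecommitWithinView(void* address, std::size_t length) {
  return mprotect(address, length, PROT_NONE) == 0;
}
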
@@ -178,12 +218,37 @@ void CloseFileMappingHandle(FileMappingHandle handle,
 void* MapFileView(FileMappingHandle handle, void* base_address, size_t length,
                   PageAccess access, size_t file_offset) {
   uint32_t prot = ToPosixProtectFlags(access);
-  return mmap64(base_address, length, prot, MAP_PRIVATE | MAP_ANONYMOUS, handle,
+
+  int flags = MAP_SHARED;
+  if (base_address != nullptr) {
+    flags = flags | MAP_FIXED_NOREPLACE;
+  }
+
+  void* result = mmap(base_address, length, prot, flags, handle,
                       file_offset);
+
+  if (result == MAP_FAILED) {
+    return nullptr;
+  } else {
+    mapped_file_ranges.push_back({(size_t)result, (size_t)result + length});
+    return result;
+  }
 }
 
 bool UnmapFileView(FileMappingHandle handle, void* base_address,
                    size_t length) {
+  for (auto mapped_range = mapped_file_ranges.begin();
+       mapped_range != mapped_file_ranges.end();) {
+    if (mapped_range->region_begin == (size_t)base_address &&
+        mapped_range->region_end == (size_t)base_address + length) {
+      mapped_file_ranges.erase(mapped_range);
+      return munmap(base_address, length) == 0;
+    } else {
+      mapped_range++;
+    }
+  }
+  // TODO: Implement partial file unmapping.
+  assert_always("Error: Partial unmapping of files not yet supported.");
   return munmap(base_address, length) == 0;
 }
 
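
Note that UnmapFileView only handles unmapping a whole view: the bookkeeping
entry must match both endpoints exactly, and anything else falls through to
the assert_always in the hunk above. A self-contained sketch of that
exact-range lookup (hypothetical names, not the PR's code):

#include <algorithm>
#include <cstddef>
#include <vector>

struct Range {
  std::size_t begin;
  std::size_t end;
};

std::vector<Range> tracked_ranges;

// Drops the entry only when both endpoints match a tracked view; a partial
// range is left alone, mirroring the TODO above.
bool EraseExactRange(std::size_t begin, std::size_t end) {
  auto it = std::find_if(
      tracked_ranges.begin(), tracked_ranges.end(),
      [&](const Range& r) { return r.begin == begin && r.end == end; });
  if (it == tracked_ranges.end()) {
    return false;  // Partial unmapping of a view: not supported yet.
  }
  tracked_ranges.erase(it);
  return true;
}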