 #include <tlb.h>
 #include <config.h>
 
+#define MEM_SEC_SHARED true
+#define MEM_SEC_NOT_SHARED false
+
 extern uint8_t _image_start, _image_load_end, _image_end, _dmem_phys_beg, _dmem_beg,
     _cpu_private_beg, _cpu_private_end, _vm_beg, _vm_end, _vm_image_start, _vm_image_end;
 
@@ -34,15 +37,16 @@ struct section {
 };
 
 struct section hyp_secs[] = {
-    [SEC_HYP_GLOBAL] = { (vaddr_t)&_dmem_beg, (vaddr_t)&_cpu_private_beg - 1, true,
+    [SEC_HYP_GLOBAL] = { (vaddr_t)&_dmem_beg, (vaddr_t)&_cpu_private_beg - 1, MEM_SEC_SHARED,
         SPINLOCK_INITVAL },
-    [SEC_HYP_IMAGE] = { (vaddr_t)&_image_start, (vaddr_t)&_image_end - 1, true, SPINLOCK_INITVAL },
-    [SEC_HYP_PRIVATE] = { (vaddr_t)&_cpu_private_beg, (vaddr_t)&_cpu_private_end - 1, false,
+    [SEC_HYP_IMAGE] = { (vaddr_t)&_image_start, (vaddr_t)&_image_end - 1, MEM_SEC_SHARED,
         SPINLOCK_INITVAL },
-    [SEC_HYP_VM] = { (vaddr_t)&_vm_beg, (vaddr_t)&_vm_end - 1, true, SPINLOCK_INITVAL },
+    [SEC_HYP_PRIVATE] = { (vaddr_t)&_cpu_private_beg, (vaddr_t)&_cpu_private_end - 1,
+        MEM_SEC_NOT_SHARED, SPINLOCK_INITVAL },
+    [SEC_HYP_VM] = { (vaddr_t)&_vm_beg, (vaddr_t)&_vm_end - 1, MEM_SEC_SHARED, SPINLOCK_INITVAL },
 };
 
-struct section vm_secs[] = { [SEC_VM_ANY] = { 0x0, MAX_VA, false, SPINLOCK_INITVAL } };
+struct section vm_secs[] = { [SEC_VM_ANY] = { 0x0, MAX_VA, MEM_SEC_NOT_SHARED, SPINLOCK_INITVAL } };
 
 struct {
     struct section* sec;
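
Note: the hunk above shows only the tail of the struct section definition. Judging from the designated initializers, the descriptor presumably pairs an inclusive virtual address range with the shared flag and a lock; the sketch below is a reconstruction under that assumption, with field names invented for illustration:

    struct section {
        vaddr_t beg;     /* first virtual address of the section */
        vaddr_t end;     /* last virtual address, inclusive (hence the "- 1") */
        bool shared;     /* now spelled MEM_SEC_SHARED / MEM_SEC_NOT_SHARED */
        spinlock_t lock; /* initialized with SPINLOCK_INITVAL */
    };
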
@@ -189,7 +193,8 @@ static inline pte_t* mem_alloc_pt(struct addr_space* as, pte_t* parent, size_t l
 {
     /* Must have lock on as and va section to call */
     size_t ptsize = NUM_PAGES(pt_size(&as->pt, lvl + 1));
-    struct ppages ppage = mem_alloc_ppages(as->colors, ptsize, ptsize > 1 ? true : false);
+    struct ppages ppage =
+        mem_alloc_ppages(as->colors, ptsize, ptsize > 1 ? MEM_ALIGN_PPAGES : MEM_DONT_ALIGN_PPAGES);
     if (ppage.num_pages == 0) {
         return NULL;
     }
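
The alignment flags are not defined in any hunk shown here; since the old code passed the literals true and false in the same argument position, they presumably expand to booleans in a header elsewhere in this commit. A minimal sketch under that assumption:

    /* Assumed definitions; not shown in this diff. */
    #define MEM_ALIGN_PPAGES      true
    #define MEM_DONT_ALIGN_PPAGES false

    /* Prototype inferred from the call sites in this file (an assumption): */
    struct ppages mem_alloc_ppages(colormap_t colors, size_t num_pages, bool aligned);
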
@@ -504,7 +509,7 @@ static bool mem_map(struct addr_space* as, vaddr_t va, struct ppages* ppages, si
 
     struct ppages temp_ppages;
     if (ppages == NULL && !all_clrs(as->colors)) {
-        temp_ppages = mem_alloc_ppages(as->colors, num_pages, false);
+        temp_ppages = mem_alloc_ppages(as->colors, num_pages, MEM_DONT_ALIGN_PPAGES);
         if (temp_ppages.num_pages < num_pages) {
             ERROR("failed to alloc colored physical pages");
         }
@@ -547,7 +552,8 @@ static bool mem_map(struct addr_space* as, vaddr_t va, struct ppages* ppages, si
         while ((entry < nentries) && (count < num_pages) &&
             (num_pages - count >= lvlsz / PAGE_SIZE)) {
             if (ppages == NULL) {
-                struct ppages temp = mem_alloc_ppages(as->colors, lvlsz / PAGE_SIZE, true);
+                struct ppages temp =
+                    mem_alloc_ppages(as->colors, lvlsz / PAGE_SIZE, MEM_ALIGN_PPAGES);
                 if (temp.num_pages < lvlsz / PAGE_SIZE) {
                     if (lvl == (as->pt.dscr->lvls - 1)) {
                         // TODO: free previously allocated pages
@@ -612,7 +618,7 @@ bool mem_map_reclr(struct addr_space* as, vaddr_t va, struct ppages* ppages, siz
     }
 
     vaddr_t reclrd_va_base = mem_alloc_vpage(&cpu()->as, SEC_HYP_VM, INVALID_VA, reclrd_num);
-    struct ppages reclrd_ppages = mem_alloc_ppages(as->colors, reclrd_num, false);
+    struct ppages reclrd_ppages = mem_alloc_ppages(as->colors, reclrd_num, MEM_DONT_ALIGN_PPAGES);
     mem_map(&cpu()->as, reclrd_va_base, &reclrd_ppages, reclrd_num, PTE_HYP_FLAGS);
 
     /**
/**
@@ -671,8 +677,8 @@ bool mem_map_reclr(struct addr_space* as, vaddr_t va, struct ppages* ppages, siz
671
677
.colors = ~as -> colors };
672
678
mem_free_ppages (& unused_pages );
673
679
674
- mem_unmap (& cpu ()-> as , reclrd_va_base , reclrd_num , false );
675
- mem_unmap (& cpu ()-> as , phys_va_base , num_pages , false );
680
+ mem_unmap (& cpu ()-> as , reclrd_va_base , reclrd_num , MEM_DONT_FREE_PAGES );
681
+ mem_unmap (& cpu ()-> as , phys_va_base , num_pages , MEM_DONT_FREE_PAGES );
676
682
677
683
return true;
678
684
}
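
Likewise, MEM_FREE_PAGES and MEM_DONT_FREE_PAGES replace the boolean that tells mem_unmap whether to return the backing physical pages to the allocator. Their definitions are not part of the shown hunks; assuming they mirror the alignment pair:

    /* Assumed definitions; not shown in this diff. */
    #define MEM_FREE_PAGES      true
    #define MEM_DONT_FREE_PAGES false

    /* Signature inferred from the call sites (an assumption): */
    void mem_unmap(struct addr_space* as, vaddr_t at, size_t num_pages, bool free_ppages);
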
@@ -712,7 +718,7 @@ vaddr_t mem_map_cpy(struct addr_space* ass, struct addr_space* asd, vaddr_t vas,
 
 static void* copy_space(void* base, const size_t size, struct ppages* pages)
 {
-    *pages = mem_alloc_ppages(cpu()->as.colors, NUM_PAGES(size), false);
+    *pages = mem_alloc_ppages(cpu()->as.colors, NUM_PAGES(size), MEM_DONT_ALIGN_PPAGES);
     vaddr_t va = mem_alloc_vpage(&cpu()->as, SEC_HYP_PRIVATE, INVALID_VA, NUM_PAGES(size));
     mem_map(&cpu()->as, va, pages, NUM_PAGES(size), PTE_HYP_FLAGS);
     memcpy((void*)va, base, size);
@@ -873,14 +879,14 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region* root_regio
         va = mem_alloc_vpage(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, p_image.num_pages);
         mem_map(&cpu()->as, va, &p_image, p_image.num_pages, PTE_HYP_FLAGS);
         memset((void*)va, 0, p_image.num_pages * PAGE_SIZE);
-        mem_unmap(&cpu()->as, va, p_image.num_pages, true);
+        mem_unmap(&cpu()->as, va, p_image.num_pages, MEM_FREE_PAGES);
 
         p_image = mem_ppages_get(load_addr + image_load_size + vm_image_size,
             NUM_PAGES(image_noload_size));
         va = mem_alloc_vpage(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, p_image.num_pages);
         mem_map(&cpu()->as, va, &p_image, p_image.num_pages, PTE_HYP_FLAGS);
         memset((void*)va, 0, p_image.num_pages * PAGE_SIZE);
-        mem_unmap(&cpu()->as, va, p_image.num_pages, true);
+        mem_unmap(&cpu()->as, va, p_image.num_pages, MEM_FREE_PAGES);
 
         p_bitmap = mem_ppages_get(load_addr + image_size + vm_image_size +
                 (cpu_boot_size * platform.cpu_num),
@@ -889,15 +895,15 @@ void mem_color_hypervisor(const paddr_t load_addr, struct mem_region* root_regio
         va = mem_alloc_vpage(&cpu()->as, SEC_HYP_GLOBAL, INVALID_VA, p_bitmap.num_pages);
         mem_map(&cpu()->as, va, &p_bitmap, p_bitmap.num_pages, PTE_HYP_FLAGS);
         memset((void*)va, 0, p_bitmap.num_pages * PAGE_SIZE);
-        mem_unmap(&cpu()->as, va, p_bitmap.num_pages, true);
+        mem_unmap(&cpu()->as, va, p_bitmap.num_pages, MEM_FREE_PAGES);
     }
 
     p_cpu = mem_ppages_get(load_addr + image_size + vm_image_size + (cpu_boot_size * cpu()->id),
         cpu_boot_size / PAGE_SIZE);
     va = mem_alloc_vpage(&cpu()->as, SEC_HYP_PRIVATE, INVALID_VA, p_cpu.num_pages);
     mem_map(&cpu()->as, va, &p_cpu, p_cpu.num_pages, PTE_HYP_FLAGS);
     memset((void*)va, 0, p_cpu.num_pages * PAGE_SIZE);
-    mem_unmap(&cpu()->as, va, p_cpu.num_pages, false);
+    mem_unmap(&cpu()->as, va, p_cpu.num_pages, MEM_DONT_FREE_PAGES);
 }
 
 void as_init(struct addr_space* as, enum AS_TYPE type, asid_t id, pte_t* root_pt, colormap_t colors)
@@ -911,7 +917,7 @@ void as_init(struct addr_space* as, enum AS_TYPE type, asid_t id, pte_t* root_pt
     if (root_pt == NULL) {
         size_t n = NUM_PAGES(pt_size(&as->pt, 0));
         root_pt = (pte_t*)mem_alloc_page(n,
-            type == AS_HYP || type == AS_HYP_CPY ? SEC_HYP_PRIVATE : SEC_HYP_VM, true);
+            type == AS_HYP || type == AS_HYP_CPY ? SEC_HYP_PRIVATE : SEC_HYP_VM, MEM_ALIGN_PPAGES);
         memset((void*)root_pt, 0, n * PAGE_SIZE);
     }
     as->pt.root = root_pt;