@@ -146,7 +146,8 @@ C_ASSERT( sizeof(ARENA_LARGE) == 4 * BLOCK_ALIGN );
 
 #define ROUND_ADDR(addr, mask) ((void *)((UINT_PTR)(addr) & ~(UINT_PTR)(mask)))
 #define ROUND_SIZE(size, mask) ((((SIZE_T)(size) + (mask)) & ~(SIZE_T)(mask)))
-#define FIELD_MAX(type, field) (((SIZE_T)1 << (sizeof(((type *)0)->field) * 8)) - 1)
+#define FIELD_BITS(type, field) (sizeof(((type *)0)->field) * 8)
+#define FIELD_MAX(type, field) (((SIZE_T)1 << FIELD_BITS(type, field)) - 1)
 
 #define HEAP_MIN_BLOCK_SIZE ROUND_SIZE(sizeof(struct entry) + BLOCK_ALIGN, BLOCK_ALIGN - 1)
 
@@ -168,17 +169,11 @@ C_ASSERT( HEAP_MAX_FREE_BLOCK_SIZE >= HEAP_MAX_BLOCK_REGION_SIZE );
 /* minimum size to start allocating large blocks */
 #define HEAP_MIN_LARGE_BLOCK_SIZE (HEAP_MAX_USED_BLOCK_SIZE - 0x1000)
 
-/* There will be a free list bucket for every arena size up to and including this value */
-#define HEAP_MAX_SMALL_FREE_LIST 0x100
-C_ASSERT( HEAP_MAX_SMALL_FREE_LIST % BLOCK_ALIGN == 0 );
-#define HEAP_NB_SMALL_FREE_LISTS (((HEAP_MAX_SMALL_FREE_LIST - HEAP_MIN_BLOCK_SIZE) / BLOCK_ALIGN) + 1)
-
-/* Max size of the blocks on the free lists above HEAP_MAX_SMALL_FREE_LIST */
-static const SIZE_T free_list_sizes[] =
-{
-    0x200, 0x400, 0x1000, ~(SIZE_T)0
-};
-#define HEAP_NB_FREE_LISTS (ARRAY_SIZE(free_list_sizes) + HEAP_NB_SMALL_FREE_LISTS)
+#define FREE_LIST_LINEAR_BITS 2
+#define FREE_LIST_LINEAR_MASK ((1 << FREE_LIST_LINEAR_BITS) - 1)
+#define FREE_LIST_COUNT ((FIELD_BITS( struct block, block_size ) - FREE_LIST_LINEAR_BITS + 1) * (1 << FREE_LIST_LINEAR_BITS) + 1)
+/* for reference, update this when changing parameters */
+C_ASSERT( FREE_LIST_COUNT == 0x3d );
 
 typedef struct DECLSPEC_ALIGN(BLOCK_ALIGN) tagSUBHEAP
 {
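
Note: the asserted value can be derived by hand. The block_size field of struct block is a 16-bit WORD, so FIELD_BITS( struct block, block_size ) is 16 and FREE_LIST_COUNT expands to (16 - 2 + 1) * 4 + 1 = 61 = 0x3d. A minimal standalone sketch of the same arithmetic, assuming only that 16-bit field width (the struct here is a hypothetical stand-in, not Wine's real layout):

    #include <assert.h>

    /* hypothetical stand-in for the real struct block, assuming block_size is a WORD */
    struct block { unsigned short block_size; };

    #define FIELD_BITS(type, field) (sizeof(((type *)0)->field) * 8)
    #define FREE_LIST_LINEAR_BITS 2
    #define FREE_LIST_COUNT ((FIELD_BITS( struct block, block_size ) - FREE_LIST_LINEAR_BITS + 1) \
                             * (1 << FREE_LIST_LINEAR_BITS) + 1)

    int main(void)
    {
        assert( FREE_LIST_COUNT == 0x3d ); /* (16 - 2 + 1) * 4 + 1 == 61 */
        return 0;
    }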
@@ -304,7 +299,7 @@ struct heap
     DWORD            pending_pos;   /* Position in pending free requests ring */
     struct block   **pending_free;  /* Ring buffer for pending free requests */
     RTL_CRITICAL_SECTION cs;
-    struct entry     free_lists[HEAP_NB_FREE_LISTS];
+    struct entry     free_lists[FREE_LIST_COUNT];
     struct bin      *bins;
     SUBHEAP          subheap;
 };
@@ -569,23 +564,6 @@ static void valgrind_notify_free_all( SUBHEAP *subheap, const struct heap *heap
 #endif
 }
 
-/* locate a free list entry of the appropriate size */
-/* size is the size of the whole block including the arena header */
-static inline struct entry *find_free_list( struct heap *heap, SIZE_T block_size, BOOL last )
-{
-    struct entry *list, *end = heap->free_lists + ARRAY_SIZE(heap->free_lists);
-    unsigned int i;
-
-    if (block_size <= HEAP_MAX_SMALL_FREE_LIST)
-        i = (block_size - HEAP_MIN_BLOCK_SIZE) / BLOCK_ALIGN;
-    else for (i = HEAP_NB_SMALL_FREE_LISTS; i < HEAP_NB_FREE_LISTS - 1; i++)
-        if (block_size <= free_list_sizes[i - HEAP_NB_SMALL_FREE_LISTS]) break;
-
-    list = heap->free_lists + i;
-    if (last && ++list == end) list = heap->free_lists;
-    return list;
-}
-
 /* get the memory protection type to use for a given heap */
 static inline ULONG get_protection_type( DWORD flags )
 {
@@ -624,10 +602,60 @@ static void heap_set_status( const struct heap *heap, ULONG flags, NTSTATUS stat
     if (status) RtlSetLastWin32ErrorAndNtStatusFromNtStatus( status );
 }
 
-static size_t get_free_list_block_size( unsigned int index )
+static SIZE_T get_free_list_block_size( unsigned int index )
+{
+    DWORD log = index >> FREE_LIST_LINEAR_BITS;
+    DWORD linear = index & FREE_LIST_LINEAR_MASK;
+
+    if (log == 0) return index * BLOCK_ALIGN;
+
+    return (((1 << FREE_LIST_LINEAR_BITS) + linear) << (log - 1)) * BLOCK_ALIGN;
+}
+
+/*
+ * Given a size, return its index in the block size list for freelists.
+ *
+ * With FREE_LIST_LINEAR_BITS=2, the list looks like this
+ * (with respect to size / BLOCK_ALIGN):
+ *   0,
+ *   1, 2, 3, 4, 5, 6, 7, 8,
+ *   10, 12, 14, 16, 20, 24, 28, 32,
+ *   40, 48, 56, 64, 80, 96, 112, 128,
+ *   160, 192, 224, 256, 320, 384, 448, 512,
+ *   ...
+ */
+static unsigned int get_free_list_index( SIZE_T block_size )
+{
+    DWORD bit, log, linear;
+
+    if (block_size > get_free_list_block_size( FREE_LIST_COUNT - 1 ))
+        return FREE_LIST_COUNT - 1;
+
+    block_size /= BLOCK_ALIGN;
+    /* find the highest bit */
+    if (!BitScanReverse( &bit, block_size ) || bit < FREE_LIST_LINEAR_BITS)
+    {
+        /* for small values, the index is same as block_size. */
+        log = 0;
+        linear = block_size;
+    }
+    else
+    {
+        /* the highest bit is always set, ignore it and encode the next FREE_LIST_LINEAR_BITS bits
+         * as a linear scale, combined with the shift as a log scale, in the free list index. */
+        log = bit - FREE_LIST_LINEAR_BITS + 1;
+        linear = (block_size >> (bit - FREE_LIST_LINEAR_BITS)) & FREE_LIST_LINEAR_MASK;
+    }
+
+    return (log << FREE_LIST_LINEAR_BITS) + linear;
+}
+
+/* locate a free list entry of the appropriate size */
+static inline struct entry *find_free_list( struct heap *heap, SIZE_T block_size, BOOL last )
 {
-    if (index < HEAP_NB_SMALL_FREE_LISTS) return HEAP_MIN_BLOCK_SIZE + index * BLOCK_ALIGN;
-    return free_list_sizes[index - HEAP_NB_SMALL_FREE_LISTS];
+    unsigned int index = get_free_list_index( block_size );
+    if (last && ++index == FREE_LIST_COUNT) index = 0;
+    return &heap->free_lists[index];
 }
 
 static void heap_dump( const struct heap *heap )
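
To make the bucket layout in the comment above concrete, here is a self-contained sketch of the same linear+log mapping. It is an illustration, not Wine code: BitScanReverse is replaced with a portable loop, BLOCK_ALIGN is assumed to be 16 (2 * sizeof(void *) on 64-bit), and block_size is assumed to be a 16-bit field:

    #include <stdio.h>

    #define BLOCK_ALIGN 16  /* assumed: 2 * sizeof(void *) on 64-bit */
    #define FREE_LIST_LINEAR_BITS 2
    #define FREE_LIST_LINEAR_MASK ((1 << FREE_LIST_LINEAR_BITS) - 1)
    /* 16 below stands in for FIELD_BITS( struct block, block_size ) */
    #define FREE_LIST_COUNT ((16 - FREE_LIST_LINEAR_BITS + 1) * (1 << FREE_LIST_LINEAR_BITS) + 1)

    /* portable stand-in for BitScanReverse: highest set bit, returns 0 if value is 0 */
    static int bit_scan_reverse( unsigned int *bit, size_t value )
    {
        if (!value) return 0;
        *bit = 0;
        while (value >>= 1) (*bit)++;
        return 1;
    }

    static size_t get_free_list_block_size( unsigned int index )
    {
        unsigned int log = index >> FREE_LIST_LINEAR_BITS;
        unsigned int linear = index & FREE_LIST_LINEAR_MASK;

        if (log == 0) return (size_t)index * BLOCK_ALIGN;
        /* reattach the implicit high bit, then shift by the log scale */
        return (size_t)(((1 << FREE_LIST_LINEAR_BITS) + linear) << (log - 1)) * BLOCK_ALIGN;
    }

    static unsigned int get_free_list_index( size_t block_size )
    {
        unsigned int bit, log, linear;

        if (block_size > get_free_list_block_size( FREE_LIST_COUNT - 1 ))
            return FREE_LIST_COUNT - 1;

        block_size /= BLOCK_ALIGN;
        if (!bit_scan_reverse( &bit, block_size ) || bit < FREE_LIST_LINEAR_BITS)
        {
            log = 0;  /* small sizes map one-to-one onto the first buckets */
            linear = (unsigned int)block_size;
        }
        else
        {
            log = bit - FREE_LIST_LINEAR_BITS + 1;
            linear = (unsigned int)(block_size >> (bit - FREE_LIST_LINEAR_BITS)) & FREE_LIST_LINEAR_MASK;
        }
        return (log << FREE_LIST_LINEAR_BITS) + linear;
    }

    int main(void)
    {
        unsigned int i;
        for (i = 0; i < FREE_LIST_COUNT; i++)
        {
            size_t size = get_free_list_block_size( i );
            /* every bucket's lower bound must map back to its own index */
            printf( "index %2u -> size/BLOCK_ALIGN %4zu -> index %2u\n",
                    i, size / BLOCK_ALIGN, get_free_list_index( size ) );
        }
        return 0;
    }

Running this prints the sequence from the comment (0, 1..8, 10..32, 40..128, ...) and shows that each bucket's lower bound maps back to its own index, which is what the new find_free_list relies on.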
@@ -651,7 +679,7 @@ static void heap_dump( const struct heap *heap )
     }
 
     TRACE( "  free_lists: %p\n", heap->free_lists );
-    for (i = 0; i < HEAP_NB_FREE_LISTS; i++)
+    for (i = 0; i < FREE_LIST_COUNT; i++)
         TRACE( "    %p: size %#8Ix, prev %p, next %p\n", heap->free_lists + i, get_free_list_block_size( i ),
                LIST_ENTRY( heap->free_lists[i].entry.prev, struct entry, entry ),
                LIST_ENTRY( heap->free_lists[i].entry.next, struct entry, entry ) );
@@ -1126,7 +1154,7 @@ static BOOL is_valid_free_block( const struct heap *heap, const struct block *bl
     unsigned int i;
 
     if ((subheap = find_subheap( heap, block, FALSE ))) return TRUE;
-    for (i = 0; i < HEAP_NB_FREE_LISTS; i++) if (block == &heap->free_lists[i].block) return TRUE;
+    for (i = 0; i < FREE_LIST_COUNT; i++) if (block == &heap->free_lists[i].block) return TRUE;
     return FALSE;
 }
 
@@ -1510,7 +1538,7 @@ HANDLE WINAPI RtlCreateHeap( ULONG flags, void *addr, SIZE_T total_size, SIZE_T
     list_init( &heap->large_list );
 
     list_init( &heap->free_lists[0].entry );
-    for (i = 0, entry = heap->free_lists; i < HEAP_NB_FREE_LISTS; i++, entry++)
+    for (i = 0, entry = heap->free_lists; i < FREE_LIST_COUNT; i++, entry++)
     {
         block_set_flags( &entry->block, ~0, BLOCK_FLAG_FREE_LINK );
         block_set_size( &entry->block, 0 );