 #include "random.h"
 #include "util.h"
 
+#if CONFIG_BLOCK_OPS_CHECK_SIZE && !defined(HAS_ARM_MTE)
+#include "musl.h"
+#endif
+
 #ifdef USE_PKEY
 #include <sys/mman.h>
 #endif
@@ -528,7 +532,7 @@ static void set_canary(UNUSED const struct slab_metadata *metadata, UNUSED void
     }
 #endif
 
-    memcpy((char *)p + size - canary_size, &metadata->canary_value, canary_size);
+    h_memcpy_internal((char *)p + size - canary_size, &metadata->canary_value, canary_size);
 #endif
 }
 
@@ -541,7 +545,7 @@ static void check_canary(UNUSED const struct slab_metadata *metadata, UNUSED con
 #endif
 
     u64 canary_value;
-    memcpy(&canary_value, (const char *)p + size - canary_size, canary_size);
+    h_memcpy_internal(&canary_value, (const char *)p + size - canary_size, canary_size);
 
 #ifdef HAS_ARM_MTE
     if (unlikely(canary_value == 0)) {
@@ -831,7 +835,7 @@ static inline void deallocate_small(void *p, const size_t *expected_size) {
 #endif
 
         if (ZERO_ON_FREE && !skip_zero) {
-            memset(p, 0, size - canary_size);
+            h_memset_internal(p, 0, size - canary_size);
         }
     }
 
@@ -1502,7 +1506,7 @@ EXPORT void *h_calloc(size_t nmemb, size_t size) {
     total_size = adjust_size_for_canary(total_size);
     void *p = alloc(total_size);
     if (!ZERO_ON_FREE && likely(p != NULL) && total_size && total_size <= max_slab_size_class) {
-        memset(p, 0, total_size - canary_size);
+        h_memset_internal(p, 0, total_size - canary_size);
     }
 #ifdef HAS_ARM_MTE
     // use an assert instead of adding a conditional to memset() above (freed memory is always
@@ -1624,7 +1628,7 @@ EXPORT void *h_realloc(void *old, size_t size) {
         mutex_unlock(&ra->lock);
 
         if (memory_remap_fixed(old, old_size, new, size)) {
-            memcpy(new, old, copy_size);
+            h_memcpy_internal(new, old, copy_size);
             deallocate_pages(old, old_size, old_guard_size);
         } else {
             memory_unmap((char *)old - old_guard_size, old_guard_size);
@@ -1646,7 +1650,7 @@ EXPORT void *h_realloc(void *old, size_t size) {
     if (copy_size > 0 && copy_size <= max_slab_size_class) {
        copy_size -= canary_size;
     }
-    memcpy(new, old_orig, copy_size);
+    h_memcpy_internal(new, old_orig, copy_size);
     if (old_size <= max_slab_size_class) {
         deallocate_small(old, NULL);
     } else {
@@ -1874,6 +1878,86 @@ EXPORT size_t h_malloc_object_size_fast(const void *p) {
     return SIZE_MAX;
 }
 
+#if CONFIG_BLOCK_OPS_CHECK_SIZE && !defined(HAS_ARM_MTE)
+EXPORT void *memcpy(void *restrict dst, const void *restrict src, size_t len) {
+    if (dst == src || len == 0) {
+        return dst;
+    }
+    if (unlikely(dst < src + len && dst + len > src)) {
+        fatal_error("memcpy overlap");
+    }
+    if (unlikely(len > malloc_object_size(src))) {
+        fatal_error("memcpy read overflow");
+    }
+    if (unlikely(len > malloc_object_size(dst))) {
+        fatal_error("memcpy buffer overflow");
+    }
+    return musl_memcpy(dst, src, len);
+}
+
+EXPORT void *memmove(void *dst, const void *src, size_t len) {
+    if (dst == src || len == 0) {
+        return dst;
+    }
+    if (unlikely(len > malloc_object_size(src))) {
+        fatal_error("memmove read overflow");
+    }
+    if (unlikely(len > malloc_object_size(dst))) {
+        fatal_error("memmove buffer overflow");
+    }
+    return musl_memmove(dst, src, len);
+}
+
+EXPORT void *memset(void *dst, int value, size_t len) {
+    if (len == 0) {
+        return dst;
+    }
+    if (unlikely(len > malloc_object_size(dst))) {
+        fatal_error("memset buffer overflow");
+    }
+    return musl_memset(dst, value, len);
+}
+
+EXPORT wchar_t *wmemcpy(wchar_t *restrict dst, const wchar_t *restrict src, size_t len) {
+    if (dst == src || len == 0) {
+        return dst;
+    }
+    if (dst < src + len && dst + len > src) {
+        fatal_error("wmemcpy overlap");
+    }
+    if (len > malloc_object_size(src)) {
+        fatal_error("wmemcpy read overflow");
+    }
+    if (len > malloc_object_size(dst)) {
+        fatal_error("wmemcpy buffer overflow");
+    }
+    return musl_wmemcpy(dst, src, len);
+}
+
+EXPORT wchar_t *wmemmove(wchar_t *dst, const wchar_t *src, size_t len) {
+    if (dst == src || len == 0) {
+        return dst;
+    }
+    if (len > malloc_object_size(src)) {
+        fatal_error("wmemmove read overflow");
+    }
+    if (len > malloc_object_size(dst)) {
+        fatal_error("wmemmove buffer overflow");
+    }
+    return musl_wmemmove(dst, src, len);
+}
+
+EXPORT wchar_t *wmemset(wchar_t *dst, wchar_t value, size_t len) {
+    if (len == 0) {
+        return dst;
+    }
+    if (len > malloc_object_size(dst)) {
+        fatal_error("wmemset buffer overflow");
+    }
+    return musl_wmemset(dst, value, len);
+}
+#endif
+
 EXPORT int h_mallopt(UNUSED int param, UNUSED int value) {
 #ifdef __ANDROID__
     if (param == M_PURGE) {
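For illustration, a minimal standalone sketch (not part of this commit) of what the interposed memcpy() above enforces when hardened_malloc provides the allocator and CONFIG_BLOCK_OPS_CHECK_SIZE is enabled without MTE: a copy whose length exceeds the usable size of the destination allocation is stopped by fatal_error("memcpy buffer overflow") instead of silently writing out of bounds.

/* Illustration only; assumes the checked memcpy() wrapper above is interposed. */
#include <stdlib.h>
#include <string.h>

int main(void) {
    char src[32] = {0};
    char *dst = malloc(16);    /* small slab allocation */
    if (dst == NULL) {
        return 1;
    }
    /* len (32) exceeds malloc_object_size(dst), so the wrapper above
       calls fatal_error("memcpy buffer overflow") and aborts. */
    memcpy(dst, src, sizeof(src));
    free(dst);
    return 0;
}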