@@ -538,3 +538,148 @@ pub fn fuzzer_mutate(data: &mut [u8], size: usize, max_size: usize) -> usize {
     assert!(new_size <= data.len());
     new_size
 }
+
+/// Define a custom cross-over function to combine test cases.
+///
+/// This is optional; if it is not provided, libFuzzer falls back to its own
+/// default cross-over strategy. (At the time of writing, that default takes
+/// alternating byte sequences from the two test cases to construct the new
+/// one; see `FuzzerCrossOver.cpp`.)
+///
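+/// As a drastically simplified, purely illustrative sketch of that idea (real
+/// libFuzzer alternates byte sequences of pseudo-random length, not single
+/// bytes), the default behaves roughly like this:
+///
+/// ```no_run
+/// // Illustrative only; this is not libFuzzer's actual implementation.
+/// fn alternating_crossover(data1: &[u8], data2: &[u8], out: &mut [u8]) -> usize {
+///     let mut written = 0;
+///     for (i, slot) in out.iter_mut().enumerate() {
+///         // Take bytes alternately from `data1` and `data2` until either
+///         // input runs out or `out` is full.
+///         let byte = if i % 2 == 0 {
+///             data1.get(i / 2)
+///         } else {
+///             data2.get(i / 2)
+///         };
+///         match byte {
+///             Some(b) => {
+///                 *slot = *b;
+///                 written += 1;
+///             }
+///             None => break,
+///         }
+///     }
+///     written
+/// }
+/// ```
+///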
+/// Providing your own cross-over can be useful if, for instance, your input is
+/// a sequence of fixed-size, multi-byte values; the cross-over can then merge
+/// whole values rather than splicing together parts of a value.
+///
+/// ## Implementation Contract
+///
+/// The original, read-only inputs are given as the full slices `data1` and
+/// `data2` (unlike the potentially partial slice of `data` in
+/// [the `fuzz_mutator!` macro][crate::fuzz_mutator]).
+///
+/// You must write the new input, merged from the two existing inputs' data,
+/// into `out` and return the number of bytes written to that slice.
+///
+/// The determinism requirements on the `seed` parameter from
+/// [the `fuzz_mutator!` macro][crate::fuzz_mutator] apply here as well.
+///
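+/// As a bare-bones sketch of this contract (deliberately trivial, and not a
+/// useful cross-over strategy in practice), the hypothetical crossover below
+/// simply keeps as much of `data1` as fits in `out`:
+///
+/// ```no_run
+/// #![no_main]
+///
+/// use libfuzzer_sys::{fuzz_crossover, fuzz_target};
+///
+/// fuzz_target!(|data: &[u8]| {
+///     // Exercise the input in some way.
+///     let _ = data.iter().copied().fold(0u8, u8::wrapping_add);
+/// });
+///
+/// fuzz_crossover!(|data1: &[u8], data2: &[u8], out: &mut [u8], _seed: u32| {
+///     // Copy whatever fits of `data1` into `out` and report how many bytes
+///     // were written; a real cross-over would combine both inputs.
+///     let n = data1.len().min(out.len());
+///     out[..n].copy_from_slice(&data1[..n]);
+///     n
+/// });
+/// ```
+///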
+/// ## Example: Floating-Point Sum NaN
+///
+/// ```no_run
+/// #![no_main]
+///
+/// use libfuzzer_sys::{fuzz_crossover, fuzz_mutator, fuzz_target, fuzzer_mutate};
+/// use rand::{rngs::StdRng, Rng, SeedableRng};
+/// use std::mem::size_of;
+///
+/// fuzz_target!(|data: &[u8]| {
+///     let (_, floats, _) = unsafe { data.align_to::<f64>() };
+///
+///     let res = floats
+///         .iter()
+///         .fold(0.0, |a, b| if b.is_nan() { a } else { a + b });
+///
+///     assert!(
+///         !res.is_nan(),
+///         "The sum of the following floats resulted in a NaN: {floats:?}"
+///     );
+/// });
+///
+/// // Inject some potentially problematic values to make the example finish
+/// // more quickly.
+/// fuzz_mutator!(|data: &mut [u8], size: usize, max_size: usize, seed: u32| {
+///     let mut gen = StdRng::seed_from_u64(seed.into());
+///
+///     let (_, floats, _) = unsafe { data[..size].align_to_mut::<f64>() };
+///
+///     let x = gen.gen_range(0..=1000);
+///     if x == 0 && !floats.is_empty() {
+///         floats[0] = f64::INFINITY;
+///     } else if x == 1000 && floats.len() > 1 {
+///         floats[1] = f64::NEG_INFINITY;
+///     } else {
+///         return fuzzer_mutate(data, size, max_size);
+///     }
+///
+///     size
+/// });
+///
+/// fuzz_crossover!(|data1: &[u8], data2: &[u8], out: &mut [u8], _seed: u32| {
+///     // Decode each source to see how many properly aligned floats we can
+///     // pull from it, and the destination to see how many will fit with
+///     // proper alignment.
+///     //
+///     // Keep track of the unaligned prefix of `out`, because those bytes
+///     // stay prepended to the actual floats that we write into the aligned
+///     // part of the buffer.
+///     let (out_pref, out_floats, _) = unsafe { out.align_to_mut::<f64>() };
+///     let (_, d1_floats, _) = unsafe { data1.align_to::<f64>() };
+///     let (_, d2_floats, _) = unsafe { data2.align_to::<f64>() };
+///
+///     // Copy floats into the destination, first from `data1` and then from
+///     // `data2`, as far as the size of `out` allows.
+///     let mut i: usize = 0;
+///     for float in d1_floats.iter().chain(d2_floats).take(out_floats.len()) {
+///         out_floats[i] = *float;
+///         i += 1;
+///     }
+///
+///     // Report the size of the new input back to the fuzzing engine: the
+///     // unaligned prefix bytes left at the beginning of `out`, plus the
+///     // floats that we wrote into the aligned float section.
+///     out_pref.len() * size_of::<u8>() + i * size_of::<f64>()
+/// });
+/// ```
+///
+/// This example is a minimized version of [Erik Rigtorp's floating point summation fuzzing example][1].
+/// A more detailed version of this experiment can be found in the
+/// `example_crossover` directory.
+///
+/// [1]: https://rigtorp.se/fuzzing-floating-point-code/
+#[macro_export]
+macro_rules! fuzz_crossover {
+    (
+        |
+        $data1:ident : &[u8] ,
+        $data2:ident : &[u8] ,
+        $out:ident : &mut [u8] ,
+        $seed:ident : u32 $(,)*
+        |
+        $body:block
+    ) => {
+        /// Auto-generated function. Do not use; only for LibFuzzer's
+        /// consumption.
+        #[export_name = "LLVMFuzzerCustomCrossOver"]
+        #[doc(hidden)]
+        pub unsafe fn rust_fuzzer_custom_crossover(
+            $data1: *const u8,
+            size1: usize,
+            $data2: *const u8,
+            size2: usize,
+            $out: *mut u8,
+            max_out_size: usize,
+            $seed: std::os::raw::c_uint,
+        ) -> usize {
+            let $data1: &[u8] = std::slice::from_raw_parts($data1, size1);
+            let $data2: &[u8] = std::slice::from_raw_parts($data2, size2);
+            let $out: &mut [u8] = std::slice::from_raw_parts_mut($out, max_out_size);
+
+            // `unsigned int` is generally a `u32`, but not on all targets. Do
+            // an infallible (and potentially lossy, but that's okay because it
+            // preserves determinism) conversion.
+            let $seed = $seed as u32;
+
+            // Define and invoke a new, safe function so that the body doesn't
+            // inherit `unsafe`.
+            fn custom_crossover(
+                $data1: &[u8],
+                $data2: &[u8],
+                $out: &mut [u8],
+                $seed: u32,
+            ) -> usize {
+                $body
+            }
+
+            custom_crossover($data1, $data2, $out, $seed)
+        }
+    };
+}