diff --git a/.ci/docker/ci_commit_pins/pytorch.txt b/.ci/docker/ci_commit_pins/pytorch.txt index 96c16e31ac4..db3684993c7 100644 --- a/.ci/docker/ci_commit_pins/pytorch.txt +++ b/.ci/docker/ci_commit_pins/pytorch.txt @@ -1 +1 @@ -659af3c353e49b35c191cdd2dba3b3c79d0e6822 +a79095985ccd3a645bd2fc31c5ae4fcf215ef518 diff --git a/.ci/scripts/test_model_e2e.sh b/.ci/scripts/test_model_e2e.sh index cb7785036d3..872146d3ab3 100755 --- a/.ci/scripts/test_model_e2e.sh +++ b/.ci/scripts/test_model_e2e.sh @@ -248,7 +248,7 @@ if [ "$AUDIO_URL" != "" ]; then elif [[ "$MODEL_NAME" == *whisper* ]] || [ "$MODEL_NAME" = "voxtral_realtime" ]; then conda install -y -c conda-forge "ffmpeg<8" pip install datasets soundfile - pip install torchcodec==0.11.0.dev20260217 --extra-index-url https://download.pytorch.org/whl/nightly/cpu + pip install torchcodec==0.11.0.dev20260310 --extra-index-url https://download.pytorch.org/whl/nightly/cpu python -c "from datasets import load_dataset;import soundfile as sf;sample = load_dataset('distil-whisper/librispeech_long', 'clean', split='validation')[0]['audio'];sf.write('${MODEL_DIR}/$AUDIO_FILE', sample['array'][:sample['sampling_rate']*30], sample['sampling_rate'])" fi diff --git a/examples/models/moshi/mimi/install_requirements.sh b/examples/models/moshi/mimi/install_requirements.sh index de179dc8c92..c862b2846ef 100755 --- a/examples/models/moshi/mimi/install_requirements.sh +++ b/examples/models/moshi/mimi/install_requirements.sh @@ -8,7 +8,7 @@ set -x sudo apt install ffmpeg -y -pip install torchcodec==0.11.0.dev20260217 --extra-index-url https://download.pytorch.org/whl/nightly/cpu +pip install torchcodec==0.11.0.dev20260310 --extra-index-url https://download.pytorch.org/whl/nightly/cpu pip install moshi==0.2.11 pip install bitsandbytes soundfile einops # Run llama2/install requirements for torchao deps diff --git a/exir/sym_util.py b/exir/sym_util.py index 64f4b64a32a..6b7a38ae224 100644 --- a/exir/sym_util.py +++ b/exir/sym_util.py @@ -25,7 
+25,10 @@ def eval_expr(symint: Union[int, torch.SymInt]) -> Optional[int]: shape_env = node.shape_env expr = node.expr try: - output = shape_env.size_hint(expr) + if hasattr(shape_env, "guarding_hint_or_throw"): + output = shape_env.guarding_hint_or_throw(expr) + else: + output = shape_env.size_hint(expr) except torch.fx.experimental.symbolic_shapes.GuardOnDataDependentSymNode: return None return int(output) diff --git a/runtime/core/portable_type/c10/c10/util/complex_math.h b/runtime/core/portable_type/c10/c10/util/complex_math.h index 2b591026c94..d369df50592 100644 --- a/runtime/core/portable_type/c10/c10/util/complex_math.h +++ b/runtime/core/portable_type/c10/c10/util/complex_math.h @@ -86,6 +86,41 @@ C10_HOST_DEVICE inline c10::complex<T> pow( #endif } +// Regression in ROCm 7.2. See https://github.com/ROCm/rocm-libraries/pull/3836. +// Specialized version for complex<float> on AMD GPUs to use FMA-based +// multiplication +#if defined(__HIPCC__) +namespace detail { +// FMA-aware complex multiplication for float precision on AMD GPUs. +// This prevents SLP vectorizer from breaking FMA formation, which causes +// numerical precision loss in complex arithmetic. +// The issue occurs when vectorizer packs scalar multiplies before backend +// can form FMA instructions, resulting in double rounding instead of single. 
+C10_HOST_DEVICE inline thrust::complex<float> complex_mul_fma( + thrust::complex<float> a, + thrust::complex<float> b) { + // Complex multiplication: (a.r + a.i*i) * (b.r + b.i*i) + // = (a.r*b.r - a.i*b.i) + (a.r*b.i + a.i*b.r)*i + // Using __builtin_fmaf ensures FMA at source level: + // real: a.r*b.r + (-(a.i*b.i)) = FMA(a.r, b.r, -(a.i*b.i)) + // imag: a.i*b.r + a.r*b.i = FMA(a.r, b.i, a.i*b.r) + float real_part = __builtin_fmaf(a.real(), b.real(), -(a.imag() * b.imag())); + float imag_part = __builtin_fmaf(a.real(), b.imag(), a.imag() * b.real()); + return thrust::complex<float>(real_part, imag_part); +} +} // namespace detail + +template <> +C10_HOST_DEVICE inline c10::complex<float> pow( + const c10::complex<float>& x, + const c10::complex<float>& y) { + auto log_x = thrust::log(static_cast<thrust::complex<float>>(x)); + auto y_log_x = + detail::complex_mul_fma(static_cast<thrust::complex<float>>(y), log_x); + return static_cast<c10::complex<float>>(thrust::exp(y_log_x)); +} +#endif + template <typename T> C10_HOST_DEVICE inline c10::complex<T> pow( const c10::complex<T>& x, diff --git a/runtime/core/portable_type/c10/torch/headeronly/macros/Macros.h b/runtime/core/portable_type/c10/torch/headeronly/macros/Macros.h index 63aa0d20d8e..880e741abf6 100644 --- a/runtime/core/portable_type/c10/torch/headeronly/macros/Macros.h +++ b/runtime/core/portable_type/c10/torch/headeronly/macros/Macros.h @@ -629,7 +629,7 @@ __host__ __device__ // This macro is used to find older C++ compilers // that don't support move optimization for return values. -#if (defined(__GNUC__) && __GNUC__ < 13) || \ +#if (defined(__GNUC__) && __GNUC__ < 13 && __cplusplus < 202002L) || \ (defined(__clang_major__) && __clang_major__ < 13) #define C10_RETURN_MOVE_IF_OLD_COMPILER 1 #else diff --git a/torch_pin.py b/torch_pin.py index 2dd1ac62f51..92d3c72ba2a 100644 --- a/torch_pin.py +++ b/torch_pin.py @@ -1,2 +1,2 @@ -TORCH_VERSION = "2.11.0" -NIGHTLY_VERSION = "dev20260215" +TORCH_VERSION = "2.12.0" +NIGHTLY_VERSION = "dev20260310"