@@ -376,7 +376,10 @@ pub struct Local {
376
376
// Guard against `Local` growing past one reasonable allocation unit.
// Context: https://github.com/crossbeam-rs/crossbeam/issues/551
#[test]
fn local_size() {
    // `Local` instances are heap-allocated per participant; keep each one
    // at or under 2 KiB so registration stays cheap.
    let local_bytes = core::mem::size_of::<Local>();
    assert!(
        local_bytes <= 2048,
        "An allocation of `Local` should be <= 2048 bytes."
    );
}
381
384
382
385
impl Local {
@@ -468,7 +471,7 @@ impl Local {
468
471
// a `SeqCst` fence.
469
472
//
470
473
// 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction.
471
- // 2. `_.compare_and_swap (_, _, SeqCst)`, which compiles into a `lock cmpxchg`
474
+ // 2. `_.compare_exchange (_, _, SeqCst , SeqCst)`, which compiles into a `lock cmpxchg`
472
475
// instruction.
473
476
//
474
477
// Both instructions have the effect of a full barrier, but benchmarks have shown
@@ -478,10 +481,13 @@ impl Local {
478
481
// works fine. Using inline assembly would be a viable (and correct) alternative,
479
482
// but alas, that is not possible on stable Rust.
480
483
let current = Epoch :: starting ( ) ;
481
- let previous = self
482
- . epoch
483
- . compare_and_swap ( current, new_epoch, Ordering :: SeqCst ) ;
484
- debug_assert_eq ! ( current, previous, "participant was expected to be unpinned" ) ;
484
+ let res = self . epoch . compare_exchange (
485
+ current,
486
+ new_epoch,
487
+ Ordering :: SeqCst ,
488
+ Ordering :: SeqCst ,
489
+ ) ;
490
+ debug_assert ! ( res. is_ok( ) , "participant was expected to be unpinned" ) ;
485
491
// We add a compiler fence to make it less likely for LLVM to do something wrong
486
492
// here. Formally, this is not enough to get rid of data races; practically,
487
493
// it should go a long way.
0 commit comments