diff --git a/benches/sync_rwlock.rs b/benches/sync_rwlock.rs index 46eeac0c1d0..1ac56518d8e 100644 --- a/benches/sync_rwlock.rs +++ b/benches/sync_rwlock.rs @@ -14,7 +14,7 @@ fn read_uncontended(b: &mut Bencher) { rt.block_on(async move { for _ in 0..6 { let read = lock.read().await; - black_box(read); + let _read = black_box(read); } }) }); @@ -28,7 +28,7 @@ fn read_concurrent_uncontended_multi(b: &mut Bencher) { async fn task(lock: Arc<RwLock<()>>) { let read = lock.read().await; - black_box(read); + let _read = black_box(read); } let lock = Arc::new(RwLock::new(())); @@ -55,7 +55,7 @@ fn read_concurrent_uncontended(b: &mut Bencher) { async fn task(lock: Arc<RwLock<()>>) { let read = lock.read().await; - black_box(read); + let _read = black_box(read); } let lock = Arc::new(RwLock::new(())); @@ -82,7 +82,7 @@ fn read_concurrent_contended_multi(b: &mut Bencher) { async fn task(lock: Arc<RwLock<()>>) { let read = lock.read().await; - black_box(read); + let _read = black_box(read); } let lock = Arc::new(RwLock::new(())); @@ -110,7 +110,7 @@ fn read_concurrent_contended(b: &mut Bencher) { async fn task(lock: Arc<RwLock<()>>) { let read = lock.read().await; - black_box(read); + let _read = black_box(read); } let lock = Arc::new(RwLock::new(())); diff --git a/tokio/src/sync/mutex.rs b/tokio/src/sync/mutex.rs index 4c9899f940b..1104cb85bfc 100644 --- a/tokio/src/sync/mutex.rs +++ b/tokio/src/sync/mutex.rs @@ -141,6 +141,7 @@ pub struct Mutex<T: ?Sized> { /// /// The lock is automatically released whenever the guard is dropped, at which /// point `lock` will succeed yet again. 
+#[must_use = "if unused the Mutex will immediately unlock"] pub struct MutexGuard<'a, T: ?Sized> { #[cfg(all(tokio_unstable, feature = "tracing"))] resource_span: tracing::Span, @@ -766,7 +767,7 @@ impl<'a, T: ?Sized> MutexGuard<'a, T> { /// # async fn main() { /// # let mutex = Mutex::new(0u32); /// # let guard = mutex.lock().await; - /// # unlock_and_relock(guard).await; + /// # let _guard = unlock_and_relock(guard).await; /// # } /// ``` #[inline] diff --git a/tokio/src/sync/rwlock/read_guard.rs b/tokio/src/sync/rwlock/read_guard.rs index 36921319923..f5fc1d6de81 100644 --- a/tokio/src/sync/rwlock/read_guard.rs +++ b/tokio/src/sync/rwlock/read_guard.rs @@ -12,6 +12,7 @@ use std::ops; /// /// [`read`]: method@crate::sync::RwLock::read /// [`RwLock`]: struct@crate::sync::RwLock +#[must_use = "if unused the RwLock will immediately unlock"] pub struct RwLockReadGuard<'a, T: ?Sized> { #[cfg(all(tokio_unstable, feature = "tracing"))] pub(super) resource_span: tracing::Span, diff --git a/tokio/src/sync/rwlock/write_guard.rs b/tokio/src/sync/rwlock/write_guard.rs index 7cadd74c60d..cefa183d996 100644 --- a/tokio/src/sync/rwlock/write_guard.rs +++ b/tokio/src/sync/rwlock/write_guard.rs @@ -14,6 +14,7 @@ use std::ops; /// /// [`write`]: method@crate::sync::RwLock::write /// [`RwLock`]: struct@crate::sync::RwLock +#[must_use = "if unused the RwLock will immediately unlock"] pub struct RwLockWriteGuard<'a, T: ?Sized> { #[cfg(all(tokio_unstable, feature = "tracing"))] pub(super) resource_span: tracing::Span, diff --git a/tokio/tests/sync_mutex.rs b/tokio/tests/sync_mutex.rs index e7650dcc4eb..1e35a558c1d 100644 --- a/tokio/tests/sync_mutex.rs +++ b/tokio/tests/sync_mutex.rs @@ -53,7 +53,7 @@ fn readiness() { // But once g unlocks, we can acquire it drop(g); assert!(t2.is_woken()); - assert_ready!(t2.poll()); + let _t2 = assert_ready!(t2.poll()); } /* @@ -103,7 +103,7 @@ async fn aborted_future_1() { timeout(Duration::from_millis(1u64), async move { let iv = 
interval(Duration::from_millis(1000)); tokio::pin!(iv); - m2.lock().await; + let _g = m2.lock().await; iv.as_mut().tick().await; iv.as_mut().tick().await; }) @@ -112,7 +112,7 @@ async fn aborted_future_1() { } // This should succeed as there is no lock left for the mutex. timeout(Duration::from_millis(1u64), async move { - m1.lock().await; + let _g = m1.lock().await; }) .await .expect("Mutex is locked"); @@ -134,7 +134,7 @@ async fn aborted_future_2() { let m2 = m1.clone(); // Try to lock mutex in a future that is aborted prematurely timeout(Duration::from_millis(1u64), async move { - m2.lock().await; + let _g = m2.lock().await; }) .await .unwrap_err(); @@ -142,7 +142,7 @@ } // This should succeed as there is no lock left for the mutex. timeout(Duration::from_millis(1u64), async move { - m1.lock().await; + let _g = m1.lock().await; }) .await .expect("Mutex is locked"); diff --git a/tokio/tests/sync_panic.rs b/tokio/tests/sync_panic.rs index 73bf67d97aa..d3a47067744 100644 --- a/tokio/tests/sync_panic.rs +++ b/tokio/tests/sync_panic.rs @@ -30,7 +30,7 @@ fn mutex_blocking_lock_panic_caller() -> Result<(), Box<dyn Error>> { let rt = basic(); rt.block_on(async { let mutex = Mutex::new(5_u32); - mutex.blocking_lock(); + let _g = mutex.blocking_lock(); }); }); diff --git a/tokio/tests/sync_rwlock.rs b/tokio/tests/sync_rwlock.rs index afad39eb481..948ec131ed1 100644 --- a/tokio/tests/sync_rwlock.rs +++ b/tokio/tests/sync_rwlock.rs @@ -31,7 +31,7 @@ fn read_shared() { let mut t1 = spawn(rwlock.read()); let _g1 = assert_ready!(t1.poll()); let mut t2 = spawn(rwlock.read()); - assert_ready!(t2.poll()); + let _g2 = assert_ready!(t2.poll()); } // When there is an active shared owner, exclusive access should not be possible @@ -75,7 +75,7 @@ fn exhaust_reading() { let g2 = reads.pop().unwrap(); drop(g2); assert!(t1.is_woken()); - assert_ready!(t1.poll()); + let _g1 = assert_ready!(t1.poll()); } // When there is an active exclusive owner, subsequent exclusive 
access should not be possible @@ -100,7 +100,7 @@ fn write_shared_drop() { assert_pending!(t2.poll()); drop(g1); assert!(t2.is_woken()); - assert_ready!(t2.poll()); + let _g2 = assert_ready!(t2.poll()); } // when there is an active shared owner, and exclusive access is triggered, @@ -112,7 +112,7 @@ fn write_read_shared_pending() { let _g1 = assert_ready!(t1.poll()); let mut t2 = spawn(rwlock.read()); - assert_ready!(t2.poll()); + let _g2 = assert_ready!(t2.poll()); let mut t3 = spawn(rwlock.write()); assert_pending!(t3.poll()); @@ -137,7 +137,7 @@ fn write_read_shared_drop_pending() { drop(t2); assert!(t3.is_woken()); - assert_ready!(t3.poll()); + let _t3 = assert_ready!(t3.poll()); } // Acquire an RwLock nonexclusively by a single task