diff --git a/compiler/rustc_mir_build/src/builder/mod.rs b/compiler/rustc_mir_build/src/builder/mod.rs index 01c1e2e79b501..f1b5fd2c718a3 100644 --- a/compiler/rustc_mir_build/src/builder/mod.rs +++ b/compiler/rustc_mir_build/src/builder/mod.rs @@ -36,7 +36,7 @@ use rustc_infer::infer::{InferCtxt, TyCtxtInferExt}; use rustc_middle::hir::place::PlaceBase as HirPlaceBase; use rustc_middle::middle::region; use rustc_middle::mir::*; -use rustc_middle::thir::{self, ExprId, LocalVarId, Param, ParamId, PatKind, Thir}; +use rustc_middle::thir::{self, ExprId, ExprKind, LocalVarId, Param, ParamId, PatKind, Thir}; use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt, TypeVisitableExt, TypingMode}; use rustc_middle::{bug, span_bug}; use rustc_session::lint; @@ -178,6 +178,11 @@ struct Builder<'a, 'tcx> { arg_count: usize, coroutine: Option>>, + /// Whether this coroutine's body contains any yield points (`.await`). + /// When false, no locals can be live across a yield, so `StorageDead` + /// can be omitted from the unwind path — see `DropData::storage_dead_on_unwind`. + coroutine_has_yields: bool, + /// The current set of scopes, updated as we traverse; /// see the `scope` module for more details. scopes: scope::Scopes<'tcx>, @@ -777,6 +782,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { cfg: CFG { basic_blocks: IndexVec::new() }, fn_span: span, arg_count, + coroutine_has_yields: coroutine.is_some() + && thir.exprs.iter().any(|expr| matches!(expr.kind, ExprKind::Yield { .. })), coroutine, scopes: scope::Scopes::new(), block_context: BlockContext::new(), diff --git a/compiler/rustc_mir_build/src/builder/scope.rs b/compiler/rustc_mir_build/src/builder/scope.rs index 91610e768d012..03c1c386d9d91 100644 --- a/compiler/rustc_mir_build/src/builder/scope.rs +++ b/compiler/rustc_mir_build/src/builder/scope.rs @@ -159,6 +159,20 @@ struct DropData { /// Whether this is a value Drop or a StorageDead. kind: DropKind, + + /// Whether this drop's `StorageDead` should be emitted on the unwind path. 
+ /// For coroutines with yield points, `StorageDead` on the unwind path is + /// needed to prevent the coroutine drop handler from re-dropping locals + /// already dropped during unwinding. However, emitting them for every drop + /// in a coroutine causes O(n^2) MIR basic blocks for large aggregate + /// initialisers (each element's unwind chain becomes unique due to distinct + /// `StorageDead` locals, preventing tail-sharing). + /// + /// To avoid the quadratic blowup, this flag is only set when the coroutine + /// body contains yield points. A coroutine without yields (e.g. an async fn + /// with no `.await`) cannot have locals live across a yield, so the drop + /// handler issue does not arise. + storage_dead_on_unwind: bool, } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] @@ -263,12 +277,13 @@ impl Scope { /// * polluting the cleanup MIR with StorageDead creates /// landing pads even though there's no actual destructors /// * freeing up stack space has no effect during unwinding - /// Note that for coroutines we do emit StorageDeads, for the - /// use of optimizations in the MIR coroutine transform. + /// Note that for coroutines we may also emit `StorageDead` on + /// unwind for drops scheduled after a yield point — see + /// `DropData::storage_dead_on_unwind`. fn needs_cleanup(&self) -> bool { self.drops.iter().any(|drop| match drop.kind { DropKind::Value | DropKind::ForLint => true, - DropKind::Storage => false, + DropKind::Storage => drop.storage_dead_on_unwind, }) } @@ -296,8 +311,12 @@ impl DropTree { // represents the block in the tree that should be jumped to once all // of the required drops have been performed. 
let fake_source_info = SourceInfo::outermost(DUMMY_SP); - let fake_data = - DropData { source_info: fake_source_info, local: Local::MAX, kind: DropKind::Storage }; + let fake_data = DropData { + source_info: fake_source_info, + local: Local::MAX, + kind: DropKind::Storage, + storage_dead_on_unwind: false, + }; let drop_nodes = IndexVec::from_raw(vec![DropNode { data: fake_data, next: DropIdx::MAX }]); Self { drop_nodes, entry_points: Vec::new(), existing_drops_map: FxHashMap::default() } } @@ -1111,7 +1130,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { return None; } - Some(DropData { source_info, local, kind: DropKind::Value }) + Some(DropData { + source_info, + local, + kind: DropKind::Value, + storage_dead_on_unwind: false, + }) } Operand::Constant(_) | Operand::RuntimeChecks(_) => None, }) @@ -1239,7 +1263,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { block, unwind_to, dropline_to, - is_coroutine && needs_cleanup, self.arg_count, |v: Local| Self::is_async_drop_impl(self.tcx, &self.local_decls, typing_env, v), ) @@ -1474,10 +1497,23 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // path, we only need to invalidate the cache for drops that happen on // the unwind or coroutine drop paths. This means that for // non-coroutines we don't need to invalidate caches for `DropKind::Storage`. - let invalidate_caches = needs_drop || self.coroutine.is_some(); + // + // For coroutines, `StorageDead` on the unwind path is needed for + // correct drop handling: without it, the coroutine drop handler may + // re-drop locals already dropped during unwinding. However, this is + // only relevant when the coroutine body contains yield points, because + // locals can only span a yield if one exists. For coroutines without + // yields (e.g. an async fn whose body has no `.await`), we skip + // `StorageDead` on unwind, avoiding O(n^2) MIR blowup. See #115327. 
+ let storage_dead_on_unwind = self.coroutine_has_yields; + let invalidate_unwind = needs_drop || storage_dead_on_unwind; + let invalidate_dropline = needs_drop || self.coroutine.is_some(); for scope in self.scopes.scopes.iter_mut().rev() { - if invalidate_caches { - scope.invalidate_cache(); + if invalidate_unwind { + scope.cached_unwind_block = None; + } + if invalidate_dropline { + scope.cached_coroutine_drop_block = None; } if scope.region_scope == region_scope { @@ -1489,6 +1525,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { source_info: SourceInfo { span: scope_end, scope: scope.source_scope }, local, kind: drop_kind, + storage_dead_on_unwind, }); return; @@ -1520,6 +1557,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { source_info: SourceInfo { span: scope_end, scope: scope.source_scope }, local, kind: DropKind::ForLint, + storage_dead_on_unwind: false, }); return; @@ -1622,10 +1660,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { return cached_drop; } - let is_coroutine = self.coroutine.is_some(); for scope in &mut self.scopes.scopes[uncached_scope..=target] { for drop in &scope.drops { - if is_coroutine || drop.kind == DropKind::Value { + if drop.kind == DropKind::Value || drop.storage_dead_on_unwind { cached_drop = self.scopes.unwind_drops.add_drop(*drop, cached_drop); } } @@ -1811,7 +1848,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { /// instructions on unwinding) /// * `dropline_to`, describes the drops that would occur at this point in the code if a /// coroutine drop occurred. 
-/// * `storage_dead_on_unwind`, if true, then we should emit `StorageDead` even when unwinding /// * `arg_count`, number of MIR local variables corresponding to fn arguments (used to assert that we don't drop those) fn build_scope_drops<'tcx, F>( cfg: &mut CFG<'tcx>, @@ -1821,7 +1857,6 @@ fn build_scope_drops<'tcx, F>( block: BasicBlock, unwind_to: DropIdx, dropline_to: Option<DropIdx>, - storage_dead_on_unwind: bool, arg_count: usize, is_async_drop: F, ) -> BlockAnd<()> where @@ -1844,10 +1879,10 @@ where // drops panic (panicking while unwinding will abort, so there's no need for // another set of arrows). // - // For coroutines, we unwind from a drop on a local to its StorageDead - // statement. For other functions we don't worry about StorageDead. The - // drops for the unwind path should have already been generated by - // `diverge_cleanup_gen`. + // For drops in coroutines that were scheduled after a yield point, we + // unwind from a drop on a local to its StorageDead statement. For other + // drops we don't worry about StorageDead. The drops for the unwind path + // should have already been generated by `diverge_cleanup_gen`. // `unwind_to` indicates what needs to be dropped should unwinding occur. // This is a subset of what needs to be dropped when exiting the scope. @@ -1922,7 +1957,7 @@ where // so we can just leave `unwind_to` unmodified, but in some // cases we emit things ALSO on the unwind path, so we need to adjust // `unwind_to` in that case. - if storage_dead_on_unwind { + if drop_data.storage_dead_on_unwind { debug_assert_eq!( unwind_drops.drop_nodes[unwind_to].data.local, drop_data.local @@ -1953,10 +1988,11 @@ where DropKind::Storage => { // Ordinarily, storage-dead nodes are not emitted on unwind, so we don't // need to adjust `unwind_to` on this path.
However, in some specific cases - // we *do* emit storage-dead nodes on the unwind path, and in that case now that - // the storage-dead has completed, we need to adjust the `unwind_to` pointer - // so that any future drops we emit will not register storage-dead. - if storage_dead_on_unwind { + // (drops scheduled after a yield in a coroutine) we *do* emit storage-dead + // nodes on the unwind path, and in that case now that the storage-dead has + // completed, we need to adjust the `unwind_to` pointer so that any future + // drops we emit will not register storage-dead. + if drop_data.storage_dead_on_unwind { debug_assert_eq!( unwind_drops.drop_nodes[unwind_to].data.local, drop_data.local diff --git a/src/tools/compiletest/src/directives.rs b/src/tools/compiletest/src/directives.rs index 036495130f819..c4874da00e101 100644 --- a/src/tools/compiletest/src/directives.rs +++ b/src/tools/compiletest/src/directives.rs @@ -260,6 +260,7 @@ mod directives { pub(crate) const ADD_MINICORE: &str = "add-minicore"; pub(crate) const MINICORE_COMPILE_FLAGS: &str = "minicore-compile-flags"; pub(crate) const DISABLE_GDB_PRETTY_PRINTERS: &str = "disable-gdb-pretty-printers"; + pub(crate) const TIMEOUT: &str = "timeout"; pub(crate) const COMPARE_OUTPUT_BY_LINES: &str = "compare-output-by-lines"; } @@ -957,6 +958,7 @@ pub(crate) fn make_test_description( let mut ignore = false; let mut ignore_message = None; let mut should_fail = false; + let mut timeout_seconds = None; // Scan through the test file to handle `ignore-*`, `only-*`, and `needs-*` directives. iter_directives(config, file_directives, &mut |ln @ &DirectiveLine { line_number, .. 
}| { @@ -1004,6 +1006,13 @@ } should_fail |= config.parse_name_directive(ln, "should-fail"); + + if let Some(seconds) = config + .parse_name_value_directive(ln, directives::TIMEOUT) + .and_then(|value| value.trim().parse::<u64>().ok()) + { + timeout_seconds = Some(seconds); + } }); // The `should-fail` annotation doesn't apply to pretty tests, @@ -1021,6 +1030,7 @@ ignore, ignore_message, should_fail, + timeout_seconds, } } diff --git a/src/tools/compiletest/src/directives/directive_names.rs b/src/tools/compiletest/src/directives/directive_names.rs index 5421a97201737..fbae250ca304a 100644 --- a/src/tools/compiletest/src/directives/directive_names.rs +++ b/src/tools/compiletest/src/directives/directive_names.rs @@ -289,6 +289,7 @@ pub(crate) const KNOWN_DIRECTIVE_NAMES: &[&str] = &[ "should-ice", "stderr-per-bitwidth", "test-mir-pass", + "timeout", "unique-doc-out-dir", "unset-exec-env", "unset-rustc-env", diff --git a/src/tools/compiletest/src/executor.rs b/src/tools/compiletest/src/executor.rs index c800d11d6b2fd..4edd7b7e899ce 100644 --- a/src/tools/compiletest/src/executor.rs +++ b/src/tools/compiletest/src/executor.rs @@ -21,6 +21,8 @@ use crate::panic_hook; mod deadline; mod json; +const DEFAULT_TEST_WARN_TIMEOUT_S: u64 = 60; + pub(crate) fn run_tests(config: &Config, tests: Vec<CollectedTest>) -> bool { let tests_len = tests.len(); let filtered = filter_tests(config, tests); @@ -52,7 +54,8 @@ pub(crate) fn run_tests(config: &Config, tests: Vec<CollectedTest>) -> bool { && let Some((id, test)) = fresh_tests.next() { listener.test_started(test); - deadline_queue.push(id, test); + let timeout = test.desc.timeout_seconds.unwrap_or(DEFAULT_TEST_WARN_TIMEOUT_S); + deadline_queue.push(id, test, timeout); let join_handle = spawn_test_thread(id, test, completion_tx.clone()); running_tests.insert(id, RunningTest { test, join_handle }); } @@ -339,6 +342,7 @@ pub(crate) struct CollectedTestDesc { pub(crate) ignore: bool, pub(crate)
ignore_message: Option<Cow<'static, str>>, pub(crate) should_fail: ShouldFail, + pub(crate) timeout_seconds: Option<u64>, } /// Tests with `//@ should-fail` are tests of compiletest itself, and should diff --git a/src/tools/compiletest/src/executor/deadline.rs b/src/tools/compiletest/src/executor/deadline.rs index 3536eff2fd80d..379da8fd25a4a 100644 --- a/src/tools/compiletest/src/executor/deadline.rs +++ b/src/tools/compiletest/src/executor/deadline.rs @@ -4,8 +4,6 @@ use std::time::{Duration, Instant}; use crate::executor::{CollectedTest, TestId}; -const TEST_WARN_TIMEOUT_S: u64 = 60; - struct DeadlineEntry<'a> { id: TestId, test: &'a CollectedTest, @@ -27,11 +25,8 @@ impl<'a> DeadlineQueue<'a> { Instant::now() } - pub(crate) fn push(&mut self, id: TestId, test: &'a CollectedTest) { - let deadline = self.now() + Duration::from_secs(TEST_WARN_TIMEOUT_S); - if let Some(back) = self.queue.back() { - assert!(back.deadline <= deadline); - } + pub(crate) fn push(&mut self, id: TestId, test: &'a CollectedTest, timeout_seconds: u64) { + let deadline = self.now() + Duration::from_secs(timeout_seconds); self.queue.push_back(DeadlineEntry { id, test, deadline }); } @@ -67,7 +62,7 @@ impl<'a> DeadlineQueue<'a> { } fn next_deadline(&self) -> Option<Instant> { - Some(self.queue.front()?.deadline) + self.queue.iter().map(|entry| entry.deadline).min() } fn for_each_entry_past_deadline( &mut self, ) { let now = self.now(); - // Clear out entries that are past their deadline, but only invoke the - // callback for tests that are still considered running. - while let Some(entry) = pop_front_if(&mut self.queue, |entry| entry.deadline <= now) { - if is_running(entry.id) { + // Invoke callbacks for entries past their deadline that are still running.
+ for entry in self.queue.iter() { + if entry.deadline <= now && is_running(entry.id) { on_deadline_passed(entry.id, entry.test); } } - // Also clear out any leading entries that are no longer running, even - // if their deadline hasn't been reached. - while let Some(_) = pop_front_if(&mut self.queue, |entry| !is_running(entry.id)) {} + // Remove entries that are past their deadline or no longer running. + self.queue.retain(|entry| entry.deadline > now && is_running(entry.id)); - if let Some(front) = self.queue.front() { - assert!(now < front.deadline); - } + debug_assert!(self.queue.iter().all(|entry| now < entry.deadline)); } } - -/// FIXME(vec_deque_pop_if): Use `VecDeque::pop_front_if` when it is stable in bootstrap. -fn pop_front_if<T>(queue: &mut VecDeque<T>, predicate: impl FnOnce(&T) -> bool) -> Option<T> { - let first = queue.front()?; - if predicate(first) { queue.pop_front() } else { None } -} diff --git a/tests/mir-opt/async_drop_live_dead.a-{closure#0}.coroutine_drop_async.0.panic-unwind.mir b/tests/mir-opt/async_drop_live_dead.a-{closure#0}.coroutine_drop_async.0.panic-unwind.mir index 1dc1d08136290..3d47e948a8da6 100644 --- a/tests/mir-opt/async_drop_live_dead.a-{closure#0}.coroutine_drop_async.0.panic-unwind.mir +++ b/tests/mir-opt/async_drop_live_dead.a-{closure#0}.coroutine_drop_async.0.panic-unwind.mir @@ -44,7 +44,6 @@ fn a::{closure#0}(_1: Pin<&mut {async fn body of a()}>, _2: &mut Context<'_>) } bb3 (cleanup): { - nop; nop; goto -> bb5; } diff --git a/tests/run-make/large-async-vec-of-strings/rmake.rs b/tests/run-make/large-async-vec-of-strings/rmake.rs new file mode 100644 index 0000000000000..f875cfc2a7c5c --- /dev/null +++ b/tests/run-make/large-async-vec-of-strings/rmake.rs @@ -0,0 +1,34 @@ +// A regression test to ensure that an async function returning a large +// `Vec<String>` compiles within a reasonable time.
The quadratic MIR +// blowup (#115327) manifesting itself earlier was caused by `StorageDead` +// on the unwind path preventing tail-sharing of drop chains in coroutines. +// +// The test generates a source file containing `vec!["string".to_string(), ...]` +// with 10,000 elements inside an async fn, then checks that it compiles. +// The timeout is set such that the test will warn about it running too long +// with the quadratic MIR growth. In the future time-outs could be converted +// into hard failures in the compiletest machinery. + +//@ timeout: 10 + +use std::fs; + +use run_make_support::rustc; + +const ELEMENT_COUNT: usize = 10_000; + +fn main() { + let vec_elements = r#""string".to_string(),"#.repeat(ELEMENT_COUNT); + + let source = format!( + r#" + fn main() {{ produce_vector(); }} + async fn produce_vector() -> Vec<String> {{ + vec![{vec_elements}] + }} + "# + ); + + fs::write("generated.rs", &source).unwrap(); + rustc().edition("2021").input("generated.rs").run(); +}