diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs index 5e81ec28ee35f..524baf5b07fec 100644 --- a/compiler/rustc_arena/src/lib.rs +++ b/compiler/rustc_arena/src/lib.rs @@ -172,8 +172,22 @@ impl TypedArena { available_bytes >= additional_bytes } + /// Allocates storage for `len >= 1` values in this arena, and returns a + /// raw pointer to the first value's storage. + /// + /// # Safety + /// + /// Caller must initialize each of the `len` slots to a droppable value + /// before the arena is dropped. + /// + /// In practice, this typically means that the caller must be able to + /// raw-copy `len` already-initialized values into the slice without any + /// possibility of panicking. + /// + /// FIXME(Zalathar): This is *very* fragile; perhaps we need a different + /// approach to arena-allocating slices of droppable values. #[inline] - fn alloc_raw_slice(&self, len: usize) -> *mut T { + unsafe fn alloc_raw_slice(&self, len: usize) -> *mut T { assert!(size_of::() != 0); assert!(len != 0); @@ -208,7 +222,7 @@ impl TypedArena { &self, iter: impl IntoIterator>, ) -> Result<&mut [T], E> { - // Despite the similarlty with `DroplessArena`, we cannot reuse their fast case. The reason + // Despite the similarity with `DroplessArena`, we cannot reuse their fast case. The reason // is subtle: these arenas are reentrant. In other words, `iter` may very well be holding a // reference to `self` and adding elements to the arena during iteration. // @@ -229,9 +243,15 @@ impl TypedArena { } // Move the content to the arena by copying and then forgetting it. let len = vec.len(); - let start_ptr = self.alloc_raw_slice(len); + + // SAFETY: After allocating raw storage for exactly `len` values, we + // must fully initialize the storage without panicking, and we must + // also prevent the stale values in the vec from being dropped. Ok(unsafe { + let start_ptr = self.alloc_raw_slice(len); + // Initialize the newly-allocated storage without panicking. 
vec.as_ptr().copy_to_nonoverlapping(start_ptr, len); + // Prevent the stale values in the vec from being dropped. vec.set_len(0); slice::from_raw_parts_mut(start_ptr, len) }) @@ -584,7 +604,7 @@ impl DroplessArena { &self, iter: impl IntoIterator>, ) -> Result<&mut [T], E> { - // Despite the similarlty with `alloc_from_iter`, we cannot reuse their fast case, as we + // Despite the similarity with `alloc_from_iter`, we cannot reuse their fast case, as we // cannot know the minimum length of the iterator in this case. assert!(size_of::() != 0); diff --git a/compiler/rustc_ast_pretty/src/pprust/state.rs b/compiler/rustc_ast_pretty/src/pprust/state.rs index e50e31c226fdb..c8874ed99dca9 100644 --- a/compiler/rustc_ast_pretty/src/pprust/state.rs +++ b/compiler/rustc_ast_pretty/src/pprust/state.rs @@ -1961,7 +1961,8 @@ impl<'a> State<'a> { } fn print_lifetime(&mut self, lifetime: ast::Lifetime) { - self.print_name(lifetime.ident.name) + self.word(lifetime.ident.name.to_string()); + self.ann_post(lifetime.ident) } fn print_lifetime_bounds(&mut self, bounds: &ast::GenericBounds) { diff --git a/compiler/rustc_lint/src/unused.rs b/compiler/rustc_lint/src/unused.rs index 5516298c5e85f..f954ac1da7658 100644 --- a/compiler/rustc_lint/src/unused.rs +++ b/compiler/rustc_lint/src/unused.rs @@ -805,7 +805,10 @@ trait UnusedDelimLint { ExprKind::Break(_label, None) => return false, ExprKind::Break(_label, Some(break_expr)) => { - return matches!(break_expr.kind, ExprKind::Block(..)); + // `if (break 'label i) { ... }` removing parens would make `i { ... }` + // be parsed as a struct literal, so keep parentheses if the break value + // ends with a path (which could be mistaken for a struct name). + return matches!(break_expr.kind, ExprKind::Block(..) 
| ExprKind::Path(..)); } ExprKind::Range(_lhs, Some(rhs), _limits) => { diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs index ad63251363e25..cea50f95df4b4 100644 --- a/compiler/rustc_middle/src/query/mod.rs +++ b/compiler/rustc_middle/src/query/mod.rs @@ -88,7 +88,7 @@ use rustc_index::IndexVec; use rustc_lint_defs::LintId; use rustc_macros::rustc_queries; use rustc_query_system::ich::StableHashingContext; -use rustc_query_system::query::{QueryMode, QueryStackDeferred, QueryState}; +use rustc_query_system::query::{QueryMode, QueryState}; use rustc_session::Limits; use rustc_session::config::{EntryFnType, OptLevel, OutputFilenames, SymbolManglingVersion}; use rustc_session::cstore::{ diff --git a/compiler/rustc_middle/src/query/plumbing.rs b/compiler/rustc_middle/src/query/plumbing.rs index 7b85dac41aeff..2bda014a19fe7 100644 --- a/compiler/rustc_middle/src/query/plumbing.rs +++ b/compiler/rustc_middle/src/query/plumbing.rs @@ -440,7 +440,7 @@ macro_rules! define_callbacks { #[derive(Default)] pub struct QueryStates<'tcx> { $( - pub $name: QueryState<$($K)*, QueryStackDeferred<'tcx>>, + pub $name: QueryState<'tcx, $($K)*>, )* } diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs index 382b8750a1ce7..8fa4fb3090dbd 100644 --- a/compiler/rustc_query_impl/src/lib.rs +++ b/compiler/rustc_query_impl/src/lib.rs @@ -21,7 +21,7 @@ use rustc_query_system::dep_graph::SerializedDepNodeIndex; use rustc_query_system::ich::StableHashingContext; use rustc_query_system::query::{ CycleError, CycleErrorHandling, HashResult, QueryCache, QueryDispatcher, QueryMap, QueryMode, - QueryStackDeferred, QueryState, get_query_incr, get_query_non_incr, + QueryState, get_query_incr, get_query_non_incr, }; use rustc_span::{ErrorGuaranteed, Span}; @@ -66,7 +66,7 @@ impl<'tcx, C: QueryCache, const ANON: bool, const DEPTH_LIMIT: bool, const FEEDA // This is `impl QueryDispatcher for SemiDynamicQueryDispatcher`. 
impl<'tcx, C: QueryCache, const ANON: bool, const DEPTH_LIMIT: bool, const FEEDABLE: bool> - QueryDispatcher for SemiDynamicQueryDispatcher<'tcx, C, ANON, DEPTH_LIMIT, FEEDABLE> + QueryDispatcher<'tcx> for SemiDynamicQueryDispatcher<'tcx, C, ANON, DEPTH_LIMIT, FEEDABLE> where for<'a> C::Key: HashStable>, { @@ -86,10 +86,7 @@ where } #[inline(always)] - fn query_state<'a>( - self, - qcx: QueryCtxt<'tcx>, - ) -> &'a QueryState> + fn query_state<'a>(self, qcx: QueryCtxt<'tcx>) -> &'a QueryState<'tcx, Self::Key> where QueryCtxt<'tcx>: 'a, { @@ -98,7 +95,7 @@ where unsafe { &*(&qcx.tcx.query_system.states as *const QueryStates<'tcx>) .byte_add(self.vtable.query_state) - .cast::>>() + .cast::>() } } @@ -211,13 +208,15 @@ where /// on the type `rustc_query_impl::query_impl::$name::QueryType`. trait QueryDispatcherUnerased<'tcx> { type UnerasedValue; - type Dispatcher: QueryDispatcher>; + type Dispatcher: QueryDispatcher<'tcx, Qcx = QueryCtxt<'tcx>>; const NAME: &'static &'static str; fn query_dispatcher(tcx: TyCtxt<'tcx>) -> Self::Dispatcher; - fn restore_val(value: ::Value) -> Self::UnerasedValue; + fn restore_val( + value: >::Value, + ) -> Self::UnerasedValue; } pub fn query_system<'a>( diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs index c71352c3fd202..ef7a62351930d 100644 --- a/compiler/rustc_query_impl/src/plumbing.rs +++ b/compiler/rustc_query_impl/src/plumbing.rs @@ -60,9 +60,7 @@ impl<'tcx> HasDepContext for QueryCtxt<'tcx> { } } -impl<'tcx> QueryContext for QueryCtxt<'tcx> { - type QueryInfo = QueryStackDeferred<'tcx>; - +impl<'tcx> QueryContext<'tcx> for QueryCtxt<'tcx> { #[inline] fn jobserver_proxy(&self) -> &Proxy { &self.tcx.jobserver_proxy @@ -93,10 +91,7 @@ impl<'tcx> QueryContext for QueryCtxt<'tcx> { /// Prefer passing `false` to `require_complete` to avoid potential deadlocks, /// especially when called from within a deadlock handler, unless a /// complete map is needed and no deadlock is possible at 
this call site. - fn collect_active_jobs( - self, - require_complete: bool, - ) -> Result>, QueryMap>> { + fn collect_active_jobs(self, require_complete: bool) -> Result, QueryMap<'tcx>> { let mut jobs = QueryMap::default(); let mut complete = true; @@ -322,7 +317,7 @@ macro_rules! should_ever_cache_on_disk { }; } -fn create_query_frame_extra<'tcx, K: Key + Copy + 'tcx>( +fn mk_query_stack_frame_extra<'tcx, K: Key + Copy + 'tcx>( (tcx, key, kind, name, do_describe): ( TyCtxt<'tcx>, K, @@ -373,18 +368,16 @@ pub(crate) fn create_query_frame< ) -> QueryStackFrame> { let def_id = key.key_as_def_id(); - let hash = || { - tcx.with_stable_hashing_context(|mut hcx| { - let mut hasher = StableHasher::new(); - kind.as_usize().hash_stable(&mut hcx, &mut hasher); - key.hash_stable(&mut hcx, &mut hasher); - hasher.finish::() - }) - }; + let hash = tcx.with_stable_hashing_context(|mut hcx| { + let mut hasher = StableHasher::new(); + kind.as_usize().hash_stable(&mut hcx, &mut hasher); + key.hash_stable(&mut hcx, &mut hasher); + hasher.finish::() + }); let def_id_for_ty_in_cycle = key.def_id_for_ty_in_cycle(); let info = - QueryStackDeferred::new((tcx, key, kind, name, do_describe), create_query_frame_extra); + QueryStackDeferred::new((tcx, key, kind, name, do_describe), mk_query_stack_frame_extra); QueryStackFrame::new(info, kind, hash, def_id, def_id_for_ty_in_cycle) } @@ -417,7 +410,7 @@ pub(crate) fn encode_query_results<'a, 'tcx, Q>( } pub(crate) fn query_key_hash_verify<'tcx>( - query: impl QueryDispatcher>, + query: impl QueryDispatcher<'tcx, Qcx = QueryCtxt<'tcx>>, qcx: QueryCtxt<'tcx>, ) { let _timer = qcx.tcx.prof.generic_activity_with_arg("query_key_hash_verify_for", query.name()); @@ -445,7 +438,7 @@ pub(crate) fn query_key_hash_verify<'tcx>( fn try_load_from_on_disk_cache<'tcx, Q>(query: Q, tcx: TyCtxt<'tcx>, dep_node: DepNode) where - Q: QueryDispatcher>, + Q: QueryDispatcher<'tcx, Qcx = QueryCtxt<'tcx>>, { debug_assert!(tcx.dep_graph.is_green(&dep_node)); @@ -491,7 
+484,7 @@ where fn force_from_dep_node<'tcx, Q>(query: Q, tcx: TyCtxt<'tcx>, dep_node: DepNode) -> bool where - Q: QueryDispatcher>, + Q: QueryDispatcher<'tcx, Qcx = QueryCtxt<'tcx>>, { // We must avoid ever having to call `force_from_dep_node()` for a // `DepNode::codegen_unit`: @@ -734,14 +727,14 @@ macro_rules! define_queries { } #[inline(always)] - fn restore_val(value: ::Value) -> Self::UnerasedValue { + fn restore_val(value: >::Value) -> Self::UnerasedValue { erase::restore_val::>(value) } } pub(crate) fn collect_active_jobs<'tcx>( tcx: TyCtxt<'tcx>, - qmap: &mut QueryMap>, + qmap: &mut QueryMap<'tcx>, require_complete: bool, ) -> Option<()> { let make_query = |tcx, key| { @@ -825,7 +818,7 @@ macro_rules! define_queries { // These arrays are used for iteration and can't be indexed by `DepKind`. const COLLECT_ACTIVE_JOBS: &[ - for<'tcx> fn(TyCtxt<'tcx>, &mut QueryMap>, bool) -> Option<()> + for<'tcx> fn(TyCtxt<'tcx>, &mut QueryMap<'tcx>, bool) -> Option<()> ] = &[$(query_impl::$name::collect_active_jobs),*]; diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index f0cc9636b75c2..6d46d144d0f18 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -519,7 +519,11 @@ impl DepGraph { /// This encodes a diagnostic by creating a node with an unique index and associating /// `diagnostic` with it, for use in the next session. #[inline] - pub fn record_diagnostic(&self, qcx: Qcx, diagnostic: &DiagInner) { + pub fn record_diagnostic<'tcx, Qcx: QueryContext<'tcx>>( + &self, + qcx: Qcx, + diagnostic: &DiagInner, + ) { if let Some(ref data) = self.data { D::read_deps(|task_deps| match task_deps { TaskDepsRef::EvalAlways | TaskDepsRef::Ignore => return, @@ -532,7 +536,7 @@ impl DepGraph { /// This forces a diagnostic node green by running its side effect. 
`prev_index` would /// refer to a node created used `encode_diagnostic` in the previous session. #[inline] - pub fn force_diagnostic_node( + pub fn force_diagnostic_node<'tcx, Qcx: QueryContext<'tcx>>( &self, qcx: Qcx, prev_index: SerializedDepNodeIndex, @@ -669,7 +673,7 @@ impl DepGraphData { /// This encodes a diagnostic by creating a node with an unique index and associating /// `diagnostic` with it, for use in the next session. #[inline] - fn encode_diagnostic( + fn encode_diagnostic<'tcx, Qcx: QueryContext<'tcx>>( &self, qcx: Qcx, diagnostic: &DiagInner, @@ -693,7 +697,7 @@ impl DepGraphData { /// This forces a diagnostic node green by running its side effect. `prev_index` would /// refer to a node created used `encode_diagnostic` in the previous session. #[inline] - fn force_diagnostic_node( + fn force_diagnostic_node<'tcx, Qcx: QueryContext<'tcx>>( &self, qcx: Qcx, prev_index: SerializedDepNodeIndex, @@ -843,7 +847,7 @@ impl DepGraph { DepNodeColor::Unknown } - pub fn try_mark_green>( + pub fn try_mark_green<'tcx, Qcx: QueryContext<'tcx, Deps = D>>( &self, qcx: Qcx, dep_node: &DepNode, @@ -858,7 +862,7 @@ impl DepGraphData { /// A node will have an index, when it's already been marked green, or when we can mark it /// green. This function will mark the current task as a reader of the specified node, when /// a node index can be found for that node. - pub(crate) fn try_mark_green>( + pub(crate) fn try_mark_green<'tcx, Qcx: QueryContext<'tcx, Deps = D>>( &self, qcx: Qcx, dep_node: &DepNode, @@ -883,7 +887,7 @@ impl DepGraphData { } #[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")] - fn try_mark_parent_green>( + fn try_mark_parent_green<'tcx, Qcx: QueryContext<'tcx, Deps = D>>( &self, qcx: Qcx, parent_dep_node_index: SerializedDepNodeIndex, @@ -973,7 +977,7 @@ impl DepGraphData { /// Try to mark a dep-node which existed in the previous compilation session as green. 
#[instrument(skip(self, qcx, prev_dep_node_index, frame), level = "debug")] - fn try_mark_previous_green>( + fn try_mark_previous_green<'tcx, Qcx: QueryContext<'tcx, Deps = D>>( &self, qcx: Qcx, prev_dep_node_index: SerializedDepNodeIndex, diff --git a/compiler/rustc_query_system/src/query/dispatcher.rs b/compiler/rustc_query_system/src/query/dispatcher.rs index 1ca76a70364c9..d7dd6dd6464a5 100644 --- a/compiler/rustc_query_system/src/query/dispatcher.rs +++ b/compiler/rustc_query_system/src/query/dispatcher.rs @@ -14,8 +14,8 @@ pub type HashResult = Option, &V) -> Fingerp /// Unambiguous shorthand for `::DepContext`. #[expect(type_alias_bounds)] -type DepContextOf = - <::Qcx as HasDepContext>::DepContext; +type DepContextOf<'tcx, This: QueryDispatcher<'tcx>> = + <>::Qcx as HasDepContext>::DepContext; /// Trait that can be used as a vtable for a single query, providing operations /// and metadata for that query. @@ -25,15 +25,15 @@ type DepContextOf = /// Those types are not visible from this `rustc_query_system` crate. /// /// "Dispatcher" should be understood as a near-synonym of "vtable". -pub trait QueryDispatcher: Copy { +pub trait QueryDispatcher<'tcx>: Copy { fn name(self) -> &'static str; /// Query context used by this dispatcher, i.e. `rustc_query_impl::QueryCtxt`. - type Qcx: QueryContext; + type Qcx: QueryContext<'tcx>; // `Key` and `Value` are `Copy` instead of `Clone` to ensure copying them stays cheap, // but it isn't necessary. 
- type Key: DepNodeParams> + Eq + Hash + Copy + Debug; + type Key: DepNodeParams> + Eq + Hash + Copy + Debug; type Value: Copy; type Cache: QueryCache; @@ -41,18 +41,15 @@ pub trait QueryDispatcher: Copy { fn format_value(self) -> fn(&Self::Value) -> String; // Don't use this method to access query results, instead use the methods on TyCtxt - fn query_state<'a>( - self, - tcx: Self::Qcx, - ) -> &'a QueryState::QueryInfo>; + fn query_state<'a>(self, tcx: Self::Qcx) -> &'a QueryState<'tcx, Self::Key>; // Don't use this method to access query results, instead use the methods on TyCtxt fn query_cache<'a>(self, tcx: Self::Qcx) -> &'a Self::Cache; - fn cache_on_disk(self, tcx: DepContextOf, key: &Self::Key) -> bool; + fn cache_on_disk(self, tcx: DepContextOf<'tcx, Self>, key: &Self::Key) -> bool; // Don't use this method to compute query results, instead use the methods on TyCtxt - fn execute_query(self, tcx: DepContextOf, k: Self::Key) -> Self::Value; + fn execute_query(self, tcx: DepContextOf<'tcx, Self>, k: Self::Key) -> Self::Value; fn compute(self, tcx: Self::Qcx, key: Self::Key) -> Self::Value; @@ -74,7 +71,7 @@ pub trait QueryDispatcher: Copy { /// Synthesize an error value to let compilation continue after a cycle. fn value_from_cycle_error( self, - tcx: DepContextOf, + tcx: DepContextOf<'tcx, Self>, cycle_error: &CycleError, guar: ErrorGuaranteed, ) -> Self::Value; @@ -89,7 +86,7 @@ pub trait QueryDispatcher: Copy { fn hash_result(self) -> HashResult; // Just here for convenience and checking that the key matches the kind, don't override this. 
- fn construct_dep_node(self, tcx: DepContextOf, key: &Self::Key) -> DepNode { + fn construct_dep_node(self, tcx: DepContextOf<'tcx, Self>, key: &Self::Key) -> DepNode { DepNode::construct(tcx, self.dep_kind(), key) } } diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs index 5810ce0cbe668..177bcd63cbc62 100644 --- a/compiler/rustc_query_system/src/query/job.rs +++ b/compiler/rustc_query_system/src/query/job.rs @@ -12,7 +12,7 @@ use rustc_hir::def::DefKind; use rustc_session::Session; use rustc_span::{DUMMY_SP, Span}; -use super::QueryStackFrameExtra; +use super::{QueryStackDeferred, QueryStackFrameExtra}; use crate::dep_graph::DepContext; use crate::error::CycleStack; use crate::query::plumbing::CycleError; @@ -26,8 +26,8 @@ pub struct QueryInfo { pub query: QueryStackFrame, } -impl QueryInfo { - pub(crate) fn lift>( +impl<'tcx> QueryInfo> { + pub(crate) fn lift>( &self, qcx: Qcx, ) -> QueryInfo { @@ -35,39 +35,39 @@ impl QueryInfo { } } -pub type QueryMap = FxHashMap>; +pub type QueryMap<'tcx> = FxHashMap>; /// A value uniquely identifying an active query job. 
#[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)] pub struct QueryJobId(pub NonZero); impl QueryJobId { - fn query(self, map: &QueryMap) -> QueryStackFrame { + fn query<'a, 'tcx>(self, map: &'a QueryMap<'tcx>) -> QueryStackFrame> { map.get(&self).unwrap().query.clone() } - fn span(self, map: &QueryMap) -> Span { + fn span<'a, 'tcx>(self, map: &'a QueryMap<'tcx>) -> Span { map.get(&self).unwrap().job.span } - fn parent(self, map: &QueryMap) -> Option { + fn parent<'a, 'tcx>(self, map: &'a QueryMap<'tcx>) -> Option { map.get(&self).unwrap().job.parent } - fn latch(self, map: &QueryMap) -> Option<&QueryLatch> { + fn latch<'a, 'tcx>(self, map: &'a QueryMap<'tcx>) -> Option<&'a QueryLatch<'tcx>> { map.get(&self).unwrap().job.latch.as_ref() } } #[derive(Clone, Debug)] -pub struct QueryJobInfo { - pub query: QueryStackFrame, - pub job: QueryJob, +pub struct QueryJobInfo<'tcx> { + pub query: QueryStackFrame>, + pub job: QueryJob<'tcx>, } /// Represents an active query job. #[derive(Debug)] -pub struct QueryJob { +pub struct QueryJob<'tcx> { pub id: QueryJobId, /// The span corresponding to the reason for which this query was required. @@ -77,23 +77,23 @@ pub struct QueryJob { pub parent: Option, /// The latch that is used to wait on this job. - latch: Option>, + latch: Option>, } -impl Clone for QueryJob { +impl<'tcx> Clone for QueryJob<'tcx> { fn clone(&self) -> Self { Self { id: self.id, span: self.span, parent: self.parent, latch: self.latch.clone() } } } -impl QueryJob { +impl<'tcx> QueryJob<'tcx> { /// Creates a new query job. 
#[inline] pub fn new(id: QueryJobId, span: Span, parent: Option) -> Self { QueryJob { id, span, parent, latch: None } } - pub(super) fn latch(&mut self) -> QueryLatch { + pub(super) fn latch(&mut self) -> QueryLatch<'tcx> { if self.latch.is_none() { self.latch = Some(QueryLatch::new()); } @@ -113,12 +113,12 @@ impl QueryJob { } impl QueryJobId { - pub(super) fn find_cycle_in_stack( + pub(super) fn find_cycle_in_stack<'tcx>( &self, - query_map: QueryMap, + query_map: QueryMap<'tcx>, current_job: &Option, span: Span, - ) -> CycleError { + ) -> CycleError> { // Find the waitee amongst `current_job` parents let mut cycle = Vec::new(); let mut current_job = Option::clone(current_job); @@ -152,7 +152,10 @@ impl QueryJobId { #[cold] #[inline(never)] - pub fn find_dep_kind_root(&self, query_map: QueryMap) -> (QueryJobInfo, usize) { + pub fn find_dep_kind_root<'tcx>( + &self, + query_map: QueryMap<'tcx>, + ) -> (QueryJobInfo<'tcx>, usize) { let mut depth = 1; let info = query_map.get(&self).unwrap(); let dep_kind = info.query.dep_kind; @@ -172,31 +175,31 @@ impl QueryJobId { } #[derive(Debug)] -struct QueryWaiter { +struct QueryWaiter<'tcx> { query: Option, condvar: Condvar, span: Span, - cycle: Mutex>>, + cycle: Mutex>>>, } #[derive(Debug)] -struct QueryLatchInfo { +struct QueryLatchInfo<'tcx> { complete: bool, - waiters: Vec>>, + waiters: Vec>>, } #[derive(Debug)] -pub(super) struct QueryLatch { - info: Arc>>, +pub(super) struct QueryLatch<'tcx> { + info: Arc>>, } -impl Clone for QueryLatch { +impl<'tcx> Clone for QueryLatch<'tcx> { fn clone(&self) -> Self { Self { info: Arc::clone(&self.info) } } } -impl QueryLatch { +impl<'tcx> QueryLatch<'tcx> { fn new() -> Self { QueryLatch { info: Arc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })), @@ -206,10 +209,10 @@ impl QueryLatch { /// Awaits for the query job to complete. 
pub(super) fn wait_on( &self, - qcx: impl QueryContext, + qcx: impl QueryContext<'tcx>, query: Option, span: Span, - ) -> Result<(), CycleError> { + ) -> Result<(), CycleError>> { let waiter = Arc::new(QueryWaiter { query, span, cycle: Mutex::new(None), condvar: Condvar::new() }); self.wait_on_inner(qcx, &waiter); @@ -224,7 +227,7 @@ impl QueryLatch { } /// Awaits the caller on this latch by blocking the current thread. - fn wait_on_inner(&self, qcx: impl QueryContext, waiter: &Arc>) { + fn wait_on_inner(&self, qcx: impl QueryContext<'tcx>, waiter: &Arc>) { let mut info = self.info.lock(); if !info.complete { // We push the waiter on to the `waiters` list. It can be accessed inside @@ -260,7 +263,7 @@ impl QueryLatch { /// Removes a single waiter from the list of waiters. /// This is used to break query cycles. - fn extract_waiter(&self, waiter: usize) -> Arc> { + fn extract_waiter(&self, waiter: usize) -> Arc> { let mut info = self.info.lock(); debug_assert!(!info.complete); // Remove the waiter from the list of waiters @@ -280,8 +283,8 @@ type Waiter = (QueryJobId, usize); /// For visits of resumable waiters it returns Some(Some(Waiter)) which has the /// required information to resume the waiter. /// If all `visit` calls returns None, this function also returns None. -fn visit_waiters( - query_map: &QueryMap, +fn visit_waiters<'tcx, F>( + query_map: &QueryMap<'tcx>, query: QueryJobId, mut visit: F, ) -> Option> @@ -314,8 +317,8 @@ where /// `span` is the reason for the `query` to execute. This is initially DUMMY_SP. /// If a cycle is detected, this initial value is replaced with the span causing /// the cycle. -fn cycle_check( - query_map: &QueryMap, +fn cycle_check<'tcx>( + query_map: &QueryMap<'tcx>, query: QueryJobId, span: Span, stack: &mut Vec<(Span, QueryJobId)>, @@ -354,8 +357,8 @@ fn cycle_check( /// Finds out if there's a path to the compiler root (aka. code which isn't in a query) /// from `query` without going through any of the queries in `visited`. 
/// This is achieved with a depth first search. -fn connected_to_root( - query_map: &QueryMap, +fn connected_to_root<'tcx>( + query_map: &QueryMap<'tcx>, query: QueryJobId, visited: &mut FxHashSet, ) -> bool { @@ -376,7 +379,7 @@ fn connected_to_root( } // Deterministically pick an query from a list -fn pick_query<'a, I: Clone, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T +fn pick_query<'a, 'tcx, T, F>(query_map: &QueryMap<'tcx>, queries: &'a [T], f: F) -> &'a T where F: Fn(&T) -> (Span, QueryJobId), { @@ -401,10 +404,10 @@ where /// the function return true. /// If a cycle was not found, the starting query is removed from `jobs` and /// the function returns false. -fn remove_cycle( - query_map: &QueryMap, +fn remove_cycle<'tcx>( + query_map: &QueryMap<'tcx>, jobs: &mut Vec, - wakelist: &mut Vec>>, + wakelist: &mut Vec>>, ) -> bool { let mut visited = FxHashSet::default(); let mut stack = Vec::new(); @@ -505,10 +508,7 @@ fn remove_cycle( /// uses a query latch and then resuming that waiter. /// There may be multiple cycles involved in a deadlock, so this searches /// all active queries for cycles before finally resuming all the waiters at once. 
-pub fn break_query_cycles( - query_map: QueryMap, - registry: &rustc_thread_pool::Registry, -) { +pub fn break_query_cycles<'tcx>(query_map: QueryMap<'tcx>, registry: &rustc_thread_pool::Registry) { let mut wakelist = Vec::new(); // It is OK per the comments: // - https://github.com/rust-lang/rust/pull/131200#issuecomment-2798854932 @@ -602,7 +602,7 @@ pub fn report_cycle<'a>( sess.dcx().create_err(cycle_diag) } -pub fn print_query_stack( +pub fn print_query_stack<'tcx, Qcx: QueryContext<'tcx>>( qcx: Qcx, mut current_query: Option, dcx: DiagCtxtHandle<'_>, diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs index 701253d50fcca..63202429679d2 100644 --- a/compiler/rustc_query_system/src/query/mod.rs +++ b/compiler/rustc_query_system/src/query/mod.rs @@ -58,22 +58,19 @@ pub struct QueryStackFrame { pub def_id_for_ty_in_cycle: Option, } -impl QueryStackFrame { +impl<'tcx> QueryStackFrame> { #[inline] pub fn new( - info: I, + info: QueryStackDeferred<'tcx>, dep_kind: DepKind, - hash: impl FnOnce() -> Hash64, + hash: Hash64, def_id: Option, def_id_for_ty_in_cycle: Option, ) -> Self { - Self { info, def_id, dep_kind, hash: hash(), def_id_for_ty_in_cycle } + Self { info, def_id, dep_kind, hash, def_id_for_ty_in_cycle } } - fn lift>( - &self, - qcx: Qcx, - ) -> QueryStackFrame { + fn lift>(&self, qcx: Qcx) -> QueryStackFrame { QueryStackFrame { info: qcx.lift_query_info(&self.info), dep_kind: self.dep_kind, @@ -159,9 +156,7 @@ pub enum QuerySideEffect { Diagnostic(DiagInner), } -pub trait QueryContext: HasDepContext { - type QueryInfo: Clone; - +pub trait QueryContext<'tcx>: HasDepContext { /// Gets a jobserver reference which is used to release then acquire /// a token while waiting on a query. fn jobserver_proxy(&self) -> &Proxy; @@ -171,12 +166,9 @@ pub trait QueryContext: HasDepContext { /// Get the query information from the TLS context. 
fn current_query_job(self) -> Option; - fn collect_active_jobs( - self, - require_complete: bool, - ) -> Result, QueryMap>; + fn collect_active_jobs(self, require_complete: bool) -> Result, QueryMap<'tcx>>; - fn lift_query_info(self, info: &Self::QueryInfo) -> QueryStackFrameExtra; + fn lift_query_info(self, info: &QueryStackDeferred<'tcx>) -> QueryStackFrameExtra; /// Load a side effect associated to the node in the previous session. fn load_side_effect( diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs index 7e9f83e8fe82b..c4431ff870d0c 100644 --- a/compiler/rustc_query_system/src/query/plumbing.rs +++ b/compiler/rustc_query_system/src/query/plumbing.rs @@ -18,7 +18,7 @@ use rustc_errors::{Diag, FatalError, StashKey}; use rustc_span::{DUMMY_SP, Span}; use tracing::instrument; -use super::{QueryDispatcher, QueryStackFrameExtra}; +use super::{QueryDispatcher, QueryStackDeferred, QueryStackFrameExtra}; use crate::dep_graph::{ DepContext, DepGraphData, DepNode, DepNodeIndex, DepNodeParams, HasDepContext, }; @@ -34,23 +34,23 @@ fn equivalent_key(k: &K) -> impl Fn(&(K, V)) -> bool + '_ { move |x| x.0 == *k } -pub struct QueryState { - active: Sharded)>>, +pub struct QueryState<'tcx, K> { + active: Sharded)>>, } /// Indicates the state of a query for a given key in a query map. -enum QueryResult { +enum QueryResult<'tcx> { /// An already executing query. The query job can be used to await for its completion. - Started(QueryJob), + Started(QueryJob<'tcx>), /// The query panicked. Queries trying to wait on this will raise a fatal error which will /// silently panic. Poisoned, } -impl QueryResult { +impl<'tcx> QueryResult<'tcx> { /// Unwraps the query job expecting that it has started. 
- fn expect_job(self) -> QueryJob { + fn expect_job(self) -> QueryJob<'tcx> { match self { Self::Started(job) => job, Self::Poisoned => { @@ -60,7 +60,7 @@ impl QueryResult { } } -impl QueryState +impl<'tcx, K> QueryState<'tcx, K> where K: Eq + Hash + Copy + Debug, { @@ -71,13 +71,13 @@ where pub fn collect_active_jobs( &self, qcx: Qcx, - make_query: fn(Qcx, K) -> QueryStackFrame, - jobs: &mut QueryMap, + make_query: fn(Qcx, K) -> QueryStackFrame>, + jobs: &mut QueryMap<'tcx>, require_complete: bool, ) -> Option<()> { let mut active = Vec::new(); - let mut collect = |iter: LockGuard<'_, HashTable<(K, QueryResult)>>| { + let mut collect = |iter: LockGuard<'_, HashTable<(K, QueryResult<'tcx>)>>| { for (k, v) in iter.iter() { if let QueryResult::Started(ref job) = *v { active.push((*k, job.clone())); @@ -108,40 +108,40 @@ where } } -impl Default for QueryState { - fn default() -> QueryState { +impl<'tcx, K> Default for QueryState<'tcx, K> { + fn default() -> QueryState<'tcx, K> { QueryState { active: Default::default() } } } /// A type representing the responsibility to execute the job in the `job` field. /// This will poison the relevant query if dropped. 
-struct JobOwner<'tcx, K, I> +struct JobOwner<'a, 'tcx, K> where K: Eq + Hash + Copy, { - state: &'tcx QueryState, + state: &'a QueryState<'tcx, K>, key: K, } #[cold] #[inline(never)] -fn mk_cycle(query: Q, qcx: Q::Qcx, cycle_error: CycleError) -> Q::Value +fn mk_cycle<'tcx, Q>(query: Q, qcx: Q::Qcx, cycle_error: CycleError) -> Q::Value where - Q: QueryDispatcher, + Q: QueryDispatcher<'tcx>, { let error = report_cycle(qcx.dep_context().sess(), &cycle_error); handle_cycle_error(query, qcx, &cycle_error, error) } -fn handle_cycle_error( +fn handle_cycle_error<'tcx, Q>( query: Q, qcx: Q::Qcx, cycle_error: &CycleError, error: Diag<'_>, ) -> Q::Value where - Q: QueryDispatcher, + Q: QueryDispatcher<'tcx>, { match query.cycle_error_handling() { CycleErrorHandling::Error => { @@ -170,7 +170,7 @@ where } } -impl<'tcx, K, I> JobOwner<'tcx, K, I> +impl<'a, 'tcx, K> JobOwner<'a, 'tcx, K> where K: Eq + Hash + Copy, { @@ -207,7 +207,7 @@ where } } -impl<'tcx, K, I> Drop for JobOwner<'tcx, K, I> +impl<'a, 'tcx, K> Drop for JobOwner<'a, 'tcx, K> where K: Eq + Hash + Copy, { @@ -241,8 +241,8 @@ pub struct CycleError { pub cycle: Vec>, } -impl CycleError { - fn lift>(&self, qcx: Qcx) -> CycleError { +impl<'tcx> CycleError> { + fn lift>(&self, qcx: Qcx) -> CycleError { CycleError { usage: self.usage.as_ref().map(|(span, frame)| (*span, frame.lift(qcx))), cycle: self.cycle.iter().map(|info| info.lift(qcx)).collect(), @@ -272,14 +272,14 @@ where #[cold] #[inline(never)] -fn cycle_error( +fn cycle_error<'tcx, Q>( query: Q, qcx: Q::Qcx, try_execute: QueryJobId, span: Span, ) -> (Q::Value, Option) where - Q: QueryDispatcher, + Q: QueryDispatcher<'tcx>, { // Ensure there was no errors collecting all active jobs. // We need the complete map to ensure we find a cycle to break. 
@@ -290,16 +290,16 @@ where } #[inline(always)] -fn wait_for_query( +fn wait_for_query<'tcx, Q>( query: Q, qcx: Q::Qcx, span: Span, key: Q::Key, - latch: QueryLatch<::QueryInfo>, + latch: QueryLatch<'tcx>, current: Option, ) -> (Q::Value, Option) where - Q: QueryDispatcher, + Q: QueryDispatcher<'tcx>, { // For parallel queries, we'll block and wait until the query running // in another thread has completed. Record how long we wait in the @@ -339,7 +339,7 @@ where } #[inline(never)] -fn try_execute_query( +fn try_execute_query<'tcx, Q, const INCR: bool>( query: Q, qcx: Q::Qcx, span: Span, @@ -347,7 +347,7 @@ fn try_execute_query( dep_node: Option, ) -> (Q::Value, Option) where - Q: QueryDispatcher, + Q: QueryDispatcher<'tcx>, { let state = query.query_state(qcx); let key_hash = sharded::make_hash(&key); @@ -408,17 +408,17 @@ where } #[inline(always)] -fn execute_job( +fn execute_job<'tcx, Q, const INCR: bool>( query: Q, qcx: Q::Qcx, - state: &QueryState::QueryInfo>, + state: &QueryState<'tcx, Q::Key>, key: Q::Key, key_hash: u64, id: QueryJobId, dep_node: Option, ) -> (Q::Value, Option) where - Q: QueryDispatcher, + Q: QueryDispatcher<'tcx>, { // Use `JobOwner` so the query will be poisoned if executing it panics. let job_owner = JobOwner { state, key }; @@ -480,14 +480,14 @@ where // Fast path for when incr. comp. is off. 
#[inline(always)] -fn execute_job_non_incr( +fn execute_job_non_incr<'tcx, Q>( query: Q, qcx: Q::Qcx, key: Q::Key, job_id: QueryJobId, ) -> (Q::Value, DepNodeIndex) where - Q: QueryDispatcher, + Q: QueryDispatcher<'tcx>, { debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled()); @@ -516,7 +516,7 @@ where } #[inline(always)] -fn execute_job_incr( +fn execute_job_incr<'tcx, Q>( query: Q, qcx: Q::Qcx, dep_graph_data: &DepGraphData<::Deps>, @@ -525,7 +525,7 @@ fn execute_job_incr( job_id: QueryJobId, ) -> (Q::Value, DepNodeIndex) where - Q: QueryDispatcher, + Q: QueryDispatcher<'tcx>, { if !query.anon() && !query.eval_always() { // `to_dep_node` is expensive for some `DepKind`s. @@ -571,7 +571,7 @@ where } #[inline(always)] -fn try_load_from_disk_and_cache_in_memory( +fn try_load_from_disk_and_cache_in_memory<'tcx, Q>( query: Q, dep_graph_data: &DepGraphData<::Deps>, qcx: Q::Qcx, @@ -579,7 +579,7 @@ fn try_load_from_disk_and_cache_in_memory( dep_node: &DepNode, ) -> Option<(Q::Value, DepNodeIndex)> where - Q: QueryDispatcher, + Q: QueryDispatcher<'tcx>, { // Note this function can be called concurrently from the same query // We must ensure that this is handled correctly. @@ -757,14 +757,14 @@ fn incremental_verify_ich_failed( /// /// Note: The optimization is only available during incr. comp. 
#[inline(never)] -fn ensure_must_run( +fn ensure_must_run<'tcx, Q>( query: Q, qcx: Q::Qcx, key: &Q::Key, check_cache: bool, ) -> (bool, Option) where - Q: QueryDispatcher, + Q: QueryDispatcher<'tcx>, { if query.eval_always() { return (true, None); @@ -809,9 +809,9 @@ pub enum QueryMode { } #[inline(always)] -pub fn get_query_non_incr(query: Q, qcx: Q::Qcx, span: Span, key: Q::Key) -> Q::Value +pub fn get_query_non_incr<'tcx, Q>(query: Q, qcx: Q::Qcx, span: Span, key: Q::Key) -> Q::Value where - Q: QueryDispatcher, + Q: QueryDispatcher<'tcx>, { debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled()); @@ -819,7 +819,7 @@ where } #[inline(always)] -pub fn get_query_incr( +pub fn get_query_incr<'tcx, Q>( query: Q, qcx: Q::Qcx, span: Span, @@ -827,7 +827,7 @@ pub fn get_query_incr( mode: QueryMode, ) -> Option where - Q: QueryDispatcher, + Q: QueryDispatcher<'tcx>, { debug_assert!(qcx.dep_context().dep_graph().is_fully_enabled()); @@ -849,9 +849,9 @@ where Some(result) } -pub fn force_query(query: Q, qcx: Q::Qcx, key: Q::Key, dep_node: DepNode) +pub fn force_query<'tcx, Q>(query: Q, qcx: Q::Qcx, key: Q::Key, dep_node: DepNode) where - Q: QueryDispatcher, + Q: QueryDispatcher<'tcx>, { // We may be concurrently trying both execute and force a query. // Ensure that only one of them runs the query. 
diff --git a/src/doc/rustc-dev-guide/.github/workflows/ci.yml b/src/doc/rustc-dev-guide/.github/workflows/ci.yml index f2f2f7ed14858..bdb70f215f830 100644 --- a/src/doc/rustc-dev-guide/.github/workflows/ci.yml +++ b/src/doc/rustc-dev-guide/.github/workflows/ci.yml @@ -14,7 +14,7 @@ jobs: if: github.repository == 'rust-lang/rustc-dev-guide' runs-on: ubuntu-latest env: - MDBOOK_VERSION: 0.5.1 + MDBOOK_VERSION: 0.5.2 MDBOOK_LINKCHECK2_VERSION: 0.11.0 MDBOOK_MERMAID_VERSION: 0.17.0 MDBOOK_OUTPUT__LINKCHECK__FOLLOW_WEB_LINKS: ${{ github.event_name != 'pull_request' }} diff --git a/src/doc/rustc-dev-guide/book.toml b/src/doc/rustc-dev-guide/book.toml index 15a597e5addbe..5712a364f7602 100644 --- a/src/doc/rustc-dev-guide/book.toml +++ b/src/doc/rustc-dev-guide/book.toml @@ -57,11 +57,39 @@ cache-timeout = 90000 warning-policy = "error" [output.html.redirect] +"/borrow_check.html" = "borrow-check.html" +"/borrow_check/drop_check.html" = "/borrow-check/drop-check.html" +"/borrow_check/moves_and_initialization.html" = "/borrow-check/moves-and-initialization.html" +"/borrow_check/moves_and_initialization/move_paths.html" = "/borrow-check/moves-and-initialization/move-paths.html" +"/borrow_check/opaque-types-region-inference-restrictions.html" = "/borrow-check/opaque-types-region-inference-restrictions.html" +"/borrow_check/region_inference.html" = "/borrow-check/region-inference.html" +"/borrow_check/region_inference/closure_constraints.html" = "/borrow-check/region-inference/closure-constraints.html" +"/borrow_check/region_inference/constraint_propagation.html" = "/borrow-check/region-inference/constraint-propagation.html" +"/borrow_check/region_inference/error_reporting.html" = "/borrow-check/region-inference/error-reporting.html" +"/borrow_check/region_inference/lifetime_parameters.html" = "/borrow-check/region-inference/lifetime-parameters.html" +"/borrow_check/region_inference/member_constraints.html" = "/borrow-check/region-inference/member-constraints.html" 
+"/borrow_check/region_inference/placeholders_and_universes.html" = "/borrow-check/region-inference/placeholders-and-universes.html" +"/borrow_check/two_phase_borrows.html" = "/borrow-check/two-phase-borrows.html" +"/borrow_check/type_check.html" = "/borrow-check/type-check.html" "/compiletest.html" = "tests/compiletest.html" -"/diagnostics/sessiondiagnostic.html" = "diagnostic-structs.html" "/diagnostics/diagnostic-codes.html" = "error-codes.html" +"/diagnostics/sessiondiagnostic.html" = "diagnostic-structs.html" +"/early_late_parameters.html" = "early-late-parameters.html" +"/generic_parameters_summary.html" = "generic-parameters-summary.html" +"/implementing_new_features.html" = "implementing-new-features.html" "/miri.html" = "const-eval/interpret.html" +"/profiling/with_perf.html" = "with-perf.html" +"/profiling/with_rustc_perf.html" = "with-rustc-perf.html" +"/profiling/wpa_profiling.html" = "wpa-profiling.html" +"/stabilization_guide.html" = "stabilization-guide.html" +"/stabilization_report_template.html" = "stabilization-report-template.html" "/tests/fuchsia.html" = "ecosystem-test-jobs/fuchsia.html" "/tests/headers.html" = "directives.html" "/tests/integration.html" = "ecosystem.html" "/tests/rust-for-linux.html" = "ecosystem-test-jobs/rust-for-linux.html" +"/ty_module/binders.html" = "/ty-module/binders.html" +"/ty_module/early_binder.html" = "/ty-module/early-binder.html" +"/ty_module/generic_arguments.html" = "/ty-module/generic-arguments.html" +"/ty_module/instantiating_binders.html" = "/ty-module/instantiating-binders.html" +"/ty_module/param_ty_const_regions.html" = "/ty-module/param-ty-const-regions.html" +"/typing_parameter_envs.html" = "typing-parameter-envs.html" diff --git a/src/doc/rustc-dev-guide/ci/sembr/src/main.rs b/src/doc/rustc-dev-guide/ci/sembr/src/main.rs index 6f4ce4415f04a..6720267e14f3b 100644 --- a/src/doc/rustc-dev-guide/ci/sembr/src/main.rs +++ b/src/doc/rustc-dev-guide/ci/sembr/src/main.rs @@ -24,7 +24,7 @@ static 
REGEX_IGNORE_END: LazyLock = static REGEX_IGNORE_LINK_TARGETS: LazyLock = LazyLock::new(|| Regex::new(r"^\[.+\]: ").unwrap()); static REGEX_SPLIT: LazyLock = - LazyLock::new(|| Regex::new(r"([^\.\d\-\*]\.|[^r]\?|!)\s").unwrap()); + LazyLock::new(|| Regex::new(r"([^\.\d\-\*]\.|[^r\~]\?|!)\s").unwrap()); // list elements, numbered (1.) or not (- and *) static REGEX_LIST_ENTRY: LazyLock = LazyLock::new(|| Regex::new(r"^\s*(\d\.|\-|\*|\d\))\s+").unwrap()); @@ -83,6 +83,8 @@ fn ignore(line: &str, in_code_block: bool) -> bool { || line.contains(" etc.") || line.contains("i.e.") || line.contains("et. al") + || line.contains("") || line.contains('|') || line.trim_start().starts_with('>') || line.starts_with('#') @@ -204,6 +206,7 @@ git log main.. compiler o? whatever r? @reviewer r? @reviewer +~? diagnostic "; let expected = " # some. heading @@ -236,6 +239,7 @@ o? whatever r? @reviewer r? @reviewer +~? diagnostic "; assert_eq!(expected, comply(original)); } @@ -263,6 +267,11 @@ leave the text alone ``` + ignore +html comment closing + handle the indented well @@ -289,6 +298,11 @@ leave the text alone ``` + ignore +html comment closing + handle the indented well [a target]: https://example.com diff --git a/src/doc/rustc-dev-guide/rust-version b/src/doc/rustc-dev-guide/rust-version index b53a66c667517..795271ee0ef03 100644 --- a/src/doc/rustc-dev-guide/rust-version +++ b/src/doc/rustc-dev-guide/rust-version @@ -1 +1 @@ -44a5b55557c26353f388400d7da95527256fe260 +370143facfb348ad3b29749c0393402d76b280c3 diff --git a/src/doc/rustc-dev-guide/src/SUMMARY.md b/src/doc/rustc-dev-guide/src/SUMMARY.md index daaaef42d9096..1f9e0aac9d0b2 100644 --- a/src/doc/rustc-dev-guide/src/SUMMARY.md +++ b/src/doc/rustc-dev-guide/src/SUMMARY.md @@ -39,9 +39,9 @@ - [Debugging the compiler](./compiler-debugging.md) - [Using the tracing/logging instrumentation](./tracing.md) - [Profiling the compiler](./profiling.md) - - [with the linux perf tool](./profiling/with_perf.md) - - [with Windows 
Performance Analyzer](./profiling/wpa_profiling.md) - - [with the Rust benchmark suite](./profiling/with_rustc_perf.md) + - [with the linux perf tool](./profiling/with-perf.md) + - [with Windows Performance Analyzer](./profiling/wpa-profiling.md) + - [with the Rust benchmark suite](./profiling/with-rustc-perf.md) - [crates.io dependencies](./crates-io.md) # Contributing to Rust @@ -51,11 +51,11 @@ - [Using Git](./git.md) - [Mastering @rustbot](./rustbot.md) - [Walkthrough: a typical contribution](./walkthrough.md) -- [Implementing new language features](./implementing_new_features.md) +- [Implementing new language features](./implementing-new-features.md) - [Stability guarantees](./stability-guarantees.md) - [Stability attributes](./stability.md) -- [Stabilizing language features](./stabilization_guide.md) - - [Stabilization report template](./stabilization_report_template.md) +- [Stabilizing language features](./stabilization-guide.md) + - [Stabilization report template](./stabilization-report-template.md) - [Feature Gates](./feature-gates.md) - [Coding conventions](./conventions.md) - [Procedures for breaking changes](./bug-fix-procedure.md) @@ -106,6 +106,7 @@ - [GPU offload internals](./offload/internals.md) - [Installation](./offload/installation.md) - [Usage](./offload/usage.md) + - [Contributing](./offload/contributing.md) - [Autodiff internals](./autodiff/internals.md) - [Installation](./autodiff/installation.md) - [How to debug](./autodiff/debugging.md) @@ -154,17 +155,17 @@ # Analysis - [Prologue](./part-4-intro.md) -- [Generic parameter definitions](./generic_parameters_summary.md) - - [`EarlyBinder` and instantiating parameters](./ty_module/early_binder.md) -- [Binders and Higher ranked regions](./ty_module/binders.md) - - [Instantiating binders](./ty_module/instantiating_binders.md) -- [Early vs Late bound parameters](./early_late_parameters.md) +- [Generic parameter definitions](./generic-parameters-summary.md) + - [`EarlyBinder` and instantiating 
parameters](./ty-module/early-binder.md) +- [Binders and Higher ranked regions](./ty-module/binders.md) + - [Instantiating binders](./ty-module/instantiating-binders.md) +- [Early vs Late bound parameters](./early-late-parameters.md) - [The `ty` module: representing types](./ty.md) - - [ADTs and Generic Arguments](./ty_module/generic_arguments.md) - - [Parameter types/consts/regions](./ty_module/param_ty_const_regions.md) + - [ADTs and Generic Arguments](./ty-module/generic-arguments.md) + - [Parameter types/consts/regions](./ty-module/param-ty-const-regions.md) - [`TypeFolder` and `TypeFoldable`](./ty-fold.md) - [Aliases and Normalization](./normalization.md) -- [Typing/Param Envs](./typing_parameter_envs.md) +- [Typing/Param Envs](./typing-parameter-envs.md) - [Type inference](./type-inference.md) - [Trait solving](./traits/resolution.md) - [Higher-ranked trait bounds](./traits/hrtb.md) @@ -197,25 +198,25 @@ - [Opaque types](./opaque-types-type-alias-impl-trait.md) - [Inference details](./opaque-types-impl-trait-inference.md) - [Return Position Impl Trait In Trait](./return-position-impl-trait-in-trait.md) - - [Region inference restrictions][opaque-infer] + - [Region inference restrictions](./borrow-check/opaque-types-region-inference-restrictions.md) - [Const condition checking](./effects.md) - [Pattern and exhaustiveness checking](./pat-exhaustive-checking.md) - [Unsafety checking](./unsafety-checking.md) - [MIR dataflow](./mir/dataflow.md) - [Drop elaboration](./mir/drop-elaboration.md) -- [The borrow checker](./borrow_check.md) - - [Tracking moves and initialization](./borrow_check/moves_and_initialization.md) - - [Move paths](./borrow_check/moves_and_initialization/move_paths.md) - - [MIR type checker](./borrow_check/type_check.md) - - [Drop check](./borrow_check/drop_check.md) - - [Region inference](./borrow_check/region_inference.md) - - [Constraint propagation](./borrow_check/region_inference/constraint_propagation.md) - - [Lifetime 
parameters](./borrow_check/region_inference/lifetime_parameters.md) - - [Member constraints](./borrow_check/region_inference/member_constraints.md) - - [Placeholders and universes][pau] - - [Closure constraints](./borrow_check/region_inference/closure_constraints.md) - - [Error reporting](./borrow_check/region_inference/error_reporting.md) - - [Two-phase-borrows](./borrow_check/two_phase_borrows.md) +- [The borrow checker](./borrow-check.md) + - [Tracking moves and initialization](./borrow-check/moves-and-initialization.md) + - [Move paths](./borrow-check/moves-and-initialization/move-paths.md) + - [MIR type checker](./borrow-check/type-check.md) + - [Drop check](./borrow-check/drop-check.md) + - [Region inference](./borrow-check/region-inference.md) + - [Constraint propagation](./borrow-check/region-inference/constraint-propagation.md) + - [Lifetime parameters](./borrow-check/region-inference/lifetime-parameters.md) + - [Member constraints](./borrow-check/region-inference/member-constraints.md) + - [Placeholders and universes](./borrow-check/region-inference/placeholders-and-universes.md) + - [Closure constraints](./borrow-check/region-inference/closure-constraints.md) + - [Error reporting](./borrow-check/region-inference/error-reporting.md) + - [Two-phase-borrows](./borrow-check/two-phase-borrows.md) - [Closure capture inference](./closure.md) - [Async closures/"coroutine-closures"](coroutine-closures.md) @@ -263,8 +264,3 @@ [Appendix E: Bibliography](./appendix/bibliography.md) [Appendix Z: HumorRust](./appendix/humorust.md) - ---- - -[pau]: ./borrow_check/region_inference/placeholders_and_universes.md -[opaque-infer]: ./borrow_check/opaque-types-region-inference-restrictions.md diff --git a/src/doc/rustc-dev-guide/src/appendix/background.md b/src/doc/rustc-dev-guide/src/appendix/background.md index d36927e82f748..e76285080394d 100644 --- a/src/doc/rustc-dev-guide/src/appendix/background.md +++ b/src/doc/rustc-dev-guide/src/appendix/background.md @@ -243,8 
+243,7 @@ use in lambda calculus evaluation (see [this Wikipedia article][wikideb] for more). In `rustc`, we use de Bruijn indices to [represent generic types][sub]. [wikideb]: https://en.wikipedia.org/wiki/De_Bruijn_index -[sub]: ../ty_module/generic_arguments.md - +[sub]: ../ty-module/generic-arguments.md Here is a basic example of how de Bruijn indices might be used for closures (we don't actually do this in `rustc` though!): diff --git a/src/doc/rustc-dev-guide/src/appendix/code-index.md b/src/doc/rustc-dev-guide/src/appendix/code-index.md index bf9d3bd465645..3e1eed17eba64 100644 --- a/src/doc/rustc-dev-guide/src/appendix/code-index.md +++ b/src/doc/rustc-dev-guide/src/appendix/code-index.md @@ -39,5 +39,5 @@ Item | Kind | Short description | Chapter | [Emitting Diagnostics]: ../diagnostics.html [Macro expansion]: ../macro-expansion.html [Name resolution]: ../name-resolution.html -[Parameter Environment]: ../typing_parameter_envs.html +[Parameter Environment]: ../typing-parameter-envs.html [Trait Solving: Goals and Clauses]: ../traits/goals-and-clauses.html#domain-goals diff --git a/src/doc/rustc-dev-guide/src/appendix/glossary.md b/src/doc/rustc-dev-guide/src/appendix/glossary.md index 901fb68c0513f..43935b12a2383 100644 --- a/src/doc/rustc-dev-guide/src/appendix/glossary.md +++ b/src/doc/rustc-dev-guide/src/appendix/glossary.md @@ -53,10 +53,10 @@ Term | Meaning normalize | A general term for converting to a more canonical form, but in the case of rustc typically refers to [associated type normalization](../traits/goals-and-clauses.md#normalizeprojection---type). newtype | A wrapper around some other type (e.g., `struct Foo(T)` is a "newtype" for `T`). This is commonly used in Rust to give a stronger type for indices. niche | Invalid bit patterns for a type _that can be used_ for layout optimizations. Some types cannot have certain bit patterns. For example, the `NonZero*` integers or the reference `&T` cannot be represented by a 0 bitstring. 
This means the compiler can perform layout optimizations by taking advantage of the invalid "niche value". An example application for this is the [*Discriminant elision on `Option`-like enums*](https://rust-lang.github.io/unsafe-code-guidelines/layout/enums.html#discriminant-elision-on-option-like-enums), which allows using a type's niche as the ["tag"](#tag) for an `enum` without requiring a separate field. -NLL | Short for [non-lexical lifetimes](../borrow_check/region_inference.md), this is an extension to Rust's borrowing system to make it be based on the control-flow graph. +NLL | Short for [non-lexical lifetimes](../borrow-check/region-inference.md), this is an extension to Rust's borrowing system to make it be based on the control-flow graph. node-id or `NodeId` | An index identifying a particular node in the AST or HIR; gradually being phased out and replaced with `HirId`. See [the HIR chapter for more](../hir.md#identifiers-in-the-hir). obligation | Something that must be proven by the trait system. ([see more](../traits/resolution.md)) -placeholder | **NOTE: skolemization is deprecated by placeholder** a way of handling subtyping around "for-all" types (e.g., `for<'a> fn(&'a u32)`) as well as solving higher-ranked trait bounds (e.g., `for<'a> T: Trait<'a>`). See [the chapter on placeholder and universes](../borrow_check/region_inference/placeholders_and_universes.md) for more details. +placeholder | **NOTE: skolemization is deprecated by placeholder** a way of handling subtyping around "for-all" types (e.g., `for<'a> fn(&'a u32)`) as well as solving higher-ranked trait bounds (e.g., `for<'a> T: Trait<'a>`). See [the chapter on placeholder and universes](../borrow-check/region-inference/placeholders-and-universes.md) for more details. point | Used in the NLL analysis to refer to some particular location in the MIR; typically used to refer to a node in the control-flow graph. projection | A general term for a "relative path", e.g. 
`x.f` is a "field projection", and `T::Item` is an ["associated type projection"](../traits/goals-and-clauses.md#trait-ref). promoted constants | Constants extracted from a function and lifted to static scope; see [this section](../mir/index.md#promoted) for more details. diff --git a/src/doc/rustc-dev-guide/src/autodiff/installation.md b/src/doc/rustc-dev-guide/src/autodiff/installation.md index 5300c12459a19..e05fdc1160f2f 100644 --- a/src/doc/rustc-dev-guide/src/autodiff/installation.md +++ b/src/doc/rustc-dev-guide/src/autodiff/installation.md @@ -28,6 +28,7 @@ You can then run our test cases: ./x test --stage 1 tests/codegen-llvm/autodiff ./x test --stage 1 tests/pretty/autodiff ./x test --stage 1 tests/ui/autodiff +./x test --stage 1 tests/run-make/autodiff ./x test --stage 1 tests/ui/feature-gates/feature-gate-autodiff.rs ``` diff --git a/src/doc/rustc-dev-guide/src/borrow_check.md b/src/doc/rustc-dev-guide/src/borrow-check.md similarity index 95% rename from src/doc/rustc-dev-guide/src/borrow_check.md rename to src/doc/rustc-dev-guide/src/borrow-check.md index f206da42a82ca..826bcf8582ca8 100644 --- a/src/doc/rustc-dev-guide/src/borrow_check.md +++ b/src/doc/rustc-dev-guide/src/borrow-check.md @@ -42,10 +42,10 @@ the [`mir_borrowck`] query. - Next, we perform a number of [dataflow analyses](./appendix/background.md#dataflow) that compute what data is moved and when. -- We then do a [second type check](borrow_check/type_check.md) across the MIR: +- We then do a [second type check](borrow-check/type-check.md) across the MIR: the purpose of this type check is to determine all of the constraints between different regions. -- Next, we do [region inference](borrow_check/region_inference.md), which computes +- Next, we do [region inference](borrow-check/region-inference.md), which computes the values of each region — basically, the points in the control-flow graph where each lifetime must be valid according to the constraints we collected. 
- At this point, we can compute the "borrows in scope" at each point. diff --git a/src/doc/rustc-dev-guide/src/borrow_check/drop_check.md b/src/doc/rustc-dev-guide/src/borrow-check/drop-check.md similarity index 100% rename from src/doc/rustc-dev-guide/src/borrow_check/drop_check.md rename to src/doc/rustc-dev-guide/src/borrow-check/drop-check.md diff --git a/src/doc/rustc-dev-guide/src/borrow_check/moves_and_initialization.md b/src/doc/rustc-dev-guide/src/borrow-check/moves-and-initialization.md similarity index 94% rename from src/doc/rustc-dev-guide/src/borrow_check/moves_and_initialization.md rename to src/doc/rustc-dev-guide/src/borrow-check/moves-and-initialization.md index 043db2f5354e5..f9eaef8e2b46e 100644 --- a/src/doc/rustc-dev-guide/src/borrow_check/moves_and_initialization.md +++ b/src/doc/rustc-dev-guide/src/borrow-check/moves-and-initialization.md @@ -20,15 +20,15 @@ Consider this example: ```rust,ignore fn foo() { let a: Vec; - + // a is not initialized yet - + a = vec![22]; - + // a is initialized here - + std::mem::drop(a); // a is moved here - + // a is no longer initialized here let l = a.len(); //~ ERROR @@ -44,7 +44,7 @@ moves `a` into the call, and hence it becomes uninitialized again. To make it easier to peruse, this section is broken into a number of subsections: -- [Move paths](./moves_and_initialization/move_paths.html) the +- [Move paths](./moves-and-initialization/move-paths.md) the *move path* concept that we use to track which local variables (or parts of local variables, in some cases) are initialized. 
- TODO *Rest not yet written* =) diff --git a/src/doc/rustc-dev-guide/src/borrow_check/moves_and_initialization/move_paths.md b/src/doc/rustc-dev-guide/src/borrow-check/moves-and-initialization/move-paths.md similarity index 100% rename from src/doc/rustc-dev-guide/src/borrow_check/moves_and_initialization/move_paths.md rename to src/doc/rustc-dev-guide/src/borrow-check/moves-and-initialization/move-paths.md diff --git a/src/doc/rustc-dev-guide/src/borrow_check/opaque-types-region-inference-restrictions.md b/src/doc/rustc-dev-guide/src/borrow-check/opaque-types-region-inference-restrictions.md similarity index 99% rename from src/doc/rustc-dev-guide/src/borrow_check/opaque-types-region-inference-restrictions.md rename to src/doc/rustc-dev-guide/src/borrow-check/opaque-types-region-inference-restrictions.md index 9877dfc61e9ce..3a060ccfa2828 100644 --- a/src/doc/rustc-dev-guide/src/borrow_check/opaque-types-region-inference-restrictions.md +++ b/src/doc/rustc-dev-guide/src/borrow-check/opaque-types-region-inference-restrictions.md @@ -158,7 +158,7 @@ See [#113971] for how we used to conflate the difference. 
[#113971]: https://github.com/rust-lang/rust/issues/113971 [SCC]: https://en.wikipedia.org/wiki/Strongly_connected_component -[member constraints]: ./region_inference/member_constraints.md +[member constraints]: region-inference/member-constraints.md **interaction with "once modulo regions" restriction** In the example above, note the opaque type in the signature is `Opaque<'a>` and the one in the @@ -195,7 +195,7 @@ fn test<'a>() -> Opaque<'a> { } ``` -**Motivation:** +**Motivation:** In closure bodies, external lifetimes, although being categorized as "universal" lifetimes, behave more like existential lifetimes in that the relations between them are not known ahead of time, instead their values are inferred just like existential lifetimes and the requirements are @@ -208,7 +208,7 @@ Here is an example that details how : ```rust type Opaque<'x, 'y> = impl Sized; -// +// fn test<'a, 'b>(s: &'a str) -> impl FnOnce() -> Opaque<'a, 'b> { move || { s } //~^ ERROR hidden type for `Opaque<'_, '_>` captures lifetime that does not appear in bounds diff --git a/src/doc/rustc-dev-guide/src/borrow_check/region_inference.md b/src/doc/rustc-dev-guide/src/borrow-check/region-inference.md similarity index 98% rename from src/doc/rustc-dev-guide/src/borrow_check/region_inference.md rename to src/doc/rustc-dev-guide/src/borrow-check/region-inference.md index ba67bec45b65e..2a1c34df1a776 100644 --- a/src/doc/rustc-dev-guide/src/borrow_check/region_inference.md +++ b/src/doc/rustc-dev-guide/src/borrow-check/region-inference.md @@ -34,14 +34,14 @@ The MIR-based region analysis consists of two major functions: - The [NLL RFC] also includes fairly thorough (and hopefully readable) coverage. 
-[cp]: ./region_inference/constraint_propagation.md +[cp]: ./region-inference/constraint-propagation.md [fvb]: ../appendix/background.md#free-vs-bound [`replace_regions_in_mir`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_borrowck/nll/fn.replace_regions_in_mir.html [`compute_regions`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_borrowck/nll/fn.compute_regions.html [`RegionInferenceContext`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_borrowck/region_infer/struct.RegionInferenceContext.html [`solve`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_borrowck/region_infer/struct.RegionInferenceContext.html#method.solve [NLL RFC]: https://rust-lang.github.io/rfcs/2094-nll.html -[MIR type checker]: ./type_check.md +[MIR type checker]: ./type-check.md ## Universal regions @@ -97,7 +97,7 @@ The kinds of region elements are as follows: - There is an element `!1` for each placeholder region `!1`. This corresponds (intuitively) to some unknown set of other elements – for details on placeholders, see the section - [placeholders and universes](region_inference/placeholders_and_universes.md). + [placeholders and universes](region-inference/placeholders-and-universes.md). 
## Constraints diff --git a/src/doc/rustc-dev-guide/src/borrow_check/region_inference/closure_constraints.md b/src/doc/rustc-dev-guide/src/borrow-check/region-inference/closure-constraints.md similarity index 100% rename from src/doc/rustc-dev-guide/src/borrow_check/region_inference/closure_constraints.md rename to src/doc/rustc-dev-guide/src/borrow-check/region-inference/closure-constraints.md diff --git a/src/doc/rustc-dev-guide/src/borrow_check/region_inference/constraint_propagation.md b/src/doc/rustc-dev-guide/src/borrow-check/region-inference/constraint-propagation.md similarity index 98% rename from src/doc/rustc-dev-guide/src/borrow_check/region_inference/constraint_propagation.md rename to src/doc/rustc-dev-guide/src/borrow-check/region-inference/constraint-propagation.md index c3f8c03cb29f5..580a0aebe9a46 100644 --- a/src/doc/rustc-dev-guide/src/borrow_check/region_inference/constraint_propagation.md +++ b/src/doc/rustc-dev-guide/src/borrow-check/region-inference/constraint-propagation.md @@ -11,7 +11,7 @@ on one at a time (each of them is fairly independent from the others): - [member constraints][m_c] (`member R_m of [R_c...]`), which arise from impl Trait. [`propagate_constraints`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_borrowck/region_infer/struct.RegionInferenceContext.html#method.propagate_constraints -[m_c]: ./member_constraints.md +[m_c]: ./member-constraints.md In this chapter, we'll explain the "heart" of constraint propagation, covering both liveness and outlives constraints. @@ -29,7 +29,7 @@ given some set of constraints `{C}` and it computes a set of values - For each constraint C: - Update `Values` as needed to satisfy the constraint -[riv]: ../region_inference.md#region-variables +[riv]: ../region-inference.md#region-variables As a simple example, if we have a liveness constraint `R live at E`, then we can apply `Values(R) = Values(R) union {E}` to make the @@ -211,7 +211,7 @@ have already processed a given SCC or not. 
For each successor `S2`, once we have computed `S2`'s value, we can union those elements into the value for `S1`. (Although we have to be careful in this process to properly handle [higher-ranked -placeholders](./placeholders_and_universes.html). Note that the value +placeholders](./placeholders-and-universes.md). Note that the value for `S1` already contains the liveness constraints, since they were added in [`RegionInferenceContext::new`]. diff --git a/src/doc/rustc-dev-guide/src/borrow_check/region_inference/error_reporting.md b/src/doc/rustc-dev-guide/src/borrow-check/region-inference/error-reporting.md similarity index 100% rename from src/doc/rustc-dev-guide/src/borrow_check/region_inference/error_reporting.md rename to src/doc/rustc-dev-guide/src/borrow-check/region-inference/error-reporting.md diff --git a/src/doc/rustc-dev-guide/src/borrow_check/region_inference/lifetime_parameters.md b/src/doc/rustc-dev-guide/src/borrow-check/region-inference/lifetime-parameters.md similarity index 98% rename from src/doc/rustc-dev-guide/src/borrow_check/region_inference/lifetime_parameters.md rename to src/doc/rustc-dev-guide/src/borrow-check/region-inference/lifetime-parameters.md index 2d337dbc020f6..3795cea3c6bc1 100644 --- a/src/doc/rustc-dev-guide/src/borrow_check/region_inference/lifetime_parameters.md +++ b/src/doc/rustc-dev-guide/src/borrow-check/region-inference/lifetime-parameters.md @@ -54,7 +54,7 @@ In fact, the universal regions can be further subdivided based on where they were brought into scope (see the [`RegionClassification`] type). These subdivisions are not important for the topics discussed here, but become important when we consider [closure constraint -propagation](./closure_constraints.html), so we discuss them there. +propagation](./closure-constraints.md), so we discuss them there. 
[`RegionClassification`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_borrowck/universal_regions/enum.RegionClassification.html#variant.Local diff --git a/src/doc/rustc-dev-guide/src/borrow_check/region_inference/member_constraints.md b/src/doc/rustc-dev-guide/src/borrow-check/region-inference/member-constraints.md similarity index 100% rename from src/doc/rustc-dev-guide/src/borrow_check/region_inference/member_constraints.md rename to src/doc/rustc-dev-guide/src/borrow-check/region-inference/member-constraints.md diff --git a/src/doc/rustc-dev-guide/src/borrow_check/region_inference/placeholders_and_universes.md b/src/doc/rustc-dev-guide/src/borrow-check/region-inference/placeholders-and-universes.md similarity index 100% rename from src/doc/rustc-dev-guide/src/borrow_check/region_inference/placeholders_and_universes.md rename to src/doc/rustc-dev-guide/src/borrow-check/region-inference/placeholders-and-universes.md diff --git a/src/doc/rustc-dev-guide/src/borrow_check/two_phase_borrows.md b/src/doc/rustc-dev-guide/src/borrow-check/two-phase-borrows.md similarity index 100% rename from src/doc/rustc-dev-guide/src/borrow_check/two_phase_borrows.md rename to src/doc/rustc-dev-guide/src/borrow-check/two-phase-borrows.md diff --git a/src/doc/rustc-dev-guide/src/borrow_check/type_check.md b/src/doc/rustc-dev-guide/src/borrow-check/type-check.md similarity index 100% rename from src/doc/rustc-dev-guide/src/borrow_check/type_check.md rename to src/doc/rustc-dev-guide/src/borrow-check/type-check.md diff --git a/src/doc/rustc-dev-guide/src/building/suggested.md b/src/doc/rustc-dev-guide/src/building/suggested.md index c87dc6b28d875..b5c9b9b4e3d12 100644 --- a/src/doc/rustc-dev-guide/src/building/suggested.md +++ b/src/doc/rustc-dev-guide/src/building/suggested.md @@ -1,27 +1,29 @@ # Suggested workflows -The full bootstrapping process takes quite a while. Here are some suggestions to -make your life easier. +The full bootstrapping process takes quite a while. 
+Here are some suggestions to make your life easier. ## Installing a pre-push hook CI will automatically fail your build if it doesn't pass `tidy`, our internal -tool for ensuring code quality. If you'd like, you can install a [Git +tool for ensuring code quality. +If you'd like, you can install a [Git hook](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks) that will -automatically run `./x test tidy` on each push, to ensure your code is up to -par. If the hook fails then run `./x test tidy --bless` and commit the changes. +automatically run `./x test tidy` on each push, to ensure your code is up to par. +If the hook fails then run `./x test tidy --bless` and commit the changes. If you decide later that the pre-push behavior is undesirable, you can delete the `pre-push` file in `.git/hooks`. -A prebuilt git hook lives at [`src/etc/pre-push.sh`]. It can be copied into -your `.git/hooks` folder as `pre-push` (without the `.sh` extension!). +A prebuilt git hook lives at [`src/etc/pre-push.sh`]. + It can be copied into your `.git/hooks` folder as `pre-push` (without the `.sh` extension!). You can also install the hook as a step of running `./x setup`! ## Config extensions When working on different tasks, you might need to switch between different bootstrap configurations. -Sometimes you may want to keep an old configuration for future use. But saving raw config values in +Sometimes you may want to keep an old configuration for future use. +But saving raw config values in random files and manually copying and pasting them can quickly become messy, especially if you have a long history of different configurations. @@ -51,9 +53,10 @@ include = ["cross.toml"] You can also include extensions within extensions recursively. -**Note:** In the `include` field, the overriding logic follows a right-to-left order. For example, -in `include = ["a.toml", "b.toml"]`, extension `b.toml` overrides `a.toml`. Also, parent extensions -always overrides the inner ones. 
+**Note:** In the `include` field, the overriding logic follows a right-to-left order.
+For example,
+in `include = ["a.toml", "b.toml"]`, extension `b.toml` overrides `a.toml`.
+Also, parent extensions always override the inner ones.

## Configuring `rust-analyzer` for `rustc`

@@ -61,34 +64,37 @@ always overrides the inner ones.

Checking the "library" tree requires a stage1 compiler, which can be a heavy process on some computers.
For this reason, bootstrap has a flag called `--skip-std-check-if-no-download-rustc` that skips checking the
-"library" tree if `rust.download-rustc` isn't available. If you want to avoid putting a heavy load on your computer
+"library" tree if `rust.download-rustc` isn't available.
+If you want to avoid putting a heavy load on your computer
with `rust-analyzer`, you can add the `--skip-std-check-if-no-download-rustc` flag to your `./x check` command in
the `rust-analyzer` configuration.

### Project-local rust-analyzer setup

-`rust-analyzer` can help you check and format your code whenever you save a
-file. By default, `rust-analyzer` runs the `cargo check` and `rustfmt` commands,
+`rust-analyzer` can help you check and format your code whenever you save a file.
+By default, `rust-analyzer` runs the `cargo check` and `rustfmt` commands,
but you can override these commands to use more adapted versions of these tools
-when hacking on `rustc`. With custom setup, `rust-analyzer` can use `./x check`
+when hacking on `rustc`.
+With custom setup, `rust-analyzer` can use `./x check`
to check the sources, and the stage 0 rustfmt to format them.

The default `rust-analyzer.check.overrideCommand` command line will check all
-the crates and tools in the repository. 
+If you are working on a specific part, +you can override the command to only check the part you are working on to save checking time. +For example, if you are working on the compiler, you can override the command to `x check compiler --json-output` to only check the compiler part. You can run `x check --help --verbose` to see the available parts. Running `./x setup editor` will prompt you to create a project-local LSP config -file for one of the supported editors. You can also create the config file as a -step of running `./x setup`. +file for one of the supported editors. +You can also create the config file as a step of running `./x setup`. ### Using a separate build directory for rust-analyzer By default, when rust-analyzer runs a check or format command, it will share -the same build directory as manual command-line builds. This can be inconvenient -for two reasons: +the same build directory as manual command-line builds. +This can be inconvenient for two reasons: - Each build will lock the build directory and force the other to wait, so it becomes impossible to run command-line builds while rust-analyzer is running commands in the background. @@ -111,12 +117,11 @@ requires extra disk space. ### Visual Studio Code Selecting `vscode` in `./x setup editor` will prompt you to create a -`.vscode/settings.json` file which will configure Visual Studio code. The -recommended `rust-analyzer` settings live at +`.vscode/settings.json` file which will configure Visual Studio code. +The recommended `rust-analyzer` settings live at [`src/etc/rust_analyzer_settings.json`]. -If running `./x check` on save is inconvenient, in VS Code you can use a [Build -Task] instead: +If running `./x check` on save is inconvenient, in VS Code you can use a [Build Task] instead: ```JSON // .vscode/tasks.json @@ -140,27 +145,26 @@ Task] instead: ### Neovim -For Neovim users, there are a few options. 
The -easiest way is by using [neoconf.nvim](https://github.com/folke/neoconf.nvim/), -which allows for project-local configuration files with the native LSP. The -steps for how to use it are below. Note that they require rust-analyzer to -already be configured with Neovim. Steps for this can be [found -here](https://rust-analyzer.github.io/manual.html#nvim-lsp). +For Neovim users, there are a few options. +The easiest way is by using [neoconf.nvim](https://github.com/folke/neoconf.nvim/), +which allows for project-local configuration files with the native LSP. +The steps for how to use it are below. +Note that they require rust-analyzer to already be configured with Neovim. +Steps for this can be [found here](https://rust-analyzer.github.io/manual.html#nvim-lsp). -1. First install the plugin. This can be done by following the steps in the - README. -2. Run `./x setup editor`, and select `vscode` to create a - `.vscode/settings.json` file. `neoconf` is able to read and update - rust-analyzer settings automatically when the project is opened when this - file is detected. +1. First install the plugin. + This can be done by following the steps in the README. +2. Run `./x setup editor`, and select `vscode` to create a `.vscode/settings.json` file. + `neoconf` is able to read and update + rust-analyzer settings automatically when the project is opened when this file is detected. If you're using `coc.nvim`, you can run `./x setup editor` and select `vim` to -create a `.vim/coc-settings.json`. The settings can be edited with -`:CocLocalConfig`. The recommended settings live at -[`src/etc/rust_analyzer_settings.json`]. +create a `.vim/coc-settings.json`. +The settings can be edited with `:CocLocalConfig`. +The recommended settings live at [`src/etc/rust_analyzer_settings.json`]. -Another way is without a plugin, and creating your own logic in your -configuration. 
The following code will work for any checkout of rust-lang/rust (newer than February 2025): +Another way is without a plugin, and creating your own logic in your configuration. +The following code will work for any checkout of rust-lang/rust (newer than February 2025): ```lua local function expand_config_variables(option) @@ -216,8 +220,7 @@ lspconfig.rust_analyzer.setup { If you would like to use the build task that is described above, you may either make your own command in your config, or you can install a plugin such as -[overseer.nvim](https://github.com/stevearc/overseer.nvim) that can [read -VSCode's `task.json` +[overseer.nvim](https://github.com/stevearc/overseer.nvim) that can [read VSCode's `task.json` files](https://github.com/stevearc/overseer.nvim/blob/master/doc/guides.md#vs-code-tasks), and follow the same instructions as above. @@ -240,55 +243,58 @@ Helix comes with built-in LSP and rust-analyzer support. It can be configured through `languages.toml`, as described [here](https://docs.helix-editor.com/languages.html). You can run `./x setup editor` and select `helix`, which will prompt you to -create `languages.toml` with the recommended configuration for Helix. The -recommended settings live at [`src/etc/rust_analyzer_helix.toml`]. +create `languages.toml` with the recommended configuration for Helix. +The recommended settings live at [`src/etc/rust_analyzer_helix.toml`]. ### Zed Zed comes with built-in LSP and rust-analyzer support. It can be configured through `.zed/settings.json`, as described -[here](https://zed.dev/docs/configuring-languages). Selecting `zed` -in `./x setup editor` will prompt you to create a `.zed/settings.json` -file which will configure Zed with the recommended configuration. The -recommended `rust-analyzer` settings live +[here](https://zed.dev/docs/configuring-languages). 
+Selecting `zed` in `./x setup editor` will prompt you to create a `.zed/settings.json` +file which will configure Zed with the recommended configuration. +The recommended `rust-analyzer` settings live at [`src/etc/rust_analyzer_zed.json`]. ## Check, check, and check again -When doing simple refactoring, it can be useful to run `./x check` -continuously. If you set up `rust-analyzer` as described above, this will be -done for you every time you save a file. Here you are just checking that the +When doing simple refactoring, it can be useful to run `./x check` continuously. +If you set up `rust-analyzer` as described above, this will be +done for you every time you save a file. +Here you are just checking that the compiler can **build**, but often that is all you need (e.g., when renaming a -method). You can then run `./x build` when you actually need to run tests. +method). +You can then run `./x build` when you actually need to run tests. -In fact, it is sometimes useful to put off tests even when you are not 100% sure -the code will work. You can then keep building up refactoring commits and only -run the tests at some later time. You can then use `git bisect` to track down -**precisely** which commit caused the problem. A nice side-effect of this style +In fact, it is sometimes useful to put off tests even when you are not 100% sure the code will work. +You can then keep building up refactoring commits and only run the tests at some later time. +You can then use `git bisect` to track down **precisely** which commit caused the problem. +A nice side-effect of this style is that you are left with a fairly fine-grained set of commits at the end, all -of which build and pass tests. This often helps reviewing. +of which build and pass tests. +This often helps reviewing. ## Configuring `rustup` to use nightly -Some parts of the bootstrap process uses pinned, nightly versions of tools like -rustfmt. 
To make things like `cargo fmt` work correctly in your repo, run
+Some parts of the bootstrap process use pinned, nightly versions of tools like rustfmt.
+To make things like `cargo fmt` work correctly in your repo, run

```console
cd 
rustup override set nightly
```

-after [installing a nightly toolchain] with `rustup`. Don't forget to do this
-for all directories you have [setup a worktree for]. You may need to use the
-pinned nightly version from `src/stage0`, but often the normal `nightly` channel
-will work.
+after [installing a nightly toolchain] with `rustup`.
+Don't forget to do this for all directories you have [setup a worktree for].
+You may need to use the
+pinned nightly version from `src/stage0`, but often the normal `nightly` channel will work.

**Note** see [the section on vscode] for how to configure it with this real
rustfmt `x` uses, and [the section on rustup] for how to setup `rustup` toolchain
for your bootstrapped compiler

-**Note** This does _not_ allow you to build `rustc` with cargo directly. You
-still have to use `x` to work on the compiler or standard library, this just
+**Note** This does _not_ allow you to build `rustc` with cargo directly.
+You still have to use `x` to work on the compiler or standard library, this just
lets you use `cargo fmt`.

[installing a nightly toolchain]: https://rust-lang.github.io/rustup/concepts/channels.html?highlight=nightl#working-with-nightly-rust
@@ -300,18 +306,22 @@ lets you use `cargo fmt`.

If you are not working on the compiler, you often don't need to build the compiler tree.
For example, you can skip building the compiler and only build the `library` tree or the
-tools under `src/tools`. To achieve that, you have to enable this by setting the `download-rustc`
-option in your configuration. This tells bootstrap to use the latest nightly compiler for `stage > 0`
+tools under `src/tools`.
+To achieve that, you have to enable this by setting the `download-rustc`
+option in your configuration. 
+This tells bootstrap to use the latest nightly compiler for `stage > 0` steps, meaning it will have two precompiled compilers: stage0 compiler and `download-rustc` compiler -for `stage > 0` steps. This way, it will never need to build the in-tree compiler. As a result, your -build time will be significantly reduced by not building the in-tree compiler. +for `stage > 0` steps. +This way, it will never need to build the in-tree compiler. +As a result, your build time will be significantly reduced by not building the in-tree compiler. ## Faster rebuilds with `--keep-stage-std` -Sometimes just checking whether the compiler builds is not enough. A common -example is that you need to add a `debug!` statement to inspect the value of -some state or better understand the problem. In that case, you don't really need -a full build. By bypassing bootstrap's cache invalidation, you can often get +Sometimes just checking whether the compiler builds is not enough. +A common example is that you need to add a `debug!` statement to inspect the value of +some state or better understand the problem. +In that case, you don't really need a full build. +By bypassing bootstrap's cache invalidation, you can often get these builds to complete very fast (e.g., around 30 seconds). The only catch is this requires a bit of fudging and may produce compilers that don't work (but that is easily detected and fixed). @@ -323,53 +333,54 @@ The sequence of commands you want is as follows: - Note that we added the `--keep-stage-std=1` flag here As mentioned, the effect of `--keep-stage-std=1` is that we just _assume_ that the -old standard library can be re-used. If you are editing the compiler, this is -often true: you haven't changed the standard library, after all. But -sometimes, it's not true: for example, if you are editing the "metadata" part of +old standard library can be re-used. +If you are editing the compiler, this is +often true: you haven't changed the standard library, after all. 
+But sometimes, it's not true: for example, if you are editing the "metadata" part of the compiler, which controls how the compiler encodes types and other states into the `rlib` files, or if you are editing things that wind up in the metadata (such as the definition of the MIR). **The TL;DR is that you might get weird behavior from a compile when using `--keep-stage-std=1`** -- for example, strange [ICEs](../appendix/glossary.html#ice) -or other panics. In that case, you should simply remove the `--keep-stage-std=1` -from the command and rebuild. That ought to fix the problem. +or other panics. +In that case, you should simply remove the `--keep-stage-std=1` from the command and rebuild. +That ought to fix the problem. -You can also use `--keep-stage-std=1` when running tests. Something like this: +You can also use `--keep-stage-std=1` when running tests. +Something like this: - Initial test run: `./x test tests/ui` - Subsequent test run: `./x test tests/ui --keep-stage-std=1` ## Using incremental compilation -You can further enable the `--incremental` flag to save additional time in -subsequent rebuilds: +You can further enable the `--incremental` flag to save additional time in subsequent rebuilds: ```bash ./x test tests/ui --incremental --test-args issue-1234 ``` -If you don't want to include the flag with every command, you can enable it in -the `bootstrap.toml`: +If you don't want to include the flag with every command, you can enable it in the `bootstrap.toml`: ```toml [rust] incremental = true ``` -Note that incremental compilation will use more disk space than usual. If disk -space is a concern for you, you might want to check the size of the `build` +Note that incremental compilation will use more disk space than usual. +If disk space is a concern for you, you might want to check the size of the `build` directory from time to time. ## Fine-tuning optimizations -Setting `optimize = false` makes the compiler too slow for tests. 
However, to -improve the test cycle, you can disable optimizations selectively only for the +Setting `optimize = false` makes the compiler too slow for tests. +However, to improve the test cycle, you can disable optimizations selectively only for the crates you'll have to rebuild ([source](https://rust-lang.zulipchat.com/#narrow/stream/131828-t-compiler/topic/incremental.20compilation.20question/near/202712165)). For example, when working on `rustc_mir_build`, the `rustc_mir_build` and -`rustc_driver` crates take the most time to incrementally rebuild. You could -therefore set the following in the root `Cargo.toml`: +`rustc_driver` crates take the most time to incrementally rebuild. +You could therefore set the following in the root `Cargo.toml`: ```toml [profile.release.package.rustc_mir_build] @@ -382,22 +393,24 @@ opt-level = 0 Working on multiple branches in parallel can be a little annoying, since building the compiler on one branch will cause the old build and the incremental -compilation cache to be overwritten. One solution would be to have multiple +compilation cache to be overwritten. +One solution would be to have multiple clones of the repository, but that would mean storing the Git metadata multiple times, and having to update each clone individually. -Fortunately, Git has a better solution called [worktrees]. This lets you create -multiple "working trees", which all share the same Git database. Moreover, +Fortunately, Git has a better solution called [worktrees]. +This lets you create multiple "working trees", which all share the same Git database. +Moreover, because all of the worktrees share the same object database, if you update a branch (e.g. `main`) in any of them, you can use the new commits from any of the -worktrees. One caveat, though, is that submodules do not get shared. They will -still be cloned multiple times. +worktrees. +One caveat, though, is that submodules do not get shared. +They will still be cloned multiple times. 
[worktrees]: https://git-scm.com/docs/git-worktree Given you are inside the root directory for your Rust repository, you can create -a "linked working tree" in a new "rust2" directory by running the following -command: +a "linked working tree" in a new "rust2" directory by running the following command: ```bash git worktree add ../rust2 @@ -409,8 +422,7 @@ Creating a new worktree for a new branch based on `main` looks like: git worktree add -b my-feature ../rust2 main ``` -You can then use that rust2 folder as a separate workspace for modifying and -building `rustc`! +You can then use that rust2 folder as a separate workspace for modifying and building `rustc`! ## Working with nix diff --git a/src/doc/rustc-dev-guide/src/compiler-team.md b/src/doc/rustc-dev-guide/src/compiler-team.md index 495bd22da4d85..ee058c805f0df 100644 --- a/src/doc/rustc-dev-guide/src/compiler-team.md +++ b/src/doc/rustc-dev-guide/src/compiler-team.md @@ -3,7 +3,8 @@ > NOTE: > There exists much detail about the team [on Forge], making most of the following obsolete. -rustc is maintained by the [Rust compiler team][team]. The people who belong to +rustc is maintained by the [Rust compiler team][team]. +The people who belong to this team collectively work to track regressions and implement new features. Members of the Rust compiler team are people who have made significant contributions to rustc and its design. @@ -34,26 +35,24 @@ who are reviewers of each part. ## Rust compiler meeting The compiler team has a weekly meeting where we do triage and try to -generally stay on top of new bugs, regressions, and discuss important -things in general. -They are held on [Zulip][zulip-meetings]. It works roughly as follows: +generally stay on top of new bugs, regressions, and discuss important things in general. +They are held on [Zulip][zulip-meetings]. 
+It works roughly as follows: - **Announcements, MCPs/FCPs, and WG-check-ins:** We share some - announcements with the rest of the team about important things we want - everyone to be aware of. We also share the status of MCPs and FCPs and we - use the opportunity to have a couple of WGs giving us an update about - their work. + announcements with the rest of the team about important things we want everyone to be aware of. + We also share the status of MCPs and FCPs and we + use the opportunity to have a couple of WGs giving us an update about their work. - **Check for beta and stable nominations:** These are nominations of things to backport to beta and stable respectively. - We then look for new cases where the compiler broke previously working - code in the wild. Regressions are important issues to fix, so it's + We then look for new cases where the compiler broke previously working code in the wild. + Regressions are important issues to fix, so it's likely that they are tagged as P-critical or P-high; the major exception would be bug fixes (though even there we often [aim to give warnings first][procedure]). - **Review P-critical and P-high bugs:** P-critical and P-high bugs are - those that are sufficiently important for us to actively track - progress. P-critical and P-high bugs should ideally always have an - assignee. + those that are sufficiently important for us to actively track progress. + P-critical and P-high bugs should ideally always have an assignee. - **Check `S-waiting-on-t-compiler` and `I-compiler-nominated` issues:** These are issues where feedback from the team is desired. - **Look over the performance triage report:** We check for PRs that made the @@ -61,8 +60,7 @@ They are held on [Zulip][zulip-meetings]. It works roughly as follows: the regression can be addressed in a future PR. The meeting currently takes place on Thursdays at 10am Boston time -(UTC-4 typically, but daylight savings time sometimes makes things -complicated). 
+(UTC-4 typically, but daylight savings time sometimes makes things complicated).

[procedure]: ./bug-fix-procedure.md
[zulip-t-compiler]: https://rust-lang.zulipchat.com/#narrow/stream/131828-t-compiler
@@ -72,17 +70,16 @@ complicated).

## Team membership

Membership in the Rust team is typically offered when someone has been
-making significant contributions to the compiler for some
-time. Membership is both a recognition but also an obligation:
+making significant contributions to the compiler for some time.
+Membership is both a recognition and an obligation:
compiler team members are generally expected to help with upkeep as
well as doing reviews and other work.

If you are interested in becoming a compiler team member, the first
-thing to do is to start fixing some bugs, or get involved in a working
-group. One good way to find bugs is to look for
+thing to do is to start fixing some bugs, or get involved in a working group.
+One good way to find bugs is to look for
[open issues tagged with E-easy](https://github.com/rust-lang/rust/issues?q=is%3Aopen+is%3Aissue+label%3AE-easy)
-or
-[E-mentor](https://github.com/rust-lang/rust/issues?q=is%3Aopen+is%3Aissue+label%3AE-mentor).
+or [E-mentor](https://github.com/rust-lang/rust/issues?q=is%3Aopen+is%3Aissue+label%3AE-mentor).

You can also dig through the graveyard of PRs that were
[closed due to inactivity](https://github.com/rust-lang/rust/pulls?q=is%3Apr+label%3AS-inactive),
@@ -92,18 +89,17 @@ for which the original author didn't have time.

### r+ rights

-Once you have made a number of individual PRs to rustc, we will often
-offer r+ privileges. This means that you have the right to instruct
-"bors" (the robot that manages which PRs get landed into rustc) to
-merge a PR
+Once you have made a number of individual PRs to rustc, we will often offer r+ privileges. 
+This means that you have the right to instruct +"bors" (the robot that manages which PRs get landed into rustc) to merge a PR ([here are some instructions for how to talk to bors][bors-guide]). [bors-guide]: https://bors.rust-lang.org/ The guidelines for reviewers are as follows: -- You are always welcome to review any PR, regardless of who it is - assigned to. However, do not r+ PRs unless: +- You are always welcome to review any PR, regardless of who it is assigned to. + However, do not r+ PRs unless: - You are confident in that part of the code. - You are confident that nobody else wants to review it first. - For example, sometimes people will express a desire to review a @@ -119,18 +115,16 @@ The guidelines for reviewers are as follows: Once you have r+ rights, you can also be added to the [reviewer rotation]. [triagebot] is the bot that [automatically assigns] incoming PRs to reviewers. -If you are added, you will be randomly selected to review -PRs. If you find you are assigned a PR that you don't feel comfortable +If you are added, you will be randomly selected to review PRs. +If you find you are assigned a PR that you don't feel comfortable reviewing, you can also leave a comment like `r? @so-and-so` to assign to someone else — if you don't know who to request, just write `r? -@nikomatsakis for reassignment` and @nikomatsakis will pick someone -for you. +@nikomatsakis for reassignment` and @nikomatsakis will pick someone for you. [reviewer rotation]: https://github.com/rust-lang/rust/blob/36285c5de8915ecc00d91ae0baa79a87ed5858d5/triagebot.toml#L528-L577 [triagebot]: https://github.com/rust-lang/triagebot/ [automatically assigns]: https://forge.rust-lang.org/triagebot/pr-assignment.html -Getting on the reviewer rotation is much appreciated as it lowers the -review burden for all of us! However, if you don't have time to give -people timely feedback on their PRs, it may be better that you don't -get on the list. 
+Getting on the reviewer rotation is much appreciated as it lowers the review burden for all of us! +However, if you don't have time to give +people timely feedback on their PRs, it may be better that you don't get on the list. diff --git a/src/doc/rustc-dev-guide/src/const-eval.md b/src/doc/rustc-dev-guide/src/const-eval.md index ca6a35a5e97eb..a3fee034ec6ed 100644 --- a/src/doc/rustc-dev-guide/src/const-eval.md +++ b/src/doc/rustc-dev-guide/src/const-eval.md @@ -35,7 +35,7 @@ They're the wrappers of the `const_eval` query. Statics are special; all other functions do not represent statics correctly and have thus assertions preventing their use on statics. -The `const_eval_*` functions use a [`ParamEnv`](./typing_parameter_envs.html) of environment +The `const_eval_*` functions use a [`ParamEnv`](./typing-parameter-envs.md) of environment in which the constant is evaluated (e.g. the function within which the constant is used) and a [`GlobalId`]. The `GlobalId` is made up of an `Instance` referring to a constant or static or of an `Instance` of a function and an index into the function's `Promoted` table. diff --git a/src/doc/rustc-dev-guide/src/contributing.md b/src/doc/rustc-dev-guide/src/contributing.md index 46d0dc23394a2..83f4253a6a107 100644 --- a/src/doc/rustc-dev-guide/src/contributing.md +++ b/src/doc/rustc-dev-guide/src/contributing.md @@ -60,7 +60,7 @@ See [The Rust Book] for more details on Rust’s train release model. This is the only channel where unstable features are intended to be used, which happens via opt-in feature gates. -See [this chapter on implementing new features](./implementing_new_features.md) for more +See [this chapter on implementing new features](./implementing-new-features.md) for more information. 
[The Rust Book]: https://doc.rust-lang.org/book/appendix-07-nightly-rust.html diff --git a/src/doc/rustc-dev-guide/src/debuginfo/lldb-visualizers.md b/src/doc/rustc-dev-guide/src/debuginfo/lldb-visualizers.md index 40ab9dce375c9..83e2b0d5794e8 100644 --- a/src/doc/rustc-dev-guide/src/debuginfo/lldb-visualizers.md +++ b/src/doc/rustc-dev-guide/src/debuginfo/lldb-visualizers.md @@ -1,6 +1,6 @@ # LLDB - Python Providers -> NOTE: LLDB's C++<->Python FFI expects a version of python designated at the time LLDB was +> NOTE: LLDB's C++<->Python FFI expects a version of Python designated at the time LLDB was >compiled. LLDB is careful to correspond this version to the minimum in typical Linux and macOS >distributions, but on Windows there is no easy solution. If you receive an import error regarding >`_lldb` not existing, a mismatched Python version is likely the cause. @@ -11,14 +11,15 @@ [minimal_python_install]: https://discourse.llvm.org/t/a-minimal-python-install-for-lldb/88658 [issue_167001]: https://github.com/llvm/llvm-project/issues/167001 -> NOTE: Currently (Nov 2025), LLDB's minimum supported Python version is 3.8 with plans to update it to ->3.9 or 3.10 depending on several outside factors. Scripts should ideally be written with only the ->features available in the minimum supported Python version. Please see [this discussion][mrpv] for ->more info. +> NOTE: As of Nov 2025, +> LLDB's minimum supported Python version is 3.8, with plans to update it to +> 3.9 or 3.10, depending on several outside factors. Scripts should ideally be written with only the +> features available in the minimum supported Python version. Please see [this discussion][mrpv] for +> more info. 
[mrpv]: https://discourse.llvm.org/t/rfc-upgrading-llvm-s-minimum-required-python-version/88605/ -> NOTE: The path to LLDB's python package can be located via the CLI command `lldb -P` +> NOTE: The path to LLDB's Python package can be located via the CLI command `lldb -P` LLDB provides 3 mechanisms for customizing output: @@ -28,7 +29,8 @@ LLDB provides 3 mechanisms for customizing output: ## Formats -The official documentation is [here](https://lldb.llvm.org/use/variable.html#type-format). In short, +The official documentation is [here](https://lldb.llvm.org/use/variable.html#type-format). +In short, formats allow one to set the default print format for primitive types (e.g. print `25u8` as decimal `25`, hex `0x19`, or binary `00011001`). @@ -47,13 +49,15 @@ plugins and CLI. [sbvalue]: https://lldb.llvm.org/python_api/lldb.SBValue.html A Synthetic Provider is a Python class, written with a specific interface, that is associated with -one or more Rust types. The Synthetic Provider wraps `SBValue` objects and LLDB will call our +one or more Rust types. +The Synthetic Provider wraps `SBValue` objects and LLDB will call our class's functions when inspecting the variable. The wrapped value is still an `SBValue`, but when calling e.g. `SBValue.GetChildAtIndex`, it will -internally call `SyntheticProvider.get_child_at_index`. You can check if a value has a synthetic -provider via `SBValue.IsSynthetic()`, and which synthetic it is via `SBValue.GetTypeSynthetic()`. If -you want to interact with the underlying non-synthetic value, you can call +internally call `SyntheticProvider.get_child_at_index`. +You can check if a value has a synthetic +provider via `SBValue.IsSynthetic()`, and which synthetic it is via `SBValue.GetTypeSynthetic()`. +If you want to interact with the underlying non-synthetic value, you can call `SBValue.GetNonSyntheticValue()`. @@ -83,18 +87,20 @@ class SyntheticProvider: def get_value(self) -> SBValue: ... 
``` -Below are explanations of the methods, their quirks, and how they should generally be used. If a -method overrides an `SBValue` method, that method will be listed. +Below are explanations of the methods, their quirks, and how they should generally be used. +If a method overrides an `SBValue` method, that method will be listed. ### `__init__` -This function is called once per object, and must store the `valobj` in the python class so that it -is accessible elsewhere. Very little else should be done here. +This function is called once per object, and must store the `valobj` in the Python class so that it +is accessible elsewhere. +Very little else should be done here. ### (optional) `update` -This function is called prior to LLDB interacting with a variable, but after `__init__`. LLDB tracks -whether `update` has already been called. If it has been, and if it is not possible for the variable +This function is called prior to LLDB interacting with a variable, but after `__init__`. +LLDB tracks whether `update` has already been called. +If it has been, and if it is not possible for the variable to have changed (e.g. inspecting the same variable a second time without stepping), it will omit the call to `update`. @@ -107,15 +113,19 @@ Typical operations include storing the heap pointer, length, capacity, and eleme determining an enum variable's variant, or checking which slots of a `HashMap` are occupied. The bool returned from this function is somewhat complicated, see: -[`update` caching](#update-caching) below for more info. When in doubt, return `False`/`None`. -Currently (Nov 2025), none of the visualizers return `True`, but that may change as the debug info +[`update` caching](#update-caching) below for more info. +When in doubt, return `False`/`None`. +As of Nov 2025, +none of the visualizers return `True`, but that may change as the debug info test suite is improved. #### `update` caching -LLDB attempts to cache values when possible, including child values. 
This cache is effectively the +LLDB attempts to cache values when possible, including child values. +This cache is effectively the number of child objects, and the addresses of the underlying debugee memory that the child object -represents. By returning `True`, you indicate to LLDB that the number of children and the addresses +represents. +By returning `True`, you indicate to LLDB that the number of children and the addresses of those children have not changed since the last time `update` was run, meaning it can reuse the cached children. @@ -123,19 +133,23 @@ cached children. information**. Returning `False` indicates that there have been changes, the cache will be flushed, and the -children will be fetched from scratch. It is the safer option if you are unsure. +children will be fetched from scratch. +It is the safer option if you are unsure. -The only relationship that matters is parent-to-child. Grandchildren depend on the `update` function -of their direct parent, not that of the grandparent. +The only relationship that matters is parent-to-child. +Grandchildren depend on the `update` function of their direct parent, not that of the grandparent. -It is important to view the child cache as pointers-to-memory. For example, if a slice's `data_ptr` -value and `length` have not changed, returning `True` is appropriate. Even if the slice is mutable +It is important to view the child cache as pointers-to-memory. +For example, if a slice's `data_ptr` +value and `length` have not changed, returning `True` is appropriate. +Even if the slice is mutable and elements of it are overwritten (e.g. `slice[0] = 15`), because the child cache consists of *pointers*, they will reflect the new data at that memory location. Conversely, if `data_ptr` has changed, that means it is pointing to a new location in memory, the -child pointers are invalid, and the cache must be flushed. If the `length` has changed, we need to -flush the cache to reflect the new number of children. 
If `length` has changed but `data_ptr` has +child pointers are invalid, and the cache must be flushed. +If the `length` has changed, we need to flush the cache to reflect the new number of children. +If `length` has changed but `data_ptr` has not, it is possible to store the old children in the `SyntheticProvider` itself (e.g. `list[SBValue]`) and dole those out rather than generating them from scratch, only creating new children if they do not already exist in the `SyntheticProvider`'s list. @@ -143,7 +157,7 @@ children if they do not already exist in the `SyntheticProvider`'s list. For further clarification, see [this discussion](https://discourse.llvm.org/t/when-is-it-safe-to-cache-syntheticprovider-update/88608) > NOTE: when testing the caching behavior, do not rely on LLDB's heuristic to persist variables when -> stepping. Instead, store the variable in a python object (e.g. `v = lldb.frame.var("var_name")`), +> stepping. Instead, store the variable in a Python object (e.g. `v = lldb.frame.var("var_name")`), > step forward, and then inspect the stored variable. ### (optional) `has_children` @@ -159,19 +173,21 @@ Often, this will be a one-liner of `return True`/`return False` or > Overrides `SBValue.GetNumChildren` -Returns the total number of children that LLDB should try to access when printing the type. This -number **does not** need to match to total number of synthetic children. +Returns the total number of children that LLDB should try to access when printing the type. +This number **does not** need to match to total number of synthetic children. The `max_children` argument can be returned if calculating the number of children can be expensive (e.g. linked list). If this is not a consideration, `max_children` can be omitted from the function signature. Additionally, fields can be intentionally "hidden" from LLDB while still being accessible to the -user. 
For example, one might want a `vec![1, 2, 3]` to display only its elements, but still have the -`len` and `capacity` values accessible on request. By returning `3` from `num_children`, one can +user. +For example, one might want a `vec![1, 2, 3]` to display only its elements, but still have the +`len` and `capacity` values accessible on request. +By returning `3` from `num_children`, one can restrict LLDB to only displaying `[1, 2, 3]`, while users can still directly access `v.len` and -`v.capacity`. See: [Example Provider: Vec\](#example-provider-vect) to see an implementation of -this. +`v.capacity`. +See: [Example Provider: Vec\](#example-provider-vect) to see an implementation of this. ### `get_child_index` @@ -179,12 +195,14 @@ this. > > Affects `SBValue.GetChildMemberWithName` -Given a name, returns the index that the child should be accessed at. It is expected that the return -value of this function is passed directly to `get_child_at_index`. As with `num_children`, the +Given a name, returns the index that the child should be accessed at. +It is expected that the return value of this function is passed directly to `get_child_at_index`. +As with `num_children`, the values returned here *can* be arbitrary, so long as they are properly coordinated with `get_child_at_index`. -One special value is `$$dereference$$`. Accounting for this pseudo-field will allow LLDB to use the +One special value is `$$dereference$$`. +Accounting for this pseudo-field will allow LLDB to use the `SBValue` returned from `get_child_at_index` as the result of a dereference via LLDB's expression parser (e.g. `*val` and `val->field`) @@ -192,24 +210,28 @@ parser (e.g. `*val` and `val->field`) > Overrides `SBValue.GetChildAtIndex` -Given an index, returns a child `SBValue`. Often these are generated via +Given an index, returns a child `SBValue`. 
+Often these are generated via `SBValue.CreateValueFromAddress`, but less commonly `SBValue.CreateChildAtOffset`, -`SBValue.CreateValueFromExpression`, and `SBValue.CreateValueFromData`. These functions can be a +`SBValue.CreateValueFromExpression`, and `SBValue.CreateValueFromData`. +These functions can be a little finicky, so you may need to fiddle with them to get the output you want. -In some cases, `SBValue.Clone` is appropriate. It creates a new child that is an exact copy of an -existing child, but with a new name. This is useful for cases like tuples, which have field names of +In some cases, `SBValue.Clone` is appropriate. +It creates a new child that is an exact copy of an existing child, but with a new name. +This is useful for cases like tuples, which have field names of the style `__0`, `__1`, ... when we would prefer they were named `0`, `1`, ... -Small alterations can be made to the resulting child before it is returned. This is useful for -`&str`/`String`, where we would prefer if the children were displayed as +Small alterations can be made to the resulting child before it is returned. +This is useful for `&str`/`String`, where we would prefer if the children were displayed as `lldb.eFormatBytesWithASCII` rather than just as a decimal value. ### (optional) `get_type_name` > Overrides `SBValue.GetDisplayTypeName` -Overrides the displayed name of a type. For a synthetic `SBValue` whose type name is overridden, the +Overrides the displayed name of a type. +For a synthetic `SBValue` whose type name is overridden, the original type name can still be retrieved via `SBValue.GetTypeName()` and `SBValue.GetType().GetName()` @@ -228,44 +250,52 @@ access the generic parameters of the type. The `SBValue` returned is expected to be a primitive type or pointer, and is treated as the value of the variable in expressions. -> IMPORTANT: The `SBValue` returned **must be stored in the `SyntheticProvider`**. 
There is ->currently (Nov 2025) a bug where if the `SBValue` is acquired within `get_value` and not stored ->anywhere, Python will segfault when LLDB attempts to access the value. +> IMPORTANT: The `SBValue` returned **must be stored in the `SyntheticProvider`**. +> As of Nov 2025, +> there is a bug where if the `SBValue` is acquired within `get_value` and not stored +> anywhere, Python will segfault when LLDB attempts to access the value. ## Summary Providers -Summary providers are python functions of the following form: +Summary providers are Python functions of the following form: ```python def SummaryProvider(valobj: SBValue, _lldb_internal) -> str: ... ``` -Where the returned string is passed verbatim to the user. If the returned value isn't a string, it +Where the returned string is passed verbatim to the user. +If the returned value isn't a string, it is naively convered to a string (e.g. `return None` prints `"None"`, not an empty string). If the `SBValue` passed in is of a type that has a Synthetic Provider, `valobj.IsSynthetic()` will -return `True`, and the synthetic's corresponding functions will be used. If this is undesirable, the -original value can be retrieved via `valobj.GetNonSyntheticValue()`. This can be helpful in cases +return `True`, and the synthetic's corresponding functions will be used. +If this is undesirable, the original value can be retrieved via `valobj.GetNonSyntheticValue()`. +This can be helpful in cases like `String`, where individually calling `GetChildAtIndex` in a loop is much slower than accessing the heap pointer, reading the whole byte array directly from the debugee's memory, and using Python's `bytes.decode()`. ### Instance Summaries -Regular `SummaryProvider` functions take an opaque `SBValue`. That `SBValue` will reflect the type's +Regular `SummaryProvider` functions take an opaque `SBValue`. 
+That `SBValue` will reflect the type's `SyntheticProvider` if one exists, but we cannot access the `SyntheticProvider` instance itself, or -any of its internal implementation details. This is deterimental in cases where we need some of -those internal details to help complete the summary. Currently (Nov 2025), in the synthetic we just +any of its internal implementation details. +This is detrimental in cases where we need some of +those internal details to help complete the summary. +As of Nov 2025, in the synthetic we just run the non-synthetic value through the synthetic provider (`synth = SyntheticProvider(valobj.GetNonSyntheticValue(), _dict)`), but this is obviously suboptimal and there are plans to use the method outlined below. -Instead, we can leverage the Python module's state to allow for instance summaries. Prior art for +Instead, we can leverage the Python module's state to allow for instance summaries. +Prior art for this technique exists in the [old CodeLLDB Rust visualizer scripts](https://github.com/vadimcn/codelldb/blob/cf9574977b80e29c6de2c44d12f1071a53a54caf/formatters/rust.py#L110). In short: every Synthetic Provider's `__init__` function stores a unique ID and a weak reference to -`self` in a global dictionary. The Synthetic Provider class also implements a `get_summary` -function. The type's `SummaryProvider` is a function that looks up the unique ID in this dictionary, +`self` in a global dictionary. +The Synthetic Provider class also implements a `get_summary` function. +The type's `SummaryProvider` is a function that looks up the unique ID in this dictionary, then calls a `get_summary` on the instance it retrieves. ```python @@ -293,9 +323,11 @@ def InstanceSummaryProvider(valobj: SBValue, _dict) -> str: return SYNTH_BY_ID[valobj.GetNonSyntheticValue().GetID()].get_summary() ``` -For example, one might use this for the Enum synthetic provider. 
The summary would like to access +For example, one might use this for the Enum synthetic provider. +The summary would like to access the variant name, but there isn't a convenient way to reflect this via the type name or child-values -of the synthetic. By implementing an instance summary, we can retrieve the variant name via +of the synthetic. +By implementing an instance summary, we can retrieve the variant name via `self.variant.GetTypeName()` and some string manipulation. # Writing Visualizer Scripts @@ -304,18 +336,22 @@ of the synthetic. By implementing an instance summary, we can retrieve the varia >Visualizers must be written to account for both formats whenever possible. See: >[rust-codegen](./rust-codegen.md#dwarf-vs-pdb) for an overview of the differences -Scripts are injected into LLDB via the CLI command `command script import .py`. Once +Scripts are injected into LLDB via the CLI command `command script import .py`. +Once injected, classes and functions can be added to the synthetic/summary pool with `type synthetic add` -and `type summary add` respectively. The summaries and synthetics can be associated with a -"category", which is typically named after the language the providers are intended for. The category -we use will be called `Rust`. +and `type summary add` respectively. +The summaries and synthetics can be associated with a +"category", which is typically named after the language the providers are intended for. +The category we use will be called `Rust`. > TIP: all LLDB commands can be prefixed with `help` (e.g. `help type synthetic add`) for a brief description, list of arguments, and examples. -Currently (Nov 2025) we use `command source ...`, which executes a series of CLI commands from the +As of Nov 2025, +we use `command source ...`, which executes a series of CLI commands from the file [`lldb_commands`](https://github.com/rust-lang/rust/blob/main/src/etc/lldb_commands) to add -providers. 
+This file is somewhat unwieldy, and will soon be supplanted by the Python API equivalent outlined below. ## `__lldb_init_module` @@ -327,16 +363,20 @@ def __lldb_init_module(debugger: SBDebugger, _lldb_internal) -> None: ... ``` This function is called at the end of `command script import ...`, but before control returns back -to the CLI. It allows the script to initialize its own state. - -Crucially, it is passed a reference to the debugger itself. This allows us to create the `Rust` -category and add providers to it. It can also allow us to conditionally change which providers we -use depending on what version of LLDB the script detects. This is vital for backwards compatibility +to the CLI. +It allows the script to initialize its own state. + +Crucially, it is passed a reference to the debugger itself. +This allows us to create the `Rust` category and add providers to it. +It can also allow us to conditionally change which providers we +use depending on what version of LLDB the script detects. +This is vital for backwards compatibility once we begin using recognizer functions, as recognizers were added in lldb 19.0. ## Visualizer Resolution -The order that visualizers resolve in is listed [here][formatters_101]. In short: +The order that visualizers resolve in is listed [here][formatters_101]. +In short: [formatters_101]: https://lldb.llvm.org/use/variable.html#finding-formatters-101 @@ -347,14 +387,17 @@ provider), use that * If none of the above work, iterate through the regex type matchers Within each of those steps, **iteration is done backwards** to allow new commands to "override" old -commands. This is important for cases like `Box` vs `Box`, were we want a specialized +commands. +This is important for cases like `Box` vs `Box`, where we want a specialized synthetic for the former, but a more generalized synthetic for the latter. 
## Minutiae LLDB's API is very powerful, but there are some "gotchas" and unintuitive behavior, some of which -will be outlined below. The python implementation can be viewed at the path returned by the CLI -command `lldb -P` in `lldb\__init__.py`. In addition to the +will be outlined below. +The Python implementation can be viewed at the path returned by the CLI +command `lldb -P` in `lldb\__init__.py`. +In addition to the [examples in the lldb repo][synth_examples], there are also [C++ visualizers][plugin_cpp] that can be used as a reference (e.g. [LibCxxVector, the equivalent to `Vec`][cxx_vector]). While C++'s visualizers are written in C++ and have access to LLDB's internals, the API and general practices @@ -370,19 +413,22 @@ are very similar. children of the pointed-to-object are its own children. * The non-function fields are typically [`property()`][property] fields that point directly to the function anyway (e.g. `SBValue.type = property(GetType, None)`). Accessing through these shorthands -is a bit slower to access than just calling the function directly, so they should be avoided. Some +is a bit slower to access than just calling the function directly, so they should be avoided. +Some of the properties return special objects with special properties (e.g. `SBValue.member` returns an -object that acts like `dict[str, SBValue]` to access children). Internally, many of these special +object that acts like `dict[str, SBValue]` to access children). +Internally, many of these special objects just allocate a new class instance and call the function on the `SBValue` anyway, resulting in additional performance loss (e.g. `SBValue.member` internally just implements `__getitem__` which is the one-liner `return self.valobj.GetChildMemberWithName(name)`) * `SBValue.GetID` returns a unique `int` for each value for the duration of the debug session. -Synthetic `SBValue`'s have a different ID than their underlying `SBValue`. 
The underlying ID can be -retrieved via `SBValue.GetNonSyntheticValue().GetID()`. +Synthetic `SBValue`'s have a different ID than their underlying `SBValue`. +The underlying ID can be retrieved via `SBValue.GetNonSyntheticValue().GetID()`. * When manually calculating an address, `SBValue.GetValueAsAddress` should be preferred over `SBValue.GetValueAsUnsigned` due to [target-specific behavior][get_address] * Getting a string representation of an `SBValue` can be tricky because `GetSummary` requires a -summary provider and `GetValue` requires the type be representable by a primitive. In almost all +summary provider and `GetValue` requires the type be representable by a primitive. +In almost all cases where neither of those conditions are met, the type is a user defined struct that can be passed through `StructSummaryProvider`. @@ -393,12 +439,14 @@ passed through `StructSummaryProvider`. * "Aggregate type" means a non-primitive struct/class/union * "Template" is equivalent to "Generic" -* Types can be looked up by their name via `SBTarget.FindFirstType(type_name)`. `SBTarget` can be -acquired via `SBValue.GetTarget` +* Types can be looked up by their name via `SBTarget.FindFirstType(type_name)`. + `SBTarget` can be acquired via `SBValue.GetTarget` * `SBType.template_args` returns `None` instead of an empty list if the type has no generics * It is sometimes necessary to transform a type into the type you want via functions like -`SBType.GetArrayType` and `SBType.GetPointerType`. These functions cannot fail. They ask the -underlying LLDB `TypeSystem` plugin for the type, bypassing the debug info completely. Even if the +`SBType.GetArrayType` and `SBType.GetPointerType`. +These functions cannot fail. +They ask the underlying LLDB `TypeSystem` plugin for the type, bypassing the debug info completely. +Even if the type does not exist in the debug info at all, these functions can create the appropriate type. 
* `SBType.GetCanonicalType` is effectively `SBType.GetTypedefedType` + `SBType.GetUnqualifiedType`. Unlike `SBType.GetTypedefedType`, it will always return a valid `SBType` regardless of whether or @@ -411,11 +459,13 @@ always possible since the static fields are otherwise completely inaccessible. ## SyntheticProvider -We start with the typical prelude, using `__slots__` since we have known fields. In addition to the +We start with the typical prelude, using `__slots__` since we have known fields. +In addition to the object itself, we also need to store the type of the elements because `Vec`'s heap pointer is a -`*mut u8`, not a `*mut T`. Rust is a statically typed language, so the type of `T` will never -change. That means we can store it during initialization. The heap pointer, length, and capacity -*can* change though, and thus are default initialized here. +`*mut u8`, not a `*mut T`. +Rust is a statically typed language, so the type of `T` will never change. +That means we can store it during initialization. +The heap pointer, length, and capacity *can* change though, and thus are default initialized here. ```python import lldb @@ -452,12 +502,15 @@ class VecSyntheticProvider: For the implementation of `get_template_args` and `resolve_msvc_template_arg`, please see: [`lldb_providers.py`](https://github.com/rust-lang/rust/blob/main/src/etc/lldb_providers.py#L136). -Next, the update function. We check if the pointer or length have changed. We can ommit checking the -capacity, as the number of children will remain the same unless `len` changes. If changing the -capacity resulted in a reallocation, `data_ptr`'s address would be different. +Next, the update function. +We check if the pointer or length have changed. +We can omit checking the +capacity, as the number of children will remain the same unless `len` changes. +If changing the capacity resulted in a reallocation, `data_ptr`'s address would be different. 
If `data_ptr` and `length` haven't changed, we can take advantage of LLDB's caching and return -early. If they have changed, we store the new values and tell LLDB to flush the cache. +early. +If they have changed, we store the new values and tell LLDB to flush the cache. ```python def update(self): @@ -490,9 +543,10 @@ def num_children(self) -> int: When accessing elements, we expect values of the format `[0]`, `[1]`, etc. to mimic indexing. Additionally, we still want the user to be able to quickly access the length and capacity, as they -can be very useful when debugging. We assign these values `u32::MAX - 1` and `u32::MAX - 2` -respectively, as we can almost surely guarantee that they will not overlap with element values. Note -that we can account for both the full and shorthand `capacity` name. +can be very useful when debugging. +We assign these values `u32::MAX - 1` and `u32::MAX - 2` +respectively, as we can almost surely guarantee that they will not overlap with element values. +Note that we can account for both the full and shorthand `capacity` name. ```python def get_child_index(self, name: str) -> int: @@ -527,17 +581,19 @@ def get_child_at_index(self, index: int) -> SBValue: return self.valobj.CreateValueFromAddress(f"[{index}]", addr, self.element_type) ``` -For the type's display name, we can strip the path qualifier. User defined types named -`Vec` will end up fully qualified, so there shouldn't be any ambiguity. We can also remove the -allocator generic, as it's very very rarely useful. We use `get_template_args` instead of -`self.element_type.GetName()` for 3 reasons: +For the type's display name, we can strip the path qualifier. +User defined types named `Vec` will end up fully qualified, so there shouldn't be any ambiguity. +We can also remove the allocator generic, as it's very very rarely useful. +We use `get_template_args` instead of `self.element_type.GetName()` for 3 reasons: 1. 
If we fail to resolve the element type for any reason, `self.valobj`'s type name can still let the user know what the real type of the element is 2. Type names are not subject to the limitations of DWARF and PDB nodes, so the template type in the name will reflect things like `*const`/`*mut` and `&`/`&mut`. -3. We do not currently (Nov 2025) normalize MSVC type names, but once we do, we will need to work with the -string-names of types anyway. It's also much easier to cache a string-to-string conversion compared +3. As of Nov 2025, +we don't normalize MSVC type names, but once we do, we will need to work with the +string-names of types anyway. +It's also much easier to cache a string-to-string conversion compared to an `SBType`-to-string conversion. ```python @@ -550,11 +606,14 @@ the `get_value` function. ## SummaryProvider -The summary provider is very simple thanks to our synthetic provider. The only real hiccup is that -`GetSummary` only returns a value if the object's type has a `SummaryProvider`. If it doesn't, it -will return an empty string which is not ideal. In a full set of visualizer scripts, we can ensure +The summary provider is very simple thanks to our synthetic provider. +The only real hiccup is that +`GetSummary` only returns a value if the object's type has a `SummaryProvider`. +If it doesn't, it will return an empty string which is not ideal. +In a full set of visualizer scripts, we can ensure that every type that doesn't have a `GetSummary()` or a `GetValue()` is a struct, and then delegate -to a generic `StructSummaryProvider`. For this demonstration, I will gloss over that detail. +to a generic `StructSummaryProvider`. +For this demonstration, I will gloss over that detail. 
```python def VecSummaryProvider(valobj: SBValue, _lldb_internal) -> str: @@ -659,4 +718,4 @@ We can also confirm that the "hidden" length and capacity are still accessible: (unsigned long long) vec_v.capacity = 5 (lldb) v vec_v.cap (unsigned long long) vec_v.cap = 5 -``` \ No newline at end of file +``` diff --git a/src/doc/rustc-dev-guide/src/early_late_parameters.md b/src/doc/rustc-dev-guide/src/early-late-parameters.md similarity index 100% rename from src/doc/rustc-dev-guide/src/early_late_parameters.md rename to src/doc/rustc-dev-guide/src/early-late-parameters.md diff --git a/src/doc/rustc-dev-guide/src/feature-gates.md b/src/doc/rustc-dev-guide/src/feature-gates.md index 9806f73c483c2..d2a5173607144 100644 --- a/src/doc/rustc-dev-guide/src/feature-gates.md +++ b/src/doc/rustc-dev-guide/src/feature-gates.md @@ -12,7 +12,7 @@ mechanism][libs-gate]. See ["Stability in code"][adding] in the "Implementing new features" section for instructions. -[adding]: ./implementing_new_features.md#stability-in-code +[adding]: ./implementing-new-features.md#stability-in-code ## Removing a feature gate @@ -80,5 +80,5 @@ for instructions. There are additional steps you will need to take beyond just updating the declaration! 
-["Stability in code"]: ./implementing_new_features.md#stability-in-code -["Updating the feature-gate listing"]: ./stabilization_guide.md#updating-the-feature-gate-listing +["Stability in code"]: ./implementing-new-features.md#stability-in-code +["Updating the feature-gate listing"]: ./stabilization-guide.md#updating-the-feature-gate-listing diff --git a/src/doc/rustc-dev-guide/src/generic_parameters_summary.md b/src/doc/rustc-dev-guide/src/generic-parameters-summary.md similarity index 96% rename from src/doc/rustc-dev-guide/src/generic_parameters_summary.md rename to src/doc/rustc-dev-guide/src/generic-parameters-summary.md index da38ba0455c2c..29a07e297e5e6 100644 --- a/src/doc/rustc-dev-guide/src/generic_parameters_summary.md +++ b/src/doc/rustc-dev-guide/src/generic-parameters-summary.md @@ -16,7 +16,7 @@ trait Trait { The `ty::Generics` used for `foo` would contain `[U]` and a parent of `Some(Trait)`. `Trait` would have a `ty::Generics` containing `[Self, T]` with a parent of `None`. -The [`GenericParamDef`] struct is used to represent each individual generic parameter in a `ty::Generics` listing. The `GenericParamDef` struct contains information about the generic parameter, for example its name, defid, what kind of parameter it is (i.e. type, const, lifetime). +The [`GenericParamDef`] struct is used to represent each individual generic parameter in a `ty::Generics` listing. The `GenericParamDef` struct contains information about the generic parameter, for example its name, defid, what kind of parameter it is (i.e. type, const, lifetime). `GenericParamDef` also contains a `u32` index representing what position the parameter is (starting from the outermost parent), this is the value used to represent usages of generic parameters (more on this in the [chapter on representing types][ch_representing_types]). 
@@ -25,4 +25,4 @@ Interestingly, `ty::Generics` does not currently contain _every_ generic paramet [ch_representing_types]: ./ty.md [`ty::Generics`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/struct.Generics.html [`GenericParamDef`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/generics/struct.GenericParamDef.html -[ch_binders]: ./ty_module/binders.md +[ch_binders]: ./ty-module/binders.md diff --git a/src/doc/rustc-dev-guide/src/git.md b/src/doc/rustc-dev-guide/src/git.md index abe72b29cb1e2..c0b449e8fb287 100644 --- a/src/doc/rustc-dev-guide/src/git.md +++ b/src/doc/rustc-dev-guide/src/git.md @@ -1,20 +1,22 @@ # Using Git -The Rust project uses [Git] to manage its source code. In order to -contribute, you'll need some familiarity with its features so that your changes +The Rust project uses [Git] to manage its source code. +In order to contribute, you'll need some familiarity with its features so that your changes can be incorporated into the compiler. [Git]: https://git-scm.com The goal of this page is to cover some of the more common questions and -problems new contributors face. Although some Git basics will be covered here, +problems new contributors face. +Although some Git basics will be covered here, if you find that this is still a little too fast for you, it might make sense to first read some introductions to Git, such as the Beginner and Getting -started sections of [this tutorial from Atlassian][atlassian-git]. GitHub also -provides [documentation] and [guides] for beginners, or you can consult the +started sections of [this tutorial from Atlassian][atlassian-git]. +GitHub also provides [documentation] and [guides] for beginners, or you can consult the more in depth [book from Git]. -This guide is incomplete. If you run into trouble with git that this page doesn't help with, +This guide is incomplete. 
+If you run into trouble with git that this page doesn't help with, please [open an issue] so we can document how to fix it. [open an issue]: https://github.com/rust-lang/rustc-dev-guide/issues/new @@ -26,15 +28,15 @@ please [open an issue] so we can document how to fix it. ## Prerequisites We'll assume that you've installed Git, forked [rust-lang/rust], and cloned the -forked repo to your PC. We'll use the command line interface to interact +forked repo to your PC. +We'll use the command line interface to interact with Git; there are also a number of GUIs and IDE integrations that can generally do the same things. [rust-lang/rust]: https://github.com/rust-lang/rust -If you've cloned your fork, then you will be able to reference it with `origin` -in your local repo. It may be helpful to also set up a remote for the official -rust-lang/rust repo via +If you've cloned your fork, then you will be able to reference it with `origin` in your local repo. +It may be helpful to also set up a remote for the official rust-lang/rust repo via ```console git remote add upstream https://github.com/rust-lang/rust.git @@ -54,21 +56,19 @@ useful when contributing to other repositories in the Rust project. ## Standard Process -Below is the normal procedure that you're likely to use for most minor changes -and PRs: +Below is the normal procedure that you're likely to use for most minor changes and PRs: - 1. Ensure that you're making your changes on top of `main`: - `git checkout main`. + 1. Ensure that you're making your changes on top of `main`: `git checkout main`. 2. Get the latest changes from the Rust repo: `git pull upstream main --ff-only`. (see [No-Merge Policy][no-merge-policy] for more info about this). 3. Make a new branch for your change: `git checkout -b issue-12345-fix`. 4. Make some changes to the repo and test them. 5. Stage your changes via `git add src/changed/file.rs src/another/change.rs` - and then commit them with `git commit`. 
Of course, making intermediate commits - may be a good idea as well. Avoid `git add .`, as it makes it too easy to - unintentionally commit changes that should not be committed, such as submodule - updates. You can use `git status` to check if there are any files you forgot - to stage. + and then commit them with `git commit`. + Of course, making intermediate commits may be a good idea as well. + Avoid `git add .`, as it makes it too easy to + unintentionally commit changes that should not be committed, such as submodule updates. + You can use `git status` to check if there are any files you forgot to stage. 6. Push your changes to your fork: `git push --set-upstream origin issue-12345-fix` (After adding commits, you can use `git push` and after rebasing or pulling-and-rebasing, you can use `git push --force-with-lease`). @@ -100,14 +100,15 @@ Here are some common issues you might run into: ### I made a merge commit by accident. Git has two ways to update your branch with the newest changes: merging and rebasing. -Rust [uses rebasing][no-merge-policy]. If you make a merge commit, it's not too hard to fix: -`git rebase -i upstream/main`. +Rust [uses rebasing][no-merge-policy]. +If you make a merge commit, it's not too hard to fix: `git rebase -i upstream/main`. See [Rebasing](#rebasing) for more about rebasing. ### I deleted my fork on GitHub! -This is not a problem from git's perspective. If you run `git remote -v`, +This is not a problem from git's perspective. +If you run `git remote -v`, it will say something like this: ```console @@ -137,19 +138,21 @@ You might also notice conflicts in the web UI: ![conflict in src/tools/cargo](./img/submodule-conflicts.png) The most common cause is that you rebased after a change and ran `git add .` without first running -`x` to update the submodules. Alternatively, you might have run `cargo fmt` instead of `x fmt` +`x` to update the submodules. 
+ Alternatively, you might have run `cargo fmt` instead of `x fmt` and modified files in a submodule, then committed the changes. To fix it, do the following things (if you changed a submodule other than cargo, replace `src/tools/cargo` with the path to that submodule): 1. See which commit has the accidental changes: `git log --stat -n1 src/tools/cargo` -2. Revert the changes to that commit: `git checkout ~ src/tools/cargo`. Type `~` - literally but replace `` with the output from step 1. +2. Revert the changes to that commit: `git checkout ~ src/tools/cargo`. + Type `~` literally but replace `` with the output from step 1. 3. Tell git to commit the changes: `git commit --fixup ` 4. Repeat steps 1-3 for all the submodules you modified. - If you modified the submodule in several different commits, you will need to repeat steps 1-3 - for each commit you modified. You'll know when to stop when the `git log` command shows a commit + for each commit you modified. + You'll know when to stop when the `git log` command shows a commit that's not authored by you. 5. Squash your changes into the existing commits: `git rebase --autosquash -i upstream/main` 6. [Push your changes](#standard-process). @@ -168,9 +171,11 @@ error: Please commit or stash them. (See for the difference between the two.) -This means you have made changes since the last time you made a commit. To be able to rebase, either +This means you have made changes since the last time you made a commit. +To be able to rebase, either commit your changes, or make a temporary commit called a "stash" to have them still not be committed -when you finish rebasing. You may want to configure git to make this "stash" automatically, which +when you finish rebasing. +You may want to configure git to make this "stash" automatically, which will prevent the "cannot rebase" error in nearly all cases: ```console @@ -191,8 +196,9 @@ rm -r src/stdarch ### I see `<<< HEAD`? 
-You were probably in the middle of a rebase or merge conflict. See -[Conflicts](#rebasing-and-conflicts) for how to fix the conflict. If you don't care about the changes +You were probably in the middle of a rebase or merge conflict. +See [Conflicts](#rebasing-and-conflicts) for how to fix the conflict. +If you don't care about the changes and just want to get a clean copy of the repository back, you can use `git reset`: ```console @@ -213,17 +219,19 @@ hint: 'git pull ...') before pushing again. hint: See the 'Note about fast-forwards' in 'git push --help' for details. ``` -The advice this gives is incorrect! Because of Rust's -["no-merge" policy](#no-merge-policy) the merge commit created by `git pull` -will not be allowed in the final PR, in addition to defeating the point of the -rebase! Use `git push --force-with-lease` instead. +The advice this gives is incorrect! +Because of Rust's ["no-merge" policy](#no-merge-policy), the merge commit created by `git pull` +will not be allowed in the final PR, in addition to defeating the point of the rebase! +Use `git push --force-with-lease` instead. ### Git is trying to rebase commits I didn't write? If you see many commits in your rebase list, or merge commits, or commits by other people that you -didn't write, it likely means you're trying to rebase over the wrong branch. For example, you may +didn't write, it likely means you're trying to rebase over the wrong branch. +For example, you may have a `rust-lang/rust` remote `upstream`, but ran `git rebase origin/main` instead of `git rebase -upstream/main`. The fix is to abort the rebase and use the correct branch instead: +upstream/main`. 
+The fix is to abort the rebase and use the correct branch instead: ```console git rebase --abort @@ -239,7 +247,8 @@ git rebase --interactive upstream/main ### Quick note about submodules When updating your local repository with `git pull`, you may notice that sometimes -Git says you have modified some files that you have never edited. For example, +Git says you have modified some files that you have never edited. +For example, running `git status` gives you something like (note the `new commits` mention): ```console @@ -263,17 +272,18 @@ git submodule update ``` Some submodules are not actually needed; for example, `src/llvm-project` doesn't need to be checked -out if you're using `download-ci-llvm`. To avoid having to keep fetching its history, you can use +out if you're using `download-ci-llvm`. + To avoid having to keep fetching its history, you can use `git submodule deinit -f src/llvm-project`, which will also avoid it showing as modified again. ## Rebasing and Conflicts When you edit your code locally, you are making changes to the version of -rust-lang/rust that existed when you created your feature branch. As such, when -you submit your PR it is possible that some of the changes that have been made +rust-lang/rust that existed when you created your feature branch. +As such, when you submit your PR, it is possible that some of the changes that have been made to rust-lang/rust since then are in conflict with the changes you've made. -When this happens, you need to resolve the conflicts before your changes can be -merged. To do that, you need to rebase your work on top of rust-lang/rust. +When this happens, you need to resolve the conflicts before your changes can be merged. +To do that, you need to rebase your work on top of rust-lang/rust. ### Rebasing @@ -294,13 +304,14 @@ git pull --rebase https://github.com/rust-lang/rust.git main > have rebased and fixed all conflicts. 
When you rebase a branch on main, all the changes on your branch are -reapplied to the most recent version of `main`. In other words, Git tries to +reapplied to the most recent version of `main`. +In other words, Git tries to pretend that the changes you made to the old version of `main` were instead -made to the new version of `main`. During this process, you should expect to -encounter at least one "rebase conflict." This happens when Git's attempt to -reapply the changes fails because your changes conflicted with other changes -that have been made. You can tell that this happened because you'll see -lines in the output that look like +made to the new version of `main`. +During this process, you should expect to +encounter at least one "rebase conflict". This happens when Git's attempt to +reapply the changes fails because your changes conflicted with other changes that have been made. +You can tell that this happened because you'll see lines in the output that look like ```console CONFLICT (content): Merge conflict in file.rs @@ -316,21 +327,23 @@ Your code >>>>>>> 8fbf656... Commit fixes 12345 ``` -This represents the lines in the file that Git could not figure out how to -rebase. The section between `<<<<<<< HEAD` and `=======` has the code from -`main`, while the other side has your version of the code. You'll need to -decide how to deal with the conflict. You may want to keep your changes, +This represents the lines in the file that Git could not figure out how to rebase. +The section between `<<<<<<< HEAD` and `=======` has the code from +`main`, while the other side has your version of the code. +You'll need to decide how to deal with the conflict. +You may want to keep your changes, keep the changes on `main`, or combine the two. -Generally, resolving the conflict consists of two steps: First, fix the -particular conflict. Edit the file to make the changes you want and remove the -`<<<<<<<`, `=======` and `>>>>>>>` lines in the process. 
Second, check the
-surrounding code. If there was a conflict, its likely there are some logical
-errors lying around too! It's a good idea to run `x check` here to make sure
-there are no glaring errors.
+Generally, resolving the conflict consists of two steps: First, fix the particular conflict.
+Edit the file to make the changes you want and remove the
+`<<<<<<<`, `=======` and `>>>>>>>` lines in the process.
+Second, check the surrounding code.
+If there was a conflict, it's likely there are some logical errors lying around too!
+It's a good idea to run `x check` here to make sure there are no glaring errors.
 
 Once you're all done fixing the conflicts, you need to stage the files that had
-conflicts in them via `git add`. Afterwards, run `git rebase --continue` to let
+conflicts in them via `git add`.
+Afterwards, run `git rebase --continue` to let
 Git know that you've resolved the conflicts and it should finish the rebase.
 
 Once the rebase has succeeded, you'll want to update the associated branch on
@@ -340,13 +353,11 @@ your fork with `git push --force-with-lease`.
 
 The [above section](#rebasing) is a specific guide on rebasing work and dealing
 with merge conflicts.
-Here is some general advice about how to keep your local repo
-up-to-date with upstream changes:
+Here is some general advice about how to keep your local repo up-to-date with upstream changes:
 
-Using `git pull upstream main` while on your local `main` branch regularly
-will keep it up-to-date. You will also want to keep your feature branches
-up-to-date as well. After pulling, you can checkout the feature branches
-and rebase them:
+Using `git pull upstream main` while on your local `main` branch regularly will keep it up-to-date.
+You will also want to keep your feature branches up-to-date as well.
+After pulling, you can check out the feature branches and rebase them:
 
 ```console
 git checkout main
@@ -367,21 +378,21 @@ feature branches are in sync with their state on the Github side.
### Squash your commits -"Squashing" commits into each other causes them to be merged into a single -commit. Both the upside and downside of this is that it simplifies the history. +"Squashing" commits into each other causes them to be merged into a single commit. +Both the upside and downside of this is that it simplifies the history. On the one hand, you lose track of the steps in which changes were made, but the history becomes easier to work with. If there are no conflicts and you are just squashing to clean up the history, -use `git rebase --interactive --keep-base main`. This keeps the fork point -of your PR the same, making it easier to review the diff of what happened +use `git rebase --interactive --keep-base main`. +This keeps the fork point of your PR the same, making it easier to review the diff of what happened across your rebases. Squashing can also be useful as part of conflict resolution. If your branch contains multiple consecutive rewrites of the same code, or if the rebase conflicts are extremely severe, you can use -`git rebase --interactive main` to gain more control over the process. This -allows you to choose to skip commits, edit the commits that you do not skip, +`git rebase --interactive main` to gain more control over the process. +This allows you to choose to skip commits, edit the commits that you do not skip, change the order in which they are applied, or "squash" them into each other. Alternatively, you can sacrifice the commit history like this: @@ -395,34 +406,35 @@ git rebase --continue ``` You also may want to squash just the last few commits together, possibly -because they only represent "fixups" and not real changes. For example, +because they only represent "fixups" and not real changes. +For example, `git rebase --interactive HEAD~2` will allow you to edit the two commits only. 
### `git range-diff` After completing a rebase, and before pushing up your changes, you may want to -review the changes between your old branch and your new one. You can do that -with `git range-diff main @{upstream} HEAD`. +review the changes between your old branch and your new one. +You can do that with `git range-diff main @{upstream} HEAD`. The first argument to `range-diff`, `main` in this case, is the base revision -that you're comparing your old and new branch against. The second argument is +that you're comparing your old and new branch against. +The second argument is the old version of your branch; in this case, `@upstream` means the version that -you've pushed to GitHub, which is the same as what people will see in your pull -request. Finally, the third argument to `range-diff` is the *new* version of +you've pushed to GitHub, which is the same as what people will see in your pull request. +Finally, the third argument to `range-diff` is the *new* version of your branch; in this case, it is `HEAD`, which is the commit that is currently checked-out in your local repo. -Note that you can also use the equivalent, abbreviated form `git range-diff -main @{u} HEAD`. +Note that you can also use the equivalent, abbreviated form `git range-diff main @{u} HEAD`. Unlike in regular Git diffs, you'll see a `-` or `+` next to another `-` or `+` -in the range-diff output. The marker on the left indicates a change between the -old branch and the new branch, and the marker on the right indicates a change -you've committed. So, you can think of a range-diff as a "diff of diffs" since +in the range-diff output. +The marker on the left indicates a change between the +old branch and the new branch, and the marker on the right indicates a change you've committed. +So, you can think of a range-diff as a "diff of diffs" since it shows you the differences between your old diff and your new diff. 
-Here's an example of `git range-diff` output (taken from [Git's -docs][range-diff-example-docs]): +Here's an example of `git range-diff` output (taken from [Git's docs][range-diff-example-docs]): ```console -: ------- > 1: 0ddba11 Prepare for the inevitable! @@ -447,31 +459,33 @@ docs][range-diff-example-docs]): (Note that `git range-diff` output in your terminal will probably be easier to read than in this example because it will have colors.) -Another feature of `git range-diff` is that, unlike `git diff`, it will also -diff commit messages. This feature can be useful when amending several commit +Another feature of `git range-diff` is that, unlike `git diff`, it will also diff commit messages. +This feature can be useful when amending several commit messages so you can make sure you changed the right parts. `git range-diff` is a very useful command, but note that it can take some time -to get used to its output format. You may also find Git's documentation on the +to get used to its output format. +You may also find Git's documentation on the command useful, especially their ["Examples" section][range-diff-example-docs]. [range-diff-example-docs]: https://git-scm.com/docs/git-range-diff#_examples ## No-Merge Policy -The rust-lang/rust repo uses what is known as a "rebase workflow." This means -that merge commits in PRs are not accepted. As a result, if you are running -`git merge` locally, chances are good that you should be rebasing instead. Of -course, this is not always true; if your merge will just be a fast-forward, +The rust-lang/rust repo uses what is known as a "rebase workflow". This means +that merge commits in PRs are not accepted. +As a result, if you are running +`git merge` locally, chances are good that you should be rebasing instead. +Of course, this is not always true; if your merge will just be a fast-forward, like the merges that `git pull` usually performs, then no merge commit is -created and you have nothing to worry about. 
Running `git config merge.ff only`
-(this will apply the config to the local repo)
+created and you have nothing to worry about.
+Running `git config merge.ff only` (this will apply the config to the local repo)
 once will ensure that all the merges you perform are of this
 type, so that you cannot make a mistake.
 
-There are a number of reasons for this decision and like all others, it is a
-tradeoff. The main advantage is the generally linear commit history. This
-greatly simplifies bisecting and makes the history and commit log much easier
+There are a number of reasons for this decision, and like all others, it is a tradeoff.
+The main advantage is the generally linear commit history.
+This greatly simplifies bisecting and makes the history and commit log much easier
 to follow and understand.
 
 ## Tips for reviewing
@@ -490,15 +504,17 @@ You can also use `git diff -w origin/main` to view changes locally.
 To checkout PRs locally, you can use `git fetch upstream pull/NNNNN/head && git checkout FETCH_HEAD`.
 
-You can also use github's cli tool. Github shows a button on PRs where you can copy-paste the
-command to check it out locally. See for more info.
+You can also use GitHub's CLI tool.
+GitHub shows a button on PRs where you can copy-paste the command to check it out locally.
+See for more info.
 
 ![`gh` suggestion](./img/github-cli.png)
 
 ### Using GitHub dev
 
 As an alternative to the GitHub web UI, GitHub Dev provides a web-based editor for browsing
-repository and PRs. It can be opened by replacing `github.com` with `github.dev` in the URL
+the repository and PRs.
+It can be opened by replacing `github.com` with `github.dev` in the URL
 or by pressing `.` on a GitHub page. See [the docs for github.dev editor](https://docs.github.com/en/codespaces/the-githubdev-web-based-editor)
 for more details.
 
@@ -506,8 +522,8 @@
### Moving large sections of code Git and Github's default diff view for large moves *within* a file is quite poor; it will show each -line as deleted and each line as added, forcing you to compare each line yourself. Git has an option -to show moved lines in a different color: +line as deleted and each line as added, forcing you to compare each line yourself. +Git has an option to show moved lines in a different color: ```console git log -p --color-moved=dimmed-zebra --color-moved-ws=allow-indentation-change @@ -517,12 +533,14 @@ See [the docs for `--color-moved`](https://git-scm.com/docs/git-diff#Documentati ### range-diff -See [the relevant section for PR authors](#git-range-diff). This can be useful for comparing code +See [the relevant section for PR authors](#git-range-diff). +This can be useful for comparing code that was force-pushed to make sure there are no unexpected changes. ### Ignoring changes to specific files -Many large files in the repo are autogenerated. To view a diff that ignores changes to those files, +Many large files in the repo are autogenerated. +To view a diff that ignores changes to those files, you can use the following syntax (e.g. Cargo.lock): ```console @@ -535,25 +553,28 @@ Arbitrary patterns are supported (e.g. `:!compiler/*`). Patterns use the same sy ## Git submodules **NOTE**: submodules are a nice thing to know about, but it *isn't* an absolute -prerequisite to contribute to `rustc`. If you are using Git for the first time, +prerequisite to contribute to `rustc`. +If you are using Git for the first time, you might want to get used to the main concepts of Git before reading this section. The `rust-lang/rust` repository uses [Git submodules] as a way to use other -Rust projects from within the `rust` repo. Examples include Rust's fork of -`llvm-project`, `cargo` and libraries like `stdarch` and `backtrace`. +Rust projects from within the `rust` repo. 
+Examples include Rust's fork of +`llvm-project`, `cargo`, and libraries like `stdarch` and `backtrace`. Those projects are developed and maintained in an separate Git (and GitHub) repository, and they have their own Git history/commits, issue tracker and PRs. Submodules allow us to create some sort of embedded sub-repository inside the `rust` repository and use them like they were directories in the `rust` repository. -Take `llvm-project` for example. `llvm-project` is maintained in the [`rust-lang/llvm-project`] -repository, but it is used in `rust-lang/rust` by the compiler for code generation and -optimization. We bring it in `rust` as a submodule, in the `src/llvm-project` folder. +Take `llvm-project` for example. +`llvm-project` is maintained in the [`rust-lang/llvm-project`] +repository, but it is used in `rust-lang/rust` by the compiler for code generation and optimization. +We bring it in `rust` as a submodule, in the `src/llvm-project` folder. The contents of submodules are ignored by Git: submodules are in some sense isolated -from the rest of the repository. However, if you try to `cd src/llvm-project` and then -run `git status`: +from the rest of the repository. +However, if you try to `cd src/llvm-project` and then run `git status`: ```console HEAD detached at 9567f08afc943 @@ -566,7 +587,8 @@ particular commit. This is because, like any dependency, we want to be able to control which version to use. Submodules allow us to do just that: every submodule is "pinned" to a certain -commit, which doesn't change unless modified manually. If you use `git checkout ` +commit, which doesn't change unless modified manually. +If you use `git checkout ` in the `llvm-project` directory and go back to the `rust` directory, you can stage this change like any other, e.g. by running `git add src/llvm-project`. (Note that if you *don't* stage the change to commit, then you run the risk that running @@ -576,10 +598,10 @@ it automatically "updates" the submodules.) 
This version selection is usually done by the maintainers of the project, and looks like [this][llvm-update]. -Git submodules take some time to get used to, so don't worry if it isn't perfectly -clear yet. You will rarely have to use them directly and, again, you don't need -to know everything about submodules to contribute to Rust. Just know that they -exist and that they correspond to some sort of embedded subrepository dependency +Git submodules take some time to get used to, so don't worry if it isn't perfectly clear yet. +You will rarely have to use them directly and, again, you don't need +to know everything about submodules to contribute to Rust. +Just know that they exist and that they correspond to some sort of embedded subrepository dependency that Git can nicely and fairly conveniently handle for us. ### Hard-resetting submodules @@ -639,13 +661,12 @@ src/gcc` in this example, you need to: 2. `rm -rf .git/modules//config` 3. `rm -rf .gitconfig.lock` if somehow the `.gitconfig` lock is orphaned. -Then do something like `./x fmt` to have bootstrap manage the submodule -checkouts for you. +Then do something like `./x fmt` to have bootstrap manage the submodule checkouts for you. ## Ignoring commits during `git blame` -Some commits contain large reformatting changes that don't otherwise change functionality. They can -be instructed to be ignored by `git blame` through +Some commits contain large reformatting changes that don't otherwise change functionality. +They can be instructed to be ignored by `git blame` through [`.git-blame-ignore-revs`](https://github.com/rust-lang/rust/blob/HEAD/.git-blame-ignore-revs): 1. 
Configure `git blame` to use `.git-blame-ignore-revs` as the list of commits to ignore: `git diff --git a/src/doc/rustc-dev-guide/src/hir-typeck/coercions.md b/src/doc/rustc-dev-guide/src/hir-typeck/coercions.md index 158ac0885d323..c63c64f5f945d 100644 --- a/src/doc/rustc-dev-guide/src/hir-typeck/coercions.md +++ b/src/doc/rustc-dev-guide/src/hir-typeck/coercions.md @@ -1,4 +1,5 @@ # Coercions + Coercions are implicit operations which transform a value into a different type. A coercion *site* is a position where a coercion is able to be implicitly performed. There are two kinds of coercion sites: - one-to-one diff --git a/src/doc/rustc-dev-guide/src/implementing_new_features.md b/src/doc/rustc-dev-guide/src/implementing-new-features.md similarity index 99% rename from src/doc/rustc-dev-guide/src/implementing_new_features.md rename to src/doc/rustc-dev-guide/src/implementing-new-features.md index 4526a7af0f463..bb5c0bbba6e3e 100644 --- a/src/doc/rustc-dev-guide/src/implementing_new_features.md +++ b/src/doc/rustc-dev-guide/src/implementing-new-features.md @@ -225,7 +225,7 @@ The below steps needs to be followed in order to implement a new unstable featur [`rustc_ast_passes::feature_gate::check_crate`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_ast_passes/feature_gate/fn.check_crate.html [value the stability of Rust]: https://github.com/rust-lang/rfcs/blob/master/text/1122-language-semver.md [stability in code]: #stability-in-code -[here]: ./stabilization_guide.md +[here]: ./stabilization-guide.md [tracking issue]: #tracking-issues [add-feature-gate]: ./feature-gates.md#adding-a-feature-gate @@ -278,7 +278,7 @@ backward incompatible changes are generally no longer permitted To learn more about stabilization, see the [stabilization guide][stab]. 
-[stab]: ./stabilization_guide.md +[stab]: ./stabilization-guide.md [rust-blog]: https://github.com/rust-lang/blog.rust-lang.org/ [twir]: https://github.com/rust-lang/this-week-in-rust [twir-cft]: https://this-week-in-rust.org/blog/2025/01/22/this-week-in-rust-583/#calls-for-testing diff --git a/src/doc/rustc-dev-guide/src/memory.md b/src/doc/rustc-dev-guide/src/memory.md index f766a51898e41..24e7205a3565b 100644 --- a/src/doc/rustc-dev-guide/src/memory.md +++ b/src/doc/rustc-dev-guide/src/memory.md @@ -54,14 +54,14 @@ represented as a slice `&'tcx [tcx.types.i32, tcx.types.u32]`). defined and discussed in depth in the [`AdtDef and DefId`][adtdefid] section. - [`Predicate`] defines something the trait system has to prove (see [traits] module). -[`GenericArgs`]: ./ty_module/generic_arguments.md#the-genericargs-type -[adtdefid]: ./ty_module/generic_arguments.md#adtdef-and-defid +[`GenericArgs`]: ./ty-module/generic-arguments.md#the-genericargs-type +[adtdefid]: ./ty-module/generic-arguments.md#adtdef-and-defid [`TraitRef`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/type.TraitRef.html [`AdtDef` and `DefId`]: ./ty.md#adts-representation [`def-id`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_hir/def_id/struct.DefId.html [`GenericArgs`]: ./generic_arguments.html#GenericArgs [`mk_args`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/context/struct.TyCtxt.html#method.mk_args -[adtdefid]: ./ty_module/generic_arguments.md#adtdef-and-defid +[adtdefid]: ./ty-module/generic-arguments.md#adtdef-and-defid [`Predicate`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/struct.Predicate.html [`TraitRef`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/type.TraitRef.html [`ty::TyKind`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/sty/type.TyKind.html diff --git a/src/doc/rustc-dev-guide/src/normalization.md b/src/doc/rustc-dev-guide/src/normalization.md index 
53e20f1c0db7f..b458b04c17194 100644 --- a/src/doc/rustc-dev-guide/src/normalization.md +++ b/src/doc/rustc-dev-guide/src/normalization.md @@ -30,7 +30,7 @@ fn bar>() { } ``` -When an alias can't yet be normalized but may wind up normalizable in the [current environment](./typing_parameter_envs.md), we consider it to be an "ambiguous" alias. This can occur when an alias contains inference variables which prevent being able to determine how the trait is implemented: +When an alias can't yet be normalized but may wind up normalizable in the [current environment](./typing-parameter-envs.md), we consider it to be an "ambiguous" alias. This can occur when an alias contains inference variables which prevent being able to determine how the trait is implemented: ```rust fn foo() { // This alias is considered to be "ambiguous" @@ -113,7 +113,7 @@ fn bar() { foo::<{ FREE_CONST }>(); // The const arg is represented with some anonymous constant: // ```pseudo-rust - // const ANON: usize = FREE_CONST; + // const ANON: usize = FREE_CONST; // foo::(); // ``` } @@ -127,7 +127,7 @@ This is likely to change as const generics functionality is improved, for exampl There are two forms of normalization, structural (sometimes called *shallow*) and deep. Structural normalization should be thought of as only normalizing the "outermost" part of a type. On the other hand deep normalization will normalize *all* aliases in a type. -In practice structural normalization can result in more than just the outer layer of the type being normalized, but this behaviour should not be relied upon. Unnormalizable non-rigid aliases making use of bound variables (`for<'a>`) cannot be normalized by either kind of normalization. +In practice structural normalization can result in more than just the outer layer of the type being normalized, but this behaviour should not be relied upon. Unnormalizable non-rigid aliases making use of bound variables (`for<'a>`) cannot be normalized by either kind of normalization. 
As an example: conceptually, structurally normalizing the type `Vec<::Assoc>` would be a no-op, whereas deeply normalizing would give `Vec`. In practice even structural normalization would give `Vec`, though, again, this should not be relied upon. @@ -137,9 +137,9 @@ Changing the alias to use bound variables will result in different behaviour; `V Structurally normalizing aliases is a little bit more nuanced than replacing the alias with whatever it is defined as being equal to in its definition; the result of normalizing an alias should either be a rigid type or an inference variable (which will later be inferred to a rigid type). To accomplish this we do two things: -First, when normalizing an ambiguous alias it is normalized to an inference variable instead of leaving it as-is, this has two main effects: +First, when normalizing an ambiguous alias it is normalized to an inference variable instead of leaving it as-is, this has two main effects: - Even though an inference variable is not a rigid type, it will always wind up inferred *to* a rigid type so we ensure that the result of normalization will not need to be normalized again -- Inference variables are used in all cases where a type is non-rigid, allowing the rest of the compiler to not have to deal with *both* ambiguous aliases *and* inference variables +- Inference variables are used in all cases where a type is non-rigid, allowing the rest of the compiler to not have to deal with *both* ambiguous aliases *and* inference variables Secondly, instead of having normalization directly return the type specified in the definition of the alias, we normalize the type first before returning it[^1]. We do this so that normalization is idempotent/callers do not need to run it in a loop. 
@@ -207,7 +207,7 @@ In practice `query_normalize` is used for normalization in the borrow checker, a ##### `tcx.normalize_erasing_regions` -[`normalize_erasing_regions`][norm_erasing_regions] is generally used by parts of the compiler that are not doing type system analysis. This normalization entry point does not handle inference variables, lifetimes, or any diagnostics. Lints and codegen make heavy use of this entry point as they typically are working with fully inferred aliases that can be assumed to be well formed (or at least, are not responsible for erroring on). +[`normalize_erasing_regions`][norm_erasing_regions] is generally used by parts of the compiler that are not doing type system analysis. This normalization entry point does not handle inference variables, lifetimes, or any diagnostics. Lints and codegen make heavy use of this entry point as they typically are working with fully inferred aliases that can be assumed to be well formed (or at least, are not responsible for erroring on). [query_norm]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_trait_selection/infer/at/struct.At.html#method.query_normalize [norm_erasing_regions]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/struct.TyCtxt.html#method.normalize_erasing_regions @@ -232,7 +232,7 @@ One of the big changes between the old and new solver is our approach to when we ### Old solver -All types are expected to be normalized as soon as possible, so that all types encountered in the type system are either rigid or an inference variable (which will later be inferred to a rigid term). +All types are expected to be normalized as soon as possible, so that all types encountered in the type system are either rigid or an inference variable (which will later be inferred to a rigid term). As a concrete example: equality of aliases is implemented by assuming they're rigid and recursively equating the generic arguments of the alias. 
@@ -242,7 +242,7 @@ It's expected that all types potentially contain ambiguous or unnormalized alias As a concrete example: equality of aliases is implemented by a custom goal kind ([`PredicateKind::AliasRelate`][aliasrelate]) so that it can handle normalization of the aliases itself instead of assuming all alias types being equated are rigid. -Despite this approach we still deeply normalize during [writeback][writeback] for performance/simplicity, so that types in the MIR can still be assumed to have been deeply normalized. +Despite this approach we still deeply normalize during [writeback][writeback] for performance/simplicity, so that types in the MIR can still be assumed to have been deeply normalized. [aliasrelate]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/type.PredicateKind.html#variant.AliasRelate [writeback]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_hir_typeck/writeback/index.html @@ -257,7 +257,7 @@ It was a frequent occurrence that normalization calls would be missing, resultin ### Normalizing parameter environments -Another problem was that it was not possible to normalize `ParamEnv`s correctly in the old solver as normalization itself would expect a normalized `ParamEnv` in order to give correct results. See the chapter on `ParamEnv`s for more information: [`Typing/ParamEnv`s: Normalizing all bounds](./typing_parameter_envs.md#normalizing-all-bounds) +Another problem was that it was not possible to normalize `ParamEnv`s correctly in the old solver as normalization itself would expect a normalized `ParamEnv` in order to give correct results. 
See the chapter on `ParamEnv`s for more information: [`Typing/ParamEnv`s: Normalizing all bounds](./typing-parameter-envs.md#normalizing-all-bounds) ### Unnormalizable non-rigid aliases in higher ranked types @@ -269,7 +269,7 @@ Leaving the alias unnormalized would also be wrong as the old solver expects all Ultimately this means that it is not always possible to ensure all aliases inside of a value are rigid. -[universe]: borrow_check/region_inference/placeholders_and_universes.md#what-is-a-universe +[universe]: borrow-check/region-inference/placeholders-and-universes.md#what-is-a-universe [deeply_normalize]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_trait_selection/traits/normalize/trait.NormalizeExt.html#tymethod.deeply_normalize ## Handling uses of diverging aliases diff --git a/src/doc/rustc-dev-guide/src/offload/contributing.md b/src/doc/rustc-dev-guide/src/offload/contributing.md new file mode 100644 index 0000000000000..f3a1ed2150a1b --- /dev/null +++ b/src/doc/rustc-dev-guide/src/offload/contributing.md @@ -0,0 +1,32 @@ +# Contributing + +Contributions are always welcome. This project is experimental, so the documentation and code are likely incomplete. Please ask on [Zulip](https://rust-lang.zulipchat.com/#narrow/channel/422870-t-compiler.2Fgpgpu-backend) (preferred) or the Rust Community Discord for help if you get stuck or if our documentation is unclear. + +We generally try to automate as much of the compilation process as possible for users. However, as a contributor it might sometimes be easier to directly rewrite and compile the LLVM-IR modules (.ll) to quickly iterate on changes, without needing to repeatedly recompile rustc. For people familiar with LLVM we therefore have the shell script below. Only when you are then happy with the IR changes you can work on updating rustc to generate the new, desired output. 
+ +```sh +set -e +# set -e to avoid continuing on errors, which would likely use stale artifacts +# inputs: +# lib.ll (host code) + host.out (device) + +# You only need to run the first three commands once to generate lib.ll and host.out from your rust code. + +# RUSTFLAGS="-Ctarget-cpu=gfx90a --emit=llvm-bc,llvm-ir -Zoffload=Device -Csave-temps -Zunstable-options" cargo +offload build -Zunstable-options -v --target amdgcn-amd-amdhsa -Zbuild-std=core -r +# +# RUSTFLAGS="--emit=llvm-bc,llvm-ir -Csave-temps -Zoffload=Host=/absolute/path/to/project/target/amdgcn-amd-amdhsa/release/deps/host.out -Zunstable-options" cargo +offload build -r +# +# cp target/release/deps/.ll lib.ll + +opt lib.ll -o lib.bc + +"clang-21" "-cc1" "-triple" "x86_64-unknown-linux-gnu" "-S" "-save-temps=cwd" "-disable-free" "-clear-ast-before-backend" "-main-file-name" "lib.rs" "-mrelocation-model" "pic" "-pic-level" "2" "-pic-is-pie" "-mframe-pointer=all" "-fmath-errno" "-ffp-contract=on" "-fno-rounding-math" "-mconstructor-aliases" "-funwind-tables=2" "-target-cpu" "x86-64" "-tune-cpu" "generic" "-resource-dir" "//rust/build/x86_64-unknown-linux-gnu/llvm/lib/clang/21" "-ferror-limit" "19" "-fopenmp" "-fopenmp-offload-mandatory" "-fgnuc-version=4.2.1" "-fskip-odr-check-in-gmf" "-fembed-offload-object=host.out" "-fopenmp-targets=amdgcn-amd-amdhsa" "-faddrsig" "-D__GCC_HAVE_DWARF2_CFI_ASM=1" "-o" "host.s" "-x" "ir" "lib.bc" + +"clang-21" "-cc1as" "-triple" "x86_64-unknown-linux-gnu" "-filetype" "obj" "-main-file-name" "lib.rs" "-target-cpu" "x86-64" "-mrelocation-model" "pic" "-o" "host.o" "host.s" + +"//rust/build/x86_64-unknown-linux-gnu/llvm/bin/clang-linker-wrapper" "--should-extract=gfx90a" "--device-compiler=amdgcn-amd-amdhsa=-g" "--device-compiler=amdgcn-amd-amdhsa=-save-temps=cwd" "--device-linker=amdgcn-amd-amdhsa=-lompdevice" "--host-triple=x86_64-unknown-linux-gnu" "--save-temps" "--linker-path=//rust/build/x86_64-unknown-linux-gnu/lld/bin/ld.lld" "--hash-style=gnu" "--eh-frame-hdr" 
"-m" "elf_x86_64" "-pie" "-dynamic-linker" "/lib64/ld-linux-x86-64.so.2" "-o" "a.out" "/lib/../lib64/Scrt1.o" "/lib/../lib64/crti.o" "/opt/rh/gcc-toolset-12/root/usr/lib/gcc/x86_64-redhat-linux/12/crtbeginS.o" "-L//rust/build/x86_64-unknown-linux-gnu/llvm/bin/../lib/x86_64-unknown-linux-gnu" "-L//rust/build/x86_64-unknown-linux-gnu/llvm/lib/clang/21/lib/x86_64-unknown-linux-gnu" "-L/opt/rh/gcc-toolset-12/root/usr/lib/gcc/x86_64-redhat-linux/12" "-L/opt/rh/gcc-toolset-12/root/usr/lib/gcc/x86_64-redhat-linux/12/../../../../lib64" "-L/lib/../lib64" "-L/usr/lib64" "-L/lib" "-L/usr/lib" "host.o" "-lstdc++" "-lm" "-lomp" "-lomptarget" "-L//rust/build/x86_64-unknown-linux-gnu/llvm/lib" "-lgcc_s" "-lgcc" "-lpthread" "-lc" "-lgcc_s" "-lgcc" "/opt/rh/gcc-toolset-12/root/usr/lib/gcc/x86_64-redhat-linux/12/crtendS.o" "/lib/../lib64/crtn.o" + +LIBOMPTARGET_INFO=-1 OFFLOAD_TRACK_ALLOCATION_TRACES=true ./a.out +``` + +Please update the `` placeholders on the `clang-linker-wrapper` invocation. You will likely also need to adjust the library paths. See the linked usage section for details: [usage](usage.md#compile-instructions) diff --git a/src/doc/rustc-dev-guide/src/opaque-types-impl-trait-inference.md b/src/doc/rustc-dev-guide/src/opaque-types-impl-trait-inference.md index 42600ad87f8c5..ac908493ee564 100644 --- a/src/doc/rustc-dev-guide/src/opaque-types-impl-trait-inference.md +++ b/src/doc/rustc-dev-guide/src/opaque-types-impl-trait-inference.md @@ -5,7 +5,7 @@ This kind of type inference is particularly complex because, unlike other kinds of type inference, it can work across functions and function bodies. 
-[hidden type]: ./borrow_check/region_inference/member_constraints.html?highlight=%22hidden%20type%22#member-constraints +[hidden type]: ./borrow-check/region-inference/member-constraints.html?highlight=%22hidden%20type%22#member-constraints [opaque type]: ./opaque-types-type-alias-impl-trait.md ## Running example diff --git a/src/doc/rustc-dev-guide/src/overview.md b/src/doc/rustc-dev-guide/src/overview.md index 1200a854f8edb..7858c09fe724d 100644 --- a/src/doc/rustc-dev-guide/src/overview.md +++ b/src/doc/rustc-dev-guide/src/overview.md @@ -153,7 +153,7 @@ the final binary. [`simplify_try`]: https://github.com/rust-lang/rust/pull/66282 [`Lexer`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_parse/lexer/struct.Lexer.html [`Ty<'tcx>`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/struct.Ty.html -[borrow checking]: borrow_check.md +[borrow checking]: borrow-check.md [codegen]: backend/codegen.md [hir]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_hir/index.html [lex]: the-parser.md @@ -344,7 +344,7 @@ Compiler performance is a problem that we would like to improve on (and are always working on). One aspect of that is parallelizing `rustc` itself. -Currently, there is only one part of rustc that is parallel by default: +Currently, there is only one part of rustc that is parallel by default: [code generation](./parallel-rustc.md#Codegen). However, the rest of the compiler is still not yet parallel. 
There have been @@ -428,7 +428,7 @@ For more details on bootstrapping, see - Definition: [`rustc_middle/src/mir`](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/mir/index.html) - Definition of sources that manipulates the MIR: [`rustc_mir_build`](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir_build/index.html), [`rustc_mir_dataflow`](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir_dataflow/index.html), [`rustc_mir_transform`](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_mir_transform/index.html) - The Borrow Checker - - Guide: [MIR Borrow Check](borrow_check.md) + - Guide: [MIR Borrow Check](borrow-check.md) - Definition: [`rustc_borrowck`](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_borrowck/index.html) - Main entry point: [`mir_borrowck` query](https://doc.rust-lang.org/nightly/nightly-rustc/rustc_borrowck/fn.mir_borrowck.html) - `MIR` Optimizations diff --git a/src/doc/rustc-dev-guide/src/profiling.md b/src/doc/rustc-dev-guide/src/profiling.md index de06bd7cda7b3..519d9b5488cbd 100644 --- a/src/doc/rustc-dev-guide/src/profiling.md +++ b/src/doc/rustc-dev-guide/src/profiling.md @@ -12,7 +12,7 @@ Depending on what you're trying to measure, there are several different approach See [their docs](https://github.com/rust-lang/measureme/blob/master/summarize/README.md) for more information. - If you want function level performance data or even just more details than the above approaches: - - Consider using a native code profiler such as [perf](profiling/with_perf.md) + - Consider using a native code profiler such as [perf](profiling/with-perf.md) - or [tracy](https://github.com/nagisa/rust_tracy_client) for a nanosecond-precision, full-featured graphical interface. @@ -23,7 +23,7 @@ Depending on what you're trying to measure, there are several different approach - If you want to profile memory usage, you can use various tools depending on what operating system you are using. 
- - For Windows, read our [WPA guide](profiling/wpa_profiling.md). + - For Windows, read our [WPA guide](profiling/wpa-profiling.md). ## Optimizing rustc's bootstrap times with `cargo-llvm-lines` diff --git a/src/doc/rustc-dev-guide/src/profiling/with_perf.md b/src/doc/rustc-dev-guide/src/profiling/with-perf.md similarity index 99% rename from src/doc/rustc-dev-guide/src/profiling/with_perf.md rename to src/doc/rustc-dev-guide/src/profiling/with-perf.md index e452dde5226d4..50281b6b60088 100644 --- a/src/doc/rustc-dev-guide/src/profiling/with_perf.md +++ b/src/doc/rustc-dev-guide/src/profiling/with-perf.md @@ -62,7 +62,7 @@ cargo install addr2line --features="bin" Often we want to analyze a specific test from `perf.rust-lang.org`. The easiest way to do that is to use the [rustc-perf][rustc-perf] -benchmarking suite, this approach is described [here](with_rustc_perf.md). +benchmarking suite, this approach is described [here](with-rustc-perf.md). Instead of using the benchmark suite CLI, you can also profile the benchmarks manually. 
First, you need to clone the [rustc-perf][rustc-perf] repository: diff --git a/src/doc/rustc-dev-guide/src/profiling/with_rustc_perf.md b/src/doc/rustc-dev-guide/src/profiling/with-rustc-perf.md similarity index 100% rename from src/doc/rustc-dev-guide/src/profiling/with_rustc_perf.md rename to src/doc/rustc-dev-guide/src/profiling/with-rustc-perf.md diff --git a/src/doc/rustc-dev-guide/src/profiling/wpa_profiling.md b/src/doc/rustc-dev-guide/src/profiling/wpa-profiling.md similarity index 100% rename from src/doc/rustc-dev-guide/src/profiling/wpa_profiling.md rename to src/doc/rustc-dev-guide/src/profiling/wpa-profiling.md diff --git a/src/doc/rustc-dev-guide/src/rustdoc-internals/rustdoc-json-test-suite.md b/src/doc/rustc-dev-guide/src/rustdoc-internals/rustdoc-json-test-suite.md index 7a846b711326a..f9d9d1b59a919 100644 --- a/src/doc/rustc-dev-guide/src/rustdoc-internals/rustdoc-json-test-suite.md +++ b/src/doc/rustc-dev-guide/src/rustdoc-internals/rustdoc-json-test-suite.md @@ -32,10 +32,10 @@ It uses [JSONPath] as a query language, which takes a path, and returns a *list* - `//@ has `: Checks `` exists, i.e. matches at least 1 value. - `//@ !has `: Checks `` doesn't exist, i.e. matches 0 values. -- `//@ has `: Check `` exists, and at least 1 of the matches is equal to the given `` +- `//@ has `: Check `` exists, and at least 1 of the matches is equal to the given `` - `//@ !has `: Checks `` exists, but none of the matches equal the given ``. - `//@ is `: Check `` matches exactly one value, and it's equal to the given ``. -- `//@ is ...`: Check that `` matches to exactly every given ``. +- `//@ is ...`: Check that `` matches to exactly every given ``. Ordering doesn't matter here. - `//@ !is `: Check `` matches exactly one value, and that value is not equal to the given ``. - `//@ count `: Check that `` matches to `` of values. @@ -47,8 +47,9 @@ These are defined in [`directive.rs`]. Values can be either JSON values, or variables. 
-- JSON values are JSON literals, e.g. `true`, `"string"`, `{"key": "value"}`. - These often need to be quoted using `'`, to be processed as 1 value. See [§Argument splitting](#argument-splitting) +- JSON values are JSON literals, e.g. `true`, `"string"`, `{"key": "value"}`. + These often need to be quoted using `'`, to be processed as 1 value. + See [§Argument splitting](#argument-splitting) - Variables can be used to store the value in one path, and use it in later queries. They are set with the `//@ set = ` directive, and accessed with `$` diff --git a/src/doc/rustc-dev-guide/src/solve/opaque-types.md b/src/doc/rustc-dev-guide/src/solve/opaque-types.md index ac038e354f53f..6bb4534608dbd 100644 --- a/src/doc/rustc-dev-guide/src/solve/opaque-types.md +++ b/src/doc/rustc-dev-guide/src/solve/opaque-types.md @@ -51,7 +51,7 @@ We then check whether we're able to *semantically* unify the generic arguments o with the arguments of any opaque type already in the opaque types storage. If so, we unify the previously stored type with the expected type of this `normalizes-to` call: [source][eq-prev][^1]. -If not, we insert the expected type in the opaque types storage: [source][insert-storage][^2]. +If not, we insert the expected type in the opaque types storage: [source][insert-storage][^2]. Finally, we check whether the item bounds of the opaque hold for the expected type: [source][item-bounds-ck]. @@ -98,7 +98,7 @@ end up leaking placeholders. The handling of member constraints does not change in the new solver. See the [relevant existing chapter][member-constraints] for that. 
-[member-constraints]: ../borrow_check/region_inference/member_constraints.md +[member-constraints]: ../borrow-check/region-inference/member-constraints.md ## calling methods on opaque types diff --git a/src/doc/rustc-dev-guide/src/stability-guarantees.md b/src/doc/rustc-dev-guide/src/stability-guarantees.md index 21c4f3594d84e..ed548c250c840 100644 --- a/src/doc/rustc-dev-guide/src/stability-guarantees.md +++ b/src/doc/rustc-dev-guide/src/stability-guarantees.md @@ -14,7 +14,7 @@ This page gives an overview of our stability guarantees. ## rustc-dev-guide links * [Stabilizing library features](./stability.md) -* [Stabilizing language features](./stabilization_guide.md) +* [Stabilizing language features](./stabilization-guide.md) * [What qualifies as a bug fix?](./bug-fix-procedure.md#what-qualifies-as-a-bug-fix) ## Exemptions diff --git a/src/doc/rustc-dev-guide/src/stability.md b/src/doc/rustc-dev-guide/src/stability.md index 3c4c65fdd5a88..f2f2dd909fae9 100644 --- a/src/doc/rustc-dev-guide/src/stability.md +++ b/src/doc/rustc-dev-guide/src/stability.md @@ -4,7 +4,7 @@ This section is about the stability attributes and schemes that allow stable APIs to use unstable APIs internally in the rustc standard library. **NOTE**: this section is for *library* features, not *language* features. For instructions on -stabilizing a language feature see [Stabilizing Features](./stabilization_guide.md). +stabilizing a language feature see [Stabilizing Features](./stabilization-guide.md). 
## unstable diff --git a/src/doc/rustc-dev-guide/src/stabilization_guide.md b/src/doc/rustc-dev-guide/src/stabilization-guide.md similarity index 99% rename from src/doc/rustc-dev-guide/src/stabilization_guide.md rename to src/doc/rustc-dev-guide/src/stabilization-guide.md index 6167546a2bc83..f159fad89596a 100644 --- a/src/doc/rustc-dev-guide/src/stabilization_guide.md +++ b/src/doc/rustc-dev-guide/src/stabilization-guide.md @@ -49,7 +49,7 @@ The stabilization reports summarizes: The [*Stabilization Template*][srt] includes a series of questions that aim to surface connections between this feature and lang's subteams (e.g. types, opsem, lang-docs, etc.) and to identify items that are commonly overlooked. -[srt]: ./stabilization_report_template.md +[srt]: ./stabilization-report-template.md The stabilization report is typically posted as the main comment on the stabilization PR (see the next section). diff --git a/src/doc/rustc-dev-guide/src/stabilization_report_template.md b/src/doc/rustc-dev-guide/src/stabilization-report-template.md similarity index 99% rename from src/doc/rustc-dev-guide/src/stabilization_report_template.md rename to src/doc/rustc-dev-guide/src/stabilization-report-template.md index 793f7d7e45cff..328907904a6f0 100644 --- a/src/doc/rustc-dev-guide/src/stabilization_report_template.md +++ b/src/doc/rustc-dev-guide/src/stabilization-report-template.md @@ -2,7 +2,7 @@ ## What is this? -This is a template for [stabilization reports](./stabilization_guide.md) of **language features**. The questions aim to solicit the details most often needed. These details help reviewers to identify potential problems upfront. Not all parts of the template will apply to every stabilization. If a question doesn't apply, explain briefly why. +This is a template for [stabilization reports](./stabilization-guide.md) of **language features**. The questions aim to solicit the details most often needed. These details help reviewers to identify potential problems upfront. 
Not all parts of the template will apply to every stabilization. If a question doesn't apply, explain briefly why. Copy everything after the separator and edit it as Markdown. Replace each *TODO* with your answer. diff --git a/src/doc/rustc-dev-guide/src/tests/ci.md b/src/doc/rustc-dev-guide/src/tests/ci.md index ce80b07fe08db..723926f2241fe 100644 --- a/src/doc/rustc-dev-guide/src/tests/ci.md +++ b/src/doc/rustc-dev-guide/src/tests/ci.md @@ -6,16 +6,15 @@ The primary goal of our CI system is to ensure that the `main` branch of From a high-level point of view, when you open a pull request at `rust-lang/rust`, the following will happen: -- A small [subset](#pull-request-builds) of tests and checks are run after each - push to the PR. +- A small [subset](#pull-request-builds) of tests and checks are run after each push to the PR. This should help catch common errors. - When the PR is approved, the [bors] bot enqueues the PR into a [merge queue]. - Once the PR gets to the front of the queue, bors will create a merge commit and run the [full test suite](#auto-builds) on it. The merge commit either contains only one specific PR or it can be a ["rollup"](#rollups) which combines multiple PRs together, to reduce CI costs and merge delays. -- Once the whole test suite finishes, two things can happen. Either CI fails - with an error that needs to be addressed by the developer, or CI succeeds and +- Once the whole test suite finishes, two things can happen. + Either CI fails with an error that needs to be addressed by the developer, or CI succeeds and the merge commit is then pushed to the `main` branch. If you want to modify what gets executed on CI, see [Modifying CI jobs](#modifying-ci-jobs). @@ -303,8 +302,7 @@ This is worth it because these release artifacts: - Allow perf testing even at a later date. - Allow bisection when bugs are discovered later. -- Ensure release quality since if we're always releasing, we can catch problems - early. 
+- Ensure release quality since if we're always releasing, we can catch problems early. ### Rollups @@ -449,8 +447,7 @@ If you want to determine which `bootstrap.toml` settings are used in CI for a particular job, it is probably easiest to just look at the build log. To do this: -1. Go to - +1. Go to to find the most recently successful build, and click on it. 2. Choose the job you are interested in on the left-hand side. 3. Click on the gear icon and choose "View raw logs" diff --git a/src/doc/rustc-dev-guide/src/tests/compiletest.md b/src/doc/rustc-dev-guide/src/tests/compiletest.md index 64276a9ea451a..5c4dfb6e0dd19 100644 --- a/src/doc/rustc-dev-guide/src/tests/compiletest.md +++ b/src/doc/rustc-dev-guide/src/tests/compiletest.md @@ -2,8 +2,8 @@ ## Introduction -`compiletest` is the main test harness of the Rust test suite. It allows test -authors to organize large numbers of tests (the Rust compiler has many +`compiletest` is the main test harness of the Rust test suite. +It allows test authors to organize large numbers of tests (the Rust compiler has many thousands), efficient test execution (parallel execution is supported), and allows the test author to configure behavior and expected results of both individual and groups of tests. @@ -22,9 +22,10 @@ individual and groups of tests. `compiletest` may check test code for compile-time or run-time success/failure. Tests are typically organized as a Rust source file with annotations in comments -before and/or within the test code. These comments serve to direct `compiletest` -on if or how to run the test, what behavior to expect, and more. See -[directives](directives.md) and the test suite documentation below for more details +before and/or within the test code. +These comments serve to direct `compiletest` +on if or how to run the test, what behavior to expect, and more. +See [directives](directives.md) and the test suite documentation below for more details on these annotations. 
See the [Adding new tests](adding.md) and [Best practices](best-practices.md) @@ -40,16 +41,18 @@ Additionally, bootstrap accepts several common arguments directly, e.g. `x test --no-capture --force-rerun --run --pass`. Compiletest itself tries to avoid running tests when the artifacts that are -involved (mainly the compiler) haven't changed. You can use `x test --test-args +involved (mainly the compiler) haven't changed. +You can use `x test --test-args --force-rerun` to rerun a test even when none of the inputs have changed. ## Test suites -All of the tests are in the [`tests`] directory. The tests are organized into -"suites", with each suite in a separate subdirectory. Each test suite behaves a -little differently, with different compiler behavior and different checks for -correctness. For example, the [`tests/incremental`] directory contains tests for -incremental compilation. The various suites are defined in +All of the tests are in the [`tests`] directory. +The tests are organized into "suites", with each suite in a separate subdirectory. +Each test suite behaves a +little differently, with different compiler behavior and different checks for correctness. +For example, the [`tests/incremental`] directory contains tests for incremental compilation. +The various suites are defined in [`src/tools/compiletest/src/common.rs`] in the `pub enum Mode` declaration. The following test suites are available, with links for more information: @@ -80,7 +83,7 @@ The following test suites are available, with links for more information: ### The build-std test suite -[`build-std`](#build-std-tests) test that -Zbuild-std works. +[`build-std`](#build-std-tests) tests that -Zbuild-std works. ### Rustdoc test suites @@ -105,10 +108,9 @@ Run-make tests pertaining to rustdoc are typically named `run-make/rustdoc-*/`. ### Pretty-printer tests -The tests in [`tests/pretty`] exercise the "pretty-printing" functionality of -`rustc`. 
The `-Z unpretty` CLI option for `rustc` causes it to translate the -input source into various different formats, such as the Rust source after macro -expansion. +The tests in [`tests/pretty`] exercise the "pretty-printing" functionality of `rustc`. +The `-Z unpretty` CLI option for `rustc` causes it to translate the +input source into various different formats, such as the Rust source after macro expansion. The pretty-printer tests have several [directives](directives.md) described below. These commands can significantly change the behavior of the test, but the @@ -125,17 +127,20 @@ If any of the commands above fail, then the test fails. The directives for pretty-printing tests are: - `pretty-mode` specifies the mode pretty-print tests should run in (that is, - the argument to `-Zunpretty`). The default is `normal` if not specified. + the argument to `-Zunpretty`). + The default is `normal` if not specified. - `pretty-compare-only` causes a pretty test to only compare the pretty-printed - output (stopping after step 3 from above). It will not try to compile the - expanded output to type check it. This is needed for a pretty-mode that does - not expand to valid Rust, or for other situations where the expanded output - cannot be compiled. + output (stopping after step 3 from above). + It will not try to compile the expanded output to type check it. + This is needed for a pretty-mode that does + not expand to valid Rust, or for other situations where the expanded output cannot be compiled. - `pp-exact` is used to ensure a pretty-print test results in specific output. If specified without a value, then it means the pretty-print output should - match the original source. If specified with a value, as in `//@ + match the original source. + If specified with a value, as in `//@ pp-exact:foo.pp`, it will ensure that the pretty-printed output matches the - contents of the given file. Otherwise, if `pp-exact` is not specified, then + contents of the given file. 
+ Otherwise, if `pp-exact` is not specified, then the pretty-printed output will be pretty-printed one more time, and the output of the two pretty-printing rounds will be compared to ensure that the pretty-printed output converges to a steady state. @@ -144,13 +149,12 @@ The directives for pretty-printing tests are: ### Incremental tests -The tests in [`tests/incremental`] exercise incremental compilation. They use -[`revisions` directive](#revisions) to tell compiletest to run the compiler in a +The tests in [`tests/incremental`] exercise incremental compilation. +They use [`revisions` directive](#revisions) to tell compiletest to run the compiler in a series of steps. Compiletest starts with an empty directory with the `-C incremental` flag, and -then runs the compiler for each revision, reusing the incremental results from -previous steps. +then runs the compiler for each revision, reusing the incremental results from previous steps. The revisions should start with: @@ -158,8 +162,7 @@ The revisions should start with: * `rfail` — the test should compile successfully, but the executable should fail to run * `cfail` — the test should fail to compile -To make the revisions unique, you should add a suffix like `rpass1` and -`rpass2`. +To make the revisions unique, you should add a suffix like `rpass1` and `rpass2`. To simulate changing the source, compiletest also passes a `--cfg` flag with the current revision name. @@ -183,30 +186,31 @@ fn main() { foo(); } ``` `cfail` tests support the `forbid-output` directive to specify that a certain -substring must not appear anywhere in the compiler output. This can be useful to -ensure certain errors do not appear, but this can be fragile as error messages -change over time, and a test may no longer be checking the right thing but will -still pass. +substring must not appear anywhere in the compiler output. 
+This can be useful to ensure certain errors do not appear, but this can be fragile as error messages +change over time, and a test may no longer be checking the right thing but will still pass. `cfail` tests support the `should-ice` directive to specify that a test should -cause an Internal Compiler Error (ICE). This is a highly specialized directive +cause an Internal Compiler Error (ICE). +This is a highly specialized directive to check that the incremental cache continues to work after an ICE. -Incremental tests may use the attribute `#[rustc_clean(...)]` attribute. This attribute compares -the fingerprint from the current compilation session with the previous one. +Incremental tests may use the `#[rustc_clean(...)]` attribute. +This attribute compares the fingerprint from the current compilation session with the previous one. The first revision should never have an active `rustc_clean` attribute, since it will always be dirty. -In the default mode, it asserts that the fingerprints must be the same. +In the default mode, it asserts that the fingerprints must be the same. The attribute takes the following arguments: * `cfg=""` — checks the cfg condition ``, and only runs the check if the config condition evaluates to true. This can be used to only run the `rustc_clean` attribute in a specific revision. -* `except=",,..."` — asserts that the query results for the listed queries must be different, +* `except=",,..."` — asserts that the query results for the listed queries must be different, rather than the same. -* `loaded_from_disk=",,..."` — asserts that the query results for the listed queries - were actually loaded from disk (not just marked green). +* `loaded_from_disk=",,..."` — asserts that the query results for the listed queries + were actually loaded from disk (not just marked green). This can be useful to ensure that a test is actually exercising the deserialization - logic for a particular query result. This can be combined with `except`. 
+ logic for a particular query result. + This can be combined with `except`. A simple example of a test using `rustc_clean` is the [hello_world test]. @@ -215,9 +219,9 @@ A simple example of a test using `rustc_clean` is the [hello_world test]. ### Debuginfo tests -The tests in [`tests/debuginfo`] test debuginfo generation. They build a -program, launch a debugger, and issue commands to the debugger. A single test -can work with cdb, gdb, and lldb. +The tests in [`tests/debuginfo`] test debuginfo generation. +They build a program, launch a debugger, and issue commands to the debugger. +A single test can work with cdb, gdb, and lldb. Most tests should have the `//@ compile-flags: -g` directive or something similar to generate the appropriate debuginfo. @@ -228,8 +232,7 @@ The debuginfo tests consist of a series of debugger commands along with "check" lines which specify output that is expected from the debugger. The commands are comments of the form `// $DEBUGGER-command:$COMMAND` where -`$DEBUGGER` is the debugger being used and `$COMMAND` is the debugger command -to execute. +`$DEBUGGER` is the debugger being used and `$COMMAND` is the debugger command to execute. The debugger values can be: @@ -245,8 +248,7 @@ The command to check the output are of the form `// $DEBUGGER-check:$OUTPUT` where `$OUTPUT` is the output to expect. 
For example, the following will build the test, start the debugger, set a -breakpoint, launch the program, inspect a value, and check what the debugger -prints: +breakpoint, launch the program, inspect a value, and check what the debugger prints: ```rust,ignore //@ compile-flags: -g @@ -268,17 +270,16 @@ the debugger currently being used: - `min-cdb-version: 10.0.18317.1001` — ignores the test if the version of cdb is below the given version -- `min-gdb-version: 8.2` — ignores the test if the version of gdb is below the - given version +- `min-gdb-version: 8.2` — ignores the test if the version of gdb is below the given version - `ignore-gdb-version: 9.2` — ignores the test if the version of gdb is equal to the given version - `ignore-gdb-version: 7.11.90 - 8.0.9` — ignores the test if the version of gdb is in a range (inclusive) -- `min-lldb-version: 310` — ignores the test if the version of lldb is below - the given version -- `rust-lldb` — ignores the test if lldb is not contain the Rust plugin. NOTE: - The "Rust" version of LLDB doesn't exist anymore, so this will always be - ignored. This should probably be removed. +- `min-lldb-version: 310` — ignores the test if the version of lldb is below the given version +- `rust-lldb` — ignores the test if lldb does not contain the Rust plugin. + NOTE: The "Rust" version of LLDB doesn't exist anymore, so this will always be + ignored. + This should probably be removed. By passing the `--debugger` option to compiletest, you can specify a single debugger to run tests with. For example, `./x test tests/debuginfo -- --debugger gdb` will only test GDB commands. @@ -287,12 +288,12 @@ For example, `./x test tests/debuginfo -- --debugger gdb` will only test GDB com > > If you want to run lldb debuginfo tests locally, then currently on Windows it > is required that: -> +> - You have Python 3.10 installed. > - You have `python310.dll` available in your `PATH` env var. 
This is not > provided by the standard Python installer you obtain from `python.org`; you > need to add this to `PATH` manually. -> +> > Otherwise the lldb debuginfo tests can produce crashes in mysterious ways. [`tests/debuginfo`]: https://github.com/rust-lang/rust/tree/HEAD/tests/debuginfo @@ -311,17 +312,18 @@ For example, `./x test tests/debuginfo -- --debugger gdb` will only test GDB com ### Codegen tests -The tests in [`tests/codegen-llvm`] test LLVM code generation. They compile the -test with the `--emit=llvm-ir` flag to emit LLVM IR. They then run the LLVM -[FileCheck] tool. The test is annotated with various `// CHECK` comments to -check the generated code. See the [FileCheck] documentation for a tutorial and -more information. +The tests in [`tests/codegen-llvm`] test LLVM code generation. +They compile the test with the `--emit=llvm-ir` flag to emit LLVM IR. +They then run the LLVM [FileCheck] tool. +The test is annotated with various `// CHECK` comments to check the generated code. +See the [FileCheck] documentation for a tutorial and more information. See also the [assembly tests](#assembly-tests) for a similar set of tests. By default, codegen tests will have `//@ needs-target-std` *implied* (that the target needs to support std), *unless* the `#![no_std]`/`#![no_core]` attribute -was specified in the test source. You can override this behavior and explicitly +was specified in the test source. +You can override this behavior and explicitly write `//@ needs-target-std` to only run the test when target supports std, even if the test is `#![no_std]`/`#![no_core]`. @@ -334,17 +336,15 @@ If you need to work with `#![no_std]` cross-compiling tests, consult the ### Assembly tests -The tests in [`tests/assembly-llvm`] test LLVM assembly output. They compile the test -with the `--emit=asm` flag to emit a `.s` file with the assembly output. They -then run the LLVM [FileCheck] tool. +The tests in [`tests/assembly-llvm`] test LLVM assembly output. 
+They compile the test with the `--emit=asm` flag to emit a `.s` file with the assembly output. +They then run the LLVM [FileCheck] tool. Each test should be annotated with the `//@ assembly-output:` directive with a -value of either `emit-asm` or `ptx-linker` to indicate the type of assembly -output. +value of either `emit-asm` or `ptx-linker` to indicate the type of assembly output. -Then, they should be annotated with various `// CHECK` comments to check the -assembly output. See the [FileCheck] documentation for a tutorial and more -information. +Then, they should be annotated with various `// CHECK` comments to check the assembly output. +See the [FileCheck] documentation for a tutorial and more information. See also the [codegen tests](#codegen-tests) for a similar set of tests. @@ -364,13 +364,11 @@ monomorphization collection pass, i.e., `-Zprint-mono-items`, and then special annotations in the file are used to compare against that. Then, the test should be annotated with comments of the form `//~ MONO_ITEM -name` where `name` is the monomorphized string printed by rustc like `fn ::foo`. +name` where `name` is the monomorphized string printed by rustc like `fn ::foo`. To check for CGU partitioning, a comment of the form `//~ MONO_ITEM name @@ cgu` -where `cgu` is a space separated list of the CGU names and the linkage -information in brackets. For example: `//~ MONO_ITEM static function::FOO @@ -statics[Internal]` +where `cgu` is a space separated list of the CGU names and the linkage information in brackets. +For example: `//~ MONO_ITEM static function::FOO @@ statics[Internal]` [`tests/codegen-units`]: https://github.com/rust-lang/rust/tree/HEAD/tests/codegen-units @@ -378,8 +376,8 @@ statics[Internal]` ### Mir-opt tests The tests in [`tests/mir-opt`] check parts of the generated MIR to make sure it -is generated correctly and is doing the expected optimizations. Check out the -[MIR Optimizations](../mir/optimizations.md) chapter for more. 
+is generated correctly and is doing the expected optimizations. +Check out the [MIR Optimizations](../mir/optimizations.md) chapter for more. Compiletest will build the test with several flags to dump the MIR output and set a baseline for optimizations: @@ -391,23 +389,24 @@ set a baseline for optimizations: * `-Zdump-mir-exclude-pass-number` The test should be annotated with `// EMIT_MIR` comments that specify files that -will contain the expected MIR output. You can use `x test --bless` to create the -initial expected files. +will contain the expected MIR output. +You can use `x test --bless` to create the initial expected files. There are several forms the `EMIT_MIR` comment can take: - `// EMIT_MIR $MIR_PATH.mir` — This will check that the given filename matches - the exact output from the MIR dump. For example, + the exact output from the MIR dump. + For example, `my_test.main.SimplifyCfg-elaborate-drops.after.mir` will load that file from the test directory, and compare it against the dump from rustc. Checking the "after" file (which is after optimization) is useful if you are - interested in the final state after an optimization. Some rare cases may want - to use the "before" file for completeness. + interested in the final state after an optimization. + Some rare cases may want to use the "before" file for completeness. - `// EMIT_MIR $MIR_PATH.diff` — where `$MIR_PATH` is the filename of the MIR - dump, such as `my_test_name.my_function.EarlyOtherwiseBranch`. Compiletest - will diff the `.before.mir` and `.after.mir` files, and compare the diff + dump, such as `my_test_name.my_function.EarlyOtherwiseBranch`. + Compiletest will diff the `.before.mir` and `.after.mir` files, and compare the diff output to the expected `.diff` file from the `EMIT_MIR` comment. This is useful if you want to see how an optimization changes the MIR. @@ -417,8 +416,8 @@ There are several forms the `EMIT_MIR` comment can take: check that the output matches the given file. 
By default 32 bit and 64 bit targets use the same dump files, which can be -problematic in the presence of pointers in constants or other bit width -dependent things. In that case you can add `// EMIT_MIR_FOR_EACH_BIT_WIDTH` to +problematic in the presence of pointers in constants or other bit width dependent things. +In that case you can add `// EMIT_MIR_FOR_EACH_BIT_WIDTH` to your test, causing separate files to be generated for 32bit and 64bit systems. [`tests/mir-opt`]: https://github.com/rust-lang/rust/tree/HEAD/tests/mir-opt @@ -428,9 +427,8 @@ your test, causing separate files to be generated for 32bit and 64bit systems. The tests in [`tests/run-make`] and [`tests/run-make-cargo`] are general-purpose tests using Rust *recipes*, which are small programs (`rmake.rs`) allowing -arbitrary Rust code such as `rustc` invocations, and is supported by a -[`run_make_support`] library. Using Rust recipes provide the ultimate in -flexibility. +arbitrary Rust code such as `rustc` invocations, and is supported by a [`run_make_support`] library. +Using Rust recipes provides the ultimate in flexibility. `run-make` tests should be used if no other test suites better suit your needs. @@ -441,9 +439,11 @@ faster-to-iterate test suite). ### `build-std` tests -The tests in [`tests/build-std`] check that `-Zbuild-std` works. This is currently -just a run-make test suite with a single recipe. The recipe generates test cases -and runs them in parallel. +The tests in [`tests/build-std`] check that `-Zbuild-std` works. +This is currently just a run-make test suite with a single recipe. +The recipe generates test cases and runs them in parallel. + +[`tests/build-std`]: https://github.com/rust-lang/rust/tree/HEAD/tests/build-std #### Using Rust recipes @@ -455,18 +455,16 @@ If you need new utilities or functionality, consider extending and improving the [`run_make_support`] library. Compiletest directives like `//@ only-` or `//@ ignore-` are -supported in `rmake.rs`, like in UI tests. 
However, revisions or building -auxiliary via directives are not currently supported. +supported in `rmake.rs`, like in UI tests. +However, revisions or building auxiliary via directives are not currently supported. `rmake.rs` and `run-make-support` may *not* use any nightly/unstable features, -as they must be compilable by a stage 0 rustc that may be a beta or even stable -rustc. +as they must be compilable by a stage 0 rustc that may be a beta or even stable rustc. #### Quickly check if `rmake.rs` tests can be compiled You can quickly check if `rmake.rs` tests can be compiled without having to -build stage1 rustc by forcing `rmake.rs` to be compiled with the stage0 -compiler: +build stage1 rustc by forcing `rmake.rs` to be compiled with the stage0 compiler: ```bash $ COMPILETEST_FORCE_STAGE0=1 x test --stage 0 tests/run-make/ @@ -523,7 +521,8 @@ Then add a corresponding entry to `"rust-analyzer.linkedProjects"` ### Coverage tests The tests in [`tests/coverage`] are shared by multiple test modes that test -coverage instrumentation in different ways. Running the `coverage` test suite +coverage instrumentation in different ways. +Running the `coverage` test suite will automatically run each test in all of the different coverage modes. Each mode also has an alias to run the coverage tests in just that mode: @@ -541,31 +540,28 @@ Each mode also has an alias to run the coverage tests in just that mode: ``` If a particular test should not be run in one of the coverage test modes for -some reason, use the `//@ ignore-coverage-map` or `//@ ignore-coverage-run` -directives. +some reason, use the `//@ ignore-coverage-map` or `//@ ignore-coverage-run` directives. #### `coverage-map` suite In `coverage-map` mode, these tests verify the mappings between source code -regions and coverage counters that are emitted by LLVM. 
They compile the test -with `--emit=llvm-ir`, then use a custom tool ([`src/tools/coverage-dump`]) to -extract and pretty-print the coverage mappings embedded in the IR. These tests -don't require the profiler runtime, so they run in PR CI jobs and are easy to +regions and coverage counters that are emitted by LLVM. +They compile the test with `--emit=llvm-ir`, then use a custom tool ([`src/tools/coverage-dump`]) to +extract and pretty-print the coverage mappings embedded in the IR. +These tests don't require the profiler runtime, so they run in PR CI jobs and are easy to run/bless locally. These coverage map tests can be sensitive to changes in MIR lowering or MIR -optimizations, producing mappings that are different but produce identical -coverage reports. +optimizations, producing mappings that are different but produce identical coverage reports. As a rule of thumb, any PR that doesn't change coverage-specific code should **feel free to re-bless** the `coverage-map` tests as necessary, without -worrying about the actual changes, as long as the `coverage-run` tests still -pass. +worrying about the actual changes, as long as the `coverage-run` tests still pass. #### `coverage-run` suite -In `coverage-run` mode, these tests perform an end-to-end test of coverage -reporting. They compile a test program with coverage instrumentation, run that +In `coverage-run` mode, these tests perform an end-to-end test of coverage reporting. +They compile a test program with coverage instrumentation, run that program to produce raw coverage data, and then use LLVM tools to process that data into a human-readable code coverage report. @@ -585,8 +581,8 @@ as part of the full set of CI jobs used for merging. #### `coverage-run-rustdoc` suite The tests in [`tests/coverage-run-rustdoc`] also run instrumented doctests and -include them in the coverage report. This avoids having to build rustdoc when -only running the main `coverage` suite. +include them in the coverage report. 
+This avoids having to build rustdoc when only running the main `coverage` suite. [`tests/coverage`]: https://github.com/rust-lang/rust/tree/HEAD/tests/coverage [`src/tools/coverage-dump`]: https://github.com/rust-lang/rust/tree/HEAD/src/tools/coverage-dump @@ -595,13 +591,12 @@ only running the main `coverage` suite. ### Crash tests [`tests/crashes`] serve as a collection of tests that are expected to cause the -compiler to ICE, panic or crash in some other way, so that accidental fixes are -tracked. Formerly, this was done at but +compiler to ICE, panic or crash in some other way, so that accidental fixes are tracked. +Formerly, this was done at but doing it inside the rust-lang/rust testsuite is more convenient. -It is imperative that a test in the suite causes rustc to ICE, panic, or -crash in some other way. A test will "pass" if rustc exits with an exit status -other than 1 or 0. +It is imperative that a test in the suite causes rustc to ICE, panic, or crash in some other way. +A test will "pass" if rustc exits with an exit status other than 1 or 0. If you want to see verbose stdout/stderr, you need to set `COMPILETEST_VERBOSE_CRASHES=1`, e.g. @@ -610,18 +605,17 @@ If you want to see verbose stdout/stderr, you need to set $ COMPILETEST_VERBOSE_CRASHES=1 ./x test tests/crashes/999999.rs --stage 1 ``` -Anyone can add ["untracked" crashes] from the issue tracker. It's strongly -recommended to include test cases from several issues in a single PR. +Anyone can add ["untracked" crashes] from the issue tracker. +It's strongly recommended to include test cases from several issues in a single PR. When you do so, each issue number should be noted in the file name (`12345.rs` -should suffice) and also inside the file by means of a `//@ known-bug: #12345` -directive. Please [label][labeling] the relevant issues with `S-bug-has-test` -once your PR is merged. +should suffice) and also inside the file by means of a `//@ known-bug: #12345` directive. 
+Please [label][labeling] the relevant issues with `S-bug-has-test` once your PR is merged. If you happen to fix one of the crashes, please move it to a fitting -subdirectory in `tests/ui` and give it a meaningful name. Please add a doc -comment at the top of the file explaining why this test exists, even better if -you can briefly explain how the example causes rustc to crash previously and -what was done to prevent rustc to ICE / panic / crash. +subdirectory in `tests/ui` and give it a meaningful name. +Please add a doc comment at the top of the file explaining why this test exists. +Even better will be if you can briefly explain how the example caused rustc to crash previously, +and what was done to fix it. Adding @@ -633,8 +627,8 @@ Fixes #MMMMM to the description of your pull request will ensure the corresponding tickets be closed automatically upon merge. -Make sure that your fix actually fixes the root cause of the issue and not just -a subset first. The issue numbers can be found in the file name or the `//@ +Make sure that your fix actually fixes the root cause of the issue and not just a subset first. +The issue numbers can be found in the file name or the `//@ known-bug` directive inside the test file. [`tests/crashes`]: https://github.com/rust-lang/rust/tree/HEAD/tests/crashes @@ -652,8 +646,8 @@ There are multiple [directives](directives.md) to assist with that: - `aux-codegen-backend` - `proc-macro` -`aux-build` will build a separate crate from the named source file. The source -file should be in a directory called `auxiliary` beside the test file. +`aux-build` will build a separate crate from the named source file. +The source file should be in a directory called `auxiliary` beside the test file. 
```rust,ignore //@ aux-build: my-helper.rs @@ -663,44 +657,48 @@ extern crate my_helper; ``` The aux crate will be built as a dylib if possible (unless on a platform that -does not support them, or the `no-prefer-dynamic` header is specified in the aux -file). The `-L` flag is used to find the extern crates. +does not support them, or the `no-prefer-dynamic` header is specified in the aux file). +The `-L` flag is used to find the extern crates. -`aux-crate` is very similar to `aux-build`. However, it uses the `--extern` flag +`aux-crate` is very similar to `aux-build`. +However, it uses the `--extern` flag to link to the extern crate to make the crate be available as an extern prelude. That allows you to specify the additional syntax of the `--extern` flag, such as -renaming a dependency. For example, `//@ aux-crate:foo=bar.rs` will compile +renaming a dependency. +For example, `//@ aux-crate:foo=bar.rs` will compile `auxiliary/bar.rs` and make it available under then name `foo` within the test. -This is similar to how Cargo does dependency renaming. It is also possible to +This is similar to how Cargo does dependency renaming. +It is also possible to specify [`--extern` modifiers](https://github.com/rust-lang/rust/issues/98405). For example, `//@ aux-crate:noprelude:foo=bar.rs`. -`aux-bin` is similar to `aux-build` but will build a binary instead of a -library. The binary will be available in `auxiliary/bin` relative to the working -directory of the test. +`aux-bin` is similar to `aux-build` but will build a binary instead of a library. +The binary will be available in `auxiliary/bin` relative to the working directory of the test. `aux-codegen-backend` is similar to `aux-build`, but will then pass the compiled -dylib to `-Zcodegen-backend` when building the main file. This will only work -for tests in `tests/ui-fulldeps`, since it requires the use of compiler crates. +dylib to `-Zcodegen-backend` when building the main file. 
+This will only work for tests in `tests/ui-fulldeps`, since it requires the use of compiler crates. ### Auxiliary proc-macro If you want a proc-macro dependency, then you can use the `proc-macro` directive. This directive behaves just like `aux-build`, i.e. that you should place the proc-macro test auxiliary file under a `auxiliary` folder under the -same parent folder as the main test file. However, it also has four additional +same parent folder as the main test file. +However, it also has four additional preset behavior compared to `aux-build` for the proc-macro test auxiliary: 1. The aux test file is built with `--crate-type=proc-macro`. 2. The aux test file is built without `-C prefer-dynamic`, i.e. it will not try to produce a dylib for the aux crate. 3. The aux crate is made available to the test file via extern prelude with - `--extern `. Note that since UI tests default to edition + `--extern `. + Note that since UI tests default to edition 2015, you still need to specify `extern ` unless the main test file is using an edition that is 2018 or newer if you want to use the aux crate name in a `use` import. -4. The `proc_macro` crate is made available as an extern prelude module. Same - edition 2015 vs newer edition distinction for `extern proc_macro;` applies. +4. The `proc_macro` crate is made available as an extern prelude module. + The same edition 2015 vs newer edition distinction for `extern proc_macro;` applies. For example, you might have a test `tests/ui/cat/meow.rs` and proc-macro auxiliary `tests/ui/cat/auxiliary/whiskers.rs`: @@ -742,19 +740,19 @@ pub fn identity(ts: TokenStream) -> TokenStream { ## Revisions -Revisions allow a single test file to be used for multiple tests. This is done -by adding a special directive at the top of the file: +Revisions allow a single test file to be used for multiple tests. 
+This is done by adding a special directive at the top of the file: ```rust,ignore //@ revisions: foo bar baz ``` This will result in the test being compiled (and tested) three times, once with -`--cfg foo`, once with `--cfg bar`, and once with `--cfg baz`. You can therefore -use `#[cfg(foo)]` etc within the test to tweak each of these results. +`--cfg foo`, once with `--cfg bar`, and once with `--cfg baz`. +You can therefore use `#[cfg(foo)]` etc within the test to tweak each of these results. -You can also customize directives and expected error messages to a particular -revision. To do this, add `[revision-name]` after the `//@` for directives, and +You can also customize directives and expected error messages to a particular revision. +To do this, add `[revision-name]` after the `//@` for directives, and after `//` for UI error annotations, like so: ```rust,ignore @@ -767,8 +765,7 @@ fn test_foo() { } ``` -Multiple revisions can be specified in a comma-separated list, such as -`//[foo,bar,baz]~^`. +Multiple revisions can be specified in a comma-separated list, such as `//[foo,bar,baz]~^`. In test suites that use the LLVM [FileCheck] tool, the current revision name is also registered as an additional prefix for FileCheck directives: @@ -785,10 +782,10 @@ also registered as an additional prefix for FileCheck directives: fn main() {} ``` -Note that not all directives have meaning when customized to a revision. For -example, the `ignore-test` directives (and all "ignore" directives) currently -only apply to the test as a whole, not to particular revisions. The only -directives that are intended to really work when customized to a revision are +Note that not all directives have meaning when customized to a revision. +For example, the `ignore-test` directives (and all "ignore" directives) currently +only apply to the test as a whole, not to particular revisions. 
+The only directives that are intended to really work when customized to a revision are error patterns and compiler flags. @@ -800,14 +797,13 @@ The following test suites support revisions: - coverage - debuginfo - rustdoc UI tests -- incremental (these are special in that they inherently cannot be run in - parallel) +- incremental (these are special in that they inherently cannot be run in parallel) ### Ignoring unused revision names Normally, revision names mentioned in other directives and error annotations -must correspond to an actual revision declared in a `revisions` directive. This is -enforced by an `./x test tidy` check. +must correspond to an actual revision declared in a `revisions` directive. +This is enforced by an `./x test tidy` check. If a revision name needs to be temporarily removed from the revision list for some reason, the above check can be suppressed by adding the revision name to an @@ -823,8 +819,7 @@ used to compare the behavior of all tests with different compiler flags enabled. This can help highlight what differences might appear with certain flags, and check for any problems that might arise. -To run the tests in a different mode, you need to pass the `--compare-mode` CLI -flag: +To run the tests in a different mode, you need to pass the `--compare-mode` CLI flag: ```bash ./x test tests/ui --compare-mode=chalk @@ -834,20 +829,17 @@ The possible compare modes are: - `polonius` — Runs with Polonius with `-Zpolonius`. - `chalk` — Runs with Chalk with `-Zchalk`. -- `split-dwarf` — Runs with unpacked split-DWARF with - `-Csplit-debuginfo=unpacked`. -- `split-dwarf-single` — Runs with packed split-DWARF with - `-Csplit-debuginfo=packed`. +- `split-dwarf` — Runs with unpacked split-DWARF with `-Csplit-debuginfo=unpacked`. +- `split-dwarf-single` — Runs with packed split-DWARF with `-Csplit-debuginfo=packed`. See [UI compare modes](ui.md#compare-modes) for more information about how UI tests support different output for different modes. 
-In CI, compare modes are only used in one Linux builder, and only with the -following settings: +In CI, compare modes are only used in one Linux builder, and only with the following settings: -- `tests/debuginfo`: Uses `split-dwarf` mode. This helps ensure that none of the - debuginfo tests are affected when enabling split-DWARF. +- `tests/debuginfo`: Uses `split-dwarf` mode. + This helps ensure that none of the debuginfo tests are affected when enabling split-DWARF. -Note that compare modes are separate to [revisions](#revisions). All revisions -are tested when running `./x test tests/ui`, however compare-modes must be +Note that compare modes are separate to [revisions](#revisions). +All revisions are tested when running `./x test tests/ui`, however compare-modes must be manually run individually via the `--compare-mode` flag. diff --git a/src/doc/rustc-dev-guide/src/tests/perf.md b/src/doc/rustc-dev-guide/src/tests/perf.md index a0aa3c0331745..567b2d7a97e48 100644 --- a/src/doc/rustc-dev-guide/src/tests/perf.md +++ b/src/doc/rustc-dev-guide/src/tests/perf.md @@ -19,7 +19,7 @@ The result of a perf run is a comparison between two versions of the compiler (by their commit hashes). You can also use `rustc-perf` to manually benchmark and profile the compiler -[locally](../profiling/with_rustc_perf.md). +[locally](../profiling/with-rustc-perf.md). ### Automatic perf runs diff --git a/src/doc/rustc-dev-guide/src/tests/ui.md b/src/doc/rustc-dev-guide/src/tests/ui.md index e13419d1e01cc..7332d1fb38515 100644 --- a/src/doc/rustc-dev-guide/src/tests/ui.md +++ b/src/doc/rustc-dev-guide/src/tests/ui.md @@ -1,20 +1,18 @@ # UI tests -UI tests are a particular [test suite](compiletest.md#test-suites) of -compiletest. +UI tests are a particular [test suite](compiletest.md#test-suites) of compiletest. 
## Introduction The tests in [`tests/ui`] are a collection of general-purpose tests which primarily focus on validating the console output of the compiler, but can be -used for many other purposes. For example, tests can also be configured to [run -the resulting program](#controlling-passfail-expectations) to verify its -behavior. +used for many other purposes. +For example, tests can also be configured to [run +the resulting program](#controlling-passfail-expectations) to verify its behavior. For a survey of each subdirectory's purpose under `tests/ui`, consult the [README.md](https://github.com/rust-lang/rust/tree/HEAD/tests/ui/README.md). -This is useful if you write a new test, and are looking for a category to -place it in. +This is useful if you write a new test, and are looking for a category to place it in. If you need to work with `#![no_std]` cross-compiling tests, consult the [`minicore` test auxiliary](./minicore.md) chapter. @@ -28,61 +26,63 @@ A test consists of a Rust source file located in the `tests/ui` directory. and testing category - placing tests directly in `tests/ui` is not permitted. Compiletest will use `rustc` to compile the test, and compare the output against -the expected output which is stored in a `.stdout` or `.stderr` file located -next to the test. See [Output comparison](#output-comparison) for more. +the expected output which is stored in a `.stdout` or `.stderr` file located next to the test. +See [Output comparison](#output-comparison) for more. -Additionally, errors and warnings should be annotated with comments within the -source file. See [Error annotations](#error-annotations) for more. +Additionally, errors and warnings should be annotated with comments within the source file. +See [Error annotations](#error-annotations) for more. Compiletest [directives](directives.md) in the form of special comments prefixed with `//@` control how the test is compiled and what the expected behavior is. 
-Tests are expected to fail to compile, since most tests are testing compiler -errors. You can change that behavior with a directive, see [Controlling +Tests are expected to fail to compile, since most tests are testing compiler errors. +You can change that behavior with a directive, see [Controlling pass/fail expectations](#controlling-passfail-expectations). -By default, a test is built as an executable binary. If you need a different -crate type, you can use the `#![crate_type]` attribute to set it as needed. +By default, a test is built as an executable binary. +If you need a different crate type, you can use the `#![crate_type]` attribute to set it as needed. ## Output comparison UI tests store the expected output from the compiler in `.stderr` and `.stdout` -snapshots next to the test. You normally generate these files with the `--bless` -CLI option, and then inspect them manually to verify they contain what you -expect. +snapshots next to the test. +You normally generate these files with the `--bless` +CLI option, and then inspect them manually to verify they contain what you expect. The output is normalized to ignore unwanted differences, see the -[Normalization](#normalization) section. If the file is missing, then -compiletest expects the corresponding output to be empty. +[Normalization](#normalization) section. +If the file is missing, then compiletest expects the corresponding output to be empty. A common reason to use normalization, revisions, and most of the other following tools, -is to account for platform differences. Consider alternatives to these tools, like +is to account for platform differences. +Consider alternatives to these tools, like e.g. using the `extern "rust-invalid"` ABI that is invalid on every platform instead of fixing the test to use cross-compilation and testing every possibly-invalid ABI. -There can be multiple stdout/stderr files. The general form is: +There can be multiple stdout/stderr files. 
+The general form is: ```text *test-name*`.`*revision*`.`*compare_mode*`.`*extension* ``` -- *test-name* cannot contain dots. This is so that the general form of test +- *test-name* cannot contain dots. + This is so that the general form of test output filenames have a predictable form we can pattern match on in order to track stray test output files. -- *revision* is the [revision](#cfg-revisions) name. This is not included when - not using revisions. -- *compare_mode* is the [compare mode](#compare-modes). This will only be - checked when the given compare mode is active. If the file does not exist, +- *revision* is the [revision](#cfg-revisions) name. + This is not included when not using revisions. +- *compare_mode* is the [compare mode](#compare-modes). + This will only be checked when the given compare mode is active. + If the file does not exist, then compiletest will check for a file without the compare mode. - *extension* is the kind of output being checked: - `stderr` — compiler stderr - `stdout` — compiler stdout - `run.stderr` — stderr when running the test - `run.stdout` — stdout when running the test - - `64bit.stderr` — compiler stderr with `stderr-per-bitwidth` directive on a - 64-bit target - - `32bit.stderr` — compiler stderr with `stderr-per-bitwidth` directive on a - 32-bit target + - `64bit.stderr` — compiler stderr with `stderr-per-bitwidth` directive on a 64-bit target + - `32bit.stderr` — compiler stderr with `stderr-per-bitwidth` directive on a 32-bit target A simple example would be `foo.stderr` next to a `foo.rs` test. A more complex example would be `foo.my-revision.polonius.stderr`. @@ -90,17 +90,16 @@ A more complex example would be `foo.my-revision.polonius.stderr`. There are several [directives](directives.md) which will change how compiletest will check for output files: -- `stderr-per-bitwidth` — checks separate output files based on the target - pointer width. 
Consider using the `normalize-stderr` directive instead (see - [Normalization](#normalization)). +- `stderr-per-bitwidth` — checks separate output files based on the target pointer width. + Consider using the `normalize-stderr` directive instead (see [Normalization](#normalization)). - `dont-check-compiler-stderr` — Ignores stderr from the compiler. - `dont-check-compiler-stdout` — Ignores stdout from the compiler. - `compare-output-by-lines` — Some tests have non-deterministic orders of output, so we need to compare by lines. UI tests run with `-Zdeduplicate-diagnostics=no` flag which disables rustc's -built-in diagnostic deduplication mechanism. This means you may see some -duplicate messages in the output. This helps illuminate situations where -duplicate diagnostics are being generated. +built-in diagnostic deduplication mechanism. +This means you may see some duplicate messages in the output. +This helps illuminate situations where duplicate diagnostics are being generated. ### Normalization @@ -109,22 +108,22 @@ platforms, mainly about filenames. Compiletest makes the following replacements on the compiler output: -- The directory where the test is defined is replaced with `$DIR`. Example: - `/path/to/rust/tests/ui/error-codes` +- The directory where the test is defined is replaced with `$DIR`. + Example: `/path/to/rust/tests/ui/error-codes` - The directory to the standard library source is replaced with `$SRC_DIR`. Example: `/path/to/rust/library` - Line and column numbers for paths in `$SRC_DIR` are replaced with `LL:COL`. This helps ensure that changes to the layout of the standard library do not - cause widespread changes to the `.stderr` files. Example: - `$SRC_DIR/alloc/src/sync.rs:53:46` -- The base directory where the test's output goes is replaced with - `$TEST_BUILD_DIR`. This only comes up in a few rare circumstances. Example: - `/path/to/rust/build/x86_64-unknown-linux-gnu/test/ui` + cause widespread changes to the `.stderr` files. 
+ Example: `$SRC_DIR/alloc/src/sync.rs:53:46` +- The base directory where the test's output goes is replaced with `$TEST_BUILD_DIR`. + This only comes up in a few rare circumstances. + Example: `/path/to/rust/build/x86_64-unknown-linux-gnu/test/ui` - The real directory to the standard library source is replaced with `$SRC_DIR_REAL`. - The real directory to the compiler source is replaced with `$COMPILER_DIR_REAL`. - Tabs are replaced with `\t`. -- Backslashes (`\`) are converted to forward slashes (`/`) within paths (using a - heuristic). This helps normalize differences with Windows-style paths. +- Backslashes (`\`) are converted to forward slashes (`/`) within paths (using a heuristic). + This helps normalize differences with Windows-style paths. - CRLF newlines are converted to LF. - Error line annotations like `//~ ERROR some message` are removed. - Various v0 and legacy symbol hashes are replaced with placeholders like @@ -135,21 +134,23 @@ the compiler itself to apply some changes to the diagnostic output to make it more suitable for UI testing. For example, it will anonymize line numbers in the output (line numbers -prefixing each source line are replaced with `LL`). In extremely rare -situations, this mode can be disabled with the directive `//@ +prefixing each source line are replaced with `LL`). +In extremely rare situations, this mode can be disabled with the directive `//@ compile-flags: -Z ui-testing=no`. -When using `-Z ui-testing=no` the `--diagnostic-width` argument should also +When using `-Z ui-testing=no`, the `--diagnostic-width` argument should also be set to avoid tests failing or passing depending on the width of the terminal from which the UI test suite is being run. Note: The line and column numbers for `-->` lines pointing to the test are *not* -normalized, and left as-is. This ensures that the compiler continues to point to -the correct location, and keeps the stderr files readable. 
Ideally all -line/column information would be retained, but small changes to the source +normalized, and left as-is. +This ensures that the compiler continues to point to +the correct location, and keeps the stderr files readable. +Ideally all line/column information would be retained, but small changes to the source causes large diffs, and more frequent merge conflicts and test errors. -Sometimes these built-in normalizations are not enough. In such cases, you may +Sometimes these built-in normalizations are not enough. +In such cases, you may provide custom normalization rules using `normalize-*` directives, e.g. ```rust,ignore @@ -161,8 +162,8 @@ provide custom normalization rules using `normalize-*` directives, e.g. This tells the test, on 32-bit platforms, whenever the compiler writes `fn() (32 bits)` to stderr, it should be normalized to read `fn() ($PTR bits)` instead. -Similar for 64-bit. The replacement is performed by regexes using default regex -flavor provided by `regex` crate. +Similar for 64-bit. +The replacement is performed by regexes using default regex flavor provided by `regex` crate. The corresponding reference file will use the normalized output to test both 32-bit and 64-bit platforms: @@ -175,16 +176,15 @@ The corresponding reference file will use the normalized output to test both ... ``` -Please see [`ui/transmute/main.rs`][mrs] and [`main.stderr`] for a concrete -usage example. +Please see [`ui/transmute/main.rs`][mrs] and [`main.stderr`] for a concrete usage example. [mrs]: https://github.com/rust-lang/rust/blob/HEAD/tests/ui/transmute/main.rs [`main.stderr`]: https://github.com/rust-lang/rust/blob/HEAD/tests/ui/transmute/main.stderr ## Error annotations -Error annotations specify the errors that the compiler is expected to emit. They -are "attached" to the line in source where the error is located. +Error annotations specify the errors that the compiler is expected to emit. 
+They are "attached" to the line in source where the error is located. ```rust,ignore fn main() { @@ -193,30 +193,30 @@ fn main() { ``` Although UI tests have a `.stderr` file which contains the entire compiler -output, UI tests require that errors are also annotated within the source. This -redundancy helps avoid mistakes since the `.stderr` files are usually -auto-generated. It also helps to directly see where the error spans are expected -to point to by looking at one file instead of having to compare the `.stderr` -file with the source. Finally, they ensure that no additional unexpected errors -are generated. +output, UI tests require that errors are also annotated within the source. +This redundancy helps avoid mistakes since the `.stderr` files are usually +auto-generated. +It also helps to directly see where the error spans are expected +to point to by looking at one file instead of having to compare the `.stderr` file with the source. +Finally, they ensure that no additional unexpected errors are generated. They have several forms, but generally are a comment with the diagnostic level -(such as `ERROR`) and a substring of the expected error output. You don't have -to write out the entire message, just make sure to include the important part of -the message to make it self-documenting. +(such as `ERROR`) and a substring of the expected error output. +You don't have to write out the entire message, +but be sure to include the important part of the message to make it self-documenting. -Most error annotations need to match with the line of the diagnostic. There are -several ways to match the message with the line (see the examples below): +Most error annotations need to match with the line of the diagnostic. +There are several ways to match the message with the line (see the examples below): * `~`: Associates the error level and message with the *current* line -* `~^`: Associates the error level and message with the *previous* error - annotation line. 
Each caret (`^`) that you add adds a line to this, so `~^^^` +* `~^`: Associates the error level and message with the *previous* error annotation line. + Each caret (`^`) that you add adds a line to this, so `~^^^` is three lines above the error annotation line. * `~|`: Associates the error level and message with the *same* line as the *previous comment*. This is more convenient than using multiple carets when there are multiple messages associated with the same line. -* `~v`: Associates the error level and message with the *next* error - annotation line. Each symbol (`v`) that you add adds a line to this, so `~vvv` +* `~v`: Associates the error level and message with the *next* error annotation line. + Each symbol (`v`) that you add adds a line to this, so `~vvv` is three lines below the error annotation line. Example: @@ -260,8 +260,8 @@ fn main() { #### Positioned below error line -Use the `//~^` idiom with number of carets in the string to indicate the number -of lines above. In the example below, the error line is four lines above the +Use the `//~^` idiom with number of carets in the string to indicate the number of lines above. +In the example below, the error line is four lines above the error annotation line so four carets are included in the annotation. ```rust,ignore @@ -296,8 +296,8 @@ fn main() { #### Positioned above error line -Use the `//~v` idiom with number of v's in the string to indicate the number -of lines below. This is typically used in lexer or parser tests matching on errors like unclosed +Use the `//~v` idiom with number of v's in the string to indicate the number of lines below. +This is typically used in lexer or parser tests matching on errors like unclosed delimiter or unclosed literal happening at the end of file. ```rust,ignore @@ -337,8 +337,8 @@ fn main() { ``` We want to ensure this shows "index out of bounds", but we cannot use the `ERROR` -annotation since the runtime error doesn't have any span. 
Then it's time to use the -`error-pattern` directive: +annotation since the runtime error doesn't have any span. +Then it's time to use the `error-pattern` directive: ```rust,ignore //@ error-pattern: index out of bounds @@ -385,7 +385,8 @@ by the compiler instead of or in addition to structured json. `//~` by default. Other kinds only need to be line-annotated if at least one annotation of that kind appears -in the test file. For example, one `//~ NOTE` will also require all other `//~ NOTE`s in the file +in the test file. +For example, one `//~ NOTE` will also require all other `//~ NOTE`s in the file to be written out explicitly. Use directive `//@ dont-require-annotations` to opt out of exhaustive annotations. @@ -398,15 +399,16 @@ for example secondary lines of multiline diagnostics, or ubiquitous diagnostics like `aborting due to N previous errors`. UI tests use the `-A unused` flag by default to ignore all unused warnings, as -unused warnings are usually not the focus of a test. However, simple code -samples often have unused warnings. If the test is specifically testing an +unused warnings are usually not the focus of a test. +However, simple code samples often have unused warnings. +If the test is specifically testing an unused warning, just add the appropriate `#![warn(unused)]` attribute as needed. ### `cfg` revisions When using [revisions](compiletest.md#revisions), different messages can be -conditionally checked based on the current revision. This is done by placing the -revision cfg name in brackets like this: +conditionally checked based on the current revision. +This is done by placing the revision cfg name in brackets like this: ```rust,ignore //@ edition:2018 @@ -428,7 +430,8 @@ In this example, the second error message is only emitted in the `mir` revision. The `thir` revision only emits the first error. If the `cfg` causes the compiler to emit different output, then a test can have -multiple `.stderr` files for the different outputs. 
In the example above, there +multiple `.stderr` files for the different outputs. +In the example above, there would be a `.mir.stderr` and `.thir.stderr` file with the different outputs of the different revisions. @@ -439,10 +442,10 @@ the different revisions. ## Controlling pass/fail expectations By default, a UI test is expected to **generate a compile error** because most -of the tests are checking for invalid input and error diagnostics. However, you -can also make UI tests where compilation is expected to succeed, and you can -even run the resulting program. Just add one of the following -[directives](directives.md): +of the tests are checking for invalid input and error diagnostics. +However, you can also make UI tests where compilation is expected to succeed, and you can +even run the resulting program. +Just add one of the following [directives](directives.md): - Pass directives: - `//@ check-pass` — compilation should succeed but skip codegen @@ -460,32 +463,32 @@ even run the resulting program. Just add one of the following - Second time is to ensure that the full compile fails - `//@ run-fail` — compilation should succeed, but running the resulting binary should make it exit with a code in the range `1..=127` which - indicates regular failure. On targets without unwind support, crashes - are also accepted. + indicates regular failure. + On targets without unwind support, crashes are also accepted. - `//@ run-crash` — compilation should succeed, but running the resulting - binary should fail with a crash. Crashing is defined as "not exiting with - a code in the range `0..=127`". Example on Linux: Termination by `SIGABRT` - or `SIGSEGV`. Example on Windows: Exiting with the code for - `STATUS_ILLEGAL_INSTRUCTION` (`0xC000001D`). + binary should fail with a crash. + Crashing is defined as "not exiting with a code in the range `0..=127`". + - Example on Linux: Termination by `SIGABRT` or `SIGSEGV`. 
+ - Example on Windows: Exiting with the code for `STATUS_ILLEGAL_INSTRUCTION` (`0xC000001D`). - `//@ run-fail-or-crash` — compilation should succeed, but running the - resulting binary should either `run-fail` or `run-crash`. Useful if a test - crashes on some targets but just fails on others. + resulting binary should either `run-fail` or `run-crash`. + Useful if a test crashes on some targets but just fails on others. -For `run-pass`. `run-fail`, `run-crash` and `run-fail-or-crash` tests, by -default the output of the program itself is not checked. +For `run-pass`, `run-fail`, `run-crash`, and `run-fail-or-crash` tests, +the output of the program itself is not checked by default. -If you want to check the output of running the program, include the -`check-run-results` directive. This will check for a `.run.stderr` and +If you want to check the output of running the program, include the `check-run-results` directive. +This will check for a `.run.stderr` and `.run.stdout` files to compare against the actual output of the program. -Tests with the `*-pass` directives can be overridden with the `--pass` -command-line option: +Tests with the `*-pass` directives can be overridden with the `--pass` command-line option: ```sh ./x test tests/ui --pass check ``` -The `--pass` option only affects UI tests. Using `--pass check` can run the UI +The `--pass` option only affects UI tests. +Using `--pass check` can run the UI test suite much faster (roughly twice as fast on my system), though obviously not exercising as much. @@ -496,13 +499,12 @@ test won't work properly with that override. ## Known bugs The `known-bug` directive may be used for tests that demonstrate a known bug -that has not yet been fixed. Adding tests for known bugs is helpful for several -reasons, including: +that has not yet been fixed. +Adding tests for known bugs is helpful for several reasons, including: -1. Maintaining a functional test that can be conveniently reused when the bug is - fixed. -2. 
Providing a sentinel that will fail if the bug is incidentally fixed. This - can alert the developer so they know that the associated issue has been fixed +1. Maintaining a functional test that can be conveniently reused when the bug is fixed. +2. Providing a sentinel that will fail if the bug is incidentally fixed. + This can alert the developer so they know that the associated issue has been fixed and can possibly be closed. This directive takes comma-separated issue numbers as arguments, or `"unknown"`: @@ -513,21 +515,21 @@ This directive takes comma-separated issue numbers as arguments, or `"unknown"`: - `//@ known-bug: unknown` (when there is no known issue yet; preferably open one if it does not already exist) -Do not include [error annotations](#error-annotations) in a test with -`known-bug`. The test should still include other normal directives and -stdout/stderr files. +Do not include [error annotations](#error-annotations) in a test with `known-bug`. +The test should still include other normal directives and stdout/stderr files. ## Test organization When deciding where to place a test file, please try to find a subdirectory that -best matches what you are trying to exercise. Do your best to keep things -organized. Admittedly it can be difficult as some tests can overlap different +best matches what you are trying to exercise. +Do your best to keep things organized. +Admittedly, it can be difficult as some tests can overlap different categories, and the existing layout may not fit well. -Name the test by a concise description of what the test is checking. Avoid -including the issue number in the test name. See [best -practices](best-practices.md) for a more in-depth discussion of this. +Name the test by a concise description of what the test is checking. +Avoid including the issue number in the test name. +See [best practices](best-practices.md) for a more in-depth discussion of this. 
Ideally, the test should be added to a directory that helps identify what piece of code is being tested here (e.g., @@ -535,30 +537,29 @@ of code is being tested here (e.g., When writing a new feature, you may want to **create a subdirectory to store your tests**. For example, if you are implementing RFC 1234 ("Widgets"), then it -might make sense to put the tests in a directory like -`tests/ui/rfc1234-widgets/`. +might make sense to put the tests in a directory like `tests/ui/rfc1234-widgets/`. In other cases, there may already be a suitable directory. -Over time, the [`tests/ui`] directory has grown very fast. There is a check in -[tidy](intro.md#tidy) that will ensure none of the subdirectories has more than -1000 entries. Having too many files causes problems because it isn't editor/IDE -friendly and the GitHub UI won't show more than 1000 entries. However, since -`tests/ui` (UI test root directory) and `tests/ui/issues` directories have more -than 1000 entries, we set a different limit for those directories. So, please -avoid putting a new test there and try to find a more relevant place. +Over time, the [`tests/ui`] directory has grown very fast. +There is a check in [tidy](intro.md#tidy) that will ensure none of the subdirectories has more than +1000 entries. +Having too many files causes problems because it isn't editor/IDE +friendly and the GitHub UI won't show more than 1000 entries. +However, since `tests/ui` (UI test root directory) and `tests/ui/issues` directories have more +than 1000 entries, we set a different limit for those directories. +So, please avoid putting a new test there and try to find a more relevant place. -For example, if your test is related to closures, you should put it in -`tests/ui/closures`. When you reach the limit, you could increase it by tweaking -[here][ui test tidy]. +For example, if your test is related to closures, you should put it in `tests/ui/closures`. 
+When you reach the limit, you could increase it by tweaking [here][ui test tidy]. [ui test tidy]: https://github.com/rust-lang/rust/blob/HEAD/src/tools/tidy/src/ui_tests.rs ## Rustfix tests UI tests can validate that diagnostic suggestions apply correctly and that the -resulting changes compile correctly. This can be done with the `run-rustfix` -directive: +resulting changes compile correctly. +This can be done with the `run-rustfix` directive: ```rust,ignore //@ run-rustfix @@ -574,37 +575,34 @@ pub struct not_camel_case {} Rustfix tests should have a file with the `.fixed` extension which contains the source file after the suggestion has been applied. -- When the test is run, compiletest first checks that the correct lint/warning - is generated. -- Then, it applies the suggestion and compares against `.fixed` (they must - match). -- Finally, the fixed source is compiled, and this compilation is required to - succeed. +- When the test is run, compiletest first checks that the correct lint/warning is generated. +- Then, it applies the suggestion and compares against `.fixed` (they must match). +- Finally, the fixed source is compiled, and this compilation is required to succeed. Usually when creating a rustfix test you will generate the `.fixed` file automatically with the `x test --bless` option. The `run-rustfix` directive will cause *all* suggestions to be applied, even if -they are not [`MachineApplicable`](../diagnostics.md#suggestions). If this is a -problem, then you can add the `rustfix-only-machine-applicable` directive in -addition to `run-rustfix`. This should be used if there is a mixture of -different suggestion levels, and some of the non-machine-applicable ones do not -apply cleanly. +they are not [`MachineApplicable`](../diagnostics.md#suggestions). +If this is a problem, then you can add the `rustfix-only-machine-applicable` directive in +addition to `run-rustfix`. 
+This should be used if there is a mixture of +different suggestion levels, and some of the non-machine-applicable ones do not apply cleanly. ## Compare modes [Compare modes](compiletest.md#compare-modes) can be used to run all tests with -different flags from what they are normally compiled with. In some cases, this -might result in different output from the compiler. To support this, different +different flags from what they are normally compiled with. +In some cases, this might result in different output from the compiler. +To support this, different output files can be saved which contain the output based on the compare mode. For example, when using the Polonius mode, a test `foo.rs` will first look for -expected output in `foo.polonius.stderr`, falling back to the usual `foo.stderr` -if not found. This is useful as different modes can sometimes result in -different diagnostics and behavior. This can help track which tests have -differences between the modes, and to visually inspect those diagnostic -differences. +expected output in `foo.polonius.stderr`, falling back to the usual `foo.stderr` if not found. +This is useful as different modes can sometimes result in different diagnostics and behavior. +This can help track which tests have +differences between the modes, and to visually inspect those diagnostic differences. If in the rare case you encounter a test that has different behavior, you can run something like the following to generate the alternate stderr file: @@ -618,15 +616,15 @@ Currently none of the compare modes are checked in CI for UI tests. ## `rustc_*` TEST attributes The compiler defines several perma-unstable `#[rustc_*]` attributes gated behind -the internal feature `rustc_attrs` that dump extra compiler-internal -information. See the corresponding subsection in [compiler debugging] for more -details. +the internal feature `rustc_attrs` that dump extra compiler-internal information. 
+See the corresponding subsection in [compiler debugging] for more details. -They can be used in tests to more precisely, legibly and easily test internal +They can be used in tests to more precisely, legibly, and easily test internal compiler state in cases where it would otherwise be very hard to do the same -with "user-facing" Rust alone. Indeed, one could say that this slightly abuses -the term "UI" (*user* interface) and turns such UI tests from black-box tests -into white-box ones. Use them carefully and sparingly. +with "user-facing" Rust alone. +Indeed, one could say that this slightly abuses +the term "UI" (*user* interface) and turns such UI tests from black-box tests into white-box ones. +Use them carefully and sparingly. [compiler debugging]: ../compiler-debugging.md#rustc_-test-attributes @@ -636,6 +634,7 @@ By default, test suites under UI test mode (`tests/ui`, `tests/ui-fulldeps`, but not `tests/rustdoc-ui`) will specify - `-A unused` +- `-W unused_attributes` (since these tend to be interesting for ui tests) - `-A internal_features` If: @@ -650,5 +649,4 @@ in-source lint level attributes as required. Note that the `rustfix` version will *not* have `-A unused` passed, meaning that you may have to `#[allow(unused)]` to suppress `unused` -lints on the rustfix'd file (because we might be testing rustfix -on `unused` lints themselves). +lints on the rustfix'd file (because we might be testing rustfix on `unused` lints themselves). diff --git a/src/doc/rustc-dev-guide/src/tracing.md b/src/doc/rustc-dev-guide/src/tracing.md index 4d52f9c865081..28c0bcc737caf 100644 --- a/src/doc/rustc-dev-guide/src/tracing.md +++ b/src/doc/rustc-dev-guide/src/tracing.md @@ -1,14 +1,15 @@ # Using tracing to debug the compiler The compiler has a lot of [`debug!`] (or `trace!`) calls, which print out logging information -at many points. These are very useful to at least narrow down the location of +at many points. 
+These are very useful to at least narrow down the location of a bug if not to find it entirely, or just to orient yourself as to why the compiler is doing a particular thing. [`debug!`]: https://docs.rs/tracing/0.1/tracing/macro.debug.html -To see the logs, you need to set the `RUSTC_LOG` environment variable to your -log filter. The full syntax of the log filters can be found in the [rustdoc +To see the logs, you need to set the `RUSTC_LOG` environment variable to your log filter. +The full syntax of the log filters can be found in the [rustdoc of `tracing-subscriber`](https://docs.rs/tracing-subscriber/0.2.24/tracing_subscriber/filter/struct.EnvFilter.html#directives). ## Function level filters @@ -47,69 +48,74 @@ RUSTC_LOG=rustc_borrowck[do_mir_borrowck] ### I don't want all calls If you are compiling libcore, you likely don't want *all* borrowck dumps, but only one -for a specific function. You can filter function calls by their arguments by regexing them. +for a specific function. +You can filter function calls by their arguments by regexing them. ``` RUSTC_LOG=[do_mir_borrowck{id=\.\*from_utf8_unchecked\.\*}] ``` -will only give you the logs of borrowchecking `from_utf8_unchecked`. Note that you will -still get a short message per ignored `do_mir_borrowck`, but none of the things inside those -calls. This helps you in looking through the calls that are happening and helps you adjust +will only give you the logs of borrowchecking `from_utf8_unchecked`. +Note that you will +still get a short message per ignored `do_mir_borrowck`, but none of the things inside those calls. +This helps you in looking through the calls that are happening and helps you adjust your regex if you mistyped it. ## Query level filters Every [query](query.md) is automatically tagged with a logging span so that -you can display all log messages during the execution of the query. 
For -example, if you want to log everything during type checking: +you can display all log messages during the execution of the query. +For example, if you want to log everything during type checking: ``` RUSTC_LOG=[typeck] ``` The query arguments are included as a tracing field which means that you can -filter on the debug display of the arguments. For example, the `typeck` query -has an argument `key: LocalDefId` of what is being checked. You can use a -regex to match on that `LocalDefId` to log type checking for a specific +filter on the debug display of the arguments. +For example, the `typeck` query has an argument `key: LocalDefId` of what is being checked. +You can use a regex to match on that `LocalDefId` to log type checking for a specific function: ``` RUSTC_LOG=[typeck{key=.*name_of_item.*}] ``` -Different queries have different arguments. You can find a list of queries and -their arguments in +Different queries have different arguments. +You can find a list of queries and their arguments in [`rustc_middle/src/query/mod.rs`](https://github.com/rust-lang/rust/blob/HEAD/compiler/rustc_middle/src/query/mod.rs#L18). ## Broad module level filters You can also use filters similar to the `log` crate's filters, which will enable -everything within a specific module. This is often too verbose and too unstructured, +everything within a specific module. +This is often too verbose and too unstructured, so it is recommended to use function level filters. Your log filter can be just `debug` to get all `debug!` output and higher (e.g., it will also include `info!`), or `path::to::module` to get *all* output (which will include `trace!`) from a particular module, or -`path::to::module=debug` to get `debug!` output and higher from a particular -module. +`path::to::module=debug` to get `debug!` output and higher from a particular module. 
For example, to get the `debug!` output and higher for a specific module, you can run the compiler with `RUSTC_LOG=path::to::module=debug rustc my-file.rs`. All `debug!` output will then appear in standard error. -Note that you can use a partial path and the filter will still work. For -example, if you want to see `info!` output from only +Note that you can use a partial path and the filter will still work. +For example, if you want to see `info!` output from only `rustdoc::passes::collect_intra_doc_links`, you could use `RUSTDOC_LOG=rustdoc::passes::collect_intra_doc_links=info` *or* you could use `RUSTDOC_LOG=rustdoc::passes::collect_intra=info`. -If you are developing rustdoc, use `RUSTDOC_LOG` instead. If you are developing -Miri, use `MIRI_LOG` instead. You get the idea :) +If you are developing rustdoc, use `RUSTDOC_LOG` instead. +If you are developing Miri, use `MIRI_LOG` instead. +You get the idea :) See the [`tracing`] crate's docs, and specifically the docs for [`debug!`] to -see the full syntax you can use. (Note: unlike the compiler, the [`tracing`] -crate and its examples use the `RUSTC_LOG` environment variable. rustc, rustdoc, +see the full syntax you can use. +(Note: unlike the compiler, the [`tracing`] +crate and its examples use the `RUSTC_LOG` environment variable. +rustc, rustdoc, and other tools set custom environment variables.) **Note that unless you use a very strict filter, the logger will emit a lot of @@ -157,18 +163,20 @@ $ RUSTDOC_LOG=rustdoc=debug rustdoc +stage1 my-file.rs ## Log colors By default, rustc (and other tools, like rustdoc and Miri) will be smart about -when to use ANSI colors in the log output. If they are outputting to a terminal, +when to use ANSI colors in the log output. +If they are outputting to a terminal, they will use colors, and if they are outputting to a file or being piped -somewhere else, they will not. However, it's hard to read log output in your +somewhere else, they will not. 
+However, it's hard to read log output in your terminal unless you have a very strict filter, so you may want to pipe the -output to a pager like `less`. But then there won't be any colors, which makes -it hard to pick out what you're looking for! +output to a pager like `less`. +But then there won't be any colors, which makes it hard to pick out what you're looking for! You can override whether to have colors in log output with the `RUSTC_LOG_COLOR` environment variable (or `RUSTDOC_LOG_COLOR` for rustdoc, or `MIRI_LOG_COLOR` for Miri, etc.). There are three options: `auto` (the default), `always`, and -`never`. So, if you want to enable colors when piping to `less`, use something -similar to this command: +`never`. +So, if you want to enable colors when piping to `less`, use something similar to this command: ```bash # The `-R` switch tells less to print ANSI colors without escaping them. @@ -176,18 +184,17 @@ $ RUSTC_LOG=debug RUSTC_LOG_COLOR=always rustc +stage1 ... | less -R ``` Note that `MIRI_LOG_COLOR` will only color logs that come from Miri, not logs -from rustc functions that Miri calls. Use `RUSTC_LOG_COLOR` to color logs from -rustc. +from rustc functions that Miri calls. +Use `RUSTC_LOG_COLOR` to color logs from rustc. ## How to keep or remove `debug!` and `trace!` calls from the resulting binary While calls to `error!`, `warn!` and `info!` are included in every build of the compiler, calls to `debug!` and `trace!` are only included in the program if -`debug-logging=true` is turned on in bootstrap.toml (it is +`rust.debug-logging=true` is turned on in bootstrap.toml (it is turned off by default), so if you don't see `DEBUG` logs, especially if you run the compiler with `RUSTC_LOG=rustc rustc some.rs` and only see -`INFO` logs, make sure that `debug-logging=true` is turned on in your -bootstrap.toml. +`INFO` logs, make sure that `rust.debug-logging=true` is turned on in your bootstrap.toml. 
## Logging etiquette and conventions @@ -196,9 +203,9 @@ about the performance of adding "unnecessary" calls to `debug!` and leaving them commit - they won't slow down the performance of what we ship. That said, there can also be excessive tracing calls, especially -when they are redundant with other calls nearby or in functions called from -here. There is no perfect balance to hit here, and is left to the reviewer's -discretion to decide whether to let you leave `debug!` statements in or whether to ask +when they are redundant with other calls nearby or in functions called from here. +There is no perfect balance to hit here, and it is left to the reviewer's +discretion to decide whether to let you leave `debug!` statements in, or whether to ask you to remove them before merging. It may be preferable to use `trace!` over `debug!` for very noisy logs. @@ -219,8 +226,8 @@ debug!(x = ?random_operation(tcx)); ``` Then if someone runs a debug `rustc` with `RUSTC_LOG=rustc::foo`, then -`random_operation()` will run. `RUSTC_LOG` filters that do not enable this -debug statement will not execute `random_operation`. +`random_operation()` will run. +`RUSTC_LOG` filters that do not enable this debug statement will not execute `random_operation`. This means that you should not put anything too expensive or likely to crash there - that would annoy anyone who wants to use logging for that module. diff --git a/src/doc/rustc-dev-guide/src/traits/caching.md b/src/doc/rustc-dev-guide/src/traits/caching.md index c44722a1d9a33..be72f6e89f9ac 100644 --- a/src/doc/rustc-dev-guide/src/traits/caching.md +++ b/src/doc/rustc-dev-guide/src/traits/caching.md @@ -61,7 +61,7 @@ to be pretty clearly safe and also still retains a very high hit rate **TODO**: it looks like `pick_candidate_cache` no longer exists. In general, is this section still accurate at all? 
-[`ParamEnv`]: ../typing_parameter_envs.html +[`ParamEnv`]: ../typing-parameter-envs.html [`tcx`]: ../ty.html [#18290]: https://github.com/rust-lang/rust/issues/18290 [#22019]: https://github.com/rust-lang/rust/issues/22019 diff --git a/src/doc/rustc-dev-guide/src/traits/resolution.md b/src/doc/rustc-dev-guide/src/traits/resolution.md index ccb2b04268e85..f668d6ccf6198 100644 --- a/src/doc/rustc-dev-guide/src/traits/resolution.md +++ b/src/doc/rustc-dev-guide/src/traits/resolution.md @@ -130,9 +130,9 @@ Once this first pass is done, we can examine the set of candidates. If it is a singleton set, then we are done: this is the only impl in scope that could possibly apply. Otherwise, we can **winnow** down the set of candidates by using where clauses and other conditions. Winnowing uses -`evaluate_candidate` to check whether the nested obligations may apply. -If this still leaves more than 1 candidate, we use ` fn candidate_should_be_dropped_in_favor_of` -to prefer some candidates over others. +`evaluate_candidate` to check whether the nested obligations may apply. +If this still leaves more than 1 candidate, we use ` fn candidate_should_be_dropped_in_favor_of` +to prefer some candidates over others. If this reduced set yields a single, unambiguous entry, we're good to go, @@ -181,7 +181,7 @@ in that list. If so, it is considered satisfied. More precisely, we want to check whether there is a where-clause obligation that is for the same trait (or some subtrait) and which can match against the obligation. -[parameter environment]: ../typing_parameter_envs.html +[parameter environment]: ../typing-parameter-envs.html Consider this simple example: @@ -240,8 +240,8 @@ confirmation is done based on (in this case) the `Target` type parameter. As mentioned above, during type checking, we do not store the results of trait selection. At codegen time, we repeat the trait selection to choose a particular -impl for each method call. This is done using `fn codegen_select_candidate`. 
-In this second selection, we do not consider any where-clauses to be in scope +impl for each method call. This is done using `fn codegen_select_candidate`. +In this second selection, we do not consider any where-clauses to be in scope because we know that each resolution will resolve to a particular impl. One interesting twist has to do with nested obligations. In general, in codegen, diff --git a/src/doc/rustc-dev-guide/src/ty-fold.md b/src/doc/rustc-dev-guide/src/ty-fold.md index 120a266e3536a..bf0a51e6b7cfb 100644 --- a/src/doc/rustc-dev-guide/src/ty-fold.md +++ b/src/doc/rustc-dev-guide/src/ty-fold.md @@ -99,7 +99,7 @@ it replaces it for something from the list of substitutions, otherwise recursive To replace it, calls [ty_for_param] and all that does is index into the list of substitutions with the index of the `Param`. -[a previous chapter]: ty_module/instantiating_binders.md +[a previous chapter]: ty-module/instantiating-binders.md [`TypeFoldable`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/trait.TypeFoldable.html [`TypeFolder`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/trait.TypeFolder.html [`fold_ty`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/trait.TypeFolder.html#method.fold_ty diff --git a/src/doc/rustc-dev-guide/src/ty_module/binders.md b/src/doc/rustc-dev-guide/src/ty-module/binders.md similarity index 100% rename from src/doc/rustc-dev-guide/src/ty_module/binders.md rename to src/doc/rustc-dev-guide/src/ty-module/binders.md diff --git a/src/doc/rustc-dev-guide/src/ty_module/early_binder.md b/src/doc/rustc-dev-guide/src/ty-module/early-binder.md similarity index 100% rename from src/doc/rustc-dev-guide/src/ty_module/early_binder.md rename to src/doc/rustc-dev-guide/src/ty-module/early-binder.md diff --git a/src/doc/rustc-dev-guide/src/ty_module/generic_arguments.md b/src/doc/rustc-dev-guide/src/ty-module/generic-arguments.md similarity index 100% rename from 
src/doc/rustc-dev-guide/src/ty_module/generic_arguments.md rename to src/doc/rustc-dev-guide/src/ty-module/generic-arguments.md diff --git a/src/doc/rustc-dev-guide/src/ty_module/instantiating_binders.md b/src/doc/rustc-dev-guide/src/ty-module/instantiating-binders.md similarity index 95% rename from src/doc/rustc-dev-guide/src/ty_module/instantiating_binders.md rename to src/doc/rustc-dev-guide/src/ty-module/instantiating-binders.md index 7e29b95437140..3c7408465c446 100644 --- a/src/doc/rustc-dev-guide/src/ty_module/instantiating_binders.md +++ b/src/doc/rustc-dev-guide/src/ty-module/instantiating-binders.md @@ -21,7 +21,7 @@ Unlike `EarlyBinder` we typically do not instantiate `Binder` with some concrete ## Instantiating with inference variables -We instantiate binders with inference variables when we are trying to infer a possible instantiation of the binder, e.g. calling higher ranked function pointers or attempting to use a higher ranked where-clause to prove some bound. For example, given the `higher_ranked_fn_ptr` from the example above, if we were to call it with `&10_u32` we would: +We instantiate binders with inference variables when we are trying to infer a possible instantiation of the binder, e.g. calling higher ranked function pointers or attempting to use a higher ranked where-clause to prove some bound. 
For example, given the `higher_ranked_fn_ptr` from the example above, if we were to call it with `&10_u32` we would: - Instantiate the binder with infer vars yielding a signature of `fn(&'?0 u32) -> &'?0 u32)` - Equate the type of the provided argument `&10_u32` (&'static u32) with the type in the signature, `&'?0 u32`, inferring `'?0 = 'static` - The provided arguments were correct as we were successfully able to unify the types of the provided arguments with the types of the arguments in fn ptr signature @@ -35,7 +35,7 @@ Instantiating binders with inference variables can be accomplished by using the ## Instantiating with placeholders -Placeholders are very similar to `Ty/ConstKind::Param`/`ReEarlyParam`, they represent some unknown type that is only equal to itself. `Ty`/`Const` and `Region` all have a [`Placeholder`] variant that is comprised of a [`Universe`] and a [`BoundVar`]. +Placeholders are very similar to `Ty/ConstKind::Param`/`ReEarlyParam`, they represent some unknown type that is only equal to itself. `Ty`/`Const` and `Region` all have a [`Placeholder`] variant that is comprised of a [`Universe`] and a [`BoundVar`]. The `Universe` tracks which binder the placeholder originated from, and the `BoundVar` tracks which parameter on said binder that this placeholder corresponds to. Equality of placeholders is determined solely by whether the universes are equal and the `BoundVar`s are equal. See the [chapter on Placeholders and Universes][ch_placeholders_universes] for more information. @@ -49,7 +49,7 @@ Note: in the original example of this chapter it was mentioned that we should no ### Why have both `RePlaceholder` and `ReBound`? -You may be wondering why we have both of these variants, afterall the data stored in `Placeholder` is effectively equivalent to that of `ReBound`: something to track which binder, and an index to track which parameter the `Binder` introduced. 
+You may be wondering why we have both of these variants, afterall the data stored in `Placeholder` is effectively equivalent to that of `ReBound`: something to track which binder, and an index to track which parameter the `Binder` introduced. The main reason for this is that `Bound` is a more syntactic representation of bound variables whereas `Placeholder` is a more semantic representation. As a concrete example: ```rust @@ -77,7 +77,7 @@ Given these trait implementations `u32: Bar` should _not_ hold. `&'a u32` only i This end result is incorrect as we had two separate binders introducing their own generic parameters, the trait bound should have ended up as something like `for<'a1, 'a2> &'^1 u32: Other<'^0>` which is _not_ satisfied by the `impl<'a> Other<'a> for &'a u32`. While in theory we could make this work it would be quite involved and more complex than the current setup, we would have to: -- "rewrite" bound variables to have a higher `DebruijnIndex` whenever instantiating a `Binder`/`EarlyBinder` with a `Bound` ty/const/region +- "rewrite" bound variables to have a higher `DebruijnIndex` whenever instantiating a `Binder`/`EarlyBinder` with a `Bound` ty/const/region - When inferring an inference variable to a bound var, if that bound var is from a binder entered after creating the infer var, we would have to lower the `DebruijnIndex` of the var. - Separately track what binder an inference variable was created inside of, also what the innermost binder it can name parameters from (currently we only have to track the latter) - When resolving inference variables rewrite any bound variables according to the current binder depth of the infcx @@ -90,18 +90,18 @@ where for<'a> T: Trait<'a, for<'b> fn(&'b T, &'a u32)> { ... 
} ``` -That where clause would be written as: -`for<'a> T: Trait<'^0, for<'b> fn(&'^0 T, &'^1_0 u32)>` +That where clause would be written as: +`for<'a> T: Trait<'^0, for<'b> fn(&'^0 T, &'^1_0 u32)>` Despite there being two references to the `'a` parameter they are both represented differently: `^0` and `^1_0`, due to the fact that the latter usage is nested under a second `Binder` for the inner function pointer type. This is in contrast to `Placeholder` ty/const/regions which do not have this limitation due to the fact that `Universe`s are specific to the current `InferCtxt` not the usage site of the parameter. -It is trivially possible to instantiate `EarlyBinder`s and unify inference variables with existing `Placeholder`s as no matter what context the `Placeholder` is in, it will have the same representation. As an example if we were to instantiate the binder on the higher ranked where clause from above, it would be represented like so: -`T: Trait<'!1_0, for<'b> fn(&'^0 T, &'!1_0 u32)>` +It is trivially possible to instantiate `EarlyBinder`s and unify inference variables with existing `Placeholder`s as no matter what context the `Placeholder` is in, it will have the same representation. As an example if we were to instantiate the binder on the higher ranked where clause from above, it would be represented like so: +`T: Trait<'!1_0, for<'b> fn(&'^0 T, &'!1_0 u32)>` the `RePlaceholder` representation for both usages of `'a` are the same despite one being underneath another `Binder`. -If we were to then instantiate the binder on the function pointer we would get a type such as: -`fn(&'!2_0 T, ^'!1_0 u32)` +If we were to then instantiate the binder on the function pointer we would get a type such as: +`fn(&'!2_0 T, ^'!1_0 u32)` the `RePlaceholder` for the `'b` parameter is in a higher universe to track the fact that its binder was instantiated after the binder for `'a`. 
## Instantiating with `ReLateParam` @@ -119,8 +119,8 @@ impl Trait for Whatever { b } } -``` -the lifetime `'a` in the type `&'a u32` in the function body would be represented as: +``` +the lifetime `'a` in the type `&'a u32` in the function body would be represented as: ``` ReLateParam( {impl#0}::foo, @@ -135,10 +135,10 @@ Generally whenever we have a `Binder` for late bound parameters on a function/cl As a concrete example, accessing the signature of a function we are type checking will be represented as `EarlyBinder>`. As we are already "inside" of these binders, we would call `instantiate_identity` followed by `liberate_late_bound_regions`. [`liberate_late_bound_regions`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/context/struct.TyCtxt.html#method.liberate_late_bound_regions -[representing-types]: param_ty_const_regions.md +[representing-types]: param-ty-const-regions.md [`BoundRegionKind`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/enum.BoundRegionKind.html [`enter_forall`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_trait_selection/infer/struct.InferCtxt.html#method.enter_forall -[ch_placeholders_universes]: ../borrow_check/region_inference/placeholders_and_universes.md +[ch_placeholders_universes]: ../borrow-check/region-inference/placeholders-and-universes.md [`instantiate_binder_with_fresh_vars`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_trait_selection/infer/struct.InferCtxt.html#method.instantiate_binder_with_fresh_vars [`InferCtxt`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_trait_selection/infer/struct.InferCtxt.html [`EarlyBinder`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/type.EarlyBinder.html diff --git a/src/doc/rustc-dev-guide/src/ty_module/param_ty_const_regions.md b/src/doc/rustc-dev-guide/src/ty-module/param-ty-const-regions.md similarity index 97% rename from src/doc/rustc-dev-guide/src/ty_module/param_ty_const_regions.md rename to 
src/doc/rustc-dev-guide/src/ty-module/param-ty-const-regions.md index 493693c9a4400..ce4e887862c7a 100644 --- a/src/doc/rustc-dev-guide/src/ty_module/param_ty_const_regions.md +++ b/src/doc/rustc-dev-guide/src/ty-module/param-ty-const-regions.md @@ -49,9 +49,9 @@ impl Foo { } ``` -Concretely given the `ty::Generics` for the item the parameter is defined on, if the index is `2` then starting from the root `parent`, it will be the third parameter to be introduced. For example in the above example, `Z` has index `2` and is the third generic parameter to be introduced, starting from the `impl` block. +Concretely given the `ty::Generics` for the item the parameter is defined on, if the index is `2` then starting from the root `parent`, it will be the third parameter to be introduced. For example in the above example, `Z` has index `2` and is the third generic parameter to be introduced, starting from the `impl` block. -The index fully defines the `Ty` and is the only part of `TyKind::Param` that matters for reasoning about the code we are compiling. +The index fully defines the `Ty` and is the only part of `TyKind::Param` that matters for reasoning about the code we are compiling. Generally we do not care what the name is and only use the index. The name is included for diagnostics and debug logs as otherwise it would be incredibly difficult to understand the output, i.e. `Vec: Sized` vs `Vec: Sized`. In debug output, parameter types are @@ -59,7 +59,7 @@ often printed out as `{name}/#{index}`, for example in the function `foo` if we An alternative representation would be to only have the name, however using an index is more efficient as it means we can index into `GenericArgs` when instantiating generic parameters with some arguments. We would otherwise have to store `GenericArgs` as a `HashMap` and do a hashmap lookup everytime we used a generic item. -In theory an index would also allow for having multiple distinct parameters that use the same name, e.g. 
+In theory an index would also allow for having multiple distinct parameters that use the same name, e.g. `impl Foo { fn bar() { .. } }`. The rules against shadowing make this difficult but those language rules could change in the future. @@ -85,9 +85,9 @@ fn foo<'a, 'b, T: 'a>(one: T, two: &'a &'b u32) -> &'b u32 { `RegionKind::LateParam` is discussed more in the chapter on [instantiating binders][ch_instantiating_binders]. -[ch_early_late_bound]: ../early_late_parameters.md +[ch_early_late_bound]: ../early-late-parameters.md [ch_binders]: ./binders.md -[ch_instantiating_binders]: ./instantiating_binders.md +[ch_instantiating_binders]: ./instantiating-binders.md [`BoundRegionKind`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/enum.BoundRegionKind.html [`RegionKind::EarlyParam`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/type.RegionKind.html#variant.ReEarlyParam [`RegionKind::LateParam`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_middle/ty/type.RegionKind.html#variant.ReLateParam diff --git a/src/doc/rustc-dev-guide/src/typing_parameter_envs.md b/src/doc/rustc-dev-guide/src/typing-parameter-envs.md similarity index 100% rename from src/doc/rustc-dev-guide/src/typing_parameter_envs.md rename to src/doc/rustc-dev-guide/src/typing-parameter-envs.md diff --git a/src/doc/rustc-dev-guide/src/walkthrough.md b/src/doc/rustc-dev-guide/src/walkthrough.md index 5ba89f984a703..212fb298fd0b3 100644 --- a/src/doc/rustc-dev-guide/src/walkthrough.md +++ b/src/doc/rustc-dev-guide/src/walkthrough.md @@ -41,18 +41,15 @@ Here is a quick list. We will go through each of these in order below. As I mentioned before, not all of these are needed for every type of contribution. -- **Idea discussion/Pre-RFC** A Pre-RFC is an early draft or design discussion - of a feature. +- **Idea discussion/Pre-RFC** A Pre-RFC is an early draft or design discussion of a feature. 
This stage is intended to flesh out the design space a bit and get a grasp on the different merits and problems with an idea. - It's a great way to get early feedback on your idea before presenting it to the wider - audience. + It's a great way to get early feedback on your idea before presenting it to the wider audience. You can find the original discussion [here][prerfc]. -- **RFC** This is when you formally present your idea to the community for - consideration. +- **RFC** This is when you formally present your idea to the community for consideration. You can find the RFC [here][rfc]. -- **Implementation** Implement your idea unstably in the compiler. You can - find the original implementation [here][impl1]. +- **Implementation** Implement your idea unstably in the compiler. + You can find the original implementation [here][impl1]. - **Possibly iterate/refine** As the community gets experience with your feature on the nightly compiler and in `std`, there may be additional feedback about design choice that might be adjusted. @@ -97,7 +94,7 @@ If that sounds like a lot of work, it's because it is. But no fear! Even if you're not a compiler hacker, you can get great feedback by doing a _pre-RFC_. This is an _informal_ discussion of the idea. -The best place to do this is internals.rust-lang.org. +The best place to do this is [internals.rust-lang.org](https://internals.rust-lang.org). Your post doesn't have to follow any particular structure. It doesn't even need to be a cohesive idea. Generally, you will get tons of feedback that you can integrate back to produce a good RFC. @@ -114,8 +111,7 @@ In this case, the discussion converged pretty quickly, but for some ideas, a lot more discussion can happen (e.g. see [this RFC][nonascii] which received a whopping 684 comments!). If that happens, don't be discouraged; -it means the community is interested in your idea, but it perhaps needs some -adjustments. 
+it means the community is interested in your idea, but it perhaps needs some adjustments. [nonascii]: https://github.com/rust-lang/rfcs/pull/2457 @@ -138,10 +134,10 @@ last chance for people to bring up objections. When the FCP is over, the disposition is adopted. Here are the three possible dispositions: -- _Merge_: accept the feature. Here is the proposal to merge for our [`?` macro - feature][rfcmerge]. -- _Close_: this feature in its current form is not a good fit for rust. Don't - be discouraged if this happens to your RFC, and don't take it personally. +- _Merge_: accept the feature. + Here is the proposal to merge for our [`?` macro feature][rfcmerge]. +- _Close_: this feature in its current form is not a good fit for rust. + Don't be discouraged if this happens to your RFC, and don't take it personally. This is not a reflection on you, but rather a community decision that rust will go a different direction. - _Postpone_: there is interest in going this direction but not at the moment. @@ -159,6 +155,21 @@ Here is the tracking issue on for our [`?` macro feature][tracking]. [tracking]: https://github.com/rust-lang/rust/issues/48075 +## Experimental RFC (eRFC) + +An eRFC is a variant of the RFC process used for complex features where the high-level need +is clear, but the design space is too large to settle on a detailed specification upfront. +Instead of providing a final design, an eRFC outlines a high-level strategy to authorize +a period of active experimentation. +This allows the team to implement the feature behind +a feature gate and gather practical data, which then informs a subsequent formal RFC for stabilization. +While this process was used for major features like coroutines ([see RFC 2033][rfc2033]), +the explicit "eRFC" label is rarely used today. +The project now generally prefers approving a standard +RFC for an initial version and iterating on it through the nightly channel before final stabilization. 
+ +[rfc2033]: https://github.com/rust-lang/rfcs/pull/2033#issuecomment-309057591 + ## Implementation @@ -186,8 +197,8 @@ When a new feature is implemented, it goes behind a _feature gate_, which means you have to use `#![feature(my_feature_name)]` to use the feature. The feature gate is removed when the feature is stabilized. -**Most bug fixes and improvements** don't require a feature gate. You can just -make your changes/improvements. +**Most bug fixes and improvements** don't require a feature gate. +You can just make your changes/improvements. When you open a PR on the [rust-lang/rust], a bot will assign your PR to a reviewer. If there is a particular Rust team member you are working with, you can @@ -204,8 +215,7 @@ When you finished iterating on the changes, you can mark the PR as `S-waiting-on-author` label and add the `S-waiting-on-review` label. Feel free to ask questions or discuss things you don't understand or disagree with. -However, recognize that the PR won't be merged unless someone on the Rust team approves -it. +However, recognize that the PR won't be merged unless someone on the Rust team approves it. If a reviewer leave a comment like `r=me after fixing ...`, that means they approve the PR and you can merge it with comment with `@bors r=reviewer-github-id`(e.g. `@bors r=eddyb`) to merge it after fixing trivial issues. @@ -222,8 +232,7 @@ If all tests pass, the PR is merged and becomes part of the next nightly compile There are a couple of things that may happen for some PRs during the review process -- If the change is substantial enough, the reviewer may request an FCP on - the PR. +- If the change is substantial enough, the reviewer may request an FCP on the PR. This gives all members of the appropriate team a chance to review the changes. - If the change may cause breakage, the reviewer may request a [crater] run. 
This compiles the compiler with your changes and then attempts to compile all @@ -284,6 +293,6 @@ A note is added to the [Release notes][relnotes] about the feature. [stab]: https://github.com/rust-lang/rust/pull/56245 -Steps to stabilize the feature can be found at [Stabilizing Features](./stabilization_guide.md). +Steps to stabilize the feature can be found at [Stabilizing Features](./stabilization-guide.md). [relnotes]: https://github.com/rust-lang/rust/blob/HEAD/RELEASES.md diff --git a/tests/ui/hygiene/unpretty-debug-lifetimes.rs b/tests/ui/hygiene/unpretty-debug-lifetimes.rs new file mode 100644 index 0000000000000..ee8be21b60d01 --- /dev/null +++ b/tests/ui/hygiene/unpretty-debug-lifetimes.rs @@ -0,0 +1,18 @@ +//@ check-pass +//@ compile-flags: -Zunpretty=expanded,hygiene + +// Regression test for lifetime hygiene annotations in -Zunpretty=expanded,hygiene +// Previously, lifetimes were missing the #N syntax context suffix. + +// Don't break whenever Symbol numbering changes +//@ normalize-stdout: "\d+#" -> "0#" + +#![feature(decl_macro)] +#![feature(no_core)] +#![no_core] + +macro lifetime_hygiene($f:ident<$a:lifetime>) { + fn $f<$a, 'a>() {} +} + +lifetime_hygiene!(f<'a>); diff --git a/tests/ui/hygiene/unpretty-debug-lifetimes.stdout b/tests/ui/hygiene/unpretty-debug-lifetimes.stdout new file mode 100644 index 0000000000000..28a5c70a02d79 --- /dev/null +++ b/tests/ui/hygiene/unpretty-debug-lifetimes.stdout @@ -0,0 +1,31 @@ +//@ check-pass +//@ compile-flags: -Zunpretty=expanded,hygiene + +// Regression test for lifetime hygiene annotations in -Zunpretty=expanded,hygiene +// Previously, lifetimes were missing the #N syntax context suffix. 
+ +// Don't break whenever Symbol numbering changes +//@ normalize-stdout: "\d+#" -> "0#" + +#![feature /* 0#0 */(decl_macro)] +#![feature /* 0#0 */(no_core)] +#![no_core /* 0#0 */] + +macro lifetime_hygiene + /* + 0#0 + */ { + ($f:ident<$a:lifetime>) => { fn $f<$a, 'a>() {} } +} +fn f /* 0#0 */<'a /* 0#0 */, 'a /* 0#1 */>() {} + + +/* +Expansions: +crate0::{{expn0}}: parent: crate0::{{expn0}}, call_site_ctxt: #0, def_site_ctxt: #0, kind: Root +crate0::{{expn1}}: parent: crate0::{{expn0}}, call_site_ctxt: #0, def_site_ctxt: #0, kind: Macro(Bang, "lifetime_hygiene") + +SyntaxContexts: +#0: parent: #0, outer_mark: (crate0::{{expn0}}, Opaque) +#1: parent: #0, outer_mark: (crate0::{{expn1}}, Opaque) +*/ diff --git a/tests/ui/lint/unused-parens-labeled-break-issue-143256.rs b/tests/ui/lint/unused-parens-labeled-break-issue-143256.rs new file mode 100644 index 0000000000000..8594e646f605e --- /dev/null +++ b/tests/ui/lint/unused-parens-labeled-break-issue-143256.rs @@ -0,0 +1,25 @@ +//@ check-pass +// testcase for https://github.com/rust-lang/rust/issues/143256 + +#![deny(unused_parens)] +#![allow(unreachable_code, unused_variables, dead_code)] + +fn foo() { + let _x = || 'outer: loop { + let inner = 'inner: loop { + let i = Default::default(); + // the parentheses here are necessary + if (break 'outer i) { + loop { + break 'inner 5i8; + } + } else if true { + break 'inner 6; + } + break 7; + }; + break inner < 8; + }; +} + +fn main() {} diff --git a/typos.toml b/typos.toml index 25083174cb8fb..e486a7c1722cd 100644 --- a/typos.toml +++ b/typos.toml @@ -46,6 +46,7 @@ unstalled = "unstalled" # short for un-stalled # # tidy-alphabetical-start definitinon = "definition" +similarlty = "similarity" # tidy-alphabetical-end [default.extend-identifiers]