diff --git a/compiler/rustc_abi/src/callconv/reg.rs b/compiler/rustc_abi/src/callconv/reg.rs index 66c8056d0c2af..66d4dca00726f 100644 --- a/compiler/rustc_abi/src/callconv/reg.rs +++ b/compiler/rustc_abi/src/callconv/reg.rs @@ -35,6 +35,7 @@ impl Reg { reg_ctor!(f32, Float, 32); reg_ctor!(f64, Float, 64); + reg_ctor!(f128, Float, 128); } impl Reg { diff --git a/compiler/rustc_ast_passes/src/errors.rs b/compiler/rustc_ast_passes/src/errors.rs index bda04129e5795..dd260aede4894 100644 --- a/compiler/rustc_ast_passes/src/errors.rs +++ b/compiler/rustc_ast_passes/src/errors.rs @@ -3,7 +3,7 @@ use rustc_abi::ExternAbi; use rustc_ast::ParamKindOrd; use rustc_errors::codes::*; -use rustc_errors::{Applicability, Diag, EmissionGuarantee, Subdiagnostic, inline_fluent}; +use rustc_errors::{Applicability, Diag, EmissionGuarantee, Subdiagnostic}; use rustc_macros::{Diagnostic, LintDiagnostic, Subdiagnostic}; use rustc_span::{Ident, Span, Symbol}; @@ -927,19 +927,15 @@ pub(crate) struct FeatureOnNonNightly { pub sugg: Option, } +#[derive(Subdiagnostic)] +#[help( + "the feature `{$name}` has been stable since `{$since}` and no longer requires an attribute to enable" +)] pub(crate) struct StableFeature { pub name: Symbol, pub since: Symbol, } -impl Subdiagnostic for StableFeature { - fn add_to_diag(self, diag: &mut Diag<'_, G>) { - diag.arg("name", self.name); - diag.arg("since", self.since); - diag.help(inline_fluent!("the feature `{$name}` has been stable since `{$since}` and no longer requires an attribute to enable")); - } -} - #[derive(Diagnostic)] #[diag("`{$f1}` and `{$f2}` are incompatible, using them at the same time is not allowed")] #[help("remove one of these features")] diff --git a/compiler/rustc_attr_parsing/src/attributes/proc_macro_attrs.rs b/compiler/rustc_attr_parsing/src/attributes/proc_macro_attrs.rs index 3674aa7124abb..f9ace7e25d1b3 100644 --- a/compiler/rustc_attr_parsing/src/attributes/proc_macro_attrs.rs +++ b/compiler/rustc_attr_parsing/src/attributes/proc_macro_attrs.rs @@ -1,3 +1,6 @@ +use rustc_hir::lints::AttributeLintKind; +use rustc_session::lint::builtin::AMBIGUOUS_DERIVE_HELPERS; + use super::prelude::*; const PROC_MACRO_ALLOWED_TARGETS: AllowedTargets = @@ -126,6 +129,13 @@ fn parse_derive_like( cx.expected_identifier(ident.span); return None; } + if rustc_feature::is_builtin_attr_name(ident.name) { + cx.emit_lint( + AMBIGUOUS_DERIVE_HELPERS, + AttributeLintKind::AmbiguousDeriveHelpers, + ident.span, + ); + } attributes.push(ident.name); } } diff --git a/compiler/rustc_attr_parsing/src/attributes/test_attrs.rs b/compiler/rustc_attr_parsing/src/attributes/test_attrs.rs index 18d700a20d319..41b1836588deb 100644 --- a/compiler/rustc_attr_parsing/src/attributes/test_attrs.rs +++ b/compiler/rustc_attr_parsing/src/attributes/test_attrs.rs @@ -228,3 +228,32 @@ impl NoArgsAttributeParser for RustcOutlivesParser { ]); const CREATE: fn(Span) -> AttributeKind = |_| AttributeKind::RustcOutlives; } + +pub(crate) struct TestRunnerParser; + +impl SingleAttributeParser for TestRunnerParser { + const PATH: &[Symbol] = &[sym::test_runner]; + const ATTRIBUTE_ORDER: AttributeOrder = AttributeOrder::KeepOutermost; + const ON_DUPLICATE: OnDuplicate = OnDuplicate::Error; + const ALLOWED_TARGETS: AllowedTargets = AllowedTargets::AllowList(&[Allow(Target::Crate)]); + const TEMPLATE: AttributeTemplate = template!(List: &["path"]); + + fn convert(cx: &mut AcceptContext<'_, '_, S>, args: &ArgParser) -> Option { + let Some(list) = args.list() else { + cx.expected_list(cx.attr_span, args); + return 
None; + }; + + let Some(single) = list.single() else { + cx.expected_single_argument(list.span); + return None; + }; + + let Some(meta) = single.meta_item() else { + cx.unexpected_literal(single.span()); + return None; + }; + + Some(AttributeKind::TestRunner(meta.path().0.clone())) + } +} diff --git a/compiler/rustc_attr_parsing/src/context.rs b/compiler/rustc_attr_parsing/src/context.rs index 47b988bcf338b..87aa4150becd3 100644 --- a/compiler/rustc_attr_parsing/src/context.rs +++ b/compiler/rustc_attr_parsing/src/context.rs @@ -218,6 +218,7 @@ attribute_parsers!( Single, Single, Single, + Single, Single, Single, Single, diff --git a/compiler/rustc_borrowck/src/diagnostics/bound_region_errors.rs b/compiler/rustc_borrowck/src/diagnostics/bound_region_errors.rs index 6ed07cf9b1c8c..a927c30fae325 100644 --- a/compiler/rustc_borrowck/src/diagnostics/bound_region_errors.rs +++ b/compiler/rustc_borrowck/src/diagnostics/bound_region_errors.rs @@ -24,7 +24,6 @@ use rustc_traits::{type_op_ascribe_user_type_with_span, type_op_prove_predicate_ use tracing::{debug, instrument}; use crate::MirBorrowckCtxt; -use crate::region_infer::values::RegionElement; use crate::session_diagnostics::{ HigherRankedErrorCause, HigherRankedLifetimeError, HigherRankedSubtypeError, }; @@ -49,11 +48,12 @@ impl<'tcx> UniverseInfo<'tcx> { UniverseInfo::RelateTys { expected, found } } + /// Report an error where an element erroneously made its way into `placeholder`. pub(crate) fn report_erroneous_element( &self, mbcx: &mut MirBorrowckCtxt<'_, '_, 'tcx>, placeholder: ty::PlaceholderRegion<'tcx>, - error_element: RegionElement<'tcx>, + error_element: Option>, cause: ObligationCause<'tcx>, ) { match *self { @@ -146,14 +146,14 @@ pub(crate) trait TypeOpInfo<'tcx> { ) -> Option>; /// Constraints require that `error_element` appear in the - /// values of `placeholder`, but this cannot be proven to + /// values of `placeholder`, but this cannot be proven to /// hold. Report an error. #[instrument(level = "debug", skip(self, mbcx))] fn report_erroneous_element( &self, mbcx: &mut MirBorrowckCtxt<'_, '_, 'tcx>, placeholder: ty::PlaceholderRegion<'tcx>, - error_element: RegionElement<'tcx>, + error_element: Option>, cause: ObligationCause<'tcx>, ) { let tcx = mbcx.infcx.tcx; @@ -172,19 +172,17 @@ pub(crate) trait TypeOpInfo<'tcx> { ty::PlaceholderRegion::new(adjusted_universe.into(), placeholder.bound), ); - let error_region = - if let RegionElement::PlaceholderRegion(error_placeholder) = error_element { - let adjusted_universe = - error_placeholder.universe.as_u32().checked_sub(base_universe.as_u32()); - adjusted_universe.map(|adjusted| { - ty::Region::new_placeholder( - tcx, - ty::PlaceholderRegion::new(adjusted.into(), error_placeholder.bound), - ) - }) - } else { - None - }; + // FIXME: one day this should just be error_element, + // and this method shouldn't do anything. 
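        // Editorial sketch of the rebasing rule used here (the `rebase` helper is
        // hypothetical, not part of this patch):
        //
        //     fn rebase(u: ty::UniverseIndex, base: ty::UniverseIndex) -> Option<ty::UniverseIndex> {
        //         u.as_u32().checked_sub(base.as_u32()).map(Into::into)
        //     }
        //
        // With `base_universe = U2`, a placeholder from `U5` is reported as living in
        // `U3`, while an element from `U1` rebases to `None`, so no error region is
        // attached; this mirrors the `adjusted_universe` computed for `placeholder`
        // above and the `checked_sub` in the closure below.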
+ let error_region = error_element.and_then(|e| { + let adjusted_universe = e.universe.as_u32().checked_sub(base_universe.as_u32()); + adjusted_universe.map(|adjusted| { + ty::Region::new_placeholder( + tcx, + ty::PlaceholderRegion::new(adjusted.into(), e.bound), + ) + }) + }); debug!(?placeholder_region); diff --git a/compiler/rustc_borrowck/src/diagnostics/region_errors.rs b/compiler/rustc_borrowck/src/diagnostics/region_errors.rs index ae389d1a6e10c..ca6b9965f1e9e 100644 --- a/compiler/rustc_borrowck/src/diagnostics/region_errors.rs +++ b/compiler/rustc_borrowck/src/diagnostics/region_errors.rs @@ -29,7 +29,6 @@ use tracing::{debug, instrument, trace}; use super::{LIMITATION_NOTE, OutlivesSuggestionBuilder, RegionName, RegionNameSource}; use crate::nll::ConstraintDescription; -use crate::region_infer::values::RegionElement; use crate::region_infer::{BlameConstraint, TypeTest}; use crate::session_diagnostics::{ FnMutError, FnMutReturnTypeErr, GenericDoesNotLiveLongEnough, LifetimeOutliveErr, @@ -104,15 +103,9 @@ pub(crate) enum RegionErrorKind<'tcx> { /// A generic bound failure for a type test (`T: 'a`). TypeTestError { type_test: TypeTest<'tcx> }, - /// Higher-ranked subtyping error. - BoundUniversalRegionError { - /// The placeholder free region. - longer_fr: RegionVid, - /// The region element that erroneously must be outlived by `longer_fr`. - error_element: RegionElement<'tcx>, - /// The placeholder region. - placeholder: ty::PlaceholderRegion<'tcx>, - }, + /// 'p outlives 'r, which does not hold. 'p is always a placeholder + /// and 'r is some other region. + PlaceholderOutlivesIllegalRegion { longer_fr: RegionVid, illegally_outlived_r: RegionVid }, /// Any other lifetime error. RegionError { @@ -360,28 +353,11 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { } } - RegionErrorKind::BoundUniversalRegionError { + RegionErrorKind::PlaceholderOutlivesIllegalRegion { longer_fr, - placeholder, - error_element, + illegally_outlived_r, } => { - let error_vid = self.regioncx.region_from_element(longer_fr, &error_element); - - // Find the code to blame for the fact that `longer_fr` outlives `error_fr`. - let cause = self - .regioncx - .best_blame_constraint( - longer_fr, - NllRegionVariableOrigin::Placeholder(placeholder), - error_vid, - ) - .0 - .cause; - - let universe = placeholder.universe; - let universe_info = self.regioncx.universe_info(universe); - - universe_info.report_erroneous_element(self, placeholder, error_element, cause); + self.report_erroneous_rvid_reaches_placeholder(longer_fr, illegally_outlived_r) } RegionErrorKind::RegionError { fr_origin, longer_fr, shorter_fr, is_reported } => { @@ -412,6 +388,43 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { outlives_suggestion.add_suggestion(self); } + /// Report that `longer_fr: error_vid`, which doesn't hold, + /// where `longer_fr` is a placeholder. + fn report_erroneous_rvid_reaches_placeholder( + &mut self, + longer_fr: RegionVid, + error_vid: RegionVid, + ) { + use NllRegionVariableOrigin::*; + + let origin_longer = self.regioncx.definitions[longer_fr].origin; + + let Placeholder(placeholder) = origin_longer else { + bug!("Expected {longer_fr:?} to come from placeholder!"); + }; + + // FIXME: Is throwing away the existential region really the best here? + let error_region = match self.regioncx.definitions[error_vid].origin { + FreeRegion | Existential { .. 
} => None, + Placeholder(other_placeholder) => Some(other_placeholder), + }; + + // Find the code to blame for the fact that `longer_fr` outlives `error_fr`. + let cause = + self.regioncx.best_blame_constraint(longer_fr, origin_longer, error_vid).0.cause; + + // FIXME these methods should have better names, and also probably not be this generic. + // FIXME note that we *throw away* the error element here! We probably want to + // thread it through the computation further down and use it, but there currently isn't + // anything there to receive it. + self.regioncx.universe_info(placeholder.universe).report_erroneous_element( + self, + placeholder, + error_region, + cause, + ); + } + /// Report an error because the universal region `fr` was required to outlive /// `outlived_fr` but it is not known to do so. For example: /// @@ -872,6 +885,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { for alias_ty in alias_tys { if alias_ty.span.desugaring_kind().is_some() { // Skip `async` desugaring `impl Future`. + continue; } if let TyKind::TraitObject(_, lt) = alias_ty.kind { if lt.kind == hir::LifetimeKind::ImplicitObjectLifetimeDefault { diff --git a/compiler/rustc_borrowck/src/region_infer/mod.rs b/compiler/rustc_borrowck/src/region_infer/mod.rs index 6ed70b39c5b7f..5cdda777723b3 100644 --- a/compiler/rustc_borrowck/src/region_infer/mod.rs +++ b/compiler/rustc_borrowck/src/region_infer/mod.rs @@ -1379,11 +1379,11 @@ impl<'tcx> RegionInferenceContext<'tcx> { .elements_contained_in(longer_fr_scc) .find(|e| *e != RegionElement::PlaceholderRegion(placeholder)) { + let illegally_outlived_r = self.region_from_element(longer_fr, &error_element); // Stop after the first error, it gets too noisy otherwise, and does not provide more information. - errors_buffer.push(RegionErrorKind::BoundUniversalRegionError { + errors_buffer.push(RegionErrorKind::PlaceholderOutlivesIllegalRegion { longer_fr, - error_element, - placeholder, + illegally_outlived_r, }); } else { debug!("check_bound_universal_region: all bounds satisfied"); @@ -1572,7 +1572,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { } /// Get the region outlived by `longer_fr` and live at `element`. 
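    // End-to-end shape of the new placeholder error path (editorial sketch; every
    // name below comes from the hunks above):
    //
    //     let illegally_outlived_r = self.region_from_element(longer_fr, &error_element);
    //     errors_buffer.push(RegionErrorKind::PlaceholderOutlivesIllegalRegion {
    //         longer_fr,
    //         illegally_outlived_r,
    //     });
    //     // ...which report_erroneous_rvid_reaches_placeholder() later handles by
    //     // recovering the placeholder from definitions[longer_fr].origin.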
- pub(crate) fn region_from_element( + fn region_from_element( &self, longer_fr: RegionVid, element: &RegionElement<'tcx>, diff --git a/compiler/rustc_builtin_macros/src/errors.rs b/compiler/rustc_builtin_macros/src/errors.rs index 1343c8ccf4a59..862ce3e62cc95 100644 --- a/compiler/rustc_builtin_macros/src/errors.rs +++ b/compiler/rustc_builtin_macros/src/errors.rs @@ -1002,20 +1002,6 @@ pub(crate) struct AsmUnsupportedClobberAbi { pub(crate) macro_name: &'static str, } -#[derive(Diagnostic)] -#[diag("`test_runner` argument must be a path")] -pub(crate) struct TestRunnerInvalid { - #[primary_span] - pub(crate) span: Span, -} - -#[derive(Diagnostic)] -#[diag("`#![test_runner(..)]` accepts exactly 1 argument")] -pub(crate) struct TestRunnerNargs { - #[primary_span] - pub(crate) span: Span, -} - #[derive(Diagnostic)] #[diag("expected token: `,`")] pub(crate) struct ExpectedCommaInList { diff --git a/compiler/rustc_builtin_macros/src/test_harness.rs b/compiler/rustc_builtin_macros/src/test_harness.rs index 8d6969b0ca125..b5d63511fce9b 100644 --- a/compiler/rustc_builtin_macros/src/test_harness.rs +++ b/compiler/rustc_builtin_macros/src/test_harness.rs @@ -8,10 +8,11 @@ use rustc_ast::entry::EntryPointType; use rustc_ast::mut_visit::*; use rustc_ast::visit::Visitor; use rustc_ast::{ModKind, attr}; -use rustc_errors::DiagCtxtHandle; +use rustc_attr_parsing::AttributeParser; use rustc_expand::base::{ExtCtxt, ResolverExpand}; use rustc_expand::expand::{AstFragment, ExpansionConfig}; use rustc_feature::Features; +use rustc_hir::attrs::AttributeKind; use rustc_session::Session; use rustc_session::lint::builtin::UNNAMEABLE_TEST_ITEMS; use rustc_span::hygiene::{AstPass, SyntaxContext, Transparency}; @@ -60,7 +61,7 @@ pub fn inject( // Do this here so that the test_runner crate attribute gets marked as used // even in non-test builds - let test_runner = get_test_runner(dcx, krate); + let test_runner = get_test_runner(sess, features, krate); if sess.is_test_crate() { let panic_strategy = match (panic_strategy, sess.opts.unstable_opts.panic_abort_tests) { @@ -386,20 +387,16 @@ fn get_test_name(i: &ast::Item) -> Option { attr::first_attr_value_str_by_name(&i.attrs, sym::rustc_test_marker) } -fn get_test_runner(dcx: DiagCtxtHandle<'_>, krate: &ast::Crate) -> Option { - let test_attr = attr::find_by_name(&krate.attrs, sym::test_runner)?; - let meta_list = test_attr.meta_item_list()?; - let span = test_attr.span; - match &*meta_list { - [single] => match single.meta_item() { - Some(meta_item) if meta_item.is_word() => return Some(meta_item.path.clone()), - _ => { - dcx.emit_err(errors::TestRunnerInvalid { span }); - } - }, - _ => { - dcx.emit_err(errors::TestRunnerNargs { span }); - } +fn get_test_runner(sess: &Session, features: &Features, krate: &ast::Crate) -> Option { + match AttributeParser::parse_limited( + sess, + &krate.attrs, + sym::test_runner, + krate.spans.inner_span, + krate.id, + Some(features), + ) { + Some(rustc_hir::Attribute::Parsed(AttributeKind::TestRunner(path))) => Some(path), + _ => None, } - None } diff --git a/compiler/rustc_codegen_cranelift/src/driver/aot.rs b/compiler/rustc_codegen_cranelift/src/driver/aot.rs index 760e23f2171bc..fc5c634d95709 100644 --- a/compiler/rustc_codegen_cranelift/src/driver/aot.rs +++ b/compiler/rustc_codegen_cranelift/src/driver/aot.rs @@ -2,29 +2,26 @@ //! standalone executable. 
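//! A rough sketch of how final artifacts are produced after this change
//! (names taken from the hunks below; not a literal excerpt):
//!
//!     let compiled_modules = CompiledModules { modules, allocator_module };
//!     produce_final_output_artifacts(sess, &compiled_modules, outputs);
//!     // ...the pieces are then moved back into `CodegenResults` for linking.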
use std::env; -use std::fs::{self, File}; +use std::fs::File; use std::io::BufWriter; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use std::sync::Arc; use std::thread::JoinHandle; use cranelift_object::{ObjectBuilder, ObjectModule}; use rustc_codegen_ssa::assert_module_sources::CguReuse; -use rustc_codegen_ssa::back::link::ensure_removed; +use rustc_codegen_ssa::back::write::{CompiledModules, produce_final_output_artifacts}; use rustc_codegen_ssa::base::determine_cgu_reuse; -use rustc_codegen_ssa::{ - CodegenResults, CompiledModule, CrateInfo, ModuleKind, errors as ssa_errors, -}; +use rustc_codegen_ssa::{CodegenResults, CompiledModule, CrateInfo, ModuleKind}; use rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_data_structures::sync::{IntoDynSyncSend, par_map}; use rustc_hir::attrs::Linkage as RLinkage; -use rustc_metadata::fs::copy_to_stdout; use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::mir::mono::{CodegenUnit, MonoItem, MonoItemData, Visibility}; use rustc_session::Session; -use rustc_session::config::{OutFileName, OutputFilenames, OutputType}; +use rustc_session::config::{OutputFilenames, OutputType}; use crate::base::CodegenedFunction; use crate::concurrency_limiter::{ConcurrencyLimiter, ConcurrencyLimiterToken}; @@ -125,201 +122,20 @@ impl OngoingCodegen { sess.dcx().abort_if_errors(); - let codegen_results = CodegenResults { - modules, - allocator_module: self.allocator_module, - crate_info: self.crate_info, - }; + let compiled_modules = CompiledModules { modules, allocator_module: self.allocator_module }; - produce_final_output_artifacts(sess, &codegen_results, outputs); + produce_final_output_artifacts(sess, &compiled_modules, outputs); - (codegen_results, work_products) - } -} - -// Adapted from https://github.com/rust-lang/rust/blob/73476d49904751f8d90ce904e16dfbc278083d2c/compiler/rustc_codegen_ssa/src/back/write.rs#L547C1-L706C2 -fn produce_final_output_artifacts( - sess: &Session, - codegen_results: &CodegenResults, - crate_output: &OutputFilenames, -) { - let user_wants_bitcode = false; - let mut user_wants_objects = false; - - // Produce final compile outputs. - let copy_gracefully = |from: &Path, to: &OutFileName| match to { - OutFileName::Stdout => { - if let Err(e) = copy_to_stdout(from) { - sess.dcx().emit_err(ssa_errors::CopyPath::new(from, to.as_path(), e)); - } - } - OutFileName::Real(path) => { - if let Err(e) = fs::copy(from, path) { - sess.dcx().emit_err(ssa_errors::CopyPath::new(from, path, e)); - } - } - }; - - let copy_if_one_unit = |output_type: OutputType, keep_numbered: bool| { - if codegen_results.modules.len() == 1 { - // 1) Only one codegen unit. In this case it's no difficulty - // to copy `foo.0.x` to `foo.x`. - let path = crate_output.temp_path_for_cgu( - output_type, - &codegen_results.modules[0].name, - sess.invocation_temp.as_deref(), - ); - let output = crate_output.path(output_type); - if !output_type.is_text_output() && output.is_tty() { - sess.dcx() - .emit_err(ssa_errors::BinaryOutputToTty { shorthand: output_type.shorthand() }); - } else { - copy_gracefully(&path, &output); - } - if !sess.opts.cg.save_temps && !keep_numbered { - // The user just wants `foo.x`, not `foo.#module-name#.x`. 
- ensure_removed(sess.dcx(), &path); - } - } else { - if crate_output.outputs.contains_explicit_name(&output_type) { - // 2) Multiple codegen units, with `--emit foo=some_name`. We have - // no good solution for this case, so warn the user. - sess.dcx() - .emit_warn(ssa_errors::IgnoringEmitPath { extension: output_type.extension() }); - } else if crate_output.single_output_file.is_some() { - // 3) Multiple codegen units, with `-o some_name`. We have - // no good solution for this case, so warn the user. - sess.dcx() - .emit_warn(ssa_errors::IgnoringOutput { extension: output_type.extension() }); - } else { - // 4) Multiple codegen units, but no explicit name. We - // just leave the `foo.0.x` files in place. - // (We don't have to do any work in this case.) - } - } - }; + ( + CodegenResults { + crate_info: self.crate_info, - // Flag to indicate whether the user explicitly requested bitcode. - // Otherwise, we produced it only as a temporary output, and will need - // to get rid of it. - for output_type in crate_output.outputs.keys() { - match *output_type { - OutputType::Bitcode | OutputType::ThinLinkBitcode => { - // Cranelift doesn't have bitcode - // user_wants_bitcode = true; - // // Copy to .bc, but always keep the .0.bc. There is a later - // // check to figure out if we should delete .0.bc files, or keep - // // them for making an rlib. - // copy_if_one_unit(OutputType::Bitcode, true); - } - OutputType::LlvmAssembly => { - // Cranelift IR text already emitted during codegen - // copy_if_one_unit(OutputType::LlvmAssembly, false); - } - OutputType::Assembly => { - // Currently no support for emitting raw assembly files - // copy_if_one_unit(OutputType::Assembly, false); - } - OutputType::Object => { - user_wants_objects = true; - copy_if_one_unit(OutputType::Object, true); - } - OutputType::Mir | OutputType::Metadata | OutputType::Exe | OutputType::DepInfo => {} - } + modules: compiled_modules.modules, + allocator_module: compiled_modules.allocator_module, + }, + work_products, + ) } - - // Clean up unwanted temporary files. - - // We create the following files by default: - // - #crate#.#module-name#.bc - // - #crate#.#module-name#.o - // - #crate#.crate.metadata.bc - // - #crate#.crate.metadata.o - // - #crate#.o (linked from crate.##.o) - // - #crate#.bc (copied from crate.##.bc) - // We may create additional files if requested by the user (through - // `-C save-temps` or `--emit=` flags). - - if !sess.opts.cg.save_temps { - // Remove the temporary .#module-name#.o objects. If the user didn't - // explicitly request bitcode (with --emit=bc), and the bitcode is not - // needed for building an rlib, then we must remove .#module-name#.bc as - // well. - - // Specific rules for keeping .#module-name#.bc: - // - If the user requested bitcode (`user_wants_bitcode`), and - // codegen_units > 1, then keep it. - // - If the user requested bitcode but codegen_units == 1, then we - // can toss .#module-name#.bc because we copied it to .bc earlier. - // - If we're not building an rlib and the user didn't request - // bitcode, then delete .#module-name#.bc. - // If you change how this works, also update back::link::link_rlib, - // where .#module-name#.bc files are (maybe) deleted after making an - // rlib. 
- let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe); - - let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units().as_usize() > 1; - - let keep_numbered_objects = - needs_crate_object || (user_wants_objects && sess.codegen_units().as_usize() > 1); - - for module in codegen_results.modules.iter() { - if let Some(ref path) = module.object { - if !keep_numbered_objects { - ensure_removed(sess.dcx(), path); - } - } - - if let Some(ref path) = module.dwarf_object { - if !keep_numbered_objects { - ensure_removed(sess.dcx(), path); - } - } - - if let Some(ref path) = module.bytecode { - if !keep_numbered_bitcode { - ensure_removed(sess.dcx(), path); - } - } - } - - if !user_wants_bitcode { - if let Some(ref allocator_module) = codegen_results.allocator_module { - if let Some(ref path) = allocator_module.bytecode { - ensure_removed(sess.dcx(), path); - } - } - } - } - - if sess.opts.json_artifact_notifications { - if codegen_results.modules.len() == 1 { - codegen_results.modules[0].for_each_output(|_path, ty| { - if sess.opts.output_types.contains_key(&ty) { - let descr = ty.shorthand(); - // for single cgu file is renamed to drop cgu specific suffix - // so we regenerate it the same way - let path = crate_output.path(ty); - sess.dcx().emit_artifact_notification(path.as_path(), descr); - } - }); - } else { - for module in &codegen_results.modules { - module.for_each_output(|path, ty| { - if sess.opts.output_types.contains_key(&ty) { - let descr = ty.shorthand(); - sess.dcx().emit_artifact_notification(path, descr); - } - }); - } - } - } - - // We leave the following files around by default: - // - #crate#.o - // - #crate#.crate.metadata.o - // - #crate#.bc - // These are used in linking steps and will be cleaned up afterward. } fn make_module(sess: &Session, name: String) -> UnwindModule { diff --git a/compiler/rustc_codegen_cranelift/src/lib.rs b/compiler/rustc_codegen_cranelift/src/lib.rs index a49dc9be34583..e7daf57de0f03 100644 --- a/compiler/rustc_codegen_cranelift/src/lib.rs +++ b/compiler/rustc_codegen_cranelift/src/lib.rs @@ -23,7 +23,6 @@ extern crate rustc_hir; extern crate rustc_incremental; extern crate rustc_index; extern crate rustc_log; -extern crate rustc_metadata; extern crate rustc_session; extern crate rustc_span; extern crate rustc_symbol_mangling; diff --git a/compiler/rustc_codegen_gcc/src/back/lto.rs b/compiler/rustc_codegen_gcc/src/back/lto.rs index c93a2e8f8da52..dda777a540027 100644 --- a/compiler/rustc_codegen_gcc/src/back/lto.rs +++ b/compiler/rustc_codegen_gcc/src/back/lto.rs @@ -50,7 +50,7 @@ struct LtoData { } fn prepare_lto( - cgcx: &CodegenContext, + cgcx: &CodegenContext, each_linked_rlib_for_lto: &[PathBuf], dcx: DiagCtxtHandle<'_>, ) -> LtoData { @@ -111,7 +111,7 @@ fn save_as_file(obj: &[u8], path: &Path) -> Result<(), LtoBitcodeFromRlib> { /// Performs fat LTO by merging all modules into a single one and returning it /// for further optimization. pub(crate) fn run_fat( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: &SharedEmitter, each_linked_rlib_for_lto: &[PathBuf], modules: Vec>, @@ -132,7 +132,7 @@ pub(crate) fn run_fat( } fn fat_lto( - cgcx: &CodegenContext, + cgcx: &CodegenContext, _dcx: DiagCtxtHandle<'_>, modules: Vec>, mut serialized_modules: Vec<(SerializedModule, CString)>, @@ -283,7 +283,7 @@ impl ModuleBufferMethods for ModuleBuffer { /// lists, one of the modules that need optimization and another for modules that /// can simply be copied over from the incr. comp. cache. 
pub(crate) fn run_thin( - cgcx: &CodegenContext, + cgcx: &CodegenContext, dcx: DiagCtxtHandle<'_>, each_linked_rlib_for_lto: &[PathBuf], modules: Vec<(String, ThinBuffer)>, @@ -345,7 +345,7 @@ pub(crate) fn prepare_thin(module: ModuleCodegen) -> (String, ThinBu /// all of the `LtoModuleCodegen` units returned below and destroyed once /// they all go out of scope. fn thin_lto( - cgcx: &CodegenContext, + cgcx: &CodegenContext, _dcx: DiagCtxtHandle<'_>, modules: Vec<(String, ThinBuffer)>, serialized_modules: Vec<(SerializedModule, CString)>, @@ -520,11 +520,9 @@ fn thin_lto( pub fn optimize_thin_module( thin_module: ThinModule, - _cgcx: &CodegenContext, + _cgcx: &CodegenContext, ) -> ModuleCodegen { //let module_name = &thin_module.shared.module_names[thin_module.idx]; - /*let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, module_name.to_str().unwrap()); - let tm = (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(&dcx, e))?;*/ // Right now the implementation we've got only works over serialized // modules, so we create a fresh new LLVM context and parse the module diff --git a/compiler/rustc_codegen_gcc/src/back/write.rs b/compiler/rustc_codegen_gcc/src/back/write.rs index b6223c5be370a..5e96447234758 100644 --- a/compiler/rustc_codegen_gcc/src/back/write.rs +++ b/compiler/rustc_codegen_gcc/src/back/write.rs @@ -14,10 +14,10 @@ use rustc_target::spec::SplitDebuginfo; use crate::base::add_pic_option; use crate::errors::CopyBitcode; -use crate::{GccCodegenBackend, GccContext, LtoMode}; +use crate::{GccContext, LtoMode}; pub(crate) fn codegen( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: &SharedEmitter, module: ModuleCodegen, config: &ModuleConfig, @@ -227,7 +227,7 @@ pub(crate) fn codegen( } pub(crate) fn save_temp_bitcode( - cgcx: &CodegenContext, + cgcx: &CodegenContext, _module: &ModuleCodegen, _name: &str, ) { diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs index 553e4d3d2fe09..68939c9476d61 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs @@ -22,13 +22,18 @@ use rustc_codegen_ssa::traits::{ ArgAbiBuilderMethods, BaseTypeCodegenMethods, BuilderMethods, ConstCodegenMethods, IntrinsicCallBuilderMethods, LayoutTypeCodegenMethods, }; +use rustc_data_structures::fx::FxHashSet; use rustc_middle::bug; -use rustc_middle::ty::layout::{FnAbiOf, LayoutOf}; +#[cfg(feature = "master")] +use rustc_middle::ty::layout::FnAbiOf; +use rustc_middle::ty::layout::LayoutOf; use rustc_middle::ty::{self, Instance, Ty}; use rustc_span::{Span, Symbol, sym}; use rustc_target::callconv::{ArgAbi, PassMode}; -use crate::abi::{FnAbiGccExt, GccType}; +#[cfg(feature = "master")] +use crate::abi::FnAbiGccExt; +use crate::abi::GccType; use crate::builder::Builder; use crate::common::{SignType, TypeReflection}; use crate::context::CodegenCx; @@ -617,8 +622,6 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc *func } else { self.linkage.set(FunctionType::Extern); - let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty()); - let fn_ty = fn_abi.gcc_type(self); let func = match sym { "llvm.fma.f16" => { @@ -631,13 +634,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc self.intrinsics.borrow_mut().insert(sym.to_string(), func); - self.on_stack_function_params - .borrow_mut() - .insert(func, fn_ty.on_stack_param_indices); - #[cfg(feature = "master")] - for fn_attr in 
fn_ty.fn_attributes { - func.add_attribute(fn_attr); - } + self.on_stack_function_params.borrow_mut().insert(func, FxHashSet::default()); crate::attributes::from_fn_attrs(self, func, instance); diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs index cc88fd02435e5..d490650c37f76 100644 --- a/compiler/rustc_codegen_gcc/src/lib.rs +++ b/compiler/rustc_codegen_gcc/src/lib.rs @@ -374,7 +374,7 @@ impl ExtraBackendMethods for GccCodegenBackend { _features: &[String], ) -> TargetMachineFactoryFn { // TODO(antoyo): set opt level. - Arc::new(|_| Ok(())) + Arc::new(|_, _| ()) } } @@ -421,14 +421,14 @@ unsafe impl Sync for SyncContext {} impl WriteBackendMethods for GccCodegenBackend { type Module = GccContext; type TargetMachine = (); - type TargetMachineError = (); type ModuleBuffer = ModuleBuffer; type ThinData = ThinData; type ThinBuffer = ThinBuffer; fn run_and_optimize_fat_lto( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: &SharedEmitter, + _tm_factory: TargetMachineFactoryFn, // FIXME(bjorn3): Limit LTO exports to these symbols _exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], @@ -438,7 +438,7 @@ impl WriteBackendMethods for GccCodegenBackend { } fn run_thin_lto( - cgcx: &CodegenContext, + cgcx: &CodegenContext, dcx: DiagCtxtHandle<'_>, // FIXME(bjorn3): Limit LTO exports to these symbols _exported_symbols_for_lto: &[String], @@ -458,7 +458,7 @@ impl WriteBackendMethods for GccCodegenBackend { } fn optimize( - _cgcx: &CodegenContext, + _cgcx: &CodegenContext, _shared_emitter: &SharedEmitter, module: &mut ModuleCodegen, config: &ModuleConfig, @@ -467,15 +467,16 @@ impl WriteBackendMethods for GccCodegenBackend { } fn optimize_thin( - cgcx: &CodegenContext, + cgcx: &CodegenContext, _shared_emitter: &SharedEmitter, + _tm_factory: TargetMachineFactoryFn, thin: ThinModule, ) -> ModuleCodegen { back::lto::optimize_thin_module(thin, cgcx) } fn codegen( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: &SharedEmitter, module: ModuleCodegen, config: &ModuleConfig, diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index 71327ed6d2d16..5bd856a3ac435 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -9,7 +9,9 @@ use std::{io, iter, slice}; use object::read::archive::ArchiveFile; use object::{Object, ObjectSection}; use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule, ThinShared}; -use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, SharedEmitter}; +use rustc_codegen_ssa::back::write::{ + CodegenContext, FatLtoInput, SharedEmitter, TargetMachineFactoryFn, +}; use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file}; use rustc_data_structures::fx::FxHashMap; @@ -33,7 +35,7 @@ use crate::{LlvmCodegenBackend, ModuleLlvm}; const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin"; fn prepare_lto( - cgcx: &CodegenContext, + cgcx: &CodegenContext, exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], dcx: DiagCtxtHandle<'_>, @@ -123,7 +125,7 @@ fn prepare_lto( fn get_bitcode_slice_from_object_data<'a>( obj: &'a [u8], - cgcx: &CodegenContext, + cgcx: &CodegenContext, ) -> Result<&'a [u8], LtoBitcodeFromRlib> { // We're about to assume the data here is an object file with sections, but if it's raw LLVM IR // that won't work. 
Fortunately, if that's what we have we can just return the object directly, @@ -149,8 +151,9 @@ fn get_bitcode_slice_from_object_data<'a>( /// Performs fat LTO by merging all modules into a single one and returning it /// for further optimization. pub(crate) fn run_fat( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: &SharedEmitter, + tm_factory: TargetMachineFactoryFn, exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], modules: Vec>, @@ -161,14 +164,22 @@ pub(crate) fn run_fat( prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx); let symbols_below_threshold = symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::>(); - fat_lto(cgcx, dcx, shared_emitter, modules, upstream_modules, &symbols_below_threshold) + fat_lto( + cgcx, + dcx, + shared_emitter, + tm_factory, + modules, + upstream_modules, + &symbols_below_threshold, + ) } /// Performs thin LTO by performing necessary global analysis and returning two /// lists, one of the modules that need optimization and another for modules that /// can simply be copied over from the incr. comp. cache. pub(crate) fn run_thin( - cgcx: &CodegenContext, + cgcx: &CodegenContext, dcx: DiagCtxtHandle<'_>, exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], @@ -195,9 +206,10 @@ pub(crate) fn prepare_thin(module: ModuleCodegen) -> (String, ThinBu } fn fat_lto( - cgcx: &CodegenContext, + cgcx: &CodegenContext, dcx: DiagCtxtHandle<'_>, shared_emitter: &SharedEmitter, + tm_factory: TargetMachineFactoryFn, modules: Vec>, mut serialized_modules: Vec<(SerializedModule, CString)>, symbols_below_threshold: &[*const libc::c_char], @@ -252,7 +264,7 @@ fn fat_lto( assert!(!serialized_modules.is_empty(), "must have at least one serialized module"); let (buffer, name) = serialized_modules.remove(0); info!("no in-memory regular modules to choose from, parsing {:?}", name); - let llvm_module = ModuleLlvm::parse(cgcx, &name, buffer.data(), dcx); + let llvm_module = ModuleLlvm::parse(cgcx, tm_factory, &name, buffer.data(), dcx); ModuleCodegen::new_regular(name.into_string().unwrap(), llvm_module) } }; @@ -381,7 +393,7 @@ impl Drop for Linker<'_> { /// all of the `LtoModuleCodegen` units returned below and destroyed once /// they all go out of scope. fn thin_lto( - cgcx: &CodegenContext, + cgcx: &CodegenContext, dcx: DiagCtxtHandle<'_>, modules: Vec<(String, ThinBuffer)>, serialized_modules: Vec<(SerializedModule, CString)>, @@ -585,7 +597,7 @@ pub(crate) fn enable_autodiff_settings(ad: &[config::AutoDiff]) { } pub(crate) fn run_pass_manager( - cgcx: &CodegenContext, + cgcx: &CodegenContext, dcx: DiagCtxtHandle<'_>, module: &mut ModuleCodegen, thin: bool, @@ -726,8 +738,9 @@ impl Drop for ThinBuffer { } pub(crate) fn optimize_thin_module( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: &SharedEmitter, + tm_factory: TargetMachineFactoryFn, thin_module: ThinModule, ) -> ModuleCodegen { let dcx = DiagCtxt::new(Box::new(shared_emitter.clone())); @@ -740,7 +753,7 @@ pub(crate) fn optimize_thin_module( // into that context. One day, however, we may do this for upstream // crates but for locally codegened modules we may be able to reuse // that LLVM Context and Module. 
- let module_llvm = ModuleLlvm::parse(cgcx, module_name, thin_module.data(), dcx); + let module_llvm = ModuleLlvm::parse(cgcx, tm_factory, module_name, thin_module.data(), dcx); let mut module = ModuleCodegen::new_regular(thin_module.name(), module_llvm); // Given that the newly created module lacks a thinlto buffer for embedding, we need to re-add it here. if cgcx.module_config.embed_bitcode() { diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index fb07794e1bba3..2bb5b5db5e485 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -38,8 +38,8 @@ use crate::builder::SBuilder; use crate::builder::gpu_offload::scalar_width; use crate::common::AsCCharPtr; use crate::errors::{ - CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, UnknownCompression, - WithLlvmError, WriteBytecode, + CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, ParseTargetMachineConfig, + UnknownCompression, WithLlvmError, WriteBytecode, }; use crate::llvm::diagnostic::OptimizationDiagnosticKind::*; use crate::llvm::{self, DiagnosticInfo}; @@ -111,8 +111,7 @@ pub(crate) fn create_informational_target_machine( // Can't use query system here quite yet because this function is invoked before the query // system/tcx is set up. let features = llvm_util::global_llvm_features(sess, only_base_features); - target_machine_factory(sess, config::OptLevel::No, &features)(config) - .unwrap_or_else(|err| llvm_err(sess.dcx(), err)) + target_machine_factory(sess, config::OptLevel::No, &features)(sess.dcx(), config) } pub(crate) fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTargetMachine { @@ -138,8 +137,7 @@ pub(crate) fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTar tcx.sess, tcx.backend_optimization_level(()), tcx.global_backend_features(()), - )(config) - .unwrap_or_else(|err| llvm_err(tcx.dcx(), err)) + )(tcx.dcx(), config) } fn to_llvm_opt_settings(cfg: config::OptLevel) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize) { @@ -278,7 +276,7 @@ pub(crate) fn target_machine_factory( let large_data_threshold = sess.opts.unstable_opts.large_data_threshold.unwrap_or(0); let prof = SelfProfilerRef::clone(&sess.prof); - Arc::new(move |config: TargetMachineFactoryConfig| { + Arc::new(move |dcx: DiagCtxtHandle<'_>, config: TargetMachineFactoryConfig| { // Self-profile timer for invoking a factory to create a target machine. 
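        // Sketch of the factory contract this closure now satisfies (editorial note;
        // the alias lives in rustc_codegen_ssa::back::write and is reconstructed
        // here, so treat the exact spelling as an assumption):
        //
        //     pub type TargetMachineFactoryFn<B> = Arc<
        //         dyn Fn(DiagCtxtHandle<'_>, TargetMachineFactoryConfig) -> B::TargetMachine
        //             + Send + Sync,
        //     >;
        //
        // There is no `Result` any more: failures are reported directly via
        // `dcx.emit_fatal(ParseTargetMachineConfig(err))` at the end of this closure.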
let _prof_timer = prof.generic_activity("target_machine_factory_inner"); @@ -320,11 +318,12 @@ pub(crate) fn target_machine_factory( use_wasm_eh, large_data_threshold, ) + .unwrap_or_else(|err| dcx.emit_fatal(ParseTargetMachineConfig(err))) }) } pub(crate) fn save_temp_bitcode( - cgcx: &CodegenContext, + cgcx: &CodegenContext, module: &ModuleCodegen, name: &str, ) { @@ -359,14 +358,14 @@ pub(crate) enum CodegenDiagnosticsStage { } pub(crate) struct DiagnosticHandlers<'a> { - data: *mut (&'a CodegenContext, &'a SharedEmitter), + data: *mut (&'a CodegenContext, &'a SharedEmitter), llcx: &'a llvm::Context, old_handler: Option<&'a llvm::DiagnosticHandler>, } impl<'a> DiagnosticHandlers<'a> { pub(crate) fn new( - cgcx: &'a CodegenContext, + cgcx: &'a CodegenContext, shared_emitter: &'a SharedEmitter, llcx: &'a llvm::Context, module: &ModuleCodegen, @@ -432,7 +431,7 @@ impl<'a> Drop for DiagnosticHandlers<'a> { } fn report_inline_asm( - cgcx: &CodegenContext, + cgcx: &CodegenContext, msg: String, level: llvm::DiagnosticLevel, cookie: u64, @@ -464,8 +463,7 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void if user.is_null() { return; } - let (cgcx, shared_emitter) = - unsafe { *(user as *const (&CodegenContext, &SharedEmitter)) }; + let (cgcx, shared_emitter) = unsafe { *(user as *const (&CodegenContext, &SharedEmitter)) }; let dcx = DiagCtxt::new(Box::new(shared_emitter.clone())); let dcx = dcx.handle(); @@ -561,7 +559,7 @@ pub(crate) enum AutodiffStage { } pub(crate) unsafe fn llvm_optimize( - cgcx: &CodegenContext, + cgcx: &CodegenContext, dcx: DiagCtxtHandle<'_>, module: &ModuleCodegen, thin_lto_buffer: Option<&mut *mut llvm::ThinLTOBuffer>, @@ -893,7 +891,7 @@ pub(crate) unsafe fn llvm_optimize( // Unsafe due to LLVM calls. pub(crate) fn optimize( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: &SharedEmitter, module: &mut ModuleCodegen, config: &ModuleConfig, @@ -984,7 +982,7 @@ pub(crate) fn optimize( } pub(crate) fn codegen( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: &SharedEmitter, module: ModuleCodegen, config: &ModuleConfig, @@ -1239,7 +1237,7 @@ fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data: asm } -pub(crate) fn bitcode_section_name(cgcx: &CodegenContext) -> &'static CStr { +pub(crate) fn bitcode_section_name(cgcx: &CodegenContext) -> &'static CStr { if cgcx.target_is_like_darwin { c"__LLVM,__bitcode" } else if cgcx.target_is_like_aix { @@ -1251,7 +1249,7 @@ pub(crate) fn bitcode_section_name(cgcx: &CodegenContext) -> /// Embed the bitcode of an LLVM module for LTO in the LLVM module itself. fn embed_bitcode( - cgcx: &CodegenContext, + cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::Module, bitcode: &[u8], @@ -1335,11 +1333,7 @@ fn embed_bitcode( // when using MSVC linker. We do this only for data, as linker can fix up // code references on its own. 
// See #26591, #27438 -fn create_msvc_imps( - cgcx: &CodegenContext, - llcx: &llvm::Context, - llmod: &llvm::Module, -) { +fn create_msvc_imps(cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::Module) { if !cgcx.msvc_imps_needed { return; } diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs index 20eba352bc41e..37ca2a9597451 100644 --- a/compiler/rustc_codegen_llvm/src/errors.rs +++ b/compiler/rustc_codegen_llvm/src/errors.rs @@ -94,15 +94,13 @@ pub(crate) struct LtoBitcodeFromRlib { } #[derive(Diagnostic)] -pub enum LlvmError<'a> { +pub(crate) enum LlvmError<'a> { #[diag("could not write output to {$path}")] WriteOutput { path: &'a Path }, #[diag("could not create LLVM TargetMachine for triple: {$triple}")] CreateTargetMachine { triple: SmallCStr }, #[diag("failed to run LLVM passes")] RunLlvmPasses, - #[diag("failed to serialize module {$name}")] - SerializeModule { name: &'a str }, #[diag("failed to write LLVM IR to {$path}")] WriteIr { path: &'a Path }, #[diag("failed to prepare thin LTO context")] @@ -115,8 +113,6 @@ pub enum LlvmError<'a> { PrepareThinLtoModule, #[diag("failed to parse bitcode for LTO module")] ParseBitcode, - #[diag("failed to prepare autodiff: src: {$src}, target: {$target}, {$error}")] - PrepareAutoDiff { src: String, target: String, error: String }, } pub(crate) struct WithLlvmError<'a>(pub LlvmError<'a>, pub String); @@ -130,9 +126,6 @@ impl Diagnostic<'_, G> for WithLlvmError<'_> { "could not create LLVM TargetMachine for triple: {$triple}: {$llvm_err}" ), RunLlvmPasses => inline_fluent!("failed to run LLVM passes: {$llvm_err}"), - SerializeModule { .. } => { - inline_fluent!("failed to serialize module {$name}: {$llvm_err}") - } WriteIr { .. } => inline_fluent!("failed to write LLVM IR to {$path}: {$llvm_err}"), PrepareThinLtoContext => { inline_fluent!("failed to prepare thin LTO context: {$llvm_err}") @@ -147,9 +140,6 @@ impl Diagnostic<'_, G> for WithLlvmError<'_> { inline_fluent!("failed to prepare thin LTO module: {$llvm_err}") } ParseBitcode => inline_fluent!("failed to parse bitcode for LTO module: {$llvm_err}"), - PrepareAutoDiff { .. 
} => inline_fluent!( - "failed to prepare autodiff: {$llvm_err}, src: {$src}, target: {$target}, {$error}" - ), }; self.0 .into_diag(dcx, level) diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index e035f0809d685..c0e7c18b4d470 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -646,10 +646,32 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { ) -> Self::Value { let tcx = self.tcx(); - // FIXME remove usage of fn_abi - let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty()); - assert!(!fn_abi.ret.is_indirect()); - let fn_ty = fn_abi.llvm_type(self); + let fn_ty = instance.ty(tcx, self.typing_env()); + let fn_sig = match *fn_ty.kind() { + ty::FnDef(def_id, args) => { + tcx.instantiate_bound_regions_with_erased(tcx.fn_sig(def_id).instantiate(tcx, args)) + } + _ => unreachable!(), + }; + assert!(!fn_sig.c_variadic); + + let ret_layout = self.layout_of(fn_sig.output()); + let llreturn_ty = if ret_layout.is_zst() { + self.type_void() + } else { + ret_layout.immediate_llvm_type(self) + }; + + let mut llargument_tys = Vec::with_capacity(fn_sig.inputs().len()); + for &arg in fn_sig.inputs() { + let arg_layout = self.layout_of(arg); + if arg_layout.is_zst() { + continue; + } + llargument_tys.push(arg_layout.immediate_llvm_type(self)); + } + + let fn_ty = self.type_func(&llargument_tys, llreturn_ty); let fn_ptr = if let Some(&llfn) = self.intrinsic_instances.borrow().get(&instance) { llfn @@ -665,12 +687,11 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { let llfn = declare_raw_fn( self, sym, - fn_abi.llvm_cconv(self), + llvm::CCallConv, llvm::UnnamedAddr::Global, llvm::Visibility::Default, fn_ty, ); - fn_abi.apply_attrs_llfn(self, llfn, Some(instance)); llfn }; diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index bf3ec1f393302..8ed5380980470 100644 --- a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -25,7 +25,6 @@ use std::path::PathBuf; use back::owned_target_machine::OwnedTargetMachine; use back::write::{create_informational_target_machine, create_target_machine}; use context::SimpleCx; -use errors::ParseTargetMachineConfig; use llvm_util::target_config; use rustc_ast::expand::allocator::AllocatorMethod; use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule}; @@ -152,7 +151,6 @@ impl WriteBackendMethods for LlvmCodegenBackend { type Module = ModuleLlvm; type ModuleBuffer = back::lto::ModuleBuffer; type TargetMachine = OwnedTargetMachine; - type TargetMachineError = crate::errors::LlvmError<'static>; type ThinData = back::lto::ThinData; type ThinBuffer = back::lto::ThinBuffer; fn print_pass_timings(&self) { @@ -164,8 +162,9 @@ impl WriteBackendMethods for LlvmCodegenBackend { print!("{stats}"); } fn run_and_optimize_fat_lto( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: &SharedEmitter, + tm_factory: TargetMachineFactoryFn, exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], modules: Vec>, @@ -173,6 +172,7 @@ impl WriteBackendMethods for LlvmCodegenBackend { let mut module = back::lto::run_fat( cgcx, shared_emitter, + tm_factory, exported_symbols_for_lto, each_linked_rlib_for_lto, modules, @@ -185,7 +185,7 @@ impl WriteBackendMethods for LlvmCodegenBackend { module } fn run_thin_lto( - cgcx: &CodegenContext, + cgcx: &CodegenContext, dcx: DiagCtxtHandle<'_>, 
exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], @@ -202,7 +202,7 @@ impl WriteBackendMethods for LlvmCodegenBackend { ) } fn optimize( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: &SharedEmitter, module: &mut ModuleCodegen, config: &ModuleConfig, @@ -210,14 +210,15 @@ impl WriteBackendMethods for LlvmCodegenBackend { back::write::optimize(cgcx, shared_emitter, module, config) } fn optimize_thin( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: &SharedEmitter, + tm_factory: TargetMachineFactoryFn, thin: ThinModule, ) -> ModuleCodegen { - back::lto::optimize_thin_module(cgcx, shared_emitter, thin) + back::lto::optimize_thin_module(cgcx, shared_emitter, tm_factory, thin) } fn codegen( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: &SharedEmitter, module: ModuleCodegen, config: &ModuleConfig, @@ -440,22 +441,9 @@ impl ModuleLlvm { } } - fn tm_from_cgcx( - cgcx: &CodegenContext, - name: &str, - dcx: DiagCtxtHandle<'_>, - ) -> OwnedTargetMachine { - let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, name); - match (cgcx.tm_factory)(tm_factory_config) { - Ok(m) => m, - Err(e) => { - dcx.emit_fatal(ParseTargetMachineConfig(e)); - } - } - } - fn parse( - cgcx: &CodegenContext, + cgcx: &CodegenContext, + tm_factory: TargetMachineFactoryFn, name: &CStr, buffer: &[u8], dcx: DiagCtxtHandle<'_>, @@ -464,7 +452,7 @@ impl ModuleLlvm { let llcx = llvm::LLVMContextCreate(); llvm::LLVMContextSetDiscardValueNames(llcx, cgcx.fewer_names.to_llvm_bool()); let llmod_raw = back::lto::parse_module(llcx, name, buffer, dcx); - let tm = ModuleLlvm::tm_from_cgcx(cgcx, name.to_str().unwrap(), dcx); + let tm = tm_factory(dcx, TargetMachineFactoryConfig::new(cgcx, name.to_str().unwrap())); ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) } } diff --git a/compiler/rustc_codegen_ssa/src/back/lto.rs b/compiler/rustc_codegen_ssa/src/back/lto.rs index e3cf28acfa537..80b3b5a4d7c06 100644 --- a/compiler/rustc_codegen_ssa/src/back/lto.rs +++ b/compiler/rustc_codegen_ssa/src/back/lto.rs @@ -127,10 +127,7 @@ pub(super) fn exported_symbols_for_lto( symbols_below_threshold } -pub(super) fn check_lto_allowed( - cgcx: &CodegenContext, - dcx: DiagCtxtHandle<'_>, -) { +pub(super) fn check_lto_allowed(cgcx: &CodegenContext, dcx: DiagCtxtHandle<'_>) { if cgcx.lto == Lto::ThinLocal { // Crate local LTO is always allowed return; diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs index 444f7877689fc..c814f8db521cf 100644 --- a/compiler/rustc_codegen_ssa/src/back/write.rs +++ b/compiler/rustc_codegen_ssa/src/back/write.rs @@ -29,7 +29,8 @@ use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; use rustc_middle::ty::TyCtxt; use rustc_session::Session; use rustc_session::config::{ - self, CrateType, Lto, OutFileName, OutputFilenames, OutputType, Passes, SwitchWithOptPath, + self, CrateType, Lto, OptLevel, OutFileName, OutputFilenames, OutputType, Passes, + SwitchWithOptPath, }; use rustc_span::source_map::SourceMap; use rustc_span::{FileName, InnerSpan, Span, SpanData}; @@ -287,10 +288,7 @@ pub struct TargetMachineFactoryConfig { } impl TargetMachineFactoryConfig { - pub fn new( - cgcx: &CodegenContext, - module_name: &str, - ) -> TargetMachineFactoryConfig { + pub fn new(cgcx: &CodegenContext, module_name: &str) -> TargetMachineFactoryConfig { let split_dwarf_file = if cgcx.target_can_use_split_dwarf { cgcx.output_filenames.split_dwarf_path( cgcx.split_debuginfo, @@ -313,17 +311,16 @@ 
impl TargetMachineFactoryConfig { pub type TargetMachineFactoryFn = Arc< dyn Fn( + DiagCtxtHandle<'_>, TargetMachineFactoryConfig, - ) -> Result< - ::TargetMachine, - ::TargetMachineError, - > + Send + ) -> ::TargetMachine + + Send + Sync, >; /// Additional resources used by optimize_and_codegen (not module specific) #[derive(Clone)] -pub struct CodegenContext { +pub struct CodegenContext { // Resources needed when running LTO pub prof: SelfProfilerRef, pub lto: Lto, @@ -337,7 +334,8 @@ pub struct CodegenContext { pub output_filenames: Arc, pub invocation_temp: Option, pub module_config: Arc, - pub tm_factory: TargetMachineFactoryFn, + pub opt_level: OptLevel, + pub backend_features: Vec, pub msvc_imps_needed: bool, pub is_pe_coff: bool, pub target_can_use_split_dwarf: bool, @@ -364,7 +362,7 @@ pub struct CodegenContext { } fn generate_thin_lto_work( - cgcx: &CodegenContext, + cgcx: &CodegenContext, dcx: DiagCtxtHandle<'_>, exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], @@ -399,9 +397,9 @@ fn generate_thin_lto_work( .collect() } -struct CompiledModules { - modules: Vec, - allocator_module: Option, +pub struct CompiledModules { + pub modules: Vec, + pub allocator_module: Option, } enum MaybeLtoModules { @@ -410,7 +408,7 @@ enum MaybeLtoModules { allocator_module: Option, }, FatLto { - cgcx: CodegenContext, + cgcx: CodegenContext, exported_symbols_for_lto: Arc>, each_linked_rlib_file_for_lto: Vec, needs_fat_lto: Vec>, @@ -418,7 +416,7 @@ enum MaybeLtoModules { Vec<(SerializedModule<::ModuleBuffer>, WorkProduct)>, }, ThinLto { - cgcx: CodegenContext, + cgcx: CodegenContext, exported_symbols_for_lto: Arc>, each_linked_rlib_file_for_lto: Vec, needs_thin_lto: Vec<(String, ::ThinBuffer)>, @@ -534,7 +532,7 @@ fn copy_all_cgu_workproducts_to_incr_comp_cache_dir( work_products } -fn produce_final_output_artifacts( +pub fn produce_final_output_artifacts( sess: &Session, compiled_modules: &CompiledModules, crate_output: &OutputFilenames, @@ -842,7 +840,7 @@ pub(crate) fn compute_per_cgu_lto_type( } fn execute_optimize_work_item( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: SharedEmitter, mut module: ModuleCodegen, ) -> WorkItemResult { @@ -897,8 +895,8 @@ fn execute_optimize_work_item( } } -fn execute_copy_from_cache_work_item( - cgcx: &CodegenContext, +fn execute_copy_from_cache_work_item( + cgcx: &CodegenContext, shared_emitter: SharedEmitter, module: CachedModuleCodegen, ) -> CompiledModule { @@ -986,8 +984,9 @@ fn execute_copy_from_cache_work_item( } fn do_fat_lto( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: SharedEmitter, + tm_factory: TargetMachineFactoryFn, exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], mut needs_fat_lto: Vec>, @@ -1007,6 +1006,7 @@ fn do_fat_lto( let module = B::run_and_optimize_fat_lto( cgcx, &shared_emitter, + tm_factory, exported_symbols_for_lto, each_linked_rlib_for_lto, needs_fat_lto, @@ -1015,8 +1015,9 @@ fn do_fat_lto( } fn do_thin_lto<'a, B: ExtraBackendMethods>( - cgcx: &'a CodegenContext, + cgcx: &'a CodegenContext, shared_emitter: SharedEmitter, + tm_factory: TargetMachineFactoryFn, exported_symbols_for_lto: Arc>, each_linked_rlib_for_lto: Vec, needs_thin_lto: Vec<(String, ::ThinBuffer)>, @@ -1053,7 +1054,7 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>( // bunch of work items onto our queue to do LTO. This all // happens on the coordinator thread but it's very quick so // we don't worry about tokens. 
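        // How the target-machine factory reaches this loop after the change
        // (editorial sketch built from names in the surrounding hunks; the
        // `CodegenContext` itself now carries only `opt_level` and
        // `backend_features`, not the factory):
        //
        //     let tm_factory =
        //         backend.target_machine_factory(sess, cgcx.opt_level, &cgcx.backend_features);
        //     spawn_thin_lto_work(&cgcx, shared_emitter.clone(), Arc::clone(&tm_factory),
        //                         coordinator_send.clone(), item);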
- for (work, cost) in generate_thin_lto_work( + for (work, cost) in generate_thin_lto_work::( cgcx, dcx, &exported_symbols_for_lto, @@ -1097,7 +1098,13 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>( while used_token_count < tokens.len() + 1 && let Some((item, _)) = work_items.pop() { - spawn_thin_lto_work(&cgcx, shared_emitter.clone(), coordinator_send.clone(), item); + spawn_thin_lto_work( + &cgcx, + shared_emitter.clone(), + Arc::clone(&tm_factory), + coordinator_send.clone(), + item, + ); used_token_count += 1; } } else { @@ -1158,13 +1165,14 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>( } fn execute_thin_lto_work_item( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: SharedEmitter, + tm_factory: TargetMachineFactoryFn, module: lto::ThinModule, ) -> CompiledModule { let _timer = cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", module.name()); - let module = B::optimize_thin(cgcx, &shared_emitter, module); + let module = B::optimize_thin(cgcx, &shared_emitter, tm_factory, module); B::codegen(cgcx, &shared_emitter, module, &cgcx.module_config) } @@ -1292,8 +1300,9 @@ fn start_executing_work( }) .expect("failed to spawn helper thread"); - let ol = tcx.backend_optimization_level(()); - let backend_features = tcx.global_backend_features(()); + let opt_level = tcx.backend_optimization_level(()); + let backend_features = tcx.global_backend_features(()).clone(); + let tm_factory = backend.target_machine_factory(tcx.sess, opt_level, &backend_features); let remark_dir = if let Some(ref dir) = sess.opts.unstable_opts.remark_dir { let result = fs::create_dir_all(dir).and_then(|_| dir.canonicalize()); @@ -1305,7 +1314,7 @@ fn start_executing_work( None }; - let cgcx = CodegenContext:: { + let cgcx = CodegenContext { crate_types: tcx.crate_types().to_vec(), lto: sess.lto(), use_linker_plugin_lto: sess.opts.cg.linker_plugin_lto.enabled(), @@ -1320,7 +1329,8 @@ fn start_executing_work( incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()), output_filenames: Arc::clone(tcx.output_filenames(())), module_config: regular_config, - tm_factory: backend.target_machine_factory(tcx.sess, ol, backend_features), + opt_level, + backend_features, msvc_imps_needed: msvc_imps_needed(tcx), is_pe_coff: tcx.sess.target.is_like_windows, target_can_use_split_dwarf: tcx.sess.target_can_use_split_dwarf(), @@ -1776,9 +1786,10 @@ fn start_executing_work( assert!(needs_fat_lto.is_empty()); if cgcx.lto == Lto::ThinLocal { - compiled_modules.extend(do_thin_lto( + compiled_modules.extend(do_thin_lto::( &cgcx, shared_emitter.clone(), + tm_factory, exported_symbols_for_lto, each_linked_rlib_file_for_lto, needs_thin_lto, @@ -1872,7 +1883,7 @@ fn start_executing_work( pub(crate) struct WorkerFatalError; fn spawn_work<'a, B: ExtraBackendMethods>( - cgcx: &'a CodegenContext, + cgcx: &'a CodegenContext, shared_emitter: SharedEmitter, coordinator_send: Sender>, llvm_start_time: &mut Option>, @@ -1910,8 +1921,9 @@ fn spawn_work<'a, B: ExtraBackendMethods>( } fn spawn_thin_lto_work<'a, B: ExtraBackendMethods>( - cgcx: &'a CodegenContext, + cgcx: &'a CodegenContext, shared_emitter: SharedEmitter, + tm_factory: TargetMachineFactoryFn, coordinator_send: Sender, work: ThinLtoWorkItem, ) { @@ -1922,7 +1934,9 @@ fn spawn_thin_lto_work<'a, B: ExtraBackendMethods>( ThinLtoWorkItem::CopyPostLtoArtifacts(m) => { execute_copy_from_cache_work_item(&cgcx, shared_emitter, m) } - ThinLtoWorkItem::ThinLto(m) => execute_thin_lto_work_item(&cgcx, shared_emitter, m), + ThinLtoWorkItem::ThinLto(m) => { + 
execute_thin_lto_work_item(&cgcx, shared_emitter, tm_factory, m) + } })); let msg = match result { @@ -2159,34 +2173,52 @@ impl OngoingCodegen { each_linked_rlib_file_for_lto, needs_fat_lto, lto_import_only_modules, - } => CompiledModules { - modules: vec![do_fat_lto( - &cgcx, - shared_emitter, - &exported_symbols_for_lto, - &each_linked_rlib_file_for_lto, - needs_fat_lto, - lto_import_only_modules, - )], - allocator_module: None, - }, + } => { + let tm_factory = self.backend.target_machine_factory( + sess, + cgcx.opt_level, + &cgcx.backend_features, + ); + + CompiledModules { + modules: vec![do_fat_lto( + &cgcx, + shared_emitter, + tm_factory, + &exported_symbols_for_lto, + &each_linked_rlib_file_for_lto, + needs_fat_lto, + lto_import_only_modules, + )], + allocator_module: None, + } + } MaybeLtoModules::ThinLto { cgcx, exported_symbols_for_lto, each_linked_rlib_file_for_lto, needs_thin_lto, lto_import_only_modules, - } => CompiledModules { - modules: do_thin_lto( - &cgcx, - shared_emitter, - exported_symbols_for_lto, - each_linked_rlib_file_for_lto, - needs_thin_lto, - lto_import_only_modules, - ), - allocator_module: None, - }, + } => { + let tm_factory = self.backend.target_machine_factory( + sess, + cgcx.opt_level, + &cgcx.backend_features, + ); + + CompiledModules { + modules: do_thin_lto::( + &cgcx, + shared_emitter, + tm_factory, + exported_symbols_for_lto, + each_linked_rlib_file_for_lto, + needs_thin_lto, + lto_import_only_modules, + ), + allocator_module: None, + } + } }); shared_emitter_main.check(sess, true); diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs index 897b5f957e154..a50a1da7aaa84 100644 --- a/compiler/rustc_codegen_ssa/src/errors.rs +++ b/compiler/rustc_codegen_ssa/src/errors.rs @@ -130,14 +130,14 @@ pub(crate) struct CopyPathBuf { // Reports Paths using `Debug` implementation rather than Path's `Display` implementation. 
#[derive(Diagnostic)] #[diag("could not copy {$from} to {$to}: {$error}")] -pub struct CopyPath<'a> { +pub(crate) struct CopyPath<'a> { from: DebugArgPath<'a>, to: DebugArgPath<'a>, error: Error, } impl<'a> CopyPath<'a> { - pub fn new(from: &'a Path, to: &'a Path, error: Error) -> CopyPath<'a> { + pub(crate) fn new(from: &'a Path, to: &'a Path, error: Error) -> CopyPath<'a> { CopyPath { from: DebugArgPath(from), to: DebugArgPath(to), error } } } @@ -154,19 +154,19 @@ impl IntoDiagArg for DebugArgPath<'_> { #[diag( "option `-o` or `--emit` is used to write binary output type `{$shorthand}` to stdout, but stdout is a tty" )] -pub struct BinaryOutputToTty { +pub(crate) struct BinaryOutputToTty { pub shorthand: &'static str, } #[derive(Diagnostic)] #[diag("ignoring emit path because multiple .{$extension} files were produced")] -pub struct IgnoringEmitPath { +pub(crate) struct IgnoringEmitPath { pub extension: &'static str, } #[derive(Diagnostic)] #[diag("ignoring -o because multiple .{$extension} files were produced")] -pub struct IgnoringOutput { +pub(crate) struct IgnoringOutput { pub extension: &'static str, } diff --git a/compiler/rustc_codegen_ssa/src/traits/write.rs b/compiler/rustc_codegen_ssa/src/traits/write.rs index e1d23841118cb..0232ba39ac1f3 100644 --- a/compiler/rustc_codegen_ssa/src/traits/write.rs +++ b/compiler/rustc_codegen_ssa/src/traits/write.rs @@ -4,13 +4,14 @@ use rustc_errors::DiagCtxtHandle; use rustc_middle::dep_graph::WorkProduct; use crate::back::lto::{SerializedModule, ThinModule}; -use crate::back::write::{CodegenContext, FatLtoInput, ModuleConfig, SharedEmitter}; +use crate::back::write::{ + CodegenContext, FatLtoInput, ModuleConfig, SharedEmitter, TargetMachineFactoryFn, +}; use crate::{CompiledModule, ModuleCodegen}; pub trait WriteBackendMethods: Clone + 'static { type Module: Send + Sync; type TargetMachine; - type TargetMachineError; type ModuleBuffer: ModuleBufferMethods; type ThinData: Send + Sync; type ThinBuffer: ThinBufferMethods; @@ -18,8 +19,9 @@ pub trait WriteBackendMethods: Clone + 'static { /// Performs fat LTO by merging all modules into a single one, running autodiff /// if necessary and running any further optimizations fn run_and_optimize_fat_lto( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: &SharedEmitter, + tm_factory: TargetMachineFactoryFn, exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], modules: Vec>, @@ -28,7 +30,7 @@ pub trait WriteBackendMethods: Clone + 'static { /// lists, one of the modules that need optimization and another for modules that /// can simply be copied over from the incr. comp. cache. 
fn run_thin_lto( - cgcx: &CodegenContext, + cgcx: &CodegenContext, dcx: DiagCtxtHandle<'_>, exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], @@ -38,18 +40,19 @@ pub trait WriteBackendMethods: Clone + 'static { fn print_pass_timings(&self); fn print_statistics(&self); fn optimize( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: &SharedEmitter, module: &mut ModuleCodegen, config: &ModuleConfig, ); fn optimize_thin( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: &SharedEmitter, + tm_factory: TargetMachineFactoryFn, thin: ThinModule, ) -> ModuleCodegen; fn codegen( - cgcx: &CodegenContext, + cgcx: &CodegenContext, shared_emitter: &SharedEmitter, module: ModuleCodegen, config: &ModuleConfig, diff --git a/compiler/rustc_hir/src/attrs/data_structures.rs b/compiler/rustc_hir/src/attrs/data_structures.rs index 7050bb6ea70d8..8e68a3d002334 100644 --- a/compiler/rustc_hir/src/attrs/data_structures.rs +++ b/compiler/rustc_hir/src/attrs/data_structures.rs @@ -5,7 +5,7 @@ pub use ReprAttr::*; use rustc_abi::Align; pub use rustc_ast::attr::data_structures::*; use rustc_ast::token::DocFragmentKind; -use rustc_ast::{AttrStyle, ast}; +use rustc_ast::{AttrStyle, Path, ast}; use rustc_data_structures::fx::FxIndexMap; use rustc_error_messages::{DiagArgValue, IntoDiagArg}; use rustc_macros::{Decodable, Encodable, HashStable_Generic, PrintAttribute}; @@ -1367,6 +1367,9 @@ pub enum AttributeKind { /// `#[unsafe(force_target_feature(enable = "...")]`. TargetFeature { features: ThinVec<(Symbol, Span)>, attr_span: Span, was_forced: bool }, + /// Represents `#![test_runner(path)]` + TestRunner(Path), + /// Represents `#[thread_local]` ThreadLocal, diff --git a/compiler/rustc_hir/src/attrs/encode_cross_crate.rs b/compiler/rustc_hir/src/attrs/encode_cross_crate.rs index 73ac1533cef56..e68ab1c42bafd 100644 --- a/compiler/rustc_hir/src/attrs/encode_cross_crate.rs +++ b/compiler/rustc_hir/src/attrs/encode_cross_crate.rs @@ -176,6 +176,7 @@ impl AttributeKind { ShouldPanic { .. } => No, Stability { .. } => Yes, TargetFeature { .. } => No, + TestRunner(..) => Yes, ThreadLocal => No, TrackCaller(..) => Yes, TypeLengthLimit { .. } => No, diff --git a/compiler/rustc_hir/src/attrs/pretty_printing.rs b/compiler/rustc_hir/src/attrs/pretty_printing.rs index 20efd72e20f7f..8fce529010150 100644 --- a/compiler/rustc_hir/src/attrs/pretty_printing.rs +++ b/compiler/rustc_hir/src/attrs/pretty_printing.rs @@ -3,6 +3,7 @@ use std::ops::Deref; use std::path::PathBuf; use rustc_abi::Align; +use rustc_ast::ast::{Path, join_path_idents}; use rustc_ast::attr::data_structures::CfgEntry; use rustc_ast::attr::version::RustcVersion; use rustc_ast::token::{CommentKind, DocFragmentKind}; @@ -106,6 +107,16 @@ impl PrintAttribute for PathBuf { p.word(self.display().to_string()); } } +impl PrintAttribute for Path { + fn should_render(&self) -> bool { + true + } + + fn print_attribute(&self, p: &mut Printer) { + p.word(join_path_idents(self.segments.iter().map(|seg| seg.ident))); + } +} + macro_rules! print_skip { ($($t: ty),* $(,)?) 
=> {$( impl PrintAttribute for $t { diff --git a/compiler/rustc_hir_analysis/src/coherence/inherent_impls.rs b/compiler/rustc_hir_analysis/src/coherence/inherent_impls.rs index edaf33e493c04..588747f46d17d 100644 --- a/compiler/rustc_hir_analysis/src/coherence/inherent_impls.rs +++ b/compiler/rustc_hir_analysis/src/coherence/inherent_impls.rs @@ -110,7 +110,22 @@ impl<'tcx> InherentCollect<'tcx> { Ok(()) } else { let impl_span = self.tcx.def_span(impl_def_id); - Err(self.tcx.dcx().emit_err(errors::InherentTyOutsideNew { span: impl_span })) + let mut err = errors::InherentTyOutsideNew { span: impl_span, note: None }; + + if let hir::TyKind::Path(rustc_hir::QPath::Resolved(_, path)) = + self.tcx.hir_node_by_def_id(impl_def_id).expect_item().expect_impl().self_ty.kind + && let rustc_hir::def::Res::Def(DefKind::TyAlias, def_id) = path.res + { + let ty_name = self.tcx.def_path_str(def_id); + let alias_ty_name = self.tcx.type_of(def_id).skip_binder().to_string(); + err.note = Some(errors::InherentTyOutsideNewAliasNote { + span: self.tcx.def_span(def_id), + ty_name, + alias_ty_name, + }); + } + + Err(self.tcx.dcx().emit_err(err)) } } diff --git a/compiler/rustc_hir_analysis/src/errors.rs b/compiler/rustc_hir_analysis/src/errors.rs index 6a23b42ae0981..fb79789df76ea 100644 --- a/compiler/rustc_hir_analysis/src/errors.rs +++ b/compiler/rustc_hir_analysis/src/errors.rs @@ -1223,11 +1223,27 @@ pub(crate) struct InherentTyOutsideRelevant { #[derive(Diagnostic)] #[diag("cannot define inherent `impl` for a type outside of the crate where the type is defined", code = E0116)] -#[note("define and implement a trait or new type instead")] +#[help( + "consider defining a trait and implementing it for the type or using a newtype wrapper like `struct MyType(ExternalType);` and implement it" +)] +#[note( + "for more details about the orphan rules, see " +)] pub(crate) struct InherentTyOutsideNew { #[primary_span] #[label("impl for type defined outside of crate")] pub span: Span, + #[subdiagnostic] + pub note: Option, +} + +#[derive(Subdiagnostic)] +#[note("`{$ty_name}` does not define a new type, only an alias of `{$alias_ty_name}` defined here")] +pub(crate) struct InherentTyOutsideNewAliasNote { + #[primary_span] + pub span: Span, + pub ty_name: String, + pub alias_ty_name: String, } #[derive(Diagnostic)] diff --git a/compiler/rustc_hir_typeck/src/cast.rs b/compiler/rustc_hir_typeck/src/cast.rs index 3f13a102684e0..b5094d736dd57 100644 --- a/compiler/rustc_hir_typeck/src/cast.rs +++ b/compiler/rustc_hir_typeck/src/cast.rs @@ -572,6 +572,17 @@ impl<'a, 'tcx> CastCheck<'tcx> { let metadata = known_metadata.unwrap_or("type-specific metadata"); let known_wide = known_metadata.is_some(); let span = self.cast_span; + let param_note = (!known_wide) + .then(|| match cast_ty.kind() { + ty::RawPtr(pointee, _) => match pointee.kind() { + ty::Param(param) => { + Some(errors::IntToWideParamNote { param: param.name }) + } + _ => None, + }, + _ => None, + }) + .flatten(); fcx.dcx().emit_err(errors::IntToWide { span, metadata, @@ -579,6 +590,7 @@ impl<'a, 'tcx> CastCheck<'tcx> { cast_ty, expr_if_nightly, known_wide, + param_note, }); } CastError::UnknownCastPtrKind | CastError::UnknownExprPtrKind => { diff --git a/compiler/rustc_hir_typeck/src/errors.rs b/compiler/rustc_hir_typeck/src/errors.rs index a5de1014dd7a3..ad0467ddec966 100644 --- a/compiler/rustc_hir_typeck/src/errors.rs +++ b/compiler/rustc_hir_typeck/src/errors.rs @@ -590,6 +590,14 @@ pub(crate) struct IntToWide<'tcx> { )] pub expr_if_nightly: Option, pub 
known_wide: bool, + #[subdiagnostic] + pub param_note: Option, +} + +#[derive(Subdiagnostic)] +#[note("the type parameter `{$param}` is not known to be `Sized`, so this pointer may be wide")] +pub(crate) struct IntToWideParamNote { + pub param: Symbol, } #[derive(Subdiagnostic)] diff --git a/compiler/rustc_lint/src/early/diagnostics.rs b/compiler/rustc_lint/src/early/diagnostics.rs index 7681eedc75ed0..3da2b1bf4069e 100644 --- a/compiler/rustc_lint/src/early/diagnostics.rs +++ b/compiler/rustc_lint/src/early/diagnostics.rs @@ -383,6 +383,10 @@ pub fn decorate_attribute_lint( lints::DocAutoCfgExpectsHideOrShow.decorate_lint(diag) } + &AttributeLintKind::AmbiguousDeriveHelpers => { + lints::AmbiguousDeriveHelpers.decorate_lint(diag) + } + &AttributeLintKind::DocAutoCfgHideShowUnexpectedItem { attr_name } => { lints::DocAutoCfgHideShowUnexpectedItem { attr_name }.decorate_lint(diag) } diff --git a/compiler/rustc_lint/src/lints.rs b/compiler/rustc_lint/src/lints.rs index 0aa5199cffc6e..e290098238e93 100644 --- a/compiler/rustc_lint/src/lints.rs +++ b/compiler/rustc_lint/src/lints.rs @@ -1975,27 +1975,17 @@ pub(crate) struct OverflowingBinHex<'a> { pub sign_bit_sub: Option>, } +#[derive(Subdiagnostic)] pub(crate) enum OverflowingBinHexSign { + #[note( + "the literal `{$lit}` (decimal `{$dec}`) does not fit into the type `{$ty}` and will become `{$actually}{$ty}`" + )] Positive, + #[note("the literal `{$lit}` (decimal `{$dec}`) does not fit into the type `{$ty}`")] + #[note("and the value `-{$lit}` will become `{$actually}{$ty}`")] Negative, } -impl Subdiagnostic for OverflowingBinHexSign { - fn add_to_diag(self, diag: &mut Diag<'_, G>) { - match self { - OverflowingBinHexSign::Positive => { - diag.note(inline_fluent!("the literal `{$lit}` (decimal `{$dec}`) does not fit into the type `{$ty}` and will become `{$actually}{$ty}`")); - } - OverflowingBinHexSign::Negative => { - diag.note(inline_fluent!( - "the literal `{$lit}` (decimal `{$dec}`) does not fit into the type `{$ty}`" - )); - diag.note(inline_fluent!("and the value `-{$lit}` will become `{$actually}{$ty}`")); - } - } - } -} - #[derive(Subdiagnostic)] pub(crate) enum OverflowingBinHexSub<'a> { #[suggestion( @@ -3752,6 +3742,10 @@ pub(crate) struct DocAliasDuplicated { #[diag("only `hide` or `show` are allowed in `#[doc(auto_cfg(...))]`")] pub(crate) struct DocAutoCfgExpectsHideOrShow; +#[derive(LintDiagnostic)] +#[diag("there exists a built-in attribute with the same name")] +pub(crate) struct AmbiguousDeriveHelpers; + #[derive(LintDiagnostic)] #[diag("`#![doc(auto_cfg({$attr_name}(...)))]` only accepts identifiers or key/value items")] pub(crate) struct DocAutoCfgHideShowUnexpectedItem { diff --git a/compiler/rustc_lint_defs/src/builtin.rs b/compiler/rustc_lint_defs/src/builtin.rs index 488e3a70b6695..9e80ddbd551f9 100644 --- a/compiler/rustc_lint_defs/src/builtin.rs +++ b/compiler/rustc_lint_defs/src/builtin.rs @@ -17,6 +17,7 @@ declare_lint_pass! { AARCH64_SOFTFLOAT_NEON, ABSOLUTE_PATHS_NOT_STARTING_WITH_CRATE, AMBIGUOUS_ASSOCIATED_ITEMS, + AMBIGUOUS_DERIVE_HELPERS, AMBIGUOUS_GLOB_IMPORTED_TRAITS, AMBIGUOUS_GLOB_IMPORTS, AMBIGUOUS_GLOB_REEXPORTS, @@ -4267,6 +4268,75 @@ declare_lint! { }; } +declare_lint! { + /// The `ambiguous_derive_helpers` lint detects cases where a derive macro's helper attribute + /// is the same name as that of a built-in attribute. 
+ /// + /// ### Example + /// + /// ```rust,ignore (proc-macro) + /// #![crate_type = "proc-macro"] + /// #![deny(ambiguous_derive_helpers)] + /// + /// use proc_macro::TokenStream; + /// + /// #[proc_macro_derive(Trait, attributes(ignore))] + /// pub fn example(input: TokenStream) -> TokenStream { + /// TokenStream::new() + /// } + /// ``` + /// + /// Produces: + /// + /// ```text + /// warning: there exists a built-in attribute with the same name + /// --> file.rs:5:39 + /// | + /// 5 | #[proc_macro_derive(Trait, attributes(ignore))] + /// | ^^^^^^ + /// | + /// = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! + /// = note: for more information, see issue #151152 + /// = note: `#[deny(ambiguous_derive_helpers)]` (part of `#[deny(future_incompatible)]`) on by default + /// ``` + /// + /// ### Explanation + /// + /// Attempting to use this helper attribute will throw an error: + /// + /// ```rust,ignore (needs-dependency) + /// #[derive(Trait)] + /// struct Example { + /// #[ignore] + /// fields: () + /// } + /// ``` + /// + /// Produces: + /// + /// ```text + /// error[E0659]: `ignore` is ambiguous + /// --> src/lib.rs:5:7 + /// | + /// 5 | #[ignore] + /// | ^^^^^^ ambiguous name + /// | + /// = note: ambiguous because of a name conflict with a builtin attribute + /// = note: `ignore` could refer to a built-in attribute + /// note: `ignore` could also refer to the derive helper attribute defined here + /// --> src/lib.rs:3:10 + /// | + /// 3 | #[derive(Trait)] + /// | ^^^^^ + /// ``` + pub AMBIGUOUS_DERIVE_HELPERS, + Warn, + "detects derive helper attributes that are ambiguous with built-in attributes", + @future_incompatible = FutureIncompatibleInfo { + reason: fcw!(FutureReleaseError #151276), + }; +} + declare_lint! { /// The `private_interfaces` lint detects types in a primary interface of an item, /// that are more private than the item itself. Primary interface of an item is all diff --git a/compiler/rustc_lint_defs/src/lib.rs b/compiler/rustc_lint_defs/src/lib.rs index 0c454ec60f4a6..a1b8b135819a0 100644 --- a/compiler/rustc_lint_defs/src/lib.rs +++ b/compiler/rustc_lint_defs/src/lib.rs @@ -800,6 +800,7 @@ pub enum AttributeLintKind { attr_name: Symbol, }, DocInvalid, + AmbiguousDeriveHelpers, DocUnknownInclude { span: Span, inner: &'static str, diff --git a/compiler/rustc_metadata/src/dependency_format.rs b/compiler/rustc_metadata/src/dependency_format.rs index d30e8120993fe..a74d387ad5a4a 100644 --- a/compiler/rustc_metadata/src/dependency_format.rs +++ b/compiler/rustc_metadata/src/dependency_format.rs @@ -68,8 +68,7 @@ use crate::creader::CStore; use crate::errors::{ BadPanicStrategy, CrateDepMultiple, IncompatiblePanicInDropStrategy, IncompatibleWithImmediateAbort, IncompatibleWithImmediateAbortCore, LibRequired, - NonStaticCrateDep, RequiredPanicStrategy, RlibRequired, RustcDriverHelp, RustcLibRequired, - TwoPanicRuntimes, + NonStaticCrateDep, RequiredPanicStrategy, RlibRequired, RustcLibRequired, TwoPanicRuntimes, }; pub(crate) fn calculate(tcx: TyCtxt<'_>) -> Dependencies { @@ -318,7 +317,7 @@ fn add_library( .drain(..) 
.map(|cnum| NonStaticCrateDep { crate_name_: tcx.crate_name(cnum) }) .collect(), - rustc_driver_help: linking_to_rustc_driver.then_some(RustcDriverHelp), + rustc_driver_help: linking_to_rustc_driver, }); } } diff --git a/compiler/rustc_metadata/src/errors.rs b/compiler/rustc_metadata/src/errors.rs index da26f855b6c79..e8b6cb8d02f6d 100644 --- a/compiler/rustc_metadata/src/errors.rs +++ b/compiler/rustc_metadata/src/errors.rs @@ -43,8 +43,8 @@ pub struct CrateDepMultiple { pub crate_name: Symbol, #[subdiagnostic] pub non_static_deps: Vec, - #[subdiagnostic] - pub rustc_driver_help: Option, + #[help("`feature(rustc_private)` is needed to link to the compiler's `rustc_driver` library")] + pub rustc_driver_help: bool, } #[derive(Subdiagnostic)] @@ -54,10 +54,6 @@ pub struct NonStaticCrateDep { pub crate_name_: Symbol, } -#[derive(Subdiagnostic)] -#[help("`feature(rustc_private)` is needed to link to the compiler's `rustc_driver` library")] -pub struct RustcDriverHelp; - #[derive(Diagnostic)] #[diag("cannot link together two panic runtimes: {$prev_name} and {$cur_name}")] pub struct TwoPanicRuntimes { diff --git a/compiler/rustc_middle/src/query/plumbing.rs b/compiler/rustc_middle/src/query/plumbing.rs index b80f096ec9935..3c844eac1fca0 100644 --- a/compiler/rustc_middle/src/query/plumbing.rs +++ b/compiler/rustc_middle/src/query/plumbing.rs @@ -49,8 +49,21 @@ pub struct QueryVTable<'tcx, C: QueryCache> { // Offset of this query's cache field in the QueryCaches struct pub query_cache: usize, pub will_cache_on_disk_for_key_fn: Option>, - pub execute_query: fn(tcx: TyCtxt<'tcx>, k: C::Key) -> C::Value, - pub compute_fn: fn(tcx: TyCtxt<'tcx>, key: C::Key) -> C::Value, + + /// Function pointer that calls `tcx.$query(key)` for this query and + /// discards the returned value. + /// + /// This is a weird thing to be doing, and probably not what you want. + /// It is used for loading query results from disk-cache in some cases. + pub call_query_method_fn: fn(tcx: TyCtxt<'tcx>, key: C::Key), + + /// Function pointer that actually calls this query's provider. + /// Also performs some associated secondary tasks; see the macro-defined + /// implementation in `mod invoke_provider_fn` for more details. + /// + /// This should be the only code that calls the provider function. 
+ pub invoke_provider_fn: fn(tcx: TyCtxt<'tcx>, key: C::Key) -> C::Value, + pub try_load_from_disk_fn: Option>, pub is_loadable_from_disk_fn: Option>, pub hash_result: HashResult, diff --git a/compiler/rustc_parse/src/errors.rs b/compiler/rustc_parse/src/errors.rs index 6499f31bb935b..e73d9a1bf56ab 100644 --- a/compiler/rustc_parse/src/errors.rs +++ b/compiler/rustc_parse/src/errors.rs @@ -2786,12 +2786,14 @@ pub(crate) struct UnknownTokenStart { pub escaped: String, #[subdiagnostic] pub sugg: Option, - #[subdiagnostic] - pub null: Option, + #[help( + "source files must contain UTF-8 encoded text, unexpected null bytes might occur when a different encoding is used" + )] + pub null: bool, #[subdiagnostic] pub repeat: Option, - #[subdiagnostic] - pub invisible: Option, + #[help("invisible characters like '{$escaped}' are not usually visible in text editors")] + pub invisible: bool, } #[derive(Subdiagnostic)] @@ -2837,16 +2839,6 @@ pub(crate) struct UnknownTokenRepeat { pub repeats: usize, } -#[derive(Subdiagnostic)] -#[help("invisible characters like '{$escaped}' are not usually visible in text editors")] -pub(crate) struct InvisibleCharacter; - -#[derive(Subdiagnostic)] -#[help( - "source files must contain UTF-8 encoded text, unexpected null bytes might occur when a different encoding is used" -)] -pub(crate) struct UnknownTokenNull; - #[derive(Diagnostic)] pub(crate) enum UnescapeError { #[diag("invalid unicode character escape")] diff --git a/compiler/rustc_parse/src/lexer/mod.rs b/compiler/rustc_parse/src/lexer/mod.rs index 76f610df1eb09..cd90655125b2b 100644 --- a/compiler/rustc_parse/src/lexer/mod.rs +++ b/compiler/rustc_parse/src/lexer/mod.rs @@ -459,8 +459,8 @@ impl<'psess, 'src> Lexer<'psess, 'src> { span: self.mk_sp(start, self.pos + Pos::from_usize(repeats * c.len_utf8())), escaped: escaped_char(c), sugg, - null: if c == '\x00' { Some(errors::UnknownTokenNull) } else { None }, - invisible: if INVISIBLE_CHARACTERS.contains(&c) { Some(errors::InvisibleCharacter) } else { None }, + null: c == '\x00', + invisible: INVISIBLE_CHARACTERS.contains(&c), repeat: if repeats > 0 { swallow_next_invalid = repeats; Some(errors::UnknownTokenRepeat { repeats }) diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs index 2287b8bf483f6..b6f0e9fedd6d8 100644 --- a/compiler/rustc_passes/src/check_attr.rs +++ b/compiler/rustc_passes/src/check_attr.rs @@ -364,6 +364,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> { | AttributeKind::RustcVariance | AttributeKind::RustcVarianceOfOpaques | AttributeKind::ShouldPanic { .. } + | AttributeKind::TestRunner(..) | AttributeKind::ThreadLocal | AttributeKind::TypeLengthLimit { .. } | AttributeKind::UnstableFeatureBound(..) @@ -411,9 +412,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> { | sym::rustc_mir // crate-level attrs, are checked below | sym::feature - | sym::register_tool - | sym::test_runner, - .. + | sym::register_tool, .. ] => {} [name, rest@..] 
=> { match BUILTIN_ATTRIBUTE_MAP.get(name) { diff --git a/compiler/rustc_pattern_analysis/src/errors.rs b/compiler/rustc_pattern_analysis/src/errors.rs index 64d4ff4220430..e7448613cc14e 100644 --- a/compiler/rustc_pattern_analysis/src/errors.rs +++ b/compiler/rustc_pattern_analysis/src/errors.rs @@ -56,22 +56,14 @@ pub struct OverlappingRangeEndpoints { pub overlap: Vec, } +#[derive(Subdiagnostic)] +#[label("this range overlaps on `{$range}`...")] pub struct Overlap { + #[primary_span] pub span: Span, pub range: String, // a printed pattern } -impl Subdiagnostic for Overlap { - fn add_to_diag(self, diag: &mut Diag<'_, G>) { - let Overlap { span, range } = self; - - // FIXME(mejrs) unfortunately `#[derive(LintDiagnostic)]` - // does not support `#[subdiagnostic(eager)]`... - let message = format!("this range overlaps on `{range}`..."); - diag.span_label(span, message); - } -} - #[derive(LintDiagnostic)] #[diag("exclusive range missing `{$max}`")] pub struct ExclusiveRangeMissingMax { diff --git a/compiler/rustc_query_impl/src/execution.rs b/compiler/rustc_query_impl/src/execution.rs index d7e95d43ecaad..b246f077e747e 100644 --- a/compiler/rustc_query_impl/src/execution.rs +++ b/compiler/rustc_query_impl/src/execution.rs @@ -416,7 +416,8 @@ fn execute_job_non_incr<'tcx, C: QueryCache, const FLAGS: QueryFlags>( } let prof_timer = qcx.tcx.prof.query_provider(); - let result = qcx.start_query(job_id, query.depth_limit(), || query.compute(qcx, key)); + // Call the query provider. + let result = qcx.start_query(job_id, query.depth_limit(), || query.invoke_provider(qcx, key)); let dep_node_index = qcx.tcx.dep_graph.next_virtual_depnode_index(); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); @@ -459,18 +460,21 @@ fn execute_job_incr<'tcx, C: QueryCache, const FLAGS: QueryFlags>( let (result, dep_node_index) = qcx.start_query(job_id, query.depth_limit(), || { if query.anon() { - return dep_graph_data - .with_anon_task_inner(qcx.tcx, query.dep_kind(), || query.compute(qcx, key)); + // Call the query provider inside an anon task. + return dep_graph_data.with_anon_task_inner(qcx.tcx, query.dep_kind(), || { + query.invoke_provider(qcx, key) + }); } // `to_dep_node` is expensive for some `DepKind`s. let dep_node = dep_node_opt.unwrap_or_else(|| query.construct_dep_node(qcx.tcx, &key)); + // Call the query provider. dep_graph_data.with_task( dep_node, (qcx, query), key, - |(qcx, query), key| query.compute(qcx, key), + |(qcx, query), key| query.invoke_provider(qcx, key), query.hash_result(), ) }); @@ -547,7 +551,8 @@ fn try_load_from_disk_and_cache_in_memory<'tcx, C: QueryCache, const FLAGS: Quer let prof_timer = qcx.tcx.prof.query_provider(); // The dep-graph for this computation is already in-place. - let result = qcx.tcx.dep_graph.with_ignore(|| query.compute(qcx, *key)); + // Call the query provider. + let result = qcx.tcx.dep_graph.with_ignore(|| query.invoke_provider(qcx, *key)); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs index 2241c3f440874..e1c22c187b230 100644 --- a/compiler/rustc_query_impl/src/lib.rs +++ b/compiler/rustc_query_impl/src/lib.rs @@ -110,15 +110,18 @@ impl<'tcx, C: QueryCache, const FLAGS: QueryFlags> SemiDynamicQueryDispatcher<'t } } - // Don't use this method to compute query results, instead use the methods on TyCtxt. + /// Calls `tcx.$query(key)` for this query, and discards the returned value. 
+ /// See [`QueryVTable::call_query_method_fn`] for details of this strange operation. #[inline(always)] - fn execute_query(self, tcx: TyCtxt<'tcx>, key: C::Key) -> C::Value { - (self.vtable.execute_query)(tcx, key) + fn call_query_method(self, tcx: TyCtxt<'tcx>, key: C::Key) { + (self.vtable.call_query_method_fn)(tcx, key) } + /// Calls the actual provider function for this query. + /// See [`QueryVTable::invoke_provider_fn`] for more details. #[inline(always)] - fn compute(self, qcx: QueryCtxt<'tcx>, key: C::Key) -> C::Value { - (self.vtable.compute_fn)(qcx.tcx, key) + fn invoke_provider(self, qcx: QueryCtxt<'tcx>, key: C::Key) -> C::Value { + (self.vtable.invoke_provider_fn)(qcx.tcx, key) } #[inline(always)] diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs index 62401fd04015b..9a07df361800c 100644 --- a/compiler/rustc_query_impl/src/plumbing.rs +++ b/compiler/rustc_query_impl/src/plumbing.rs @@ -458,7 +458,9 @@ fn try_load_from_on_disk_cache<'tcx, C: QueryCache, const FLAGS: QueryFlags>( panic!("Failed to recover key for {:?} with hash {}", dep_node, dep_node.hash) }); if query.will_cache_on_disk_for_key(tcx, &key) { - let _ = query.execute_query(tcx, key); + // Call `tcx.$query(key)` for its side-effect of loading the disk-cached + // value into memory. + query.call_query_method(tcx, key); } } @@ -626,14 +628,15 @@ macro_rules! define_queries { } } - /// Defines a `compute` function for this query, to be used as a - /// function pointer in the query's vtable. - mod compute_fn { + /// Defines an `invoke_provider` function that calls the query's provider, + /// to be used as a function pointer in the query's vtable. + /// + /// To mark a short-backtrace boundary, the function's actual name + /// (after demangling) must be `__rust_begin_short_backtrace`. + mod invoke_provider_fn { use super::*; use ::rustc_middle::queries::$name::{Key, Value, provided_to_erased}; - /// This function would be named `compute`, but we also want it - /// to mark the boundaries of an omitted region in backtraces. #[inline(never)] pub(crate) fn __rust_begin_short_backtrace<'tcx>( tcx: TyCtxt<'tcx>, @@ -644,10 +647,13 @@ macro_rules! define_queries { // Call the actual provider function for this query. let provided_value = call_provider!([$($modifiers)*][tcx, $name, key]); + rustc_middle::ty::print::with_reduced_queries!({ tracing::trace!(?provided_value); }); + // Erase the returned value, because `QueryVTable` uses erased values. + // For queries with `arena_cache`, this also arena-allocates the value. provided_to_erased(tcx, provided_value) } } @@ -667,8 +673,12 @@ macro_rules! define_queries { } { None }), - execute_query: |tcx, key| erase::erase_val(tcx.$name(key)), - compute_fn: self::compute_fn::__rust_begin_short_backtrace, + call_query_method_fn: |tcx, key| { + // Call the query method for its side-effect of loading a value + // from disk-cache; the caller doesn't need the value. + let _ = tcx.$name(key); + }, + invoke_provider_fn: self::invoke_provider_fn::__rust_begin_short_backtrace, try_load_from_disk_fn: if_cache_on_disk!([$($modifiers)*] { Some(|tcx, key, prev_index, index| { // Check the `cache_on_disk_if` condition for this key. 
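The comments above lean on a naming trick rather than an API: the provider wrapper is deliberately named `__rust_begin_short_backtrace` because backtrace printing trims the frames outside the span marked by that (demangled) symbol name. A minimal standalone sketch of the convention, not the rustc implementation (the closure and the `black_box` detail are illustrative assumptions):

```rust
// The wrapper must keep this exact name and must not be inlined, because the backtrace
// printer matches on the demangled symbol name to decide where trimming starts.
#[inline(never)]
fn __rust_begin_short_backtrace<T>(f: impl FnOnce() -> T) -> T {
    let result = f();
    // Keep this frame alive so the call above is not turned into a tail call.
    std::hint::black_box(());
    result
}

fn main() {
    // Stand-in for "call the provider": in a short (default) panic backtrace, the
    // frames that called this wrapper would be hidden.
    let value = __rust_begin_short_backtrace(|| 21 * 2);
    assert_eq!(value, 42);
}
```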
diff --git a/compiler/rustc_resolve/src/diagnostics.rs b/compiler/rustc_resolve/src/diagnostics.rs index f09b987157996..9a88f74b5b179 100644 --- a/compiler/rustc_resolve/src/diagnostics.rs +++ b/compiler/rustc_resolve/src/diagnostics.rs @@ -1015,11 +1015,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { span, name, param_kind: is_type, - help: self - .tcx - .sess - .is_nightly_build() - .then_some(errs::ParamInNonTrivialAnonConstHelp), + help: self.tcx.sess.is_nightly_build(), }) } ResolutionError::ParamInEnumDiscriminant { name, param_kind: is_type } => self diff --git a/compiler/rustc_resolve/src/errors.rs b/compiler/rustc_resolve/src/errors.rs index 7afdbf6a469f8..28c5a128ea36a 100644 --- a/compiler/rustc_resolve/src/errors.rs +++ b/compiler/rustc_resolve/src/errors.rs @@ -429,14 +429,10 @@ pub(crate) struct ParamInNonTrivialAnonConst { pub(crate) name: Symbol, #[subdiagnostic] pub(crate) param_kind: ParamKindInNonTrivialAnonConst, - #[subdiagnostic] - pub(crate) help: Option, + #[help("add `#![feature(generic_const_exprs)]` to allow generic const expressions")] + pub(crate) help: bool, } -#[derive(Subdiagnostic)] -#[help("add `#![feature(generic_const_exprs)]` to allow generic const expressions")] -pub(crate) struct ParamInNonTrivialAnonConstHelp; - #[derive(Debug)] #[derive(Subdiagnostic)] pub(crate) enum ParamKindInNonTrivialAnonConst { diff --git a/compiler/rustc_resolve/src/imports.rs b/compiler/rustc_resolve/src/imports.rs index 78ad139cff795..4e7622d08462e 100644 --- a/compiler/rustc_resolve/src/imports.rs +++ b/compiler/rustc_resolve/src/imports.rs @@ -406,8 +406,9 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { } } else if !old_glob_decl.vis().is_at_least(glob_decl.vis(), self.tcx) { // We are glob-importing the same item but with greater visibility. - old_glob_decl.vis.set_unchecked(glob_decl.vis()); - old_glob_decl + // FIXME: Update visibility in place, but without regressions + // (#152004, #151124, #152347). + glob_decl } else if glob_decl.is_ambiguity_recursive() && !old_glob_decl.is_ambiguity_recursive() { // Overwriting a non-ambiguous glob import with an ambiguous glob import. old_glob_decl.ambiguity.set_unchecked(Some(glob_decl)); diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs index 8794c4ff8b025..95f0d3e67ef2d 100644 --- a/compiler/rustc_resolve/src/late/diagnostics.rs +++ b/compiler/rustc_resolve/src/late/diagnostics.rs @@ -3510,12 +3510,7 @@ impl<'ast, 'ra, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { span: lifetime_ref.ident.span, name: lifetime_ref.ident.name, param_kind: errors::ParamKindInNonTrivialAnonConst::Lifetime, - help: self - .r - .tcx - .sess - .is_nightly_build() - .then_some(errors::ParamInNonTrivialAnonConstHelp), + help: self.r.tcx.sess.is_nightly_build(), }) .emit(); } diff --git a/compiler/rustc_target/src/callconv/sparc64.rs b/compiler/rustc_target/src/callconv/sparc64.rs index fc732170dcb73..f55d03d89e8ff 100644 --- a/compiler/rustc_target/src/callconv/sparc64.rs +++ b/compiler/rustc_target/src/callconv/sparc64.rs @@ -1,214 +1,203 @@ -// FIXME: This needs an audit for correctness and completeness. 
- use rustc_abi::{ - BackendRepr, FieldsShape, Float, HasDataLayout, Primitive, Reg, Scalar, Size, TyAbiInterface, - TyAndLayout, + Align, BackendRepr, FieldsShape, Float, HasDataLayout, Primitive, Reg, Size, TyAbiInterface, + TyAndLayout, Variants, }; use crate::callconv::{ArgAbi, ArgAttribute, CastTarget, FnAbi, Uniform}; -use crate::spec::{Env, HasTargetSpec, Os}; - -#[derive(Clone, Debug)] -struct Sdata { - pub prefix: [Option; 8], - pub prefix_index: usize, - pub last_offset: Size, - pub has_float: bool, - pub arg_attribute: ArgAttribute, -} - -fn arg_scalar(cx: &C, scalar: &Scalar, offset: Size, mut data: Sdata) -> Sdata -where - C: HasDataLayout, -{ - let dl = cx.data_layout(); - - if !matches!(scalar.primitive(), Primitive::Float(Float::F32 | Float::F64)) { - return data; - } - - data.has_float = true; - - if !data.last_offset.is_aligned(dl.f64_align) && data.last_offset < offset { - if data.prefix_index == data.prefix.len() { - return data; - } - data.prefix[data.prefix_index] = Some(Reg::i32()); - data.prefix_index += 1; - data.last_offset = data.last_offset + Reg::i32().size; - } - - for _ in 0..((offset - data.last_offset).bits() / 64) - .min((data.prefix.len() - data.prefix_index) as u64) - { - data.prefix[data.prefix_index] = Some(Reg::i64()); - data.prefix_index += 1; - data.last_offset = data.last_offset + Reg::i64().size; - } +use crate::spec::{HasTargetSpec, Os}; - if data.last_offset < offset { - if data.prefix_index == data.prefix.len() { - return data; - } - data.prefix[data.prefix_index] = Some(Reg::i32()); - data.prefix_index += 1; - data.last_offset = data.last_offset + Reg::i32().size; - } - - if data.prefix_index == data.prefix.len() { - return data; - } +// NOTE: GCC and Clang/LLVM have disagreements that the ABI doesn't resolve, we match the +// Clang/LLVM behavior in these cases. 
- if scalar.primitive() == Primitive::Float(Float::F32) { - data.arg_attribute = ArgAttribute::InReg; - data.prefix[data.prefix_index] = Some(Reg::f32()); - data.last_offset = offset + Reg::f32().size; - } else { - data.prefix[data.prefix_index] = Some(Reg::f64()); - data.last_offset = offset + Reg::f64().size; - } - data.prefix_index += 1; - data +#[derive(Copy, Clone)] +enum DoubleWord { + F64, + F128Start, + F128End, + Words([Word; 2]), } -fn arg_scalar_pair( - cx: &C, - scalar1: &Scalar, - scalar2: &Scalar, - mut offset: Size, - mut data: Sdata, -) -> Sdata -where - C: HasDataLayout, -{ - data = arg_scalar(cx, scalar1, offset, data); - match (scalar1.primitive(), scalar2.primitive()) { - (Primitive::Float(Float::F32), _) => offset += Reg::f32().size, - (_, Primitive::Float(Float::F64)) => offset += Reg::f64().size, - (Primitive::Int(i, _signed), _) => offset += i.size(), - (Primitive::Pointer(_), _) => offset += Reg::i64().size, - _ => {} - } - - if !offset.bytes().is_multiple_of(4) - && matches!(scalar2.primitive(), Primitive::Float(Float::F32 | Float::F64)) - { - offset += Size::from_bytes(4 - (offset.bytes() % 4)); - } - data = arg_scalar(cx, scalar2, offset, data); - data +#[derive(Copy, Clone)] +enum Word { + F32, + Integer, } -fn parse_structure<'a, Ty, C>( +fn classify<'a, Ty, C>( cx: &C, - layout: TyAndLayout<'a, Ty>, - mut data: Sdata, - mut offset: Size, -) -> Sdata -where + arg_layout: &TyAndLayout<'a, Ty>, + offset: Size, + double_words: &mut [DoubleWord; 4], +) where Ty: TyAbiInterface<'a, C> + Copy, C: HasDataLayout, { - if let FieldsShape::Union(_) = layout.fields { - return data; - } - - match layout.backend_repr { - BackendRepr::Scalar(scalar) => { - data = arg_scalar(cx, &scalar, offset, data); - } - BackendRepr::Memory { .. } => { - for i in 0..layout.fields.count() { - if offset < layout.fields.offset(i) { - offset = layout.fields.offset(i); + // If this function does not update the `double_words` array, the value will be passed via + // integer registers. The array is initialized with `DoubleWord::Words([Word::Integer; 2])`. + + match arg_layout.backend_repr { + BackendRepr::Scalar(scalar) => match scalar.primitive() { + Primitive::Float(float) => { + if offset.is_aligned(Ord::min(*float.align(cx), Align::EIGHT)) { + let index = offset.bytes_usize() / 8; + match float { + Float::F128 => { + double_words[index] = DoubleWord::F128Start; + double_words[index + 1] = DoubleWord::F128End; + } + Float::F64 => { + double_words[index] = DoubleWord::F64; + } + Float::F32 => match &mut double_words[index] { + DoubleWord::Words(words) => { + words[(offset.bytes_usize() % 8) / 4] = Word::F32; + } + _ => unreachable!(), + }, + Float::F16 => { + // Match LLVM by passing `f16` in integer registers. + } + } + } else { + /* pass unaligned floats in integer registers */ } - data = parse_structure(cx, layout.field(cx, i), data.clone(), offset); } - } - _ => { - if let BackendRepr::ScalarPair(scalar1, scalar2) = &layout.backend_repr { - data = arg_scalar_pair(cx, scalar1, scalar2, offset, data); + Primitive::Int(_, _) | Primitive::Pointer(_) => { /* pass in integer registers */ } + }, + BackendRepr::SimdVector { .. } => {} + BackendRepr::ScalableVector { .. } => {} + BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. 
} => match arg_layout.fields { + FieldsShape::Primitive => { + unreachable!("aggregates can't have `FieldsShape::Primitive`") } - } + FieldsShape::Union(_) => { + if !arg_layout.is_zst() { + if arg_layout.is_transparent() { + let non_1zst_elem = arg_layout.non_1zst_field(cx).expect("not exactly one non-1-ZST field in non-ZST repr(transparent) union").1; + classify(cx, &non_1zst_elem, offset, double_words); + } + } + } + FieldsShape::Array { .. } => {} + FieldsShape::Arbitrary { .. } => match arg_layout.variants { + Variants::Multiple { .. } => {} + Variants::Single { .. } | Variants::Empty => { + // Match Clang by ignoring whether a struct is packed and just considering + // whether individual fields are aligned. GCC currently uses only integer + // registers when passing packed structs. + for i in arg_layout.fields.index_by_increasing_offset() { + classify( + cx, + &arg_layout.field(cx, i), + offset + arg_layout.fields.offset(i), + double_words, + ); + } + } + }, + }, } - - data } -fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, in_registers_max: Size) -where +fn classify_arg<'a, Ty, C>( + cx: &C, + arg: &mut ArgAbi<'a, Ty>, + in_registers_max: Size, + total_double_word_count: &mut usize, +) where Ty: TyAbiInterface<'a, C> + Copy, C: HasDataLayout, { + // 64-bit SPARC allocates argument stack space in 64-bit chunks (double words), some of which + // are promoted to registers based on their position on the stack. + + // Keep track of the total number of double words used by arguments so far. This allows padding + // arguments to be inserted where necessary to ensure that 16-aligned arguments are passed in an + // aligned set of registers. + + let pad = !total_double_word_count.is_multiple_of(2) && arg.layout.align.abi.bytes() == 16; + // The number of double words used by this argument. + let double_word_count = arg.layout.size.bytes_usize().div_ceil(8); + // The number of double words before this argument, including any padding. + let start_double_word_count = *total_double_word_count + usize::from(pad); + if arg.layout.pass_indirectly_in_non_rustic_abis(cx) { arg.make_indirect(); + *total_double_word_count += 1; return; } + if !arg.layout.is_aggregate() { arg.extend_integer_width_to(64); + *total_double_word_count = start_double_word_count + double_word_count; return; } let total = arg.layout.size; if total > in_registers_max { arg.make_indirect(); + *total_double_word_count += 1; return; } - match arg.layout.fields { - FieldsShape::Primitive => unreachable!(), - FieldsShape::Array { .. } => { - // Arrays are passed indirectly - arg.make_indirect(); - return; - } - FieldsShape::Union(_) => { - // Unions and are always treated as a series of 64-bit integer chunks - } - FieldsShape::Arbitrary { .. } => { - // Structures with floating point numbers need special care. - - let mut data = parse_structure( - cx, - arg.layout, - Sdata { - prefix: [None; 8], - prefix_index: 0, - last_offset: Size::ZERO, - has_float: false, - arg_attribute: ArgAttribute::default(), - }, - Size::ZERO, - ); - - if data.has_float { - // Structure { float, int, int } doesn't like to be handled like - // { float, long int }. Other way around it doesn't mind. 
- if data.last_offset < arg.layout.size - && !data.last_offset.bytes().is_multiple_of(8) - && data.prefix_index < data.prefix.len() - { - data.prefix[data.prefix_index] = Some(Reg::i32()); - data.prefix_index += 1; - data.last_offset += Reg::i32().size; - } + *total_double_word_count = start_double_word_count + double_word_count; - let mut rest_size = arg.layout.size - data.last_offset; - if !rest_size.bytes().is_multiple_of(8) && data.prefix_index < data.prefix.len() { - data.prefix[data.prefix_index] = Some(Reg::i32()); - rest_size = rest_size - Reg::i32().size; - } + const ARGUMENT_REGISTERS: usize = 8; - arg.cast_to( - CastTarget::prefixed(data.prefix, Uniform::new(Reg::i64(), rest_size)) - .with_attrs(data.arg_attribute.into()), - ); - return; + let mut double_words = [DoubleWord::Words([Word::Integer; 2]); ARGUMENT_REGISTERS / 2]; + classify(cx, &arg.layout, Size::ZERO, &mut double_words); + + let mut regs = [None; ARGUMENT_REGISTERS]; + let mut i = 0; + let mut push = |reg| { + regs[i] = Some(reg); + i += 1; + }; + let mut attrs = ArgAttribute::empty(); + + for (index, double_word) in double_words.into_iter().enumerate() { + if arg.layout.size.bytes_usize() <= index * 8 { + break; + } + match double_word { + // `f128` must be aligned to be assigned a float register. + DoubleWord::F128Start if (start_double_word_count + index).is_multiple_of(2) => { + push(Reg::f128()); + } + DoubleWord::F128Start => { + // Clang currently handles this case nonsensically, always returning a packed + // `struct { long double x; }` in an aligned quad floating-point register even when + // the `long double` isn't aligned on the stack, which also makes all future + // arguments get passed in the wrong registers. This passes the `f128` in integer + // registers when it is unaligned, same as with `f32` and `f64`. + push(Reg::i64()); + push(Reg::i64()); + } + DoubleWord::F128End => {} // Already handled by `F128Start` + DoubleWord::F64 => push(Reg::f64()), + DoubleWord::Words([Word::Integer, Word::Integer]) => push(Reg::i64()), + DoubleWord::Words(words) => { + attrs |= ArgAttribute::InReg; + for word in words { + match word { + Word::F32 => push(Reg::f32()), + Word::Integer => push(Reg::i32()), + } + } } } } - arg.cast_to(Uniform::new(Reg::i64(), total)); + let cast_target = match regs { + [Some(reg), None, rest @ ..] => { + // Just a single register is needed for this value. + debug_assert!(rest.iter().all(|x| x.is_none())); + CastTarget::from(reg) + } + _ => CastTarget::prefixed(regs, Uniform::new(Reg::i8(), Size::ZERO)), + }; + + arg.cast_to_and_pad_i32(cast_target.with_attrs(attrs.into()), pad); } pub(crate) fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>) @@ -217,23 +206,26 @@ where C: HasDataLayout + HasTargetSpec, { if !fn_abi.ret.is_ignore() && fn_abi.ret.layout.is_sized() { - classify_arg(cx, &mut fn_abi.ret, Size::from_bytes(32)); + // A return value of 32 bytes or smaller is passed via registers. + classify_arg(cx, &mut fn_abi.ret, Size::from_bytes(32), &mut 0); } + // sparc64-unknown-linux-{gnu,musl,uclibc} doesn't ignore ZSTs. + let passes_zsts = matches!(cx.target_spec().os, Os::Linux); + + let mut double_word_count = 0; for arg in fn_abi.args.iter_mut() { if !arg.layout.is_sized() { continue; } if arg.is_ignore() { - // sparc64-unknown-linux-{gnu,musl,uclibc} doesn't ignore ZSTs. 
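The double-word bookkeeping in the new `classify_arg` is easier to follow with concrete numbers: every argument occupies `size.div_ceil(8)` 64-bit stack slots, and a 16-byte-aligned argument is first padded to an even slot index so that it lands in an aligned register pair. A standalone sketch of just that counting step (the helper and its `(size, align)` tuples are illustrative assumptions, not compiler types):

```rust
// Model of the slot assignment only: returns (first slot, slot count) per argument.
fn assign_slots(args: &[(u64, u64)]) -> Vec<(usize, usize)> {
    let mut next = 0usize; // index of the next free double word
    let mut placements = Vec::new();
    for &(size, align) in args {
        if align == 16 && next % 2 != 0 {
            next += 1; // padding double word, like `pad` in `classify_arg`
        }
        let slots = size.div_ceil(8) as usize;
        placements.push((next, slots));
        next += slots;
    }
    placements
}

fn main() {
    // An `i64` followed by a 16-byte, 16-aligned struct: the struct skips slot 1
    // and is placed in the aligned pair of slots 2 and 3.
    assert_eq!(assign_slots(&[(8, 8), (16, 16)]), vec![(0, 1), (2, 2)]);
}
```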
- if cx.target_spec().os == Os::Linux - && matches!(cx.target_spec().env, Env::Gnu | Env::Musl | Env::Uclibc) - && arg.layout.is_zst() - { + if passes_zsts && arg.layout.is_zst() { arg.make_indirect_from_ignore(); + double_word_count += 1; } - return; + continue; } - classify_arg(cx, arg, Size::from_bytes(16)); + // An argument of 16 bytes or smaller is passed via registers. + classify_arg(cx, arg, Size::from_bytes(16), &mut double_word_count); } } diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/nice_region_error/trait_impl_difference.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/nice_region_error/trait_impl_difference.rs index 2a3268d3339ed..5a8dd0364bee3 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/infer/nice_region_error/trait_impl_difference.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/infer/nice_region_error/trait_impl_difference.rs @@ -16,7 +16,7 @@ use tracing::debug; use crate::error_reporting::infer::nice_region_error::NiceRegionError; use crate::error_reporting::infer::nice_region_error::placeholder_error::Highlighted; -use crate::errors::{ConsiderBorrowingParamHelp, RelationshipHelp, TraitImplDiff}; +use crate::errors::{ConsiderBorrowingParamHelp, TraitImplDiff}; use crate::infer::{RegionResolutionError, ValuePairs}; impl<'a, 'tcx> NiceRegionError<'a, 'tcx> { @@ -117,7 +117,7 @@ impl<'a, 'tcx> NiceRegionError<'a, 'tcx> { trait_sp, note: (), param_help: ConsiderBorrowingParamHelp { spans: visitor.types.to_vec() }, - rel_help: visitor.types.is_empty().then_some(RelationshipHelp), + rel_help: visitor.types.is_empty(), expected, found, }; diff --git a/compiler/rustc_trait_selection/src/errors.rs b/compiler/rustc_trait_selection/src/errors.rs index 013e4b522fb54..0b19a470b726e 100644 --- a/compiler/rustc_trait_selection/src/errors.rs +++ b/compiler/rustc_trait_selection/src/errors.rs @@ -1230,12 +1230,6 @@ impl Subdiagnostic for ConsiderBorrowingParamHelp { } } -#[derive(Subdiagnostic)] -#[help( - "verify the lifetime relationships in the `trait` and `impl` between the `self` argument, the other inputs and its output" -)] -pub struct RelationshipHelp; - #[derive(Diagnostic)] #[diag("`impl` item signature doesn't match `trait` item signature")] pub struct TraitImplDiff { @@ -1251,10 +1245,10 @@ pub struct TraitImplDiff { pub note: (), #[subdiagnostic] pub param_help: ConsiderBorrowingParamHelp, - #[subdiagnostic] - // Seems like subdiagnostics are always pushed to the end, so this one - // also has to be a subdiagnostic to maintain order. - pub rel_help: Option, + #[help( + "verify the lifetime relationships in the `trait` and `impl` between the `self` argument, the other inputs and its output" + )] + pub rel_help: bool, pub expected: String, pub found: String, } diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs index 62f3667ad7f4f..bebc0707e26d9 100644 --- a/compiler/rustc_ty_utils/src/layout.rs +++ b/compiler/rustc_ty_utils/src/layout.rs @@ -764,14 +764,20 @@ fn layout_of_uncached<'tcx>( } ty::Alias(..) => { - // NOTE(eddyb) `layout_of` query should've normalized these away, - // if that was possible, so there's no reason to try again here. - let err = if ty.has_param() { + // In case we're still in a generic context, aliases might be rigid. E.g. + // if we've got a `T: Trait` where-bound, `T::Assoc` cannot be normalized + // in the current context. + // + // For some builtin traits, generic aliases can be rigid even in an empty environment, + // e.g. `::Metadata`. 
+            //
+            // Due to trivial bounds, this can even be the case if the alias does not reference
+            // any generic parameters, e.g. a `for<'a> u32: Trait<'a>` where-bound means that
+            // `>::Assoc` is rigid.
+            let err = if ty.has_param() || !cx.typing_env.param_env.caller_bounds().is_empty() {
                 LayoutError::TooGeneric(ty)
             } else {
-                // This is only reachable with unsatisfiable predicates. For example, if we have
-                // `u8: Iterator`, then we can't compute the layout of `::Item`.
-                LayoutError::Unknown(ty)
+                unreachable!("invalid rigid alias in layout_of after normalization: {ty:?}");
             };
             return Err(error(cx, err));
         }
diff --git a/library/alloc/src/collections/binary_heap/mod.rs b/library/alloc/src/collections/binary_heap/mod.rs
index 97aafbc7b6994..4ddfcde57280e 100644
--- a/library/alloc/src/collections/binary_heap/mod.rs
+++ b/library/alloc/src/collections/binary_heap/mod.rs
@@ -581,6 +581,40 @@ impl BinaryHeap {
     pub fn with_capacity_in(capacity: usize, alloc: A) -> BinaryHeap {
         BinaryHeap { data: Vec::with_capacity_in(capacity, alloc) }
     }
+
+    /// Creates a `BinaryHeap` using the supplied `vec`. This does not rebuild the heap,
+    /// so `vec` must already be a max-heap.
+    ///
+    /// # Safety
+    ///
+    /// The supplied `vec` must be a max-heap, i.e. for all indices `0 < i < vec.len()`,
+    /// `vec[(i - 1) / 2] >= vec[i]`.
+    ///
+    /// # Examples
+    ///
+    /// Basic usage:
+    ///
+    /// ```
+    /// #![feature(binary_heap_from_raw_vec)]
+    ///
+    /// use std::collections::BinaryHeap;
+    /// let heap = BinaryHeap::from([1, 2, 3]);
+    /// let vec = heap.into_vec();
+    ///
+    /// // Safety: vec is the output of heap.into_vec(), so is a max-heap.
+    /// let mut new_heap = unsafe {
+    ///     BinaryHeap::from_raw_vec(vec)
+    /// };
+    /// assert_eq!(new_heap.pop(), Some(3));
+    /// assert_eq!(new_heap.pop(), Some(2));
+    /// assert_eq!(new_heap.pop(), Some(1));
+    /// assert_eq!(new_heap.pop(), None);
+    /// ```
+    #[unstable(feature = "binary_heap_from_raw_vec", issue = "152500")]
+    #[must_use]
+    pub unsafe fn from_raw_vec(vec: Vec) -> BinaryHeap {
+        BinaryHeap { data: vec }
+    }
 }
 
 impl BinaryHeap {
diff --git a/src/bootstrap/bootstrap.py b/src/bootstrap/bootstrap.py
index 2e16f2cf27e7b..366fa8e150f8f 100644
--- a/src/bootstrap/bootstrap.py
+++ b/src/bootstrap/bootstrap.py
@@ -1316,7 +1316,7 @@ def bootstrap(args):
     # Give a hard error if `--config` or `RUST_BOOTSTRAP_CONFIG` are set to a missing path,
     # but not if `bootstrap.toml` hasn't been created.
     if not using_default_path or os.path.exists(toml_path):
-        with open(toml_path) as config:
+        with open(toml_path, encoding="utf-8") as config:
             config_toml = config.read()
     else:
         config_toml = ""
diff --git a/src/librustdoc/html/highlight.rs b/src/librustdoc/html/highlight.rs
index f70b350de283b..1c162a79c4c44 100644
--- a/src/librustdoc/html/highlight.rs
+++ b/src/librustdoc/html/highlight.rs
@@ -1246,7 +1246,7 @@ impl<'src> Classifier<'src> {
                 LiteralKind::Float { .. } | LiteralKind::Int { .. } => Class::Number,
             },
             TokenKind::GuardedStrPrefix => return no_highlight(sink),
-            TokenKind::RawIdent if let Some((TokenKind::Bang, _)) = self.peek_non_trivia() => {
+            TokenKind::RawIdent if self.check_if_macro_call("") => {
                 self.new_macro_span(text, sink, before, file_span);
                 return;
             }
@@ -1268,9 +1268,7 @@ impl<'src> Classifier<'src> {
                 // So if it's not a keyword which can be followed by a value (like `if` or
                 // `return`) and the next non-whitespace token is a `!`, then we consider
                 // it's a macro.
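The `# Safety` section of `from_raw_vec` above states the usual array-heap invariant: each element at index `i > 0` is `<=` its parent at `(i - 1) / 2`. A minimal standalone checker (a hypothetical helper, not part of the patch) makes the condition concrete:

```rust
// Checks the documented max-heap invariant: for every i > 0, parent >= child.
fn is_max_heap<T: Ord>(v: &[T]) -> bool {
    (1..v.len()).all(|i| v[(i - 1) / 2] >= v[i])
}

fn main() {
    use std::collections::BinaryHeap;
    // `into_vec` preserves the heap layout, so the result satisfies the invariant.
    let vec = BinaryHeap::from([1, 2, 3]).into_vec();
    assert!(is_max_heap(&vec));
    // A sorted-ascending array is generally not a valid max-heap.
    assert!(!is_max_heap(&[1, 2, 3]));
}
```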
- if !NON_MACRO_KEYWORDS.contains(&text) - && matches!(self.peek_non_trivia(), Some((TokenKind::Bang, _))) - { + if !NON_MACRO_KEYWORDS.contains(&text) && self.check_if_macro_call(text) { self.new_macro_span(text, sink, before, file_span); return; } @@ -1278,7 +1276,7 @@ impl<'src> Classifier<'src> { } // If it's not a keyword and the next non whitespace token is a `!`, then // we consider it's a macro. - _ if matches!(self.peek_non_trivia(), Some((TokenKind::Bang, _))) => { + _ if self.check_if_macro_call(text) => { self.new_macro_span(text, sink, before, file_span); return; } @@ -1339,6 +1337,37 @@ impl<'src> Classifier<'src> { self.tokens.stop_peeking(); None } + + fn check_if_macro_call(&mut self, ident: &str) -> bool { + let mut has_bang = false; + let is_macro_rule_ident = ident == "macro_rules"; + + while let Some((kind, _)) = self.tokens.peek_next() { + if let TokenKind::Whitespace + | TokenKind::LineComment { doc_style: None } + | TokenKind::BlockComment { doc_style: None, .. } = kind + { + continue; + } + if !has_bang { + if kind != TokenKind::Bang { + break; + } + has_bang = true; + continue; + } + self.tokens.stop_peeking(); + if is_macro_rule_ident { + return matches!(kind, TokenKind::Ident | TokenKind::RawIdent); + } + return matches!( + kind, + TokenKind::OpenParen | TokenKind::OpenBracket | TokenKind::OpenBrace + ); + } + self.tokens.stop_peeking(); + false + } } fn is_keyword(symbol: Symbol) -> bool { diff --git a/src/librustdoc/lib.rs b/src/librustdoc/lib.rs index 21ac851e54863..27f62eb48f3a1 100644 --- a/src/librustdoc/lib.rs +++ b/src/librustdoc/lib.rs @@ -17,6 +17,7 @@ #![feature(rustc_private)] #![feature(test)] #![feature(trim_prefix_suffix)] +#![recursion_limit = "256"] #![warn(rustc::internal)] // tidy-alphabetical-end diff --git a/tests/assembly-llvm/sparc-struct-abi.rs b/tests/assembly-llvm/sparc-struct-abi.rs index c6e83b6f8dabe..41ac2be09b653 100644 --- a/tests/assembly-llvm/sparc-struct-abi.rs +++ b/tests/assembly-llvm/sparc-struct-abi.rs @@ -9,6 +9,7 @@ #![crate_type = "lib"] #![feature(no_core, lang_items)] #![no_core] +#![feature(f128)] extern crate minicore; use minicore::*; @@ -21,8 +22,33 @@ pub struct Franta { d: f32, } +#[repr(C, packed)] +struct Misaligned(i32, f64); + +#[repr(C)] +struct AlignToMakeAssemblyShorter(T, f64); + +#[repr(C)] +pub struct Floats(i32, f32, f64, f128); + +#[repr(C)] +pub struct LessFloats(f32, i32, f64); + +#[repr(C)] +pub struct NotMisaligned(i32, Misaligned); + +#[repr(C, align(16))] +pub struct Align16(f64, i32, i32); + +impl Copy for Misaligned {} +impl Copy for AlignToMakeAssemblyShorter {} +impl Copy for Floats {} +impl Copy for LessFloats {} +impl Copy for NotMisaligned {} +impl Copy for Align16 {} + // NB: due to delay slots the `ld` following the call is actually executed before the call. -#[no_mangle] +#[unsafe(no_mangle)] pub unsafe extern "C" fn callee(arg: Franta) { // CHECK-LABEL: callee: // CHECK: st %f3, [[PLACE_D:.*]] @@ -54,7 +80,7 @@ extern "C" { fn tail_call_avoidance_fn(); } -#[no_mangle] +#[unsafe(no_mangle)] pub unsafe extern "C" fn caller() { // CHECK-LABEL: caller: // CHECK: ld [{{.*}}], %f0 @@ -62,7 +88,168 @@ pub unsafe extern "C" fn caller() { // CHECK: ld [{{.*}}], %f2 // CHECK: ld [{{.*}}], %f3 // CHECK: call opaque_callee - // CHECK: mov 3, %o2 + // CHECK: mov 3, %o2 opaque_callee(Franta { a: 1.0, b: 2.0, c: 3.0, d: 4.0 }, 3); tail_call_avoidance_fn(); } + +// Check that misaligned floats aren't promoted to floating point registers. 
+// CHECK-LABEL: misaligned_arg: +#[unsafe(no_mangle)] +extern "C" fn misaligned_arg(x: &mut AlignToMakeAssemblyShorter, value: Misaligned) { + // CHECK: srlx %o2, 32, %o2 + // CHECK-NEXT: stx %o1, [%o0] + // CHECK-NEXT: retl + // CHECK-NEXT: st %o2, [%o0+8] + x.0 = value; +} + +// CHECK-LABEL: misaligned_ret: +#[unsafe(no_mangle)] +extern "C" fn misaligned_ret(x: &AlignToMakeAssemblyShorter) -> Misaligned { + // CHECK: ld [%o0+8], %o1 + // CHECK-NEXT: ldx [%o0], %o0 + // CHECK-NEXT: retl + // CHECK-NEXT: sllx %o1, 32, %o1 + x.0 +} + +// Check structs where 32 >= size > 16 are promoted to register only as an argument. +// Also check that the various floating-point types are promoted to the correct registers. +// CHECK-LABEL: floats_arg: +#[unsafe(no_mangle)] +extern "C" fn floats_arg(x: &mut Floats, value: Floats) { + // CHECK: ldx [%o1+24], %o2 + // CHECK-NEXT: ldx [%o1+16], %o3 + // CHECK-NEXT: ldx [%o1+8], %o4 + // CHECK-NEXT: ldx [%o1], %o1 + // CHECK-NEXT: stx %o2, [%o0+24] + // CHECK-NEXT: stx %o3, [%o0+16] + // CHECK-NEXT: stx %o4, [%o0+8] + // CHECK-NEXT: retl + // CHECK-NEXT: stx %o1, [%o0] + *x = value; +} + +// CHECK-LABEL: floats_ret: +#[unsafe(no_mangle)] +extern "C" fn floats_ret(x: &Floats) -> Floats { + // CHECK: ld [%o0+4], %f1 + // CHECK-NEXT: ldd [%o0+8], %f2 + // CHECK-NEXT: ldd [%o0+16], %f4 + // CHECK-NEXT: ld [%o0], %o1 + // CHECK-NEXT: ldd [%o0+24], %f6 + // CHECK-NEXT: retl + // CHECK-NEXT: sllx %o1, 32, %o0 + *x +} + +// Check float promotion when passing as an argument with a struct where size <= 16. +// CHECK-LABEL: less_floats_arg: +#[unsafe(no_mangle)] +extern "C" fn less_floats_arg(x: &mut LessFloats, value: LessFloats) { + // CHECK: st %f2, [%o0] + // CHECK-NEXT: st %o1, [%o0+4] + // CHECK-NEXT: retl + // CHECK-NEXT: std %f4, [%o0+8] + *x = value; +} + +// CHECK-LABEL: less_floats_ret: +#[unsafe(no_mangle)] +extern "C" fn less_floats_ret(x: &LessFloats) -> LessFloats { + // CHECK: ld [%o0], %f0 + // CHECK-NEXT: ldd [%o0+8], %f2 + // CHECK-NEXT: retl + // CHECK-NEXT: ld [%o0+4], %o0 + *x +} + +// Check fields are promoted if they are aligned in the overall structure. +// This matches Clang's behaviour but not GCC's. +// CHECK-LABEL: not_misaligned_arg: +#[unsafe(no_mangle)] +extern "C" fn not_misaligned_arg( + x: &mut AlignToMakeAssemblyShorter, + value: NotMisaligned, +) { + // CHECK: stx %o1, [%o0] + // CHECK-NEXT: retl + // CHECK-NEXT: std %f4, [%o0+8] + x.0 = value; +} + +// CHECK-LABEL: not_misaligned_ret: +#[unsafe(no_mangle)] +extern "C" fn not_misaligned_ret(x: &AlignToMakeAssemblyShorter) -> NotMisaligned { + // CHECK: ldx [%o0], %o1 + // CHECK-NEXT: ldd [%o0+8], %f2 + // CHECK-NEXT: retl + // CHECK-NEXT: mov %o1, %o0 + x.0 +} + +// Check that 16-aligned structs are allocated the correct registers. +// CHECK-LABEL: align_16_arg: +#[unsafe(no_mangle)] +extern "C" fn align_16_arg(x: &mut Align16, value: Align16) { + // CHECK: std %f4, [%o0] + // CHECK-NEXT: retl + // CHECK-NEXT: stx %o3, [%o0+8] + *x = value; +} + +// CHECK-LABEL: align_16_ret: +#[unsafe(no_mangle)] +extern "C" fn align_16_ret(x: &Align16) -> Align16 { + // CHECK: ldd [%o0], %f0 + // CHECK-NEXT: retl + // CHECK-NEXT: ldx [%o0+8], %o1 + *x +} + +// Check ZST args don't prevent further arguments from being processed. 
+// CHECK-LABEL: zst_arg: +#[unsafe(no_mangle)] +extern "C" fn zst_arg(_: (), value: LessFloats, x: &mut LessFloats) { + // CHECK: st %f0, [%o2] + // CHECK-NEXT: st %o0, [%o2+4] + // CHECK-NEXT: retl + // CHECK-NEXT: std %f2, [%o2+8] + *x = value; +} + +#[repr(C)] +struct I32F32Input { + a: i32, + b: f32, +} + +#[repr(C)] +struct I32F32Output { + b: f32, + a: i32, +} + +// The clang/LLVM implementation mentions that this case requires special handling. +// CHECK-LABEL: i32_f32: +#[unsafe(no_mangle)] +extern "C" fn i32_f32(input: I32F32Input) -> I32F32Output { + // CHECK: srlx %o0, 32, %o0 + // CHECK-NEXT: fmovs %f1, %f0 + // CHECK-NEXT: retl + // CHECK-NEXT: nop + I32F32Output { a: input.a, b: input.b } +} + +#[repr(C)] +pub struct C { + a: f64, + b: f32, +} + +// regression test for https://github.com/rust-lang/rust/issues/147883. +#[unsafe(no_mangle)] +pub extern "C" fn foo(c: C) -> C { + c +} diff --git a/tests/codegen-llvm/cast-target-abi.rs b/tests/codegen-llvm/cast-target-abi.rs index 101e73e33c915..6f1ab4572ee0d 100644 --- a/tests/codegen-llvm/cast-target-abi.rs +++ b/tests/codegen-llvm/cast-target-abi.rs @@ -119,7 +119,7 @@ pub extern "C" fn returns_twou16s() -> TwoU16s { // aarch64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) // loongarch64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) // powerpc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) -// sparc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) +// sparc64-SAME: ([[ABI_TYPE:{ i64, i64 }]] {{.*}}[[ABI_VALUE:%.+]]) // x86_64-SAME: ([[ABI_TYPE:{ i64, i16 }]] {{.*}}[[ABI_VALUE:%.+]]) #[no_mangle] #[inline(never)] @@ -148,7 +148,7 @@ pub extern "C" fn returns_fiveu16s() -> FiveU16s { // aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] - // sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i64 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i16 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // aarch64: ret [[ABI_TYPE]] [[ABI_VALUE]] @@ -217,7 +217,7 @@ pub extern "C" fn returns_doubledouble() -> DoubleDouble { // aarch64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) // loongarch64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) // powerpc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) -// sparc64-SAME: ([[ABI_TYPE:\[2 x i64\]]] {{.*}}[[ABI_VALUE:%.+]]) +// sparc64-SAME: ([[ABI_TYPE:{ i64, i64 }]] {{.*}}[[ABI_VALUE:%.+]]) // x86_64-SAME: ([[ABI_TYPE:{ i64, i32 }]] {{.*}}[[ABI_VALUE:%.+]]) #[no_mangle] #[inline(never)] @@ -246,7 +246,7 @@ pub extern "C" fn returns_three32s() -> Three32s { // aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] - // sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i64 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i32 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // aarch64: ret [[ABI_TYPE]] [[ABI_VALUE]] @@ -399,7 +399,7 @@ pub fn call_fiveu16s() { // aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x 
i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] - // sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i64 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i16 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // CHECK: call void @receives_fiveu16s([[ABI_TYPE]] [[ABI_VALUE]]) @@ -424,7 +424,7 @@ pub fn return_fiveu16s() -> FiveU16s { // aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_fiveu16s() // loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_fiveu16s() - // sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_fiveu16s() + // sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ i64, i64 }]] @returns_fiveu16s() // x86_64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ i64, i16 }]] @returns_fiveu16s() // aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] @@ -595,7 +595,7 @@ pub fn call_three32s() { // aarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // loongarch64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // powerpc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] - // sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:\[2 x i64\]]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] + // sparc64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i64 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // x86_64: [[ABI_VALUE:%.+]] = load [[ABI_TYPE:{ i64, i32 }]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] // CHECK: call void @receives_three32s([[ABI_TYPE]] [[ABI_VALUE]]) @@ -619,7 +619,7 @@ pub fn return_three32s() -> Three32s { // aarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_three32s() // loongarch64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_three32s() - // sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:\[2 x i64\]]] @returns_three32s() + // sparc64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ i64, i64 }]] @returns_three32s() // x86_64: [[ABI_VALUE:%.+]] = call [[ABI_TYPE:{ i64, i32 }]] @returns_three32s() // aarch64: store [[ABI_TYPE]] [[ABI_VALUE]], ptr [[ABI_ALLOCA]], align [[ABI_ALIGN]] diff --git a/tests/rustdoc-html/source-code-pages/macro-call-2.rs b/tests/rustdoc-html/source-code-pages/macro-call-2.rs new file mode 100644 index 0000000000000..d9c3df57c6021 --- /dev/null +++ b/tests/rustdoc-html/source-code-pages/macro-call-2.rs @@ -0,0 +1,18 @@ +// This is yet another test to ensure that only macro calls are considered as such +// by the rustdoc highlighter, in particular when named `macro_rules`. +// This is a regression test for . + +#![crate_name = "foo"] + +//@ has src/foo/macro-call-2.rs.html +//@ count - '//code/span[@class="macro"]' 2 +//@ has - '//code/span[@class="macro"]' 'macro_rules!' +//@ has - '//code/span[@class="macro"]' 'r#macro_rules!' + +macro_rules! 
r#macro_rules { + () => { + fn main() {} + } +} + +r#macro_rules!(); diff --git a/tests/rustdoc-html/source-code-pages/macro-call.rs b/tests/rustdoc-html/source-code-pages/macro-call.rs new file mode 100644 index 0000000000000..df2d22aa9b60a --- /dev/null +++ b/tests/rustdoc-html/source-code-pages/macro-call.rs @@ -0,0 +1,29 @@ +// This is yet another test to ensure that only macro calls are considered as such +// by the rustdoc highlighter. +// This is a regression test for . + +#![crate_name = "foo"] + +//@ has src/foo/macro-call.rs.html +//@ count - '//code/span[@class="macro"]' 2 +//@ has - '//code/span[@class="macro"]' 'panic!' +//@ has - '//code/span[@class="macro"]' 'macro_rules!' + +pub struct Layout; + +impl Layout { + pub fn new() {} +} + +pub fn bar() { + let layout = Layout::new::(); + if layout != Layout::new::() { + panic!(); + } + let macro_rules = 3; + if macro_rules != 3 {} +} + +macro_rules! blob { + () => {} +} diff --git a/tests/rustdoc-html/type-layout.rs b/tests/rustdoc-html/type-layout.rs index 482b8b597dd30..84d2aa8060079 100644 --- a/tests/rustdoc-html/type-layout.rs +++ b/tests/rustdoc-html/type-layout.rs @@ -64,6 +64,12 @@ pub type GenericTypeAlias = (Generic<(u32, ())>, Generic); //@ hasraw type_layout/type.Edges.html 'Unable to compute type layout, possibly due to this type having generic parameters. Layout can only be computed for concrete, fully-instantiated types.' pub type Edges<'a, E> = std::borrow::Cow<'a, [E]>; +pub trait Project { type Assoc; } +// We can't compute layout as the alias stays rigid. A `LayoutError::TooGeneric` is returned. +//@ hasraw type_layout/struct.RigidAlias.html 'Unable to compute type layout, possibly due to this type having generic parameters. Layout can only be computed for concrete, fully-instantiated types.' +//@ !hasraw - 'Size: ' +pub struct RigidAlias(<() as Project>::Assoc) where for<'a> (): Project; + //@ !hasraw type_layout/trait.MyTrait.html 'Size: ' pub trait MyTrait {} @@ -92,9 +98,3 @@ pub enum Uninhabited {} //@ hasraw type_layout/struct.Uninhabited2.html 'Size: ' //@ hasraw - '8 bytes (uninhabited)' pub struct Uninhabited2(std::convert::Infallible, u64); - -pub trait Project { type Assoc; } -// We can't compute layout. A `LayoutError::Unknown` is returned. -//@ hasraw type_layout/struct.Unknown.html 'Unable to compute type layout.' -//@ !hasraw - 'Size: ' -pub struct Unknown(<() as Project>::Assoc) where for<'a> (): Project; diff --git a/tests/ui/attributes/ambiguous_derive_helpers.rs b/tests/ui/attributes/ambiguous_derive_helpers.rs new file mode 100644 index 0000000000000..aee498a7067a5 --- /dev/null +++ b/tests/ui/attributes/ambiguous_derive_helpers.rs @@ -0,0 +1,15 @@ +//@ force-host +//@ no-prefer-dynamic + +#![crate_type = "proc-macro"] +#![deny(ambiguous_derive_helpers)] + +extern crate proc_macro; + +use proc_macro::TokenStream; + +#[proc_macro_derive(Trait, attributes(ignore))] //~ ERROR there exists a built-in attribute with the same name +//~| WARN this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! 
+pub fn deriving(input: TokenStream) -> TokenStream { + TokenStream::new() +} diff --git a/tests/ui/attributes/ambiguous_derive_helpers.stderr b/tests/ui/attributes/ambiguous_derive_helpers.stderr new file mode 100644 index 0000000000000..a1eb8d172c25f --- /dev/null +++ b/tests/ui/attributes/ambiguous_derive_helpers.stderr @@ -0,0 +1,16 @@ +error: there exists a built-in attribute with the same name + --> $DIR/ambiguous_derive_helpers.rs:11:39 + | +LL | #[proc_macro_derive(Trait, attributes(ignore))] + | ^^^^^^ + | + = warning: this was previously accepted by the compiler but is being phased out; it will become a hard error in a future release! + = note: for more information, see issue #151276 +note: the lint level is defined here + --> $DIR/ambiguous_derive_helpers.rs:5:9 + | +LL | #![deny(ambiguous_derive_helpers)] + | ^^^^^^^^^^^^^^^^^^^^^^^^ + +error: aborting due to 1 previous error + diff --git a/tests/ui/cast/fat-ptr-cast.stderr b/tests/ui/cast/fat-ptr-cast.stderr index 2b0bceebf15ca..c6354122f5685 100644 --- a/tests/ui/cast/fat-ptr-cast.stderr +++ b/tests/ui/cast/fat-ptr-cast.stderr @@ -81,6 +81,8 @@ LL | let s = 0 as *const T; | - ^^^^^^^^ creating a `*const T` requires both an address and type-specific metadata | | | consider casting this expression to `*const ()`, then using `core::ptr::from_raw_parts` + | + = note: the type parameter `T` is not known to be `Sized`, so this pointer may be wide error: aborting due to 11 previous errors diff --git a/tests/ui/error-codes/E0116.stderr b/tests/ui/error-codes/E0116.stderr index 1ea5a57f46db1..20e3b196226af 100644 --- a/tests/ui/error-codes/E0116.stderr +++ b/tests/ui/error-codes/E0116.stderr @@ -4,7 +4,8 @@ error[E0116]: cannot define inherent `impl` for a type outside of the crate wher LL | impl Vec {} | ^^^^^^^^^^^^ impl for type defined outside of crate | - = note: define and implement a trait or new type instead + = help: consider defining a trait and implementing it for the type or using a newtype wrapper like `struct MyType(ExternalType);` and implement it + = note: for more details about the orphan rules, see error: aborting due to 1 previous error diff --git a/tests/ui/imports/overwrite-different-vis-2.rs b/tests/ui/imports/overwrite-different-vis-2.rs new file mode 100644 index 0000000000000..50369eafb41c2 --- /dev/null +++ b/tests/ui/imports/overwrite-different-vis-2.rs @@ -0,0 +1,18 @@ +// Regression test for issue #152347. + +//@ check-pass +//@ edition: 2018.. + +use outer::*; // must be before `mod outer` +mod outer { + mod inner { + pub fn f() {} + } + + use inner::*; + pub use inner::*; +} + +fn main() { + f(); +} diff --git a/tests/ui/imports/overwrite-vis-unused.rs b/tests/ui/imports/overwrite-vis-unused.rs new file mode 100644 index 0000000000000..0217fb6250837 --- /dev/null +++ b/tests/ui/imports/overwrite-vis-unused.rs @@ -0,0 +1,12 @@ +// Regression test for issues #152004 and #151124. 
+
+#![deny(unused)]
+
+mod m {
+    pub struct S {}
+}
+
+use m::*; //~ ERROR unused import: `m::*`
+pub use m::*;
+
+fn main() {}
diff --git a/tests/ui/imports/overwrite-vis-unused.stderr b/tests/ui/imports/overwrite-vis-unused.stderr
new file mode 100644
index 0000000000000..a38aa4d5f070a
--- /dev/null
+++ b/tests/ui/imports/overwrite-vis-unused.stderr
@@ -0,0 +1,15 @@
+error: unused import: `m::*`
+  --> $DIR/overwrite-vis-unused.rs:9:5
+   |
+LL | use m::*;
+   |     ^^^^
+   |
+note: the lint level is defined here
+  --> $DIR/overwrite-vis-unused.rs:3:9
+   |
+LL | #![deny(unused)]
+   |         ^^^^^^
+   = note: `#[deny(unused_imports)]` implied by `#[deny(unused)]`
+
+error: aborting due to 1 previous error
+
diff --git a/tests/ui/incoherent-inherent-impls/insufficient-suggestion-issue-141679.rs b/tests/ui/incoherent-inherent-impls/insufficient-suggestion-issue-141679.rs
new file mode 100644
index 0000000000000..b13b6f418d425
--- /dev/null
+++ b/tests/ui/incoherent-inherent-impls/insufficient-suggestion-issue-141679.rs
@@ -0,0 +1,8 @@
+use std::rc::Rc;
+pub struct Foo;
+
+pub type Function = Rc<Foo>;
+
+impl Function {}
+//~^ ERROR cannot define inherent `impl` for a type outside of the crate where the type is defined [E0116]
+fn main(){}
diff --git a/tests/ui/incoherent-inherent-impls/insufficient-suggestion-issue-141679.stderr b/tests/ui/incoherent-inherent-impls/insufficient-suggestion-issue-141679.stderr
new file mode 100644
index 0000000000000..a62f7f82ba9d9
--- /dev/null
+++ b/tests/ui/incoherent-inherent-impls/insufficient-suggestion-issue-141679.stderr
@@ -0,0 +1,17 @@
+error[E0116]: cannot define inherent `impl` for a type outside of the crate where the type is defined
+  --> $DIR/insufficient-suggestion-issue-141679.rs:6:1
+   |
+LL | impl Function {}
+   | ^^^^^^^^^^^^^ impl for type defined outside of crate
+   |
+   = help: consider defining a trait and implementing it for the type or using a newtype wrapper like `struct MyType(ExternalType);` and implement it
+   = note: for more details about the orphan rules, see 
+note: `Function` does not define a new type, only an alias of `Rc<Foo>` defined here
+  --> $DIR/insufficient-suggestion-issue-141679.rs:4:1
+   |
+LL | pub type Function = Rc<Foo>;
+   | ^^^^^^^^^^^^^^^^^
+
+error: aborting due to 1 previous error
+
+For more information about this error, try `rustc --explain E0116`.
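For context on the reworked E0116 diagnostic: the new help text points at two standard workarounds, and the sketch below shows both in a minimal, standalone form. It is illustrative only and not part of the patch; the names `Payload`, `Shared`, and `StrongCount` are invented here.

use std::rc::Rc;

struct Payload;

// Workaround 1: wrap the foreign type in a local newtype; inherent impls on
// the wrapper are always allowed.
struct Shared(Rc<Payload>);

impl Shared {
    fn strong_count(&self) -> usize {
        Rc::strong_count(&self.0)
    }
}

// Workaround 2: define a local (extension) trait and implement it for the
// foreign type.
trait StrongCount {
    fn strong_count_ext(&self) -> usize;
}

impl StrongCount for Rc<Payload> {
    fn strong_count_ext(&self) -> usize {
        Rc::strong_count(self)
    }
}

fn main() {
    let a = Shared(Rc::new(Payload));
    let b = Rc::new(Payload);
    assert_eq!(a.strong_count(), 1);
    assert_eq!(b.strong_count_ext(), 1);
}

Both compile because the inherent impl targets a local newtype and the trait impl uses a local trait, which is exactly what the orphan rules require and what the updated help message is steering users toward.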
diff --git a/tests/ui/incoherent-inherent-impls/no-attr-empty-impl.stderr b/tests/ui/incoherent-inherent-impls/no-attr-empty-impl.stderr index f8491697910c0..de61c3900d461 100644 --- a/tests/ui/incoherent-inherent-impls/no-attr-empty-impl.stderr +++ b/tests/ui/incoherent-inherent-impls/no-attr-empty-impl.stderr @@ -4,7 +4,8 @@ error[E0116]: cannot define inherent `impl` for a type outside of the crate wher LL | impl extern_crate::StructWithAttr {} | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ impl for type defined outside of crate | - = note: define and implement a trait or new type instead + = help: consider defining a trait and implementing it for the type or using a newtype wrapper like `struct MyType(ExternalType);` and implement it + = note: for more details about the orphan rules, see error[E0116]: cannot define inherent `impl` for a type outside of the crate where the type is defined --> $DIR/no-attr-empty-impl.rs:7:1 @@ -12,7 +13,8 @@ error[E0116]: cannot define inherent `impl` for a type outside of the crate wher LL | impl extern_crate::StructNoAttr {} | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ impl for type defined outside of crate | - = note: define and implement a trait or new type instead + = help: consider defining a trait and implementing it for the type or using a newtype wrapper like `struct MyType(ExternalType);` and implement it + = note: for more details about the orphan rules, see error[E0116]: cannot define inherent `impl` for a type outside of the crate where the type is defined --> $DIR/no-attr-empty-impl.rs:10:1 @@ -20,7 +22,8 @@ error[E0116]: cannot define inherent `impl` for a type outside of the crate wher LL | impl extern_crate::EnumWithAttr {} | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ impl for type defined outside of crate | - = note: define and implement a trait or new type instead + = help: consider defining a trait and implementing it for the type or using a newtype wrapper like `struct MyType(ExternalType);` and implement it + = note: for more details about the orphan rules, see error[E0116]: cannot define inherent `impl` for a type outside of the crate where the type is defined --> $DIR/no-attr-empty-impl.rs:13:1 @@ -28,7 +31,8 @@ error[E0116]: cannot define inherent `impl` for a type outside of the crate wher LL | impl extern_crate::EnumNoAttr {} | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ impl for type defined outside of crate | - = note: define and implement a trait or new type instead + = help: consider defining a trait and implementing it for the type or using a newtype wrapper like `struct MyType(ExternalType);` and implement it + = note: for more details about the orphan rules, see error[E0390]: cannot define inherent `impl` for primitive types --> $DIR/no-attr-empty-impl.rs:16:1 diff --git a/tests/ui/incoherent-inherent-impls/no-other-unrelated-errors.stderr b/tests/ui/incoherent-inherent-impls/no-other-unrelated-errors.stderr index 2a33262f83898..f01817e294430 100644 --- a/tests/ui/incoherent-inherent-impls/no-other-unrelated-errors.stderr +++ b/tests/ui/incoherent-inherent-impls/no-other-unrelated-errors.stderr @@ -4,7 +4,8 @@ error[E0116]: cannot define inherent `impl` for a type outside of the crate wher LL | impl Vec {} | ^^^^^^^^^^^^^^^ impl for type defined outside of crate | - = note: define and implement a trait or new type instead + = help: consider defining a trait and implementing it for the type or using a newtype wrapper like `struct MyType(ExternalType);` and implement it + = note: for more details about the orphan rules, see error: aborting due to 1 previous error diff 
--git a/tests/ui/layout/rigid-alias-no-params.rs b/tests/ui/layout/rigid-alias-no-params.rs
new file mode 100644
index 0000000000000..bc2622eb7ca64
--- /dev/null
+++ b/tests/ui/layout/rigid-alias-no-params.rs
@@ -0,0 +1,30 @@
+//@ compile-flags: -O -Cdebug-assertions=on
+//@ build-pass
+
+// A regression test for #151791. Computing the layout of
+// `<AsOwned as ArchiveWith<'a>>::Archived` fails as the alias
+// is still rigid because the where-bound in scope shadows the impl.
+//
+// This previously caused an incorrect error during MIR optimizations.
+
+struct ArchivedString;
+
+pub trait ArchiveWith<'a> {
+    type Archived;
+}
+
+struct AsOwned;
+impl ArchiveWith<'_> for AsOwned {
+    type Archived = ArchivedString;
+}
+
+fn foo<'a>()
+where
+    AsOwned: ArchiveWith<'a>,
+{
+    let _ = unsafe { &*std::ptr::dangling::<<AsOwned as ArchiveWith<'a>>::Archived>() };
+}
+
+fn main() {
+    foo();
+}
diff --git a/tests/ui/traits/trait-or-new-type-instead.stderr b/tests/ui/traits/trait-or-new-type-instead.stderr
index 5f5aa3ac56982..ad12a84a4b808 100644
--- a/tests/ui/traits/trait-or-new-type-instead.stderr
+++ b/tests/ui/traits/trait-or-new-type-instead.stderr
@@ -4,7 +4,8 @@ error[E0116]: cannot define inherent `impl` for a type outside of the crate wher
 LL | impl<T> Option<T> {
    | ^^^^^^^^^^^^^^^^^ impl for type defined outside of crate
    |
-   = note: define and implement a trait or new type instead
+   = help: consider defining a trait and implementing it for the type or using a newtype wrapper like `struct MyType(ExternalType);` and implement it
+   = note: for more details about the orphan rules, see 
 
 error: aborting due to 1 previous error
 
diff --git a/tests/ui/where-clauses/projection-bound-unsatisfied-item-bounds-mit-opt-ice.rs b/tests/ui/where-clauses/projection-bound-unsatisfied-item-bounds-mit-opt-ice.rs
index 80eec709eecbc..9448c8a2f9112 100644
--- a/tests/ui/where-clauses/projection-bound-unsatisfied-item-bounds-mit-opt-ice.rs
+++ b/tests/ui/where-clauses/projection-bound-unsatisfied-item-bounds-mit-opt-ice.rs
@@ -2,7 +2,7 @@
 //@ build-pass
 
 // A regression test for #149081. The environment of `size` and `align`
-// currently means that the item bound of`T::Assoc` doesn't hold. This can
+// currently means that the item bound of `T::Assoc` doesn't hold. This can
 // result in normalization failures and ICE during MIR optimizations.
 //
 // This will no longer be an issue once #149283 is implemented.
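To make the rigid-alias behaviour in rigid-alias-no-params.rs concrete, here is a minimal standalone sketch that reuses the same trait shape; it is not part of the patch, and the function name `archived_size` is invented for illustration. With no `AsOwned: ArchiveWith<'a>` where-bound shadowing the blanket impl, the projection normalizes to `ArchivedString`, so its layout is known.

#![allow(dead_code)] // the types below are only used at the type level

struct ArchivedString;

trait ArchiveWith<'a> {
    type Archived;
}

struct AsOwned;

impl ArchiveWith<'_> for AsOwned {
    type Archived = ArchivedString;
}

// No shadowing where-bound is in scope, so the projection resolves through the
// blanket impl and `size_of` sees the concrete type `ArchivedString`.
fn archived_size<'a>() -> usize {
    std::mem::size_of::<<AsOwned as ArchiveWith<'a>>::Archived>()
}

fn main() {
    // `ArchivedString` is a unit struct, hence zero-sized.
    assert_eq!(archived_size(), 0);
}

In the test itself the `AsOwned: ArchiveWith<'a>` where-bound keeps the alias rigid, so, as the type-layout.rs change earlier in this patch notes, layout computation reports `LayoutError::TooGeneric` rather than resolving to `ArchivedString`.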