Changes from 2 commits
5 changes: 2 additions & 3 deletions compiler/rustc_middle/src/query/inner.rs
@@ -100,7 +100,6 @@ pub(crate) fn query_feed<'tcx, Cache>(
tcx: TyCtxt<'tcx>,
dep_kind: DepKind,
query_vtable: &QueryVTable<'tcx, Cache>,
cache: &Cache,
key: Cache::Key,
value: Cache::Value,
) where
@@ -110,7 +109,7 @@ pub(crate) fn query_feed<'tcx, Cache>(
let format_value = query_vtable.format_value;

// Check whether the in-memory cache already has a value for this key.
match try_get_cached(tcx, cache, &key) {
match try_get_cached(tcx, &query_vtable.query_cache, &key) {
Some(old) => {
// The query already has a cached value for this key.
// That's OK if both values are the same, i.e. they have the same hash,
@@ -153,7 +152,7 @@ pub(crate) fn query_feed<'tcx, Cache>(
query_vtable.hash_result,
query_vtable.format_value,
);
cache.complete(key, value, dep_node_index);
query_vtable.query_cache.complete(key, value, dep_node_index);
}
}
}
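
The comments in query_feed above spell out the feeding rule: if the cache already holds a value for this key, the fed value must agree with it (rustc compares stable hashes), otherwise the fed value simply becomes the cached result. A minimal, standalone sketch of that rule, using a plain HashMap in place of the real query cache (all types here are hypothetical stand-ins, not rustc's):

```rust
use std::collections::HashMap;

// Feeding a value for `key`: overlapping with an existing cache entry is only
// allowed if both values agree (rustc compares stable hashes; plain equality
// stands in for that here).
fn feed(cache: &mut HashMap<u32, String>, key: u32, value: String) {
    if let Some(old) = cache.get(&key) {
        assert_eq!(old, &value, "fed value differs from the already-cached value");
    } else {
        // Not cached yet: record the fed value as this query's result.
        cache.insert(key, value);
    }
}

fn main() {
    let mut cache = HashMap::new();
    feed(&mut cache, 1, "type of item 1".to_string());
    feed(&mut cache, 1, "type of item 1".to_string()); // same value: fine
    assert_eq!(cache.len(), 1);
}
```
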
31 changes: 6 additions & 25 deletions compiler/rustc_middle/src/query/plumbing.rs
@@ -14,9 +14,7 @@ pub use sealed::IntoQueryParam;
use crate::dep_graph;
use crate::dep_graph::{DepKind, DepNodeIndex, SerializedDepNodeIndex};
use crate::ich::StableHashingContext;
use crate::queries::{
ExternProviders, PerQueryVTables, Providers, QueryArenas, QueryCaches, QueryEngine, QueryStates,
};
use crate::queries::{ExternProviders, PerQueryVTables, Providers, QueryArenas, QueryEngine};
use crate::query::on_disk_cache::{CacheEncoder, EncodedDepNodeIndex, OnDiskCache};
use crate::query::stack::{QueryStackDeferred, QueryStackFrame, QueryStackFrameExtra};
use crate::query::{QueryCache, QueryInfo, QueryJob};
@@ -108,10 +106,8 @@ pub struct QueryVTable<'tcx, C: QueryCache> {
pub dep_kind: DepKind,
/// How this query deals with query cycle errors.
pub cycle_error_handling: CycleErrorHandling,
// Offset of this query's state field in the QueryStates struct
pub query_state: usize,
// Offset of this query's cache field in the QueryCaches struct
pub query_cache: usize,
pub query_state: QueryState<'tcx, C::Key>,
pub query_cache: C,
pub will_cache_on_disk_for_key_fn: Option<WillCacheOnDiskForKeyFn<'tcx, C::Key>>,

/// Function pointer that calls `tcx.$query(key)` for this query and
@@ -155,9 +151,7 @@ pub struct QuerySystemFns {
}

pub struct QuerySystem<'tcx> {
pub states: QueryStates<'tcx>,
pub arenas: WorkerLocal<QueryArenas<'tcx>>,
pub caches: QueryCaches<'tcx>,
pub query_vtables: PerQueryVTables<'tcx>,

/// This provides access to the incremental compilation on-disk cache for query results.
@@ -445,11 +439,6 @@ macro_rules! define_callbacks {
)*
}

#[derive(Default)]
pub struct QueryCaches<'tcx> {
$($(#[$attr])* pub $name: $name::Storage<'tcx>,)*
}

impl<'tcx> $crate::query::TyCtxtEnsureOk<'tcx> {
$($(#[$attr])*
#[inline(always)]
@@ -461,7 +450,7 @@
[$($modifiers)*]
self.tcx,
self.tcx.query_system.fns.engine.$name,
&self.tcx.query_system.caches.$name,
&self.tcx.query_system.query_vtables.$name.query_cache,
$crate::query::IntoQueryParam::into_query_param(key),
false,
)
@@ -475,7 +464,7 @@
crate::query::inner::query_ensure(
self.tcx,
self.tcx.query_system.fns.engine.$name,
&self.tcx.query_system.caches.$name,
&self.tcx.query_system.query_vtables.$name.query_cache,
$crate::query::IntoQueryParam::into_query_param(key),
true,
);
@@ -502,7 +491,7 @@
erase::restore_val::<$V>(inner::query_get_at(
self.tcx,
self.tcx.query_system.fns.engine.$name,
&self.tcx.query_system.caches.$name,
&self.tcx.query_system.query_vtables.$name.query_cache,
self.span,
$crate::query::IntoQueryParam::into_query_param(key),
))
@@ -518,13 +507,6 @@
)*
}

#[derive(Default)]
pub struct QueryStates<'tcx> {
$(
pub $name: $crate::query::QueryState<'tcx, $($K)*>,
)*
}

pub struct Providers {
$(
/// This is the provider for the query. Use `Find references` on this to
@@ -594,7 +576,6 @@ macro_rules! define_feedable {
tcx,
dep_kind,
&tcx.query_system.query_vtables.$name,
&tcx.query_system.caches.$name,
key,
erased_value,
);
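
The QueryVTable change above is the heart of this diff: instead of storing byte offsets into the QueryStates and QueryCaches structs and recovering typed references with pointer arithmetic (see the accessors removed from rustc_query_impl/src/lib.rs further down), each monomorphized vtable now owns its query's state and cache directly. A standalone sketch contrasting the two access patterns, with hypothetical stand-in types rather than the real rustc ones:

```rust
// Stand-in for the struct that used to collect every query's cache.
struct Caches {
    type_of: Vec<u32>,
    mir_built: Vec<u64>,
}

// Old scheme: the vtable stores only a byte offset into `Caches`, and a typed
// reference is recovered with unsafe pointer arithmetic.
struct VTableOld {
    cache_offset: usize, // e.g. std::mem::offset_of!(Caches, mir_built)
}

impl VTableOld {
    unsafe fn cache<'a, C>(&self, caches: &'a Caches) -> &'a C {
        // Safety: the caller must guarantee that `cache_offset` really is the
        // offset of a field of type `C` inside `Caches`.
        unsafe {
            &*(caches as *const Caches)
                .byte_add(self.cache_offset)
                .cast::<C>()
        }
    }
}

// New scheme: the monomorphized, per-query vtable simply owns the cache, so
// access is a safe field read.
struct VTableNew<C> {
    cache: C,
}

fn main() {
    let caches = Caches { type_of: vec![1], mir_built: vec![2] };
    assert_eq!(caches.type_of[0], 1);

    let old = VTableOld { cache_offset: std::mem::offset_of!(Caches, mir_built) };
    let via_offset: &Vec<u64> = unsafe { old.cache(&caches) };

    let new = VTableNew { cache: vec![2u64] };
    assert_eq!(via_offset[0], new.cache[0]);
}
```
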
18 changes: 8 additions & 10 deletions compiler/rustc_query_impl/src/execution.rs
@@ -244,12 +244,12 @@ fn wait_for_query<'tcx, C: QueryCache, const FLAGS: QueryFlags>(

match result {
Ok(()) => {
let Some((v, index)) = query.query_cache(tcx).lookup(&key) else {
let Some((v, index)) = query.query_cache().lookup(&key) else {
outline(|| {
// We didn't find the query result in the query cache. Check if it was
// poisoned due to a panic instead.
let key_hash = sharded::make_hash(&key);
let shard = query.query_state(tcx).active.lock_shard_by_hash(key_hash);
let shard = query.query_state().active.lock_shard_by_hash(key_hash);
match shard.find(key_hash, equivalent_key(&key)) {
// The query we waited on panicked. Continue unwinding here.
Some((_, ActiveKeyStatus::Poisoned)) => FatalError.raise(),
@@ -278,9 +278,8 @@ fn try_execute_query<'tcx, C: QueryCache, const FLAGS: QueryFlags, const INCR: b
key: C::Key,
dep_node: Option<DepNode>,
) -> (C::Value, Option<DepNodeIndex>) {
let state = query.query_state(tcx);
let key_hash = sharded::make_hash(&key);
let mut state_lock = state.active.lock_shard_by_hash(key_hash);
let mut state_lock = query.query_state().active.lock_shard_by_hash(key_hash);

// For the parallel compiler we need to check both the query cache and query state structures
// while holding the state lock to ensure that 1) the query has not yet completed and 2) the
@@ -289,7 +288,7 @@ fn try_execute_query<'tcx, C: QueryCache, const FLAGS: QueryFlags, const INCR: b
// executing, but another thread may have already completed the query and stored its result
// in the query cache.
if tcx.sess.threads() > 1 {
if let Some((value, index)) = query.query_cache(tcx).lookup(&key) {
if let Some((value, index)) = query.query_cache().lookup(&key) {
tcx.prof.query_cache_hit(index.into());
return (value, Some(index));
}
@@ -308,7 +307,7 @@ fn try_execute_query<'tcx, C: QueryCache, const FLAGS: QueryFlags, const INCR: b
// Drop the lock before we start executing the query
drop(state_lock);

execute_job::<C, FLAGS, INCR>(query, tcx, state, key, key_hash, id, dep_node)
execute_job::<C, FLAGS, INCR>(query, tcx, key, key_hash, id, dep_node)
}
Entry::Occupied(mut entry) => {
match &mut entry.get_mut().1 {
@@ -340,15 +339,14 @@ fn try_execute_query<'tcx, C: QueryCache, const FLAGS: QueryFlags, const INCR: b
fn execute_job<'tcx, C: QueryCache, const FLAGS: QueryFlags, const INCR: bool>(
query: SemiDynamicQueryDispatcher<'tcx, C, FLAGS>,
tcx: TyCtxt<'tcx>,
state: &'tcx QueryState<'tcx, C::Key>,
key: C::Key,
key_hash: u64,
id: QueryJobId,
dep_node: Option<DepNode>,
) -> (C::Value, Option<DepNodeIndex>) {
// Set up a guard object that will automatically poison the query if a
// panic occurs while executing the query (or any intermediate plumbing).
let job_guard = ActiveJobGuard { state, key, key_hash };
let job_guard = ActiveJobGuard { state: query.query_state(), key, key_hash };

debug_assert_eq!(tcx.dep_graph.is_fully_enabled(), INCR);

@@ -359,7 +357,7 @@ fn execute_job<'tcx, C: QueryCache, const FLAGS: QueryFlags, const INCR: bool>(
execute_job_non_incr(query, tcx, key, id)
};

let cache = query.query_cache(tcx);
let cache = query.query_cache();
if query.feedable() {
// We should not compute queries that also got a value via feeding.
// This can't happen, as query feeding adds the very dependencies to the fed query
@@ -680,7 +678,7 @@ pub(crate) fn force_query<'tcx, C: QueryCache, const FLAGS: QueryFlags>(
) {
// We may be concurrently trying both execute and force a query.
// Ensure that only one of them runs the query.
if let Some((_, index)) = query.query_cache(tcx).lookup(&key) {
if let Some((_, index)) = query.query_cache().lookup(&key) {
tcx.prof.query_cache_hit(index.into());
return;
}
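
The comment in try_execute_query above explains why, under the parallel compiler, the cache is probed again while the state (shard) lock is held: another thread may have completed the query between the caller's lock-free cache check and this point. A much simplified, self-contained sketch of that double-check, using a plain Mutex, HashMap, and HashSet instead of rustc's sharded locks, and spinning instead of real query-job blocking:

```rust
use std::collections::{HashMap, HashSet};
use std::sync::Mutex;

struct Query {
    cache: Mutex<HashMap<u32, u64>>, // completed results
    active: Mutex<HashSet<u32>>,     // keys currently executing ("state")
}

impl Query {
    fn get_or_compute(&self, key: u32, compute: impl Fn(u32) -> u64) -> u64 {
        loop {
            // Take the state lock, then re-check the cache while holding it:
            // another thread may have completed the query and stored its
            // result since our earlier lock-free probe.
            let mut active = self.active.lock().unwrap();
            if let Some(v) = self.cache.lock().unwrap().get(&key) {
                return *v;
            }
            if !active.insert(key) {
                // Someone else is already executing this key. The real code
                // blocks on the query job; this sketch just yields and retries.
                drop(active);
                std::thread::yield_now();
                continue;
            }
            // We are the executor: drop the state lock before running the query.
            drop(active);
            let value = compute(key);
            self.cache.lock().unwrap().insert(key, value);
            self.active.lock().unwrap().remove(&key);
            return value;
        }
    }
}

fn main() {
    let q = Query { cache: Mutex::new(HashMap::new()), active: Mutex::new(HashSet::new()) };
    assert_eq!(q.get_or_compute(7, |k| u64::from(k) * 2), 14);
}
```
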
26 changes: 5 additions & 21 deletions compiler/rustc_query_impl/src/lib.rs
@@ -14,9 +14,7 @@ use std::marker::ConstParamTy;

use rustc_data_structures::sync::AtomicU64;
use rustc_middle::dep_graph::{self, DepKind, DepNode, DepNodeIndex, SerializedDepNodeIndex};
use rustc_middle::queries::{
self, ExternProviders, Providers, QueryCaches, QueryEngine, QueryStates,
};
use rustc_middle::queries::{self, ExternProviders, Providers, QueryEngine};
use rustc_middle::query::on_disk_cache::{CacheEncoder, EncodedDepNodeIndex, OnDiskCache};
use rustc_middle::query::plumbing::{
HashResult, QueryState, QuerySystem, QuerySystemFns, QueryVTable,
@@ -104,26 +102,14 @@ impl<'tcx, C: QueryCache, const FLAGS: QueryFlags> SemiDynamicQueryDispatcher<'t

// Don't use this method to access query results, instead use the methods on TyCtxt.
#[inline(always)]
fn query_state(self, tcx: TyCtxt<'tcx>) -> &'tcx QueryState<'tcx, C::Key> {
// Safety:
// This is just manually doing the subfield referencing through pointer math.
unsafe {
&*(&tcx.query_system.states as *const QueryStates<'tcx>)
.byte_add(self.vtable.query_state)
.cast::<QueryState<'tcx, C::Key>>()
}
fn query_state(self) -> &'tcx QueryState<'tcx, C::Key> {
&self.vtable.query_state
}

// Don't use this method to access query results, instead use the methods on TyCtxt.
#[inline(always)]
fn query_cache(self, tcx: TyCtxt<'tcx>) -> &'tcx C {
// Safety:
// This is just manually doing the subfield referencing through pointer math.
unsafe {
&*(&tcx.query_system.caches as *const QueryCaches<'tcx>)
.byte_add(self.vtable.query_cache)
.cast::<C>()
}
fn query_cache(self) -> &'tcx C {
&self.vtable.query_cache
}

/// Calls `tcx.$query(key)` for this query, and discards the returned value.
@@ -245,9 +231,7 @@ pub fn query_system<'tcx>(
incremental: bool,
) -> QuerySystem<'tcx> {
QuerySystem {
states: Default::default(),
arenas: Default::default(),
caches: Default::default(),
query_vtables: make_query_vtables(),
on_disk_cache,
fns: QuerySystemFns {
46 changes: 27 additions & 19 deletions compiler/rustc_query_impl/src/plumbing.rs
@@ -15,7 +15,7 @@ use rustc_middle::bug;
#[expect(unused_imports, reason = "used by doc comments")]
use rustc_middle::dep_graph::DepKindVTable;
use rustc_middle::dep_graph::{
self, DepNode, DepNodeIndex, DepNodeKey, SerializedDepNodeIndex, dep_kinds,
self, DepKind, DepNode, DepNodeIndex, DepNodeKey, SerializedDepNodeIndex, dep_kinds,
};
use rustc_middle::query::on_disk_cache::{
AbsoluteBytePos, CacheDecoder, CacheEncoder, EncodedDepNodeIndex,
@@ -123,7 +123,7 @@ pub fn collect_active_jobs_from_all_queries<'tcx>(
if complete { Ok(job_map_out) } else { Err(job_map_out) }
}

pub(super) fn try_mark_green<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &dep_graph::DepNode) -> bool {
pub(super) fn try_mark_green<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> bool {
tcx.dep_graph.try_mark_green(tcx, dep_node).is_some()
}

@@ -273,12 +273,17 @@ macro_rules! item_if_cache_on_disk {
}

/// The deferred part of a deferred query stack frame.
fn mk_query_stack_frame_extra<'tcx, Cache>(
(tcx, vtable, key): (TyCtxt<'tcx>, &'tcx QueryVTable<'tcx, Cache>, Cache::Key),
fn mk_query_stack_frame_extra<'tcx, K>(
(tcx, name, dep_kind, description_fn, key): (
TyCtxt<'tcx>,
&'static str,
DepKind,
fn(TyCtxt<'tcx>, K) -> String,
K,
),
Comment on lines +276 to +283
Suggestion: Based on my local testing, I think this entire commit can be avoided by just adding a QueryVTable<'tcx, C>: DynSync bound to create_deferred_query_stack_frame.

(This makes sense because the state and cache should be DynSync, and the caller knows that this happens to be true of every concrete state and cache.)

If you apply this suggestion, the commit description of the next commit will need some adjustment.
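
A rough sketch of what that suggested bound might look like (hypothetical: the real signature of create_deferred_query_stack_frame is not shown in this diff, DynSync is assumed to be the marker trait from rustc_data_structures, and the return type is a guess):

```rust
// Hypothetical sketch, not the real rustc code: keep handing the whole vtable
// to the deferred frame and state the DynSync property explicitly instead.
fn create_deferred_query_stack_frame<'tcx, C: QueryCache>(
    tcx: TyCtxt<'tcx>,
    vtable: &'tcx QueryVTable<'tcx, C>,
    key: C::Key,
) -> QueryStackFrame<QueryStackDeferred<'tcx>>
where
    // The added bound: true for every concrete instantiation (the state and
    // cache fields are DynSync), but not provable for a generic `C`, so the
    // caller asserts it here.
    QueryVTable<'tcx, C>: DynSync,
{
    // With the bound in place, the vtable reference itself can be captured, so
    // mk_query_stack_frame_extra could keep its original (tcx, vtable, key)
    // payload instead of the (tcx, name, dep_kind, description_fn, key) tuple.
    let _info = QueryStackDeferred::new((tcx, vtable, key), mk_query_stack_frame_extra);
    // ...hash/def_id computation and QueryStackFrame::new(...) as in the diff...
    todo!()
}
```

If something along these lines works, the decomposition of the vtable into name/dep_kind/description_fn shown in this hunk would no longer be needed, which appears to be what the reviewer means by the commit being avoidable.
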

) -> QueryStackFrameExtra
where
Cache: QueryCache,
Cache::Key: Key,
K: Key + Copy,
{
let def_id = key.key_as_def_id();

@@ -287,21 +292,21 @@ where
let reduce_queries = with_reduced_queries();

// Avoid calling queries while formatting the description
let description = ty::print::with_no_queries!((vtable.description_fn)(tcx, key));
let description = ty::print::with_no_queries!(description_fn(tcx, key));
let description = if tcx.sess.verbose_internals() {
format!("{description} [{name:?}]", name = vtable.name)
format!("{description} [{name:?}]")
} else {
description
};
let span = if vtable.dep_kind == dep_graph::dep_kinds::def_span || reduce_queries {
let span = if dep_kind == dep_graph::dep_kinds::def_span || reduce_queries {
// The `def_span` query is used to calculate `default_span`,
// so exit to avoid infinite recursion.
None
} else {
Some(key.default_span(tcx))
};

let def_kind = if vtable.dep_kind == dep_graph::dep_kinds::def_kind || reduce_queries {
let def_kind = if dep_kind == dep_graph::dep_kinds::def_kind || reduce_queries {
// Try to avoid infinite recursion.
None
} else {
@@ -331,7 +336,10 @@
let def_id: Option<DefId> = key.key_as_def_id();
let def_id_for_ty_in_cycle: Option<DefId> = key.def_id_for_ty_in_cycle();

let info = QueryStackDeferred::new((tcx, vtable, key), mk_query_stack_frame_extra);
let info = QueryStackDeferred::new(
(tcx, vtable.name, vtable.dep_kind, vtable.description_fn, key),
mk_query_stack_frame_extra,
);
QueryStackFrame::new(info, kind, hash, def_id, def_id_for_ty_in_cycle)
}

@@ -346,9 +354,8 @@ pub(crate) fn encode_query_results<'a, 'tcx, Q, C: QueryCache, const FLAGS: Quer
{
let _timer = tcx.prof.generic_activity_with_arg("encode_query_results_for", query.name());

assert!(all_inactive(query.query_state(tcx)));
let cache = query.query_cache(tcx);
cache.iter(&mut |key, value, dep_node| {
assert!(all_inactive(query.query_state()));
query.query_cache().iter(&mut |key, value, dep_node| {
if query.will_cache_on_disk_for_key(tcx, key) {
let dep_node = SerializedDepNodeIndex::new(dep_node.index());

@@ -368,7 +375,7 @@ pub(crate) fn query_key_hash_verify<'tcx, C: QueryCache, const FLAGS: QueryFlags
) {
let _timer = tcx.prof.generic_activity_with_arg("query_key_hash_verify_for", query.name());

let cache = query.query_cache(tcx);
let cache = query.query_cache();
let mut map = UnordMap::with_capacity(cache.len());
cache.iter(&mut |key, _, _| {
let node = DepNode::construct(tcx, query.dep_kind(), key);
@@ -569,8 +576,8 @@ macro_rules! define_queries {
eval_always: is_eval_always!([$($modifiers)*]),
dep_kind: dep_graph::dep_kinds::$name,
cycle_error_handling: cycle_error_handling!([$($modifiers)*]),
query_state: std::mem::offset_of!(QueryStates<'tcx>, $name),
query_cache: std::mem::offset_of!(QueryCaches<'tcx>, $name),
query_state: Default::default(),
query_cache: Default::default(),
Comment on lines +579 to +580
Question/suggestion: Should we get rid of the redundant query_ and rename these to state and cache?

will_cache_on_disk_for_key_fn: if_cache_on_disk!([$($modifiers)*] {
Some(::rustc_middle::queries::_cache_on_disk_if_fns::$name)
} {
@@ -665,7 +672,8 @@ macro_rules! define_queries {
};

// Call `gather_active_jobs_inner` to do the actual work.
let res = crate::execution::gather_active_jobs_inner(&tcx.query_system.states.$name,
let res = crate::execution::gather_active_jobs_inner(
&tcx.query_system.query_vtables.$name.query_state,
tcx,
make_frame,
require_complete,
@@ -691,7 +699,7 @@ macro_rules! define_queries {
$crate::profiling_support::alloc_self_profile_query_strings_for_query_cache(
tcx,
stringify!($name),
&tcx.query_system.caches.$name,
&tcx.query_system.query_vtables.$name.query_cache,
string_cache,
)
}