Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
31 changes: 29 additions & 2 deletions compiler/rustc_query_impl/src/execution.rs
Original file line number Diff line number Diff line change
Expand Up @@ -16,8 +16,8 @@ use rustc_middle::ty::TyCtxt;
use rustc_middle::verify_ich::incremental_verify_ich;
use rustc_span::{DUMMY_SP, Span};

use crate::collect_active_jobs_from_all_queries;
use crate::dep_graph::{DepNode, DepNodeIndex};
use crate::for_each_query_vtable;
use crate::job::{QueryJobInfo, QueryJobMap, find_cycle_in_stack, report_cycle};
use crate::plumbing::{current_query_job, next_job_id, start_query};

Expand All @@ -30,14 +30,41 @@ pub(crate) fn all_inactive<'tcx, K>(state: &QueryState<'tcx, K>) -> bool {
state.active.lock_shards().all(|shard| shard.is_empty())
}

/// Returns a map of currently active query jobs, collected from all queries.
///
/// When `require_complete` is `true`, every shard lock of every query's
/// state is acquired, so the resulting map is guaranteed to be complete
/// and this function always returns `Ok`. When it is `false`, shards
/// whose locks cannot be acquired are skipped, and the (possibly
/// incomplete) map is returned as `Err`.
///
/// Prefer passing `false` for `require_complete` to avoid potential
/// deadlocks, especially when calling from within a deadlock handler,
/// unless a complete map is needed and no deadlock is possible at the
/// call site.
pub fn collect_active_jobs_from_all_queries<'tcx>(
    tcx: TyCtxt<'tcx>,
    require_complete: bool,
) -> Result<QueryJobMap<'tcx>, QueryJobMap<'tcx>> {
    let mut jobs = QueryJobMap::default();
    let mut is_complete = true;

    for_each_query_vtable!(ALL, tcx, |vtable| {
        // `gather_active_jobs` yields `None` when it had to skip a shard.
        is_complete &= gather_active_jobs(vtable, tcx, require_complete, &mut jobs).is_some();
    });

    match is_complete {
        true => Ok(jobs),
        false => Err(jobs),
    }
}

/// Internal plumbing for collecting the set of active jobs for this query.
///
/// Should only be called from `collect_active_jobs_from_all_queries`.
///
/// (We arbitrarily use the word "gather" when collecting the jobs for
/// each individual query, so that we have distinct function names to
/// grep for.)
pub(crate) fn gather_active_jobs<'tcx, C>(
fn gather_active_jobs<'tcx, C>(
query: &'tcx QueryVTable<'tcx, C>,
tcx: TyCtxt<'tcx>,
require_complete: bool,
Expand Down
2 changes: 1 addition & 1 deletion compiler/rustc_query_impl/src/job.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ use rustc_middle::ty::TyCtxt;
use rustc_session::Session;
use rustc_span::{DUMMY_SP, Span};

use crate::collect_active_jobs_from_all_queries;
use crate::execution::collect_active_jobs_from_all_queries;

/// Map from query job IDs to job information collected by
/// `collect_active_jobs_from_all_queries`.
Expand Down
11 changes: 6 additions & 5 deletions compiler/rustc_query_impl/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,15 +11,15 @@
use rustc_data_structures::sync::AtomicU64;
use rustc_middle::dep_graph;
use rustc_middle::queries::{self, ExternProviders, Providers};
use rustc_middle::query::on_disk_cache::{CacheEncoder, EncodedDepNodeIndex, OnDiskCache};
use rustc_middle::query::on_disk_cache::OnDiskCache;
use rustc_middle::query::plumbing::{QuerySystem, QueryVTable};
use rustc_middle::query::{AsLocalQueryKey, QueryCache, QueryMode};
use rustc_middle::ty::TyCtxt;
use rustc_span::Span;

pub use crate::dep_kind_vtables::make_dep_kind_vtables;
pub use crate::execution::collect_active_jobs_from_all_queries;
pub use crate::job::{QueryJobMap, break_query_cycles, print_query_stack};
use crate::profiling_support::QueryKeyStringCache;

#[macro_use]
mod plumbing;
Expand Down Expand Up @@ -66,7 +66,8 @@ pub fn query_system<'tcx>(
rustc_middle::rustc_with_all_queries! { define_queries! }

pub fn provide(providers: &mut rustc_middle::util::Providers) {
providers.hooks.alloc_self_profile_query_strings = alloc_self_profile_query_strings;
providers.hooks.query_key_hash_verify_all = query_key_hash_verify_all;
providers.hooks.encode_all_query_results = encode_all_query_results;
providers.hooks.alloc_self_profile_query_strings =
profiling_support::alloc_self_profile_query_strings;
providers.hooks.query_key_hash_verify_all = plumbing::query_key_hash_verify_all;
providers.hooks.encode_all_query_results = plumbing::encode_all_query_results;
}
149 changes: 61 additions & 88 deletions compiler/rustc_query_impl/src/plumbing.rs
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ use rustc_span::def_id::LOCAL_CRATE;
use crate::error::{QueryOverflow, QueryOverflowNote};
use crate::execution::{all_inactive, force_query};
use crate::job::find_dep_kind_root;
use crate::{GetQueryVTable, collect_active_jobs_from_all_queries};
use crate::{GetQueryVTable, collect_active_jobs_from_all_queries, for_each_query_vtable};

fn depth_limit_error<'tcx>(tcx: TyCtxt<'tcx>, job: QueryJobId) {
let job_map =
Expand Down Expand Up @@ -146,7 +146,17 @@ where
QueryStackFrame::new(info, kind, def_id, def_id_for_ty_in_cycle)
}

pub(crate) fn encode_query_results<'a, 'tcx, C, V>(
/// Encodes the results of every query that caches to disk into `encoder`,
/// recording the dep-node index of each encoded result in
/// `query_result_index`.
pub(crate) fn encode_all_query_results<'tcx>(
    tcx: TyCtxt<'tcx>,
    encoder: &mut CacheEncoder<'_, 'tcx>,
    query_result_index: &mut EncodedDepNodeIndex,
) {
    // Only queries selected by the `CACHE_ON_DISK` filter participate in
    // on-disk caching.
    for_each_query_vtable!(CACHE_ON_DISK, tcx, |vtable| {
        encode_query_results(tcx, vtable, encoder, query_result_index);
    });
}

fn encode_query_results<'a, 'tcx, C, V>(
tcx: TyCtxt<'tcx>,
query: &'tcx QueryVTable<'tcx, C>,
encoder: &mut CacheEncoder<'a, 'tcx>,
Expand All @@ -172,7 +182,17 @@ pub(crate) fn encode_query_results<'a, 'tcx, C, V>(
});
}

pub(crate) fn query_key_hash_verify<'tcx, C: QueryCache>(
/// Verifies the key hashes of all queries, but only when incremental hash
/// verification is requested via `-Zincremental-verify-ich` or this is a
/// debug-assertions build.
pub(crate) fn query_key_hash_verify_all<'tcx>(tcx: TyCtxt<'tcx>) {
    let enabled = tcx.sess.opts.unstable_opts.incremental_verify_ich || cfg!(debug_assertions);
    if !enabled {
        return;
    }

    tcx.sess.time("query_key_hash_verify_all", || {
        for_each_query_vtable!(ALL, tcx, |vtable| {
            query_key_hash_verify(vtable, tcx);
        });
    });
}

fn query_key_hash_verify<'tcx, C: QueryCache>(
query: &'tcx QueryVTable<'tcx, C>,
tcx: TyCtxt<'tcx>,
) {
Expand Down Expand Up @@ -510,95 +530,48 @@ macro_rules! define_queries {
}
}

/// Returns a map of currently active query jobs, collected from all queries.
/// Given a filter condition (e.g. `ALL` or `CACHE_ON_DISK`), a `tcx`,
/// and a closure expression that accepts `&QueryVTable`, this macro
/// calls that closure with each query vtable that satisfies the filter
/// condition.
///
/// If `require_complete` is `true`, this function locks all shards of the
/// query results to produce a complete map, which always returns `Ok`.
/// Otherwise, it may return an incomplete map as an error if any shard
/// lock cannot be acquired.
/// This needs to be a macro, because the vtables can have different
/// key/value/cache types for different queries.
///
/// Prefer passing `false` to `require_complete` to avoid potential deadlocks,
/// especially when called from within a deadlock handler, unless a
/// complete map is needed and no deadlock is possible at this call site.
pub fn collect_active_jobs_from_all_queries<'tcx>(
tcx: TyCtxt<'tcx>,
require_complete: bool,
) -> Result<QueryJobMap<'tcx>, QueryJobMap<'tcx>> {
let mut job_map_out = QueryJobMap::default();
let mut complete = true;

$(
let res = crate::execution::gather_active_jobs(
&tcx.query_system.query_vtables.$name,
tcx,
require_complete,
&mut job_map_out,
);
if res.is_none() {
complete = false;
}
)*

if complete { Ok(job_map_out) } else { Err(job_map_out) }
}

/// All self-profiling events generated by the query engine use
/// virtual `StringId`s for their `event_id`. This method makes all
/// those virtual `StringId`s point to actual strings.
/// This macro's argument syntax is specifically intended to look like
/// plain Rust code, so that `for_each_query_vtable!(..)` calls will be
/// formatted by rustfmt.
///
/// If we are recording only summary data, the ids will point to
/// just the query names. If we are recording query keys too, we
/// allocate the corresponding strings here.
pub fn alloc_self_profile_query_strings(tcx: TyCtxt<'_>) {
if !tcx.prof.enabled() {
return;
}

let _prof_timer = tcx.sess.prof.generic_activity("self_profile_alloc_query_strings");

let mut string_cache = QueryKeyStringCache::new();

$(
$crate::profiling_support::alloc_self_profile_query_strings_for_query_cache(
tcx,
stringify!($name),
&tcx.query_system.query_vtables.$name.cache,
&mut string_cache,
);
)*

tcx.sess.prof.store_query_cache_hits();
}

fn encode_all_query_results<'tcx>(
tcx: TyCtxt<'tcx>,
encoder: &mut CacheEncoder<'_, 'tcx>,
query_result_index: &mut EncodedDepNodeIndex,
) {
$(
#[cfg($cache_on_disk)]
{
$crate::plumbing::encode_query_results(
tcx,
&tcx.query_system.query_vtables.$name,
encoder,
query_result_index,
)
}
)*
/// To avoid too much nested-macro complication, filter conditions are
/// implemented by hand as needed.
macro_rules! for_each_query_vtable {
    // Call with all queries.
    (ALL, $tcx:expr, $closure:expr) => {{
        let tcx: rustc_middle::ty::TyCtxt<'_> = $tcx;
        // NOTE: `$name` is a metavariable of the enclosing
        // `define_queries!` macro, repeated once per query.
        $(
            let query: &rustc_middle::query::plumbing::QueryVTable<'_, _> =
                &tcx.query_system.query_vtables.$name;
            $closure(query);
        )*
    }};

    // Only call with queries that can potentially cache to disk.
    //
    // This allows the use of trait bounds that only need to be satisfied
    // by the subset of queries that actually cache to disk.
    (CACHE_ON_DISK, $tcx:expr, $closure:expr) => {{
        let tcx: rustc_middle::ty::TyCtxt<'_> = $tcx;
        $(
            // `$cache_on_disk` is a `cfg`-style predicate supplied by the
            // enclosing `define_queries!` macro for each query; queries
            // that never cache to disk are compiled out of this loop body.
            #[cfg($cache_on_disk)]
            {
                let query: &rustc_middle::query::plumbing::QueryVTable<'_, _> =
                    &tcx.query_system.query_vtables.$name;
                $closure(query);
            }
        )*
    }}
}

pub fn query_key_hash_verify_all<'tcx>(tcx: TyCtxt<'tcx>) {
if tcx.sess.opts.unstable_opts.incremental_verify_ich || cfg!(debug_assertions) {
tcx.sess.time("query_key_hash_verify_all", || {
$(
$crate::plumbing::query_key_hash_verify(
&tcx.query_system.query_vtables.$name,
tcx
);
)*
})
}
}
pub(crate) use for_each_query_vtable;
}
}
39 changes: 32 additions & 7 deletions compiler/rustc_query_impl/src/profiling_support.rs
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,11 @@ use rustc_data_structures::profiling::SelfProfiler;
use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE, LocalDefId};
use rustc_hir::definitions::DefPathData;
use rustc_middle::query::QueryCache;
use rustc_middle::query::plumbing::QueryVTable;
use rustc_middle::ty::TyCtxt;

use crate::for_each_query_vtable;

pub(crate) struct QueryKeyStringCache {
def_id_cache: FxHashMap<DefId, StringId>,
}
Expand Down Expand Up @@ -172,13 +175,35 @@ where
}
}

/// All self-profiling events generated by the query engine use
/// virtual `StringId`s for their `event_id`. This function makes all
/// of those virtual `StringId`s point to actual strings.
///
/// If only summary data is being recorded, the ids will point to just
/// the query names. If query keys are being recorded too, the
/// corresponding strings are allocated here as well.
pub(crate) fn alloc_self_profile_query_strings(tcx: TyCtxt<'_>) {
    // Nothing to resolve when self-profiling is disabled.
    if !tcx.prof.enabled() {
        return;
    }

    let _prof_timer = tcx.sess.prof.generic_activity("self_profile_alloc_query_strings");

    // Shared across all queries so repeated keys map to the same string id.
    let mut key_string_cache = QueryKeyStringCache::new();

    for_each_query_vtable!(ALL, tcx, |vtable| {
        alloc_self_profile_query_strings_for_query_cache(tcx, vtable, &mut key_string_cache);
    });

    tcx.sess.prof.store_query_cache_hits();
}

/// Allocate the self-profiling query strings for a single query cache. This
/// method is called from `alloc_self_profile_query_strings` which knows all
/// the queries via macro magic.
pub(crate) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
tcx: TyCtxt<'tcx>,
query_name: &'static str,
query_cache: &C,
query: &'tcx QueryVTable<'tcx, C>,
string_cache: &mut QueryKeyStringCache,
) where
C: QueryCache,
Expand All @@ -193,14 +218,14 @@ pub(crate) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
if profiler.query_key_recording_enabled() {
let mut query_string_builder = QueryKeyStringBuilder::new(profiler, tcx, string_cache);

let query_name = profiler.get_or_alloc_cached_string(query_name);
let query_name = profiler.get_or_alloc_cached_string(query.name);

// Since building the string representation of query keys might
// need to invoke queries itself, we cannot keep the query caches
// locked while doing so. Instead we copy out the
// `(query_key, dep_node_index)` pairs and release the lock again.
let mut query_keys_and_indices = Vec::new();
query_cache.for_each(&mut |k, _, i| query_keys_and_indices.push((*k, i)));
query.cache.for_each(&mut |k, _, i| query_keys_and_indices.push((*k, i)));

// Now actually allocate the strings. If allocating the strings
// generates new entries in the query cache, we'll miss them but
Expand All @@ -221,14 +246,14 @@ pub(crate) fn alloc_self_profile_query_strings_for_query_cache<'tcx, C>(
}
} else {
// In this branch we don't allocate query keys
let query_name = profiler.get_or_alloc_cached_string(query_name);
let query_name = profiler.get_or_alloc_cached_string(query.name);
let event_id = event_id_builder.from_label(query_name).to_string_id();

// FIXME(eddyb) make this O(1) by using a pre-cached query name `EventId`,
// instead of passing the `DepNodeIndex` to `finish_with_query_invocation_id`,
// when recording the event in the first place.
let mut query_invocation_ids = Vec::new();
query_cache.for_each(&mut |_, _, i| {
query.cache.for_each(&mut |_, _, i| {
query_invocation_ids.push(i.into());
});

Expand Down
Loading