Rollup merge of #126812 - compiler-errors:tcx-cx, r=lcnr

Rename `tcx` to `cx` in new solver generic code

self-explanatory, should be the last major churn-y rename

r? lcnr
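For illustration only, the shape of the change is a rename of the generic interner/context parameter from `tcx` to `cx` throughout the new solver's interner-generic code. The standalone sketch below shows the before/after pattern; the `Interner` trait, `DummyCx` type, and `assemble_candidates` function are stand-ins invented for this sketch, not rustc's actual definitions.

// Standalone sketch, not rustc code: a generic context parameter renamed
// from `tcx` to `cx`. `Interner`, `DummyCx`, and `assemble_candidates`
// below are illustrative stand-ins.
trait Interner: Copy {
    fn trait_is_auto(self, def_id: u32) -> bool;
}

#[derive(Clone, Copy)]
struct DummyCx;

impl Interner for DummyCx {
    fn trait_is_auto(self, def_id: u32) -> bool {
        // Pretend def_id 0 is an auto trait.
        def_id == 0
    }
}

// Before the rename, the interner parameter was spelled `tcx`:
// fn assemble_candidates<I: Interner>(tcx: I, trait_def_id: u32) -> bool {
//     tcx.trait_is_auto(trait_def_id)
// }

// After the rename, the same code reads:
fn assemble_candidates<I: Interner>(cx: I, trait_def_id: u32) -> bool {
    cx.trait_is_auto(trait_def_id)
}

fn main() {
    assert!(assemble_candidates(DummyCx, 0));
}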
Matthias Krüger 2024-06-26 07:50:17 +02:00 committed by GitHub
commit dc22ffc725
13 changed files with 333 additions and 347 deletions

View File

@ -32,14 +32,14 @@ where
&mut self,
goal: Goal<I, (I::Term, I::Term, ty::AliasRelationDirection)>,
) -> QueryResult<I> {
let tcx = self.cx();
let cx = self.cx();
let Goal { param_env, predicate: (lhs, rhs, direction) } = goal;
debug_assert!(lhs.to_alias_term().is_some() || rhs.to_alias_term().is_some());
// Structurally normalize the lhs.
let lhs = if let Some(alias) = lhs.to_alias_term() {
let term = self.next_term_infer_of_kind(lhs);
self.add_normalizes_to_goal(goal.with(tcx, ty::NormalizesTo { alias, term }));
self.add_normalizes_to_goal(goal.with(cx, ty::NormalizesTo { alias, term }));
term
} else {
lhs
@ -48,7 +48,7 @@ where
// Structurally normalize the rhs.
let rhs = if let Some(alias) = rhs.to_alias_term() {
let term = self.next_term_infer_of_kind(rhs);
self.add_normalizes_to_goal(goal.with(tcx, ty::NormalizesTo { alias, term }));
self.add_normalizes_to_goal(goal.with(cx, ty::NormalizesTo { alias, term }));
term
} else {
rhs

View File

@ -36,11 +36,11 @@ where
{
fn self_ty(self) -> I::Ty;
fn trait_ref(self, tcx: I) -> ty::TraitRef<I>;
fn trait_ref(self, cx: I) -> ty::TraitRef<I>;
fn with_self_ty(self, tcx: I, self_ty: I::Ty) -> Self;
fn with_self_ty(self, cx: I, self_ty: I::Ty) -> Self;
fn trait_def_id(self, tcx: I) -> I::DefId;
fn trait_def_id(self, cx: I) -> I::DefId;
/// Try equating an assumption predicate against a goal's predicate. If it
/// holds, then execute the `then` callback, which should do any additional
@ -82,7 +82,7 @@ where
assumption: I::Clause,
) -> Result<Candidate<I>, NoSolution> {
Self::probe_and_match_goal_against_assumption(ecx, source, goal, assumption, |ecx| {
let tcx = ecx.cx();
let cx = ecx.cx();
let ty::Dynamic(bounds, _, _) = goal.predicate.self_ty().kind() else {
panic!("expected object type in `probe_and_consider_object_bound_candidate`");
};
@ -91,7 +91,7 @@ where
structural_traits::predicates_for_object_candidate(
ecx,
goal.param_env,
goal.predicate.trait_ref(tcx),
goal.predicate.trait_ref(cx),
bounds,
),
);
@ -340,15 +340,15 @@ where
goal: Goal<I, G>,
candidates: &mut Vec<Candidate<I>>,
) {
let tcx = self.cx();
tcx.for_each_relevant_impl(
goal.predicate.trait_def_id(tcx),
let cx = self.cx();
cx.for_each_relevant_impl(
goal.predicate.trait_def_id(cx),
goal.predicate.self_ty(),
|impl_def_id| {
// For every `default impl`, there's always a non-default `impl`
// that will *also* apply. There's no reason to register a candidate
// for this impl, since it is *not* proof that the trait goal holds.
if tcx.impl_is_default(impl_def_id) {
if cx.impl_is_default(impl_def_id) {
return;
}
@ -366,8 +366,8 @@ where
goal: Goal<I, G>,
candidates: &mut Vec<Candidate<I>>,
) {
let tcx = self.cx();
let trait_def_id = goal.predicate.trait_def_id(tcx);
let cx = self.cx();
let trait_def_id = goal.predicate.trait_def_id(cx);
// N.B. When assembling built-in candidates for lang items that are also
// `auto` traits, then the auto trait candidate that is assembled in
@ -378,47 +378,47 @@ where
// `solve::trait_goals` instead.
let result = if let Err(guar) = goal.predicate.error_reported() {
G::consider_error_guaranteed_candidate(self, guar)
} else if tcx.trait_is_auto(trait_def_id) {
} else if cx.trait_is_auto(trait_def_id) {
G::consider_auto_trait_candidate(self, goal)
} else if tcx.trait_is_alias(trait_def_id) {
} else if cx.trait_is_alias(trait_def_id) {
G::consider_trait_alias_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Sized) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::Sized) {
G::consider_builtin_sized_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Copy)
|| tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Clone)
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::Copy)
|| cx.is_lang_item(trait_def_id, TraitSolverLangItem::Clone)
{
G::consider_builtin_copy_clone_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::PointerLike) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::PointerLike) {
G::consider_builtin_pointer_like_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::FnPtrTrait) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::FnPtrTrait) {
G::consider_builtin_fn_ptr_trait_candidate(self, goal)
} else if let Some(kind) = self.cx().fn_trait_kind_from_def_id(trait_def_id) {
G::consider_builtin_fn_trait_candidates(self, goal, kind)
} else if let Some(kind) = self.cx().async_fn_trait_kind_from_def_id(trait_def_id) {
G::consider_builtin_async_fn_trait_candidates(self, goal, kind)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::AsyncFnKindHelper) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::AsyncFnKindHelper) {
G::consider_builtin_async_fn_kind_helper_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Tuple) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::Tuple) {
G::consider_builtin_tuple_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::PointeeTrait) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::PointeeTrait) {
G::consider_builtin_pointee_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Future) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::Future) {
G::consider_builtin_future_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Iterator) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::Iterator) {
G::consider_builtin_iterator_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::FusedIterator) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::FusedIterator) {
G::consider_builtin_fused_iterator_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::AsyncIterator) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::AsyncIterator) {
G::consider_builtin_async_iterator_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Coroutine) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::Coroutine) {
G::consider_builtin_coroutine_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::DiscriminantKind) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::DiscriminantKind) {
G::consider_builtin_discriminant_kind_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::AsyncDestruct) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::AsyncDestruct) {
G::consider_builtin_async_destruct_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Destruct) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::Destruct) {
G::consider_builtin_destruct_candidate(self, goal)
} else if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::TransmuteTrait) {
} else if cx.is_lang_item(trait_def_id, TraitSolverLangItem::TransmuteTrait) {
G::consider_builtin_transmute_candidate(self, goal)
} else {
Err(NoSolution)
@ -428,7 +428,7 @@ where
// There may be multiple unsize candidates for a trait with several supertraits:
// `trait Foo: Bar<A> + Bar<B>` and `dyn Foo: Unsize<dyn Bar<_>>`
if tcx.is_lang_item(trait_def_id, TraitSolverLangItem::Unsize) {
if cx.is_lang_item(trait_def_id, TraitSolverLangItem::Unsize) {
candidates.extend(G::consider_structural_builtin_unsize_candidates(self, goal));
}
}
@ -557,8 +557,8 @@ where
goal: Goal<I, G>,
candidates: &mut Vec<Candidate<I>>,
) {
let tcx = self.cx();
if !tcx.trait_may_be_implemented_via_object(goal.predicate.trait_def_id(tcx)) {
let cx = self.cx();
if !cx.trait_may_be_implemented_via_object(goal.predicate.trait_def_id(cx)) {
return;
}
@ -596,7 +596,7 @@ where
};
// Do not consider built-in object impls for non-object-safe types.
if bounds.principal_def_id().is_some_and(|def_id| !tcx.trait_is_object_safe(def_id)) {
if bounds.principal_def_id().is_some_and(|def_id| !cx.trait_is_object_safe(def_id)) {
return;
}
@ -614,7 +614,7 @@ where
self,
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
goal,
bound.with_self_ty(tcx, self_ty),
bound.with_self_ty(cx, self_ty),
));
}
}
@ -624,14 +624,13 @@ where
// since we don't need to look at any supertrait or anything if we are doing
// a projection goal.
if let Some(principal) = bounds.principal() {
let principal_trait_ref = principal.with_self_ty(tcx, self_ty);
for (idx, assumption) in D::elaborate_supertraits(tcx, principal_trait_ref).enumerate()
{
let principal_trait_ref = principal.with_self_ty(cx, self_ty);
for (idx, assumption) in D::elaborate_supertraits(cx, principal_trait_ref).enumerate() {
candidates.extend(G::probe_and_consider_object_bound_candidate(
self,
CandidateSource::BuiltinImpl(BuiltinImplSource::Object(idx)),
goal,
assumption.upcast(tcx),
assumption.upcast(cx),
));
}
}
@ -649,11 +648,11 @@ where
goal: Goal<I, G>,
candidates: &mut Vec<Candidate<I>>,
) {
let tcx = self.cx();
let cx = self.cx();
candidates.extend(self.probe_trait_candidate(CandidateSource::CoherenceUnknowable).enter(
|ecx| {
let trait_ref = goal.predicate.trait_ref(tcx);
let trait_ref = goal.predicate.trait_ref(cx);
if ecx.trait_ref_is_knowable(goal.param_env, trait_ref)? {
Err(NoSolution)
} else {
@ -678,9 +677,9 @@ where
goal: Goal<I, G>,
candidates: &mut Vec<Candidate<I>>,
) {
let tcx = self.cx();
let cx = self.cx();
let trait_goal: Goal<I, ty::TraitPredicate<I>> =
goal.with(tcx, goal.predicate.trait_ref(tcx));
goal.with(cx, goal.predicate.trait_ref(cx));
let mut trait_candidates_from_env = vec![];
self.probe(|_| ProbeKind::ShadowedEnvProbing).enter(|ecx| {

View File

@ -23,7 +23,7 @@ where
D: SolverDelegate<Interner = I>,
I: Interner,
{
let tcx = ecx.cx();
let cx = ecx.cx();
match ty.kind() {
ty::Uint(_)
| ty::Int(_)
@ -36,7 +36,7 @@ where
| ty::Char => Ok(vec![]),
// Treat `str` like it's defined as `struct str([u8]);`
ty::Str => Ok(vec![ty::Binder::dummy(Ty::new_slice(tcx, Ty::new_u8(tcx)))]),
ty::Str => Ok(vec![ty::Binder::dummy(Ty::new_slice(cx, Ty::new_u8(cx)))]),
ty::Dynamic(..)
| ty::Param(..)
@ -79,21 +79,21 @@ where
.cx()
.bound_coroutine_hidden_types(def_id)
.into_iter()
.map(|bty| bty.instantiate(tcx, args))
.map(|bty| bty.instantiate(cx, args))
.collect()),
// For `PhantomData<T>`, we pass `T`.
ty::Adt(def, args) if def.is_phantom_data() => Ok(vec![ty::Binder::dummy(args.type_at(0))]),
ty::Adt(def, args) => {
Ok(def.all_field_tys(tcx).iter_instantiated(tcx, args).map(ty::Binder::dummy).collect())
Ok(def.all_field_tys(cx).iter_instantiated(cx, args).map(ty::Binder::dummy).collect())
}
ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => {
// We can resolve the `impl Trait` to its concrete type,
// which enforces a DAG between the functions requiring
// the auto trait bounds in question.
Ok(vec![ty::Binder::dummy(tcx.type_of(def_id).instantiate(tcx, args))])
Ok(vec![ty::Binder::dummy(cx.type_of(def_id).instantiate(cx, args))])
}
}
}
@ -247,18 +247,18 @@ where
// Returns a binder of the tupled input types and output type from a builtin callable type.
pub(in crate::solve) fn extract_tupled_inputs_and_output_from_callable<I: Interner>(
tcx: I,
cx: I,
self_ty: I::Ty,
goal_kind: ty::ClosureKind,
) -> Result<Option<ty::Binder<I, (I::Ty, I::Ty)>>, NoSolution> {
match self_ty.kind() {
// keep this in sync with assemble_fn_pointer_candidates until the old solver is removed.
ty::FnDef(def_id, args) => {
let sig = tcx.fn_sig(def_id);
if sig.skip_binder().is_fn_trait_compatible() && !tcx.has_target_features(def_id) {
let sig = cx.fn_sig(def_id);
if sig.skip_binder().is_fn_trait_compatible() && !cx.has_target_features(def_id) {
Ok(Some(
sig.instantiate(tcx, args)
.map_bound(|sig| (Ty::new_tup(tcx, sig.inputs().as_slice()), sig.output())),
sig.instantiate(cx, args)
.map_bound(|sig| (Ty::new_tup(cx, sig.inputs().as_slice()), sig.output())),
))
} else {
Err(NoSolution)
@ -268,7 +268,7 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_callable<I: Intern
ty::FnPtr(sig) => {
if sig.is_fn_trait_compatible() {
Ok(Some(
sig.map_bound(|sig| (Ty::new_tup(tcx, sig.inputs().as_slice()), sig.output())),
sig.map_bound(|sig| (Ty::new_tup(cx, sig.inputs().as_slice()), sig.output())),
))
} else {
Err(NoSolution)
@ -323,10 +323,10 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_callable<I: Intern
}
coroutine_closure_to_certain_coroutine(
tcx,
cx,
goal_kind,
// No captures by ref, so this doesn't matter.
Region::new_static(tcx),
Region::new_static(cx),
def_id,
args,
sig,
@ -339,9 +339,9 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_callable<I: Intern
}
coroutine_closure_to_ambiguous_coroutine(
tcx,
cx,
goal_kind, // No captures by ref, so this doesn't matter.
Region::new_static(tcx),
Region::new_static(cx),
def_id,
args,
sig,
@ -403,7 +403,7 @@ pub(in crate::solve) struct AsyncCallableRelevantTypes<I: Interner> {
// which enforces the closure is actually callable with the given trait. When we
// know the kind already, we can short-circuit this check.
pub(in crate::solve) fn extract_tupled_inputs_and_output_from_async_callable<I: Interner>(
tcx: I,
cx: I,
self_ty: I::Ty,
goal_kind: ty::ClosureKind,
env_region: I::Region,
@ -422,9 +422,7 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_async_callable<I:
return Err(NoSolution);
}
coroutine_closure_to_certain_coroutine(
tcx, goal_kind, env_region, def_id, args, sig,
)
coroutine_closure_to_certain_coroutine(cx, goal_kind, env_region, def_id, args, sig)
} else {
// When we don't know the closure kind (and therefore also the closure's upvars,
// which are computed at the same time), we must delay the computation of the
@ -435,15 +433,15 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_async_callable<I:
// coroutine upvars respecting the closure kind.
nested.push(
ty::TraitRef::new(
tcx,
tcx.require_lang_item(TraitSolverLangItem::AsyncFnKindHelper),
[kind_ty, Ty::from_closure_kind(tcx, goal_kind)],
cx,
cx.require_lang_item(TraitSolverLangItem::AsyncFnKindHelper),
[kind_ty, Ty::from_closure_kind(cx, goal_kind)],
)
.upcast(tcx),
.upcast(cx),
);
coroutine_closure_to_ambiguous_coroutine(
tcx, goal_kind, env_region, def_id, args, sig,
cx, goal_kind, env_region, def_id, args, sig,
)
};
@ -458,21 +456,21 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_async_callable<I:
}
ty::FnDef(..) | ty::FnPtr(..) => {
let bound_sig = self_ty.fn_sig(tcx);
let bound_sig = self_ty.fn_sig(cx);
let sig = bound_sig.skip_binder();
let future_trait_def_id = tcx.require_lang_item(TraitSolverLangItem::Future);
let future_trait_def_id = cx.require_lang_item(TraitSolverLangItem::Future);
// `FnDef` and `FnPtr` only implement `AsyncFn*` when their
// return type implements `Future`.
let nested = vec![
bound_sig
.rebind(ty::TraitRef::new(tcx, future_trait_def_id, [sig.output()]))
.upcast(tcx),
.rebind(ty::TraitRef::new(cx, future_trait_def_id, [sig.output()]))
.upcast(cx),
];
let future_output_def_id = tcx.require_lang_item(TraitSolverLangItem::FutureOutput);
let future_output_ty = Ty::new_projection(tcx, future_output_def_id, [sig.output()]);
let future_output_def_id = cx.require_lang_item(TraitSolverLangItem::FutureOutput);
let future_output_ty = Ty::new_projection(cx, future_output_def_id, [sig.output()]);
Ok((
bound_sig.rebind(AsyncCallableRelevantTypes {
tupled_inputs_ty: Ty::new_tup(tcx, sig.inputs().as_slice()),
tupled_inputs_ty: Ty::new_tup(cx, sig.inputs().as_slice()),
output_coroutine_ty: sig.output(),
coroutine_return_ty: future_output_ty,
}),
@ -483,13 +481,13 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_async_callable<I:
let args = args.as_closure();
let bound_sig = args.sig();
let sig = bound_sig.skip_binder();
let future_trait_def_id = tcx.require_lang_item(TraitSolverLangItem::Future);
let future_trait_def_id = cx.require_lang_item(TraitSolverLangItem::Future);
// `Closure`s only implement `AsyncFn*` when their return type
// implements `Future`.
let mut nested = vec![
bound_sig
.rebind(ty::TraitRef::new(tcx, future_trait_def_id, [sig.output()]))
.upcast(tcx),
.rebind(ty::TraitRef::new(cx, future_trait_def_id, [sig.output()]))
.upcast(cx),
];
// Additionally, we need to check that the closure kind
@ -501,7 +499,7 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_async_callable<I:
}
} else {
let async_fn_kind_trait_def_id =
tcx.require_lang_item(TraitSolverLangItem::AsyncFnKindHelper);
cx.require_lang_item(TraitSolverLangItem::AsyncFnKindHelper);
// When we don't know the closure kind (and therefore also the closure's upvars,
// which are computed at the same time), we must delay the computation of the
// generator's upvars. We do this using the `AsyncFnKindHelper`, which as a trait
@ -511,16 +509,16 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_async_callable<I:
// coroutine upvars respecting the closure kind.
nested.push(
ty::TraitRef::new(
tcx,
cx,
async_fn_kind_trait_def_id,
[kind_ty, Ty::from_closure_kind(tcx, goal_kind)],
[kind_ty, Ty::from_closure_kind(cx, goal_kind)],
)
.upcast(tcx),
.upcast(cx),
);
}
let future_output_def_id = tcx.require_lang_item(TraitSolverLangItem::FutureOutput);
let future_output_ty = Ty::new_projection(tcx, future_output_def_id, [sig.output()]);
let future_output_def_id = cx.require_lang_item(TraitSolverLangItem::FutureOutput);
let future_output_ty = Ty::new_projection(cx, future_output_def_id, [sig.output()]);
Ok((
bound_sig.rebind(AsyncCallableRelevantTypes {
tupled_inputs_ty: sig.inputs().get(0).unwrap(),
@ -565,7 +563,7 @@ pub(in crate::solve) fn extract_tupled_inputs_and_output_from_async_callable<I:
/// Given a coroutine-closure, project to its returned coroutine when we are *certain*
/// that the closure's kind is compatible with the goal.
fn coroutine_closure_to_certain_coroutine<I: Interner>(
tcx: I,
cx: I,
goal_kind: ty::ClosureKind,
goal_region: I::Region,
def_id: I::DefId,
@ -573,9 +571,9 @@ fn coroutine_closure_to_certain_coroutine<I: Interner>(
sig: ty::CoroutineClosureSignature<I>,
) -> I::Ty {
sig.to_coroutine_given_kind_and_upvars(
tcx,
cx,
args.parent_args(),
tcx.coroutine_for_closure(def_id),
cx.coroutine_for_closure(def_id),
goal_kind,
goal_region,
args.tupled_upvars_ty(),
@ -589,20 +587,20 @@ fn coroutine_closure_to_certain_coroutine<I: Interner>(
///
/// Note that we do not also push an `AsyncFnKindHelper` goal here.
fn coroutine_closure_to_ambiguous_coroutine<I: Interner>(
tcx: I,
cx: I,
goal_kind: ty::ClosureKind,
goal_region: I::Region,
def_id: I::DefId,
args: ty::CoroutineClosureArgs<I>,
sig: ty::CoroutineClosureSignature<I>,
) -> I::Ty {
let upvars_projection_def_id = tcx.require_lang_item(TraitSolverLangItem::AsyncFnKindUpvars);
let upvars_projection_def_id = cx.require_lang_item(TraitSolverLangItem::AsyncFnKindUpvars);
let tupled_upvars_ty = Ty::new_projection(
tcx,
cx,
upvars_projection_def_id,
[
I::GenericArg::from(args.kind_ty()),
Ty::from_closure_kind(tcx, goal_kind).into(),
Ty::from_closure_kind(cx, goal_kind).into(),
goal_region.into(),
sig.tupled_inputs_ty.into(),
args.tupled_upvars_ty().into(),
@ -610,10 +608,10 @@ fn coroutine_closure_to_ambiguous_coroutine<I: Interner>(
],
);
sig.to_coroutine(
tcx,
cx,
args.parent_args(),
Ty::from_closure_kind(tcx, goal_kind),
tcx.coroutine_for_closure(def_id),
Ty::from_closure_kind(cx, goal_kind),
cx.coroutine_for_closure(def_id),
tupled_upvars_ty,
)
}
@ -668,28 +666,28 @@ where
D: SolverDelegate<Interner = I>,
I: Interner,
{
let tcx = ecx.cx();
let cx = ecx.cx();
let mut requirements = vec![];
requirements
.extend(tcx.super_predicates_of(trait_ref.def_id).iter_instantiated(tcx, trait_ref.args));
.extend(cx.super_predicates_of(trait_ref.def_id).iter_instantiated(cx, trait_ref.args));
// FIXME(associated_const_equality): Also add associated consts to
// the requirements here.
for associated_type_def_id in tcx.associated_type_def_ids(trait_ref.def_id) {
for associated_type_def_id in cx.associated_type_def_ids(trait_ref.def_id) {
// associated types that require `Self: Sized` do not show up in the built-in
// implementation of `Trait for dyn Trait`, and can be dropped here.
if tcx.generics_require_sized_self(associated_type_def_id) {
if cx.generics_require_sized_self(associated_type_def_id) {
continue;
}
requirements
.extend(tcx.item_bounds(associated_type_def_id).iter_instantiated(tcx, trait_ref.args));
.extend(cx.item_bounds(associated_type_def_id).iter_instantiated(cx, trait_ref.args));
}
let mut replace_projection_with = HashMap::default();
for bound in object_bounds.iter() {
if let ty::ExistentialPredicate::Projection(proj) = bound.skip_binder() {
let proj = proj.with_self_ty(tcx, trait_ref.self_ty());
let proj = proj.with_self_ty(cx, trait_ref.self_ty());
let old_ty = replace_projection_with.insert(proj.def_id(), bound.rebind(proj));
assert_eq!(
old_ty,
@ -709,7 +707,7 @@ where
folder
.nested
.into_iter()
.chain(folded_requirements.into_iter().map(|clause| Goal::new(tcx, param_env, clause)))
.chain(folded_requirements.into_iter().map(|clause| Goal::new(cx, param_env, clause)))
.collect()
}

View File

@ -239,14 +239,14 @@ where
/// This function takes care of setting up the inference context, setting the anchor,
/// and registering opaques from the canonicalized input.
fn enter_canonical<R>(
tcx: I,
cx: I,
search_graph: &'a mut search_graph::SearchGraph<I>,
canonical_input: CanonicalInput<I>,
canonical_goal_evaluation: &mut ProofTreeBuilder<D>,
f: impl FnOnce(&mut EvalCtxt<'_, D>, Goal<I, I::Predicate>) -> R,
) -> R {
let (ref delegate, input, var_values) =
SolverDelegate::build_with_canonical(tcx, search_graph.solver_mode(), &canonical_input);
SolverDelegate::build_with_canonical(cx, search_graph.solver_mode(), &canonical_input);
let mut ecx = EvalCtxt {
delegate,
@ -292,9 +292,9 @@ where
/// Instead of calling this function directly, use either [EvalCtxt::evaluate_goal]
/// if you're inside of the solver or [SolverDelegateEvalExt::evaluate_root_goal] if you're
/// outside of it.
#[instrument(level = "debug", skip(tcx, search_graph, goal_evaluation), ret)]
#[instrument(level = "debug", skip(cx, search_graph, goal_evaluation), ret)]
fn evaluate_canonical_goal(
tcx: I,
cx: I,
search_graph: &'a mut search_graph::SearchGraph<I>,
canonical_input: CanonicalInput<I>,
goal_evaluation: &mut ProofTreeBuilder<D>,
@ -307,12 +307,12 @@ where
// The actual solver logic happens in `ecx.compute_goal`.
let result = ensure_sufficient_stack(|| {
search_graph.with_new_goal(
tcx,
cx,
canonical_input,
&mut canonical_goal_evaluation,
|search_graph, canonical_goal_evaluation| {
EvalCtxt::enter_canonical(
tcx,
cx,
search_graph,
canonical_input,
canonical_goal_evaluation,
@ -506,7 +506,7 @@ where
///
/// Goals for the next step get directly added to the nested goals of the `EvalCtxt`.
fn evaluate_added_goals_step(&mut self) -> Result<Option<Certainty>, NoSolution> {
let tcx = self.cx();
let cx = self.cx();
let mut goals = core::mem::take(&mut self.nested_goals);
// If this loop did not result in any progress, what's our final certainty.
@ -516,7 +516,7 @@ where
// RHS does not affect projection candidate assembly.
let unconstrained_rhs = self.next_term_infer_of_kind(goal.predicate.term);
let unconstrained_goal = goal.with(
tcx,
cx,
ty::NormalizesTo { alias: goal.predicate.alias, term: unconstrained_rhs },
);
@ -777,7 +777,7 @@ where
// NOTE: this check is purely an optimization, the structural eq would
// always fail if the term is not an inference variable.
if term.is_infer() {
let tcx = self.cx();
let cx = self.cx();
// We need to relate `alias` to `term` treating only the outermost
// constructor as rigid, relating any contained generic arguments as
// normal. We do this by first structurally equating the `term`
@ -787,8 +787,8 @@ where
// Alternatively we could modify `Equate` for this case by adding another
// variant to `StructurallyRelateAliases`.
let identity_args = self.fresh_args_for_item(alias.def_id);
let rigid_ctor = ty::AliasTerm::new_from_args(tcx, alias.def_id, identity_args);
let ctor_term = rigid_ctor.to_term(tcx);
let rigid_ctor = ty::AliasTerm::new_from_args(cx, alias.def_id, identity_args);
let ctor_term = rigid_ctor.to_term(cx);
let obligations =
self.delegate.eq_structurally_relating_aliases(param_env, term, ctor_term)?;
debug_assert!(obligations.is_empty());

View File

@ -323,13 +323,13 @@ impl<D: SolverDelegate<Interner = I>, I: Interner> ProofTreeBuilder<D> {
pub fn finalize_canonical_goal_evaluation(
&mut self,
tcx: I,
cx: I,
) -> Option<I::CanonicalGoalEvaluationStepRef> {
self.as_mut().map(|this| match this {
DebugSolver::CanonicalGoalEvaluation(evaluation) => {
let final_revision = mem::take(&mut evaluation.final_revision).unwrap();
let final_revision =
tcx.intern_canonical_goal_evaluation_step(final_revision.finalize());
cx.intern_canonical_goal_evaluation_step(final_revision.finalize());
let kind = WipCanonicalGoalEvaluationKind::Interned { final_revision };
assert_eq!(evaluation.kind.replace(kind), None);
final_revision

View File

@ -34,7 +34,7 @@ use crate::delegate::SolverDelegate;
/// How many fixpoint iterations we should attempt inside of the solver before bailing
/// with overflow.
///
/// We previously used `tcx.recursion_limit().0.checked_ilog2().unwrap_or(0)` for this.
/// We previously used `cx.recursion_limit().0.checked_ilog2().unwrap_or(0)` for this.
However, it feels unlikely that increasing the recursion limit by a power of two
to get one more iteration is ever useful or desirable. We now instead use a constant
here. If there ever end up being use cases where a bigger number of fixpoint iterations
@ -285,7 +285,7 @@ where
}
fn response_no_constraints_raw<I: Interner>(
tcx: I,
cx: I,
max_universe: ty::UniverseIndex,
variables: I::CanonicalVars,
certainty: Certainty,
@ -294,10 +294,10 @@ fn response_no_constraints_raw<I: Interner>(
max_universe,
variables,
value: Response {
var_values: ty::CanonicalVarValues::make_identity(tcx, variables),
// FIXME: maybe we should store the "no response" version in tcx, like
// we do for tcx.types and stuff.
external_constraints: tcx.mk_external_constraints(ExternalConstraintsData::default()),
var_values: ty::CanonicalVarValues::make_identity(cx, variables),
// FIXME: maybe we should store the "no response" version in cx, like
// we do for cx.types and stuff.
external_constraints: cx.mk_external_constraints(ExternalConstraintsData::default()),
certainty,
},
defining_opaque_types: Default::default(),

View File

@ -19,21 +19,21 @@ where
&mut self,
goal: Goal<I, ty::NormalizesTo<I>>,
) -> QueryResult<I> {
let tcx = self.cx();
let inherent = goal.predicate.alias.expect_ty(tcx);
let cx = self.cx();
let inherent = goal.predicate.alias.expect_ty(cx);
let impl_def_id = tcx.parent(inherent.def_id);
let impl_def_id = cx.parent(inherent.def_id);
let impl_args = self.fresh_args_for_item(impl_def_id);
// Equate impl header and add impl where clauses
self.eq(
goal.param_env,
inherent.self_ty(),
tcx.type_of(impl_def_id).instantiate(tcx, impl_args),
cx.type_of(impl_def_id).instantiate(cx, impl_args),
)?;
// Equate IAT with the RHS of the project goal
let inherent_args = inherent.rebase_inherent_args_onto_impl(impl_args, tcx);
let inherent_args = inherent.rebase_inherent_args_onto_impl(impl_args, cx);
// Check both where clauses on the impl and IAT
//
@ -43,12 +43,12 @@ where
// and I don't think the assoc item where-bounds are allowed to be coinductive.
self.add_goals(
GoalSource::Misc,
tcx.predicates_of(inherent.def_id)
.iter_instantiated(tcx, inherent_args)
.map(|pred| goal.with(tcx, pred)),
cx.predicates_of(inherent.def_id)
.iter_instantiated(cx, inherent_args)
.map(|pred| goal.with(cx, pred)),
);
let normalized = tcx.type_of(inherent.def_id).instantiate(tcx, inherent_args);
let normalized = cx.type_of(inherent.def_id).instantiate(cx, inherent_args);
self.instantiate_normalizes_to_term(goal, normalized.into());
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
}

View File

@ -84,16 +84,16 @@ where
self.self_ty()
}
fn trait_ref(self, tcx: I) -> ty::TraitRef<I> {
self.alias.trait_ref(tcx)
fn trait_ref(self, cx: I) -> ty::TraitRef<I> {
self.alias.trait_ref(cx)
}
fn with_self_ty(self, tcx: I, self_ty: I::Ty) -> Self {
self.with_self_ty(tcx, self_ty)
fn with_self_ty(self, cx: I, self_ty: I::Ty) -> Self {
self.with_self_ty(cx, self_ty)
}
fn trait_def_id(self, tcx: I) -> I::DefId {
self.trait_def_id(tcx)
fn trait_def_id(self, cx: I) -> I::DefId {
self.trait_def_id(cx)
}
fn probe_and_match_goal_against_assumption(
@ -105,7 +105,7 @@ where
) -> Result<Candidate<I>, NoSolution> {
if let Some(projection_pred) = assumption.as_projection_clause() {
if projection_pred.projection_def_id() == goal.predicate.def_id() {
let tcx = ecx.cx();
let cx = ecx.cx();
ecx.probe_trait_candidate(source).enter(|ecx| {
let assumption_projection_pred =
ecx.instantiate_binder_with_infer(projection_pred);
@ -120,9 +120,9 @@ where
// Add GAT where clauses from the trait's definition
ecx.add_goals(
GoalSource::Misc,
tcx.own_predicates_of(goal.predicate.def_id())
.iter_instantiated(tcx, goal.predicate.alias.args)
.map(|pred| goal.with(tcx, pred)),
cx.own_predicates_of(goal.predicate.def_id())
.iter_instantiated(cx, goal.predicate.alias.args)
.map(|pred| goal.with(cx, pred)),
);
then(ecx)
@ -140,19 +140,19 @@ where
goal: Goal<I, NormalizesTo<I>>,
impl_def_id: I::DefId,
) -> Result<Candidate<I>, NoSolution> {
let tcx = ecx.cx();
let cx = ecx.cx();
let goal_trait_ref = goal.predicate.alias.trait_ref(tcx);
let impl_trait_ref = tcx.impl_trait_ref(impl_def_id);
let goal_trait_ref = goal.predicate.alias.trait_ref(cx);
let impl_trait_ref = cx.impl_trait_ref(impl_def_id);
if !ecx.cx().args_may_unify_deep(
goal.predicate.alias.trait_ref(tcx).args,
goal.predicate.alias.trait_ref(cx).args,
impl_trait_ref.skip_binder().args,
) {
return Err(NoSolution);
}
// We have to ignore negative impls when projecting.
let impl_polarity = tcx.impl_polarity(impl_def_id);
let impl_polarity = cx.impl_polarity(impl_def_id);
match impl_polarity {
ty::ImplPolarity::Negative => return Err(NoSolution),
ty::ImplPolarity::Reservation => {
@ -163,22 +163,22 @@ where
ecx.probe_trait_candidate(CandidateSource::Impl(impl_def_id)).enter(|ecx| {
let impl_args = ecx.fresh_args_for_item(impl_def_id);
let impl_trait_ref = impl_trait_ref.instantiate(tcx, impl_args);
let impl_trait_ref = impl_trait_ref.instantiate(cx, impl_args);
ecx.eq(goal.param_env, goal_trait_ref, impl_trait_ref)?;
let where_clause_bounds = tcx
let where_clause_bounds = cx
.predicates_of(impl_def_id)
.iter_instantiated(tcx, impl_args)
.map(|pred| goal.with(tcx, pred));
.iter_instantiated(cx, impl_args)
.map(|pred| goal.with(cx, pred));
ecx.add_goals(GoalSource::ImplWhereBound, where_clause_bounds);
// Add GAT where clauses from the trait's definition
ecx.add_goals(
GoalSource::Misc,
tcx.own_predicates_of(goal.predicate.def_id())
.iter_instantiated(tcx, goal.predicate.alias.args)
.map(|pred| goal.with(tcx, pred)),
cx.own_predicates_of(goal.predicate.def_id())
.iter_instantiated(cx, goal.predicate.alias.args)
.map(|pred| goal.with(cx, pred)),
);
// In case the associated item is hidden due to specialization, we have to
@ -195,21 +195,21 @@ where
};
let error_response = |ecx: &mut EvalCtxt<'_, D>, msg: &str| {
let guar = tcx.delay_bug(msg);
let error_term = match goal.predicate.alias.kind(tcx) {
ty::AliasTermKind::ProjectionTy => Ty::new_error(tcx, guar).into(),
ty::AliasTermKind::ProjectionConst => Const::new_error(tcx, guar).into(),
let guar = cx.delay_bug(msg);
let error_term = match goal.predicate.alias.kind(cx) {
ty::AliasTermKind::ProjectionTy => Ty::new_error(cx, guar).into(),
ty::AliasTermKind::ProjectionConst => Const::new_error(cx, guar).into(),
kind => panic!("expected projection, found {kind:?}"),
};
ecx.instantiate_normalizes_to_term(goal, error_term);
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
};
if !tcx.has_item_definition(target_item_def_id) {
if !cx.has_item_definition(target_item_def_id) {
return error_response(ecx, "missing item");
}
let target_container_def_id = tcx.parent(target_item_def_id);
let target_container_def_id = cx.parent(target_item_def_id);
// Getting the right args here is complex, e.g. given:
// - a goal `<Vec<u32> as Trait<i32>>::Assoc<u64>`
@ -229,22 +229,22 @@ where
target_container_def_id,
)?;
if !tcx.check_args_compatible(target_item_def_id, target_args) {
if !cx.check_args_compatible(target_item_def_id, target_args) {
return error_response(ecx, "associated item has mismatched arguments");
}
// Finally we construct the actual value of the associated type.
let term = match goal.predicate.alias.kind(tcx) {
let term = match goal.predicate.alias.kind(cx) {
ty::AliasTermKind::ProjectionTy => {
tcx.type_of(target_item_def_id).map_bound(|ty| ty.into())
cx.type_of(target_item_def_id).map_bound(|ty| ty.into())
}
ty::AliasTermKind::ProjectionConst => {
if tcx.features().associated_const_equality() {
if cx.features().associated_const_equality() {
panic!("associated const projection is not supported yet")
} else {
ty::EarlyBinder::bind(
Const::new_error_with_message(
tcx,
cx,
"associated const projection is not supported yet",
)
.into(),
@ -254,7 +254,7 @@ where
kind => panic!("expected projection, found {kind:?}"),
};
ecx.instantiate_normalizes_to_term(goal, term.instantiate(tcx, target_args));
ecx.instantiate_normalizes_to_term(goal, term.instantiate(cx, target_args));
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
})
}
@ -316,10 +316,10 @@ where
goal: Goal<I, Self>,
goal_kind: ty::ClosureKind,
) -> Result<Candidate<I>, NoSolution> {
let tcx = ecx.cx();
let cx = ecx.cx();
let tupled_inputs_and_output =
match structural_traits::extract_tupled_inputs_and_output_from_callable(
tcx,
cx,
goal.predicate.self_ty(),
goal_kind,
)? {
@ -329,19 +329,19 @@ where
}
};
let output_is_sized_pred = tupled_inputs_and_output.map_bound(|(_, output)| {
ty::TraitRef::new(tcx, tcx.require_lang_item(TraitSolverLangItem::Sized), [output])
ty::TraitRef::new(cx, cx.require_lang_item(TraitSolverLangItem::Sized), [output])
});
let pred = tupled_inputs_and_output
.map_bound(|(inputs, output)| ty::ProjectionPredicate {
projection_term: ty::AliasTerm::new(
tcx,
cx,
goal.predicate.def_id(),
[goal.predicate.self_ty(), inputs],
),
term: output.into(),
})
.upcast(tcx);
.upcast(cx);
// A built-in `Fn` impl only holds if the output is sized.
// (FIXME: technically we only need to check this if the type is a fn ptr...)
@ -350,7 +350,7 @@ where
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
goal,
pred,
[(GoalSource::ImplWhereBound, goal.with(tcx, output_is_sized_pred))],
[(GoalSource::ImplWhereBound, goal.with(cx, output_is_sized_pred))],
)
}
@ -359,27 +359,23 @@ where
goal: Goal<I, Self>,
goal_kind: ty::ClosureKind,
) -> Result<Candidate<I>, NoSolution> {
let tcx = ecx.cx();
let cx = ecx.cx();
let env_region = match goal_kind {
ty::ClosureKind::Fn | ty::ClosureKind::FnMut => goal.predicate.alias.args.region_at(2),
// Doesn't matter what this region is
ty::ClosureKind::FnOnce => Region::new_static(tcx),
ty::ClosureKind::FnOnce => Region::new_static(cx),
};
let (tupled_inputs_and_output_and_coroutine, nested_preds) =
structural_traits::extract_tupled_inputs_and_output_from_async_callable(
tcx,
cx,
goal.predicate.self_ty(),
goal_kind,
env_region,
)?;
let output_is_sized_pred = tupled_inputs_and_output_and_coroutine.map_bound(
|AsyncCallableRelevantTypes { output_coroutine_ty: output_ty, .. }| {
ty::TraitRef::new(
tcx,
tcx.require_lang_item(TraitSolverLangItem::Sized),
[output_ty],
)
ty::TraitRef::new(cx, cx.require_lang_item(TraitSolverLangItem::Sized), [output_ty])
},
);
@ -390,23 +386,23 @@ where
output_coroutine_ty,
coroutine_return_ty,
}| {
let (projection_term, term) = if tcx
let (projection_term, term) = if cx
.is_lang_item(goal.predicate.def_id(), TraitSolverLangItem::CallOnceFuture)
{
(
ty::AliasTerm::new(
tcx,
cx,
goal.predicate.def_id(),
[goal.predicate.self_ty(), tupled_inputs_ty],
),
output_coroutine_ty.into(),
)
} else if tcx
} else if cx
.is_lang_item(goal.predicate.def_id(), TraitSolverLangItem::CallRefFuture)
{
(
ty::AliasTerm::new(
tcx,
cx,
goal.predicate.def_id(),
[
I::GenericArg::from(goal.predicate.self_ty()),
@ -416,13 +412,13 @@ where
),
output_coroutine_ty.into(),
)
} else if tcx.is_lang_item(
} else if cx.is_lang_item(
goal.predicate.def_id(),
TraitSolverLangItem::AsyncFnOnceOutput,
) {
(
ty::AliasTerm::new(
tcx,
cx,
goal.predicate.def_id(),
[
I::GenericArg::from(goal.predicate.self_ty()),
@ -440,7 +436,7 @@ where
ty::ProjectionPredicate { projection_term, term }
},
)
.upcast(tcx);
.upcast(cx);
// A built-in `AsyncFn` impl only holds if the output is sized.
// (FIXME: technically we only need to check this if the type is a fn ptr...)
@ -449,9 +445,9 @@ where
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
goal,
pred,
[goal.with(tcx, output_is_sized_pred)]
[goal.with(cx, output_is_sized_pred)]
.into_iter()
.chain(nested_preds.into_iter().map(|pred| goal.with(tcx, pred)))
.chain(nested_preds.into_iter().map(|pred| goal.with(cx, pred)))
.map(|goal| (GoalSource::ImplWhereBound, goal)),
)
}
@ -514,8 +510,8 @@ where
ecx: &mut EvalCtxt<'_, D>,
goal: Goal<I, Self>,
) -> Result<Candidate<I>, NoSolution> {
let tcx = ecx.cx();
let metadata_def_id = tcx.require_lang_item(TraitSolverLangItem::Metadata);
let cx = ecx.cx();
let metadata_def_id = cx.require_lang_item(TraitSolverLangItem::Metadata);
assert_eq!(metadata_def_id, goal.predicate.def_id());
ecx.probe_builtin_trait_candidate(BuiltinImplSource::Misc).enter(|ecx| {
let metadata_ty = match goal.predicate.self_ty().kind() {
@ -537,16 +533,16 @@ where
| ty::CoroutineWitness(..)
| ty::Never
| ty::Foreign(..)
| ty::Dynamic(_, _, ty::DynStar) => Ty::new_unit(tcx),
| ty::Dynamic(_, _, ty::DynStar) => Ty::new_unit(cx),
ty::Error(e) => Ty::new_error(tcx, e),
ty::Error(e) => Ty::new_error(cx, e),
ty::Str | ty::Slice(_) => Ty::new_usize(tcx),
ty::Str | ty::Slice(_) => Ty::new_usize(cx),
ty::Dynamic(_, _, ty::Dyn) => {
let dyn_metadata = tcx.require_lang_item(TraitSolverLangItem::DynMetadata);
tcx.type_of(dyn_metadata)
.instantiate(tcx, &[I::GenericArg::from(goal.predicate.self_ty())])
let dyn_metadata = cx.require_lang_item(TraitSolverLangItem::DynMetadata);
cx.type_of(dyn_metadata)
.instantiate(cx, &[I::GenericArg::from(goal.predicate.self_ty())])
}
ty::Alias(_, _) | ty::Param(_) | ty::Placeholder(..) => {
@ -555,26 +551,26 @@ where
// FIXME(ptr_metadata): This impl overlaps with the other impls and shouldn't
// exist. Instead, `Pointee<Metadata = ()>` should be a supertrait of `Sized`.
let sized_predicate = ty::TraitRef::new(
tcx,
tcx.require_lang_item(TraitSolverLangItem::Sized),
cx,
cx.require_lang_item(TraitSolverLangItem::Sized),
[I::GenericArg::from(goal.predicate.self_ty())],
);
// FIXME(-Znext-solver=coinductive): Should this be `GoalSource::ImplWhereBound`?
ecx.add_goal(GoalSource::Misc, goal.with(tcx, sized_predicate));
Ty::new_unit(tcx)
ecx.add_goal(GoalSource::Misc, goal.with(cx, sized_predicate));
Ty::new_unit(cx)
}
ty::Adt(def, args) if def.is_struct() => match def.struct_tail_ty(tcx) {
None => Ty::new_unit(tcx),
ty::Adt(def, args) if def.is_struct() => match def.struct_tail_ty(cx) {
None => Ty::new_unit(cx),
Some(tail_ty) => {
Ty::new_projection(tcx, metadata_def_id, [tail_ty.instantiate(tcx, args)])
Ty::new_projection(cx, metadata_def_id, [tail_ty.instantiate(cx, args)])
}
},
ty::Adt(_, _) => Ty::new_unit(tcx),
ty::Adt(_, _) => Ty::new_unit(cx),
ty::Tuple(elements) => match elements.last() {
None => Ty::new_unit(tcx),
Some(tail_ty) => Ty::new_projection(tcx, metadata_def_id, [tail_ty]),
None => Ty::new_unit(cx),
Some(tail_ty) => Ty::new_projection(cx, metadata_def_id, [tail_ty]),
},
ty::Infer(
@ -601,8 +597,8 @@ where
};
// Coroutines are not futures unless they come from `async` desugaring
let tcx = ecx.cx();
if !tcx.coroutine_is_async(def_id) {
let cx = ecx.cx();
if !cx.coroutine_is_async(def_id) {
return Err(NoSolution);
}
@ -616,7 +612,7 @@ where
projection_term: ty::AliasTerm::new(ecx.cx(), goal.predicate.def_id(), [self_ty]),
term,
}
.upcast(tcx),
.upcast(cx),
// Technically, we need to check that the future type is Sized,
// but that's already proven by the coroutine being WF.
[],
@ -633,8 +629,8 @@ where
};
// Coroutines are not Iterators unless they come from `gen` desugaring
let tcx = ecx.cx();
if !tcx.coroutine_is_gen(def_id) {
let cx = ecx.cx();
if !cx.coroutine_is_gen(def_id) {
return Err(NoSolution);
}
@ -648,7 +644,7 @@ where
projection_term: ty::AliasTerm::new(ecx.cx(), goal.predicate.def_id(), [self_ty]),
term,
}
.upcast(tcx),
.upcast(cx),
// Technically, we need to check that the iterator type is Sized,
// but that's already proven by the generator being WF.
[],
@ -672,8 +668,8 @@ where
};
// Coroutines are not AsyncIterators unless they come from `gen` desugaring
let tcx = ecx.cx();
if !tcx.coroutine_is_async_gen(def_id) {
let cx = ecx.cx();
if !cx.coroutine_is_async_gen(def_id) {
return Err(NoSolution);
}
@ -682,12 +678,12 @@ where
// Take `AsyncIterator<Item = I>` and turn it into the corresponding
// coroutine yield ty `Poll<Option<I>>`.
let wrapped_expected_ty = Ty::new_adt(
tcx,
tcx.adt_def(tcx.require_lang_item(TraitSolverLangItem::Poll)),
tcx.mk_args(&[Ty::new_adt(
tcx,
tcx.adt_def(tcx.require_lang_item(TraitSolverLangItem::Option)),
tcx.mk_args(&[expected_ty.into()]),
cx,
cx.adt_def(cx.require_lang_item(TraitSolverLangItem::Poll)),
cx.mk_args(&[Ty::new_adt(
cx,
cx.adt_def(cx.require_lang_item(TraitSolverLangItem::Option)),
cx.mk_args(&[expected_ty.into()]),
)
.into()]),
);
@ -708,18 +704,17 @@ where
};
// `async`-desugared coroutines do not implement the coroutine trait
let tcx = ecx.cx();
if !tcx.is_general_coroutine(def_id) {
let cx = ecx.cx();
if !cx.is_general_coroutine(def_id) {
return Err(NoSolution);
}
let coroutine = args.as_coroutine();
let term = if tcx
.is_lang_item(goal.predicate.def_id(), TraitSolverLangItem::CoroutineReturn)
let term = if cx.is_lang_item(goal.predicate.def_id(), TraitSolverLangItem::CoroutineReturn)
{
coroutine.return_ty().into()
} else if tcx.is_lang_item(goal.predicate.def_id(), TraitSolverLangItem::CoroutineYield) {
} else if cx.is_lang_item(goal.predicate.def_id(), TraitSolverLangItem::CoroutineYield) {
coroutine.yield_ty().into()
} else {
panic!("unexpected associated item `{:?}` for `{self_ty:?}`", goal.predicate.def_id())
@ -737,7 +732,7 @@ where
),
term,
}
.upcast(tcx),
.upcast(cx),
// Technically, we need to check that the coroutine type is Sized,
// but that's already proven by the coroutine being WF.
[],
@ -884,29 +879,29 @@ where
impl_trait_ref: rustc_type_ir::TraitRef<I>,
target_container_def_id: I::DefId,
) -> Result<I::GenericArgs, NoSolution> {
let tcx = self.cx();
let cx = self.cx();
Ok(if target_container_def_id == impl_trait_ref.def_id {
// Default value from the trait definition. No need to rebase.
goal.predicate.alias.args
} else if target_container_def_id == impl_def_id {
// Same impl, no need to fully translate, just a rebase from
// the trait is sufficient.
goal.predicate.alias.args.rebase_onto(tcx, impl_trait_ref.def_id, impl_args)
goal.predicate.alias.args.rebase_onto(cx, impl_trait_ref.def_id, impl_args)
} else {
let target_args = self.fresh_args_for_item(target_container_def_id);
let target_trait_ref =
tcx.impl_trait_ref(target_container_def_id).instantiate(tcx, target_args);
cx.impl_trait_ref(target_container_def_id).instantiate(cx, target_args);
// Relate source impl to target impl by equating trait refs.
self.eq(goal.param_env, impl_trait_ref, target_trait_ref)?;
// Also add predicates since they may be needed to constrain the
// target impl's params.
self.add_goals(
GoalSource::Misc,
tcx.predicates_of(target_container_def_id)
.iter_instantiated(tcx, target_args)
.map(|pred| goal.with(tcx, pred)),
cx.predicates_of(target_container_def_id)
.iter_instantiated(cx, target_args)
.map(|pred| goal.with(cx, pred)),
);
goal.predicate.alias.args.rebase_onto(tcx, impl_trait_ref.def_id, target_args)
goal.predicate.alias.args.rebase_onto(cx, impl_trait_ref.def_id, target_args)
})
}
}

View File

@ -18,7 +18,7 @@ where
&mut self,
goal: Goal<I, ty::NormalizesTo<I>>,
) -> QueryResult<I> {
let tcx = self.cx();
let cx = self.cx();
let opaque_ty = goal.predicate.alias;
let expected = goal.predicate.term.as_type().expect("no such thing as an opaque const");
@ -86,7 +86,7 @@ where
}
(Reveal::All, _) => {
// FIXME: Add an assertion that opaque type storage is empty.
let actual = tcx.type_of(opaque_ty.def_id).instantiate(tcx, opaque_ty.args);
let actual = cx.type_of(opaque_ty.def_id).instantiate(cx, opaque_ty.args);
self.eq(goal.param_env, expected, actual)?;
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
}
@ -98,7 +98,7 @@ where
///
/// FIXME: Interner argument is needed to constrain the `I` parameter.
pub fn uses_unique_placeholders_ignoring_regions<I: Interner>(
_interner: I,
_cx: I,
args: I::GenericArgs,
) -> Result<(), NotUniqueParam<I>> {
let mut seen = GrowableBitSet::default();

View File

@ -18,18 +18,18 @@ where
&mut self,
goal: Goal<I, ty::NormalizesTo<I>>,
) -> QueryResult<I> {
let tcx = self.cx();
let cx = self.cx();
let weak_ty = goal.predicate.alias;
// Check where clauses
self.add_goals(
GoalSource::Misc,
tcx.predicates_of(weak_ty.def_id)
.iter_instantiated(tcx, weak_ty.args)
.map(|pred| goal.with(tcx, pred)),
cx.predicates_of(weak_ty.def_id)
.iter_instantiated(cx, weak_ty.args)
.map(|pred| goal.with(cx, pred)),
);
let actual = tcx.type_of(weak_ty.def_id).instantiate(tcx, weak_ty.args);
let actual = cx.type_of(weak_ty.def_id).instantiate(cx, weak_ty.args);
self.instantiate_normalizes_to_term(goal, actual.into());
self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)

View File

@ -14,10 +14,10 @@ where
&mut self,
goal: Goal<I, ProjectionPredicate<I>>,
) -> QueryResult<I> {
let tcx = self.cx();
let projection_term = goal.predicate.projection_term.to_term(tcx);
let cx = self.cx();
let projection_term = goal.predicate.projection_term.to_term(cx);
let goal = goal.with(
tcx,
cx,
ty::PredicateKind::AliasRelate(
projection_term,
goal.predicate.term,

View File

@ -164,7 +164,7 @@ impl<I: Interner> SearchGraph<I> {
/// the remaining depth of all nested goals to prevent hangs
/// in case there is exponential blowup.
fn allowed_depth_for_nested(
tcx: I,
cx: I,
stack: &IndexVec<StackDepth, StackEntry<I>>,
) -> Option<SolverLimit> {
if let Some(last) = stack.raw.last() {
@ -178,18 +178,18 @@ impl<I: Interner> SearchGraph<I> {
SolverLimit(last.available_depth.0 - 1)
})
} else {
Some(SolverLimit(tcx.recursion_limit()))
Some(SolverLimit(cx.recursion_limit()))
}
}
fn stack_coinductive_from(
tcx: I,
cx: I,
stack: &IndexVec<StackDepth, StackEntry<I>>,
head: StackDepth,
) -> bool {
stack.raw[head.index()..]
.iter()
.all(|entry| entry.input.value.goal.predicate.is_coinductive(tcx))
.all(|entry| entry.input.value.goal.predicate.is_coinductive(cx))
}
// When encountering a solver cycle, the result of the current goal
@ -247,8 +247,8 @@ impl<I: Interner> SearchGraph<I> {
/// so we use a separate cache. Alternatively we could use
/// a single cache and share it between coherence and ordinary
/// trait solving.
pub(super) fn global_cache(&self, tcx: I) -> I::EvaluationCache {
tcx.evaluation_cache(self.mode)
pub(super) fn global_cache(&self, cx: I) -> I::EvaluationCache {
cx.evaluation_cache(self.mode)
}
/// Probably the most involved method of the whole solver.
@ -257,24 +257,24 @@ impl<I: Interner> SearchGraph<I> {
/// handles caching, overflow, and coinductive cycles.
pub(super) fn with_new_goal<D: SolverDelegate<Interner = I>>(
&mut self,
tcx: I,
cx: I,
input: CanonicalInput<I>,
inspect: &mut ProofTreeBuilder<D>,
mut prove_goal: impl FnMut(&mut Self, &mut ProofTreeBuilder<D>) -> QueryResult<I>,
) -> QueryResult<I> {
self.check_invariants();
// Check for overflow.
let Some(available_depth) = Self::allowed_depth_for_nested(tcx, &self.stack) else {
let Some(available_depth) = Self::allowed_depth_for_nested(cx, &self.stack) else {
if let Some(last) = self.stack.raw.last_mut() {
last.encountered_overflow = true;
}
inspect
.canonical_goal_evaluation_kind(inspect::WipCanonicalGoalEvaluationKind::Overflow);
return Self::response_no_constraints(tcx, input, Certainty::overflow(true));
return Self::response_no_constraints(cx, input, Certainty::overflow(true));
};
if let Some(result) = self.lookup_global_cache(tcx, input, available_depth, inspect) {
if let Some(result) = self.lookup_global_cache(cx, input, available_depth, inspect) {
debug!("global cache hit");
return result;
}
@ -287,12 +287,12 @@ impl<I: Interner> SearchGraph<I> {
if let Some(entry) = cache_entry
.with_coinductive_stack
.as_ref()
.filter(|p| Self::stack_coinductive_from(tcx, &self.stack, p.head))
.filter(|p| Self::stack_coinductive_from(cx, &self.stack, p.head))
.or_else(|| {
cache_entry
.with_inductive_stack
.as_ref()
.filter(|p| !Self::stack_coinductive_from(tcx, &self.stack, p.head))
.filter(|p| !Self::stack_coinductive_from(cx, &self.stack, p.head))
})
{
debug!("provisional cache hit");
@ -315,7 +315,7 @@ impl<I: Interner> SearchGraph<I> {
inspect.canonical_goal_evaluation_kind(
inspect::WipCanonicalGoalEvaluationKind::CycleInStack,
);
let is_coinductive_cycle = Self::stack_coinductive_from(tcx, &self.stack, stack_depth);
let is_coinductive_cycle = Self::stack_coinductive_from(cx, &self.stack, stack_depth);
let usage_kind = if is_coinductive_cycle {
HasBeenUsed::COINDUCTIVE_CYCLE
} else {
@ -328,9 +328,9 @@ impl<I: Interner> SearchGraph<I> {
return if let Some(result) = self.stack[stack_depth].provisional_result {
result
} else if is_coinductive_cycle {
Self::response_no_constraints(tcx, input, Certainty::Yes)
Self::response_no_constraints(cx, input, Certainty::Yes)
} else {
Self::response_no_constraints(tcx, input, Certainty::overflow(false))
Self::response_no_constraints(cx, input, Certainty::overflow(false))
};
} else {
// No entry, we push this goal on the stack and try to prove it.
@ -355,9 +355,9 @@ impl<I: Interner> SearchGraph<I> {
// not tracked by the cache key and from outside of this anon task, it
// must not be added to the global cache. Notably, this is the case for
// trait solver cycles participants.
let ((final_entry, result), dep_node) = tcx.with_cached_task(|| {
let ((final_entry, result), dep_node) = cx.with_cached_task(|| {
for _ in 0..FIXPOINT_STEP_LIMIT {
match self.fixpoint_step_in_task(tcx, input, inspect, &mut prove_goal) {
match self.fixpoint_step_in_task(cx, input, inspect, &mut prove_goal) {
StepResult::Done(final_entry, result) => return (final_entry, result),
StepResult::HasChanged => debug!("fixpoint changed provisional results"),
}
@ -366,17 +366,17 @@ impl<I: Interner> SearchGraph<I> {
debug!("canonical cycle overflow");
let current_entry = self.pop_stack();
debug_assert!(current_entry.has_been_used.is_empty());
let result = Self::response_no_constraints(tcx, input, Certainty::overflow(false));
let result = Self::response_no_constraints(cx, input, Certainty::overflow(false));
(current_entry, result)
});
let proof_tree = inspect.finalize_canonical_goal_evaluation(tcx);
let proof_tree = inspect.finalize_canonical_goal_evaluation(cx);
// We're now done with this goal. In case this goal is involved in a larger cycle
// do not remove it from the provisional cache and update its provisional result.
// We only add the root of cycles to the global cache.
if let Some(head) = final_entry.non_root_cycle_participant {
let coinductive_stack = Self::stack_coinductive_from(tcx, &self.stack, head);
let coinductive_stack = Self::stack_coinductive_from(cx, &self.stack, head);
let entry = self.provisional_cache.get_mut(&input).unwrap();
entry.stack_depth = None;
@ -396,8 +396,8 @@ impl<I: Interner> SearchGraph<I> {
// participant is on the stack. This is necessary to prevent unstable
// results. See the comment of `StackEntry::cycle_participants` for
// more details.
self.global_cache(tcx).insert(
tcx,
self.global_cache(cx).insert(
cx,
input,
proof_tree,
reached_depth,
@ -418,15 +418,15 @@ impl<I: Interner> SearchGraph<I> {
/// this goal.
fn lookup_global_cache<D: SolverDelegate<Interner = I>>(
&mut self,
tcx: I,
cx: I,
input: CanonicalInput<I>,
available_depth: SolverLimit,
inspect: &mut ProofTreeBuilder<D>,
) -> Option<QueryResult<I>> {
let CacheData { result, proof_tree, additional_depth, encountered_overflow } = self
.global_cache(tcx)
.global_cache(cx)
// FIXME: Awkward `Limit -> usize -> Limit`.
.get(tcx, input, self.stack.iter().map(|e| e.input), available_depth.0)?;
.get(cx, input, self.stack.iter().map(|e| e.input), available_depth.0)?;
// If we're building a proof tree and the current cache entry does not
// contain a proof tree, we do not use the entry but instead recompute
@ -467,7 +467,7 @@ impl<I: Interner> SearchGraph<I> {
/// point we are done.
fn fixpoint_step_in_task<D, F>(
&mut self,
tcx: I,
cx: I,
input: CanonicalInput<I>,
inspect: &mut ProofTreeBuilder<D>,
prove_goal: &mut F,
@ -506,9 +506,9 @@ impl<I: Interner> SearchGraph<I> {
let reached_fixpoint = if let Some(r) = stack_entry.provisional_result {
r == result
} else if stack_entry.has_been_used == HasBeenUsed::COINDUCTIVE_CYCLE {
Self::response_no_constraints(tcx, input, Certainty::Yes) == result
Self::response_no_constraints(cx, input, Certainty::Yes) == result
} else if stack_entry.has_been_used == HasBeenUsed::INDUCTIVE_CYCLE {
Self::response_no_constraints(tcx, input, Certainty::overflow(false)) == result
Self::response_no_constraints(cx, input, Certainty::overflow(false)) == result
} else {
false
};
@ -528,11 +528,11 @@ impl<I: Interner> SearchGraph<I> {
}
fn response_no_constraints(
tcx: I,
cx: I,
goal: CanonicalInput<I>,
certainty: Certainty,
) -> QueryResult<I> {
Ok(super::response_no_constraints_raw(tcx, goal.max_universe, goal.variables, certainty))
Ok(super::response_no_constraints_raw(cx, goal.max_universe, goal.variables, certainty))
}
#[allow(rustc::potential_query_instability)]

View File

@ -30,8 +30,8 @@ where
self.trait_ref
}
fn with_self_ty(self, tcx: I, self_ty: I::Ty) -> Self {
self.with_self_ty(tcx, self_ty)
fn with_self_ty(self, cx: I, self_ty: I::Ty) -> Self {
self.with_self_ty(cx, self_ty)
}
fn trait_def_id(self, _: I) -> I::DefId {
@ -43,18 +43,17 @@ where
goal: Goal<I, TraitPredicate<I>>,
impl_def_id: I::DefId,
) -> Result<Candidate<I>, NoSolution> {
let tcx = ecx.cx();
let cx = ecx.cx();
let impl_trait_ref = tcx.impl_trait_ref(impl_def_id);
if !tcx
.args_may_unify_deep(goal.predicate.trait_ref.args, impl_trait_ref.skip_binder().args)
let impl_trait_ref = cx.impl_trait_ref(impl_def_id);
if !cx.args_may_unify_deep(goal.predicate.trait_ref.args, impl_trait_ref.skip_binder().args)
{
return Err(NoSolution);
}
// An upper bound of the certainty of this goal, used to lower the certainty
// of reservation impl to ambiguous during coherence.
let impl_polarity = tcx.impl_polarity(impl_def_id);
let impl_polarity = cx.impl_polarity(impl_def_id);
let maximal_certainty = match (impl_polarity, goal.predicate.polarity) {
// In intercrate mode, this is ambiguous. But outside of intercrate,
// it's not a real impl.
@ -77,13 +76,13 @@ where
ecx.probe_trait_candidate(CandidateSource::Impl(impl_def_id)).enter(|ecx| {
let impl_args = ecx.fresh_args_for_item(impl_def_id);
ecx.record_impl_args(impl_args);
let impl_trait_ref = impl_trait_ref.instantiate(tcx, impl_args);
let impl_trait_ref = impl_trait_ref.instantiate(cx, impl_args);
ecx.eq(goal.param_env, goal.predicate.trait_ref, impl_trait_ref)?;
let where_clause_bounds = tcx
let where_clause_bounds = cx
.predicates_of(impl_def_id)
.iter_instantiated(tcx, impl_args)
.map(|pred| goal.with(tcx, pred));
.iter_instantiated(cx, impl_args)
.map(|pred| goal.with(cx, pred));
ecx.add_goals(GoalSource::ImplWhereBound, where_clause_bounds);
ecx.evaluate_added_goals_and_make_canonical_response(maximal_certainty)
@ -181,13 +180,13 @@ where
return Err(NoSolution);
}
let tcx = ecx.cx();
let cx = ecx.cx();
ecx.probe_builtin_trait_candidate(BuiltinImplSource::Misc).enter(|ecx| {
let nested_obligations = tcx
let nested_obligations = cx
.predicates_of(goal.predicate.def_id())
.iter_instantiated(tcx, goal.predicate.trait_ref.args)
.map(|p| goal.with(tcx, p));
.iter_instantiated(cx, goal.predicate.trait_ref.args)
.map(|p| goal.with(cx, p));
// FIXME(-Znext-solver=coinductive): Should this be `GoalSource::ImplWhereBound`?
ecx.add_goals(GoalSource::Misc, nested_obligations);
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
@ -232,13 +231,13 @@ where
return Err(NoSolution);
}
let tcx = ecx.cx();
let cx = ecx.cx();
// But if there are inference variables, we have to wait until they're resolved.
if (goal.param_env, goal.predicate.self_ty()).has_non_region_infer() {
return ecx.forced_ambiguity(MaybeCause::Ambiguity);
}
if tcx.layout_is_pointer_like(goal.param_env, goal.predicate.self_ty()) {
if cx.layout_is_pointer_like(goal.param_env, goal.predicate.self_ty()) {
ecx.probe_builtin_trait_candidate(BuiltinImplSource::Misc)
.enter(|ecx| ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes))
} else {
@ -286,10 +285,10 @@ where
return Err(NoSolution);
}
let tcx = ecx.cx();
let cx = ecx.cx();
let tupled_inputs_and_output =
match structural_traits::extract_tupled_inputs_and_output_from_callable(
tcx,
cx,
goal.predicate.self_ty(),
goal_kind,
)? {
@ -299,14 +298,14 @@ where
}
};
let output_is_sized_pred = tupled_inputs_and_output.map_bound(|(_, output)| {
ty::TraitRef::new(tcx, tcx.require_lang_item(TraitSolverLangItem::Sized), [output])
ty::TraitRef::new(cx, cx.require_lang_item(TraitSolverLangItem::Sized), [output])
});
let pred = tupled_inputs_and_output
.map_bound(|(inputs, _)| {
ty::TraitRef::new(tcx, goal.predicate.def_id(), [goal.predicate.self_ty(), inputs])
ty::TraitRef::new(cx, goal.predicate.def_id(), [goal.predicate.self_ty(), inputs])
})
.upcast(tcx);
.upcast(cx);
// A built-in `Fn` impl only holds if the output is sized.
// (FIXME: technically we only need to check this if the type is a fn ptr...)
Self::probe_and_consider_implied_clause(
@ -314,7 +313,7 @@ where
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
goal,
pred,
[(GoalSource::ImplWhereBound, goal.with(tcx, output_is_sized_pred))],
[(GoalSource::ImplWhereBound, goal.with(cx, output_is_sized_pred))],
)
}
@ -327,20 +326,20 @@ where
return Err(NoSolution);
}
let tcx = ecx.cx();
let cx = ecx.cx();
let (tupled_inputs_and_output_and_coroutine, nested_preds) =
structural_traits::extract_tupled_inputs_and_output_from_async_callable(
tcx,
cx,
goal.predicate.self_ty(),
goal_kind,
// This region doesn't matter because we're throwing away the coroutine type
Region::new_static(tcx),
Region::new_static(cx),
)?;
let output_is_sized_pred = tupled_inputs_and_output_and_coroutine.map_bound(
|AsyncCallableRelevantTypes { output_coroutine_ty, .. }| {
ty::TraitRef::new(
tcx,
tcx.require_lang_item(TraitSolverLangItem::Sized),
cx,
cx.require_lang_item(TraitSolverLangItem::Sized),
[output_coroutine_ty],
)
},
@ -349,12 +348,12 @@ where
let pred = tupled_inputs_and_output_and_coroutine
.map_bound(|AsyncCallableRelevantTypes { tupled_inputs_ty, .. }| {
ty::TraitRef::new(
tcx,
cx,
goal.predicate.def_id(),
[goal.predicate.self_ty(), tupled_inputs_ty],
)
})
.upcast(tcx);
.upcast(cx);
// A built-in `AsyncFn` impl only holds if the output is sized.
// (FIXME: technically we only need to check this if the type is a fn ptr...)
Self::probe_and_consider_implied_clause(
@ -362,9 +361,9 @@ where
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
goal,
pred,
[goal.with(tcx, output_is_sized_pred)]
[goal.with(cx, output_is_sized_pred)]
.into_iter()
.chain(nested_preds.into_iter().map(|pred| goal.with(tcx, pred)))
.chain(nested_preds.into_iter().map(|pred| goal.with(cx, pred)))
.map(|goal| (GoalSource::ImplWhereBound, goal)),
)
}
@ -437,8 +436,8 @@ where
};
// Coroutines are not futures unless they come from `async` desugaring
let tcx = ecx.cx();
if !tcx.coroutine_is_async(def_id) {
let cx = ecx.cx();
if !cx.coroutine_is_async(def_id) {
return Err(NoSolution);
}
@ -463,8 +462,8 @@ where
};
// Coroutines are not iterators unless they come from `gen` desugaring
let tcx = ecx.cx();
if !tcx.coroutine_is_gen(def_id) {
let cx = ecx.cx();
if !cx.coroutine_is_gen(def_id) {
return Err(NoSolution);
}
@ -489,8 +488,8 @@ where
};
// Coroutines are not iterators unless they come from `gen` desugaring
let tcx = ecx.cx();
if !tcx.coroutine_is_gen(def_id) {
let cx = ecx.cx();
if !cx.coroutine_is_gen(def_id) {
return Err(NoSolution);
}
@ -513,8 +512,8 @@ where
};
// Coroutines are not async iterators unless they come from `async gen` desugaring
let tcx = ecx.cx();
if !tcx.coroutine_is_async_gen(def_id) {
let cx = ecx.cx();
if !cx.coroutine_is_async_gen(def_id) {
return Err(NoSolution);
}
@ -540,8 +539,8 @@ where
};
// `async`-desugared coroutines do not implement the coroutine trait
let tcx = ecx.cx();
if !tcx.is_general_coroutine(def_id) {
let cx = ecx.cx();
if !cx.is_general_coroutine(def_id) {
return Err(NoSolution);
}
@ -550,8 +549,8 @@ where
ecx,
CandidateSource::BuiltinImpl(BuiltinImplSource::Misc),
goal,
ty::TraitRef::new(tcx, goal.predicate.def_id(), [self_ty, coroutine.resume_ty()])
.upcast(tcx),
ty::TraitRef::new(cx, goal.predicate.def_id(), [self_ty, coroutine.resume_ty()])
.upcast(cx),
// Technically, we need to check that the coroutine types are Sized,
// but that's already proven by the coroutine being WF.
[],
@ -727,7 +726,7 @@ where
b_data: I::BoundExistentialPredicates,
b_region: I::Region,
) -> Vec<Candidate<I>> {
let tcx = self.cx();
let cx = self.cx();
let Goal { predicate: (a_ty, _b_ty), .. } = goal;
let mut responses = vec![];
@ -745,7 +744,7 @@ where
));
} else if let Some(a_principal) = a_data.principal() {
for new_a_principal in
D::elaborate_supertraits(self.cx(), a_principal.with_self_ty(tcx, a_ty)).skip(1)
D::elaborate_supertraits(self.cx(), a_principal.with_self_ty(cx, a_ty)).skip(1)
{
responses.extend(self.consider_builtin_upcast_to_principal(
goal,
@ -755,7 +754,7 @@ where
b_data,
b_region,
Some(new_a_principal.map_bound(|trait_ref| {
ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref)
ty::ExistentialTraitRef::erase_self_ty(cx, trait_ref)
})),
));
}
@ -770,11 +769,11 @@ where
b_data: I::BoundExistentialPredicates,
b_region: I::Region,
) -> Result<Candidate<I>, NoSolution> {
let tcx = self.cx();
let cx = self.cx();
let Goal { predicate: (a_ty, _), .. } = goal;
// Can only unsize to an object-safe trait.
if b_data.principal_def_id().is_some_and(|def_id| !tcx.trait_is_object_safe(def_id)) {
if b_data.principal_def_id().is_some_and(|def_id| !cx.trait_is_object_safe(def_id)) {
return Err(NoSolution);
}
@ -783,24 +782,20 @@ where
// (i.e. the principal, all of the associated types match, and any auto traits)
ecx.add_goals(
GoalSource::ImplWhereBound,
b_data.iter().map(|pred| goal.with(tcx, pred.with_self_ty(tcx, a_ty))),
b_data.iter().map(|pred| goal.with(cx, pred.with_self_ty(cx, a_ty))),
);
// The type must be `Sized` to be unsized.
ecx.add_goal(
GoalSource::ImplWhereBound,
goal.with(
tcx,
ty::TraitRef::new(
tcx,
tcx.require_lang_item(TraitSolverLangItem::Sized),
[a_ty],
),
cx,
ty::TraitRef::new(cx, cx.require_lang_item(TraitSolverLangItem::Sized), [a_ty]),
),
);
// The type must outlive the lifetime of the `dyn` we're unsizing into.
ecx.add_goal(GoalSource::Misc, goal.with(tcx, ty::OutlivesPredicate(a_ty, b_region)));
ecx.add_goal(GoalSource::Misc, goal.with(cx, ty::OutlivesPredicate(a_ty, b_region)));
ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)
})
}
@ -941,28 +936,28 @@ where
a_args: I::GenericArgs,
b_args: I::GenericArgs,
) -> Result<Candidate<I>, NoSolution> {
let tcx = self.cx();
let cx = self.cx();
let Goal { predicate: (_a_ty, b_ty), .. } = goal;
let unsizing_params = tcx.unsizing_params_for_adt(def.def_id());
let unsizing_params = cx.unsizing_params_for_adt(def.def_id());
// We must be unsizing some type parameters. This also implies
// that the struct has a tail field.
if unsizing_params.is_empty() {
return Err(NoSolution);
}
let tail_field_ty = def.struct_tail_ty(tcx).unwrap();
let tail_field_ty = def.struct_tail_ty(cx).unwrap();
let a_tail_ty = tail_field_ty.instantiate(tcx, a_args);
let b_tail_ty = tail_field_ty.instantiate(tcx, b_args);
let a_tail_ty = tail_field_ty.instantiate(cx, a_args);
let b_tail_ty = tail_field_ty.instantiate(cx, b_args);
// Instantiate just the unsizing params from B into A. The type after
// this instantiation must be equal to B. This is so we don't unsize
// unrelated type parameters.
let new_a_args = tcx.mk_args_from_iter(a_args.iter().enumerate().map(|(i, a)| {
let new_a_args = cx.mk_args_from_iter(a_args.iter().enumerate().map(|(i, a)| {
if unsizing_params.contains(i as u32) { b_args.get(i).unwrap() } else { a }
}));
let unsized_a_ty = Ty::new_adt(tcx, def, new_a_args);
let unsized_a_ty = Ty::new_adt(cx, def, new_a_args);
// Finally, we require that `TailA: Unsize<TailB>` for the tail field
// types.
@ -970,10 +965,10 @@ where
self.add_goal(
GoalSource::ImplWhereBound,
goal.with(
tcx,
cx,
ty::TraitRef::new(
tcx,
tcx.require_lang_item(TraitSolverLangItem::Unsize),
cx,
cx.require_lang_item(TraitSolverLangItem::Unsize),
[a_tail_ty, b_tail_ty],
),
),
@ -998,25 +993,24 @@ where
a_tys: I::Tys,
b_tys: I::Tys,
) -> Result<Candidate<I>, NoSolution> {
let tcx = self.cx();
let cx = self.cx();
let Goal { predicate: (_a_ty, b_ty), .. } = goal;
let (&a_last_ty, a_rest_tys) = a_tys.split_last().unwrap();
let b_last_ty = b_tys.last().unwrap();
// Instantiate just the tail field of B, and require that they're equal.
let unsized_a_ty =
Ty::new_tup_from_iter(tcx, a_rest_tys.iter().copied().chain([b_last_ty]));
let unsized_a_ty = Ty::new_tup_from_iter(cx, a_rest_tys.iter().copied().chain([b_last_ty]));
self.eq(goal.param_env, unsized_a_ty, b_ty)?;
// Similar to ADTs, require that we can unsize the tail.
self.add_goal(
GoalSource::ImplWhereBound,
goal.with(
tcx,
cx,
ty::TraitRef::new(
tcx,
tcx.require_lang_item(TraitSolverLangItem::Unsize),
cx,
cx.require_lang_item(TraitSolverLangItem::Unsize),
[a_last_ty, b_last_ty],
),
),