mirror of https://github.com/rust-lang/rust.git
Fix a bunch of typos
This PR will fix some typos detected by [typos]. I only picked the ones I was sure were spelling errors to fix, mostly in the comments. [typos]: https://github.com/crate-ci/typos
This commit is contained in:
parent
7f442f8ba1
commit
b1430fb7ca
|
@ -1442,7 +1442,7 @@ Compatibility Notes
|
|||
- [Mixing Option and Result via `?` is no longer permitted in closures for inferred types.][86831]
|
||||
- [Previously unsound code is no longer permitted where different constructors in branches
|
||||
could require different lifetimes.][85574]
|
||||
- As previously mentioned the [`std::arch` instrinsics now uses stricter const checking][83278]
|
||||
- As previously mentioned the [`std::arch` intrinsics now use stricter const checking][83278]
|
||||
than before and may reject some previously accepted code.
|
||||
- [`i128` multiplication on Cortex M0+ platforms currently unconditionally causes overflow
|
||||
when compiled with `codegen-units = 1`.][86063]
|
||||
|
@ -2520,7 +2520,7 @@ Compatibility Notes
|
|||
- [Fixed a regression parsing `{} && false` in tail expressions.][74650]
|
||||
- [Added changes to how proc-macros are expanded in `macro_rules!` that should
|
||||
help to preserve more span information.][73084] These changes may cause
|
||||
compiliation errors if your macro was unhygenic or didn't correctly handle
|
||||
compilation errors if your macro was unhygienic or didn't correctly handle
|
||||
`Delimiter::None`.
|
||||
- [Moved support for the CloudABI target to tier 3.][75568]
|
||||
- [`linux-gnu` targets now require minimum kernel 2.6.32 and glibc 2.11.][74163]
|
||||
|
|
|
@ -1177,7 +1177,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> {
|
|||
) -> hir::Ty<'hir> {
|
||||
// Check whether we should interpret this as a bare trait object.
|
||||
// This check mirrors the one in late resolution. We only introduce this special case in
|
||||
// the rare occurence we need to lower `Fresh` anonymous lifetimes.
|
||||
// the rare occurrence we need to lower `Fresh` anonymous lifetimes.
|
||||
// The other cases when a qpath should be opportunistically made a trait object are handled
|
||||
// by `ty_path`.
|
||||
if qself.is_none()
|
||||
|
|
|
@ -1800,7 +1800,7 @@ pub(crate) enum ForbiddenLetReason {
|
|||
NotSupportedOr(Span),
|
||||
/// A let chain with invalid parentheses
|
||||
///
|
||||
/// For exemple, `let 1 = 1 && (expr && expr)` is allowed
|
||||
/// For example, `let 1 = 1 && (expr && expr)` is allowed
|
||||
/// but `(let 1 = 1 && (let 1 = 1 && (let 1 = 1))) && let a = 1` is not
|
||||
NotSupportedParentheses(Span),
|
||||
}
|
||||
|
|
|
@ -342,7 +342,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
|
|||
|
||||
let ret_place = codegen_place(fx, destination);
|
||||
|
||||
// Handle special calls like instrinsics and empty drop glue.
|
||||
// Handle special calls like intrinsics and empty drop glue.
|
||||
let instance = if let ty::FnDef(def_id, substs) = *fn_ty.kind() {
|
||||
let instance = ty::Instance::resolve(fx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
|
||||
.unwrap()
|
||||
|
|
|
@ -59,7 +59,7 @@ pub(crate) fn check_constants(fx: &mut FunctionCx<'_, '_, '_>) -> bool {
|
|||
ErrorHandled::TooGeneric => {
|
||||
span_bug!(
|
||||
constant.span,
|
||||
"codgen encountered polymorphic constant: {:?}",
|
||||
"codegen encountered polymorphic constant: {:?}",
|
||||
err
|
||||
);
|
||||
}
|
||||
|
|
|
@ -203,7 +203,7 @@ pub(crate) fn codegen_intrinsic_call<'tcx>(
|
|||
sym::transmute => {
|
||||
crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
|
||||
}
|
||||
_ => unimplemented!("unsupported instrinsic {}", intrinsic),
|
||||
_ => unimplemented!("unsupported intrinsic {}", intrinsic),
|
||||
}
|
||||
return;
|
||||
};
|
||||
|
|
|
@ -540,7 +540,7 @@ pub fn linking_symbol_name_for_instance_in_crate<'tcx>(
|
|||
.map(|fnabi| (fnabi.conv, &fnabi.args[..]))
|
||||
.unwrap_or((Conv::Rust, &[]));
|
||||
|
||||
// Decorate symbols with prefices, suffices and total number of bytes of arguments.
|
||||
// Decorate symbols with prefixes, suffixes and total number of bytes of arguments.
|
||||
// Reference: https://docs.microsoft.com/en-us/cpp/build/reference/decorated-names?view=msvc-170
|
||||
let (prefix, suffix) = match conv {
|
||||
Conv::X86Fastcall => ("@", "@"),
|
||||
|
|
|
@ -191,7 +191,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
|
|||
// errored or at least linted
|
||||
ErrorHandled::Reported(_) | ErrorHandled::Linted => {}
|
||||
ErrorHandled::TooGeneric => {
|
||||
span_bug!(const_.span, "codgen encountered polymorphic constant: {:?}", err)
|
||||
span_bug!(const_.span, "codegen encountered polymorphic constant: {:?}", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -320,7 +320,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
let (a_offset, b_offset) =
|
||||
match (self.ptr_try_get_alloc_id(a), self.ptr_try_get_alloc_id(b)) {
|
||||
(Err(a), Err(b)) => {
|
||||
// Neither poiner points to an allocation.
|
||||
// Neither pointer points to an allocation.
|
||||
// If these are unequal or null, this *will* fail the deref check below.
|
||||
(a, b)
|
||||
}
|
||||
|
|
|
@ -437,7 +437,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
msg,
|
||||
})
|
||||
}
|
||||
// Ensure we never consider the null pointer dereferencable.
|
||||
// Ensure we never consider the null pointer dereferenceable.
|
||||
if M::Provenance::OFFSET_IS_ADDR {
|
||||
assert_ne!(ptr.addr(), Size::ZERO);
|
||||
}
|
||||
|
@ -914,7 +914,7 @@ impl<'tcx, 'a, Prov: Provenance, Extra> AllocRefMut<'a, 'tcx, Prov, Extra> {
|
|||
self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
|
||||
}
|
||||
|
||||
/// Mark the entire referenced range as uninitalized
|
||||
/// Mark the entire referenced range as uninitialized
|
||||
pub fn write_uninit(&mut self) -> InterpResult<'tcx> {
|
||||
Ok(self
|
||||
.alloc
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
//! This file implements "place projections"; basically a symmetric API for 3 types: MPlaceTy, OpTy, PlaceTy.
|
||||
//!
|
||||
//! OpTy and PlaceTy genrally work by "let's see if we are actually an MPlaceTy, and do something custom if not".
|
||||
//! OpTy and PlaceTy generally work by "let's see if we are actually an MPlaceTy, and do something custom if not".
|
||||
//! For PlaceTy, the custom thing is basically always to call `force_allocation` and then use the MPlaceTy logic anyway.
|
||||
//! For OpTy, the custom thing on field projections has to be pretty clever (since `Operand::Immediate` can have fields),
|
||||
//! but for array/slice operations it only has to worry about `Operand::Uninit`. That makes the value part trivial,
|
||||
|
|
|
@ -217,7 +217,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
// When comparing the PassMode, we have to be smart about comparing the attributes.
|
||||
let arg_attr_compat = |a1: &ArgAttributes, a2: &ArgAttributes| {
|
||||
// There's only one regular attribute that matters for the call ABI: InReg.
|
||||
// Everything else is things like noalias, dereferencable, nonnull, ...
|
||||
// Everything else is things like noalias, dereferenceable, nonnull, ...
|
||||
// (This also applies to pointee_size, pointee_align.)
|
||||
if a1.regular.contains(ArgAttribute::InReg) != a2.regular.contains(ArgAttribute::InReg)
|
||||
{
|
||||
|
@ -556,7 +556,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
.tcx
|
||||
.struct_tail_erasing_lifetimes(receiver_place.layout.ty, self.param_env);
|
||||
let ty::Dynamic(data, ..) = receiver_tail.kind() else {
|
||||
span_bug!(self.cur_span(), "dyanmic call on non-`dyn` type {}", receiver_tail)
|
||||
span_bug!(self.cur_span(), "dynamic call on non-`dyn` type {}", receiver_tail)
|
||||
};
|
||||
|
||||
// Get the required information from the vtable.
|
||||
|
|
|
@ -32,7 +32,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
Ok(vtable_ptr.into())
|
||||
}
|
||||
|
||||
/// Returns a high-level representation of the entires of the given vtable.
|
||||
/// Returns a high-level representation of the entries of the given vtable.
|
||||
pub fn get_vtable_entries(
|
||||
&self,
|
||||
vtable: Pointer<Option<M::Provenance>>,
|
||||
|
|
|
@ -29,7 +29,7 @@ impl Fingerprint {
|
|||
// quality hash values, let's still combine the two values because the
|
||||
// Fingerprints in DefPathHash have the StableCrateId portion which is
|
||||
// the same for all DefPathHashes from the same crate. Combining the
|
||||
// two halfs makes sure we get a good quality hash in such cases too.
|
||||
// two halves makes sure we get a good quality hash in such cases too.
|
||||
self.0.wrapping_mul(3).wrapping_add(self.1)
|
||||
}
|
||||
|
||||
|
@ -120,7 +120,7 @@ impl FingerprintHasher for crate::unhash::Unhasher {
|
|||
// quality hash values, let's still combine the two values because the
|
||||
// Fingerprints in DefPathHash have the StableCrateId portion which is
|
||||
// the same for all DefPathHashes from the same crate. Combining the
|
||||
// two halfs makes sure we get a good quality hash in such cases too.
|
||||
// two halves makes sure we get a good quality hash in such cases too.
|
||||
//
|
||||
// Since `Unhasher` is used only in the context of HashMaps, it is OK
|
||||
// to combine the two components in an order-independent way (which is
|
||||
|
|
|
@ -430,7 +430,7 @@ impl TtParser {
|
|||
}
|
||||
}
|
||||
MatcherLoc::Delimited => {
|
||||
// Entering the delimeter is trivial.
|
||||
// Entering the delimiter is trivial.
|
||||
mp.idx += 1;
|
||||
self.cur_mps.push(mp);
|
||||
}
|
||||
|
|
|
@ -976,7 +976,7 @@ impl<'tt> TokenSet<'tt> {
|
|||
self.maybe_empty = false;
|
||||
}
|
||||
|
||||
// Adds `tok` to the set for `self`, marking sequence as non-empy.
|
||||
// Adds `tok` to the set for `self`, marking sequence as non-empty.
|
||||
fn add_one(&mut self, tt: TtHandle<'tt>) {
|
||||
if !self.tokens.contains(&tt) {
|
||||
self.tokens.push(tt);
|
||||
|
|
|
@ -173,7 +173,7 @@ impl MultiItemModifier for DeriveProcMacro {
|
|||
|
||||
// fail if there have been errors emitted
|
||||
if ecx.sess.parse_sess.span_diagnostic.err_count() > error_count_before {
|
||||
ecx.struct_span_err(span, "proc-macro derive produced unparseable tokens").emit();
|
||||
ecx.struct_span_err(span, "proc-macro derive produced unparsable tokens").emit();
|
||||
}
|
||||
|
||||
ExpandResult::Ready(items)
|
||||
|
|
|
@ -199,7 +199,7 @@ fn ty_to_string<'tcx>(infcx: &InferCtxt<'_, 'tcx>, ty: Ty<'tcx>) -> String {
|
|||
}
|
||||
|
||||
/// We don't want to directly use `ty_to_string` for closures as their type isn't really
|
||||
/// something users are familar with. Directly printing the `fn_sig` of closures also
|
||||
/// something users are familiar with. Directly printing the `fn_sig` of closures also
|
||||
/// doesn't work as they actually use the "rust-call" API.
|
||||
fn closure_as_fn_str<'tcx>(infcx: &InferCtxt<'_, 'tcx>, ty: Ty<'tcx>) -> String {
|
||||
let ty::Closure(_, substs) = ty.kind() else { unreachable!() };
|
||||
|
|
|
@ -1333,7 +1333,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> {
|
|||
/// `resolve_vars_if_possible` as well as `fully_resolve`.
|
||||
///
|
||||
/// Make sure to call [`InferCtxt::process_registered_region_obligations`]
|
||||
/// first, or preferrably use [`InferCtxt::check_region_obligations_and_report_errors`]
|
||||
/// first, or preferably use [`InferCtxt::check_region_obligations_and_report_errors`]
|
||||
/// to do both of these operations together.
|
||||
pub fn resolve_regions_and_report_errors(
|
||||
&self,
|
||||
|
|
|
@ -100,7 +100,7 @@ impl Default for InferCtxtUndoLogs<'_> {
|
|||
}
|
||||
|
||||
/// The UndoLogs trait defines how we undo a particular kind of action (of type T). We can undo any
|
||||
/// action that is convertable into an UndoLog (per the From impls above).
|
||||
/// action that is convertible into an UndoLog (per the From impls above).
|
||||
impl<'tcx, T> UndoLogs<T> for InferCtxtUndoLogs<'tcx>
|
||||
where
|
||||
UndoLog<'tcx>: From<T>,
|
||||
|
|
|
@ -176,7 +176,7 @@ pub fn parse_check_cfg(specs: Vec<String>) -> CheckCfg {
|
|||
let ident = arg.ident().expect("multi-segment cfg key");
|
||||
names_valid.insert(ident.name.to_string());
|
||||
} else {
|
||||
error!("`names()` arguments must be simple identifers");
|
||||
error!("`names()` arguments must be simple identifiers");
|
||||
}
|
||||
}
|
||||
continue 'specs;
|
||||
|
@ -204,7 +204,7 @@ pub fn parse_check_cfg(specs: Vec<String>) -> CheckCfg {
|
|||
continue 'specs;
|
||||
} else {
|
||||
error!(
|
||||
"`values()` first argument must be a simple identifer"
|
||||
"`values()` first argument must be a simple identifier"
|
||||
);
|
||||
}
|
||||
} else if args.is_empty() {
|
||||
|
|
|
@ -268,7 +268,7 @@ impl<'tcx> LateLintPass<'tcx> for UnusedResults {
|
|||
},
|
||||
ty::Closure(..) => {
|
||||
cx.struct_span_lint(UNUSED_MUST_USE, span, |lint| {
|
||||
// FIXME(davidtwco): this isn't properly translatable becauses of the
|
||||
// FIXME(davidtwco): this isn't properly translatable because of the
|
||||
// pre/post strings
|
||||
lint.build(fluent::lint::unused_closure)
|
||||
.set_arg("count", plural_len)
|
||||
|
@ -281,7 +281,7 @@ impl<'tcx> LateLintPass<'tcx> for UnusedResults {
|
|||
}
|
||||
ty::Generator(..) => {
|
||||
cx.struct_span_lint(UNUSED_MUST_USE, span, |lint| {
|
||||
// FIXME(davidtwco): this isn't properly translatable becauses of the
|
||||
// FIXME(davidtwco): this isn't properly translatable because of the
|
||||
// pre/post strings
|
||||
lint.build(fluent::lint::unused_generator)
|
||||
.set_arg("count", plural_len)
|
||||
|
@ -310,7 +310,7 @@ impl<'tcx> LateLintPass<'tcx> for UnusedResults {
|
|||
) -> bool {
|
||||
if let Some(attr) = cx.tcx.get_attr(def_id, sym::must_use) {
|
||||
cx.struct_span_lint(UNUSED_MUST_USE, span, |lint| {
|
||||
// FIXME(davidtwco): this isn't properly translatable becauses of the pre/post
|
||||
// FIXME(davidtwco): this isn't properly translatable because of the pre/post
|
||||
// strings
|
||||
let mut err = lint.build(fluent::lint::unused_def);
|
||||
err.set_arg("pre", descr_pre_path);
|
||||
|
|
|
@ -3206,7 +3206,7 @@ declare_lint! {
|
|||
/// [future-incompatible]: ../index.md#future-incompatible-lints
|
||||
pub REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS,
|
||||
Warn,
|
||||
"tranparent type contains an external ZST that is marked #[non_exhaustive] or contains private fields",
|
||||
"transparent type contains an external ZST that is marked #[non_exhaustive] or contains private fields",
|
||||
@future_incompatible = FutureIncompatibleInfo {
|
||||
reference: "issue #78586 <https://github.com/rust-lang/rust/issues/78586>",
|
||||
};
|
||||
|
|
|
@ -86,7 +86,7 @@ impl<'tcx> BasicBlocks<'tcx> {
|
|||
///
|
||||
/// You will only ever need this if you have also called [`BasicBlocks::as_mut_preserves_cfg`].
|
||||
/// All other methods that allow you to mutate the basic blocks also call this method
|
||||
/// themselves, thereby avoiding any risk of accidentaly cache invalidation.
|
||||
/// themselves, thereby avoiding any risk of accidental cache invalidation.
|
||||
pub fn invalidate_cfg_cache(&mut self) {
|
||||
self.predecessor_cache.invalidate();
|
||||
self.switch_source_cache.invalidate();
|
||||
|
|
|
@ -1457,7 +1457,7 @@ pub struct PlaceRef<'tcx> {
|
|||
// Once we stop implementing `Ord` for `DefId`,
|
||||
// this impl will be unnecessary. Until then, we'll
|
||||
// leave this impl in place to prevent re-adding a
|
||||
// dependnecy on the `Ord` impl for `DefId`
|
||||
// dependency on the `Ord` impl for `DefId`
|
||||
impl<'tcx> !PartialOrd for PlaceRef<'tcx> {}
|
||||
|
||||
impl<'tcx> Place<'tcx> {
|
||||
|
|
|
@ -332,7 +332,7 @@ pub enum StatementKind<'tcx> {
|
|||
/// First, all three operands are evaluated. `src` and `dest` must each be a reference, pointer,
|
||||
/// or `Box` pointing to the same type `T`. `count` must evaluate to a `usize`. Then, `src` and
|
||||
/// `dest` are dereferenced, and `count * size_of::<T>()` bytes beginning with the first byte of
|
||||
/// the `src` place are copied to the continguous range of bytes beginning with the first byte
|
||||
/// the `src` place are copied to the contiguous range of bytes beginning with the first byte
|
||||
/// of `dest`.
|
||||
///
|
||||
/// **Needs clarification**: In what order are operands computed and dereferenced? It should
|
||||
|
@ -378,7 +378,7 @@ pub enum FakeReadCause {
|
|||
/// Some(closure_def_id).
|
||||
/// Otherwise, the value of the optional LocalDefId will be None.
|
||||
//
|
||||
// We can use LocaDefId here since fake read statements are removed
|
||||
// We can use LocalDefId here since fake read statements are removed
|
||||
// before codegen in the `CleanupNonCodegenStatements` pass.
|
||||
ForMatchedPlace(Option<LocalDefId>),
|
||||
|
||||
|
|
|
@ -1151,7 +1151,7 @@ rustc_queries! {
|
|||
/// Used by rustdoc.
|
||||
query rendered_const(def_id: DefId) -> String {
|
||||
storage(ArenaCacheSelector<'tcx>)
|
||||
desc { |tcx| "rendering constant intializer of `{}`", tcx.def_path_str(def_id) }
|
||||
desc { |tcx| "rendering constant initializer of `{}`", tcx.def_path_str(def_id) }
|
||||
cache_on_disk_if { def_id.is_local() }
|
||||
separate_provide_extern
|
||||
}
|
||||
|
|
|
@ -115,7 +115,7 @@ impl Node {
|
|||
matches!(self, Node::Trait(..))
|
||||
}
|
||||
|
||||
/// Trys to find the associated item that implements `trait_item_def_id`
|
||||
/// Tries to find the associated item that implements `trait_item_def_id`
|
||||
/// defined in this node.
|
||||
///
|
||||
/// If this returns `None`, the item can potentially still be found in
|
||||
|
|
|
@ -1498,17 +1498,17 @@ impl<'tcx> TyCtxt<'tcx> {
|
|||
// Create a dependency to the crate to be sure we re-execute this when the amount of
|
||||
// definitions change.
|
||||
self.ensure().hir_crate(());
|
||||
// Leak a read lock once we start iterating on definitions, to prevent adding new onces
|
||||
// Leak a read lock once we start iterating on definitions, to prevent adding new ones
|
||||
// while iterating. If some query needs to add definitions, it should be `ensure`d above.
|
||||
let definitions = self.definitions.leak();
|
||||
definitions.iter_local_def_id()
|
||||
}
|
||||
|
||||
pub fn def_path_table(self) -> &'tcx rustc_hir::definitions::DefPathTable {
|
||||
// Create a dependency to the crate to be sure we reexcute this when the amount of
|
||||
// Create a dependency to the crate to be sure we re-execute this when the amount of
|
||||
// definitions change.
|
||||
self.ensure().hir_crate(());
|
||||
// Leak a read lock once we start iterating on definitions, to prevent adding new onces
|
||||
// Leak a read lock once we start iterating on definitions, to prevent adding new ones
|
||||
// while iterating. If some query needs to add definitions, it should be `ensure`d above.
|
||||
let definitions = self.definitions.leak();
|
||||
definitions.def_path_table()
|
||||
|
@ -1517,10 +1517,10 @@ impl<'tcx> TyCtxt<'tcx> {
|
|||
pub fn def_path_hash_to_def_index_map(
|
||||
self,
|
||||
) -> &'tcx rustc_hir::def_path_hash_map::DefPathHashMap {
|
||||
// Create a dependency to the crate to be sure we reexcute this when the amount of
|
||||
// Create a dependency to the crate to be sure we re-execute this when the amount of
|
||||
// definitions change.
|
||||
self.ensure().hir_crate(());
|
||||
// Leak a read lock once we start iterating on definitions, to prevent adding new onces
|
||||
// Leak a read lock once we start iterating on definitions, to prevent adding new ones
|
||||
// while iterating. If some query needs to add definitions, it should be `ensure`d above.
|
||||
let definitions = self.definitions.leak();
|
||||
definitions.def_path_hash_to_def_index_map()
|
||||
|
|
|
@ -266,7 +266,7 @@ impl<'tcx> Generics {
|
|||
// Filter the default arguments.
|
||||
//
|
||||
// This currently uses structural equality instead
|
||||
// of semantic equivalance. While not ideal, that's
|
||||
// of semantic equivalence. While not ideal, that's
|
||||
// good enough for now as this should only be used
|
||||
// for diagnostics anyways.
|
||||
own_params.end -= self
|
||||
|
|
|
@ -756,7 +756,7 @@ impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
|
|||
// * the element type and length of the single array field, if
|
||||
// the first field is of array type, or
|
||||
//
|
||||
// * the homogenous field type and the number of fields.
|
||||
// * the homogeneous field type and the number of fields.
|
||||
let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
|
||||
// First ADT field is an array:
|
||||
|
||||
|
|
|
@ -702,7 +702,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
|
|||
let local_id = self.var_local_id(var, for_guard);
|
||||
let source_info = self.source_info(span);
|
||||
self.cfg.push(block, Statement { source_info, kind: StatementKind::StorageLive(local_id) });
|
||||
// Altough there is almost always scope for given variable in corner cases
|
||||
// Although there is almost always scope for given variable in corner cases
|
||||
// like #92893 we might get variable with no scope.
|
||||
if let Some(region_scope) = self.region_scope_tree.var_scope(var.0.local_id) && schedule_drop{
|
||||
self.schedule_drop(span, region_scope, local_id, DropKind::Storage);
|
||||
|
|
|
@ -13,7 +13,7 @@ pub(crate) fn dump_closure_profile<'tcx>(tcx: TyCtxt<'tcx>, closure_instance: In
|
|||
.append(true)
|
||||
.open(&format!("closure_profile_{}.csv", std::process::id()))
|
||||
else {
|
||||
eprintln!("Cound't open file for writing closure profile");
|
||||
eprintln!("Couldn't open file for writing closure profile");
|
||||
return;
|
||||
};
|
||||
|
||||
|
|
|
@ -1578,7 +1578,7 @@ impl<'a> Parser<'a> {
|
|||
Applicability::MachineApplicable,
|
||||
);
|
||||
|
||||
// Replace `'label: non_block_expr` with `'label: {non_block_expr}` in order to supress future errors about `break 'label`.
|
||||
// Replace `'label: non_block_expr` with `'label: {non_block_expr}` in order to suppress future errors about `break 'label`.
|
||||
let stmt = self.mk_stmt(span, StmtKind::Expr(expr));
|
||||
let blk = self.mk_block(vec![stmt], BlockCheckMode::Default, span);
|
||||
self.mk_expr(span, ExprKind::Block(blk, label))
|
||||
|
@ -2578,7 +2578,7 @@ impl<'a> Parser<'a> {
|
|||
}
|
||||
|
||||
pub(super) fn parse_arm(&mut self) -> PResult<'a, Arm> {
|
||||
// Used to check the `let_chains` and `if_let_guard` features mostly by scaning
|
||||
// Used to check the `let_chains` and `if_let_guard` features mostly by scanning
|
||||
// `&&` tokens.
|
||||
fn check_let_expr(expr: &Expr) -> (bool, bool) {
|
||||
match expr.kind {
|
||||
|
|
|
@ -281,7 +281,7 @@ impl TokenCursor {
|
|||
if delim != Delimiter::Invisible {
|
||||
return (Token::new(token::OpenDelim(delim), sp.open), Spacing::Alone);
|
||||
}
|
||||
// No open delimeter to return; continue on to the next iteration.
|
||||
// No open delimiter to return; continue on to the next iteration.
|
||||
}
|
||||
};
|
||||
} else if let Some(frame) = self.stack.pop() {
|
||||
|
|
|
@ -49,7 +49,7 @@ impl Node {
|
|||
///
|
||||
/// For example, `ast::Visitor` has `visit_ident`, but `Ident`s are always
|
||||
/// stored inline within other AST nodes, so we don't implement `visit_ident`
|
||||
/// here. In constrast, we do implement `visit_expr` because `ast::Expr` is
|
||||
/// here. In contrast, we do implement `visit_expr` because `ast::Expr` is
|
||||
/// always stored as `P<ast::Expr>`, and every such expression should be
|
||||
/// measured separately.
|
||||
///
|
||||
|
|
|
@ -1368,7 +1368,7 @@ impl<'a, 'tcx> LifetimeContext<'a, 'tcx> {
|
|||
return;
|
||||
}
|
||||
|
||||
// We may fail to resolve higher-ranked lifetimes that are mentionned by APIT.
|
||||
// We may fail to resolve higher-ranked lifetimes that are mentioned by APIT.
|
||||
// AST-based resolution does not care for impl-trait desugaring, which are the
|
||||
// responsibility of lowering. This may create a mismatch between the resolution
|
||||
// AST found (`region_def_id`) which points to HRTB, and what HIR allows.
|
||||
|
|
|
@ -1991,7 +1991,7 @@ impl<'a> Resolver<'a> {
|
|||
_ => panic!("invalid arg index"),
|
||||
}
|
||||
}
|
||||
// Cache the lookup to avoid parsing attributes for an iterm multiple times.
|
||||
// Cache the lookup to avoid parsing attributes for an item multiple times.
|
||||
self.legacy_const_generic_args.insert(def_id, Some(ret.clone()));
|
||||
return Some(ret);
|
||||
}
|
||||
|
|
|
@ -441,7 +441,7 @@ impl<'a> ResolverExpand for Resolver<'a> {
|
|||
}
|
||||
PathResult::Indeterminate => indeterminate = true,
|
||||
// We can only be sure that a path doesn't exist after having tested all the
|
||||
// posibilities, only at that time we can return false.
|
||||
// possibilities, only at that time we can return false.
|
||||
PathResult::Failed { .. } => {}
|
||||
PathResult::Module(_) => panic!("unexpected path resolution"),
|
||||
}
|
||||
|
|
|
@ -2309,7 +2309,7 @@ impl Target {
|
|||
load_builtin(target_triple).expect("built-in target")
|
||||
}
|
||||
TargetTriple::TargetJson { .. } => {
|
||||
panic!("built-in targets doens't support target-paths")
|
||||
panic!("built-in targets doesn't support target-paths")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -3,7 +3,7 @@ use crate::spec::{cvs, LinkerFlavor, TargetOptions};
|
|||
pub fn opts() -> TargetOptions {
|
||||
// We cannot use `-nodefaultlibs` because compiler-rt has to be passed
|
||||
// as a path since it's not added to the linker search path by default.
|
||||
// There were attemts to make it behave like libgcc (so one can just use -l<name>)
|
||||
// There were attempts to make it behave like libgcc (so one can just use -l<name>)
|
||||
// but LLVM maintainers rejected it: https://reviews.llvm.org/D51440
|
||||
let pre_link_args =
|
||||
TargetOptions::link_args(LinkerFlavor::Gcc, &["-nolibc", "--unwindlib=none"]);
|
||||
|
|
|
@ -191,7 +191,7 @@ impl<'tcx> LowerInto<'tcx, chalk_ir::GoalData<RustInterner<'tcx>>> for ty::Predi
|
|||
GenericArgKind::Const(..) => {
|
||||
chalk_ir::GoalData::All(chalk_ir::Goals::empty(interner))
|
||||
}
|
||||
GenericArgKind::Lifetime(lt) => bug!("unexpect well formed predicate: {:?}", lt),
|
||||
GenericArgKind::Lifetime(lt) => bug!("unexpected well formed predicate: {:?}", lt),
|
||||
},
|
||||
|
||||
ty::PredicateKind::ObjectSafe(t) => chalk_ir::GoalData::DomainGoal(
|
||||
|
|
|
@ -416,7 +416,7 @@ pub(crate) mod rustc {
|
|||
// begin with the field's visibility
|
||||
tree = tree.then(Self::def(Def::Field(field_def)));
|
||||
|
||||
// compute the field's layout charactaristics
|
||||
// compute the field's layout characteristics
|
||||
let field_layout = layout_of(tcx, field_ty)?.clamp_align(min_align, max_align);
|
||||
|
||||
// next comes the field's padding
|
||||
|
|
|
@ -101,7 +101,7 @@ pub(super) fn check_fn<'a, 'tcx>(
|
|||
decl.output.span(),
|
||||
param_env,
|
||||
));
|
||||
// If we replaced declared_ret_ty with infer vars, then we must be infering
|
||||
// If we replaced declared_ret_ty with infer vars, then we must be inferring
|
||||
// an opaque type, so set a flag so we can improve diagnostics.
|
||||
fcx.return_type_has_opaque = ret_ty != declared_ret_ty;
|
||||
|
||||
|
@ -1543,7 +1543,7 @@ fn detect_discriminant_duplicate<'tcx>(
|
|||
None => {
|
||||
// At this point we know this discriminant is a duplicate, and was not explicitly
|
||||
// assigned by the user. Here we iterate backwards to fetch the HIR for the last
|
||||
// explictly assigned discriminant, and letting the user know that this was the
|
||||
// explicitly assigned discriminant, and letting the user know that this was the
|
||||
// increment startpoint, and how many steps from there leading to the duplicate
|
||||
if let Some((n, hir::Variant { span, ident, .. })) =
|
||||
vs[..idx].iter().rev().enumerate().find(|v| v.1.disr_expr.is_some())
|
||||
|
@ -1566,7 +1566,7 @@ fn detect_discriminant_duplicate<'tcx>(
|
|||
};
|
||||
|
||||
// Here we loop through the discriminants, comparing each discriminant to another.
|
||||
// When a duplicate is detected, we instatiate an error and point to both
|
||||
// When a duplicate is detected, we instantiate an error and point to both
|
||||
// initial and duplicate value. The duplicate discriminant is then discarded by swapping
|
||||
// it with the last element and decrementing the `vec.len` (which is why we have to evaluate
|
||||
// `discrs.len()` anew every iteration, and why this could be tricky to do in a functional
|
||||
|
|
|
@ -153,7 +153,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
|
|||
) {
|
||||
let tcx = self.tcx;
|
||||
|
||||
// Conceptually, we've got some number of expected inputs, and some number of provided aguments
|
||||
// Conceptually, we've got some number of expected inputs, and some number of provided arguments
|
||||
// and we can form a grid of whether each argument could satisfy a given input:
|
||||
// in1 | in2 | in3 | ...
|
||||
// arg1 ? | | |
|
||||
|
|
|
@ -497,7 +497,7 @@ impl<'a, 'tcx> ExprUseVisitor<'a, 'tcx> {
|
|||
let expr_place = return_if_err!(self.mc.cat_expr(expr));
|
||||
f(self);
|
||||
if let Some(els) = els {
|
||||
// borrowing because we need to test the descriminant
|
||||
// borrowing because we need to test the discriminant
|
||||
self.maybe_read_scrutinee(expr, expr_place.clone(), from_ref(pat).iter());
|
||||
self.walk_block(els)
|
||||
}
|
||||
|
|
|
@ -1570,7 +1570,7 @@ impl<'a, T> CursorMut<'a, T> {
|
|||
/// that the cursor points to is unchanged, even if it is the "ghost" node.
|
||||
///
|
||||
/// This operation should compute in *O*(1) time.
|
||||
// `push_front` continues to point to "ghost" when it addes a node to mimic
|
||||
// `push_front` continues to point to "ghost" when it adds a node to mimic
|
||||
// the behavior of `insert_before` on an empty list.
|
||||
#[unstable(feature = "linked_list_cursors", issue = "58533")]
|
||||
pub fn push_front(&mut self, elt: T) {
|
||||
|
|
|
@ -436,7 +436,7 @@ impl<T> Vec<T> {
|
|||
/// an explanation of the difference between length and capacity, see
|
||||
/// *[Capacity and reallocation]*.
|
||||
///
|
||||
/// If it is imporant to know the exact allocated capacity of a `Vec`,
|
||||
/// If it is important to know the exact allocated capacity of a `Vec`,
|
||||
/// always use the [`capacity`] method after construction.
|
||||
///
|
||||
/// For `Vec<T>` where `T` is a zero-sized type, there will be no allocation
|
||||
|
@ -591,7 +591,7 @@ impl<T, A: Allocator> Vec<T, A> {
|
|||
/// an explanation of the difference between length and capacity, see
|
||||
/// *[Capacity and reallocation]*.
|
||||
///
|
||||
/// If it is imporant to know the exact allocated capacity of a `Vec`,
|
||||
/// If it is important to know the exact allocated capacity of a `Vec`,
|
||||
/// always use the [`capacity`] method after construction.
|
||||
///
|
||||
/// For `Vec<T, A>` where `T` is a zero-sized type, there will be no allocation
|
||||
|
|
|
@ -31,7 +31,7 @@ use crate::intrinsics;
|
|||
///
|
||||
/// `unreachable_unchecked()` can be used in situations where the compiler
|
||||
/// can't prove invariants that were previously established. Such situations
|
||||
/// have a higher chance of occuring if those invariants are upheld by
|
||||
/// have a higher chance of occurring if those invariants are upheld by
|
||||
/// external code that the compiler can't analyze.
|
||||
/// ```
|
||||
/// fn prepare_inputs(divisors: &mut Vec<u32>) {
|
||||
|
|
|
@ -1082,7 +1082,7 @@ extern "rust-intrinsic" {
|
|||
/// Note that using `transmute` to turn a pointer to a `usize` is (as noted above) [undefined
|
||||
/// behavior][ub] in `const` contexts. Also outside of consts, this operation might not behave
|
||||
/// as expected -- this is touching on many unspecified aspects of the Rust memory model.
|
||||
/// Depending on what the code is doing, the following alternatives are preferrable to
|
||||
/// Depending on what the code is doing, the following alternatives are preferable to
|
||||
/// pointer-to-integer transmutation:
|
||||
/// - If the code just wants to store data of arbitrary type in some buffer and needs to pick a
|
||||
/// type for that buffer, it can use [`MaybeUninit`][mem::MaybeUninit].
|
||||
|
|
|
@ -154,7 +154,7 @@ impl<T: ?Sized> *const T {
|
|||
/// This is similar to `self as usize`, which semantically discards *provenance* and
|
||||
/// *address-space* information. However, unlike `self as usize`, casting the returned address
|
||||
/// back to a pointer yields [`invalid`][], which is undefined behavior to dereference. To
|
||||
/// properly restore the lost information and obtain a dereferencable pointer, use
|
||||
/// properly restore the lost information and obtain a dereferenceable pointer, use
|
||||
/// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
|
||||
///
|
||||
/// If using those APIs is not possible because there is no way to preserve a pointer with the
|
||||
|
|
|
@ -90,7 +90,7 @@
|
|||
//! isn't *pointer*-sized but address-space/offset/allocation-sized (we'll probably continue
|
||||
//! to conflate these notions). This would potentially make it possible to more efficiently
|
||||
//! target platforms where pointers are larger than offsets, such as CHERI and maybe some
|
||||
//! segmented architecures.
|
||||
//! segmented architectures.
|
||||
//!
|
||||
//! ## Provenance
|
||||
//!
|
||||
|
@ -172,7 +172,7 @@
|
|||
//! a pointer to a usize is generally an operation which *only* extracts the address. It is
|
||||
//! therefore *impossible* to construct a valid pointer from a usize because there is no way
|
||||
//! to restore the address-space and provenance. In other words, pointer-integer-pointer
|
||||
//! roundtrips are not possible (in the sense that the resulting pointer is not dereferencable).
|
||||
//! roundtrips are not possible (in the sense that the resulting pointer is not dereferenceable).
|
||||
//!
|
||||
//! The key insight to making this model *at all* viable is the [`with_addr`][] method:
|
||||
//!
|
||||
|
@ -272,7 +272,7 @@
|
|||
//!
|
||||
//! * Create an invalid pointer from just an address (see [`ptr::invalid`][]). This can
|
||||
//! be used for sentinel values like `null` *or* to represent a tagged pointer that will
|
||||
//! never be dereferencable. In general, it is always sound for an integer to pretend
|
||||
//! never be dereferenceable. In general, it is always sound for an integer to pretend
|
||||
//! to be a pointer "for fun" as long as you don't use operations on it which require
|
||||
//! it to be valid (offset, read, write, etc).
|
||||
//!
|
||||
|
|
|
@ -160,7 +160,7 @@ impl<T: ?Sized> *mut T {
|
|||
/// This is similar to `self as usize`, which semantically discards *provenance* and
|
||||
/// *address-space* information. However, unlike `self as usize`, casting the returned address
|
||||
/// back to a pointer yields [`invalid`][], which is undefined behavior to dereference. To
|
||||
/// properly restore the lost information and obtain a dereferencable pointer, use
|
||||
/// properly restore the lost information and obtain a dereferenceable pointer, use
|
||||
/// [`with_addr`][pointer::with_addr] or [`map_addr`][pointer::map_addr].
|
||||
///
|
||||
/// If using those APIs is not possible because there is no way to preserve a pointer with the
|
||||
|
|
|
@ -2754,10 +2754,10 @@ impl<'a, T> Iterator for RChunksMut<'a, T> {
|
|||
None => 0,
|
||||
};
|
||||
// SAFETY: This type ensures that self.v is a valid pointer with a correct len.
|
||||
// Therefore the bounds check in split_at_mut guarantess the split point is inbounds.
|
||||
// Therefore the bounds check in split_at_mut guarantees the split point is inbounds.
|
||||
let (head, tail) = unsafe { self.v.split_at_mut(start) };
|
||||
// SAFETY: This type ensures that self.v is a valid pointer with a correct len.
|
||||
// Therefore the bounds check in split_at_mut guarantess the split point is inbounds.
|
||||
// Therefore the bounds check in split_at_mut guarantees the split point is inbounds.
|
||||
let (nth, _) = unsafe { tail.split_at_mut(end - start) };
|
||||
self.v = head;
|
||||
// SAFETY: Nothing else points to or will point to the contents of this slice.
|
||||
|
|
|
@ -2321,7 +2321,7 @@ impl<T> [T] {
|
|||
}
|
||||
|
||||
/// Binary searches this slice for a given element.
|
||||
/// This behaves similary to [`contains`] if this slice is sorted.
|
||||
/// This behaves similar to [`contains`] if this slice is sorted.
|
||||
///
|
||||
/// If the value is found then [`Result::Ok`] is returned, containing the
|
||||
/// index of the matching element. If there are multiple matches, then any
|
||||
|
@ -3530,7 +3530,7 @@ impl<T> [T] {
|
|||
// alignment targeted for U.
|
||||
// `crate::ptr::align_offset` is called with a correctly aligned and
|
||||
// valid pointer `ptr` (it comes from a reference to `self`) and with
|
||||
// a size that is a power of two (since it comes from the alignement for U),
|
||||
// a size that is a power of two (since it comes from the alignment for U),
|
||||
// satisfying its safety constraints.
|
||||
let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
|
||||
if offset > self.len() {
|
||||
|
|
|
@ -1280,7 +1280,7 @@ macro_rules! try_from_secs {
|
|||
let rem_msb = nanos_tmp & rem_msb_mask == 0;
|
||||
let add_ns = !(rem_msb || (is_even && is_tie));
|
||||
|
||||
// f32 does not have enough presicion to trigger the second branch
|
||||
// f32 does not have enough precision to trigger the second branch
|
||||
// since it can not represent numbers between 0.999_999_940_395 and 1.0.
|
||||
let nanos = nanos + add_ns as u32;
|
||||
if ($mant_bits == 23) || (nanos != NANOS_PER_SEC) { (0, nanos) } else { (1, 0) }
|
||||
|
@ -1299,9 +1299,9 @@ macro_rules! try_from_secs {
|
|||
let rem_msb = nanos_tmp & rem_msb_mask == 0;
|
||||
let add_ns = !(rem_msb || (is_even && is_tie));
|
||||
|
||||
// f32 does not have enough presicion to trigger the second branch.
|
||||
// f32 does not have enough precision to trigger the second branch.
|
||||
// For example, it can not represent numbers between 1.999_999_880...
|
||||
// and 2.0. Bigger values result in even smaller presicion of the
|
||||
// and 2.0. Bigger values result in even smaller precision of the
|
||||
// fractional part.
|
||||
let nanos = nanos + add_ns as u32;
|
||||
if ($mant_bits == 23) || (nanos != NANOS_PER_SEC) {
|
||||
|
|
|
@ -70,7 +70,7 @@ impl_integer_intrinsic! {
|
|||
impl ToBitMask<BitMask=u64> for Mask<_, 64>
|
||||
}
|
||||
|
||||
/// Returns the minimum numnber of bytes in a bitmask with `lanes` lanes.
|
||||
/// Returns the minimum number of bytes in a bitmask with `lanes` lanes.
|
||||
#[cfg(feature = "generic_const_exprs")]
|
||||
pub const fn bitmask_len(lanes: usize) -> usize {
|
||||
(lanes + 7) / 8
|
||||
|
|
|
@ -1921,7 +1921,7 @@ mod type_keyword {}
|
|||
/// and [proposal]s exist to use `unsafe {}` blocks inside such functions when
|
||||
/// making `unsafe` operations.
|
||||
///
|
||||
/// See the [Rustnomicon] and the [Reference] for more informations.
|
||||
/// See the [Rustnomicon] and the [Reference] for more information.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
|
@ -2113,7 +2113,7 @@ mod use_keyword {}
|
|||
/// Add constraints that must be upheld to use an item.
|
||||
///
|
||||
/// `where` allows specifying constraints on lifetime and generic parameters.
|
||||
/// The [RFC] introducing `where` contains detailed informations about the
|
||||
/// The [RFC] introducing `where` contains detailed information about the
|
||||
/// keyword.
|
||||
///
|
||||
/// # Examples
|
||||
|
@ -2355,7 +2355,7 @@ mod dyn_keyword {}
|
|||
/// println!("f = {f} and i = {i}");
|
||||
/// ```
|
||||
///
|
||||
/// See the [Reference][union] for more informations on `union`s.
|
||||
/// See the [Reference][union] for more information on `union`s.
|
||||
///
|
||||
/// [`struct`]: keyword.struct.html
|
||||
/// [union]: ../reference/items/unions.html
|
||||
|
|
|
@ -138,7 +138,7 @@ impl Mutex {
|
|||
}
|
||||
}
|
||||
|
||||
// The state has changed or a wakeup occured, try to lock the mutex.
|
||||
// The state has changed or a wakeup occurred, try to lock the mutex.
|
||||
match self.futex.compare_exchange(UNLOCKED, owned_state, Acquire, Relaxed) {
|
||||
Ok(_) => return,
|
||||
Err(updated) => state = updated,
|
||||
|
|
|
@ -53,7 +53,7 @@ impl Mutex {
|
|||
// We avoid an unnecessary write if it as already set to 2,
|
||||
// to be friendlier for the caches.
|
||||
if state != 2 && self.futex.swap(2, Acquire) == 0 {
|
||||
// We changed it from 0 to 2, so we just succesfully locked it.
|
||||
// We changed it from 0 to 2, so we just successfully locked it.
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -54,7 +54,7 @@ fn is_read_lockable(state: u32) -> bool {
|
|||
// We don't allow read-locking if there's readers waiting, even if the lock is unlocked
|
||||
// and there's no writers waiting. The only situation when this happens is after unlocking,
|
||||
// at which point the unlocking thread might be waking up writers, which have priority over readers.
|
||||
// The unlocking thread will clear the readers waiting bit and wake up readers, if necssary.
|
||||
// The unlocking thread will clear the readers waiting bit and wake up readers, if necessary.
|
||||
state & MASK < MAX_READERS && !has_readers_waiting(state) && !has_writers_waiting(state)
|
||||
}
|
||||
|
||||
|
|
|
@ -44,7 +44,7 @@ mod shims {
|
|||
}
|
||||
|
||||
// On 32-bit x86 MSVC these functions aren't defined, so we just define shims
|
||||
// which promote everything fo f64, perform the calculation, and then demote
|
||||
// which promote everything for f64, perform the calculation, and then demote
|
||||
// back to f32. While not precisely correct should be "correct enough" for now.
|
||||
#[cfg(all(target_env = "msvc", target_arch = "x86"))]
|
||||
mod shims {
|
||||
|
|
|
@ -115,7 +115,7 @@ fn test_parse_prefix_verbatim_device() {
|
|||
assert_eq!(prefix, parse_prefix(r"\\?/C:\windows\system32\notepad.exe"));
|
||||
}
|
||||
|
||||
// See #93586 for more infomation.
|
||||
// See #93586 for more information.
|
||||
#[test]
|
||||
fn test_windows_prefix_components() {
|
||||
use crate::path::Path;
|
||||
|
|
|
@ -85,7 +85,7 @@ def _download(path, url, probably_big, verbose, exception):
|
|||
option = "-#"
|
||||
else:
|
||||
option = "-s"
|
||||
# If curl is not present on Win32, we shoud not sys.exit
|
||||
# If curl is not present on Win32, we should not sys.exit
|
||||
# but raise `CalledProcessError` or `OSError` instead
|
||||
require(["curl", "--version"], exception=platform_is_win32)
|
||||
run(["curl", option,
|
||||
|
|
|
@ -946,7 +946,7 @@ impl<'a> Builder<'a> {
|
|||
};
|
||||
patchelf.args(&[OsString::from("--set-rpath"), rpath_entries]);
|
||||
if !fname.extension().map_or(false, |ext| ext == "so") {
|
||||
// Finally, set the corret .interp for binaries
|
||||
// Finally, set the correct .interp for binaries
|
||||
let dynamic_linker_path = nix_deps_dir.join("nix-support/dynamic-linker");
|
||||
// FIXME: can we support utf8 here? `args` doesn't accept Vec<u8>, only OsString ...
|
||||
let dynamic_linker = t!(String::from_utf8(t!(fs::read(dynamic_linker_path))));
|
||||
|
@ -962,7 +962,7 @@ impl<'a> Builder<'a> {
|
|||
let tempfile = self.tempdir().join(dest_path.file_name().unwrap());
|
||||
// While bootstrap itself only supports http and https downloads, downstream forks might
|
||||
// need to download components from other protocols. The match allows them adding more
|
||||
// protocols without worrying about merge conficts if we change the HTTP implementation.
|
||||
// protocols without worrying about merge conflicts if we change the HTTP implementation.
|
||||
match url.split_once("://").map(|(proto, _)| proto) {
|
||||
Some("http") | Some("https") => {
|
||||
self.download_http_with_retries(&tempfile, url, help_on_error)
|
||||
|
|
|
@ -1629,7 +1629,7 @@ fn chmod(_path: &Path, _perms: u32) {}
|
|||
/// If code is not 0 (successful exit status), exit status is 101 (rust's default error code.)
|
||||
/// If the test is running and code is an error code, it will cause a panic.
|
||||
fn detail_exit(code: i32) -> ! {
|
||||
// if in test and code is an error code, panic with staus code provided
|
||||
// if in test and code is an error code, panic with status code provided
|
||||
if cfg!(test) && code != 0 {
|
||||
panic!("status code: {}", code);
|
||||
} else {
|
||||
|
|
|
@ -637,7 +637,7 @@ fn configure_cmake(
|
|||
|
||||
if target.contains("darwin") {
|
||||
// Make sure that CMake does not build universal binaries on macOS.
|
||||
// Explicitly specifiy the one single target architecture.
|
||||
// Explicitly specify the one single target architecture.
|
||||
if target.starts_with("aarch64") {
|
||||
// macOS uses a different name for building arm64
|
||||
cfg.define("CMAKE_OSX_ARCHITECTURES", "arm64");
|
||||
|
|
|
@ -87,7 +87,7 @@ Rust programs can be built for that target:
|
|||
rustc --target m68k-unknown-linux-gnu your-code.rs
|
||||
```
|
||||
|
||||
Very simple progams can be run using the `qemu-m68k-static` program:
|
||||
Very simple programs can be run using the `qemu-m68k-static` program:
|
||||
|
||||
```text
|
||||
$ qemu-m68k-static your-code
|
||||
|
|
|
@ -25,7 +25,7 @@ Like with any other Windows target created binaries are in PE format.
|
|||
|
||||
## Building the target
|
||||
|
||||
For cross-compilation I recommend using [llvm-mingw](https://github.com/mstorsjo/llvm-mingw) toolchain, one change that seems necessary beside configuring corss compilers is disabling experimental `m86k` target. Otherwise LLVM build fails with `multiple definition ...` errors.
|
||||
For cross-compilation I recommend using [llvm-mingw](https://github.com/mstorsjo/llvm-mingw) toolchain, one change that seems necessary beside configuring cross compilers is disabling experimental `m86k` target. Otherwise LLVM build fails with `multiple definition ...` errors.
|
||||
Native bootstrapping builds require rather fragile hacks until host artifacts are available so I won't describe them here.
|
||||
|
||||
## Building Rust programs
|
||||
|
|
|
@ -133,7 +133,7 @@ There are 3 common ways to compile native C code for UEFI targets:
|
|||
- Use native Windows targets. This means compiling your C code for the Windows
|
||||
platform as if it was the UEFI platform. This works for static libraries, but
|
||||
needs adjustments when linking into an UEFI executable. You can, however,
|
||||
link such static libraries seemlessly into rust code compiled for UEFI
|
||||
link such static libraries seamlessly into rust code compiled for UEFI
|
||||
targets. Be wary of any includes that are not specifically suitable for UEFI
|
||||
targets (especially the C standard library includes are not always
|
||||
compatible). Freestanding compilations are recommended to avoid
|
||||
|
|
|
@ -30,7 +30,7 @@ is 8-bytes large as well as pointers. The tradeoff, though, is that the maximum
|
|||
memory size is now the full 64-bit address space instead of the 4GB as limited
|
||||
by the 32-bit address space for `wasm32-unknown-unknown`.
|
||||
|
||||
This target is not a stable target. The [memory64] WebAssembly proposal is stil
|
||||
This target is not a stable target. The [memory64] WebAssembly proposal is still
|
||||
in-progress and not standardized. This means that there are not many engines
|
||||
which implement the `memory64` feature and if they do they're likely behind a
|
||||
flag, for example:
|
||||
|
|
|
@ -143,7 +143,7 @@ fn do_features() {}
|
|||
|
||||
#[cfg(has_feathers = "zapping")] // This is expected as "has_feathers" was provided in names()
|
||||
// and because no value checking was enable for "has_feathers"
|
||||
// no warning is emited for the value "zapping"
|
||||
// no warning is emitted for the value "zapping"
|
||||
fn do_zapping() {}
|
||||
|
||||
#[cfg(has_mumble_frotz)] // This is UNEXPECTED because names checking is enable and
|
||||
|
|
|
@ -8,7 +8,7 @@ This flag will rewrite absolute paths under the current working directory,
|
|||
replacing the current working directory prefix with a specified value.
|
||||
|
||||
The given value may be absolute or relative, or empty. This switch takes
|
||||
precidence over `--remap-path-prefix` in case they would both match a given
|
||||
precedence over `--remap-path-prefix` in case they would both match a given
|
||||
path.
|
||||
|
||||
This flag helps to produce deterministic output, by removing the current working
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
# Improvements to this script are greatly appreciated!
|
||||
|
||||
if [[ $# != 2 ]]; then
|
||||
echo "expected 2 arguments, recieved $#"
|
||||
echo "expected 2 arguments, received $#"
|
||||
echo "example usage: './src/etc/cpu-usage-over-time-plot.sh \
|
||||
7737e0b5c4103216d6fd8cf941b7ab9bdbaace7c \
|
||||
x86_64-gnu'"
|
||||
|
|
|
@ -386,7 +386,7 @@ def check_tree_attr(tree, path, attr, pat, regexp):
|
|||
return ret
|
||||
|
||||
|
||||
# Returns the number of occurences matching the regex (`regexp`) and the text (`pat`).
|
||||
# Returns the number of occurrences matching the regex (`regexp`) and the text (`pat`).
|
||||
def check_tree_text(tree, path, pat, regexp, stop_at_first):
|
||||
path = normalize_xpath(path)
|
||||
match_count = 0
|
||||
|
|
|
@ -450,7 +450,7 @@ impl<'a> PeekIter<'a> {
|
|||
fn new(iter: TokenIter<'a>) -> Self {
|
||||
Self { stored: VecDeque::new(), peek_pos: 0, iter }
|
||||
}
|
||||
/// Returns the next item after the current one. It doesn't interfer with `peek_next` output.
|
||||
/// Returns the next item after the current one. It doesn't interfere with `peek_next` output.
|
||||
fn peek(&mut self) -> Option<&(TokenKind, &'a str)> {
|
||||
if self.stored.is_empty() {
|
||||
if let Some(next) = self.iter.next() {
|
||||
|
@ -459,7 +459,7 @@ impl<'a> PeekIter<'a> {
|
|||
}
|
||||
self.stored.front()
|
||||
}
|
||||
/// Returns the next item after the last one peeked. It doesn't interfer with `peek` output.
|
||||
/// Returns the next item after the last one peeked. It doesn't interfere with `peek` output.
|
||||
fn peek_next(&mut self) -> Option<&(TokenKind, &'a str)> {
|
||||
self.peek_pos += 1;
|
||||
if self.peek_pos - 1 < self.stored.len() {
|
||||
|
|
|
@ -477,7 +477,7 @@ impl<'a, 'tcx> LinkCollector<'a, 'tcx> {
|
|||
// If there's no `::`, it's not an associated item.
|
||||
// So we can be sure that `rustc_resolve` was accurate when it said it wasn't resolved.
|
||||
.ok_or_else(|| {
|
||||
debug!("found no `::`, assumming {} was correctly not in scope", item_name);
|
||||
debug!("found no `::`, assuming {} was correctly not in scope", item_name);
|
||||
UnresolvedPath {
|
||||
item_id,
|
||||
module_id,
|
||||
|
@ -1256,7 +1256,7 @@ impl LinkCollector<'_, '_> {
|
|||
&mut self,
|
||||
key: ResolutionInfo,
|
||||
diag: DiagnosticInfo<'_>,
|
||||
// If errors are cached then they are only reported on first ocurrence
|
||||
// If errors are cached then they are only reported on first occurrence
|
||||
// which we want in some cases but not in others.
|
||||
cache_errors: bool,
|
||||
) -> Option<(Res, Option<UrlFragment>)> {
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
// Test that `wrapping_div` only checks divisor once.
|
||||
// This test checks that there is only a single compare agains -1 and -1 is not present as a
|
||||
// This test checks that there is only a single compare against -1 and -1 is not present as a
|
||||
// switch case (the second check present until rustc 1.12).
|
||||
// This test also verifies that a single panic call is generated (for the division by zero case).
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
#[lang="sized"]
|
||||
trait Sized { }
|
||||
|
||||
// Test that `nounwind` atributes are correctly applied to exported `aapcs` and
|
||||
// Test that `nounwind` attributes are correctly applied to exported `aapcs` and
|
||||
// `aapcs-unwind` extern functions. `aapcs-unwind` functions MUST NOT have this attribute. We
|
||||
// disable optimizations above to prevent LLVM from inferring the attribute.
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// compile-flags: -C panic=abort
|
||||
|
||||
// Test that `nounwind` atributes are also applied to extern `C-unwind` Rust functions
|
||||
// Test that `nounwind` attributes are also applied to extern `C-unwind` Rust functions
|
||||
// when the code is compiled with `panic=abort`.
|
||||
|
||||
#![crate_type = "lib"]
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// compile-flags: -C opt-level=0
|
||||
|
||||
// Test that `nounwind` atributes are correctly applied to exported `C` and `C-unwind` extern
|
||||
// Test that `nounwind` attributes are correctly applied to exported `C` and `C-unwind` extern
|
||||
// functions. `C-unwind` functions MUST NOT have this attribute. We disable optimizations above
|
||||
// to prevent LLVM from inferring the attribute.
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// compile-flags: -C opt-level=0
|
||||
|
||||
// Test that `nounwind` atributes are correctly applied to exported `cdecl` and
|
||||
// Test that `nounwind` attributes are correctly applied to exported `cdecl` and
|
||||
// `cdecl-unwind` extern functions. `cdecl-unwind` functions MUST NOT have this attribute. We
|
||||
// disable optimizations above to prevent LLVM from inferring the attribute.
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
#[lang="sized"]
|
||||
trait Sized { }
|
||||
|
||||
// Test that `nounwind` atributes are correctly applied to exported `fastcall` and
|
||||
// Test that `nounwind` attributes are correctly applied to exported `fastcall` and
|
||||
// `fastcall-unwind` extern functions. `fastcall-unwind` functions MUST NOT have this attribute. We
|
||||
// disable optimizations above to prevent LLVM from inferring the attribute.
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
|
||||
#![crate_type = "lib"]
|
||||
|
||||
// We disable optimizations to prevent LLVM from infering the attribute.
|
||||
// We disable optimizations to prevent LLVM from inferring the attribute.
|
||||
|
||||
// CHECK: Function Attrs:{{.*}}nounwind
|
||||
// CHECK-NEXT: @foo
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
|
||||
#![crate_type = "lib"]
|
||||
|
||||
// We disable optimizations to prevent LLVM from infering the attribute.
|
||||
// We disable optimizations to prevent LLVM from inferring the attribute.
|
||||
|
||||
extern "C" {
|
||||
fn bar();
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
#![crate_type = "lib"]
|
||||
#![feature(c_unwind)]
|
||||
|
||||
// We disable optimizations to prevent LLVM from infering the attribute.
|
||||
// We disable optimizations to prevent LLVM from inferring the attribute.
|
||||
|
||||
// CHECK: Function Attrs:{{.*}}nounwind
|
||||
// CHECK-NEXT: @foo
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
#[lang="sized"]
|
||||
trait Sized { }
|
||||
|
||||
// Test that `nounwind` atributes are correctly applied to exported `stdcall` and `stdcall-unwind`
|
||||
// Test that `nounwind` attributes are correctly applied to exported `stdcall` and `stdcall-unwind`
|
||||
// extern functions. `stdcall-unwind` functions MUST NOT have this attribute. We disable
|
||||
// optimizations above to prevent LLVM from inferring the attribute.
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
// compile-flags: -C opt-level=0
|
||||
|
||||
// Test that `nounwind` atributes are correctly applied to exported `system` and `system-unwind`
|
||||
// Test that `nounwind` attributes are correctly applied to exported `system` and `system-unwind`
|
||||
// extern functions. `system-unwind` functions MUST NOT have this attribute. We disable
|
||||
// optimizations above to prevent LLVM from inferring the attribute.
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
#[lang="sized"]
|
||||
trait Sized { }
|
||||
|
||||
// Test that `nounwind` atributes are correctly applied to exported `sysv64` and
|
||||
// Test that `nounwind` attributes are correctly applied to exported `sysv64` and
|
||||
// `sysv64-unwind` extern functions. `sysv64-unwind` functions MUST NOT have this attribute. We
|
||||
// disable optimizations above to prevent LLVM from inferring the attribute.
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
#[lang="sized"]
|
||||
trait Sized { }
|
||||
|
||||
// Test that `nounwind` atributes are correctly applied to exported `thiscall` and
|
||||
// Test that `nounwind` attributes are correctly applied to exported `thiscall` and
|
||||
// `thiscall-unwind` extern functions. `thiscall-unwind` functions MUST NOT have this attribute. We
|
||||
// disable optimizations above to prevent LLVM from inferring the attribute.
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
#[lang="sized"]
|
||||
trait Sized { }
|
||||
|
||||
// Test that `nounwind` atributes are correctly applied to exported `vectorcall` and
|
||||
// Test that `nounwind` attributes are correctly applied to exported `vectorcall` and
|
||||
// `vectorcall-unwind` extern functions. `vectorcall-unwind` functions MUST NOT have this attribute.
|
||||
// We disable optimizations above to prevent LLVM from inferring the attribute.
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
#[lang="sized"]
|
||||
trait Sized { }
|
||||
|
||||
// Test that `nounwind` atributes are correctly applied to exported `win64` and
|
||||
// Test that `nounwind` attributes are correctly applied to exported `win64` and
|
||||
// `win64-unwind` extern functions. `win64-unwind` functions MUST NOT have this attribute. We
|
||||
// disable optimizations above to prevent LLVM from inferring the attribute.
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
#![feature(c_unwind)]
|
||||
|
||||
// Make sure these all do *not* get the attribute.
|
||||
// We disable optimizations to prevent LLVM from infering the attribute.
|
||||
// We disable optimizations to prevent LLVM from inferring the attribute.
|
||||
// CHECK-NOT: nounwind
|
||||
|
||||
// "C" ABI
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
// compile-flags: -Z query-dep-graph
|
||||
// aux-build:cached_hygiene.rs
|
||||
|
||||
// This tests the folllowing scenario
|
||||
// This tests the following scenario
|
||||
// 1. A foreign crate is compiled with incremental compilation.
|
||||
// This causes hygiene information to be saved to the incr cache.
|
||||
// 2. One function is the foreign crate is modified. This causes the
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
// Regression test for hashing involving canonical variables. In this
|
||||
// test -- which has an intensional error -- the type of the value
|
||||
// test -- which has an intentional error -- the type of the value
|
||||
// being dropped winds up including a type variable. Canonicalization
|
||||
// would then produce a `?0` which -- in turn -- triggered an ICE in
|
||||
// hashing.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
|
||||
// rust-lang/rust#69798:
|
||||
//
|
||||
// This is analgous to cgu_invalidated_when_import_added, but it covers a
|
||||
// This is analogous to cgu_invalidated_when_import_added, but it covers a
|
||||
// problem uncovered where a change to the *export* set caused a link failure
|
||||
// when reusing post-LTO optimized object code.
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
|
||||
// rust-lang/rust#69798:
|
||||
//
|
||||
// This is analgous to cgu_invalidated_when_export_added, but it covers the
|
||||
// This is analogous to cgu_invalidated_when_export_added, but it covers the
|
||||
// other direction. This is analogous to cgu_invalidated_when_import_added: we
|
||||
// include it, because it may uncover bugs in variant implementation strategies.
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
//! Tests that we can propogate into places that are projections into unions
|
||||
//! Tests that we can propagate into places that are projections into unions
|
||||
// compile-flags: -Zunsound-mir-opts
|
||||
fn val() -> u32 {
|
||||
1
|
||||
|
|
|
@ -19,7 +19,7 @@ all:
|
|||
# Dump all the symbols from the staticlib into `syms`
|
||||
"$(LLVM_BIN_DIR)"/llvm-objdump -t $(TMPDIR)/libdownstream.a > $(TMPDIR)/syms
|
||||
# Count the global instances of `issue64153_test_function`. There'll be 2
|
||||
# if the `upstream` object file got erronously included twice.
|
||||
# if the `upstream` object file got erroneously included twice.
|
||||
# The line we are testing for with the regex looks something like:
|
||||
# 0000000000000000 g F .text.issue64153_test_function 00000023 issue64153_test_function
|
||||
grep -c -e "[[:space:]]g[[:space:]]*F[[:space:]].*issue64153_test_function" $(TMPDIR)/syms > $(TMPDIR)/count
|
||||
|
|
|
@ -60,7 +60,7 @@ endif
|
|||
# for now, but it is effectively ignored for all tests that don't include this file anyway.
|
||||
#
|
||||
# (Note that it's also possible the `_counters.<test>.txt` and `<test>.json` files (if generated)
|
||||
# may order results from multiple files inconsistently, which might also have to be accomodated
|
||||
# may order results from multiple files inconsistently, which might also have to be accommodated
|
||||
# if and when we allow `llvm-cov` to produce results for multiple files. Note, the path separators
|
||||
# appear to be normalized to `/` in those files, thankfully.)
|
||||
LLVM_COV_IGNORE_FILES=\
|
||||
|
@ -157,7 +157,7 @@ else
|
|||
# `// ignore-llvm-cov-show-diffs` anymore. This directive exists to work around a limitation
|
||||
# with `llvm-cov show`. When reporting coverage for multiple instantiations of a generic function,
|
||||
# with different type substitutions, `llvm-cov show` prints these in a non-deterministic order,
|
||||
# breaking the `diff` comparision.
|
||||
# breaking the `diff` comparison.
|
||||
#
|
||||
# A partial workaround is implemented below, with `diff --ignore-matching-lines=RE`
|
||||
# to ignore each line prefixing each generic instantiation coverage code region.
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue