commit    698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
tree      173a775858bd501c378080a10dca74132f05bc50 /vendor/tracing-subscriber/src/registry
parent    Initial commit. (diff)
download  rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.tar.xz
          rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.zip

Adding upstream version 1.64.0+dfsg1 (tag: upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/tracing-subscriber/src/registry')
-rw-r--r--  vendor/tracing-subscriber/src/registry/extensions.rs  274
-rw-r--r--  vendor/tracing-subscriber/src/registry/mod.rs         600
-rw-r--r--  vendor/tracing-subscriber/src/registry/sharded.rs     901
-rw-r--r--  vendor/tracing-subscriber/src/registry/stack.rs        77
4 files changed, 1852 insertions, 0 deletions
diff --git a/vendor/tracing-subscriber/src/registry/extensions.rs b/vendor/tracing-subscriber/src/registry/extensions.rs
new file mode 100644
index 000000000..ff76fb599
--- /dev/null
+++ b/vendor/tracing-subscriber/src/registry/extensions.rs
@@ -0,0 +1,274 @@
+// taken from https://github.com/hyperium/http/blob/master/src/extensions.rs.
+
+use crate::sync::{RwLockReadGuard, RwLockWriteGuard};
+use std::{
+ any::{Any, TypeId},
+ collections::HashMap,
+ fmt,
+ hash::{BuildHasherDefault, Hasher},
+};
+
+type AnyMap = HashMap<TypeId, Box<dyn Any + Send + Sync>, BuildHasherDefault<IdHasher>>;
+
+/// With `TypeId`s as keys, there's no need to hash them. They are already
+/// hashes themselves, coming from the compiler. The `IdHasher` holds the
+/// `u64` of the `TypeId`, and then returns it, instead of doing any bit
+/// fiddling.
+#[derive(Default, Debug)]
+struct IdHasher(u64);
+
+impl Hasher for IdHasher {
+ fn write(&mut self, _: &[u8]) {
+ unreachable!("TypeId calls write_u64");
+ }
+
+ #[inline]
+ fn write_u64(&mut self, id: u64) {
+ self.0 = id;
+ }
+
+ #[inline]
+ fn finish(&self) -> u64 {
+ self.0
+ }
+}
+
+/// An immutable, read-only reference to a Span's extensions.
+#[derive(Debug)]
+#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+pub struct Extensions<'a> {
+ inner: RwLockReadGuard<'a, ExtensionsInner>,
+}
+
+impl<'a> Extensions<'a> {
+ #[cfg(feature = "registry")]
+ pub(crate) fn new(inner: RwLockReadGuard<'a, ExtensionsInner>) -> Self {
+ Self { inner }
+ }
+
+ /// Immutably borrows a type previously inserted into this `Extensions`.
+ pub fn get<T: 'static>(&self) -> Option<&T> {
+ self.inner.get::<T>()
+ }
+}
+
+/// A mutable reference to a Span's extensions.
+#[derive(Debug)]
+#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+pub struct ExtensionsMut<'a> {
+ inner: RwLockWriteGuard<'a, ExtensionsInner>,
+}
+
+impl<'a> ExtensionsMut<'a> {
+ #[cfg(feature = "registry")]
+ pub(crate) fn new(inner: RwLockWriteGuard<'a, ExtensionsInner>) -> Self {
+ Self { inner }
+ }
+
+ /// Insert a type into this `Extensions`.
+ ///
+ /// Note that extensions are _not_
+ /// `Layer`-specific—they are _span_-specific. This means that
+ /// other layers can access and mutate extensions that
+ /// a different Layer recorded. For example, an application might
+ /// have a layer that records execution timings, alongside a layer
+ /// that reports spans and events to a distributed
+ /// tracing system that requires timestamps for spans.
+ /// Ideally, if one layer records a timestamp _x_, the other layer
+ /// should be able to reuse timestamp _x_.
+ ///
+ /// Therefore, extensions should generally be newtypes, rather than common
+ /// types like [`String`](std::string::String), to avoid accidental
+ /// cross-`Layer` clobbering.
+ ///
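+    /// For example, a layer recording execution timings might store each
+    /// span's start time under a dedicated newtype (a sketch; `TimingLayer`
+    /// and `StartedAt` are illustrative names, not part of this crate):
+    ///
+    /// ```rust
+    /// use std::time::Instant;
+    /// use tracing_core::{span, Subscriber};
+    /// use tracing_subscriber::{layer::{Context, Layer}, registry::LookupSpan};
+    ///
+    /// // Newtype key: other layers cannot accidentally clobber this entry
+    /// // with a bare `Instant` of their own.
+    /// struct StartedAt(Instant);
+    ///
+    /// struct TimingLayer;
+    ///
+    /// impl<S: Subscriber + for<'a> LookupSpan<'a>> Layer<S> for TimingLayer {
+    ///     fn on_new_span(&self, _attrs: &span::Attributes<'_>, id: &span::Id, ctx: Context<'_, S>) {
+    ///         let span = ctx.span(id).expect("span must exist");
+    ///         // Sketch: a real layer would also read `StartedAt` back in
+    ///         // `on_close` to compute the span's duration.
+    ///         span.extensions_mut().insert(StartedAt(Instant::now()));
+    ///     }
+    /// }
+    /// # let _ = TimingLayer;
+    /// ```
+    ///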
+ /// ## Panics
+ ///
+ /// If `T` is already present in `Extensions`, then this method will panic.
+ pub fn insert<T: Send + Sync + 'static>(&mut self, val: T) {
+ assert!(self.replace(val).is_none())
+ }
+
+    /// Replaces an existing `T` in this extensions map with `val`, returning
+    /// the previous value.
+    ///
+    /// If no `T` was present, `val` is still inserted, and `Option::None` is
+    /// returned.
+ pub fn replace<T: Send + Sync + 'static>(&mut self, val: T) -> Option<T> {
+ self.inner.insert(val)
+ }
+
+ /// Get a mutable reference to a type previously inserted on this `ExtensionsMut`.
+ pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> {
+ self.inner.get_mut::<T>()
+ }
+
+ /// Remove a type from this `Extensions`.
+ ///
+    /// If an extension of this type existed, it will be returned.
+ pub fn remove<T: Send + Sync + 'static>(&mut self) -> Option<T> {
+ self.inner.remove::<T>()
+ }
+}
+
+/// A type map of span extensions.
+///
+/// [ExtensionsInner] is used by `SpanData` to store
+/// span-specific data. A given `Layer` can read and write
+/// data that it is interested in recording and emitting.
+#[derive(Default)]
+pub(crate) struct ExtensionsInner {
+ map: AnyMap,
+}
+
+impl ExtensionsInner {
+    /// Create an empty `ExtensionsInner`.
+    #[cfg(any(test, feature = "registry"))]
+    #[inline]
+    pub(crate) fn new() -> ExtensionsInner {
+ ExtensionsInner {
+ map: AnyMap::default(),
+ }
+ }
+
+ /// Insert a type into this `Extensions`.
+ ///
+    /// If an extension of this type already existed, it will
+ /// be returned.
+ pub(crate) fn insert<T: Send + Sync + 'static>(&mut self, val: T) -> Option<T> {
+ self.map
+ .insert(TypeId::of::<T>(), Box::new(val))
+            .and_then(|boxed| {
+                (boxed as Box<dyn Any + 'static>)
+                    .downcast()
+                    .ok()
+                    .map(|boxed| *boxed)
+            })
+ }
+
+ /// Get a reference to a type previously inserted on this `Extensions`.
+ pub(crate) fn get<T: 'static>(&self) -> Option<&T> {
+ self.map
+ .get(&TypeId::of::<T>())
+ .and_then(|boxed| (&**boxed as &(dyn Any + 'static)).downcast_ref())
+ }
+
+ /// Get a mutable reference to a type previously inserted on this `Extensions`.
+ pub(crate) fn get_mut<T: 'static>(&mut self) -> Option<&mut T> {
+ self.map
+ .get_mut(&TypeId::of::<T>())
+ .and_then(|boxed| (&mut **boxed as &mut (dyn Any + 'static)).downcast_mut())
+ }
+
+ /// Remove a type from this `Extensions`.
+ ///
+    /// If an extension of this type existed, it will be returned.
+ pub(crate) fn remove<T: Send + Sync + 'static>(&mut self) -> Option<T> {
+        self.map.remove(&TypeId::of::<T>()).and_then(|boxed| {
+            (boxed as Box<dyn Any + 'static>)
+                .downcast()
+                .ok()
+                .map(|boxed| *boxed)
+        })
+ }
+
+ /// Clear the `ExtensionsInner` in-place, dropping any elements in the map but
+ /// retaining allocated capacity.
+ ///
+ /// This permits the hash map allocation to be pooled by the registry so
+ /// that future spans will not need to allocate new hashmaps.
+ #[cfg(any(test, feature = "registry"))]
+ pub(crate) fn clear(&mut self) {
+ self.map.clear();
+ }
+}
+
+impl fmt::Debug for ExtensionsInner {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Extensions")
+ .field("len", &self.map.len())
+ .field("capacity", &self.map.capacity())
+ .finish()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[derive(Debug, PartialEq)]
+ struct MyType(i32);
+
+ #[test]
+ fn test_extensions() {
+ let mut extensions = ExtensionsInner::new();
+
+ extensions.insert(5i32);
+ extensions.insert(MyType(10));
+
+ assert_eq!(extensions.get(), Some(&5i32));
+ assert_eq!(extensions.get_mut(), Some(&mut 5i32));
+
+ assert_eq!(extensions.remove::<i32>(), Some(5i32));
+ assert!(extensions.get::<i32>().is_none());
+
+ assert_eq!(extensions.get::<bool>(), None);
+ assert_eq!(extensions.get(), Some(&MyType(10)));
+ }
+
+ #[test]
+ fn clear_retains_capacity() {
+ let mut extensions = ExtensionsInner::new();
+ extensions.insert(5i32);
+ extensions.insert(MyType(10));
+ extensions.insert(true);
+
+ assert_eq!(extensions.map.len(), 3);
+ let prev_capacity = extensions.map.capacity();
+ extensions.clear();
+
+ assert_eq!(
+ extensions.map.len(),
+ 0,
+ "after clear(), extensions map should have length 0"
+ );
+ assert_eq!(
+ extensions.map.capacity(),
+ prev_capacity,
+ "after clear(), extensions map should retain prior capacity"
+ );
+ }
+
+ #[test]
+ fn clear_drops_elements() {
+ use std::sync::Arc;
+ struct DropMePlease(Arc<()>);
+ struct DropMeTooPlease(Arc<()>);
+
+ let mut extensions = ExtensionsInner::new();
+ let val1 = DropMePlease(Arc::new(()));
+ let val2 = DropMeTooPlease(Arc::new(()));
+
+ let val1_dropped = Arc::downgrade(&val1.0);
+ let val2_dropped = Arc::downgrade(&val2.0);
+ extensions.insert(val1);
+ extensions.insert(val2);
+
+ assert!(val1_dropped.upgrade().is_some());
+ assert!(val2_dropped.upgrade().is_some());
+
+ extensions.clear();
+ assert!(
+ val1_dropped.upgrade().is_none(),
+ "after clear(), val1 should be dropped"
+ );
+ assert!(
+ val2_dropped.upgrade().is_none(),
+ "after clear(), val2 should be dropped"
+ );
+ }
+}
diff --git a/vendor/tracing-subscriber/src/registry/mod.rs b/vendor/tracing-subscriber/src/registry/mod.rs
new file mode 100644
index 000000000..38af53e8a
--- /dev/null
+++ b/vendor/tracing-subscriber/src/registry/mod.rs
@@ -0,0 +1,600 @@
+//! Storage for span data shared by multiple [`Layer`]s.
+//!
+//! ## Using the Span Registry
+//!
+//! This module provides the [`Registry`] type, a [`Subscriber`] implementation
+//! which tracks per-span data and exposes it to [`Layer`]s. When a `Registry`
+//! is used as the base `Subscriber` of a `Layer` stack, the
+//! [`layer::Context`][ctx] type will provide methods allowing `Layer`s to
+//! [look up span data][lookup] stored in the registry. While [`Registry`] is a
+//! reasonable default for storing spans and events, other stores that implement
+//! [`LookupSpan`] and [`Subscriber`] themselves (with [`SpanData`] implemented
+//! by the per-span data they store) can be used as a drop-in replacement.
+//!
+//! For example, we might create a `Registry` and add multiple `Layer`s like so:
+//! ```rust
+//! use tracing_subscriber::{registry::Registry, Layer, prelude::*};
+//! # use tracing_core::Subscriber;
+//! # pub struct FooLayer {}
+//! # pub struct BarLayer {}
+//! # impl<S: Subscriber> Layer<S> for FooLayer {}
+//! # impl<S: Subscriber> Layer<S> for BarLayer {}
+//! # impl FooLayer {
+//! # fn new() -> Self { Self {} }
+//! # }
+//! # impl BarLayer {
+//! # fn new() -> Self { Self {} }
+//! # }
+//!
+//! let subscriber = Registry::default()
+//! .with(FooLayer::new())
+//! .with(BarLayer::new());
+//! ```
+//!
+//! If a type implementing `Layer` depends on the functionality of a `Registry`
+//! implementation, it should bound its `Subscriber` type parameter with the
+//! [`LookupSpan`] trait, like so:
+//!
+//! ```rust
+//! use tracing_subscriber::{registry, Layer};
+//! use tracing_core::Subscriber;
+//!
+//! pub struct MyLayer {
+//! // ...
+//! }
+//!
+//! impl<S> Layer<S> for MyLayer
+//! where
+//! S: Subscriber + for<'a> registry::LookupSpan<'a>,
+//! {
+//! // ...
+//! }
+//! ```
+//! When this bound is added, the `Layer` implementation will be guaranteed
+//! access to the [`Context`][ctx] methods, such as [`Context::span`][lookup], that
+//! require the root subscriber to be a registry.
+//!
+//! [`Layer`]: crate::layer::Layer
+//! [`Subscriber`]: tracing_core::Subscriber
+//! [ctx]: crate::layer::Context
+//! [lookup]: crate::layer::Context::span()
+use tracing_core::{field::FieldSet, span::Id, Metadata};
+
+feature! {
+ #![feature = "std"]
+ /// A module containing a type map of span extensions.
+ mod extensions;
+ pub use extensions::{Extensions, ExtensionsMut};
+
+}
+
+feature! {
+ #![all(feature = "registry", feature = "std")]
+
+ mod sharded;
+ mod stack;
+
+ pub use sharded::Data;
+ pub use sharded::Registry;
+
+ use crate::filter::FilterId;
+}
+
+/// Provides access to stored span data.
+///
+/// Subscribers which store span data and associate it with span IDs should
+/// implement this trait; if they do, any [`Layer`]s wrapping them can look up
+/// metadata via the [`Context`] type's [`span()`] method.
+///
+/// [`Layer`]: super::layer::Layer
+/// [`Context`]: super::layer::Context
+/// [`span()`]: super::layer::Context::span
+pub trait LookupSpan<'a> {
+ /// The type of span data stored in this registry.
+ type Data: SpanData<'a>;
+
+ /// Returns the [`SpanData`] for a given `Id`, if it exists.
+ ///
+ /// <pre class="ignore" style="white-space:normal;font:inherit;">
+ /// <strong>Note</strong>: users of the <code>LookupSpan</code> trait should
+ /// typically call the <a href="#method.span"><code>span</code></a> method rather
+ /// than this method. The <code>span</code> method is implemented by
+ /// <em>calling</em> <code>span_data</code>, but returns a reference which is
+    /// capable of performing more sophisticated queries.
+ /// </pre>
+ ///
+ fn span_data(&'a self, id: &Id) -> Option<Self::Data>;
+
+ /// Returns a [`SpanRef`] for the span with the given `Id`, if it exists.
+ ///
+    /// A `SpanRef` is similar to [`SpanData`], but it allows performing
+    /// additional lookups against the registry that stores the wrapped data.
+    ///
+    /// In general, _users_ of the `LookupSpan` trait should use this method
+    /// rather than the [`span_data`] method, while _implementors_ of this
+    /// trait should only implement `span_data`.
+ ///
+ /// [`span_data`]: LookupSpan::span_data()
+ fn span(&'a self, id: &Id) -> Option<SpanRef<'_, Self>>
+ where
+ Self: Sized,
+ {
+ let data = self.span_data(id)?;
+ Some(SpanRef {
+ registry: self,
+ data,
+ #[cfg(feature = "registry")]
+ filter: FilterId::none(),
+ })
+ }
+
+ /// Registers a [`Filter`] for [per-layer filtering] with this
+ /// [`Subscriber`].
+ ///
+ /// The [`Filter`] can then use the returned [`FilterId`] to
+ /// [check if it previously enabled a span][check].
+ ///
+ /// # Panics
+ ///
+ /// If this `Subscriber` does not support [per-layer filtering].
+ ///
+ /// [`Filter`]: crate::layer::Filter
+ /// [per-layer filtering]: crate::layer::Layer#per-layer-filtering
+ /// [`Subscriber`]: tracing_core::Subscriber
+ /// [`FilterId`]: crate::filter::FilterId
+ /// [check]: SpanData::is_enabled_for
+ #[cfg(feature = "registry")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "registry")))]
+ fn register_filter(&mut self) -> FilterId {
+ panic!(
+ "{} does not currently support filters",
+ std::any::type_name::<Self>()
+ )
+ }
+}
+
+/// A stored representation of data associated with a span.
+pub trait SpanData<'a> {
+ /// Returns this span's ID.
+ fn id(&self) -> Id;
+
+ /// Returns a reference to the span's `Metadata`.
+ fn metadata(&self) -> &'static Metadata<'static>;
+
+    /// Returns a reference to the ID of this span's parent, or `None` if
+    /// this span is the root of its trace tree.
+ fn parent(&self) -> Option<&Id>;
+
+ /// Returns a reference to this span's `Extensions`.
+ ///
+ /// The extensions may be used by `Layer`s to store additional data
+ /// describing the span.
+ #[cfg(feature = "std")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+ fn extensions(&self) -> Extensions<'_>;
+
+ /// Returns a mutable reference to this span's `Extensions`.
+ ///
+ /// The extensions may be used by `Layer`s to store additional data
+ /// describing the span.
+ #[cfg(feature = "std")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+ fn extensions_mut(&self) -> ExtensionsMut<'_>;
+
+ /// Returns `true` if this span is enabled for the [per-layer filter][plf]
+ /// corresponding to the provided [`FilterId`].
+ ///
+ /// ## Default Implementation
+ ///
+ /// By default, this method assumes that the [`LookupSpan`] implementation
+ /// does not support [per-layer filtering][plf], and always returns `true`.
+ ///
+ /// [plf]: crate::layer::Layer#per-layer-filtering
+ /// [`FilterId`]: crate::filter::FilterId
+ #[cfg(feature = "registry")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "registry")))]
+ fn is_enabled_for(&self, filter: FilterId) -> bool {
+ let _ = filter;
+ true
+ }
+}
+
+/// A reference to [span data] and the associated [registry].
+///
+/// This type implements all the same methods as [`SpanData`][span data], and
+/// provides additional methods for querying the registry based on values from
+/// the span.
+///
+/// [span data]: SpanData
+/// [registry]: LookupSpan
+#[derive(Debug)]
+pub struct SpanRef<'a, R: LookupSpan<'a>> {
+ registry: &'a R,
+ data: R::Data,
+
+ #[cfg(feature = "registry")]
+ filter: FilterId,
+}
+
+/// An iterator over the parents of a span, ordered from leaf to root.
+///
+/// This is returned by the [`SpanRef::scope`] method.
+#[derive(Debug)]
+pub struct Scope<'a, R> {
+ registry: &'a R,
+ next: Option<Id>,
+
+ #[cfg(all(feature = "registry", feature = "std"))]
+ filter: FilterId,
+}
+
+feature! {
+ #![any(feature = "alloc", feature = "std")]
+
+ #[cfg(not(feature = "smallvec"))]
+ use alloc::vec::{self, Vec};
+
+    use core::{fmt, iter};
+
+ /// An iterator over the parents of a span, ordered from root to leaf.
+ ///
+ /// This is returned by the [`Scope::from_root`] method.
+ pub struct ScopeFromRoot<'a, R>
+ where
+ R: LookupSpan<'a>,
+ {
+ #[cfg(feature = "smallvec")]
+ spans: iter::Rev<smallvec::IntoIter<SpanRefVecArray<'a, R>>>,
+ #[cfg(not(feature = "smallvec"))]
+ spans: iter::Rev<vec::IntoIter<SpanRef<'a, R>>>,
+ }
+
+ #[cfg(feature = "smallvec")]
+ type SpanRefVecArray<'span, L> = [SpanRef<'span, L>; 16];
+
+ impl<'a, R> Scope<'a, R>
+ where
+ R: LookupSpan<'a>,
+ {
+ /// Flips the order of the iterator, so that it is ordered from root to leaf.
+ ///
+ /// The iterator will first return the root span, then that span's immediate child,
+ /// and so on until it finally returns the span that [`SpanRef::scope`] was called on.
+ ///
+ /// If any items were consumed from the [`Scope`] before calling this method then they
+ /// will *not* be returned from the [`ScopeFromRoot`].
+ ///
+ /// **Note**: this will allocate if there are many spans remaining, or if the
+ /// "smallvec" feature flag is not enabled.
+ #[allow(clippy::wrong_self_convention)]
+ pub fn from_root(self) -> ScopeFromRoot<'a, R> {
+ #[cfg(feature = "smallvec")]
+ type Buf<T> = smallvec::SmallVec<T>;
+ #[cfg(not(feature = "smallvec"))]
+ type Buf<T> = Vec<T>;
+ ScopeFromRoot {
+ spans: self.collect::<Buf<_>>().into_iter().rev(),
+ }
+ }
+ }
+
+ impl<'a, R> Iterator for ScopeFromRoot<'a, R>
+ where
+ R: LookupSpan<'a>,
+ {
+ type Item = SpanRef<'a, R>;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ self.spans.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.spans.size_hint()
+ }
+ }
+
+ impl<'a, R> fmt::Debug for ScopeFromRoot<'a, R>
+ where
+ R: LookupSpan<'a>,
+ {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad("ScopeFromRoot { .. }")
+ }
+ }
+}
+
+impl<'a, R> Iterator for Scope<'a, R>
+where
+ R: LookupSpan<'a>,
+{
+ type Item = SpanRef<'a, R>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ loop {
+ let curr = self.registry.span(self.next.as_ref()?)?;
+
+ #[cfg(all(feature = "registry", feature = "std"))]
+ let curr = curr.with_filter(self.filter);
+ self.next = curr.data.parent().cloned();
+
+ // If the `Scope` is filtered, check if the current span is enabled
+ // by the selected filter ID.
+
+ #[cfg(all(feature = "registry", feature = "std"))]
+ {
+ if !curr.is_enabled_for(self.filter) {
+ // The current span in the chain is disabled for this
+ // filter. Try its parent.
+ continue;
+ }
+ }
+
+ return Some(curr);
+ }
+ }
+}
+
+impl<'a, R> SpanRef<'a, R>
+where
+ R: LookupSpan<'a>,
+{
+ /// Returns this span's ID.
+ pub fn id(&self) -> Id {
+ self.data.id()
+ }
+
+ /// Returns a static reference to the span's metadata.
+ pub fn metadata(&self) -> &'static Metadata<'static> {
+ self.data.metadata()
+ }
+
+    /// Returns the span's name.
+ pub fn name(&self) -> &'static str {
+ self.data.metadata().name()
+ }
+
+ /// Returns a list of [fields] defined by the span.
+ ///
+ /// [fields]: tracing_core::field
+ pub fn fields(&self) -> &FieldSet {
+ self.data.metadata().fields()
+ }
+
+ /// Returns a `SpanRef` describing this span's parent, or `None` if this
+ /// span is the root of its trace tree.
+ pub fn parent(&self) -> Option<Self> {
+ let id = self.data.parent()?;
+ let data = self.registry.span_data(id)?;
+
+ #[cfg(all(feature = "registry", feature = "std"))]
+ {
+ // move these into mut bindings if the registry feature is enabled,
+ // since they may be mutated in the loop.
+ let mut data = data;
+ loop {
+ // Is this parent enabled by our filter?
+ if data.is_enabled_for(self.filter) {
+ return Some(Self {
+ registry: self.registry,
+ filter: self.filter,
+ data,
+ });
+ }
+
+ // It's not enabled. If the disabled span has a parent, try that!
+ let id = data.parent()?;
+ data = self.registry.span_data(id)?;
+ }
+ }
+
+ #[cfg(not(all(feature = "registry", feature = "std")))]
+ Some(Self {
+ registry: self.registry,
+ data,
+ })
+ }
+
+ /// Returns an iterator over all parents of this span, starting with this span,
+ /// ordered from leaf to root.
+ ///
+ /// The iterator will first return the span, then the span's immediate parent,
+ /// followed by that span's parent, and so on, until it reaches a root span.
+ ///
+ /// ```rust
+ /// use tracing::{span, Subscriber};
+ /// use tracing_subscriber::{
+ /// layer::{Context, Layer},
+ /// prelude::*,
+ /// registry::LookupSpan,
+ /// };
+ ///
+ /// struct PrintingLayer;
+ /// impl<S> Layer<S> for PrintingLayer
+ /// where
+ /// S: Subscriber + for<'lookup> LookupSpan<'lookup>,
+ /// {
+ /// fn on_enter(&self, id: &span::Id, ctx: Context<S>) {
+ /// let span = ctx.span(id).unwrap();
+ /// let scope = span.scope().map(|span| span.name()).collect::<Vec<_>>();
+ /// println!("Entering span: {:?}", scope);
+ /// }
+ /// }
+ ///
+ /// tracing::subscriber::with_default(tracing_subscriber::registry().with(PrintingLayer), || {
+ /// let _root = tracing::info_span!("root").entered();
+ /// // Prints: Entering span: ["root"]
+ /// let _child = tracing::info_span!("child").entered();
+ /// // Prints: Entering span: ["child", "root"]
+ /// let _leaf = tracing::info_span!("leaf").entered();
+ /// // Prints: Entering span: ["leaf", "child", "root"]
+ /// });
+ /// ```
+ ///
+ /// If the opposite order (from the root to this span) is desired, calling [`Scope::from_root`] on
+ /// the returned iterator reverses the order.
+ ///
+ /// ```rust
+ /// # use tracing::{span, Subscriber};
+ /// # use tracing_subscriber::{
+ /// # layer::{Context, Layer},
+ /// # prelude::*,
+ /// # registry::LookupSpan,
+ /// # };
+ /// # struct PrintingLayer;
+ /// impl<S> Layer<S> for PrintingLayer
+ /// where
+ /// S: Subscriber + for<'lookup> LookupSpan<'lookup>,
+ /// {
+ /// fn on_enter(&self, id: &span::Id, ctx: Context<S>) {
+ /// let span = ctx.span(id).unwrap();
+ /// let scope = span.scope().from_root().map(|span| span.name()).collect::<Vec<_>>();
+ /// println!("Entering span: {:?}", scope);
+ /// }
+ /// }
+ ///
+ /// tracing::subscriber::with_default(tracing_subscriber::registry().with(PrintingLayer), || {
+ /// let _root = tracing::info_span!("root").entered();
+ /// // Prints: Entering span: ["root"]
+ /// let _child = tracing::info_span!("child").entered();
+ /// // Prints: Entering span: ["root", "child"]
+ /// let _leaf = tracing::info_span!("leaf").entered();
+ /// // Prints: Entering span: ["root", "child", "leaf"]
+ /// });
+ /// ```
+ pub fn scope(&self) -> Scope<'a, R> {
+ Scope {
+ registry: self.registry,
+ next: Some(self.id()),
+
+ #[cfg(feature = "registry")]
+ filter: self.filter,
+ }
+ }
+
+ /// Returns a reference to this span's `Extensions`.
+ ///
+ /// The extensions may be used by `Layer`s to store additional data
+ /// describing the span.
+ #[cfg(feature = "std")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+ pub fn extensions(&self) -> Extensions<'_> {
+ self.data.extensions()
+ }
+
+ /// Returns a mutable reference to this span's `Extensions`.
+ ///
+ /// The extensions may be used by `Layer`s to store additional data
+ /// describing the span.
+ #[cfg(feature = "std")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+ pub fn extensions_mut(&self) -> ExtensionsMut<'_> {
+ self.data.extensions_mut()
+ }
+
+ #[cfg(all(feature = "registry", feature = "std"))]
+ pub(crate) fn try_with_filter(self, filter: FilterId) -> Option<Self> {
+ if self.is_enabled_for(filter) {
+ return Some(self.with_filter(filter));
+ }
+
+ None
+ }
+
+ #[inline]
+ #[cfg(all(feature = "registry", feature = "std"))]
+ pub(crate) fn is_enabled_for(&self, filter: FilterId) -> bool {
+ self.data.is_enabled_for(filter)
+ }
+
+ #[inline]
+ #[cfg(all(feature = "registry", feature = "std"))]
+ fn with_filter(self, filter: FilterId) -> Self {
+ Self { filter, ..self }
+ }
+}
+
+#[cfg(all(test, feature = "registry", feature = "std"))]
+mod tests {
+ use crate::{
+ layer::{Context, Layer},
+ prelude::*,
+ registry::LookupSpan,
+ };
+ use std::sync::{Arc, Mutex};
+ use tracing::{span, Subscriber};
+
+ #[test]
+ fn spanref_scope_iteration_order() {
+ let last_entered_scope = Arc::new(Mutex::new(Vec::new()));
+
+ #[derive(Default)]
+ struct PrintingLayer {
+ last_entered_scope: Arc<Mutex<Vec<&'static str>>>,
+ }
+
+ impl<S> Layer<S> for PrintingLayer
+ where
+ S: Subscriber + for<'lookup> LookupSpan<'lookup>,
+ {
+ fn on_enter(&self, id: &span::Id, ctx: Context<'_, S>) {
+ let span = ctx.span(id).unwrap();
+ let scope = span.scope().map(|span| span.name()).collect::<Vec<_>>();
+ *self.last_entered_scope.lock().unwrap() = scope;
+ }
+ }
+
+ let _guard = tracing::subscriber::set_default(crate::registry().with(PrintingLayer {
+ last_entered_scope: last_entered_scope.clone(),
+ }));
+
+ let _root = tracing::info_span!("root").entered();
+ assert_eq!(&*last_entered_scope.lock().unwrap(), &["root"]);
+ let _child = tracing::info_span!("child").entered();
+ assert_eq!(&*last_entered_scope.lock().unwrap(), &["child", "root"]);
+ let _leaf = tracing::info_span!("leaf").entered();
+ assert_eq!(
+ &*last_entered_scope.lock().unwrap(),
+ &["leaf", "child", "root"]
+ );
+ }
+
+ #[test]
+ fn spanref_scope_fromroot_iteration_order() {
+ let last_entered_scope = Arc::new(Mutex::new(Vec::new()));
+
+ #[derive(Default)]
+ struct PrintingLayer {
+ last_entered_scope: Arc<Mutex<Vec<&'static str>>>,
+ }
+
+ impl<S> Layer<S> for PrintingLayer
+ where
+ S: Subscriber + for<'lookup> LookupSpan<'lookup>,
+ {
+ fn on_enter(&self, id: &span::Id, ctx: Context<'_, S>) {
+ let span = ctx.span(id).unwrap();
+ let scope = span
+ .scope()
+ .from_root()
+ .map(|span| span.name())
+ .collect::<Vec<_>>();
+ *self.last_entered_scope.lock().unwrap() = scope;
+ }
+ }
+
+ let _guard = tracing::subscriber::set_default(crate::registry().with(PrintingLayer {
+ last_entered_scope: last_entered_scope.clone(),
+ }));
+
+ let _root = tracing::info_span!("root").entered();
+ assert_eq!(&*last_entered_scope.lock().unwrap(), &["root"]);
+ let _child = tracing::info_span!("child").entered();
+ assert_eq!(&*last_entered_scope.lock().unwrap(), &["root", "child",]);
+ let _leaf = tracing::info_span!("leaf").entered();
+ assert_eq!(
+ &*last_entered_scope.lock().unwrap(),
+ &["root", "child", "leaf"]
+ );
+ }
+}
diff --git a/vendor/tracing-subscriber/src/registry/sharded.rs b/vendor/tracing-subscriber/src/registry/sharded.rs
new file mode 100644
index 000000000..b81d5fef8
--- /dev/null
+++ b/vendor/tracing-subscriber/src/registry/sharded.rs
@@ -0,0 +1,901 @@
+use sharded_slab::{pool::Ref, Clear, Pool};
+use thread_local::ThreadLocal;
+
+use super::stack::SpanStack;
+use crate::{
+ filter::{FilterId, FilterMap, FilterState},
+ registry::{
+ extensions::{Extensions, ExtensionsInner, ExtensionsMut},
+ LookupSpan, SpanData,
+ },
+ sync::RwLock,
+};
+use std::{
+ cell::{self, Cell, RefCell},
+ sync::atomic::{fence, AtomicUsize, Ordering},
+};
+use tracing_core::{
+ dispatcher::{self, Dispatch},
+ span::{self, Current, Id},
+ Event, Interest, Metadata, Subscriber,
+};
+
+/// A shared, reusable store for spans.
+///
+/// A `Registry` is a [`Subscriber`] around which multiple [`Layer`]s
+/// implementing various behaviors may be [added]. Unlike other types
+/// implementing `Subscriber`, `Registry` does not actually record traces itself:
+/// instead, it collects and stores span data that is exposed to any [`Layer`]s
+/// wrapping it through implementations of the [`LookupSpan`] trait.
+/// The `Registry` is responsible for storing span metadata, recording
+/// relationships between spans, and tracking which spans are active and which
+/// are closed. In addition, it provides a mechanism for [`Layer`]s to store
+/// user-defined per-span data, called [extensions], in the registry. This
+/// allows [`Layer`]-specific data to benefit from the `Registry`'s
+/// high-performance concurrent storage.
+///
+/// This registry is implemented using a [lock-free sharded slab][slab], and is
+/// highly optimized for concurrent access.
+///
+/// # Span ID Generation
+///
+/// Span IDs are not globally unique, but the registry ensures that
+/// no two currently active spans have the same ID within a process.
+///
+/// One of the primary responsibilities of the registry is to generate [span
+/// IDs]. Therefore, it's important for other code that interacts with the
+/// registry, such as [`Layer`]s, to understand the guarantees of the
+/// span IDs that are generated.
+///
+/// The registry's span IDs are guaranteed to be unique **at a given point
+/// in time**. This means that an active span will never be assigned the
+/// same ID as another **currently active** span. However, the registry
+/// **will** eventually reuse the IDs of [closed] spans, although an ID
+/// will never be reassigned immediately after a span has closed.
+///
+/// Spans are not [considered closed] by the `Registry` until *every*
+/// [`Span`] reference with that ID has been dropped.
+///
+/// Thus: span IDs generated by the registry should be considered unique
+/// only at a given point in time, and only relative to other spans
+/// generated by the same process. Two spans with the same ID will not exist
+/// in the same process concurrently. However, if historical span data is
+/// being stored, the same ID may occur for multiple spans in that
+/// data. If spans must be uniquely identified in historical data, the user
+/// code storing this data must assign its own unique identifiers to those
+/// spans. A counter is generally sufficient for this.
+///
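+/// A sketch of such a user-assigned identifier (the `SerialId` newtype here
+/// is illustrative, not part of this crate):
+///
+/// ```rust
+/// use std::sync::atomic::{AtomicU64, Ordering};
+///
+/// static NEXT_SERIAL: AtomicU64 = AtomicU64::new(0);
+///
+/// /// A process-wide serial number that user code can store in a span's
+/// /// extensions, so that historical span data remains distinguishable
+/// /// even after the registry reuses the span's `Id`.
+/// struct SerialId(u64);
+///
+/// impl SerialId {
+///     fn next() -> Self {
+///         SerialId(NEXT_SERIAL.fetch_add(1, Ordering::Relaxed))
+///     }
+/// }
+/// # let _ = SerialId::next();
+/// ```
+///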
+/// Similarly, span IDs generated by the registry are not unique outside of
+/// a given process. Distributed tracing systems may require identifiers
+/// that are unique across multiple processes on multiple machines (for
+/// example, [OpenTelemetry's `SpanId`s and `TraceId`s][ot]). `tracing` span
+/// IDs generated by the registry should **not** be used for this purpose.
+/// Instead, code which integrates with a distributed tracing system should
+/// generate and propagate its own IDs according to the rules specified by
+/// the distributed tracing system. These IDs can be associated with
+/// `tracing` spans using [fields] and/or [stored span data].
+///
+/// [span IDs]: tracing_core::span::Id
+/// [slab]: sharded_slab
+/// [`Layer`]: crate::Layer
+/// [added]: crate::layer::Layer#composing-layers
+/// [extensions]: super::Extensions
+/// [closed]: https://docs.rs/tracing/latest/tracing/span/index.html#closing-spans
+/// [considered closed]: tracing_core::subscriber::Subscriber::try_close()
+/// [`Span`]: https://docs.rs/tracing/latest/tracing/span/struct.Span.html
+/// [ot]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#spancontext
+/// [fields]: tracing_core::field
+/// [stored span data]: crate::registry::SpanData::extensions_mut
+#[cfg(feature = "registry")]
+#[cfg_attr(docsrs, doc(cfg(all(feature = "registry", feature = "std"))))]
+#[derive(Debug)]
+pub struct Registry {
+ spans: Pool<DataInner>,
+ current_spans: ThreadLocal<RefCell<SpanStack>>,
+ next_filter_id: u8,
+}
+
+/// Span data stored in a [`Registry`].
+///
+/// The registry stores well-known data defined by tracing: span relationships,
+/// metadata, and reference counts. Additional user-defined data provided by
+/// [`Layer`s], such as formatted fields, metrics, or distributed traces, should
+/// be stored in the [extensions] typemap.
+///
+/// [`Layer`s]: crate::layer::Layer
+/// [extensions]: Extensions
+#[cfg(feature = "registry")]
+#[cfg_attr(docsrs, doc(cfg(all(feature = "registry", feature = "std"))))]
+#[derive(Debug)]
+pub struct Data<'a> {
+ /// Immutable reference to the pooled `DataInner` entry.
+ inner: Ref<'a, DataInner>,
+}
+
+/// Stored data associated with a span.
+///
+/// This type is pooled using [`sharded_slab::Pool`]; when a span is
+/// dropped, the `DataInner` entry at that span's slab index is cleared
+/// in place and reused by a future span. Thus, the `Default` and
+/// [`sharded_slab::Clear`] implementations for this type are
+/// load-bearing.
+#[derive(Debug)]
+struct DataInner {
+ filter_map: FilterMap,
+ metadata: &'static Metadata<'static>,
+ parent: Option<Id>,
+ ref_count: AtomicUsize,
+ // The span's `Extensions` typemap. Allocations for the `HashMap` backing
+ // this are pooled and reused in place.
+ pub(crate) extensions: RwLock<ExtensionsInner>,
+}
+
+// === impl Registry ===
+
+impl Default for Registry {
+ fn default() -> Self {
+ Self {
+ spans: Pool::new(),
+ current_spans: ThreadLocal::new(),
+ next_filter_id: 0,
+ }
+ }
+}
+
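+// Span `Id`s must be nonzero, while slab indices begin at zero, so the
+// conversions below offset by one in each direction.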
+#[inline]
+fn idx_to_id(idx: usize) -> Id {
+ Id::from_u64(idx as u64 + 1)
+}
+
+#[inline]
+fn id_to_idx(id: &Id) -> usize {
+ id.into_u64() as usize - 1
+}
+
+/// A guard that tracks how many [`Registry`]-backed `Layer`s have
+/// processed an `on_close` event.
+///
+/// This is needed to enable a [`Registry`]-backed `Layer` to access span
+/// data after the `Layer` has received the `on_close` callback.
+///
+/// Once all `Layer`s have processed this event, the [`Registry`] knows
+/// that it is able to safely remove the span tracked by `id`. `CloseGuard`
+/// accomplishes this through a two-step process:
+/// 1. Whenever a [`Registry`]-backed `Layer::on_close` method is
+///    called, it calls `Registry::start_close`, which increments the
+///    thread-local `CLOSE_COUNT` by 1 and returns a `CloseGuard`.
+/// 2. The `CloseGuard` is dropped at the end of `Layer::on_close`. On
+///    drop, `CloseGuard` checks the thread-local `CLOSE_COUNT`. If
+///    `CLOSE_COUNT` is 0, the `CloseGuard` removes the span with the
+///    `id` from the registry, as all `Layer`s that might have seen the
+///    `on_close` notification have processed it. If `CLOSE_COUNT` is
+///    greater than 0, `CloseGuard` decrements the counter by one and
+///    _does not_ remove the span from the [`Registry`].
+///
+pub(crate) struct CloseGuard<'a> {
+ id: Id,
+ registry: &'a Registry,
+ is_closing: bool,
+}
+
+impl Registry {
+ fn get(&self, id: &Id) -> Option<Ref<'_, DataInner>> {
+ self.spans.get(id_to_idx(id))
+ }
+
+ /// Returns a guard which tracks how many `Layer`s have
+ /// processed an `on_close` notification via the `CLOSE_COUNT` thread-local.
+ /// For additional details, see [`CloseGuard`].
+ ///
+ pub(crate) fn start_close(&self, id: Id) -> CloseGuard<'_> {
+ CLOSE_COUNT.with(|count| {
+ let c = count.get();
+ count.set(c + 1);
+ });
+ CloseGuard {
+ id,
+ registry: self,
+ is_closing: false,
+ }
+ }
+
+ pub(crate) fn has_per_layer_filters(&self) -> bool {
+ self.next_filter_id > 0
+ }
+
+ pub(crate) fn span_stack(&self) -> cell::Ref<'_, SpanStack> {
+ self.current_spans.get_or_default().borrow()
+ }
+}
+
+thread_local! {
+ /// `CLOSE_COUNT` is the thread-local counter used by `CloseGuard` to
+ /// track how many layers have processed the close.
+ /// For additional details, see [`CloseGuard`].
+ ///
+ static CLOSE_COUNT: Cell<usize> = Cell::new(0);
+}
+
+impl Subscriber for Registry {
+ fn register_callsite(&self, _: &'static Metadata<'static>) -> Interest {
+ if self.has_per_layer_filters() {
+ return FilterState::take_interest().unwrap_or_else(Interest::always);
+ }
+
+ Interest::always()
+ }
+
+ fn enabled(&self, _: &Metadata<'_>) -> bool {
+ if self.has_per_layer_filters() {
+ return FilterState::event_enabled();
+ }
+ true
+ }
+
+ #[inline]
+ fn new_span(&self, attrs: &span::Attributes<'_>) -> span::Id {
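+        // Determine the new span's parent: root spans have no parent;
+        // contextual spans inherit the current span (if any); otherwise, the
+        // explicitly specified parent is used. In the latter two cases,
+        // `clone_span` bumps the parent's ref count, keeping the parent open
+        // at least as long as this child.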
+ let parent = if attrs.is_root() {
+ None
+ } else if attrs.is_contextual() {
+ self.current_span().id().map(|id| self.clone_span(id))
+ } else {
+ attrs.parent().map(|id| self.clone_span(id))
+ };
+
+ let id = self
+ .spans
+ // Check out a `DataInner` entry from the pool for the new span. If
+ // there are free entries already allocated in the pool, this will
+ // preferentially reuse one; otherwise, a new `DataInner` is
+ // allocated and added to the pool.
+ .create_with(|data| {
+ data.metadata = attrs.metadata();
+ data.parent = parent;
+ data.filter_map = crate::filter::FILTERING.with(|filtering| filtering.filter_map());
+ #[cfg(debug_assertions)]
+ {
+ if data.filter_map != FilterMap::default() {
+ debug_assert!(self.has_per_layer_filters());
+ }
+ }
+
+ let refs = data.ref_count.get_mut();
+ debug_assert_eq!(*refs, 0);
+ *refs = 1;
+ })
+ .expect("Unable to allocate another span");
+ idx_to_id(id)
+ }
+
+    /// This is intentionally not implemented, as recording fields
+    /// on a span is the responsibility of layers atop this registry.
+ #[inline]
+ fn record(&self, _: &span::Id, _: &span::Record<'_>) {}
+
+ fn record_follows_from(&self, _span: &span::Id, _follows: &span::Id) {}
+
+    /// This is intentionally not implemented, as recording events
+    /// is the responsibility of layers atop this registry.
+ fn event(&self, _: &Event<'_>) {}
+
+ fn enter(&self, id: &span::Id) {
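+        // If the span was not already on this thread's stack, take an
+        // additional reference to it, so that it stays open while entered.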
+ if self
+ .current_spans
+ .get_or_default()
+ .borrow_mut()
+ .push(id.clone())
+ {
+ self.clone_span(id);
+ }
+ }
+
+ fn exit(&self, id: &span::Id) {
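+        // If this was the span's last (non-duplicate) occurrence on this
+        // thread's stack, release the reference acquired in `enter` by
+        // calling `try_close` on the whole dispatcher stack, so that
+        // `Layer`s also observe the close.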
+ if let Some(spans) = self.current_spans.get() {
+ if spans.borrow_mut().pop(id) {
+ dispatcher::get_default(|dispatch| dispatch.try_close(id.clone()));
+ }
+ }
+ }
+
+ fn clone_span(&self, id: &span::Id) -> span::Id {
+ let span = self
+ .get(id)
+ .unwrap_or_else(|| panic!(
+ "tried to clone {:?}, but no span exists with that ID\n\
+ This may be caused by consuming a parent span (`parent: span`) rather than borrowing it (`parent: &span`).",
+ id,
+ ));
+        // Like `std::sync::Arc`, additions to the ref count don't require a
+        // strong ordering; if we call `clone_span`, the reference count must
+        // already be at least 1. The only synchronization necessary is
+        // between calls to `try_close`: we have to ensure that all threads
+        // have dropped their refs to the span before the span is closed.
+ let refs = span.ref_count.fetch_add(1, Ordering::Relaxed);
+ assert_ne!(
+ refs, 0,
+ "tried to clone a span ({:?}) that already closed",
+ id
+ );
+ id.clone()
+ }
+
+ fn current_span(&self) -> Current {
+ self.current_spans
+ .get()
+ .and_then(|spans| {
+ let spans = spans.borrow();
+ let id = spans.current()?;
+ let span = self.get(id)?;
+ Some(Current::new(id.clone(), span.metadata))
+ })
+ .unwrap_or_else(Current::none)
+ }
+
+ /// Decrements the reference count of the span with the given `id`, and
+ /// removes the span if it is zero.
+ ///
+ /// The allocated span slot will be reused when a new span is created.
+ fn try_close(&self, id: span::Id) -> bool {
+ let span = match self.get(&id) {
+ Some(span) => span,
+ None if std::thread::panicking() => return false,
+ None => panic!("tried to drop a ref to {:?}, but no such span exists!", id),
+ };
+
+ let refs = span.ref_count.fetch_sub(1, Ordering::Release);
+ if !std::thread::panicking() {
+ assert!(refs < std::usize::MAX, "reference count overflow!");
+ }
+ if refs > 1 {
+ return false;
+ }
+
+ // Synchronize if we are actually removing the span (stolen
+ // from std::Arc); this ensures that all other `try_close` calls on
+ // other threads happen-before we actually remove the span.
+ fence(Ordering::Acquire);
+ true
+ }
+}
+
+impl<'a> LookupSpan<'a> for Registry {
+ type Data = Data<'a>;
+
+ fn span_data(&'a self, id: &Id) -> Option<Self::Data> {
+ let inner = self.get(id)?;
+ Some(Data { inner })
+ }
+
+ fn register_filter(&mut self) -> FilterId {
+ let id = FilterId::new(self.next_filter_id);
+ self.next_filter_id += 1;
+ id
+ }
+}
+
+// === impl CloseGuard ===
+
+impl<'a> CloseGuard<'a> {
+ pub(crate) fn set_closing(&mut self) {
+ self.is_closing = true;
+ }
+}
+
+impl<'a> Drop for CloseGuard<'a> {
+ fn drop(&mut self) {
+ // If this returns with an error, we are already panicking. At
+ // this point, there's nothing we can really do to recover
+ // except by avoiding a double-panic.
+ let _ = CLOSE_COUNT.try_with(|count| {
+ let c = count.get();
+ // Decrement the count to indicate that _this_ guard's
+ // `on_close` callback has completed.
+ //
+ // Note that we *must* do this before we actually remove the span
+ // from the registry, since dropping the `DataInner` may trigger a
+ // new close, if this span is the last reference to a parent span.
+ count.set(c - 1);
+
+ // If the current close count is 1, this stack frame is the last
+ // `on_close` call. If the span is closing, it's okay to remove the
+ // span.
+ if c == 1 && self.is_closing {
+ self.registry.spans.clear(id_to_idx(&self.id));
+ }
+ });
+ }
+}
+
+// === impl Data ===
+
+impl<'a> SpanData<'a> for Data<'a> {
+ fn id(&self) -> Id {
+ idx_to_id(self.inner.key())
+ }
+
+ fn metadata(&self) -> &'static Metadata<'static> {
+        self.inner.metadata
+ }
+
+ fn parent(&self) -> Option<&Id> {
+ self.inner.parent.as_ref()
+ }
+
+ fn extensions(&self) -> Extensions<'_> {
+        Extensions::new(self.inner.extensions.read().expect("RwLock poisoned"))
+ }
+
+ fn extensions_mut(&self) -> ExtensionsMut<'_> {
+        ExtensionsMut::new(self.inner.extensions.write().expect("RwLock poisoned"))
+ }
+
+ #[inline]
+ fn is_enabled_for(&self, filter: FilterId) -> bool {
+ self.inner.filter_map.is_enabled(filter)
+ }
+}
+
+// === impl DataInner ===
+
+impl Default for DataInner {
+ fn default() -> Self {
+ // Since `DataInner` owns a `&'static Callsite` pointer, we need
+ // something to use as the initial default value for that callsite.
+ // Since we can't access a `DataInner` until it has had actual span data
+ // inserted into it, the null metadata will never actually be accessed.
+ struct NullCallsite;
+ impl tracing_core::callsite::Callsite for NullCallsite {
+ fn set_interest(&self, _: Interest) {
+ unreachable!(
+ "/!\\ Tried to register the null callsite /!\\\n \
+ This should never have happened and is definitely a bug. \
+ A `tracing` bug report would be appreciated."
+ )
+ }
+
+ fn metadata(&self) -> &Metadata<'_> {
+ unreachable!(
+ "/!\\ Tried to access the null callsite's metadata /!\\\n \
+ This should never have happened and is definitely a bug. \
+ A `tracing` bug report would be appreciated."
+ )
+ }
+ }
+
+ static NULL_CALLSITE: NullCallsite = NullCallsite;
+ static NULL_METADATA: Metadata<'static> = tracing_core::metadata! {
+ name: "",
+ target: "",
+ level: tracing_core::Level::TRACE,
+ fields: &[],
+ callsite: &NULL_CALLSITE,
+ kind: tracing_core::metadata::Kind::SPAN,
+ };
+
+ Self {
+ filter_map: FilterMap::default(),
+ metadata: &NULL_METADATA,
+ parent: None,
+ ref_count: AtomicUsize::new(0),
+ extensions: RwLock::new(ExtensionsInner::new()),
+ }
+ }
+}
+
+impl Clear for DataInner {
+    /// Clears the span's data in place, decrementing the parent's reference count.
+ fn clear(&mut self) {
+ // A span is not considered closed until all of its children have closed.
+ // Therefore, each span's `DataInner` holds a "reference" to the parent
+ // span, keeping the parent span open until all its children have closed.
+ // When we close a span, we must then decrement the parent's ref count
+ // (potentially, allowing it to close, if this child is the last reference
+ // to that span).
+ // We have to actually unpack the option inside the `get_default`
+ // closure, since it is a `FnMut`, but testing that there _is_ a value
+ // here lets us avoid the thread-local access if we don't need the
+ // dispatcher at all.
+ if self.parent.is_some() {
+ // Note that --- because `Layered::try_close` works by calling
+ // `try_close` on the inner subscriber and using the return value to
+ // determine whether to call the `Layer`'s `on_close` callback ---
+ // we must call `try_close` on the entire subscriber stack, rather
+ // than just on the registry. If the registry called `try_close` on
+ // itself directly, the layers wouldn't see the close notification.
+ let subscriber = dispatcher::get_default(Dispatch::clone);
+ if let Some(parent) = self.parent.take() {
+ let _ = subscriber.try_close(parent);
+ }
+ }
+
+ // Clear (but do not deallocate!) the pooled `HashMap` for the span's extensions.
+ self.extensions
+ .get_mut()
+ .unwrap_or_else(|l| {
+ // This function can be called in a `Drop` impl, such as while
+ // panicking, so ignore lock poisoning.
+ l.into_inner()
+ })
+ .clear();
+
+ self.filter_map = FilterMap::default();
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::{layer::Context, registry::LookupSpan, Layer};
+ use std::{
+ collections::HashMap,
+ sync::{Arc, Mutex, Weak},
+ };
+ use tracing::{self, subscriber::with_default};
+ use tracing_core::{
+ dispatcher,
+ span::{Attributes, Id},
+ Subscriber,
+ };
+
+ #[derive(Debug)]
+ struct DoesNothing;
+ impl<S: Subscriber> Layer<S> for DoesNothing {}
+
+ struct AssertionLayer;
+ impl<S> Layer<S> for AssertionLayer
+ where
+ S: Subscriber + for<'a> LookupSpan<'a>,
+ {
+ fn on_close(&self, id: Id, ctx: Context<'_, S>) {
+ dbg!(format_args!("closing {:?}", id));
+            assert!(ctx.span(&id).is_some());
+ }
+ }
+
+ #[test]
+ fn single_layer_can_access_closed_span() {
+ let subscriber = AssertionLayer.with_subscriber(Registry::default());
+
+ with_default(subscriber, || {
+ let span = tracing::debug_span!("span");
+ drop(span);
+ });
+ }
+
+ #[test]
+ fn multiple_layers_can_access_closed_span() {
+ let subscriber = AssertionLayer
+ .and_then(AssertionLayer)
+ .with_subscriber(Registry::default());
+
+ with_default(subscriber, || {
+ let span = tracing::debug_span!("span");
+ drop(span);
+ });
+ }
+
+ struct CloseLayer {
+ inner: Arc<Mutex<CloseState>>,
+ }
+
+ struct CloseHandle {
+ state: Arc<Mutex<CloseState>>,
+ }
+
+ #[derive(Default)]
+ struct CloseState {
+ open: HashMap<&'static str, Weak<()>>,
+ closed: Vec<(&'static str, Weak<()>)>,
+ }
+
+ struct SetRemoved(Arc<()>);
+
+ impl<S> Layer<S> for CloseLayer
+ where
+ S: Subscriber + for<'a> LookupSpan<'a>,
+ {
+ fn on_new_span(&self, _: &Attributes<'_>, id: &Id, ctx: Context<'_, S>) {
+ let span = ctx.span(id).expect("Missing span; this is a bug");
+ let mut lock = self.inner.lock().unwrap();
+ let is_removed = Arc::new(());
+ assert!(
+ lock.open
+ .insert(span.name(), Arc::downgrade(&is_removed))
+ .is_none(),
+ "test layer saw multiple spans with the same name, the test is probably messed up"
+ );
+ let mut extensions = span.extensions_mut();
+ extensions.insert(SetRemoved(is_removed));
+ }
+
+ fn on_close(&self, id: Id, ctx: Context<'_, S>) {
+ let span = if let Some(span) = ctx.span(&id) {
+ span
+ } else {
+ println!(
+ "span {:?} did not exist in `on_close`, are we panicking?",
+ id
+ );
+ return;
+ };
+ let name = span.name();
+ println!("close {} ({:?})", name, id);
+ if let Ok(mut lock) = self.inner.lock() {
+ if let Some(is_removed) = lock.open.remove(name) {
+ assert!(is_removed.upgrade().is_some());
+ lock.closed.push((name, is_removed));
+ }
+ }
+ }
+ }
+
+ impl CloseLayer {
+ fn new() -> (Self, CloseHandle) {
+ let state = Arc::new(Mutex::new(CloseState::default()));
+ (
+ Self {
+ inner: state.clone(),
+ },
+ CloseHandle { state },
+ )
+ }
+ }
+
+ impl CloseState {
+ fn is_open(&self, span: &str) -> bool {
+ self.open.contains_key(span)
+ }
+
+ fn is_closed(&self, span: &str) -> bool {
+ self.closed.iter().any(|(name, _)| name == &span)
+ }
+ }
+
+ impl CloseHandle {
+ fn assert_closed(&self, span: &str) {
+ let lock = self.state.lock().unwrap();
+ assert!(
+ lock.is_closed(span),
+ "expected {} to be closed{}",
+ span,
+ if lock.is_open(span) {
+ " (it was still open)"
+ } else {
+ ", but it never existed (is there a problem with the test?)"
+ }
+ )
+ }
+
+ fn assert_open(&self, span: &str) {
+ let lock = self.state.lock().unwrap();
+ assert!(
+ lock.is_open(span),
+ "expected {} to be open{}",
+ span,
+                if lock.is_closed(span) {
+                    " (it was closed)"
+                } else {
+                    ", but it never existed (is there a problem with the test?)"
+                }
+ )
+ }
+
+ fn assert_removed(&self, span: &str) {
+ let lock = self.state.lock().unwrap();
+ let is_removed = match lock.closed.iter().find(|(name, _)| name == &span) {
+ Some((_, is_removed)) => is_removed,
+ None => panic!(
+ "expected {} to be removed from the registry, but it was not closed {}",
+ span,
+ if lock.is_closed(span) {
+ " (it was still open)"
+ } else {
+ ", but it never existed (is there a problem with the test?)"
+ }
+ ),
+ };
+ assert!(
+ is_removed.upgrade().is_none(),
+ "expected {} to have been removed from the registry",
+ span
+ )
+ }
+
+ fn assert_not_removed(&self, span: &str) {
+ let lock = self.state.lock().unwrap();
+ let is_removed = match lock.closed.iter().find(|(name, _)| name == &span) {
+ Some((_, is_removed)) => is_removed,
+ None if lock.is_open(span) => return,
+ None => unreachable!(),
+ };
+ assert!(
+ is_removed.upgrade().is_some(),
+ "expected {} to have been removed from the registry",
+ span
+ )
+ }
+
+ #[allow(unused)] // may want this for future tests
+ fn assert_last_closed(&self, span: Option<&str>) {
+ let lock = self.state.lock().unwrap();
+ let last = lock.closed.last().map(|(span, _)| span);
+ assert_eq!(
+ last,
+ span.as_ref(),
+ "expected {:?} to have closed last",
+ span
+ );
+ }
+
+ fn assert_closed_in_order(&self, order: impl AsRef<[&'static str]>) {
+ let lock = self.state.lock().unwrap();
+ let order = order.as_ref();
+ for (i, name) in order.iter().enumerate() {
+ assert_eq!(
+ lock.closed.get(i).map(|(span, _)| span),
+ Some(name),
+ "expected close order: {:?}, actual: {:?}",
+ order,
+ lock.closed.iter().map(|(name, _)| name).collect::<Vec<_>>()
+ );
+ }
+ }
+ }
+
+ #[test]
+ fn spans_are_removed_from_registry() {
+ let (close_layer, state) = CloseLayer::new();
+ let subscriber = AssertionLayer
+ .and_then(close_layer)
+ .with_subscriber(Registry::default());
+
+ // Create a `Dispatch` (which is internally reference counted) so that
+ // the subscriber lives to the end of the test. Otherwise, if we just
+ // passed the subscriber itself to `with_default`, we could see the span
+ // be dropped when the subscriber itself is dropped, destroying the
+ // registry.
+ let dispatch = dispatcher::Dispatch::new(subscriber);
+
+ dispatcher::with_default(&dispatch, || {
+ let span = tracing::debug_span!("span1");
+ drop(span);
+ let span = tracing::info_span!("span2");
+ drop(span);
+ });
+
+ state.assert_removed("span1");
+ state.assert_removed("span2");
+
+ // Ensure the registry itself outlives the span.
+ drop(dispatch);
+ }
+
+ #[test]
+ fn spans_are_only_closed_when_the_last_ref_drops() {
+ let (close_layer, state) = CloseLayer::new();
+ let subscriber = AssertionLayer
+ .and_then(close_layer)
+ .with_subscriber(Registry::default());
+
+ // Create a `Dispatch` (which is internally reference counted) so that
+ // the subscriber lives to the end of the test. Otherwise, if we just
+ // passed the subscriber itself to `with_default`, we could see the span
+ // be dropped when the subscriber itself is dropped, destroying the
+ // registry.
+ let dispatch = dispatcher::Dispatch::new(subscriber);
+
+ let span2 = dispatcher::with_default(&dispatch, || {
+ let span = tracing::debug_span!("span1");
+ drop(span);
+ let span2 = tracing::info_span!("span2");
+ let span2_clone = span2.clone();
+ drop(span2);
+ span2_clone
+ });
+
+ state.assert_removed("span1");
+ state.assert_not_removed("span2");
+
+ drop(span2);
+ state.assert_removed("span1");
+
+ // Ensure the registry itself outlives the span.
+ drop(dispatch);
+ }
+
+ #[test]
+ fn span_enter_guards_are_dropped_out_of_order() {
+ let (close_layer, state) = CloseLayer::new();
+ let subscriber = AssertionLayer
+ .and_then(close_layer)
+ .with_subscriber(Registry::default());
+
+ // Create a `Dispatch` (which is internally reference counted) so that
+ // the subscriber lives to the end of the test. Otherwise, if we just
+ // passed the subscriber itself to `with_default`, we could see the span
+ // be dropped when the subscriber itself is dropped, destroying the
+ // registry.
+ let dispatch = dispatcher::Dispatch::new(subscriber);
+
+ dispatcher::with_default(&dispatch, || {
+ let span1 = tracing::debug_span!("span1");
+ let span2 = tracing::info_span!("span2");
+
+ let enter1 = span1.enter();
+ let enter2 = span2.enter();
+
+ drop(enter1);
+ drop(span1);
+
+ state.assert_removed("span1");
+ state.assert_not_removed("span2");
+
+ drop(enter2);
+ state.assert_not_removed("span2");
+
+ drop(span2);
+ state.assert_removed("span1");
+ state.assert_removed("span2");
+ });
+ }
+
+ #[test]
+ fn child_closes_parent() {
+ // This test asserts that if a parent span's handle is dropped before
+ // a child span's handle, the parent will remain open until child
+ // closes, and will then be closed.
+
+ let (close_layer, state) = CloseLayer::new();
+ let subscriber = close_layer.with_subscriber(Registry::default());
+
+ let dispatch = dispatcher::Dispatch::new(subscriber);
+
+ dispatcher::with_default(&dispatch, || {
+ let span1 = tracing::info_span!("parent");
+ let span2 = tracing::info_span!(parent: &span1, "child");
+
+ state.assert_open("parent");
+ state.assert_open("child");
+
+ drop(span1);
+ state.assert_open("parent");
+ state.assert_open("child");
+
+ drop(span2);
+ state.assert_closed("parent");
+ state.assert_closed("child");
+ });
+ }
+
+ #[test]
+ fn child_closes_grandparent() {
+ // This test asserts that, when a span is kept open by a child which
+ // is *itself* kept open by a child, closing the grandchild will close
+ // both the parent *and* the grandparent.
+ let (close_layer, state) = CloseLayer::new();
+ let subscriber = close_layer.with_subscriber(Registry::default());
+
+ let dispatch = dispatcher::Dispatch::new(subscriber);
+
+ dispatcher::with_default(&dispatch, || {
+ let span1 = tracing::info_span!("grandparent");
+ let span2 = tracing::info_span!(parent: &span1, "parent");
+ let span3 = tracing::info_span!(parent: &span2, "child");
+
+ state.assert_open("grandparent");
+ state.assert_open("parent");
+ state.assert_open("child");
+
+ drop(span1);
+ drop(span2);
+ state.assert_open("grandparent");
+ state.assert_open("parent");
+ state.assert_open("child");
+
+ drop(span3);
+
+ state.assert_closed_in_order(&["child", "parent", "grandparent"]);
+ });
+ }
+}
diff --git a/vendor/tracing-subscriber/src/registry/stack.rs b/vendor/tracing-subscriber/src/registry/stack.rs
new file mode 100644
index 000000000..4a3f7e59d
--- /dev/null
+++ b/vendor/tracing-subscriber/src/registry/stack.rs
@@ -0,0 +1,77 @@
+pub(crate) use tracing_core::span::Id;
+
+#[derive(Debug)]
+struct ContextId {
+ id: Id,
+ duplicate: bool,
+}
+
+/// `SpanStack` tracks the spans that are currently being executed, on a
+/// thread-local basis.
+///
+/// Maintaining a separate stack of current spans for each thread is a
+/// semantic choice, as each span can be executing on a different thread.
+#[derive(Debug, Default)]
+pub(crate) struct SpanStack {
+ stack: Vec<ContextId>,
+}
+
+impl SpanStack {
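+    /// Pushes a span onto the stack of entered spans, returning `true` if it
+    /// was not already on the stack (in which case the caller should retain
+    /// an additional reference to the span).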
+ #[inline]
+ pub(super) fn push(&mut self, id: Id) -> bool {
+ let duplicate = self.stack.iter().any(|i| i.id == id);
+ self.stack.push(ContextId { id, duplicate });
+ !duplicate
+ }
+
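+    /// Removes the topmost occurrence of `expected_id` from the stack,
+    /// returning `true` if that entry was not a duplicate of one pushed
+    /// earlier (in which case the caller should release its reference).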
+ #[inline]
+ pub(super) fn pop(&mut self, expected_id: &Id) -> bool {
+ if let Some((idx, _)) = self
+ .stack
+ .iter()
+ .enumerate()
+ .rev()
+ .find(|(_, ctx_id)| ctx_id.id == *expected_id)
+ {
+ let ContextId { id: _, duplicate } = self.stack.remove(idx);
+ return !duplicate;
+ }
+ false
+ }
+
+ #[inline]
+ pub(crate) fn iter(&self) -> impl Iterator<Item = &Id> {
+ self.stack
+ .iter()
+ .rev()
+ .filter_map(|ContextId { id, duplicate }| if !*duplicate { Some(id) } else { None })
+ }
+
+ #[inline]
+ pub(crate) fn current(&self) -> Option<&Id> {
+ self.iter().next()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::{Id, SpanStack};
+
+ #[test]
+ fn pop_last_span() {
+ let mut stack = SpanStack::default();
+ let id = Id::from_u64(1);
+ stack.push(id.clone());
+
+ assert!(stack.pop(&id));
+ }
+
+ #[test]
+ fn pop_first_span() {
+ let mut stack = SpanStack::default();
+ stack.push(Id::from_u64(1));
+ stack.push(Id::from_u64(2));
+
+ let id = Id::from_u64(1);
+ assert!(stack.pop(&id));
+ }
+}