Diffstat (limited to 'third_party/rust/tokio/src/util/pad.rs')
-rw-r--r--  third_party/rust/tokio/src/util/pad.rs  52
1 file changed, 52 insertions, 0 deletions
diff --git a/third_party/rust/tokio/src/util/pad.rs b/third_party/rust/tokio/src/util/pad.rs
new file mode 100644
index 0000000000..bf0913ca85
--- /dev/null
+++ b/third_party/rust/tokio/src/util/pad.rs
@@ -0,0 +1,52 @@
+use core::fmt;
+use core::ops::{Deref, DerefMut};
+
+#[derive(Clone, Copy, Default, Hash, PartialEq, Eq)]
+// Starting with Intel's Sandy Bridge, the spatial prefetcher pulls pairs of 64-byte cache
+// lines at a time, so we have to align to 128 bytes rather than 64.
+//
+// Sources:
+// - https://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-optimization-manual.pdf
+// - https://github.com/facebook/folly/blob/1b5288e6eea6df074758f877c849b6e73bbb9fbb/folly/lang/Align.h#L107
+#[cfg_attr(target_arch = "x86_64", repr(align(128)))]
+#[cfg_attr(not(target_arch = "x86_64"), repr(align(64)))]
+pub(crate) struct CachePadded<T> {
+ value: T,
+}
+
+unsafe impl<T: Send> Send for CachePadded<T> {}
+unsafe impl<T: Sync> Sync for CachePadded<T> {}
+
+impl<T> CachePadded<T> {
+ pub(crate) fn new(t: T) -> CachePadded<T> {
+ CachePadded::<T> { value: t }
+ }
+}
+
+impl<T> Deref for CachePadded<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &self.value
+ }
+}
+
+impl<T> DerefMut for CachePadded<T> {
+ fn deref_mut(&mut self) -> &mut T {
+ &mut self.value
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("CachePadded")
+ .field("value", &self.value)
+ .finish()
+ }
+}
+
+impl<T> From<T> for CachePadded<T> {
+ fn from(t: T) -> Self {
+ CachePadded::new(t)
+ }
+}
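
For context: CachePadded wraps a value and over-aligns it so that two adjacent instances never share a cache line (or a prefetched pair of lines on x86_64), which prevents false sharing between threads that update neighboring fields. The sketch below shows the access pattern this wrapper exists for. It is written against crossbeam_utils::CachePadded, the publicly available equivalent of this crate-private type; the crossbeam-utils dependency and the Counters layout are assumptions of the example, not part of this patch. Inside tokio, the type added above would be used the same way.

use std::sync::atomic::{AtomicU64, Ordering};
use std::thread;

// Assumption: crossbeam-utils is available; its CachePadded exposes the
// same new/Deref/DerefMut surface as the crate-private type in this patch.
use crossbeam_utils::CachePadded;

struct Counters {
    // Each counter lands on its own (prefetch-pair-sized) cache line, so
    // the two writer threads below do not invalidate each other's lines.
    a: CachePadded<AtomicU64>,
    b: CachePadded<AtomicU64>,
}

fn main() {
    let counters = Counters {
        a: CachePadded::new(AtomicU64::new(0)),
        b: CachePadded::new(AtomicU64::new(0)),
    };

    thread::scope(|s| {
        // Each thread hammers only its own counter.
        s.spawn(|| {
            for _ in 0..1_000_000 {
                counters.a.fetch_add(1, Ordering::Relaxed);
            }
        });
        s.spawn(|| {
            for _ in 0..1_000_000 {
                counters.b.fetch_add(1, Ordering::Relaxed);
            }
        });
    });

    // Deref/DerefMut make the wrapper transparent at the use site.
    assert_eq!(counters.a.load(Ordering::Relaxed), 1_000_000);
    assert_eq!(counters.b.load(Ordering::Relaxed), 1_000_000);
}

Without the padding, the two counters would typically sit 8 bytes apart on the same cache line, and each Relaxed fetch_add would bounce that line between cores; with it, every write stays local to its core's cache, which is the point of the 64/128-byte repr(align) attributes above.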