Diffstat (limited to 'vendor/perf-event-open-sys')
-rw-r--r--   vendor/perf-event-open-sys/.cargo-checksum.json      1
-rw-r--r--   vendor/perf-event-open-sys/Cargo.toml                23
-rw-r--r--   vendor/perf-event-open-sys/LICENSE-APACHE           201
-rw-r--r--   vendor/perf-event-open-sys/LICENSE-MIT               23
-rw-r--r--   vendor/perf-event-open-sys/README.md                 47
-rwxr-xr-x   vendor/perf-event-open-sys/regenerate.sh             12
-rw-r--r--   vendor/perf-event-open-sys/src/bindings.rs         2897
-rw-r--r--   vendor/perf-event-open-sys/src/lib.rs               260
-rw-r--r--   vendor/perf-event-open-sys/wrapper.h                 23
9 files changed, 3487 insertions, 0 deletions
diff --git a/vendor/perf-event-open-sys/.cargo-checksum.json b/vendor/perf-event-open-sys/.cargo-checksum.json
new file mode 100644
index 000000000..94c12b528
--- /dev/null
+++ b/vendor/perf-event-open-sys/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"43d4960bd26abbc5e6682924a3a35cd5b96ddde703f8578e1b304dcbfcc77a7a","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"383389744bfd0cd720224f8a01fb6dd28be093ba4965363dcd0d6ae8da3d087b","regenerate.sh":"75c81d43681cd44c961da0399605a8cfe1b05b6d3a8659e11e9984f9f7c88618","src/bindings.rs":"ee0d5d224dfeae4feefbbfbf2ce46b7617ba109fa1b7f88ab886a99131d41bab","src/lib.rs":"42c2107446f81663e4c59a11148ebc8df5241b62a58f0712133382667bc35176","wrapper.h":"22abab03fcdb32f39c72756da8c45dd4d4b6cbf3de24d8922dbc3a460bccf27a"},"package":"ce9bedf5da2c234fdf2391ede2b90fabf585355f33100689bc364a3ea558561a"} \ No newline at end of file
diff --git a/vendor/perf-event-open-sys/Cargo.toml b/vendor/perf-event-open-sys/Cargo.toml
new file mode 100644
index 000000000..1fd0b9dd4
--- /dev/null
+++ b/vendor/perf-event-open-sys/Cargo.toml
@@ -0,0 +1,23 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+edition = "2018"
+name = "perf-event-open-sys"
+version = "1.0.1"
+authors = ["Jim Blandy <jimb@red-bean.com>"]
+description = "Unsafe, direct bindings for Linux's perf_event_open system call, with associated\ntypes and constants.\n"
+readme = "README.md"
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/jimblandy/perf-event-open-sys.git"
+[dependencies.libc]
+version = "0.2"
diff --git a/vendor/perf-event-open-sys/LICENSE-APACHE b/vendor/perf-event-open-sys/LICENSE-APACHE
new file mode 100644
index 000000000..16fe87b06
--- /dev/null
+++ b/vendor/perf-event-open-sys/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/perf-event-open-sys/LICENSE-MIT b/vendor/perf-event-open-sys/LICENSE-MIT
new file mode 100644
index 000000000..31aa79387
--- /dev/null
+++ b/vendor/perf-event-open-sys/LICENSE-MIT
@@ -0,0 +1,23 @@
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/vendor/perf-event-open-sys/README.md b/vendor/perf-event-open-sys/README.md
new file mode 100644
index 000000000..faf43d4ff
--- /dev/null
+++ b/vendor/perf-event-open-sys/README.md
@@ -0,0 +1,47 @@
+## Direct, unsafe Rust bindings for Linux's `perf_event_open` system call
+
+This crate exports `unsafe` Rust wrappers for Linux system calls for accessing
+performance monitoring counters and tracing facilities. This includes:
+
+- the processor's own performance monitoring registers
+- kernel counters for things like context switches and page faults
+- kernel tracepoints, kprobes, and uprobes
+- processor tracing facilities like Intel's Branch Trace Store (BTS)
+- hardware breakpoints
+
+This crate provides:
+
+- a Rust wrapper for the Linux `perf_event_open` system call
+- Rust wrappers for the ioctls you can apply to a file descriptor returned by `perf_event_open`
+- bindings for `perf_event_open`'s associated header files, automatically generated by `bindgen`
+
+All functions are direct, `unsafe` wrappers for the underlying calls. They
+operate on raw pointers and raw file descriptors.
+
+For a type-safe API for basic functionality, see the [perf-event] crate.
+
+[perf-event]: https://crates.io/crates/perf-event
+
+### Updating the System Call Bindings
+
+The `bindings` module defines Rust equivalents for the types and constants used
+by the Linux `perf_event_open` system call and its related ioctls. These are
+generated automatically from the kernel's C header files, using [bindgen]. Both
+the interface and the underlying functionality are quite complex, and new
+features are added at a steady pace. To update the generated bindings:
+
+- Run the `regenerate.sh` script, found in the same directory as this
+  `README.md` file. This runs bindgen and splices its output into the `bindings`
+ module's source code, preserving the documentation.
+
+- Fix the comments in `src/lib.rs` explaining exactly which version of the
+ kernel headers you generated the bindings from.
+
+- Update the crate's major version. Newer versions of the kernel headers may
+ add fields to structs, which is a breaking change. (As explained in the
+ module documentation, properly written user crates should not be affected,
+ but it seems unnecessary to risk `cargo update` breaking builds. When users
+ need new functionality from the bindings, they can update the major version
+ number of this crate they request.)
+
+[bindgen]: https://crates.io/crates/bindgen
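
Editor's note: the README above describes direct, `unsafe` wrappers: a `perf_event_open` function, wrappers for its ioctls, and the bindgen-generated `bindings` module, where C enum members such as `PERF_TYPE_HARDWARE` surface as prefixed constants like `perf_type_id_PERF_TYPE_HARDWARE`. The following is a minimal sketch of counting retired instructions on the calling thread with these raw wrappers. It is not part of the vendored crate; the ioctl wrapper names (`ioctls::ENABLE`, `ioctls::DISABLE`) and the bitfield accessors (`set_disabled`, `set_exclude_kernel`, `set_exclude_hv`) are assumptions based on this crate's generated code and should be checked against `src/lib.rs` and `src/bindings.rs`.

```rust
// Minimal sketch (not the crate's own example): count retired instructions on
// the calling thread using the raw wrappers. Also requires the `libc` crate.
use perf_event_open_sys as sys;

fn main() {
    // bindgen was run with --with-derive-default, so a zeroed attr is available.
    let mut attrs = sys::bindings::perf_event_attr::default();
    attrs.size = std::mem::size_of::<sys::bindings::perf_event_attr>() as u32;
    attrs.type_ = sys::bindings::perf_type_id_PERF_TYPE_HARDWARE;
    attrs.config = sys::bindings::perf_hw_id_PERF_COUNT_HW_INSTRUCTIONS as u64;
    // Bitfield accessors generated by bindgen (names assumed from the bindings).
    attrs.set_disabled(1);
    attrs.set_exclude_kernel(1);
    attrs.set_exclude_hv(1);

    // pid = 0 (calling process), cpu = -1 (any CPU), no group leader, no flags.
    let fd = unsafe { sys::perf_event_open(&mut attrs, 0, -1, -1, 0) };
    assert!(fd >= 0, "perf_event_open failed: {}", std::io::Error::last_os_error());

    unsafe {
        sys::ioctls::ENABLE(fd, 0);
        // ... code to be measured goes here ...
        sys::ioctls::DISABLE(fd, 0);

        // With read_format left at zero, the fd yields a single u64 counter value.
        let mut count: u64 = 0;
        let n = libc::read(fd, &mut count as *mut u64 as *mut libc::c_void, 8);
        assert_eq!(n, 8);
        println!("instructions retired: {}", count);
        libc::close(fd);
    }
}
```

Because no `read_format` flags are set, `read(2)` on the event file descriptor returns one 8-byte counter value, which is why a single `u64` read suffices here.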
diff --git a/vendor/perf-event-open-sys/regenerate.sh b/vendor/perf-event-open-sys/regenerate.sh
new file mode 100755
index 000000000..81cfea680
--- /dev/null
+++ b/vendor/perf-event-open-sys/regenerate.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -eu
+
+cd "$(dirname "$0")"
+
+(
+ sed -e '/automatically generated by rust-bindgen/,$d' src/bindings.rs
+ bindgen --with-derive-default wrapper.h
+) > new-bindings.rs~
+
+mv new-bindings.rs~ src/bindings.rs
diff --git a/vendor/perf-event-open-sys/src/bindings.rs b/vendor/perf-event-open-sys/src/bindings.rs
new file mode 100644
index 000000000..0cdd40742
--- /dev/null
+++ b/vendor/perf-event-open-sys/src/bindings.rs
@@ -0,0 +1,2897 @@
+//! Types and constants used with `perf_event_open`.
+//!
+//! This module contains types and constants for use with the
+//! [`perf_event_open`][man] system call. These are automatically generated from
+//! the header files `<linux/perf_event.h>` and `<linux/hw_breakpoint.h>` by the
+//! Rust [`bindgen`][bindgen] tool.
+//!
+//! It's not always obvious how `bindgen` will choose to reflect a given C
+//! construct into Rust. The best approach I've found is simply to search
+//! [the source code][src] for the C identifier name and see what `bindgen` did
+//! with it.
+//!
+//! [man]: http://man7.org/linux/man-pages/man2/perf_event_open.2.html
+//! [bindgen]: https://github.com/rust-lang/rust-bindgen
+//! [src]: ../../src/perf_event_open_sys/bindings.rs.html
+
+#![allow(dead_code)]
+#![allow(non_upper_case_globals)]
+#![allow(non_camel_case_types)]
+#![allow(non_snake_case)]
+
+/* automatically generated by rust-bindgen 0.54.1 */
+
+#[repr(C)]
+#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct __BindgenBitfieldUnit<Storage, Align> {
+ storage: Storage,
+ align: [Align; 0],
+}
+impl<Storage, Align> __BindgenBitfieldUnit<Storage, Align> {
+ #[inline]
+ pub const fn new(storage: Storage) -> Self {
+ Self { storage, align: [] }
+ }
+}
+impl<Storage, Align> __BindgenBitfieldUnit<Storage, Align>
+where
+ Storage: AsRef<[u8]> + AsMut<[u8]>,
+{
+ #[inline]
+ pub fn get_bit(&self, index: usize) -> bool {
+ debug_assert!(index / 8 < self.storage.as_ref().len());
+ let byte_index = index / 8;
+ let byte = self.storage.as_ref()[byte_index];
+ let bit_index = if cfg!(target_endian = "big") {
+ 7 - (index % 8)
+ } else {
+ index % 8
+ };
+ let mask = 1 << bit_index;
+ byte & mask == mask
+ }
+ #[inline]
+ pub fn set_bit(&mut self, index: usize, val: bool) {
+ debug_assert!(index / 8 < self.storage.as_ref().len());
+ let byte_index = index / 8;
+ let byte = &mut self.storage.as_mut()[byte_index];
+ let bit_index = if cfg!(target_endian = "big") {
+ 7 - (index % 8)
+ } else {
+ index % 8
+ };
+ let mask = 1 << bit_index;
+ if val {
+ *byte |= mask;
+ } else {
+ *byte &= !mask;
+ }
+ }
+ #[inline]
+ pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
+ debug_assert!(bit_width <= 64);
+ debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+ debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
+ let mut val = 0;
+ for i in 0..(bit_width as usize) {
+ if self.get_bit(i + bit_offset) {
+ let index = if cfg!(target_endian = "big") {
+ bit_width as usize - 1 - i
+ } else {
+ i
+ };
+ val |= 1 << index;
+ }
+ }
+ val
+ }
+ #[inline]
+ pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
+ debug_assert!(bit_width <= 64);
+ debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+ debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
+ for i in 0..(bit_width as usize) {
+ let mask = 1 << i;
+ let val_bit_is_set = val & mask == mask;
+ let index = if cfg!(target_endian = "big") {
+ bit_width as usize - 1 - i
+ } else {
+ i
+ };
+ self.set_bit(index + bit_offset, val_bit_is_set);
+ }
+ }
+}
+#[repr(C)]
+#[derive(Default)]
+pub struct __IncompleteArrayField<T>(::std::marker::PhantomData<T>, [T; 0]);
+impl<T> __IncompleteArrayField<T> {
+ #[inline]
+ pub const fn new() -> Self {
+ __IncompleteArrayField(::std::marker::PhantomData, [])
+ }
+ #[inline]
+ pub fn as_ptr(&self) -> *const T {
+ self as *const _ as *const T
+ }
+ #[inline]
+ pub fn as_mut_ptr(&mut self) -> *mut T {
+ self as *mut _ as *mut T
+ }
+ #[inline]
+ pub unsafe fn as_slice(&self, len: usize) -> &[T] {
+ ::std::slice::from_raw_parts(self.as_ptr(), len)
+ }
+ #[inline]
+ pub unsafe fn as_mut_slice(&mut self, len: usize) -> &mut [T] {
+ ::std::slice::from_raw_parts_mut(self.as_mut_ptr(), len)
+ }
+}
+impl<T> ::std::fmt::Debug for __IncompleteArrayField<T> {
+ fn fmt(&self, fmt: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
+ fmt.write_str("__IncompleteArrayField")
+ }
+}
+pub const __BITS_PER_LONG: u32 = 64;
+pub const __FD_SETSIZE: u32 = 1024;
+pub const _IOC_NRBITS: u32 = 8;
+pub const _IOC_TYPEBITS: u32 = 8;
+pub const _IOC_SIZEBITS: u32 = 14;
+pub const _IOC_DIRBITS: u32 = 2;
+pub const _IOC_NRMASK: u32 = 255;
+pub const _IOC_TYPEMASK: u32 = 255;
+pub const _IOC_SIZEMASK: u32 = 16383;
+pub const _IOC_DIRMASK: u32 = 3;
+pub const _IOC_NRSHIFT: u32 = 0;
+pub const _IOC_TYPESHIFT: u32 = 8;
+pub const _IOC_SIZESHIFT: u32 = 16;
+pub const _IOC_DIRSHIFT: u32 = 30;
+pub const _IOC_NONE: u32 = 0;
+pub const _IOC_WRITE: u32 = 1;
+pub const _IOC_READ: u32 = 2;
+pub const IOC_IN: u32 = 1073741824;
+pub const IOC_OUT: u32 = 2147483648;
+pub const IOC_INOUT: u32 = 3221225472;
+pub const IOCSIZE_MASK: u32 = 1073676288;
+pub const IOCSIZE_SHIFT: u32 = 16;
+pub const __LITTLE_ENDIAN: u32 = 1234;
+pub const PERF_ATTR_SIZE_VER0: u32 = 64;
+pub const PERF_ATTR_SIZE_VER1: u32 = 72;
+pub const PERF_ATTR_SIZE_VER2: u32 = 80;
+pub const PERF_ATTR_SIZE_VER3: u32 = 96;
+pub const PERF_ATTR_SIZE_VER4: u32 = 104;
+pub const PERF_ATTR_SIZE_VER5: u32 = 112;
+pub const PERF_ATTR_SIZE_VER6: u32 = 120;
+pub const PERF_RECORD_MISC_CPUMODE_MASK: u32 = 7;
+pub const PERF_RECORD_MISC_CPUMODE_UNKNOWN: u32 = 0;
+pub const PERF_RECORD_MISC_KERNEL: u32 = 1;
+pub const PERF_RECORD_MISC_USER: u32 = 2;
+pub const PERF_RECORD_MISC_HYPERVISOR: u32 = 3;
+pub const PERF_RECORD_MISC_GUEST_KERNEL: u32 = 4;
+pub const PERF_RECORD_MISC_GUEST_USER: u32 = 5;
+pub const PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT: u32 = 4096;
+pub const PERF_RECORD_MISC_MMAP_DATA: u32 = 8192;
+pub const PERF_RECORD_MISC_COMM_EXEC: u32 = 8192;
+pub const PERF_RECORD_MISC_FORK_EXEC: u32 = 8192;
+pub const PERF_RECORD_MISC_SWITCH_OUT: u32 = 8192;
+pub const PERF_RECORD_MISC_EXACT_IP: u32 = 16384;
+pub const PERF_RECORD_MISC_SWITCH_OUT_PREEMPT: u32 = 16384;
+pub const PERF_RECORD_MISC_EXT_RESERVED: u32 = 32768;
+pub const PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER: u32 = 1;
+pub const PERF_MAX_STACK_DEPTH: u32 = 127;
+pub const PERF_MAX_CONTEXTS_PER_STACK: u32 = 8;
+pub const PERF_AUX_FLAG_TRUNCATED: u32 = 1;
+pub const PERF_AUX_FLAG_OVERWRITE: u32 = 2;
+pub const PERF_AUX_FLAG_PARTIAL: u32 = 4;
+pub const PERF_AUX_FLAG_COLLISION: u32 = 8;
+pub const PERF_FLAG_FD_NO_GROUP: u32 = 1;
+pub const PERF_FLAG_FD_OUTPUT: u32 = 2;
+pub const PERF_FLAG_PID_CGROUP: u32 = 4;
+pub const PERF_FLAG_FD_CLOEXEC: u32 = 8;
+pub const PERF_MEM_OP_NA: u32 = 1;
+pub const PERF_MEM_OP_LOAD: u32 = 2;
+pub const PERF_MEM_OP_STORE: u32 = 4;
+pub const PERF_MEM_OP_PFETCH: u32 = 8;
+pub const PERF_MEM_OP_EXEC: u32 = 16;
+pub const PERF_MEM_OP_SHIFT: u32 = 0;
+pub const PERF_MEM_LVL_NA: u32 = 1;
+pub const PERF_MEM_LVL_HIT: u32 = 2;
+pub const PERF_MEM_LVL_MISS: u32 = 4;
+pub const PERF_MEM_LVL_L1: u32 = 8;
+pub const PERF_MEM_LVL_LFB: u32 = 16;
+pub const PERF_MEM_LVL_L2: u32 = 32;
+pub const PERF_MEM_LVL_L3: u32 = 64;
+pub const PERF_MEM_LVL_LOC_RAM: u32 = 128;
+pub const PERF_MEM_LVL_REM_RAM1: u32 = 256;
+pub const PERF_MEM_LVL_REM_RAM2: u32 = 512;
+pub const PERF_MEM_LVL_REM_CCE1: u32 = 1024;
+pub const PERF_MEM_LVL_REM_CCE2: u32 = 2048;
+pub const PERF_MEM_LVL_IO: u32 = 4096;
+pub const PERF_MEM_LVL_UNC: u32 = 8192;
+pub const PERF_MEM_LVL_SHIFT: u32 = 5;
+pub const PERF_MEM_REMOTE_REMOTE: u32 = 1;
+pub const PERF_MEM_REMOTE_SHIFT: u32 = 37;
+pub const PERF_MEM_LVLNUM_L1: u32 = 1;
+pub const PERF_MEM_LVLNUM_L2: u32 = 2;
+pub const PERF_MEM_LVLNUM_L3: u32 = 3;
+pub const PERF_MEM_LVLNUM_L4: u32 = 4;
+pub const PERF_MEM_LVLNUM_ANY_CACHE: u32 = 11;
+pub const PERF_MEM_LVLNUM_LFB: u32 = 12;
+pub const PERF_MEM_LVLNUM_RAM: u32 = 13;
+pub const PERF_MEM_LVLNUM_PMEM: u32 = 14;
+pub const PERF_MEM_LVLNUM_NA: u32 = 15;
+pub const PERF_MEM_LVLNUM_SHIFT: u32 = 33;
+pub const PERF_MEM_SNOOP_NA: u32 = 1;
+pub const PERF_MEM_SNOOP_NONE: u32 = 2;
+pub const PERF_MEM_SNOOP_HIT: u32 = 4;
+pub const PERF_MEM_SNOOP_MISS: u32 = 8;
+pub const PERF_MEM_SNOOP_HITM: u32 = 16;
+pub const PERF_MEM_SNOOP_SHIFT: u32 = 19;
+pub const PERF_MEM_SNOOPX_FWD: u32 = 1;
+pub const PERF_MEM_SNOOPX_SHIFT: u32 = 37;
+pub const PERF_MEM_LOCK_NA: u32 = 1;
+pub const PERF_MEM_LOCK_LOCKED: u32 = 2;
+pub const PERF_MEM_LOCK_SHIFT: u32 = 24;
+pub const PERF_MEM_TLB_NA: u32 = 1;
+pub const PERF_MEM_TLB_HIT: u32 = 2;
+pub const PERF_MEM_TLB_MISS: u32 = 4;
+pub const PERF_MEM_TLB_L1: u32 = 8;
+pub const PERF_MEM_TLB_L2: u32 = 16;
+pub const PERF_MEM_TLB_WK: u32 = 32;
+pub const PERF_MEM_TLB_OS: u32 = 64;
+pub const PERF_MEM_TLB_SHIFT: u32 = 26;
+pub const __X32_SYSCALL_BIT: u32 = 1073741824;
+pub const _ASM_X86_UNISTD_64_H: u32 = 1;
+pub const __NR_read: u32 = 0;
+pub const __NR_write: u32 = 1;
+pub const __NR_open: u32 = 2;
+pub const __NR_close: u32 = 3;
+pub const __NR_stat: u32 = 4;
+pub const __NR_fstat: u32 = 5;
+pub const __NR_lstat: u32 = 6;
+pub const __NR_poll: u32 = 7;
+pub const __NR_lseek: u32 = 8;
+pub const __NR_mmap: u32 = 9;
+pub const __NR_mprotect: u32 = 10;
+pub const __NR_munmap: u32 = 11;
+pub const __NR_brk: u32 = 12;
+pub const __NR_rt_sigaction: u32 = 13;
+pub const __NR_rt_sigprocmask: u32 = 14;
+pub const __NR_rt_sigreturn: u32 = 15;
+pub const __NR_ioctl: u32 = 16;
+pub const __NR_pread64: u32 = 17;
+pub const __NR_pwrite64: u32 = 18;
+pub const __NR_readv: u32 = 19;
+pub const __NR_writev: u32 = 20;
+pub const __NR_access: u32 = 21;
+pub const __NR_pipe: u32 = 22;
+pub const __NR_select: u32 = 23;
+pub const __NR_sched_yield: u32 = 24;
+pub const __NR_mremap: u32 = 25;
+pub const __NR_msync: u32 = 26;
+pub const __NR_mincore: u32 = 27;
+pub const __NR_madvise: u32 = 28;
+pub const __NR_shmget: u32 = 29;
+pub const __NR_shmat: u32 = 30;
+pub const __NR_shmctl: u32 = 31;
+pub const __NR_dup: u32 = 32;
+pub const __NR_dup2: u32 = 33;
+pub const __NR_pause: u32 = 34;
+pub const __NR_nanosleep: u32 = 35;
+pub const __NR_getitimer: u32 = 36;
+pub const __NR_alarm: u32 = 37;
+pub const __NR_setitimer: u32 = 38;
+pub const __NR_getpid: u32 = 39;
+pub const __NR_sendfile: u32 = 40;
+pub const __NR_socket: u32 = 41;
+pub const __NR_connect: u32 = 42;
+pub const __NR_accept: u32 = 43;
+pub const __NR_sendto: u32 = 44;
+pub const __NR_recvfrom: u32 = 45;
+pub const __NR_sendmsg: u32 = 46;
+pub const __NR_recvmsg: u32 = 47;
+pub const __NR_shutdown: u32 = 48;
+pub const __NR_bind: u32 = 49;
+pub const __NR_listen: u32 = 50;
+pub const __NR_getsockname: u32 = 51;
+pub const __NR_getpeername: u32 = 52;
+pub const __NR_socketpair: u32 = 53;
+pub const __NR_setsockopt: u32 = 54;
+pub const __NR_getsockopt: u32 = 55;
+pub const __NR_clone: u32 = 56;
+pub const __NR_fork: u32 = 57;
+pub const __NR_vfork: u32 = 58;
+pub const __NR_execve: u32 = 59;
+pub const __NR_exit: u32 = 60;
+pub const __NR_wait4: u32 = 61;
+pub const __NR_kill: u32 = 62;
+pub const __NR_uname: u32 = 63;
+pub const __NR_semget: u32 = 64;
+pub const __NR_semop: u32 = 65;
+pub const __NR_semctl: u32 = 66;
+pub const __NR_shmdt: u32 = 67;
+pub const __NR_msgget: u32 = 68;
+pub const __NR_msgsnd: u32 = 69;
+pub const __NR_msgrcv: u32 = 70;
+pub const __NR_msgctl: u32 = 71;
+pub const __NR_fcntl: u32 = 72;
+pub const __NR_flock: u32 = 73;
+pub const __NR_fsync: u32 = 74;
+pub const __NR_fdatasync: u32 = 75;
+pub const __NR_truncate: u32 = 76;
+pub const __NR_ftruncate: u32 = 77;
+pub const __NR_getdents: u32 = 78;
+pub const __NR_getcwd: u32 = 79;
+pub const __NR_chdir: u32 = 80;
+pub const __NR_fchdir: u32 = 81;
+pub const __NR_rename: u32 = 82;
+pub const __NR_mkdir: u32 = 83;
+pub const __NR_rmdir: u32 = 84;
+pub const __NR_creat: u32 = 85;
+pub const __NR_link: u32 = 86;
+pub const __NR_unlink: u32 = 87;
+pub const __NR_symlink: u32 = 88;
+pub const __NR_readlink: u32 = 89;
+pub const __NR_chmod: u32 = 90;
+pub const __NR_fchmod: u32 = 91;
+pub const __NR_chown: u32 = 92;
+pub const __NR_fchown: u32 = 93;
+pub const __NR_lchown: u32 = 94;
+pub const __NR_umask: u32 = 95;
+pub const __NR_gettimeofday: u32 = 96;
+pub const __NR_getrlimit: u32 = 97;
+pub const __NR_getrusage: u32 = 98;
+pub const __NR_sysinfo: u32 = 99;
+pub const __NR_times: u32 = 100;
+pub const __NR_ptrace: u32 = 101;
+pub const __NR_getuid: u32 = 102;
+pub const __NR_syslog: u32 = 103;
+pub const __NR_getgid: u32 = 104;
+pub const __NR_setuid: u32 = 105;
+pub const __NR_setgid: u32 = 106;
+pub const __NR_geteuid: u32 = 107;
+pub const __NR_getegid: u32 = 108;
+pub const __NR_setpgid: u32 = 109;
+pub const __NR_getppid: u32 = 110;
+pub const __NR_getpgrp: u32 = 111;
+pub const __NR_setsid: u32 = 112;
+pub const __NR_setreuid: u32 = 113;
+pub const __NR_setregid: u32 = 114;
+pub const __NR_getgroups: u32 = 115;
+pub const __NR_setgroups: u32 = 116;
+pub const __NR_setresuid: u32 = 117;
+pub const __NR_getresuid: u32 = 118;
+pub const __NR_setresgid: u32 = 119;
+pub const __NR_getresgid: u32 = 120;
+pub const __NR_getpgid: u32 = 121;
+pub const __NR_setfsuid: u32 = 122;
+pub const __NR_setfsgid: u32 = 123;
+pub const __NR_getsid: u32 = 124;
+pub const __NR_capget: u32 = 125;
+pub const __NR_capset: u32 = 126;
+pub const __NR_rt_sigpending: u32 = 127;
+pub const __NR_rt_sigtimedwait: u32 = 128;
+pub const __NR_rt_sigqueueinfo: u32 = 129;
+pub const __NR_rt_sigsuspend: u32 = 130;
+pub const __NR_sigaltstack: u32 = 131;
+pub const __NR_utime: u32 = 132;
+pub const __NR_mknod: u32 = 133;
+pub const __NR_uselib: u32 = 134;
+pub const __NR_personality: u32 = 135;
+pub const __NR_ustat: u32 = 136;
+pub const __NR_statfs: u32 = 137;
+pub const __NR_fstatfs: u32 = 138;
+pub const __NR_sysfs: u32 = 139;
+pub const __NR_getpriority: u32 = 140;
+pub const __NR_setpriority: u32 = 141;
+pub const __NR_sched_setparam: u32 = 142;
+pub const __NR_sched_getparam: u32 = 143;
+pub const __NR_sched_setscheduler: u32 = 144;
+pub const __NR_sched_getscheduler: u32 = 145;
+pub const __NR_sched_get_priority_max: u32 = 146;
+pub const __NR_sched_get_priority_min: u32 = 147;
+pub const __NR_sched_rr_get_interval: u32 = 148;
+pub const __NR_mlock: u32 = 149;
+pub const __NR_munlock: u32 = 150;
+pub const __NR_mlockall: u32 = 151;
+pub const __NR_munlockall: u32 = 152;
+pub const __NR_vhangup: u32 = 153;
+pub const __NR_modify_ldt: u32 = 154;
+pub const __NR_pivot_root: u32 = 155;
+pub const __NR__sysctl: u32 = 156;
+pub const __NR_prctl: u32 = 157;
+pub const __NR_arch_prctl: u32 = 158;
+pub const __NR_adjtimex: u32 = 159;
+pub const __NR_setrlimit: u32 = 160;
+pub const __NR_chroot: u32 = 161;
+pub const __NR_sync: u32 = 162;
+pub const __NR_acct: u32 = 163;
+pub const __NR_settimeofday: u32 = 164;
+pub const __NR_mount: u32 = 165;
+pub const __NR_umount2: u32 = 166;
+pub const __NR_swapon: u32 = 167;
+pub const __NR_swapoff: u32 = 168;
+pub const __NR_reboot: u32 = 169;
+pub const __NR_sethostname: u32 = 170;
+pub const __NR_setdomainname: u32 = 171;
+pub const __NR_iopl: u32 = 172;
+pub const __NR_ioperm: u32 = 173;
+pub const __NR_create_module: u32 = 174;
+pub const __NR_init_module: u32 = 175;
+pub const __NR_delete_module: u32 = 176;
+pub const __NR_get_kernel_syms: u32 = 177;
+pub const __NR_query_module: u32 = 178;
+pub const __NR_quotactl: u32 = 179;
+pub const __NR_nfsservctl: u32 = 180;
+pub const __NR_getpmsg: u32 = 181;
+pub const __NR_putpmsg: u32 = 182;
+pub const __NR_afs_syscall: u32 = 183;
+pub const __NR_tuxcall: u32 = 184;
+pub const __NR_security: u32 = 185;
+pub const __NR_gettid: u32 = 186;
+pub const __NR_readahead: u32 = 187;
+pub const __NR_setxattr: u32 = 188;
+pub const __NR_lsetxattr: u32 = 189;
+pub const __NR_fsetxattr: u32 = 190;
+pub const __NR_getxattr: u32 = 191;
+pub const __NR_lgetxattr: u32 = 192;
+pub const __NR_fgetxattr: u32 = 193;
+pub const __NR_listxattr: u32 = 194;
+pub const __NR_llistxattr: u32 = 195;
+pub const __NR_flistxattr: u32 = 196;
+pub const __NR_removexattr: u32 = 197;
+pub const __NR_lremovexattr: u32 = 198;
+pub const __NR_fremovexattr: u32 = 199;
+pub const __NR_tkill: u32 = 200;
+pub const __NR_time: u32 = 201;
+pub const __NR_futex: u32 = 202;
+pub const __NR_sched_setaffinity: u32 = 203;
+pub const __NR_sched_getaffinity: u32 = 204;
+pub const __NR_set_thread_area: u32 = 205;
+pub const __NR_io_setup: u32 = 206;
+pub const __NR_io_destroy: u32 = 207;
+pub const __NR_io_getevents: u32 = 208;
+pub const __NR_io_submit: u32 = 209;
+pub const __NR_io_cancel: u32 = 210;
+pub const __NR_get_thread_area: u32 = 211;
+pub const __NR_lookup_dcookie: u32 = 212;
+pub const __NR_epoll_create: u32 = 213;
+pub const __NR_epoll_ctl_old: u32 = 214;
+pub const __NR_epoll_wait_old: u32 = 215;
+pub const __NR_remap_file_pages: u32 = 216;
+pub const __NR_getdents64: u32 = 217;
+pub const __NR_set_tid_address: u32 = 218;
+pub const __NR_restart_syscall: u32 = 219;
+pub const __NR_semtimedop: u32 = 220;
+pub const __NR_fadvise64: u32 = 221;
+pub const __NR_timer_create: u32 = 222;
+pub const __NR_timer_settime: u32 = 223;
+pub const __NR_timer_gettime: u32 = 224;
+pub const __NR_timer_getoverrun: u32 = 225;
+pub const __NR_timer_delete: u32 = 226;
+pub const __NR_clock_settime: u32 = 227;
+pub const __NR_clock_gettime: u32 = 228;
+pub const __NR_clock_getres: u32 = 229;
+pub const __NR_clock_nanosleep: u32 = 230;
+pub const __NR_exit_group: u32 = 231;
+pub const __NR_epoll_wait: u32 = 232;
+pub const __NR_epoll_ctl: u32 = 233;
+pub const __NR_tgkill: u32 = 234;
+pub const __NR_utimes: u32 = 235;
+pub const __NR_vserver: u32 = 236;
+pub const __NR_mbind: u32 = 237;
+pub const __NR_set_mempolicy: u32 = 238;
+pub const __NR_get_mempolicy: u32 = 239;
+pub const __NR_mq_open: u32 = 240;
+pub const __NR_mq_unlink: u32 = 241;
+pub const __NR_mq_timedsend: u32 = 242;
+pub const __NR_mq_timedreceive: u32 = 243;
+pub const __NR_mq_notify: u32 = 244;
+pub const __NR_mq_getsetattr: u32 = 245;
+pub const __NR_kexec_load: u32 = 246;
+pub const __NR_waitid: u32 = 247;
+pub const __NR_add_key: u32 = 248;
+pub const __NR_request_key: u32 = 249;
+pub const __NR_keyctl: u32 = 250;
+pub const __NR_ioprio_set: u32 = 251;
+pub const __NR_ioprio_get: u32 = 252;
+pub const __NR_inotify_init: u32 = 253;
+pub const __NR_inotify_add_watch: u32 = 254;
+pub const __NR_inotify_rm_watch: u32 = 255;
+pub const __NR_migrate_pages: u32 = 256;
+pub const __NR_openat: u32 = 257;
+pub const __NR_mkdirat: u32 = 258;
+pub const __NR_mknodat: u32 = 259;
+pub const __NR_fchownat: u32 = 260;
+pub const __NR_futimesat: u32 = 261;
+pub const __NR_newfstatat: u32 = 262;
+pub const __NR_unlinkat: u32 = 263;
+pub const __NR_renameat: u32 = 264;
+pub const __NR_linkat: u32 = 265;
+pub const __NR_symlinkat: u32 = 266;
+pub const __NR_readlinkat: u32 = 267;
+pub const __NR_fchmodat: u32 = 268;
+pub const __NR_faccessat: u32 = 269;
+pub const __NR_pselect6: u32 = 270;
+pub const __NR_ppoll: u32 = 271;
+pub const __NR_unshare: u32 = 272;
+pub const __NR_set_robust_list: u32 = 273;
+pub const __NR_get_robust_list: u32 = 274;
+pub const __NR_splice: u32 = 275;
+pub const __NR_tee: u32 = 276;
+pub const __NR_sync_file_range: u32 = 277;
+pub const __NR_vmsplice: u32 = 278;
+pub const __NR_move_pages: u32 = 279;
+pub const __NR_utimensat: u32 = 280;
+pub const __NR_epoll_pwait: u32 = 281;
+pub const __NR_signalfd: u32 = 282;
+pub const __NR_timerfd_create: u32 = 283;
+pub const __NR_eventfd: u32 = 284;
+pub const __NR_fallocate: u32 = 285;
+pub const __NR_timerfd_settime: u32 = 286;
+pub const __NR_timerfd_gettime: u32 = 287;
+pub const __NR_accept4: u32 = 288;
+pub const __NR_signalfd4: u32 = 289;
+pub const __NR_eventfd2: u32 = 290;
+pub const __NR_epoll_create1: u32 = 291;
+pub const __NR_dup3: u32 = 292;
+pub const __NR_pipe2: u32 = 293;
+pub const __NR_inotify_init1: u32 = 294;
+pub const __NR_preadv: u32 = 295;
+pub const __NR_pwritev: u32 = 296;
+pub const __NR_rt_tgsigqueueinfo: u32 = 297;
+pub const __NR_perf_event_open: u32 = 298;
+pub const __NR_recvmmsg: u32 = 299;
+pub const __NR_fanotify_init: u32 = 300;
+pub const __NR_fanotify_mark: u32 = 301;
+pub const __NR_prlimit64: u32 = 302;
+pub const __NR_name_to_handle_at: u32 = 303;
+pub const __NR_open_by_handle_at: u32 = 304;
+pub const __NR_clock_adjtime: u32 = 305;
+pub const __NR_syncfs: u32 = 306;
+pub const __NR_sendmmsg: u32 = 307;
+pub const __NR_setns: u32 = 308;
+pub const __NR_getcpu: u32 = 309;
+pub const __NR_process_vm_readv: u32 = 310;
+pub const __NR_process_vm_writev: u32 = 311;
+pub const __NR_kcmp: u32 = 312;
+pub const __NR_finit_module: u32 = 313;
+pub const __NR_sched_setattr: u32 = 314;
+pub const __NR_sched_getattr: u32 = 315;
+pub const __NR_renameat2: u32 = 316;
+pub const __NR_seccomp: u32 = 317;
+pub const __NR_getrandom: u32 = 318;
+pub const __NR_memfd_create: u32 = 319;
+pub const __NR_kexec_file_load: u32 = 320;
+pub const __NR_bpf: u32 = 321;
+pub const __NR_execveat: u32 = 322;
+pub const __NR_userfaultfd: u32 = 323;
+pub const __NR_membarrier: u32 = 324;
+pub const __NR_mlock2: u32 = 325;
+pub const __NR_copy_file_range: u32 = 326;
+pub const __NR_preadv2: u32 = 327;
+pub const __NR_pwritev2: u32 = 328;
+pub const __NR_pkey_mprotect: u32 = 329;
+pub const __NR_pkey_alloc: u32 = 330;
+pub const __NR_pkey_free: u32 = 331;
+pub const __NR_statx: u32 = 332;
+pub const __NR_io_pgetevents: u32 = 333;
+pub const __NR_rseq: u32 = 334;
+pub const __NR_pidfd_send_signal: u32 = 424;
+pub const __NR_io_uring_setup: u32 = 425;
+pub const __NR_io_uring_enter: u32 = 426;
+pub const __NR_io_uring_register: u32 = 427;
+pub const __NR_open_tree: u32 = 428;
+pub const __NR_move_mount: u32 = 429;
+pub const __NR_fsopen: u32 = 430;
+pub const __NR_fsconfig: u32 = 431;
+pub const __NR_fsmount: u32 = 432;
+pub const __NR_fspick: u32 = 433;
+pub const __NR_pidfd_open: u32 = 434;
+pub const __NR_clone3: u32 = 435;
+pub const __NR_openat2: u32 = 437;
+pub const __NR_pidfd_getfd: u32 = 438;
+pub type __s8 = ::std::os::raw::c_schar;
+pub type __u8 = ::std::os::raw::c_uchar;
+pub type __s16 = ::std::os::raw::c_short;
+pub type __u16 = ::std::os::raw::c_ushort;
+pub type __s32 = ::std::os::raw::c_int;
+pub type __u32 = ::std::os::raw::c_uint;
+pub type __s64 = ::std::os::raw::c_longlong;
+pub type __u64 = ::std::os::raw::c_ulonglong;
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct __kernel_fd_set {
+ pub fds_bits: [::std::os::raw::c_ulong; 16usize],
+}
+#[test]
+fn bindgen_test_layout___kernel_fd_set() {
+ assert_eq!(
+ ::std::mem::size_of::<__kernel_fd_set>(),
+ 128usize,
+ concat!("Size of: ", stringify!(__kernel_fd_set))
+ );
+ assert_eq!(
+ ::std::mem::align_of::<__kernel_fd_set>(),
+ 8usize,
+ concat!("Alignment of ", stringify!(__kernel_fd_set))
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<__kernel_fd_set>())).fds_bits as *const _ as usize },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(__kernel_fd_set),
+ "::",
+ stringify!(fds_bits)
+ )
+ );
+}
+pub type __kernel_sighandler_t =
+ ::std::option::Option<unsafe extern "C" fn(arg1: ::std::os::raw::c_int)>;
+pub type __kernel_key_t = ::std::os::raw::c_int;
+pub type __kernel_mqd_t = ::std::os::raw::c_int;
+pub type __kernel_old_uid_t = ::std::os::raw::c_ushort;
+pub type __kernel_old_gid_t = ::std::os::raw::c_ushort;
+pub type __kernel_old_dev_t = ::std::os::raw::c_ulong;
+pub type __kernel_long_t = ::std::os::raw::c_long;
+pub type __kernel_ulong_t = ::std::os::raw::c_ulong;
+pub type __kernel_ino_t = __kernel_ulong_t;
+pub type __kernel_mode_t = ::std::os::raw::c_uint;
+pub type __kernel_pid_t = ::std::os::raw::c_int;
+pub type __kernel_ipc_pid_t = ::std::os::raw::c_int;
+pub type __kernel_uid_t = ::std::os::raw::c_uint;
+pub type __kernel_gid_t = ::std::os::raw::c_uint;
+pub type __kernel_suseconds_t = __kernel_long_t;
+pub type __kernel_daddr_t = ::std::os::raw::c_int;
+pub type __kernel_uid32_t = ::std::os::raw::c_uint;
+pub type __kernel_gid32_t = ::std::os::raw::c_uint;
+pub type __kernel_size_t = __kernel_ulong_t;
+pub type __kernel_ssize_t = __kernel_long_t;
+pub type __kernel_ptrdiff_t = __kernel_long_t;
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct __kernel_fsid_t {
+ pub val: [::std::os::raw::c_int; 2usize],
+}
+#[test]
+fn bindgen_test_layout___kernel_fsid_t() {
+ assert_eq!(
+ ::std::mem::size_of::<__kernel_fsid_t>(),
+ 8usize,
+ concat!("Size of: ", stringify!(__kernel_fsid_t))
+ );
+ assert_eq!(
+ ::std::mem::align_of::<__kernel_fsid_t>(),
+ 4usize,
+ concat!("Alignment of ", stringify!(__kernel_fsid_t))
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<__kernel_fsid_t>())).val as *const _ as usize },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(__kernel_fsid_t),
+ "::",
+ stringify!(val)
+ )
+ );
+}
+pub type __kernel_off_t = __kernel_long_t;
+pub type __kernel_loff_t = ::std::os::raw::c_longlong;
+pub type __kernel_old_time_t = __kernel_long_t;
+pub type __kernel_time_t = __kernel_long_t;
+pub type __kernel_time64_t = ::std::os::raw::c_longlong;
+pub type __kernel_clock_t = __kernel_long_t;
+pub type __kernel_timer_t = ::std::os::raw::c_int;
+pub type __kernel_clockid_t = ::std::os::raw::c_int;
+pub type __kernel_caddr_t = *mut ::std::os::raw::c_char;
+pub type __kernel_uid16_t = ::std::os::raw::c_ushort;
+pub type __kernel_gid16_t = ::std::os::raw::c_ushort;
+pub type __le16 = __u16;
+pub type __be16 = __u16;
+pub type __le32 = __u32;
+pub type __be32 = __u32;
+pub type __le64 = __u64;
+pub type __be64 = __u64;
+pub type __sum16 = __u16;
+pub type __wsum = __u32;
+pub type __poll_t = ::std::os::raw::c_uint;
+pub const perf_type_id_PERF_TYPE_HARDWARE: perf_type_id = 0;
+pub const perf_type_id_PERF_TYPE_SOFTWARE: perf_type_id = 1;
+pub const perf_type_id_PERF_TYPE_TRACEPOINT: perf_type_id = 2;
+pub const perf_type_id_PERF_TYPE_HW_CACHE: perf_type_id = 3;
+pub const perf_type_id_PERF_TYPE_RAW: perf_type_id = 4;
+pub const perf_type_id_PERF_TYPE_BREAKPOINT: perf_type_id = 5;
+pub const perf_type_id_PERF_TYPE_MAX: perf_type_id = 6;
+pub type perf_type_id = u32;
+pub const perf_hw_id_PERF_COUNT_HW_CPU_CYCLES: perf_hw_id = 0;
+pub const perf_hw_id_PERF_COUNT_HW_INSTRUCTIONS: perf_hw_id = 1;
+pub const perf_hw_id_PERF_COUNT_HW_CACHE_REFERENCES: perf_hw_id = 2;
+pub const perf_hw_id_PERF_COUNT_HW_CACHE_MISSES: perf_hw_id = 3;
+pub const perf_hw_id_PERF_COUNT_HW_BRANCH_INSTRUCTIONS: perf_hw_id = 4;
+pub const perf_hw_id_PERF_COUNT_HW_BRANCH_MISSES: perf_hw_id = 5;
+pub const perf_hw_id_PERF_COUNT_HW_BUS_CYCLES: perf_hw_id = 6;
+pub const perf_hw_id_PERF_COUNT_HW_STALLED_CYCLES_FRONTEND: perf_hw_id = 7;
+pub const perf_hw_id_PERF_COUNT_HW_STALLED_CYCLES_BACKEND: perf_hw_id = 8;
+pub const perf_hw_id_PERF_COUNT_HW_REF_CPU_CYCLES: perf_hw_id = 9;
+pub const perf_hw_id_PERF_COUNT_HW_MAX: perf_hw_id = 10;
+pub type perf_hw_id = u32;
+pub const perf_hw_cache_id_PERF_COUNT_HW_CACHE_L1D: perf_hw_cache_id = 0;
+pub const perf_hw_cache_id_PERF_COUNT_HW_CACHE_L1I: perf_hw_cache_id = 1;
+pub const perf_hw_cache_id_PERF_COUNT_HW_CACHE_LL: perf_hw_cache_id = 2;
+pub const perf_hw_cache_id_PERF_COUNT_HW_CACHE_DTLB: perf_hw_cache_id = 3;
+pub const perf_hw_cache_id_PERF_COUNT_HW_CACHE_ITLB: perf_hw_cache_id = 4;
+pub const perf_hw_cache_id_PERF_COUNT_HW_CACHE_BPU: perf_hw_cache_id = 5;
+pub const perf_hw_cache_id_PERF_COUNT_HW_CACHE_NODE: perf_hw_cache_id = 6;
+pub const perf_hw_cache_id_PERF_COUNT_HW_CACHE_MAX: perf_hw_cache_id = 7;
+pub type perf_hw_cache_id = u32;
+pub const perf_hw_cache_op_id_PERF_COUNT_HW_CACHE_OP_READ: perf_hw_cache_op_id = 0;
+pub const perf_hw_cache_op_id_PERF_COUNT_HW_CACHE_OP_WRITE: perf_hw_cache_op_id = 1;
+pub const perf_hw_cache_op_id_PERF_COUNT_HW_CACHE_OP_PREFETCH: perf_hw_cache_op_id = 2;
+pub const perf_hw_cache_op_id_PERF_COUNT_HW_CACHE_OP_MAX: perf_hw_cache_op_id = 3;
+pub type perf_hw_cache_op_id = u32;
+pub const perf_hw_cache_op_result_id_PERF_COUNT_HW_CACHE_RESULT_ACCESS: perf_hw_cache_op_result_id =
+ 0;
+pub const perf_hw_cache_op_result_id_PERF_COUNT_HW_CACHE_RESULT_MISS: perf_hw_cache_op_result_id =
+ 1;
+pub const perf_hw_cache_op_result_id_PERF_COUNT_HW_CACHE_RESULT_MAX: perf_hw_cache_op_result_id = 2;
+pub type perf_hw_cache_op_result_id = u32;
+pub const perf_sw_ids_PERF_COUNT_SW_CPU_CLOCK: perf_sw_ids = 0;
+pub const perf_sw_ids_PERF_COUNT_SW_TASK_CLOCK: perf_sw_ids = 1;
+pub const perf_sw_ids_PERF_COUNT_SW_PAGE_FAULTS: perf_sw_ids = 2;
+pub const perf_sw_ids_PERF_COUNT_SW_CONTEXT_SWITCHES: perf_sw_ids = 3;
+pub const perf_sw_ids_PERF_COUNT_SW_CPU_MIGRATIONS: perf_sw_ids = 4;
+pub const perf_sw_ids_PERF_COUNT_SW_PAGE_FAULTS_MIN: perf_sw_ids = 5;
+pub const perf_sw_ids_PERF_COUNT_SW_PAGE_FAULTS_MAJ: perf_sw_ids = 6;
+pub const perf_sw_ids_PERF_COUNT_SW_ALIGNMENT_FAULTS: perf_sw_ids = 7;
+pub const perf_sw_ids_PERF_COUNT_SW_EMULATION_FAULTS: perf_sw_ids = 8;
+pub const perf_sw_ids_PERF_COUNT_SW_DUMMY: perf_sw_ids = 9;
+pub const perf_sw_ids_PERF_COUNT_SW_BPF_OUTPUT: perf_sw_ids = 10;
+pub const perf_sw_ids_PERF_COUNT_SW_MAX: perf_sw_ids = 11;
+pub type perf_sw_ids = u32;
+pub const perf_event_sample_format_PERF_SAMPLE_IP: perf_event_sample_format = 1;
+pub const perf_event_sample_format_PERF_SAMPLE_TID: perf_event_sample_format = 2;
+pub const perf_event_sample_format_PERF_SAMPLE_TIME: perf_event_sample_format = 4;
+pub const perf_event_sample_format_PERF_SAMPLE_ADDR: perf_event_sample_format = 8;
+pub const perf_event_sample_format_PERF_SAMPLE_READ: perf_event_sample_format = 16;
+pub const perf_event_sample_format_PERF_SAMPLE_CALLCHAIN: perf_event_sample_format = 32;
+pub const perf_event_sample_format_PERF_SAMPLE_ID: perf_event_sample_format = 64;
+pub const perf_event_sample_format_PERF_SAMPLE_CPU: perf_event_sample_format = 128;
+pub const perf_event_sample_format_PERF_SAMPLE_PERIOD: perf_event_sample_format = 256;
+pub const perf_event_sample_format_PERF_SAMPLE_STREAM_ID: perf_event_sample_format = 512;
+pub const perf_event_sample_format_PERF_SAMPLE_RAW: perf_event_sample_format = 1024;
+pub const perf_event_sample_format_PERF_SAMPLE_BRANCH_STACK: perf_event_sample_format = 2048;
+pub const perf_event_sample_format_PERF_SAMPLE_REGS_USER: perf_event_sample_format = 4096;
+pub const perf_event_sample_format_PERF_SAMPLE_STACK_USER: perf_event_sample_format = 8192;
+pub const perf_event_sample_format_PERF_SAMPLE_WEIGHT: perf_event_sample_format = 16384;
+pub const perf_event_sample_format_PERF_SAMPLE_DATA_SRC: perf_event_sample_format = 32768;
+pub const perf_event_sample_format_PERF_SAMPLE_IDENTIFIER: perf_event_sample_format = 65536;
+pub const perf_event_sample_format_PERF_SAMPLE_TRANSACTION: perf_event_sample_format = 131072;
+pub const perf_event_sample_format_PERF_SAMPLE_REGS_INTR: perf_event_sample_format = 262144;
+pub const perf_event_sample_format_PERF_SAMPLE_PHYS_ADDR: perf_event_sample_format = 524288;
+pub const perf_event_sample_format_PERF_SAMPLE_AUX: perf_event_sample_format = 1048576;
+pub const perf_event_sample_format_PERF_SAMPLE_MAX: perf_event_sample_format = 2097152;
+pub const perf_event_sample_format___PERF_SAMPLE_CALLCHAIN_EARLY: perf_event_sample_format =
+ 9223372036854775808;
+pub type perf_event_sample_format = u64;
+pub const perf_branch_sample_type_shift_PERF_SAMPLE_BRANCH_USER_SHIFT:
+ perf_branch_sample_type_shift = 0;
+pub const perf_branch_sample_type_shift_PERF_SAMPLE_BRANCH_KERNEL_SHIFT:
+ perf_branch_sample_type_shift = 1;
+pub const perf_branch_sample_type_shift_PERF_SAMPLE_BRANCH_HV_SHIFT: perf_branch_sample_type_shift =
+ 2;
+pub const perf_branch_sample_type_shift_PERF_SAMPLE_BRANCH_ANY_SHIFT:
+ perf_branch_sample_type_shift = 3;
+pub const perf_branch_sample_type_shift_PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT:
+ perf_branch_sample_type_shift = 4;
+pub const perf_branch_sample_type_shift_PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT:
+ perf_branch_sample_type_shift = 5;
+pub const perf_branch_sample_type_shift_PERF_SAMPLE_BRANCH_IND_CALL_SHIFT:
+ perf_branch_sample_type_shift = 6;
+pub const perf_branch_sample_type_shift_PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT:
+ perf_branch_sample_type_shift = 7;
+pub const perf_branch_sample_type_shift_PERF_SAMPLE_BRANCH_IN_TX_SHIFT:
+ perf_branch_sample_type_shift = 8;
+pub const perf_branch_sample_type_shift_PERF_SAMPLE_BRANCH_NO_TX_SHIFT:
+ perf_branch_sample_type_shift = 9;
+pub const perf_branch_sample_type_shift_PERF_SAMPLE_BRANCH_COND_SHIFT:
+ perf_branch_sample_type_shift = 10;
+pub const perf_branch_sample_type_shift_PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT:
+ perf_branch_sample_type_shift = 11;
+pub const perf_branch_sample_type_shift_PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT:
+ perf_branch_sample_type_shift = 12;
+pub const perf_branch_sample_type_shift_PERF_SAMPLE_BRANCH_CALL_SHIFT:
+ perf_branch_sample_type_shift = 13;
+pub const perf_branch_sample_type_shift_PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT:
+ perf_branch_sample_type_shift = 14;
+pub const perf_branch_sample_type_shift_PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT:
+ perf_branch_sample_type_shift = 15;
+pub const perf_branch_sample_type_shift_PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT:
+ perf_branch_sample_type_shift = 16;
+pub const perf_branch_sample_type_shift_PERF_SAMPLE_BRANCH_MAX_SHIFT:
+ perf_branch_sample_type_shift = 17;
+pub type perf_branch_sample_type_shift = u32;
+pub const perf_branch_sample_type_PERF_SAMPLE_BRANCH_USER: perf_branch_sample_type = 1;
+pub const perf_branch_sample_type_PERF_SAMPLE_BRANCH_KERNEL: perf_branch_sample_type = 2;
+pub const perf_branch_sample_type_PERF_SAMPLE_BRANCH_HV: perf_branch_sample_type = 4;
+pub const perf_branch_sample_type_PERF_SAMPLE_BRANCH_ANY: perf_branch_sample_type = 8;
+pub const perf_branch_sample_type_PERF_SAMPLE_BRANCH_ANY_CALL: perf_branch_sample_type = 16;
+pub const perf_branch_sample_type_PERF_SAMPLE_BRANCH_ANY_RETURN: perf_branch_sample_type = 32;
+pub const perf_branch_sample_type_PERF_SAMPLE_BRANCH_IND_CALL: perf_branch_sample_type = 64;
+pub const perf_branch_sample_type_PERF_SAMPLE_BRANCH_ABORT_TX: perf_branch_sample_type = 128;
+pub const perf_branch_sample_type_PERF_SAMPLE_BRANCH_IN_TX: perf_branch_sample_type = 256;
+pub const perf_branch_sample_type_PERF_SAMPLE_BRANCH_NO_TX: perf_branch_sample_type = 512;
+pub const perf_branch_sample_type_PERF_SAMPLE_BRANCH_COND: perf_branch_sample_type = 1024;
+pub const perf_branch_sample_type_PERF_SAMPLE_BRANCH_CALL_STACK: perf_branch_sample_type = 2048;
+pub const perf_branch_sample_type_PERF_SAMPLE_BRANCH_IND_JUMP: perf_branch_sample_type = 4096;
+pub const perf_branch_sample_type_PERF_SAMPLE_BRANCH_CALL: perf_branch_sample_type = 8192;
+pub const perf_branch_sample_type_PERF_SAMPLE_BRANCH_NO_FLAGS: perf_branch_sample_type = 16384;
+pub const perf_branch_sample_type_PERF_SAMPLE_BRANCH_NO_CYCLES: perf_branch_sample_type = 32768;
+pub const perf_branch_sample_type_PERF_SAMPLE_BRANCH_TYPE_SAVE: perf_branch_sample_type = 65536;
+pub const perf_branch_sample_type_PERF_SAMPLE_BRANCH_MAX: perf_branch_sample_type = 131072;
+pub type perf_branch_sample_type = u32;
+pub const PERF_BR_UNKNOWN: _bindgen_ty_1 = 0;
+pub const PERF_BR_COND: _bindgen_ty_1 = 1;
+pub const PERF_BR_UNCOND: _bindgen_ty_1 = 2;
+pub const PERF_BR_IND: _bindgen_ty_1 = 3;
+pub const PERF_BR_CALL: _bindgen_ty_1 = 4;
+pub const PERF_BR_IND_CALL: _bindgen_ty_1 = 5;
+pub const PERF_BR_RET: _bindgen_ty_1 = 6;
+pub const PERF_BR_SYSCALL: _bindgen_ty_1 = 7;
+pub const PERF_BR_SYSRET: _bindgen_ty_1 = 8;
+pub const PERF_BR_COND_CALL: _bindgen_ty_1 = 9;
+pub const PERF_BR_COND_RET: _bindgen_ty_1 = 10;
+pub const PERF_BR_MAX: _bindgen_ty_1 = 11;
+pub type _bindgen_ty_1 = u32;
+pub const perf_sample_regs_abi_PERF_SAMPLE_REGS_ABI_NONE: perf_sample_regs_abi = 0;
+pub const perf_sample_regs_abi_PERF_SAMPLE_REGS_ABI_32: perf_sample_regs_abi = 1;
+pub const perf_sample_regs_abi_PERF_SAMPLE_REGS_ABI_64: perf_sample_regs_abi = 2;
+pub type perf_sample_regs_abi = u32;
+pub const PERF_TXN_ELISION: _bindgen_ty_2 = 1;
+pub const PERF_TXN_TRANSACTION: _bindgen_ty_2 = 2;
+pub const PERF_TXN_SYNC: _bindgen_ty_2 = 4;
+pub const PERF_TXN_ASYNC: _bindgen_ty_2 = 8;
+pub const PERF_TXN_RETRY: _bindgen_ty_2 = 16;
+pub const PERF_TXN_CONFLICT: _bindgen_ty_2 = 32;
+pub const PERF_TXN_CAPACITY_WRITE: _bindgen_ty_2 = 64;
+pub const PERF_TXN_CAPACITY_READ: _bindgen_ty_2 = 128;
+pub const PERF_TXN_MAX: _bindgen_ty_2 = 256;
+pub const PERF_TXN_ABORT_MASK: _bindgen_ty_2 = 18446744069414584320;
+pub const PERF_TXN_ABORT_SHIFT: _bindgen_ty_2 = 32;
+pub type _bindgen_ty_2 = u64;
+pub const perf_event_read_format_PERF_FORMAT_TOTAL_TIME_ENABLED: perf_event_read_format = 1;
+pub const perf_event_read_format_PERF_FORMAT_TOTAL_TIME_RUNNING: perf_event_read_format = 2;
+pub const perf_event_read_format_PERF_FORMAT_ID: perf_event_read_format = 4;
+pub const perf_event_read_format_PERF_FORMAT_GROUP: perf_event_read_format = 8;
+pub const perf_event_read_format_PERF_FORMAT_MAX: perf_event_read_format = 16;
+pub type perf_event_read_format = u32;
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct perf_event_attr {
+ pub type_: __u32,
+ pub size: __u32,
+ pub config: __u64,
+ pub __bindgen_anon_1: perf_event_attr__bindgen_ty_1,
+ pub sample_type: __u64,
+ pub read_format: __u64,
+ pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize], u32>,
+ pub __bindgen_anon_2: perf_event_attr__bindgen_ty_2,
+ pub bp_type: __u32,
+ pub __bindgen_anon_3: perf_event_attr__bindgen_ty_3,
+ pub __bindgen_anon_4: perf_event_attr__bindgen_ty_4,
+ pub branch_sample_type: __u64,
+ pub sample_regs_user: __u64,
+ pub sample_stack_user: __u32,
+ pub clockid: __s32,
+ pub sample_regs_intr: __u64,
+ pub aux_watermark: __u32,
+ pub sample_max_stack: __u16,
+ pub __reserved_2: __u16,
+ pub aux_sample_size: __u32,
+ pub __reserved_3: __u32,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union perf_event_attr__bindgen_ty_1 {
+ pub sample_period: __u64,
+ pub sample_freq: __u64,
+ _bindgen_union_align: u64,
+}
+#[test]
+fn bindgen_test_layout_perf_event_attr__bindgen_ty_1() {
+ assert_eq!(
+ ::std::mem::size_of::<perf_event_attr__bindgen_ty_1>(),
+ 8usize,
+ concat!("Size of: ", stringify!(perf_event_attr__bindgen_ty_1))
+ );
+ assert_eq!(
+ ::std::mem::align_of::<perf_event_attr__bindgen_ty_1>(),
+ 8usize,
+ concat!("Alignment of ", stringify!(perf_event_attr__bindgen_ty_1))
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_attr__bindgen_ty_1>())).sample_period as *const _
+ as usize
+ },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr__bindgen_ty_1),
+ "::",
+ stringify!(sample_period)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_attr__bindgen_ty_1>())).sample_freq as *const _
+ as usize
+ },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr__bindgen_ty_1),
+ "::",
+ stringify!(sample_freq)
+ )
+ );
+}
+impl Default for perf_event_attr__bindgen_ty_1 {
+ fn default() -> Self {
+ unsafe { ::std::mem::zeroed() }
+ }
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union perf_event_attr__bindgen_ty_2 {
+ pub wakeup_events: __u32,
+ pub wakeup_watermark: __u32,
+ _bindgen_union_align: u32,
+}
+#[test]
+fn bindgen_test_layout_perf_event_attr__bindgen_ty_2() {
+ assert_eq!(
+ ::std::mem::size_of::<perf_event_attr__bindgen_ty_2>(),
+ 4usize,
+ concat!("Size of: ", stringify!(perf_event_attr__bindgen_ty_2))
+ );
+ assert_eq!(
+ ::std::mem::align_of::<perf_event_attr__bindgen_ty_2>(),
+ 4usize,
+ concat!("Alignment of ", stringify!(perf_event_attr__bindgen_ty_2))
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_attr__bindgen_ty_2>())).wakeup_events as *const _
+ as usize
+ },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr__bindgen_ty_2),
+ "::",
+ stringify!(wakeup_events)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_attr__bindgen_ty_2>())).wakeup_watermark as *const _
+ as usize
+ },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr__bindgen_ty_2),
+ "::",
+ stringify!(wakeup_watermark)
+ )
+ );
+}
+impl Default for perf_event_attr__bindgen_ty_2 {
+ fn default() -> Self {
+ unsafe { ::std::mem::zeroed() }
+ }
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union perf_event_attr__bindgen_ty_3 {
+ pub bp_addr: __u64,
+ pub kprobe_func: __u64,
+ pub uprobe_path: __u64,
+ pub config1: __u64,
+ _bindgen_union_align: u64,
+}
+#[test]
+fn bindgen_test_layout_perf_event_attr__bindgen_ty_3() {
+ assert_eq!(
+ ::std::mem::size_of::<perf_event_attr__bindgen_ty_3>(),
+ 8usize,
+ concat!("Size of: ", stringify!(perf_event_attr__bindgen_ty_3))
+ );
+ assert_eq!(
+ ::std::mem::align_of::<perf_event_attr__bindgen_ty_3>(),
+ 8usize,
+ concat!("Alignment of ", stringify!(perf_event_attr__bindgen_ty_3))
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_attr__bindgen_ty_3>())).bp_addr as *const _ as usize
+ },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr__bindgen_ty_3),
+ "::",
+ stringify!(bp_addr)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_attr__bindgen_ty_3>())).kprobe_func as *const _
+ as usize
+ },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr__bindgen_ty_3),
+ "::",
+ stringify!(kprobe_func)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_attr__bindgen_ty_3>())).uprobe_path as *const _
+ as usize
+ },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr__bindgen_ty_3),
+ "::",
+ stringify!(uprobe_path)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_attr__bindgen_ty_3>())).config1 as *const _ as usize
+ },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr__bindgen_ty_3),
+ "::",
+ stringify!(config1)
+ )
+ );
+}
+impl Default for perf_event_attr__bindgen_ty_3 {
+ fn default() -> Self {
+ unsafe { ::std::mem::zeroed() }
+ }
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union perf_event_attr__bindgen_ty_4 {
+ pub bp_len: __u64,
+ pub kprobe_addr: __u64,
+ pub probe_offset: __u64,
+ pub config2: __u64,
+ _bindgen_union_align: u64,
+}
+#[test]
+fn bindgen_test_layout_perf_event_attr__bindgen_ty_4() {
+ assert_eq!(
+ ::std::mem::size_of::<perf_event_attr__bindgen_ty_4>(),
+ 8usize,
+ concat!("Size of: ", stringify!(perf_event_attr__bindgen_ty_4))
+ );
+ assert_eq!(
+ ::std::mem::align_of::<perf_event_attr__bindgen_ty_4>(),
+ 8usize,
+ concat!("Alignment of ", stringify!(perf_event_attr__bindgen_ty_4))
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_attr__bindgen_ty_4>())).bp_len as *const _ as usize
+ },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr__bindgen_ty_4),
+ "::",
+ stringify!(bp_len)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_attr__bindgen_ty_4>())).kprobe_addr as *const _
+ as usize
+ },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr__bindgen_ty_4),
+ "::",
+ stringify!(kprobe_addr)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_attr__bindgen_ty_4>())).probe_offset as *const _
+ as usize
+ },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr__bindgen_ty_4),
+ "::",
+ stringify!(probe_offset)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_attr__bindgen_ty_4>())).config2 as *const _ as usize
+ },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr__bindgen_ty_4),
+ "::",
+ stringify!(config2)
+ )
+ );
+}
+impl Default for perf_event_attr__bindgen_ty_4 {
+ fn default() -> Self {
+ unsafe { ::std::mem::zeroed() }
+ }
+}
+#[test]
+fn bindgen_test_layout_perf_event_attr() {
+ assert_eq!(
+ ::std::mem::size_of::<perf_event_attr>(),
+ 120usize,
+ concat!("Size of: ", stringify!(perf_event_attr))
+ );
+ assert_eq!(
+ ::std::mem::align_of::<perf_event_attr>(),
+ 8usize,
+ concat!("Alignment of ", stringify!(perf_event_attr))
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_attr>())).type_ as *const _ as usize },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr),
+ "::",
+ stringify!(type_)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_attr>())).size as *const _ as usize },
+ 4usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr),
+ "::",
+ stringify!(size)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_attr>())).config as *const _ as usize },
+ 8usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr),
+ "::",
+ stringify!(config)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_attr>())).sample_type as *const _ as usize },
+ 24usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr),
+ "::",
+ stringify!(sample_type)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_attr>())).read_format as *const _ as usize },
+ 32usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr),
+ "::",
+ stringify!(read_format)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_attr>())).bp_type as *const _ as usize },
+ 52usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr),
+ "::",
+ stringify!(bp_type)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_attr>())).branch_sample_type as *const _ as usize
+ },
+ 72usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr),
+ "::",
+ stringify!(branch_sample_type)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_attr>())).sample_regs_user as *const _ as usize
+ },
+ 80usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr),
+ "::",
+ stringify!(sample_regs_user)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_attr>())).sample_stack_user as *const _ as usize
+ },
+ 88usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr),
+ "::",
+ stringify!(sample_stack_user)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_attr>())).clockid as *const _ as usize },
+ 92usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr),
+ "::",
+ stringify!(clockid)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_attr>())).sample_regs_intr as *const _ as usize
+ },
+ 96usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr),
+ "::",
+ stringify!(sample_regs_intr)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_attr>())).aux_watermark as *const _ as usize },
+ 104usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr),
+ "::",
+ stringify!(aux_watermark)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_attr>())).sample_max_stack as *const _ as usize
+ },
+ 108usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr),
+ "::",
+ stringify!(sample_max_stack)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_attr>())).__reserved_2 as *const _ as usize },
+ 110usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr),
+ "::",
+ stringify!(__reserved_2)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_attr>())).aux_sample_size as *const _ as usize },
+ 112usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr),
+ "::",
+ stringify!(aux_sample_size)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_attr>())).__reserved_3 as *const _ as usize },
+ 116usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_attr),
+ "::",
+ stringify!(__reserved_3)
+ )
+ );
+}
+impl Default for perf_event_attr {
+ fn default() -> Self {
+ unsafe { ::std::mem::zeroed() }
+ }
+}
+impl perf_event_attr {
+ #[inline]
+ pub fn disabled(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_disabled(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(0usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn inherit(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_inherit(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(1usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn pinned(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_pinned(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(2usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn exclusive(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_exclusive(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(3usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn exclude_user(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_exclude_user(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(4usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn exclude_kernel(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_exclude_kernel(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(5usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn exclude_hv(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(6usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_exclude_hv(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(6usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn exclude_idle(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(7usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_exclude_idle(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(7usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn mmap(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(8usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_mmap(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(8usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn comm(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(9usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_comm(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(9usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn freq(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(10usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_freq(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(10usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn inherit_stat(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(11usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_inherit_stat(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(11usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn enable_on_exec(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(12usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_enable_on_exec(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(12usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn task(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(13usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_task(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(13usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn watermark(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(14usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_watermark(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(14usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn precise_ip(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(15usize, 2u8) as u64) }
+ }
+ #[inline]
+ pub fn set_precise_ip(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(15usize, 2u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn mmap_data(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(17usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_mmap_data(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(17usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn sample_id_all(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(18usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_sample_id_all(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(18usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn exclude_host(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(19usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_exclude_host(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(19usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn exclude_guest(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(20usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_exclude_guest(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(20usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn exclude_callchain_kernel(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(21usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_exclude_callchain_kernel(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(21usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn exclude_callchain_user(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(22usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_exclude_callchain_user(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(22usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn mmap2(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(23usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_mmap2(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(23usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn comm_exec(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_comm_exec(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(24usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn use_clockid(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(25usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_use_clockid(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(25usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn context_switch(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(26usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_context_switch(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(26usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn write_backward(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(27usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_write_backward(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(27usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn namespaces(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(28usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_namespaces(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(28usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn ksymbol(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(29usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_ksymbol(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(29usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn bpf_event(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(30usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_bpf_event(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(30usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn aux_output(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(31usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_aux_output(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(31usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn __reserved_1(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(32usize, 32u8) as u64) }
+ }
+ #[inline]
+ pub fn set___reserved_1(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(32usize, 32u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn new_bitfield_1(
+ disabled: __u64,
+ inherit: __u64,
+ pinned: __u64,
+ exclusive: __u64,
+ exclude_user: __u64,
+ exclude_kernel: __u64,
+ exclude_hv: __u64,
+ exclude_idle: __u64,
+ mmap: __u64,
+ comm: __u64,
+ freq: __u64,
+ inherit_stat: __u64,
+ enable_on_exec: __u64,
+ task: __u64,
+ watermark: __u64,
+ precise_ip: __u64,
+ mmap_data: __u64,
+ sample_id_all: __u64,
+ exclude_host: __u64,
+ exclude_guest: __u64,
+ exclude_callchain_kernel: __u64,
+ exclude_callchain_user: __u64,
+ mmap2: __u64,
+ comm_exec: __u64,
+ use_clockid: __u64,
+ context_switch: __u64,
+ write_backward: __u64,
+ namespaces: __u64,
+ ksymbol: __u64,
+ bpf_event: __u64,
+ aux_output: __u64,
+ __reserved_1: __u64,
+ ) -> __BindgenBitfieldUnit<[u8; 8usize], u32> {
+ let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize], u32> =
+ Default::default();
+ __bindgen_bitfield_unit.set(0usize, 1u8, {
+ let disabled: u64 = unsafe { ::std::mem::transmute(disabled) };
+ disabled as u64
+ });
+ __bindgen_bitfield_unit.set(1usize, 1u8, {
+ let inherit: u64 = unsafe { ::std::mem::transmute(inherit) };
+ inherit as u64
+ });
+ __bindgen_bitfield_unit.set(2usize, 1u8, {
+ let pinned: u64 = unsafe { ::std::mem::transmute(pinned) };
+ pinned as u64
+ });
+ __bindgen_bitfield_unit.set(3usize, 1u8, {
+ let exclusive: u64 = unsafe { ::std::mem::transmute(exclusive) };
+ exclusive as u64
+ });
+ __bindgen_bitfield_unit.set(4usize, 1u8, {
+ let exclude_user: u64 = unsafe { ::std::mem::transmute(exclude_user) };
+ exclude_user as u64
+ });
+ __bindgen_bitfield_unit.set(5usize, 1u8, {
+ let exclude_kernel: u64 = unsafe { ::std::mem::transmute(exclude_kernel) };
+ exclude_kernel as u64
+ });
+ __bindgen_bitfield_unit.set(6usize, 1u8, {
+ let exclude_hv: u64 = unsafe { ::std::mem::transmute(exclude_hv) };
+ exclude_hv as u64
+ });
+ __bindgen_bitfield_unit.set(7usize, 1u8, {
+ let exclude_idle: u64 = unsafe { ::std::mem::transmute(exclude_idle) };
+ exclude_idle as u64
+ });
+ __bindgen_bitfield_unit.set(8usize, 1u8, {
+ let mmap: u64 = unsafe { ::std::mem::transmute(mmap) };
+ mmap as u64
+ });
+ __bindgen_bitfield_unit.set(9usize, 1u8, {
+ let comm: u64 = unsafe { ::std::mem::transmute(comm) };
+ comm as u64
+ });
+ __bindgen_bitfield_unit.set(10usize, 1u8, {
+ let freq: u64 = unsafe { ::std::mem::transmute(freq) };
+ freq as u64
+ });
+ __bindgen_bitfield_unit.set(11usize, 1u8, {
+ let inherit_stat: u64 = unsafe { ::std::mem::transmute(inherit_stat) };
+ inherit_stat as u64
+ });
+ __bindgen_bitfield_unit.set(12usize, 1u8, {
+ let enable_on_exec: u64 = unsafe { ::std::mem::transmute(enable_on_exec) };
+ enable_on_exec as u64
+ });
+ __bindgen_bitfield_unit.set(13usize, 1u8, {
+ let task: u64 = unsafe { ::std::mem::transmute(task) };
+ task as u64
+ });
+ __bindgen_bitfield_unit.set(14usize, 1u8, {
+ let watermark: u64 = unsafe { ::std::mem::transmute(watermark) };
+ watermark as u64
+ });
+ __bindgen_bitfield_unit.set(15usize, 2u8, {
+ let precise_ip: u64 = unsafe { ::std::mem::transmute(precise_ip) };
+ precise_ip as u64
+ });
+ __bindgen_bitfield_unit.set(17usize, 1u8, {
+ let mmap_data: u64 = unsafe { ::std::mem::transmute(mmap_data) };
+ mmap_data as u64
+ });
+ __bindgen_bitfield_unit.set(18usize, 1u8, {
+ let sample_id_all: u64 = unsafe { ::std::mem::transmute(sample_id_all) };
+ sample_id_all as u64
+ });
+ __bindgen_bitfield_unit.set(19usize, 1u8, {
+ let exclude_host: u64 = unsafe { ::std::mem::transmute(exclude_host) };
+ exclude_host as u64
+ });
+ __bindgen_bitfield_unit.set(20usize, 1u8, {
+ let exclude_guest: u64 = unsafe { ::std::mem::transmute(exclude_guest) };
+ exclude_guest as u64
+ });
+ __bindgen_bitfield_unit.set(21usize, 1u8, {
+ let exclude_callchain_kernel: u64 =
+ unsafe { ::std::mem::transmute(exclude_callchain_kernel) };
+ exclude_callchain_kernel as u64
+ });
+ __bindgen_bitfield_unit.set(22usize, 1u8, {
+ let exclude_callchain_user: u64 =
+ unsafe { ::std::mem::transmute(exclude_callchain_user) };
+ exclude_callchain_user as u64
+ });
+ __bindgen_bitfield_unit.set(23usize, 1u8, {
+ let mmap2: u64 = unsafe { ::std::mem::transmute(mmap2) };
+ mmap2 as u64
+ });
+ __bindgen_bitfield_unit.set(24usize, 1u8, {
+ let comm_exec: u64 = unsafe { ::std::mem::transmute(comm_exec) };
+ comm_exec as u64
+ });
+ __bindgen_bitfield_unit.set(25usize, 1u8, {
+ let use_clockid: u64 = unsafe { ::std::mem::transmute(use_clockid) };
+ use_clockid as u64
+ });
+ __bindgen_bitfield_unit.set(26usize, 1u8, {
+ let context_switch: u64 = unsafe { ::std::mem::transmute(context_switch) };
+ context_switch as u64
+ });
+ __bindgen_bitfield_unit.set(27usize, 1u8, {
+ let write_backward: u64 = unsafe { ::std::mem::transmute(write_backward) };
+ write_backward as u64
+ });
+ __bindgen_bitfield_unit.set(28usize, 1u8, {
+ let namespaces: u64 = unsafe { ::std::mem::transmute(namespaces) };
+ namespaces as u64
+ });
+ __bindgen_bitfield_unit.set(29usize, 1u8, {
+ let ksymbol: u64 = unsafe { ::std::mem::transmute(ksymbol) };
+ ksymbol as u64
+ });
+ __bindgen_bitfield_unit.set(30usize, 1u8, {
+ let bpf_event: u64 = unsafe { ::std::mem::transmute(bpf_event) };
+ bpf_event as u64
+ });
+ __bindgen_bitfield_unit.set(31usize, 1u8, {
+ let aux_output: u64 = unsafe { ::std::mem::transmute(aux_output) };
+ aux_output as u64
+ });
+ __bindgen_bitfield_unit.set(32usize, 32u8, {
+ let __reserved_1: u64 = unsafe { ::std::mem::transmute(__reserved_1) };
+ __reserved_1 as u64
+ });
+ __bindgen_bitfield_unit
+ }
+}
+#[repr(C)]
+#[derive(Debug, Default)]
+pub struct perf_event_query_bpf {
+ pub ids_len: __u32,
+ pub prog_cnt: __u32,
+ pub ids: __IncompleteArrayField<__u32>,
+}
+#[test]
+fn bindgen_test_layout_perf_event_query_bpf() {
+ assert_eq!(
+ ::std::mem::size_of::<perf_event_query_bpf>(),
+ 8usize,
+ concat!("Size of: ", stringify!(perf_event_query_bpf))
+ );
+ assert_eq!(
+ ::std::mem::align_of::<perf_event_query_bpf>(),
+ 4usize,
+ concat!("Alignment of ", stringify!(perf_event_query_bpf))
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_query_bpf>())).ids_len as *const _ as usize },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_query_bpf),
+ "::",
+ stringify!(ids_len)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_query_bpf>())).prog_cnt as *const _ as usize },
+ 4usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_query_bpf),
+ "::",
+ stringify!(prog_cnt)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_query_bpf>())).ids as *const _ as usize },
+ 8usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_query_bpf),
+ "::",
+ stringify!(ids)
+ )
+ );
+}
+pub const perf_event_ioc_flags_PERF_IOC_FLAG_GROUP: perf_event_ioc_flags = 1;
+pub type perf_event_ioc_flags = u32;
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub struct perf_event_mmap_page {
+ pub version: __u32,
+ pub compat_version: __u32,
+ pub lock: __u32,
+ pub index: __u32,
+ pub offset: __s64,
+ pub time_enabled: __u64,
+ pub time_running: __u64,
+ pub __bindgen_anon_1: perf_event_mmap_page__bindgen_ty_1,
+ pub pmc_width: __u16,
+ pub time_shift: __u16,
+ pub time_mult: __u32,
+ pub time_offset: __u64,
+ pub time_zero: __u64,
+ pub size: __u32,
+ pub __reserved: [__u8; 948usize],
+ pub data_head: __u64,
+ pub data_tail: __u64,
+ pub data_offset: __u64,
+ pub data_size: __u64,
+ pub aux_head: __u64,
+ pub aux_tail: __u64,
+ pub aux_offset: __u64,
+ pub aux_size: __u64,
+}
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union perf_event_mmap_page__bindgen_ty_1 {
+ pub capabilities: __u64,
+ pub __bindgen_anon_1: perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1,
+ _bindgen_union_align: u64,
+}
+#[repr(C)]
+#[repr(align(8))]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 {
+ pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize], u64>,
+}
+#[test]
+fn bindgen_test_layout_perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1() {
+ assert_eq!(
+ ::std::mem::size_of::<perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1>(),
+ 8usize,
+ concat!(
+ "Size of: ",
+ stringify!(perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1)
+ )
+ );
+ assert_eq!(
+ ::std::mem::align_of::<perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1>(),
+ 8usize,
+ concat!(
+ "Alignment of ",
+ stringify!(perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1)
+ )
+ );
+}
+impl perf_event_mmap_page__bindgen_ty_1__bindgen_ty_1 {
+ #[inline]
+ pub fn cap_bit0(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_cap_bit0(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(0usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn cap_bit0_is_deprecated(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_cap_bit0_is_deprecated(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(1usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn cap_user_rdpmc(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_cap_user_rdpmc(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(2usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn cap_user_time(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_cap_user_time(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(3usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn cap_user_time_zero(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_cap_user_time_zero(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(4usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn cap_____res(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 59u8) as u64) }
+ }
+ #[inline]
+ pub fn set_cap_____res(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(5usize, 59u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn new_bitfield_1(
+ cap_bit0: __u64,
+ cap_bit0_is_deprecated: __u64,
+ cap_user_rdpmc: __u64,
+ cap_user_time: __u64,
+ cap_user_time_zero: __u64,
+ cap_____res: __u64,
+ ) -> __BindgenBitfieldUnit<[u8; 8usize], u64> {
+ let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize], u64> =
+ Default::default();
+ __bindgen_bitfield_unit.set(0usize, 1u8, {
+ let cap_bit0: u64 = unsafe { ::std::mem::transmute(cap_bit0) };
+ cap_bit0 as u64
+ });
+ __bindgen_bitfield_unit.set(1usize, 1u8, {
+ let cap_bit0_is_deprecated: u64 =
+ unsafe { ::std::mem::transmute(cap_bit0_is_deprecated) };
+ cap_bit0_is_deprecated as u64
+ });
+ __bindgen_bitfield_unit.set(2usize, 1u8, {
+ let cap_user_rdpmc: u64 = unsafe { ::std::mem::transmute(cap_user_rdpmc) };
+ cap_user_rdpmc as u64
+ });
+ __bindgen_bitfield_unit.set(3usize, 1u8, {
+ let cap_user_time: u64 = unsafe { ::std::mem::transmute(cap_user_time) };
+ cap_user_time as u64
+ });
+ __bindgen_bitfield_unit.set(4usize, 1u8, {
+ let cap_user_time_zero: u64 = unsafe { ::std::mem::transmute(cap_user_time_zero) };
+ cap_user_time_zero as u64
+ });
+ __bindgen_bitfield_unit.set(5usize, 59u8, {
+ let cap_____res: u64 = unsafe { ::std::mem::transmute(cap_____res) };
+ cap_____res as u64
+ });
+ __bindgen_bitfield_unit
+ }
+}
+#[test]
+fn bindgen_test_layout_perf_event_mmap_page__bindgen_ty_1() {
+ assert_eq!(
+ ::std::mem::size_of::<perf_event_mmap_page__bindgen_ty_1>(),
+ 8usize,
+ concat!("Size of: ", stringify!(perf_event_mmap_page__bindgen_ty_1))
+ );
+ assert_eq!(
+ ::std::mem::align_of::<perf_event_mmap_page__bindgen_ty_1>(),
+ 8usize,
+ concat!(
+ "Alignment of ",
+ stringify!(perf_event_mmap_page__bindgen_ty_1)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_mmap_page__bindgen_ty_1>())).capabilities as *const _
+ as usize
+ },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page__bindgen_ty_1),
+ "::",
+ stringify!(capabilities)
+ )
+ );
+}
+impl Default for perf_event_mmap_page__bindgen_ty_1 {
+ fn default() -> Self {
+ unsafe { ::std::mem::zeroed() }
+ }
+}
+#[test]
+fn bindgen_test_layout_perf_event_mmap_page() {
+ assert_eq!(
+ ::std::mem::size_of::<perf_event_mmap_page>(),
+ 1088usize,
+ concat!("Size of: ", stringify!(perf_event_mmap_page))
+ );
+ assert_eq!(
+ ::std::mem::align_of::<perf_event_mmap_page>(),
+ 8usize,
+ concat!("Alignment of ", stringify!(perf_event_mmap_page))
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_mmap_page>())).version as *const _ as usize },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(version)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_mmap_page>())).compat_version as *const _ as usize
+ },
+ 4usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(compat_version)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_mmap_page>())).lock as *const _ as usize },
+ 8usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(lock)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_mmap_page>())).index as *const _ as usize },
+ 12usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(index)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_mmap_page>())).offset as *const _ as usize },
+ 16usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(offset)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_mmap_page>())).time_enabled as *const _ as usize
+ },
+ 24usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(time_enabled)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_mmap_page>())).time_running as *const _ as usize
+ },
+ 32usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(time_running)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_mmap_page>())).pmc_width as *const _ as usize },
+ 48usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(pmc_width)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_mmap_page>())).time_shift as *const _ as usize },
+ 50usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(time_shift)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_mmap_page>())).time_mult as *const _ as usize },
+ 52usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(time_mult)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_mmap_page>())).time_offset as *const _ as usize
+ },
+ 56usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(time_offset)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_mmap_page>())).time_zero as *const _ as usize },
+ 64usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(time_zero)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_mmap_page>())).size as *const _ as usize },
+ 72usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(size)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_mmap_page>())).__reserved as *const _ as usize },
+ 76usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(__reserved)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_mmap_page>())).data_head as *const _ as usize },
+ 1024usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(data_head)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_mmap_page>())).data_tail as *const _ as usize },
+ 1032usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(data_tail)
+ )
+ );
+ assert_eq!(
+ unsafe {
+ &(*(::std::ptr::null::<perf_event_mmap_page>())).data_offset as *const _ as usize
+ },
+ 1040usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(data_offset)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_mmap_page>())).data_size as *const _ as usize },
+ 1048usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(data_size)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_mmap_page>())).aux_head as *const _ as usize },
+ 1056usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(aux_head)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_mmap_page>())).aux_tail as *const _ as usize },
+ 1064usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(aux_tail)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_mmap_page>())).aux_offset as *const _ as usize },
+ 1072usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(aux_offset)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_mmap_page>())).aux_size as *const _ as usize },
+ 1080usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_mmap_page),
+ "::",
+ stringify!(aux_size)
+ )
+ );
+}
+impl Default for perf_event_mmap_page {
+ fn default() -> Self {
+ unsafe { ::std::mem::zeroed() }
+ }
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct perf_event_header {
+ pub type_: __u32,
+ pub misc: __u16,
+ pub size: __u16,
+}
+#[test]
+fn bindgen_test_layout_perf_event_header() {
+ assert_eq!(
+ ::std::mem::size_of::<perf_event_header>(),
+ 8usize,
+ concat!("Size of: ", stringify!(perf_event_header))
+ );
+ assert_eq!(
+ ::std::mem::align_of::<perf_event_header>(),
+ 4usize,
+ concat!("Alignment of ", stringify!(perf_event_header))
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_header>())).type_ as *const _ as usize },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_header),
+ "::",
+ stringify!(type_)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_header>())).misc as *const _ as usize },
+ 4usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_header),
+ "::",
+ stringify!(misc)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_event_header>())).size as *const _ as usize },
+ 6usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_event_header),
+ "::",
+ stringify!(size)
+ )
+ );
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct perf_ns_link_info {
+ pub dev: __u64,
+ pub ino: __u64,
+}
+#[test]
+fn bindgen_test_layout_perf_ns_link_info() {
+ assert_eq!(
+ ::std::mem::size_of::<perf_ns_link_info>(),
+ 16usize,
+ concat!("Size of: ", stringify!(perf_ns_link_info))
+ );
+ assert_eq!(
+ ::std::mem::align_of::<perf_ns_link_info>(),
+ 8usize,
+ concat!("Alignment of ", stringify!(perf_ns_link_info))
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_ns_link_info>())).dev as *const _ as usize },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_ns_link_info),
+ "::",
+ stringify!(dev)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_ns_link_info>())).ino as *const _ as usize },
+ 8usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_ns_link_info),
+ "::",
+ stringify!(ino)
+ )
+ );
+}
+pub const NET_NS_INDEX: _bindgen_ty_3 = 0;
+pub const UTS_NS_INDEX: _bindgen_ty_3 = 1;
+pub const IPC_NS_INDEX: _bindgen_ty_3 = 2;
+pub const PID_NS_INDEX: _bindgen_ty_3 = 3;
+pub const USER_NS_INDEX: _bindgen_ty_3 = 4;
+pub const MNT_NS_INDEX: _bindgen_ty_3 = 5;
+pub const CGROUP_NS_INDEX: _bindgen_ty_3 = 6;
+pub const NR_NAMESPACES: _bindgen_ty_3 = 7;
+pub type _bindgen_ty_3 = u32;
+pub const perf_event_type_PERF_RECORD_MMAP: perf_event_type = 1;
+pub const perf_event_type_PERF_RECORD_LOST: perf_event_type = 2;
+pub const perf_event_type_PERF_RECORD_COMM: perf_event_type = 3;
+pub const perf_event_type_PERF_RECORD_EXIT: perf_event_type = 4;
+pub const perf_event_type_PERF_RECORD_THROTTLE: perf_event_type = 5;
+pub const perf_event_type_PERF_RECORD_UNTHROTTLE: perf_event_type = 6;
+pub const perf_event_type_PERF_RECORD_FORK: perf_event_type = 7;
+pub const perf_event_type_PERF_RECORD_READ: perf_event_type = 8;
+pub const perf_event_type_PERF_RECORD_SAMPLE: perf_event_type = 9;
+pub const perf_event_type_PERF_RECORD_MMAP2: perf_event_type = 10;
+pub const perf_event_type_PERF_RECORD_AUX: perf_event_type = 11;
+pub const perf_event_type_PERF_RECORD_ITRACE_START: perf_event_type = 12;
+pub const perf_event_type_PERF_RECORD_LOST_SAMPLES: perf_event_type = 13;
+pub const perf_event_type_PERF_RECORD_SWITCH: perf_event_type = 14;
+pub const perf_event_type_PERF_RECORD_SWITCH_CPU_WIDE: perf_event_type = 15;
+pub const perf_event_type_PERF_RECORD_NAMESPACES: perf_event_type = 16;
+pub const perf_event_type_PERF_RECORD_KSYMBOL: perf_event_type = 17;
+pub const perf_event_type_PERF_RECORD_BPF_EVENT: perf_event_type = 18;
+pub const perf_event_type_PERF_RECORD_MAX: perf_event_type = 19;
+pub type perf_event_type = u32;
+pub const perf_record_ksymbol_type_PERF_RECORD_KSYMBOL_TYPE_UNKNOWN: perf_record_ksymbol_type = 0;
+pub const perf_record_ksymbol_type_PERF_RECORD_KSYMBOL_TYPE_BPF: perf_record_ksymbol_type = 1;
+pub const perf_record_ksymbol_type_PERF_RECORD_KSYMBOL_TYPE_MAX: perf_record_ksymbol_type = 2;
+pub type perf_record_ksymbol_type = u32;
+pub const perf_bpf_event_type_PERF_BPF_EVENT_UNKNOWN: perf_bpf_event_type = 0;
+pub const perf_bpf_event_type_PERF_BPF_EVENT_PROG_LOAD: perf_bpf_event_type = 1;
+pub const perf_bpf_event_type_PERF_BPF_EVENT_PROG_UNLOAD: perf_bpf_event_type = 2;
+pub const perf_bpf_event_type_PERF_BPF_EVENT_MAX: perf_bpf_event_type = 3;
+pub type perf_bpf_event_type = u32;
+pub const perf_callchain_context_PERF_CONTEXT_HV: perf_callchain_context = 18446744073709551584;
+pub const perf_callchain_context_PERF_CONTEXT_KERNEL: perf_callchain_context = 18446744073709551488;
+pub const perf_callchain_context_PERF_CONTEXT_USER: perf_callchain_context = 18446744073709551104;
+pub const perf_callchain_context_PERF_CONTEXT_GUEST: perf_callchain_context = 18446744073709549568;
+pub const perf_callchain_context_PERF_CONTEXT_GUEST_KERNEL: perf_callchain_context =
+ 18446744073709549440;
+pub const perf_callchain_context_PERF_CONTEXT_GUEST_USER: perf_callchain_context =
+ 18446744073709549056;
+pub const perf_callchain_context_PERF_CONTEXT_MAX: perf_callchain_context = 18446744073709547521;
+pub type perf_callchain_context = u64;
+#[repr(C)]
+#[derive(Copy, Clone)]
+pub union perf_mem_data_src {
+ pub val: __u64,
+ pub __bindgen_anon_1: perf_mem_data_src__bindgen_ty_1,
+ _bindgen_union_align: u64,
+}
+#[repr(C)]
+#[repr(align(8))]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct perf_mem_data_src__bindgen_ty_1 {
+ pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize], u32>,
+}
+#[test]
+fn bindgen_test_layout_perf_mem_data_src__bindgen_ty_1() {
+ assert_eq!(
+ ::std::mem::size_of::<perf_mem_data_src__bindgen_ty_1>(),
+ 8usize,
+ concat!("Size of: ", stringify!(perf_mem_data_src__bindgen_ty_1))
+ );
+ assert_eq!(
+ ::std::mem::align_of::<perf_mem_data_src__bindgen_ty_1>(),
+ 8usize,
+ concat!("Alignment of ", stringify!(perf_mem_data_src__bindgen_ty_1))
+ );
+}
+impl perf_mem_data_src__bindgen_ty_1 {
+ #[inline]
+ pub fn mem_op(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 5u8) as u64) }
+ }
+ #[inline]
+ pub fn set_mem_op(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(0usize, 5u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn mem_lvl(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(5usize, 14u8) as u64) }
+ }
+ #[inline]
+ pub fn set_mem_lvl(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(5usize, 14u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn mem_snoop(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(19usize, 5u8) as u64) }
+ }
+ #[inline]
+ pub fn set_mem_snoop(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(19usize, 5u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn mem_lock(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 2u8) as u64) }
+ }
+ #[inline]
+ pub fn set_mem_lock(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(24usize, 2u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn mem_dtlb(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(26usize, 7u8) as u64) }
+ }
+ #[inline]
+ pub fn set_mem_dtlb(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(26usize, 7u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn mem_lvl_num(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(33usize, 4u8) as u64) }
+ }
+ #[inline]
+ pub fn set_mem_lvl_num(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(33usize, 4u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn mem_remote(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(37usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_mem_remote(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(37usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn mem_snoopx(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(38usize, 2u8) as u64) }
+ }
+ #[inline]
+ pub fn set_mem_snoopx(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(38usize, 2u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn mem_rsvd(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(40usize, 24u8) as u64) }
+ }
+ #[inline]
+ pub fn set_mem_rsvd(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(40usize, 24u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn new_bitfield_1(
+ mem_op: __u64,
+ mem_lvl: __u64,
+ mem_snoop: __u64,
+ mem_lock: __u64,
+ mem_dtlb: __u64,
+ mem_lvl_num: __u64,
+ mem_remote: __u64,
+ mem_snoopx: __u64,
+ mem_rsvd: __u64,
+ ) -> __BindgenBitfieldUnit<[u8; 8usize], u32> {
+ let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize], u32> =
+ Default::default();
+ __bindgen_bitfield_unit.set(0usize, 5u8, {
+ let mem_op: u64 = unsafe { ::std::mem::transmute(mem_op) };
+ mem_op as u64
+ });
+ __bindgen_bitfield_unit.set(5usize, 14u8, {
+ let mem_lvl: u64 = unsafe { ::std::mem::transmute(mem_lvl) };
+ mem_lvl as u64
+ });
+ __bindgen_bitfield_unit.set(19usize, 5u8, {
+ let mem_snoop: u64 = unsafe { ::std::mem::transmute(mem_snoop) };
+ mem_snoop as u64
+ });
+ __bindgen_bitfield_unit.set(24usize, 2u8, {
+ let mem_lock: u64 = unsafe { ::std::mem::transmute(mem_lock) };
+ mem_lock as u64
+ });
+ __bindgen_bitfield_unit.set(26usize, 7u8, {
+ let mem_dtlb: u64 = unsafe { ::std::mem::transmute(mem_dtlb) };
+ mem_dtlb as u64
+ });
+ __bindgen_bitfield_unit.set(33usize, 4u8, {
+ let mem_lvl_num: u64 = unsafe { ::std::mem::transmute(mem_lvl_num) };
+ mem_lvl_num as u64
+ });
+ __bindgen_bitfield_unit.set(37usize, 1u8, {
+ let mem_remote: u64 = unsafe { ::std::mem::transmute(mem_remote) };
+ mem_remote as u64
+ });
+ __bindgen_bitfield_unit.set(38usize, 2u8, {
+ let mem_snoopx: u64 = unsafe { ::std::mem::transmute(mem_snoopx) };
+ mem_snoopx as u64
+ });
+ __bindgen_bitfield_unit.set(40usize, 24u8, {
+ let mem_rsvd: u64 = unsafe { ::std::mem::transmute(mem_rsvd) };
+ mem_rsvd as u64
+ });
+ __bindgen_bitfield_unit
+ }
+}
+#[test]
+fn bindgen_test_layout_perf_mem_data_src() {
+ assert_eq!(
+ ::std::mem::size_of::<perf_mem_data_src>(),
+ 8usize,
+ concat!("Size of: ", stringify!(perf_mem_data_src))
+ );
+ assert_eq!(
+ ::std::mem::align_of::<perf_mem_data_src>(),
+ 8usize,
+ concat!("Alignment of ", stringify!(perf_mem_data_src))
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_mem_data_src>())).val as *const _ as usize },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_mem_data_src),
+ "::",
+ stringify!(val)
+ )
+ );
+}
+impl Default for perf_mem_data_src {
+ fn default() -> Self {
+ unsafe { ::std::mem::zeroed() }
+ }
+}
+#[repr(C)]
+#[derive(Debug, Default, Copy, Clone)]
+pub struct perf_branch_entry {
+ pub from: __u64,
+ pub to: __u64,
+ pub _bitfield_1: __BindgenBitfieldUnit<[u8; 8usize], u64>,
+}
+#[test]
+fn bindgen_test_layout_perf_branch_entry() {
+ assert_eq!(
+ ::std::mem::size_of::<perf_branch_entry>(),
+ 24usize,
+ concat!("Size of: ", stringify!(perf_branch_entry))
+ );
+ assert_eq!(
+ ::std::mem::align_of::<perf_branch_entry>(),
+ 8usize,
+ concat!("Alignment of ", stringify!(perf_branch_entry))
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_branch_entry>())).from as *const _ as usize },
+ 0usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_branch_entry),
+ "::",
+ stringify!(from)
+ )
+ );
+ assert_eq!(
+ unsafe { &(*(::std::ptr::null::<perf_branch_entry>())).to as *const _ as usize },
+ 8usize,
+ concat!(
+ "Offset of field: ",
+ stringify!(perf_branch_entry),
+ "::",
+ stringify!(to)
+ )
+ );
+}
+impl perf_branch_entry {
+ #[inline]
+ pub fn mispred(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(0usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_mispred(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(0usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn predicted(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(1usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_predicted(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(1usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn in_tx(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(2usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_in_tx(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(2usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn abort(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(3usize, 1u8) as u64) }
+ }
+ #[inline]
+ pub fn set_abort(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(3usize, 1u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn cycles(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(4usize, 16u8) as u64) }
+ }
+ #[inline]
+ pub fn set_cycles(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(4usize, 16u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn type_(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(20usize, 4u8) as u64) }
+ }
+ #[inline]
+ pub fn set_type(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(20usize, 4u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn reserved(&self) -> __u64 {
+ unsafe { ::std::mem::transmute(self._bitfield_1.get(24usize, 40u8) as u64) }
+ }
+ #[inline]
+ pub fn set_reserved(&mut self, val: __u64) {
+ unsafe {
+ let val: u64 = ::std::mem::transmute(val);
+ self._bitfield_1.set(24usize, 40u8, val as u64)
+ }
+ }
+ #[inline]
+ pub fn new_bitfield_1(
+ mispred: __u64,
+ predicted: __u64,
+ in_tx: __u64,
+ abort: __u64,
+ cycles: __u64,
+ type_: __u64,
+ reserved: __u64,
+ ) -> __BindgenBitfieldUnit<[u8; 8usize], u64> {
+ let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 8usize], u64> =
+ Default::default();
+ __bindgen_bitfield_unit.set(0usize, 1u8, {
+ let mispred: u64 = unsafe { ::std::mem::transmute(mispred) };
+ mispred as u64
+ });
+ __bindgen_bitfield_unit.set(1usize, 1u8, {
+ let predicted: u64 = unsafe { ::std::mem::transmute(predicted) };
+ predicted as u64
+ });
+ __bindgen_bitfield_unit.set(2usize, 1u8, {
+ let in_tx: u64 = unsafe { ::std::mem::transmute(in_tx) };
+ in_tx as u64
+ });
+ __bindgen_bitfield_unit.set(3usize, 1u8, {
+ let abort: u64 = unsafe { ::std::mem::transmute(abort) };
+ abort as u64
+ });
+ __bindgen_bitfield_unit.set(4usize, 16u8, {
+ let cycles: u64 = unsafe { ::std::mem::transmute(cycles) };
+ cycles as u64
+ });
+ __bindgen_bitfield_unit.set(20usize, 4u8, {
+ let type_: u64 = unsafe { ::std::mem::transmute(type_) };
+ type_ as u64
+ });
+ __bindgen_bitfield_unit.set(24usize, 40u8, {
+ let reserved: u64 = unsafe { ::std::mem::transmute(reserved) };
+ reserved as u64
+ });
+ __bindgen_bitfield_unit
+ }
+}
+pub const HW_BREAKPOINT_LEN_1: _bindgen_ty_4 = 1;
+pub const HW_BREAKPOINT_LEN_2: _bindgen_ty_4 = 2;
+pub const HW_BREAKPOINT_LEN_3: _bindgen_ty_4 = 3;
+pub const HW_BREAKPOINT_LEN_4: _bindgen_ty_4 = 4;
+pub const HW_BREAKPOINT_LEN_5: _bindgen_ty_4 = 5;
+pub const HW_BREAKPOINT_LEN_6: _bindgen_ty_4 = 6;
+pub const HW_BREAKPOINT_LEN_7: _bindgen_ty_4 = 7;
+pub const HW_BREAKPOINT_LEN_8: _bindgen_ty_4 = 8;
+pub type _bindgen_ty_4 = u32;
+pub const HW_BREAKPOINT_EMPTY: _bindgen_ty_5 = 0;
+pub const HW_BREAKPOINT_R: _bindgen_ty_5 = 1;
+pub const HW_BREAKPOINT_W: _bindgen_ty_5 = 2;
+pub const HW_BREAKPOINT_RW: _bindgen_ty_5 = 3;
+pub const HW_BREAKPOINT_X: _bindgen_ty_5 = 4;
+pub const HW_BREAKPOINT_INVALID: _bindgen_ty_5 = 7;
+pub type _bindgen_ty_5 = u32;
+pub const bp_type_idx_TYPE_INST: bp_type_idx = 0;
+pub const bp_type_idx_TYPE_DATA: bp_type_idx = 1;
+pub const bp_type_idx_TYPE_MAX: bp_type_idx = 2;
+pub type bp_type_idx = u32;
+pub const perf_event_ioctls_ENABLE: perf_event_ioctls = 9216;
+pub const perf_event_ioctls_DISABLE: perf_event_ioctls = 9217;
+pub const perf_event_ioctls_REFRESH: perf_event_ioctls = 9218;
+pub const perf_event_ioctls_RESET: perf_event_ioctls = 9219;
+pub const perf_event_ioctls_PERIOD: perf_event_ioctls = 1074275332;
+pub const perf_event_ioctls_SET_OUTPUT: perf_event_ioctls = 9221;
+pub const perf_event_ioctls_SET_FILTER: perf_event_ioctls = 1074275334;
+pub const perf_event_ioctls_ID: perf_event_ioctls = 2148017159;
+pub const perf_event_ioctls_SET_BPF: perf_event_ioctls = 1074013192;
+pub const perf_event_ioctls_PAUSE_OUTPUT: perf_event_ioctls = 1074013193;
+pub const perf_event_ioctls_QUERY_BPF: perf_event_ioctls = 3221758986;
+pub const perf_event_ioctls_MODIFY_ATTRIBUTES: perf_event_ioctls = 1074275339;
+pub type perf_event_ioctls = u32;
diff --git a/vendor/perf-event-open-sys/src/lib.rs b/vendor/perf-event-open-sys/src/lib.rs
new file mode 100644
index 000000000..314428045
--- /dev/null
+++ b/vendor/perf-event-open-sys/src/lib.rs
@@ -0,0 +1,260 @@
+//! Direct, unsafe bindings for Linux [`perf_event_open`][man] and friends.
+//!
+//! Linux's `perf_event_open` system call provides access to the processor's
+//! performance measurement counters (things like instructions retired, cache
+//! misses, and so on), kernel counters (context switches, page faults), and
+//! many other sources of performance information.
+//!
+//! You can't get the `perf_event_open` function from the `libc` crate the way
+//! you would most other system calls: the Linux standard C library provides
+//! neither a wrapper for this function nor its associated types and constants.
+//!
+//! Rust analogs to the C types and constants from `<linux/perf_event.h>` and
+//! `<linux/hw_breakpoint.h>`, generated with `bindgen`, are available in the
+//! [`bindings`] module.
+//!
+//! There are several ioctls for use with `perf_event_open` file descriptors;
+//! see the [`ioctls`] module for those.
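+//!
+//! For example, here is a minimal sketch (assuming an ordinary Linux target
+//! with working perf support; real code would want real error handling) that
+//! opens a disabled instruction counter and then turns it on and off. For
+//! illustration it passes the raw request values from [`bindings`] straight
+//! to `libc::ioctl` rather than going through the wrappers:
+//!
+//! ```no_run
+//! use perf_event_open_sys as sys;
+//!
+//! // Ask for a hardware instruction counter, initially disabled.
+//! let mut attrs = sys::bindings::perf_event_attr::default();
+//! attrs.size = std::mem::size_of::<sys::bindings::perf_event_attr>() as u32;
+//! attrs.type_ = sys::bindings::perf_type_id_PERF_TYPE_HARDWARE;
+//! attrs.config = sys::bindings::perf_hw_id_PERF_COUNT_HW_INSTRUCTIONS as u64;
+//! attrs.set_disabled(1);
+//!
+//! let fd = unsafe { sys::perf_event_open(&mut attrs, 0, -1, -1, 0) };
+//! assert!(fd >= 0, "perf_event_open failed");
+//!
+//! unsafe {
+//!     // `as _` lets the request value take whatever integer type the
+//!     // platform's `ioctl` declaration expects.
+//!     libc::ioctl(fd, sys::bindings::perf_event_ioctls_ENABLE as _, 0);
+//!     // ... run the code you want to measure ...
+//!     libc::ioctl(fd, sys::bindings::perf_event_ioctls_DISABLE as _, 0);
+//!     libc::close(fd);
+//! }
+//! ```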
+//!
+//! For a safe and convenient interface to this functionality, see the
+//! [`perf_event`] crate.
+//!
+//! ## Using the raw API
+//!
+//! As the kernel interface evolves, the struct and union types from the
+//! [`bindings`] module may acquire new fields. To ensure that your code will
+//! continue to compile against newer versions of this crate, you should
+//! construct values of these types by calling their `Default` implementations,
+//! which return zero-filled values, and then assigning to the fields you care
+//! about. For example:
+//!
+//! ```
+//! use perf_event_open_sys as sys;
+//!
+//! // Construct a zero-filled `perf_event_attr`.
+//! let mut attrs = sys::bindings::perf_event_attr::default();
+//!
+//! // Populate the fields we need.
+//! attrs.size = std::mem::size_of::<sys::bindings::perf_event_attr>() as u32;
+//! attrs.type_ = sys::bindings::perf_type_id_PERF_TYPE_HARDWARE;
+//! attrs.config = sys::bindings::perf_hw_id_PERF_COUNT_HW_INSTRUCTIONS as u64;
+//! attrs.set_disabled(1);
+//! attrs.set_exclude_kernel(1);
+//! attrs.set_exclude_hv(1);
+//!
+//! // Make the system call.
+//! let result = unsafe {
+//! sys::perf_event_open(&mut attrs, 0, -1, -1, 0)
+//! };
+//!
+//! if result < 0 {
+//! // ... handle error
+//! }
+//!
+//! // ... use `result` as a raw file descriptor
+//! ```
+//!
+//! It is not necessary to adjust `size` to what the running kernel expects:
+//! older kernels can accept newer `perf_event_attr` structs, and vice versa. As
+//! long as the `size` field was properly initialized, an error result of
+//! `E2BIG` indicates that the `attrs` structure has requested behavior the
+//! kernel is too old to support.
+//!
+//! When `E2BIG` is returned, the kernel writes the size it expected back to the
+//! `size` field of the `attrs` struct. Again, if you want to retry the call, it
+//! is not necessary to adjust the size you pass to match what the kernel passed
+//! back. The size from the kernel just indicates which version of the API the
+//! kernel supports; see the documentation for the `PERF_EVENT_ATTR_SIZE_VER...`
+//! constants for details.
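+//!
+//! For example, a sketch of detecting that case (reading `errno` through the
+//! standard library and comparing it against the `libc` crate's `E2BIG`
+//! constant) might look like this:
+//!
+//! ```no_run
+//! use perf_event_open_sys as sys;
+//!
+//! let mut attrs = sys::bindings::perf_event_attr::default();
+//! attrs.size = std::mem::size_of::<sys::bindings::perf_event_attr>() as u32;
+//! // ... request whatever features you need ...
+//!
+//! let result = unsafe { sys::perf_event_open(&mut attrs, 0, -1, -1, 0) };
+//! if result < 0 {
+//!     let err = std::io::Error::last_os_error();
+//!     if err.raw_os_error() == Some(libc::E2BIG) {
+//!         // The running kernel is too old for something we asked for;
+//!         // `attrs.size` now holds the size the kernel expected.
+//!         eprintln!("kernel expects a perf_event_attr of {} bytes", attrs.size);
+//!     } else {
+//!         eprintln!("perf_event_open failed: {}", err);
+//!     }
+//! }
+//! ```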
+//!
+//! ## Kernel versions
+//!
+//! The bindings in this crate are generated from the Linux kernel headers
+//! packaged by Fedora as `kernel-headers-5.6.11-100.fc30.x86_64`, which
+//! corresponds to `PERF_EVENT_ATTR_SIZE_VER6`.
+//!
+//! As explained above, bugs aside, it is not necessary to use the version of
+//! these structures that matches the kernel you want to run under, so it should
+//! always be acceptable to use the latest version of this crate, even if you
+//! want to support older kernels.
+//!
+//! This crate's `README.md` file includes instructions on regenerating the
+//! bindings from newer kernel headers. However, this can be a breaking change
+//! for users that have not followed the advice above, so regeneration should
+//! cause a major version increment.
+//!
+//! If you need features that are available only in a more recent version of the
+//! types than this crate provides, please file an issue.
+//!
+//! ## Linux API Backward/Forward Compatibility Strategy
+//!
+//! (This is more detail than necessary if you just want to use the crate. I
+//! want to write this down somewhere so that I have something to refer to when
+//! I forget the details.)
+//!
+//! It is an important principle of Linux kernel development that new versions
+//! of the kernel should not break userspace. If upgrading your kernel breaks a
+//! user program, then that's a bug in the kernel. (This refers to the run-time
+//! interface. I don't know what the stability rules are for the kernel headers:
+//! can new headers cause old code to fail to compile? Anyway, run time is our
+//! concern here.)
+//!
+//! But when you have an open-ended, complex system call like `perf_event_open`,
+//! it's really important for the interface to be able to evolve. Certainly, old
+//! programs must run properly on new kernels, but ideally, it should work the
+//! other way, too: a program built against a newer version of the kernel
+//! headers should run on an older kernel, as long as it only requests features
+//! the old kernel actually supports. That is, simply compiling against newer
+//! headers should not be disqualifying - only using those new headers to
+//! request features the running kernel can't provide should cause an error.
+//!
+//! Consider the specific case of passing a struct like `perf_event_attr` to a
+//! system call like `perf_event_open`. In general, there are two versions of
+//! the struct in play: the version the user program was compiled against, and
+//! the version the running kernel was compiled against. How can we let old
+//! programs call `perf_event_open` on new kernels, and vice versa?
+//!
+//! Linux has a neat strategy for making this work. There are four rules:
+//!
+//! - Every system call that passes a struct to the kernel includes some
+//! indication of how large userspace thinks that struct is. For
+//! `perf_event_open`, it's the `size` field of the `perf_event_attr`
+//!   struct. For `ioctl`s that pass a struct, the size is encoded in a
+//!   bitfield of the `request` value.
+//!
+//! - Fields are never deleted from structs. At most, newer kernel headers may
+//! rename them to '__reserved_foo' or something like that, but once a field
+//! has been placed, its layout in the struct never changes.
+//!
+//! - New fields are added to the end of structs.
+//!
+//! - New fields' semantics are chosen such that filling them with zeros
+//! preserves the old behavior. That is, turning an old struct into a new
+//! struct by extending it with zero bytes should always give you a new
+//! struct with the same meaning the old struct had.
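+//!
+//! As a hypothetical illustration of the last three rules (the struct and
+//! field names here are invented for the example, not taken from the kernel
+//! headers): version 2 of a struct may only append to version 1, existing
+//! fields keep their offsets, and a zero in the new field must mean "do what
+//! version 1 did".
+//!
+//! ```
+//! #[repr(C)]
+//! struct example_attr_v1 {
+//!     size: u32,
+//!     flags: u32,
+//! }
+//!
+//! #[repr(C)]
+//! struct example_attr_v2 {
+//!     size: u32,
+//!     flags: u32,         // existing fields keep their offsets
+//!     extra_feature: u64, // appended at the end; zero requests the old behavior
+//! }
+//! ```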
+//!
+//! Then, the kernel's strategy for receiving structs from userspace (explained
+//! by the kernel comments for `copy_struct_from_user` in
+//! `include/linux/uaccess.h`) is as follows:
+//!
+//! - If the kernel's struct is larger than the one passed from userspace,
+//! then that means the kernel is newer than the userspace program. The
+//! kernel copies the userspace data into the initial bytes of its own
+//! struct, and zeros the remaining bytes. Since zeroed fields have no
+//! effect, the resulting struct properly reflects the user's intent.
+//!
+//! - If the kernel's struct is smaller than the one passed from userspace,
+//! then that means that a userspace program compiled against newer kernel
+//! headers is running on an older kernel. The kernel checks that the excess
+//! bytes in the userspace struct are all zero; if they are not, the system
+//! call returns `E2BIG`, indicating that userspace has requested a feature
+//! the kernel doesn't support. If they are all zero, then the kernel
+//! initializes its own struct with the bytes from the start of the
+//! userspace struct, and drops the rest. Since the dropped bytes were all
+//! zero, they did not affect the requested behavior, and the resulting
+//! struct reflects the user's intent.
+//!
+//! - In either case, the kernel verifies that any `__reserved_foo` fields in
+//! its own version of the struct are zero.
+//!
+//! This covers both the old-on-new and new-on-old cases, and returns an error
+//! only when the call requests functionality the kernel doesn't support.
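+//!
+//! A userspace model of that logic, written in Rust purely as an illustration
+//! (this is a sketch of the rules above, not the kernel's actual code), might
+//! look like this:
+//!
+//! ```
+//! // Model of copying a userspace struct of `user.len()` bytes into a
+//! // kernel-side struct of `ksize` bytes. `None` is the case the kernel
+//! // reports as `E2BIG`.
+//! fn copy_struct_from_user_model(ksize: usize, user: &[u8]) -> Option<Vec<u8>> {
+//!     let mut kernel = vec![0u8; ksize];
+//!     if user.len() > ksize {
+//!         // New userspace on an old kernel: the excess bytes must all be zero.
+//!         if user[ksize..].iter().any(|&byte| byte != 0) {
+//!             return None; // E2BIG
+//!         }
+//!         kernel.copy_from_slice(&user[..ksize]);
+//!     } else {
+//!         // Old userspace on a new kernel: copy what was given, zero the rest.
+//!         kernel[..user.len()].copy_from_slice(user);
+//!     }
+//!     Some(kernel)
+//! }
+//!
+//! // Old-on-new succeeds: the kernel zero-fills the fields it didn't receive.
+//! assert!(copy_struct_from_user_model(8, &[1, 2, 3, 4]).is_some());
+//! // New-on-old succeeds as long as the extra bytes are zero...
+//! assert!(copy_struct_from_user_model(4, &[1, 2, 3, 4, 0, 0]).is_some());
+//! // ...and is rejected when they request something the kernel can't do.
+//! assert!(copy_struct_from_user_model(4, &[1, 2, 3, 4, 5, 0]).is_none());
+//! ```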
+//!
+//! You can find one example of using `perf_event_open` in the [`perf_event`]
+//! crate, which provides a safe interface to a subset of `perf_event_open`'s
+//! functionality.
+//!
+//! [`bindings`]: bindings/index.html
+//! [`ioctls`]: ioctls/index.html
+//! [man]: http://man7.org/linux/man-pages/man2/perf_event_open.2.html
+//! [`perf_event`]: https://crates.io/crates/perf_event
+
+pub mod bindings;
+
+use libc::pid_t;
+use std::os::raw::{c_int, c_ulong};
+
+/// The `perf_event_open` system call.
+///
+/// See the [`perf_event_open(2)`][man] man page for details.
+///
+/// On error, this returns `-1`, and the OS error code is left in the C `errno`
+/// value (available in Rust via `std::io::Error::last_os_error`).
+///
+/// Note: The `attrs` argument needs to be a `*mut` because if the `size` field
+/// is too small or too large, the kernel writes the size it was expecting back
+/// into that field. It might do other things as well.
+///
+/// [man]: http://man7.org/linux/man-pages/man2/perf_event_open.2.html
+pub unsafe fn perf_event_open(
+ attrs: *mut bindings::perf_event_attr,
+ pid: pid_t,
+ cpu: c_int,
+ group_fd: c_int,
+ flags: c_ulong,
+) -> c_int {
+ libc::syscall(
+ bindings::__NR_perf_event_open as libc::c_long,
+ attrs as *const bindings::perf_event_attr,
+ pid,
+ cpu,
+ group_fd,
+ flags,
+ ) as c_int
+}
+
+#[allow(dead_code, non_snake_case)]
+pub mod ioctls {
+ //! Ioctls for use with `perf_event_open` file descriptors.
+ //!
+ //! See the [`perf_event_open(2)`][man] man page for details.
+ //!
+ //! On error, these return `-1` and set the C `errno` value.
+ //!
+ //! [man]: http://man7.org/linux/man-pages/man2/perf_event_open.2.html
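+    //!
+    //! As a sketch (with `fd` standing in for a descriptor returned by
+    //! `perf_event_open`), resetting and enabling a counter might look like:
+    //!
+    //! ```no_run
+    //! # let fd = -1; // placeholder; a real program would use a perf_event_open fd
+    //! unsafe {
+    //!     if perf_event_open_sys::ioctls::RESET(fd, 0) < 0 {
+    //!         // ... inspect std::io::Error::last_os_error()
+    //!     }
+    //!     if perf_event_open_sys::ioctls::ENABLE(fd, 0) < 0 {
+    //!         // ... handle error
+    //!     }
+    //! }
+    //! ```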
+ use crate::bindings::{self, perf_event_attr, perf_event_query_bpf};
+ use std::os::raw::{c_char, c_int, c_uint, c_ulong};
+
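+    // `define_ioctls!` expands each `{ NAME, BINDING, ARG_TYPE }` line below into
+    // a `define_ioctl!` call; `define_ioctl!` in turn emits a thin `pub unsafe fn`
+    // wrapper that forwards to `untyped_ioctl` with the given request value.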
+ macro_rules! define_ioctls {
+ ( $( $args:tt )* ) => {
+ $(
+ define_ioctl!($args);
+ )*
+ }
+ }
+
+ macro_rules! define_ioctl {
+ ({ $name:ident, $ioctl:ident, $arg_type:ty }) => {
+ pub unsafe fn $name(fd: c_int, arg: $arg_type) -> c_int {
+ untyped_ioctl(fd, bindings::$ioctl, arg)
+ }
+ };
+ }
+
+ define_ioctls! {
+ { ENABLE, perf_event_ioctls_ENABLE, c_uint }
+ { DISABLE, perf_event_ioctls_DISABLE, c_uint }
+ { REFRESH, perf_event_ioctls_REFRESH, c_int }
+ { RESET, perf_event_ioctls_RESET, c_uint }
+ { PERIOD, perf_event_ioctls_PERIOD, u64 }
+ { SET_OUTPUT, perf_event_ioctls_SET_OUTPUT, c_int }
+ { SET_FILTER, perf_event_ioctls_SET_FILTER, *mut c_char }
+ { ID, perf_event_ioctls_ID, *mut u64 }
+ { SET_BPF, perf_event_ioctls_SET_BPF, u32 }
+ { PAUSE_OUTPUT, perf_event_ioctls_PAUSE_OUTPUT, u32 }
+ { QUERY_BPF, perf_event_ioctls_QUERY_BPF, *mut perf_event_query_bpf }
+ { MODIFY_ATTRIBUTES, perf_event_ioctls_MODIFY_ATTRIBUTES, *mut perf_event_attr }
+ }
+
+ unsafe fn untyped_ioctl<A>(
+ fd: c_int,
+ ioctl: bindings::perf_event_ioctls,
+ arg: A,
+ ) -> c_int {
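+        // The libc crate mirrors each platform's `ioctl` declaration: musl
+        // declares the request argument as `c_int`, glibc as `c_ulong`, so the
+        // request value is cast accordingly.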
+ #[cfg(target_env = "musl")]
+ return libc::ioctl(fd, ioctl as c_int, arg);
+
+ #[cfg(not(target_env = "musl"))]
+ libc::ioctl(fd, ioctl as c_ulong, arg)
+ }
+}
diff --git a/vendor/perf-event-open-sys/wrapper.h b/vendor/perf-event-open-sys/wrapper.h
new file mode 100644
index 000000000..1838eea99
--- /dev/null
+++ b/vendor/perf-event-open-sys/wrapper.h
@@ -0,0 +1,23 @@
+// This file is consumed by bindgen, called from our build.rs file.
+
+#include <linux/perf_event.h>
+#include <linux/hw_breakpoint.h>
+
+// for __NR_perf_event_open
+#include <asm/unistd.h>
+
+// bindgen won't capture preprocessor macro definitions, so we have to do this.
+enum perf_event_ioctls {
+ ENABLE = PERF_EVENT_IOC_ENABLE,
+ DISABLE = PERF_EVENT_IOC_DISABLE,
+ REFRESH = PERF_EVENT_IOC_REFRESH,
+ RESET = PERF_EVENT_IOC_RESET,
+ PERIOD = PERF_EVENT_IOC_PERIOD,
+ SET_OUTPUT = PERF_EVENT_IOC_SET_OUTPUT,
+ SET_FILTER = PERF_EVENT_IOC_SET_FILTER,
+ ID = PERF_EVENT_IOC_ID,
+ SET_BPF = PERF_EVENT_IOC_SET_BPF,
+ PAUSE_OUTPUT = PERF_EVENT_IOC_PAUSE_OUTPUT,
+ QUERY_BPF = PERF_EVENT_IOC_QUERY_BPF,
+ MODIFY_ATTRIBUTES = PERF_EVENT_IOC_MODIFY_ATTRIBUTES,
+};