author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-06 01:02:30 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-05-06 01:02:30 +0000
commit     76cb841cb886eef6b3bee341a2266c76578724ad (patch)
tree       f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /include/net/gen_stats.h
parent     Initial commit. (diff)
Adding upstream version 4.19.249.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'include/net/gen_stats.h')
 -rw-r--r--   include/net/gen_stats.h   73
 1 file changed, 73 insertions(+), 0 deletions(-)
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
new file mode 100644
index 000000000..883bb9085
--- /dev/null
+++ b/include/net/gen_stats.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_GEN_STATS_H
+#define __NET_GEN_STATS_H
+
+#include <linux/gen_stats.h>
+#include <linux/socket.h>
+#include <linux/rtnetlink.h>
+#include <linux/pkt_sched.h>
+
+struct gnet_stats_basic_cpu {
+	struct gnet_stats_basic_packed bstats;
+	struct u64_stats_sync syncp;
+};
+
+struct net_rate_estimator;
+
+struct gnet_dump {
+	spinlock_t *      lock;
+	struct sk_buff *  skb;
+	struct nlattr *   tail;
+
+	/* Backward compatibility */
+	int               compat_tc_stats;
+	int               compat_xstats;
+	int               padattr;
+	void *            xstats;
+	int               xstats_len;
+	struct tc_stats   tc_stats;
+};
+
+int gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
+			  struct gnet_dump *d, int padattr);
+
+int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
+				 int tc_stats_type, int xstats_type,
+				 spinlock_t *lock, struct gnet_dump *d,
+				 int padattr);
+
+int gnet_stats_copy_basic(const seqcount_t *running,
+			  struct gnet_dump *d,
+			  struct gnet_stats_basic_cpu __percpu *cpu,
+			  struct gnet_stats_basic_packed *b);
+void __gnet_stats_copy_basic(const seqcount_t *running,
+			     struct gnet_stats_basic_packed *bstats,
+			     struct gnet_stats_basic_cpu __percpu *cpu,
+			     struct gnet_stats_basic_packed *b);
+int gnet_stats_copy_rate_est(struct gnet_dump *d,
+			     struct net_rate_estimator __rcu **ptr);
+int gnet_stats_copy_queue(struct gnet_dump *d,
+			  struct gnet_stats_queue __percpu *cpu_q,
+			  struct gnet_stats_queue *q, __u32 qlen);
+void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
+			     const struct gnet_stats_queue __percpu *cpu_q,
+			     const struct gnet_stats_queue *q, __u32 qlen);
+int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len);
+
+int gnet_stats_finish_copy(struct gnet_dump *d);
+
+int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
+		      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+		      struct net_rate_estimator __rcu **rate_est,
+		      spinlock_t *lock,
+		      seqcount_t *running, struct nlattr *opt);
+void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
+int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
+			  struct gnet_stats_basic_cpu __percpu *cpu_bstats,
+			  struct net_rate_estimator __rcu **ptr,
+			  spinlock_t *lock,
+			  seqcount_t *running, struct nlattr *opt);
+bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
+bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
+			struct gnet_stats_rate_est64 *sample);
+#endif
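For context, the dump half of this API is driven by callers such as the qdisc netlink dump path. Below is a minimal sketch of such a caller; it is not part of this commit, the struct Qdisc field names (cpu_bstats, bstats, rate_est, cpu_qstats, qstats) are assumed from net/sch_generic.h, and the TCA_* attribute constants come from linux/rtnetlink.h.

	#include <net/gen_stats.h>
	#include <net/sch_generic.h>	/* struct Qdisc layout assumed, not defined here */

	/* Hypothetical dump helper, for illustration only. */
	static int example_dump_qdisc_stats(struct sk_buff *skb, struct Qdisc *q,
					    __u32 qlen)
	{
		struct gnet_dump d;

		/* Open a TCA_STATS2 nest; the legacy TCA_STATS/TCA_XSTATS
		 * attributes are filled in by gnet_stats_finish_copy(). */
		if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
						 TCA_XSTATS, NULL, &d, TCA_PAD) < 0)
			return -1;

		/* Byte/packet counters, rate-estimator sample and queue stats. */
		if (gnet_stats_copy_basic(NULL, &d, q->cpu_bstats, &q->bstats) < 0 ||
		    gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
		    gnet_stats_copy_queue(&d, q->cpu_qstats, &q->qstats, qlen) < 0)
			return -1;

		/* Close the nest and emit the backward-compatibility TLVs. */
		return gnet_stats_finish_copy(&d);
	}

The estimator half of the header (gen_new_estimator(), gen_replace_estimator(), gen_kill_estimator()) manages the rate estimator that gnet_stats_copy_rate_est() samples in the sketch above.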