/*
 * include/haproxy/activity-t.h
 * This file contains structure declarations for activity measurements.
 *
 * Copyright (C) 2000-2020 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 */

#ifndef _HAPROXY_ACTIVITY_T_H
#define _HAPROXY_ACTIVITY_T_H

#include <haproxy/api-t.h>
#include <haproxy/freq_ctr-t.h>

/* bit fields for the "profiling" global variable */
#define HA_PROF_TASKS_OFF   0x00000000     /* per-task CPU profiling forced disabled */
#define HA_PROF_TASKS_AOFF  0x00000001     /* per-task CPU profiling off (automatic) */
#define HA_PROF_TASKS_AON   0x00000002     /* per-task CPU profiling on (automatic) */
#define HA_PROF_TASKS_ON    0x00000003     /* per-task CPU profiling forced enabled */
#define HA_PROF_TASKS_MASK  0x00000003     /* per-task CPU profiling mask */

#define HA_PROF_MEMORY      0x00000004     /* memory profiling */
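
/* Illustrative sketch (the helper name and the idea of passing the current
 * value as a parameter are assumptions, not an existing HAProxy function):
 * the task-profiling state is obtained by masking the mode bits of the
 * "profiling" global variable mentioned above; both "automatic on" and
 * "forced on" mean that per-task CPU profiling is currently running.
 */
static inline int prof_tasks_active_example(unsigned int prof_value)
{
	return (prof_value & HA_PROF_TASKS_MASK) >= HA_PROF_TASKS_AON;
}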


#ifdef USE_MEMORY_PROFILING
/* Elements used by memory profiling. These determine the number of hash
 * buckets used to store the stats.
 */
#define MEMPROF_HASH_BITS 10
#define MEMPROF_HASH_BUCKETS (1U << MEMPROF_HASH_BITS)
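
/* Illustrative sketch (the function name and the mixing constant are
 * assumptions, not HAProxy's actual hash): a caller's return address is
 * expected to be hashed and masked down to one of the MEMPROF_HASH_BUCKETS
 * slots defined above.
 */
static inline unsigned int memprof_bucket_example(const void *caller)
{
	unsigned long long p = (unsigned long long)caller;

	/* 64-bit mix so that nearby code addresses spread across buckets */
	p ^= p >> 33;
	p *= 0xff51afd7ed558ccdULL;
	p ^= p >> 33;
	return (unsigned int)p & (MEMPROF_HASH_BUCKETS - 1);
}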

enum memprof_method {
	MEMPROF_METH_UNKNOWN = 0,
	MEMPROF_METH_MALLOC,
	MEMPROF_METH_CALLOC,
	MEMPROF_METH_REALLOC,
	MEMPROF_METH_FREE,
	MEMPROF_METH_P_ALLOC, // pool_alloc()
	MEMPROF_METH_P_FREE,  // pool_free()
	MEMPROF_METH_METHODS /* count, must be last */
};

/* stats:
 *   - malloc increases alloc_calls
 *   - free increases free_calls (only for non-null pointers)
 *   - realloc increases either side depending on the size change.
 * When the real size is known (malloc_usable_size()), it is used in free_tot
 * and alloc_tot, otherwise the requested size is reported in alloc_tot and
 * zero in free_tot.
 */
struct memprof_stats {
	const void *caller;
	enum memprof_method method;
	/* 4-7 bytes hole here */
	unsigned long long alloc_calls;
	unsigned long long free_calls;
	unsigned long long alloc_tot;
	unsigned long long free_tot;
	void *info; // for pools, ptr to the pool
	void *pad;  // pad to 64
};
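
/* Illustrative sketch (assumed helper name; the real allocation hooks live
 * outside this header, in the activity code): accounting one successful
 * malloc() into its bucket touches both the call counter and the byte
 * counter. Atomic updates are assumed here since multiple threads may
 * allocate concurrently; GCC/Clang builtins keep the example self-contained.
 */
static inline void memprof_account_alloc_example(struct memprof_stats *bin,
						 unsigned long long returned_size)
{
	__atomic_fetch_add(&bin->alloc_calls, 1ULL, __ATOMIC_RELAXED);
	__atomic_fetch_add(&bin->alloc_tot, returned_size, __ATOMIC_RELAXED);
}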
#endif

/* Per-thread activity reports. It is important that this structure is aligned
 * on cache lines because some elements are updated very often. Most counters
 * are fine on 32 bits since this is meant to be consulted iteratively during
 * debugging/troubleshooting sessions.
 */
struct activity {
	unsigned int loops;        // complete loops in run_poll_loop()
	unsigned int wake_tasks;   // active tasks prevented poll() from sleeping
	unsigned int wake_signal;  // pending signal prevented poll() from sleeping
	unsigned int poll_io;      // number of times poll() reported I/O events
	unsigned int poll_exp;     // number of times poll() sees an expired timeout (includes wake_*)
	unsigned int poll_drop_fd; // poller dropped a dead FD from the update list
	unsigned int poll_skip_fd; // poller skipped another thread's FD
	unsigned int conn_dead;    // conn_fd_handler woke up on an FD indicating a dead connection
	unsigned int stream_calls; // calls to process_stream()
	unsigned int ctxsw;        // total number of context switches
	unsigned int tasksw;       // total number of task switches
	unsigned int empty_rq;     // calls to process_runnable_tasks() with nothing for the thread
	unsigned int long_rq;      // process_runnable_tasks() left with tasks in the run queue
	unsigned int cpust_total;  // sum of half-ms stolen per thread
	unsigned int fd_takeover;  // number of times this thread stole another one's FD
	unsigned int check_adopted;// number of times a check was migrated to this thread
	ALWAYS_ALIGN(64);

	struct freq_ctr cpust_1s;  // avg amount of half-ms stolen over last second
	struct freq_ctr cpust_15s; // avg amount of half-ms stolen over last 15s
	unsigned int avg_loop_us;  // average run time per loop over last 1024 runs
	unsigned int accepted;     // accepted incoming connections
	unsigned int accq_pushed;  // accept queue connections pushed
	unsigned int accq_full;    // accept queue connections not pushed because the queue was full
	unsigned int pool_fail;    // failed a pool allocation
	unsigned int buf_wait;     // waited on a buffer allocation
	unsigned int check_started;// number of times a check was started on this thread
#if defined(DEBUG_DEV)
	/* keep these ones at the end */
	unsigned int ctr0;         // general purpose debug counter
	unsigned int ctr1;         // general purpose debug counter
	unsigned int ctr2;         // general purpose debug counter
#endif
	char __pad[0]; // unused except to check remaining room
	char __end[0] __attribute__((aligned(64))); // align size to 64.
};
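
/* Illustrative sketch (the helper name is an assumption; the actual array of
 * per-thread structures and its accessors are declared elsewhere): each slot
 * is meant to be written by its owning thread, so plain increments are
 * expected to be enough, which is also why the structure above is padded to
 * cache-line boundaries.
 */
static inline void activity_count_stream_call_example(struct activity *act)
{
	act->stream_calls++; /* single writer: the owning thread */
}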

/* 256 entries for caller*callee pairs should be more than sufficient (~45 usually seen) */
#define SCHED_ACT_HASH_BITS 8
#define SCHED_ACT_HASH_BUCKETS (1U << SCHED_ACT_HASH_BITS)

/* Global profiling stats from the scheduler: each entry corresponds to a
 * task or tasklet ->process function pointer, with a number of calls and
 * total CPU and latency times. Each entry is unique, except entry 0 which
 * collects colliding hashes (i.e. all others). All of these must be
 * accessed atomically.
 */
struct sched_activity {
	const void *func;
	const struct ha_caller *caller;
	uint64_t calls;
	uint64_t cpu_time;
	uint64_t lat_time;
};
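
/* Illustrative sketch (assumed helper; the scheduler performs the real
 * updates using the project's own atomic macros): since these buckets are
 * shared by all threads, accumulating one task call must be done atomically,
 * as the comment above requires. GCC/Clang builtins are used here only to
 * keep the example self-contained.
 */
static inline void sched_activity_account_example(struct sched_activity *sa,
						  uint64_t cpu_inc, uint64_t lat_inc)
{
	__atomic_fetch_add(&sa->calls, 1, __ATOMIC_RELAXED);
	__atomic_fetch_add(&sa->cpu_time, cpu_inc, __ATOMIC_RELAXED);
	__atomic_fetch_add(&sa->lat_time, lat_inc, __ATOMIC_RELAXED);
}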

#endif /* _HAPROXY_ACTIVITY_T_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */