path: root/python.d/web_log.chart.py
author     Federico Ceratto <federico.ceratto@gmail.com>  2018-03-27 21:28:21 +0000
committer  Federico Ceratto <federico.ceratto@gmail.com>  2018-03-27 21:28:21 +0000
commit     d4dd00f58a502c9ca4b63e36ce6bc7a9945dc63c (patch)
tree       faac99f51f182bb8c0a03e95e393d421ac9ddf42 /python.d/web_log.chart.py
parent     New upstream version 1.9.0+dfsg (diff)
download   netdata-d4dd00f58a502c9ca4b63e36ce6bc7a9945dc63c.tar.xz
           netdata-d4dd00f58a502c9ca4b63e36ce6bc7a9945dc63c.zip
New upstream version 1.10.0+dfsg (upstream/1.10.0+dfsg)
Diffstat (limited to 'python.d/web_log.chart.py')
-rw-r--r--  python.d/web_log.chart.py  70
1 file changed, 65 insertions(+), 5 deletions(-)
diff --git a/python.d/web_log.chart.py b/python.d/web_log.chart.py
index 954ecd41..be9baba9 100644
--- a/python.d/web_log.chart.py
+++ b/python.d/web_log.chart.py
@@ -5,6 +5,7 @@
import bisect
import re
import os
+import sys
from collections import namedtuple, defaultdict
from copy import deepcopy
@@ -21,7 +22,8 @@ from bases.FrameworkServices.LogService import LogService
ORDER_APACHE_CACHE = ['apache_cache']
-ORDER_WEB = ['response_statuses', 'response_codes', 'bandwidth', 'response_time', 'response_time_upstream',
+ORDER_WEB = ['response_statuses', 'response_codes', 'bandwidth',
+ 'response_time', 'response_time_hist', 'response_time_upstream', 'response_time_upstream_hist',
'requests_per_url', 'requests_per_user_defined', 'http_method', 'http_version',
'requests_per_ipproto', 'clients', 'clients_all']
@@ -55,6 +57,10 @@ CHARTS_WEB = {
['resp_time_max', 'max', 'incremental', 1, 1000],
['resp_time_avg', 'avg', 'incremental', 1, 1000]
]},
+ 'response_time_hist': {
+ 'options': [None, 'Processing Time Histogram', 'requests/s', 'timings', 'web_log.response_time_hist', 'line'],
+ 'lines': [
+ ]},
'response_time_upstream': {
'options': [None, 'Processing Time Upstream', 'milliseconds', 'timings',
'web_log.response_time_upstream', 'area'],
@@ -63,6 +69,11 @@ CHARTS_WEB = {
['resp_time_upstream_max', 'max', 'incremental', 1, 1000],
['resp_time_upstream_avg', 'avg', 'incremental', 1, 1000]
]},
+ 'response_time_upstream_hist': {
+ 'options': [None, 'Processing Time Upstream Histogram', 'requests/s', 'timings',
+ 'web_log.response_time_upstream_hist', 'line'],
+ 'lines': [
+ ]},
'clients': {
'options': [None, 'Current Poll Unique Client IPs', 'unique ips', 'clients', 'web_log.clients', 'stacked'],
'lines': [
@@ -347,8 +358,30 @@ class Web:
"""
if 'resp_time' not in match_dict:
self.order.remove('response_time')
+ self.order.remove('response_time_hist')
if 'resp_time_upstream' not in match_dict:
self.order.remove('response_time_upstream')
+ self.order.remove('response_time_upstream_hist')
+
+ # Add 'response_time_hist' and 'response_time_upstream_hist' charts only if a histogram is specified in the configuration
+ histogram = self.configuration.get('histogram', None)
+ if isinstance(histogram, list):
+ self.storage['bucket_index'] = histogram[:]
+ self.storage['bucket_index'].append(sys.maxsize)  # sentinel upper bound for the '+Inf' bucket
+ self.storage['buckets'] = [0] * (len(histogram) + 1)
+ self.storage['upstream_buckets'] = [0] * (len(histogram) + 1)
+ hist_lines = self.definitions['response_time_hist']['lines']
+ upstream_hist_lines = self.definitions['response_time_upstream_hist']['lines']
+ for i, le in enumerate(histogram):
+ hist_key = "response_time_hist_%d" % i
+ upstream_hist_key = "response_time_upstream_hist_%d" % i
+ hist_lines.append([hist_key, str(le), 'incremental', 1, 1])
+ upstream_hist_lines.append([upstream_hist_key, str(le), 'incremental', 1, 1])
+
+ hist_lines.append(["response_time_hist_%d" % len(histogram), '+Inf', 'incremental', 1, 1])
+ upstream_hist_lines.append(["response_time_upstream_hist_%d" % len(histogram), '+Inf', 'incremental', 1, 1])
+ elif histogram is not None:
+ self.error("expected a list as 'histogram' option, but got {0}".format(type(histogram)))
if not self.configuration.get('all_time', True):
self.order.remove('clients_all')
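
For orientation, here is a minimal standalone sketch of what the block above builds for a hypothetical histogram option of [10, 100, 500] in a web_log.conf job (the option name comes from the patch, the bucket values are invented): a sentinel-terminated list of bucket bounds, zeroed counters, and one chart dimension per bound plus a final '+Inf' dimension.

import sys

histogram = [10, 100, 500]                    # hypothetical configured bucket bounds
bucket_index = histogram[:] + [sys.maxsize]   # bucket upper bounds, '+Inf' sentinel last
buckets = [0] * len(bucket_index)             # one counter per bucket
hist_lines = []                               # chart dimensions appended at runtime

for i, le in enumerate(histogram):
    hist_lines.append(['response_time_hist_%d' % i, str(le), 'incremental', 1, 1])
hist_lines.append(['response_time_hist_%d' % len(histogram), '+Inf', 'incremental', 1, 1])

print(bucket_index)    # [10, 100, 500, 9223372036854775807] on a 64-bit build
print(hist_lines[-1])  # ['response_time_hist_3', '+Inf', 'incremental', 1, 1]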
@@ -431,11 +464,15 @@ class Web:
resp_length = match_dict['resp_length'] if '-' not in match_dict['resp_length'] else 0
self.data['resp_length'] += int(resp_length)
if 'resp_time' in match_dict:
- get_timings(timings=timings['resp_time'],
- time=self.storage['func_resp_time'](float(match_dict['resp_time'])))
+ resp_time = self.storage['func_resp_time'](float(match_dict['resp_time']))
+ get_timings(timings=timings['resp_time'], time=resp_time)
+ if 'bucket_index' in self.storage:
+ get_hist(self.storage['bucket_index'], self.storage['buckets'], resp_time / 1000)
if 'resp_time_upstream' in match_dict and match_dict['resp_time_upstream'] != '-':
- get_timings(timings=timings['resp_time_upstream'],
- time=self.storage['func_resp_time'](float(match_dict['resp_time_upstream'])))
+ resp_time_upstream = self.storage['func_resp_time'](float(match_dict['resp_time_upstream']))
+ get_timings(timings=timings['resp_time_upstream'], time=resp_time_upstream)
+ if 'bucket_index' in self.storage:
+ get_hist(self.storage['bucket_index'], self.storage['upstream_buckets'], resp_time_upstream / 1000)
# requests per ip proto
proto = 'ipv6' if ':' in match_dict['address'] else 'ipv4'
self.data['req_' + proto] += 1
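
A note on units, which the patch does not state explicitly: as I read the collector, self.storage['func_resp_time'] normalizes the parsed response time to microseconds, so the division by 1000 above converts it to milliseconds before bucketing, and the configured bucket bounds are therefore millisecond values. A hedged round trip, with an assumed stand-in for the real normalization function:

func_resp_time = lambda t: t * 1000000     # assumed normalization: seconds -> microseconds
resp_time = func_resp_time(float('0.25'))  # 250000.0 microseconds
print(resp_time / 1000)                    # 250.0, compared against bucket bounds read as milliseconds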
@@ -456,6 +493,17 @@ class Web:
self.data[elem + '_min'] += timings[elem]['minimum']
self.data[elem + '_avg'] += timings[elem]['summary'] / timings[elem]['count']
self.data[elem + '_max'] += timings[elem]['maximum']
+
+ # histogram
+ if 'bucket_index' in self.storage:
+ buckets = self.storage['buckets']
+ upstream_buckets = self.storage['upstream_buckets']
+ for i in range(len(self.storage['bucket_index'])):
+ hist_key = "response_time_hist_%d" % i
+ upstream_hist_key = "response_time_upstream_hist_%d" % i
+ self.data[hist_key] = buckets[i]
+ self.data[upstream_hist_key] = upstream_buckets[i]
+
return self.data
def find_regex(self, last_line):
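
Since the dimensions are declared 'incremental' and the chart units are requests/s, netdata renders the per-second rate of these monotonically growing counters. With the hypothetical three-bucket configuration from the earlier sketch, one poll's snapshot would look roughly like this (counts invented, cumulative "le"-style buckets):

data_snapshot = {
    'response_time_hist_0': 12,   # requests answered in <= 10 ms
    'response_time_hist_1': 45,   # <= 100 ms, includes the 12 above
    'response_time_hist_2': 60,   # <= 500 ms
    'response_time_hist_3': 61,   # all timed requests, the '+Inf' bucket
}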
@@ -903,6 +951,18 @@ def get_timings(timings, time):
timings['summary'] += time
timings['count'] += 1
+def get_hist(index, buckets, time):
+ """
+ :param index: histogram bucket upper bounds (e.g. [10, 50, 100, 150, ...], '+Inf' sentinel last)
+ :param buckets: histogram bucket counters, same length as index
+ :param time: response time to sort into the cumulative buckets
+ :return: None
+ """
+ for i in range(len(index)-1, -1, -1):
+ if time <= index[i]:
+ buckets[i] += 1
+ else:
+ break
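
A quick worked example of the get_hist() helper above (standalone, values invented): the loop walks the bounds from largest to smallest and increments every bucket whose bound is at least the observed time, which is what makes the buckets cumulative.

import sys

def get_hist(index, buckets, time):
    # increment every bucket whose upper bound is >= time, largest first
    for i in range(len(index) - 1, -1, -1):
        if time <= index[i]:
            buckets[i] += 1
        else:
            break

index = [10, 50, 100, sys.maxsize]   # bucket bounds plus '+Inf' sentinel
buckets = [0, 0, 0, 0]
get_hist(index, buckets, 30)         # a 30 ms sample
print(buckets)                       # [0, 1, 1, 1]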
def address_not_in_pool(pool, address, pool_size):
"""