diff options
Diffstat (limited to 'collectors/python.d.plugin/python_modules/third_party')
7 files changed, 0 insertions, 1678 deletions
diff --git a/collectors/python.d.plugin/python_modules/third_party/__init__.py b/collectors/python.d.plugin/python_modules/third_party/__init__.py deleted file mode 100644 index e69de29bb..000000000 --- a/collectors/python.d.plugin/python_modules/third_party/__init__.py +++ /dev/null diff --git a/collectors/python.d.plugin/python_modules/third_party/boinc_client.py b/collectors/python.d.plugin/python_modules/third_party/boinc_client.py deleted file mode 100644 index ec21779a0..000000000 --- a/collectors/python.d.plugin/python_modules/third_party/boinc_client.py +++ /dev/null @@ -1,515 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# -# client.py - Somewhat higher-level GUI_RPC API for BOINC core client -# -# Copyright (C) 2013 Rodrigo Silva (MestreLion) <linux@rodrigosilva.com> -# Copyright (C) 2017 Austin S. Hemmelgarn -# -# SPDX-License-Identifier: GPL-3.0 - -# Based on client/boinc_cmd.cpp - -import hashlib -import socket -import sys -import time -from functools import total_ordering -from xml.etree import ElementTree - -GUI_RPC_PASSWD_FILE = "/var/lib/boinc/gui_rpc_auth.cfg" - -GUI_RPC_HOSTNAME = None # localhost -GUI_RPC_PORT = 31416 -GUI_RPC_TIMEOUT = 1 - -class Rpc(object): - ''' Class to perform GUI RPC calls to a BOINC core client. - Usage in a context manager ('with' block) is recommended to ensure - disconnect() is called. Using the same instance for all calls is also - recommended so it reuses the same socket connection - ''' - def __init__(self, hostname="", port=0, timeout=0, text_output=False): - self.hostname = hostname - self.port = port - self.timeout = timeout - self.sock = None - self.text_output = text_output - - @property - def sockargs(self): - return (self.hostname, self.port, self.timeout) - - def __enter__(self): self.connect(*self.sockargs); return self - def __exit__(self, *args): self.disconnect() - - def connect(self, hostname="", port=0, timeout=0): - ''' Connect to (hostname, port) with timeout in seconds. 
- Hostname defaults to None (localhost), and port to 31416 - Calling multiple times will disconnect previous connection (if any), - and (re-)connect to host. - ''' - if self.sock: - self.disconnect() - - self.hostname = hostname or GUI_RPC_HOSTNAME - self.port = port or GUI_RPC_PORT - self.timeout = timeout or GUI_RPC_TIMEOUT - - self.sock = socket.create_connection(self.sockargs[0:2], self.sockargs[2]) - - def disconnect(self): - ''' Disconnect from host. Calling multiple times is OK (idempotent) - ''' - if self.sock: - self.sock.close() - self.sock = None - - def call(self, request, text_output=None): - ''' Do an RPC call. Pack and send the XML request and return the - unpacked reply. request can be either plain XML text or a - xml.etree.ElementTree.Element object. Return ElementTree.Element - or XML text according to text_output flag. - Will auto-connect if not connected. - ''' - if text_output is None: - text_output = self.text_output - - if not self.sock: - self.connect(*self.sockargs) - - if not isinstance(request, ElementTree.Element): - request = ElementTree.fromstring(request) - - # pack request - end = '\003' - if sys.version_info[0] < 3: - req = "<boinc_gui_rpc_request>\n{0}\n</boinc_gui_rpc_request>\n{1}".format(ElementTree.tostring(request).replace(' />', '/>'), end) - else: - req = "<boinc_gui_rpc_request>\n{0}\n</boinc_gui_rpc_request>\n{1}".format(ElementTree.tostring(request, encoding='unicode').replace(' />', '/>'), end).encode() - - try: - self.sock.sendall(req) - except (socket.error, socket.herror, socket.gaierror, socket.timeout): - raise - - req = "" - while True: - try: - buf = self.sock.recv(8192) - if not buf: - raise socket.error("No data from socket") - if sys.version_info[0] >= 3: - buf = buf.decode() - except socket.error: - raise - n = buf.find(end) - if not n == -1: break - req += buf - req += buf[:n] - - # unpack reply (remove root tag, ie: first and last lines) - req = '\n'.join(req.strip().rsplit('\n')[1:-1]) - - if text_output: - 
return req - else: - return ElementTree.fromstring(req) - -def setattrs_from_xml(obj, xml, attrfuncdict={}): - ''' Helper to set values for attributes of a class instance by mapping - matching tags from a XML file. - attrfuncdict is a dict of functions to customize value data type of - each attribute. It falls back to simple int/float/bool/str detection - based on values defined in __init__(). This would not be needed if - Boinc used standard RPC protocol, which includes data type in XML. - ''' - if not isinstance(xml, ElementTree.Element): - xml = ElementTree.fromstring(xml) - for e in list(xml): - if hasattr(obj, e.tag): - attr = getattr(obj, e.tag) - attrfunc = attrfuncdict.get(e.tag, None) - if attrfunc is None: - if isinstance(attr, bool): attrfunc = parse_bool - elif isinstance(attr, int): attrfunc = parse_int - elif isinstance(attr, float): attrfunc = parse_float - elif isinstance(attr, str): attrfunc = parse_str - elif isinstance(attr, list): attrfunc = parse_list - else: attrfunc = lambda x: x - setattr(obj, e.tag, attrfunc(e)) - else: - pass - #print "class missing attribute '%s': %r" % (e.tag, obj) - return obj - - -def parse_bool(e): - ''' Helper to convert ElementTree.Element.text to boolean. - Treat '<foo/>' (and '<foo>[[:blank:]]</foo>') as True - Treat '0' and 'false' as False - ''' - if e.text is None: - return True - else: - return bool(e.text) and not e.text.strip().lower() in ('0', 'false') - - -def parse_int(e): - ''' Helper to convert ElementTree.Element.text to integer. - Treat '<foo/>' (and '<foo></foo>') as 0 - ''' - # int(float()) allows casting to int a value expressed as float in XML - return 0 if e.text is None else int(float(e.text.strip())) - - -def parse_float(e): - ''' Helper to convert ElementTree.Element.text to float. ''' - return 0.0 if e.text is None else float(e.text.strip()) - - -def parse_str(e): - ''' Helper to convert ElementTree.Element.text to string. 
''' - return "" if e.text is None else e.text.strip() - - -def parse_list(e): - ''' Helper to convert ElementTree.Element to list. For now, simply return - the list of root element's children - ''' - return list(e) - - -class Enum(object): - UNKNOWN = -1 # Not in original API - - @classmethod - def name(cls, value): - ''' Quick-and-dirty fallback for getting the "name" of an enum item ''' - - # value as string, if it matches an enum attribute. - # Allows short usage as Enum.name("VALUE") besides Enum.name(Enum.VALUE) - if hasattr(cls, str(value)): - return cls.name(getattr(cls, value, None)) - - # value not handled in subclass name() - for k, v in cls.__dict__.items(): - if v == value: - return k.lower().replace('_', ' ') - - # value not found - return cls.name(Enum.UNKNOWN) - - -class CpuSched(Enum): - ''' values of ACTIVE_TASK::scheduler_state and ACTIVE_TASK::next_scheduler_state - "SCHEDULED" is synonymous with "executing" except when CPU throttling - is in use. - ''' - UNINITIALIZED = 0 - PREEMPTED = 1 - SCHEDULED = 2 - - -class ResultState(Enum): - ''' Values of RESULT::state in client. 
- THESE MUST BE IN NUMERICAL ORDER - (because of the > comparison in RESULT::computing_done()) - see html/inc/common_defs.inc - ''' - NEW = 0 - #// New result - FILES_DOWNLOADING = 1 - #// Input files for result (WU, app version) are being downloaded - FILES_DOWNLOADED = 2 - #// Files are downloaded, result can be (or is being) computed - COMPUTE_ERROR = 3 - #// computation failed; no file upload - FILES_UPLOADING = 4 - #// Output files for result are being uploaded - FILES_UPLOADED = 5 - #// Files are uploaded, notify scheduling server at some point - ABORTED = 6 - #// result was aborted - UPLOAD_FAILED = 7 - #// some output file permanent failure - - -class Process(Enum): - ''' values of ACTIVE_TASK::task_state ''' - UNINITIALIZED = 0 - #// process doesn't exist yet - EXECUTING = 1 - #// process is running, as far as we know - SUSPENDED = 9 - #// we've sent it a "suspend" message - ABORT_PENDING = 5 - #// process exceeded limits; send "abort" message, waiting to exit - QUIT_PENDING = 8 - #// we've sent it a "quit" message, waiting to exit - COPY_PENDING = 10 - #// waiting for async file copies to finish - - -class _Struct(object): - ''' base helper class with common methods for all classes derived from - BOINC's C++ structs - ''' - @classmethod - def parse(cls, xml): - return setattrs_from_xml(cls(), xml) - - def __str__(self, indent=0): - buf = '{0}{1}:\n'.format('\t' * indent, self.__class__.__name__) - for attr in self.__dict__: - value = getattr(self, attr) - if isinstance(value, list): - buf += '{0}\t{1} [\n'.format('\t' * indent, attr) - for v in value: buf += '\t\t{0}\t\t,\n'.format(v) - buf += '\t]\n' - else: - buf += '{0}\t{1}\t{2}\n'.format('\t' * indent, - attr, - value.__str__(indent+2) - if isinstance(value, _Struct) - else repr(value)) - return buf - - -@total_ordering -class VersionInfo(_Struct): - def __init__(self, major=0, minor=0, release=0): - self.major = major - self.minor = minor - self.release = release - - @property - def _tuple(self): - 
return (self.major, self.minor, self.release) - - def __eq__(self, other): - return isinstance(other, self.__class__) and self._tuple == other._tuple - - def __ne__(self, other): - return not self.__eq__(other) - - def __gt__(self, other): - if not isinstance(other, self.__class__): - return NotImplemented - return self._tuple > other._tuple - - def __str__(self): - return "{0}.{1}.{2}".format(self.major, self.minor, self.release) - - def __repr__(self): - return "{0}{1}".format(self.__class__.__name__, self._tuple) - - -class Result(_Struct): - ''' Also called "task" in some contexts ''' - def __init__(self): - # Names and values follow lib/gui_rpc_client.h @ RESULT - # Order too, except when grouping contradicts client/result.cpp - # RESULT::write_gui(), then XML order is used. - - self.name = "" - self.wu_name = "" - self.version_num = 0 - #// identifies the app used - self.plan_class = "" - self.project_url = "" # from PROJECT.master_url - self.report_deadline = 0.0 # seconds since epoch - self.received_time = 0.0 # seconds since epoch - #// when we got this from server - self.ready_to_report = False - #// we're ready to report this result to the server; - #// either computation is done and all the files have been uploaded - #// or there was an error - self.got_server_ack = False - #// we've received the ack for this result from the server - self.final_cpu_time = 0.0 - self.final_elapsed_time = 0.0 - self.state = ResultState.NEW - self.estimated_cpu_time_remaining = 0.0 - #// actually, estimated elapsed time remaining - self.exit_status = 0 - #// return value from the application - self.suspended_via_gui = False - self.project_suspended_via_gui = False - self.edf_scheduled = False - #// temporary used to tell GUI that this result is deadline-scheduled - self.coproc_missing = False - #// a coproc needed by this job is missing - #// (e.g. because user removed their GPU board). 
- self.scheduler_wait = False - self.scheduler_wait_reason = "" - self.network_wait = False - self.resources = "" - #// textual description of resources used - - #// the following defined if active - # XML is generated in client/app.cpp ACTIVE_TASK::write_gui() - self.active_task = False - self.active_task_state = Process.UNINITIALIZED - self.app_version_num = 0 - self.slot = -1 - self.pid = 0 - self.scheduler_state = CpuSched.UNINITIALIZED - self.checkpoint_cpu_time = 0.0 - self.current_cpu_time = 0.0 - self.fraction_done = 0.0 - self.elapsed_time = 0.0 - self.swap_size = 0 - self.working_set_size_smoothed = 0.0 - self.too_large = False - self.needs_shmem = False - self.graphics_exec_path = "" - self.web_graphics_url = "" - self.remote_desktop_addr = "" - self.slot_path = "" - #// only present if graphics_exec_path is - - # The following are not in original API, but are present in RPC XML reply - self.completed_time = 0.0 - #// time when ready_to_report was set - self.report_immediately = False - self.working_set_size = 0 - self.page_fault_rate = 0.0 - #// derived by higher-level code - - # The following are in API, but are NEVER in RPC XML reply. Go figure - self.signal = 0 - - self.app = None # APP* - self.wup = None # WORKUNIT* - self.project = None # PROJECT* - self.avp = None # APP_VERSION* - - @classmethod - def parse(cls, xml): - if not isinstance(xml, ElementTree.Element): - xml = ElementTree.fromstring(xml) - - # parse main XML - result = super(Result, cls).parse(xml) - - # parse '<active_task>' children - active_task = xml.find('active_task') - if active_task is None: - result.active_task = False # already the default after __init__() - else: - result.active_task = True # already the default after main parse - result = setattrs_from_xml(result, active_task) - - #// if CPU time is nonzero but elapsed time is zero, - #// we must be talking to an old client. 
- #// Set elapsed = CPU - #// (easier to deal with this here than in the manager) - if result.current_cpu_time != 0 and result.elapsed_time == 0: - result.elapsed_time = result.current_cpu_time - - if result.final_cpu_time != 0 and result.final_elapsed_time == 0: - result.final_elapsed_time = result.final_cpu_time - - return result - - def __str__(self): - buf = '{0}:\n'.format(self.__class__.__name__) - for attr in self.__dict__: - value = getattr(self, attr) - if attr in ['received_time', 'report_deadline']: - value = time.ctime(value) - buf += '\t{0}\t{1}\n'.format(attr, value) - return buf - - -class BoincClient(object): - - def __init__(self, host="", port=0, passwd=None): - self.hostname = host - self.port = port - self.passwd = passwd - self.rpc = Rpc(text_output=False) - self.version = None - self.authorized = False - - # Informative, not authoritative. Records status of *last* RPC call, - # but does not infer success about the *next* one. - # Thus, it should be read *after* an RPC call, not prior to one - self.connected = False - - def __enter__(self): self.connect(); return self - def __exit__(self, *args): self.disconnect() - - def connect(self): - try: - self.rpc.connect(self.hostname, self.port) - self.connected = True - except socket.error: - self.connected = False - return - self.authorized = self.authorize(self.passwd) - self.version = self.exchange_versions() - - def disconnect(self): - self.rpc.disconnect() - - def authorize(self, password): - ''' Request authorization. If password is None and we are connecting - to localhost, try to read password from the local config file - GUI_RPC_PASSWD_FILE. If file can't be read (not found or no - permission to read), try to authorize with a blank password. - If authorization is requested and fails, all subsequent calls - will be refused with socket.error 'Connection reset by peer' (104). - Since most local calls do no require authorization, do not attempt - it if you're not sure about the password. 
- ''' - if password is None and not self.hostname: - password = read_gui_rpc_password() or "" - nonce = self.rpc.call('<auth1/>').text - authhash = hashlib.md5('{0}{1}'.format(nonce, password).encode()).hexdigest().lower() - reply = self.rpc.call('<auth2><nonce_hash>{0}</nonce_hash></auth2>'.format(authhash)) - - if reply.tag == 'authorized': - return True - else: - return False - - def exchange_versions(self): - ''' Return VersionInfo instance with core client version info ''' - return VersionInfo.parse(self.rpc.call('<exchange_versions/>')) - - def get_tasks(self): - ''' Same as get_results(active_only=False) ''' - return self.get_results(False) - - def get_results(self, active_only=False): - ''' Get a list of results. - Those that are in progress will have information such as CPU time - and fraction done. Each result includes a name; - Use CC_STATE::lookup_result() to find this result in the current static state; - if it's not there, call get_state() again. - ''' - reply = self.rpc.call("<get_results><active_only>{0}</active_only></get_results>".format(1 if active_only else 0)) - if not reply.tag == 'results': - return [] - - results = [] - for item in list(reply): - results.append(Result.parse(item)) - - return results - - -def read_gui_rpc_password(): - ''' Read password string from GUI_RPC_PASSWD_FILE file, trim the last CR - (if any), and return it - ''' - try: - with open(GUI_RPC_PASSWD_FILE, 'r') as f: - buf = f.read() - if buf.endswith('\n'): return buf[:-1] # trim last CR - else: return buf - except IOError: - # Permission denied or File not found. - pass diff --git a/collectors/python.d.plugin/python_modules/third_party/filelock.py b/collectors/python.d.plugin/python_modules/third_party/filelock.py deleted file mode 100644 index 4c981672b..000000000 --- a/collectors/python.d.plugin/python_modules/third_party/filelock.py +++ /dev/null @@ -1,451 +0,0 @@ -# This is free and unencumbered software released into the public domain. 
-# -# Anyone is free to copy, modify, publish, use, compile, sell, or -# distribute this software, either in source code form or as a compiled -# binary, for any purpose, commercial or non-commercial, and by any -# means. -# -# In jurisdictions that recognize copyright laws, the author or authors -# of this software dedicate any and all copyright interest in the -# software to the public domain. We make this dedication for the benefit -# of the public at large and to the detriment of our heirs and -# successors. We intend this dedication to be an overt act of -# relinquishment in perpetuity of all present and future rights to this -# software under copyright law. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -# For more information, please refer to <http://unlicense.org> - -""" -A platform independent file lock that supports the with-statement. 
-""" - - -# Modules -# ------------------------------------------------ -import logging -import os -import threading -import time -try: - import warnings -except ImportError: - warnings = None - -try: - import msvcrt -except ImportError: - msvcrt = None - -try: - import fcntl -except ImportError: - fcntl = None - - -# Backward compatibility -# ------------------------------------------------ -try: - TimeoutError -except NameError: - TimeoutError = OSError - - -# Data -# ------------------------------------------------ -__all__ = [ - "Timeout", - "BaseFileLock", - "WindowsFileLock", - "UnixFileLock", - "SoftFileLock", - "FileLock" -] - -__version__ = "3.0.12" - - -_logger = None -def logger(): - """Returns the logger instance used in this module.""" - global _logger - _logger = _logger or logging.getLogger(__name__) - return _logger - - -# Exceptions -# ------------------------------------------------ -class Timeout(TimeoutError): - """ - Raised when the lock could not be acquired in *timeout* - seconds. - """ - - def __init__(self, lock_file): - """ - """ - #: The path of the file lock. - self.lock_file = lock_file - return None - - def __str__(self): - temp = "The file lock '{}' could not be acquired."\ - .format(self.lock_file) - return temp - - -# Classes -# ------------------------------------------------ - -# This is a helper class which is returned by :meth:`BaseFileLock.acquire` -# and wraps the lock to make sure __enter__ is not called twice when entering -# the with statement. -# If we would simply return *self*, the lock would be acquired again -# in the *__enter__* method of the BaseFileLock, but not released again -# automatically. 
-# -# :seealso: issue #37 (memory leak) -class _Acquire_ReturnProxy(object): - - def __init__(self, lock): - self.lock = lock - return None - - def __enter__(self): - return self.lock - - def __exit__(self, exc_type, exc_value, traceback): - self.lock.release() - return None - - -class BaseFileLock(object): - """ - Implements the base class of a file lock. - """ - - def __init__(self, lock_file, timeout = -1): - """ - """ - # The path to the lock file. - self._lock_file = lock_file - - # The file descriptor for the *_lock_file* as it is returned by the - # os.open() function. - # This file lock is only NOT None, if the object currently holds the - # lock. - self._lock_file_fd = None - - # The default timeout value. - self.timeout = timeout - - # We use this lock primarily for the lock counter. - self._thread_lock = threading.Lock() - - # The lock counter is used for implementing the nested locking - # mechanism. Whenever the lock is acquired, the counter is increased and - # the lock is only released, when this value is 0 again. - self._lock_counter = 0 - return None - - @property - def lock_file(self): - """ - The path to the lock file. - """ - return self._lock_file - - @property - def timeout(self): - """ - You can set a default timeout for the filelock. It will be used as - fallback value in the acquire method, if no timeout value (*None*) is - given. - - If you want to disable the timeout, set it to a negative value. - - A timeout of 0 means, that there is exactly one attempt to acquire the - file lock. - - .. versionadded:: 2.0.0 - """ - return self._timeout - - @timeout.setter - def timeout(self, value): - """ - """ - self._timeout = float(value) - return None - - # Platform dependent locking - # -------------------------------------------- - - def _acquire(self): - """ - Platform dependent. If the file lock could be - acquired, self._lock_file_fd holds the file descriptor - of the lock file. 
- """ - raise NotImplementedError() - - def _release(self): - """ - Releases the lock and sets self._lock_file_fd to None. - """ - raise NotImplementedError() - - # Platform independent methods - # -------------------------------------------- - - @property - def is_locked(self): - """ - True, if the object holds the file lock. - - .. versionchanged:: 2.0.0 - - This was previously a method and is now a property. - """ - return self._lock_file_fd is not None - - def acquire(self, timeout=None, poll_intervall=0.05): - """ - Acquires the file lock or fails with a :exc:`Timeout` error. - - .. code-block:: python - - # You can use this method in the context manager (recommended) - with lock.acquire(): - pass - - # Or use an equivalent try-finally construct: - lock.acquire() - try: - pass - finally: - lock.release() - - :arg float timeout: - The maximum time waited for the file lock. - If ``timeout < 0``, there is no timeout and this method will - block until the lock could be acquired. - If ``timeout`` is None, the default :attr:`~timeout` is used. - - :arg float poll_intervall: - We check once in *poll_intervall* seconds if we can acquire the - file lock. - - :raises Timeout: - if the lock could not be acquired in *timeout* seconds. - - .. versionchanged:: 2.0.0 - - This method returns now a *proxy* object instead of *self*, - so that it can be used in a with statement without side effects. - """ - # Use the default timeout, if no timeout is provided. - if timeout is None: - timeout = self.timeout - - # Increment the number right at the beginning. - # We can still undo it, if something fails. 
- with self._thread_lock: - self._lock_counter += 1 - - lock_id = id(self) - lock_filename = self._lock_file - start_time = time.time() - try: - while True: - with self._thread_lock: - if not self.is_locked: - logger().debug('Attempting to acquire lock %s on %s', lock_id, lock_filename) - self._acquire() - - if self.is_locked: - logger().info('Lock %s acquired on %s', lock_id, lock_filename) - break - elif timeout >= 0 and time.time() - start_time > timeout: - logger().debug('Timeout on acquiring lock %s on %s', lock_id, lock_filename) - raise Timeout(self._lock_file) - else: - logger().debug( - 'Lock %s not acquired on %s, waiting %s seconds ...', - lock_id, lock_filename, poll_intervall - ) - time.sleep(poll_intervall) - except: - # Something did go wrong, so decrement the counter. - with self._thread_lock: - self._lock_counter = max(0, self._lock_counter - 1) - - raise - return _Acquire_ReturnProxy(lock = self) - - def release(self, force = False): - """ - Releases the file lock. - - Please note, that the lock is only completly released, if the lock - counter is 0. - - Also note, that the lock file itself is not automatically deleted. - - :arg bool force: - If true, the lock counter is ignored and the lock is released in - every case. 
- """ - with self._thread_lock: - - if self.is_locked: - self._lock_counter -= 1 - - if self._lock_counter == 0 or force: - lock_id = id(self) - lock_filename = self._lock_file - - logger().debug('Attempting to release lock %s on %s', lock_id, lock_filename) - self._release() - self._lock_counter = 0 - logger().info('Lock %s released on %s', lock_id, lock_filename) - - return None - - def __enter__(self): - self.acquire() - return self - - def __exit__(self, exc_type, exc_value, traceback): - self.release() - return None - - def __del__(self): - self.release(force = True) - return None - - -# Windows locking mechanism -# ~~~~~~~~~~~~~~~~~~~~~~~~~ - -class WindowsFileLock(BaseFileLock): - """ - Uses the :func:`msvcrt.locking` function to hard lock the lock file on - windows systems. - """ - - def _acquire(self): - open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC - - try: - fd = os.open(self._lock_file, open_mode) - except OSError: - pass - else: - try: - msvcrt.locking(fd, msvcrt.LK_NBLCK, 1) - except (IOError, OSError): - os.close(fd) - else: - self._lock_file_fd = fd - return None - - def _release(self): - fd = self._lock_file_fd - self._lock_file_fd = None - msvcrt.locking(fd, msvcrt.LK_UNLCK, 1) - os.close(fd) - - try: - os.remove(self._lock_file) - # Probably another instance of the application - # that acquired the file lock. - except OSError: - pass - return None - -# Unix locking mechanism -# ~~~~~~~~~~~~~~~~~~~~~~ - -class UnixFileLock(BaseFileLock): - """ - Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems. 
- """ - - def _acquire(self): - open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC - fd = os.open(self._lock_file, open_mode) - - try: - fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) - except (IOError, OSError): - os.close(fd) - else: - self._lock_file_fd = fd - return None - - def _release(self): - # Do not remove the lockfile: - # - # https://github.com/benediktschmitt/py-filelock/issues/31 - # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition - fd = self._lock_file_fd - self._lock_file_fd = None - fcntl.flock(fd, fcntl.LOCK_UN) - os.close(fd) - return None - -# Soft lock -# ~~~~~~~~~ - -class SoftFileLock(BaseFileLock): - """ - Simply watches the existence of the lock file. - """ - - def _acquire(self): - open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC - try: - fd = os.open(self._lock_file, open_mode) - except (IOError, OSError): - pass - else: - self._lock_file_fd = fd - return None - - def _release(self): - os.close(self._lock_file_fd) - self._lock_file_fd = None - - try: - os.remove(self._lock_file) - # The file is already deleted and that's what we want. - except OSError: - pass - return None - - -# Platform filelock -# ~~~~~~~~~~~~~~~~~ - -#: Alias for the lock, which should be used for the current platform. On -#: Windows, this is an alias for :class:`WindowsFileLock`, on Unix for -#: :class:`UnixFileLock` and otherwise for :class:`SoftFileLock`. 
-FileLock = None - -if msvcrt: - FileLock = WindowsFileLock -elif fcntl: - FileLock = UnixFileLock -else: - FileLock = SoftFileLock - - if warnings is not None: - warnings.warn("only soft file lock is available") diff --git a/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py b/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py deleted file mode 100644 index f873eac83..000000000 --- a/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py +++ /dev/null @@ -1,327 +0,0 @@ -# SPDX-License-Identifier: LGPL-2.1 -""" -@package sensors.py -Python Bindings for libsensors3 - -use the documentation of libsensors for the low level API. -see example.py for high level API usage. - -@author: Pavel Rojtberg (http://www.rojtberg.net) -@see: https://github.com/paroj/sensors.py -@copyright: LGPLv2 (same as libsensors) <http://opensource.org/licenses/LGPL-2.1> -""" - -from ctypes import * -import ctypes.util - -_libc = cdll.LoadLibrary(ctypes.util.find_library("c")) -# see https://github.com/paroj/sensors.py/issues/1 -_libc.free.argtypes = [c_void_p] - -_hdl = cdll.LoadLibrary(ctypes.util.find_library("sensors")) - -version = c_char_p.in_dll(_hdl, "libsensors_version").value.decode("ascii") - - -class SensorsError(Exception): - pass - - -class ErrorWildcards(SensorsError): - pass - - -class ErrorNoEntry(SensorsError): - pass - - -class ErrorAccessRead(SensorsError, OSError): - pass - - -class ErrorKernel(SensorsError, OSError): - pass - - -class ErrorDivZero(SensorsError, ZeroDivisionError): - pass - - -class ErrorChipName(SensorsError): - pass - - -class ErrorBusName(SensorsError): - pass - - -class ErrorParse(SensorsError): - pass - - -class ErrorAccessWrite(SensorsError, OSError): - pass - - -class ErrorIO(SensorsError, IOError): - pass - - -class ErrorRecursion(SensorsError): - pass - - -_ERR_MAP = { - 1: ErrorWildcards, - 2: ErrorNoEntry, - 3: ErrorAccessRead, - 4: ErrorKernel, - 5: ErrorDivZero, - 6: ErrorChipName, - 7: 
ErrorBusName, - 8: ErrorParse, - 9: ErrorAccessWrite, - 10: ErrorIO, - 11: ErrorRecursion -} - - -def raise_sensor_error(errno, message=''): - raise _ERR_MAP[abs(errno)](message) - - -class bus_id(Structure): - _fields_ = [("type", c_short), - ("nr", c_short)] - - -class chip_name(Structure): - _fields_ = [("prefix", c_char_p), - ("bus", bus_id), - ("addr", c_int), - ("path", c_char_p)] - - -class feature(Structure): - _fields_ = [("name", c_char_p), - ("number", c_int), - ("type", c_int)] - - # sensors_feature_type - IN = 0x00 - FAN = 0x01 - TEMP = 0x02 - POWER = 0x03 - ENERGY = 0x04 - CURR = 0x05 - HUMIDITY = 0x06 - MAX_MAIN = 0x7 - VID = 0x10 - INTRUSION = 0x11 - MAX_OTHER = 0x12 - BEEP_ENABLE = 0x18 - - -class subfeature(Structure): - _fields_ = [("name", c_char_p), - ("number", c_int), - ("type", c_int), - ("mapping", c_int), - ("flags", c_uint)] - - -_hdl.sensors_get_detected_chips.restype = POINTER(chip_name) -_hdl.sensors_get_features.restype = POINTER(feature) -_hdl.sensors_get_all_subfeatures.restype = POINTER(subfeature) -_hdl.sensors_get_label.restype = c_void_p # return pointer instead of str so we can free it -_hdl.sensors_get_adapter_name.restype = c_char_p # docs do not say whether to free this or not -_hdl.sensors_strerror.restype = c_char_p - -### RAW API ### -MODE_R = 1 -MODE_W = 2 -COMPUTE_MAPPING = 4 - - -def init(cfg_file=None): - file = _libc.fopen(cfg_file.encode("utf-8"), "r") if cfg_file is not None else None - - result = _hdl.sensors_init(file) - if result != 0: - raise_sensor_error(result, "sensors_init failed") - - if file is not None: - _libc.fclose(file) - - -def cleanup(): - _hdl.sensors_cleanup() - - -def parse_chip_name(orig_name): - ret = chip_name() - err = _hdl.sensors_parse_chip_name(orig_name.encode("utf-8"), byref(ret)) - - if err < 0: - raise_sensor_error(err, strerror(err)) - - return ret - - -def strerror(errnum): - return _hdl.sensors_strerror(errnum).decode("utf-8") - - -def free_chip_name(chip): - 
def get_detected_chips(match, nr):
    """Return ``(chip, next_nr)`` for the detected chip at index *nr*.

    *chip* is None when iteration is exhausted.
    """
    idx = c_int(nr)
    match_ref = byref(match) if match is not None else None
    result = _hdl.sensors_get_detected_chips(match_ref, byref(idx))
    return (result.contents if bool(result) else None), idx.value


def chip_snprintf_name(chip, buffer_size=200):
    """Format *chip*'s name into a unicode string.

    @param buffer_size: defaults to the size used by the sensors utility.
    """
    buf = create_string_buffer(buffer_size)
    status = _hdl.sensors_snprintf_chip_name(buf, buffer_size, byref(chip))
    if status < 0:
        raise_sensor_error(status, strerror(status))
    return buf.value.decode("utf-8")


def do_chip_sets(chip):
    """Execute all set statements for *chip*.

    @attention: this function was not tested upstream.
    """
    status = _hdl.sensors_do_chip_sets(byref(chip))
    if status < 0:
        raise_sensor_error(status, strerror(status))


def get_adapter_name(bus):
    """Return the adapter name of *bus* as a unicode string."""
    return _hdl.sensors_get_adapter_name(byref(bus)).decode("utf-8")


def get_features(chip, nr):
    """Return ``(feature, next_nr)`` for *chip* starting at index *nr*."""
    idx = c_int(nr)
    result = _hdl.sensors_get_features(byref(chip), byref(idx))
    return (result.contents if bool(result) else None), idx.value


def get_label(chip, feature):
    """Return the label of *feature*, freeing the C-allocated string."""
    raw = _hdl.sensors_get_label(byref(chip), byref(feature))
    label = cast(raw, c_char_p).value.decode("utf-8")
    # the label string is malloc'd by libsensors; release it here
    _libc.free(raw)
    return label


def get_all_subfeatures(chip, feature, nr):
    """Return ``(subfeature, next_nr)`` for *feature* starting at index *nr*."""
    idx = c_int(nr)
    result = _hdl.sensors_get_all_subfeatures(byref(chip), byref(feature), byref(idx))
    return (result.contents if bool(result) else None), idx.value


def get_value(chip, subfeature_nr):
    """Read and return the value of the given subfeature as a float."""
    out = c_double()
    status = _hdl.sensors_get_value(byref(chip), subfeature_nr, byref(out))
    if status < 0:
        raise_sensor_error(status, strerror(status))
    return out.value


def set_value(chip, subfeature_nr, value):
    """Set the given subfeature to *value*.

    @attention: this function was not tested upstream.
    """
    val = c_double(value)
    status = _hdl.sensors_set_value(byref(chip), subfeature_nr, byref(val))
    if status < 0:
        raise_sensor_error(status, strerror(status))


### Convenience API ###
class ChipIterator:
    """Iterate over detected chips, optionally restricted by a match pattern."""

    def __init__(self, match=None):
        self.match = parse_chip_name(match) if match is not None else None
        self.nr = 0

    def __iter__(self):
        return self

    def __next__(self):
        chip, self.nr = get_detected_chips(self.match, self.nr)
        if chip is None:
            raise StopIteration
        return chip

    def __del__(self):
        # release the parsed match pattern allocated in __init__
        if self.match is not None:
            free_chip_name(self.match)

    def next(self):  # python2 compatibility
        return self.__next__()


class FeatureIterator:
    """Iterate over the features of a single chip."""

    def __init__(self, chip):
        self.chip = chip
        self.nr = 0

    def __iter__(self):
        return self

    def __next__(self):
        feature, self.nr = get_features(self.chip, self.nr)
        if feature is None:
            raise StopIteration
        return feature

    def next(self):  # python2 compatibility
        return self.__next__()


class SubFeatureIterator:
    """Iterate over the subfeatures of one feature of one chip."""

    def __init__(self, chip, feature):
        self.chip = chip
        self.feature = feature
        self.nr = 0

    def __iter__(self):
        return self

    def __next__(self):
        subfeature, self.nr = get_all_subfeatures(self.chip, self.feature, self.nr)
        if subfeature is None:
            raise StopIteration
        return subfeature

    def next(self):  # python2 compatibility
        return self.__next__()
# Minecraft Remote Console module.
#
# Copyright (C) 2015 Barnaby Gale
#
# SPDX-License-Identifier: MIT

import socket
import select
import struct
import time


class MCRconException(Exception):
    """Raised for RCON protocol or connection-state errors."""
    pass


class MCRcon(object):
    """Minimal client for the Minecraft RCON protocol.

    Packets are little-endian framed: length, request id, type, payload,
    then two NUL padding bytes.
    """
    socket = None

    def connect(self, host, port, password):
        """Open a TCP connection to host:port and authenticate (type 3)."""
        if self.socket is not None:
            raise MCRconException("Already connected")
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.settimeout(0.9)
        self.socket.connect((host, port))
        self.send(3, password)

    def disconnect(self):
        """Close the connection and forget the socket."""
        if self.socket is None:
            raise MCRconException("Already disconnected")
        self.socket.close()
        self.socket = None

    def read(self, length):
        """Read exactly *length* bytes from the socket."""
        data = b""
        while len(data) < length:
            data += self.socket.recv(length - len(data))
        return data

    def send(self, out_type, out_data):
        """Send one request packet and return the concatenated response text.

        Raises MCRconException when not connected, on malformed padding,
        or when the server reports a login failure (response id -1).
        """
        if self.socket is None:
            raise MCRconException("Must connect before sending data")

        # Send a request packet (request id 0)
        out_payload = struct.pack('<ii', 0, out_type) + out_data.encode('utf8') + b'\x00\x00'
        out_length = struct.pack('<i', len(out_payload))
        self.socket.send(out_length + out_payload)

        # Read response packets
        in_data = ""
        while True:
            # Read a packet
            in_length, = struct.unpack('<i', self.read(4))
            in_payload = self.read(in_length)
            # BUG FIX: take the first unpacked field (the response id).
            # The original bound the whole 2-tuple to in_id, so the
            # "in_id == -1" login-failure check below could never trigger.
            in_id = struct.unpack('<ii', in_payload[:8])[0]
            in_data_partial, in_padding = in_payload[8:-2], in_payload[-2:]

            # Sanity checks
            if in_padding != b'\x00\x00':
                raise MCRconException("Incorrect padding")
            if in_id == -1:
                raise MCRconException("Login failed")

            # Record the response
            in_data += in_data_partial.decode('utf8')

            # If there's nothing more to receive, return the response
            if len(select.select([self.socket], [], [], 0)[0]) == 0:
                return in_data

    def command(self, command):
        """Run a server command (type 2) and return its output."""
        result = self.send(2, command)
        time.sleep(0.003)  # MC-72390 workaround
        return result
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: Apache-2.0
"""
    monotonic
    ~~~~~~~~~

    Provide a ``monotonic()`` function returning the value (in fractional
    seconds) of a clock that never goes backwards.

    On Python 3.3 or newer ``monotonic`` is simply an alias of
    ``time.monotonic`` from the standard library.  Older interpreters fall
    back to an equivalent platform-specific implementation:

    +-------------+----------------------------------------+
    | Linux, BSD  | ``clock_gettime(3)``                   |
    +-------------+----------------------------------------+
    | Windows     | ``GetTickCount`` or ``GetTickCount64`` |
    +-------------+----------------------------------------+
    | OS X        | ``mach_absolute_time``                 |
    +-------------+----------------------------------------+

    If no suitable implementation exists for the current platform,
    importing this module (or importing from it) raises ``RuntimeError``.


    Copyright 2014, 2015, 2016 Ori Livneh <ori@wikimedia.org>

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.

"""
import time


__all__ = ('monotonic',)


try:
    # Python 3.3+: use the stdlib clock directly.
    monotonic = time.monotonic
except AttributeError:
    import ctypes
    import ctypes.util
    import os
    import sys
    import threading


    def _gettime_from_libc():
        return ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True).clock_gettime


    def _gettime_from_librt():
        return ctypes.CDLL(ctypes.util.find_library('rt'), use_errno=True).clock_gettime


    def _gettime_from_libc_synology6():
        return ctypes.CDLL('/usr/lib/libc.so.6', use_errno=True).clock_gettime


    def _gettime_from_librt_synology6():
        return ctypes.CDLL('/usr/lib/librt.so.1', use_errno=True).clock_gettime


    def _resolve_clock_gettime():
        # Try several library locations in turn; see
        # https://github.com/netdata/netdata/issues/7976
        candidates = (
            _gettime_from_libc,
            _gettime_from_librt,
            _gettime_from_libc_synology6,
            _gettime_from_librt_synology6,
        )
        for candidate in candidates:
            try:
                return candidate()
            except (RuntimeError, AttributeError, OSError):
                continue
        raise RuntimeError('can not find c and rt libraries')


    try:
        if sys.platform == 'darwin':  # OS X, iOS
            # See Technical Q&A QA1398 of the Mac Developer Library:
            # <https://developer.apple.com/library/mac/qa/qa1398/>
            libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True)

            class mach_timebase_info_data_t(ctypes.Structure):
                """System timebase info.  Defined in <mach/mach_time.h>."""
                _fields_ = (('numer', ctypes.c_uint32),
                            ('denom', ctypes.c_uint32))

            mach_absolute_time = libc.mach_absolute_time
            mach_absolute_time.restype = ctypes.c_uint64

            timebase = mach_timebase_info_data_t()
            libc.mach_timebase_info(ctypes.byref(timebase))
            ticks_per_second = timebase.numer / timebase.denom * 1.0e9

            def monotonic():
                """Monotonic clock, cannot go backward."""
                return mach_absolute_time() / ticks_per_second

        elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
            if sys.platform.startswith('cygwin'):
                # Note: cygwin implements clock_gettime (CLOCK_MONOTONIC = 4)
                # since version 1.7.6.  Using raw WinAPI for maximum version
                # compatibility.
                #
                # Ugly hack using the wrong calling convention (in 32-bit
                # mode) because ctypes has no windll under cygwin (and it also
                # seems that the code letting you select stdcall in _ctypes
                # doesn't exist under the preprocessor definitions relevant to
                # cygwin).  This is 'safe' because:
                # 1. The ABI of GetTickCount and GetTickCount64 is identical
                #    for both calling conventions because they both have no
                #    parameters.
                # 2. libffi masks the problem because after making the call it
                #    doesn't touch anything through esp and epilogue code
                #    restores a correct esp from ebp afterwards.
                try:
                    kernel32 = ctypes.cdll.kernel32
                except OSError:  # 'No such file or directory'
                    kernel32 = ctypes.cdll.LoadLibrary('kernel32.dll')
            else:
                kernel32 = ctypes.windll.kernel32

            GetTickCount64 = getattr(kernel32, 'GetTickCount64', None)
            if GetTickCount64:
                # Windows Vista / Windows Server 2008 or newer.
                GetTickCount64.restype = ctypes.c_ulonglong

                def monotonic():
                    """Monotonic clock, cannot go backward."""
                    return GetTickCount64() / 1000.0

            else:
                # Before Windows Vista: GetTickCount wraps every ~49.7 days,
                # so count wraparounds under a lock.
                GetTickCount = kernel32.GetTickCount
                GetTickCount.restype = ctypes.c_uint32

                get_tick_count_lock = threading.Lock()
                get_tick_count_last_sample = 0
                get_tick_count_wraparounds = 0

                def monotonic():
                    """Monotonic clock, cannot go backward."""
                    global get_tick_count_last_sample
                    global get_tick_count_wraparounds

                    with get_tick_count_lock:
                        current_sample = GetTickCount()
                        if current_sample < get_tick_count_last_sample:
                            get_tick_count_wraparounds += 1
                        get_tick_count_last_sample = current_sample

                        final_milliseconds = get_tick_count_wraparounds << 32
                        final_milliseconds += get_tick_count_last_sample
                        return final_milliseconds / 1000.0

        else:
            clock_gettime = _resolve_clock_gettime()

            class timespec(ctypes.Structure):
                """Time specification, as described in clock_gettime(3)."""
                _fields_ = (('tv_sec', ctypes.c_long),
                            ('tv_nsec', ctypes.c_long))

            # Pick the per-platform CLOCK_MONOTONIC constant.
            if sys.platform.startswith('linux'):
                CLOCK_MONOTONIC = 1
            elif sys.platform.startswith('freebsd'):
                CLOCK_MONOTONIC = 4
            elif sys.platform.startswith('sunos5'):
                CLOCK_MONOTONIC = 4
            elif 'bsd' in sys.platform:
                CLOCK_MONOTONIC = 3
            elif sys.platform.startswith('aix'):
                CLOCK_MONOTONIC = ctypes.c_longlong(10)

            def monotonic():
                """Monotonic clock, cannot go backward."""
                ts = timespec()
                if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(ts)):
                    errno = ctypes.get_errno()
                    raise OSError(errno, os.strerror(errno))
                return ts.tv_sec + ts.tv_nsec / 1.0e9

        # Perform a sanity-check.
        if monotonic() - monotonic() > 0:
            raise ValueError('monotonic() is not monotonic!')

    except Exception as e:
        raise RuntimeError('no suitable implementation for this system: ' + repr(e))
# Copyright (c) 2009 Raymond Hettinger
#
# SPDX-License-Identifier: MIT

from UserDict import DictMixin


class OrderedDict(dict, DictMixin):
    """Dictionary remembering insertion order (Python 2 recipe).

    Order is tracked in a circular doubly linked list of ``[key, prev,
    next]`` cells; the sentinel ``self.__end`` marks both ends.
    """

    def __init__(self, *args, **kwds):
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        # Only initialise the linked list once (re-__init__ keeps order).
        try:
            self.__end
        except AttributeError:
            self.clear()
        self.update(*args, **kwds)

    def clear(self):
        # Sentinel node for the doubly linked list: [None, prev, next],
        # initially linked to itself.
        self.__end = end = []
        end += [None, end, end]
        self.__map = {}  # key --> [key, prev, next]
        dict.clear(self)

    def __setitem__(self, key, value):
        if key not in self:
            # Splice a new cell in just before the sentinel (i.e. at the end).
            end = self.__end
            last = end[1]
            last[2] = end[1] = self.__map[key] = [key, last, end]
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        dict.__delitem__(self, key)
        # Unlink the cell from the list.
        key, prev, next = self.__map.pop(key)
        prev[2] = next
        next[1] = prev

    def __iter__(self):
        end = self.__end
        node = end[2]
        while node is not end:
            yield node[0]
            node = node[2]

    def __reversed__(self):
        end = self.__end
        node = end[1]
        while node is not end:
            yield node[0]
            node = node[1]

    def popitem(self, last=True):
        """Remove and return a (key, value) pair; LIFO when *last* is true."""
        if not self:
            raise KeyError('dictionary is empty')
        if last:
            key = reversed(self).next()
        else:
            key = iter(self).next()
        value = self.pop(key)
        return key, value

    def __reduce__(self):
        # Temporarily drop the bookkeeping attributes so they are not
        # pickled; they are rebuilt by __init__ on unpickling.
        items = [[k, self[k]] for k in self]
        tmp = self.__map, self.__end
        del self.__map, self.__end
        inst_dict = vars(self).copy()
        self.__map, self.__end = tmp
        if inst_dict:
            return self.__class__, (items,), inst_dict
        return self.__class__, (items,)

    def keys(self):
        return list(self)

    setdefault = DictMixin.setdefault
    update = DictMixin.update
    pop = DictMixin.pop
    values = DictMixin.values
    items = DictMixin.items
    iterkeys = DictMixin.iterkeys
    itervalues = DictMixin.itervalues
    iteritems = DictMixin.iteritems

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())

    def copy(self):
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        # Order-sensitive comparison against another OrderedDict;
        # order-insensitive against a plain dict.
        if isinstance(other, OrderedDict):
            if len(self) != len(other):
                return False
            for p, q in zip(self.items(), other.items()):
                if p != q:
                    return False
            return True
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other