author     Daniel Baumann <daniel.baumann@progress-linux.org>   2023-07-20 04:50:01 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2023-07-20 04:50:01 +0000
commit     cd4377fab21e0f500bef7f06543fa848a039c1e0 (patch)
tree       ba00a55e430c052d6bed0b61c0f8bbe8ebedd313 /collectors/cups.plugin
parent     Releasing debian version 1.40.1-1. (diff)
Merging upstream version 1.41.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collectors/cups.plugin')
-rw-r--r--  collectors/cups.plugin/cups_plugin.c    78
-rw-r--r--  collectors/cups.plugin/metadata.yaml   118
2 files changed, 158 insertions(+), 38 deletions(-)
diff --git a/collectors/cups.plugin/cups_plugin.c b/collectors/cups.plugin/cups_plugin.c
index ecadc4ecb..ce7f05d4d 100644
--- a/collectors/cups.plugin/cups_plugin.c
+++ b/collectors/cups.plugin/cups_plugin.c
@@ -17,7 +17,7 @@
static int debug = 0;
static int netdata_update_every = 1;
-static int netdata_priority = 100004;
+static uint32_t netdata_priority = 100004;
http_t *http; // connection to the cups daemon
@@ -25,7 +25,9 @@ http_t *http; // connection to the cups daemon
* Used to aggregate job metrics for a destination (and all destinations).
*/
struct job_metrics {
- int is_collected; // flag if this was collected in the current cycle
+ uint32_t id;
+
+ bool is_collected; // flag if this was collected in the current cycle
int num_pending;
int num_processing;
@@ -102,7 +104,7 @@ void parse_command_line(int argc, char **argv) {
if (freq >= netdata_update_every) {
netdata_update_every = freq;
} else if (freq) {
- error("update frequency %d seconds is too small for CUPS. Using %d.", freq, netdata_update_every);
+ netdata_log_error("update frequency %d seconds is too small for CUPS. Using %d.", freq, netdata_update_every);
}
}
@@ -140,7 +142,7 @@ getIntegerOption(
static int reset_job_metrics(const DICTIONARY_ITEM *item __maybe_unused, void *entry, void *data __maybe_unused) {
struct job_metrics *jm = (struct job_metrics *)entry;
- jm->is_collected = 0;
+ jm->is_collected = false;
jm->num_held = 0;
jm->num_pending = 0;
jm->num_processing = 0;
@@ -151,28 +153,34 @@ static int reset_job_metrics(const DICTIONARY_ITEM *item __maybe_unused, void *e
return 0;
}
+void send_job_charts_definitions_to_netdata(const char *name, uint32_t job_id, bool obsolete) {
+ printf("CHART cups.job_num_%s '' 'Active jobs of %s' jobs '%s' cups.destination_job_num stacked %u %i %s\n",
+ name, name, name, netdata_priority + job_id, netdata_update_every, obsolete?"obsolete":"");
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+
+ printf("CHART cups.job_size_%s '' 'Active jobs size of %s' KB '%s' cups.destination_job_size stacked %u %i %s\n",
+ name, name, name, netdata_priority + 1 + job_id, netdata_update_every, obsolete?"obsolete":"");
+ printf("DIMENSION pending '' absolute 1 1\n");
+ printf("DIMENSION held '' absolute 1 1\n");
+ printf("DIMENSION processing '' absolute 1 1\n");
+}
+
struct job_metrics *get_job_metrics(char *dest) {
struct job_metrics *jm = dictionary_get(dict_dest_job_metrics, dest);
if (unlikely(!jm)) {
- struct job_metrics new_job_metrics;
- reset_job_metrics(NULL, &new_job_metrics, NULL);
+ static uint32_t job_id = 0;
+ struct job_metrics new_job_metrics = { .id = ++job_id };
jm = dictionary_set(dict_dest_job_metrics, dest, &new_job_metrics, sizeof(struct job_metrics));
-
- printf("CHART cups.job_num_%s '' 'Active jobs of %s' jobs '%s' cups.destination_job_num stacked %i %i\n", dest, dest, dest, netdata_priority++, netdata_update_every);
- printf("DIMENSION pending '' absolute 1 1\n");
- printf("DIMENSION held '' absolute 1 1\n");
- printf("DIMENSION processing '' absolute 1 1\n");
-
- printf("CHART cups.job_size_%s '' 'Active jobs size of %s' KB '%s' cups.destination_job_size stacked %i %i\n", dest, dest, dest, netdata_priority++, netdata_update_every);
- printf("DIMENSION pending '' absolute 1 1\n");
- printf("DIMENSION held '' absolute 1 1\n");
- printf("DIMENSION processing '' absolute 1 1\n");
+ send_job_charts_definitions_to_netdata(dest, jm->id, false);
};
+
return jm;
}
-int collect_job_metrics(const DICTIONARY_ITEM *item, void *entry, void *data __maybe_unused) {
+int send_job_metrics_to_netdata(const DICTIONARY_ITEM *item, void *entry, void *data __maybe_unused) {
const char *name = dictionary_acquired_item_name(item);
struct job_metrics *jm = (struct job_metrics *)entry;
@@ -192,16 +200,12 @@ int collect_job_metrics(const DICTIONARY_ITEM *item, void *entry, void *data __m
"SET processing = %d\n"
"END\n",
name, jm->size_pending, jm->size_held, jm->size_processing);
- } else {
- printf("CHART cups.job_num_%s '' 'Active jobs of %s' jobs '%s' cups.destination_job_num stacked 1 %i 'obsolete'\n", name, name, name, netdata_update_every);
- printf("DIMENSION pending '' absolute 1 1\n");
- printf("DIMENSION held '' absolute 1 1\n");
- printf("DIMENSION processing '' absolute 1 1\n");
-
- printf("CHART cups.job_size_%s '' 'Active jobs size of %s' KB '%s' cups.destination_job_size stacked 1 %i 'obsolete'\n", name, name, name, netdata_update_every);
- printf("DIMENSION pending '' absolute 1 1\n");
- printf("DIMENSION held '' absolute 1 1\n");
- printf("DIMENSION processing '' absolute 1 1\n");
+ }
+ else {
+ // mark it obsolete
+ send_job_charts_definitions_to_netdata(name, jm->id, true);
+
+ // delete it
dictionary_del(dict_dest_job_metrics, name);
}
@@ -255,14 +259,11 @@ int main(int argc, char **argv) {
heartbeat_t hb;
heartbeat_init(&hb);
- for (iteration = 0; 1; iteration++)
- {
+ for (iteration = 0; 1; iteration++) {
heartbeat_next(&hb, step);
if (unlikely(netdata_exit))
- {
break;
- }
reset_metrics();
@@ -274,7 +275,7 @@ int main(int argc, char **argv) {
httpClose(http);
http = httpConnect2(cupsServer(), ippPort(), NULL, AF_UNSPEC, cupsEncryption(), 0, netdata_update_every * 1000, NULL);
if(http == NULL) {
- error("cups daemon is not running. Exiting!");
+ netdata_log_error("cups daemon is not running. Exiting!");
exit(1);
}
}
@@ -320,7 +321,7 @@ int main(int argc, char **argv) {
fprintf(stderr, "printer state is missing for destination %s", curr_dest->name);
break;
default:
- error("Unknown printer state (%d) found.", printer_state);
+ netdata_log_error("Unknown printer state (%d) found.", printer_state);
break;
}
@@ -329,7 +330,7 @@ int main(int argc, char **argv) {
* This is needed to report also destinations with zero active jobs.
*/
struct job_metrics *jm = get_job_metrics(curr_dest->name);
- jm->is_collected = 1;
+ jm->is_collected = true;
}
cupsFreeDests(num_dest_total, dests);
@@ -341,7 +342,7 @@ int main(int argc, char **argv) {
int i;
for (i = num_jobs, curr_job = jobs; i > 0; i--, curr_job++) {
struct job_metrics *jm = get_job_metrics(curr_job->dest);
- jm->is_collected = 1;
+ jm->is_collected = true;
switch (curr_job->state) {
case IPP_JOB_PENDING:
@@ -363,13 +364,14 @@ int main(int argc, char **argv) {
global_job_metrics.size_processing += curr_job->size;
break;
default:
- error("Unsupported job state (%u) found.", curr_job->state);
+ netdata_log_error("Unsupported job state (%u) found.", curr_job->state);
break;
}
}
cupsFreeJobs(num_jobs, jobs);
- dictionary_walkthrough_write(dict_dest_job_metrics, collect_job_metrics, NULL);
+ dictionary_walkthrough_write(dict_dest_job_metrics, send_job_metrics_to_netdata, NULL);
+ dictionary_garbage_collect(dict_dest_job_metrics);
static int cups_printer_by_option_created = 0;
if (unlikely(!cups_printer_by_option_created))
@@ -436,5 +438,5 @@ int main(int argc, char **argv) {
}
httpClose(http);
- info("CUPS process exiting");
+ netdata_log_info("CUPS process exiting");
}
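For reference, a minimal sketch of the chart-definition lines the new send_job_charts_definitions_to_netdata() helper writes to stdout, with values substituted by hand from the printf format strings above. The destination name "office" is hypothetical; it assumes the first registered destination (job id 1), the default priority base 100004 and a 1-second update interval. The trailing options field is empty when obsolete is false:

  CHART cups.job_num_office '' 'Active jobs of office' jobs 'office' cups.destination_job_num stacked 100005 1
  DIMENSION pending '' absolute 1 1
  DIMENSION held '' absolute 1 1
  DIMENSION processing '' absolute 1 1

  CHART cups.job_size_office '' 'Active jobs size of office' KB 'office' cups.destination_job_size stacked 100006 1
  DIMENSION pending '' absolute 1 1
  DIMENSION held '' absolute 1 1
  DIMENSION processing '' absolute 1 1

When a destination disappears, the same helper is called with obsolete set to true, appending the 'obsolete' option to both CHART lines so netdata retires the charts before the dictionary entry is deleted.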
diff --git a/collectors/cups.plugin/metadata.yaml b/collectors/cups.plugin/metadata.yaml
new file mode 100644
index 000000000..c8a7e0834
--- /dev/null
+++ b/collectors/cups.plugin/metadata.yaml
@@ -0,0 +1,118 @@
+meta:
+ plugin_name: cups.plugin
+ module_name: cups.plugin
+ monitored_instance:
+ name: CUPS
+ link: ''
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ icon_filename: 'cups.png'
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ''
+ keywords: []
+ most_popular: false
+overview:
+ data_collection:
+ metrics_description: 'Monitor CUPS performance for achieving optimal printing system operations. Monitor job statuses, queue lengths, and error rates to ensure smooth printing tasks.'
+ method_description: ''
+ supported_platforms:
+ include: []
+ exclude: []
+ multi-instance: true
+ additional_permissions:
+ description: ''
+ default_behavior:
+ auto_detection:
+ description: ''
+ limits:
+ description: ''
+ performance_impact:
+ description: ''
+setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: ''
+ description: ''
+ options:
+ description: ''
+ folding:
+ title: ''
+ enabled: true
+ list: []
+ examples:
+ folding:
+ enabled: true
+ title: ''
+ list: []
+troubleshooting:
+ problems:
+ list: []
+alerts: []
+metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: ""
+ labels: []
+ metrics:
+ - name: cups.dests_state
+ description: Destinations by state
+ unit: "dests"
+ chart_type: stacked
+ dimensions:
+ - name: idle
+ - name: printing
+ - name: stopped
+ - name: cups.dests_option
+ description: Destinations by option
+ unit: "dests"
+ chart_type: line
+ dimensions:
+ - name: total
+ - name: acceptingjobs
+ - name: shared
+ - name: cups.job_num
+ description: Active jobs
+ unit: "jobs"
+ chart_type: stacked
+ dimensions:
+ - name: pending
+ - name: held
+ - name: processing
+ - name: cups.job_size
+ description: Active jobs size
+ unit: "KB"
+ chart_type: stacked
+ dimensions:
+ - name: pending
+ - name: held
+ - name: processing
+ - name: destination
+ description: ""
+ labels: []
+ metrics:
+ - name: cups.destination_job_num
+ description: Active jobs of {destination}
+ unit: "jobs"
+ chart_type: stacked
+ dimensions:
+ - name: pending
+ - name: held
+ - name: processing
+ - name: cups.destination_job_size
+ description: Active jobs size of {destination}
+ unit: "KB"
+ chart_type: stacked
+ dimensions:
+ - name: pending
+ - name: held
+ - name: processing
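The per-destination scope declared above (cups.destination_job_num and cups.destination_job_size) corresponds to the charts the plugin emits for each printer destination. As a minimal sketch of one collection cycle's data block written by the renamed send_job_metrics_to_netdata() callback, again for a hypothetical destination "office" with a single 5 KB pending job (the BEGIN lines follow the standard netdata external plugin protocol; the hunk above only shows the tail of the format string):

  BEGIN cups.job_num_office
  SET pending = 1
  SET held = 0
  SET processing = 0
  END
  BEGIN cups.job_size_office
  SET pending = 5
  SET held = 0
  SET processing = 0
  END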