Diffstat (limited to 'arch/s390/appldata')
-rw-r--r--  arch/s390/appldata/Makefile           |    9
-rw-r--r--  arch/s390/appldata/appldata.h         |   48
-rw-r--r--  arch/s390/appldata/appldata_base.c    |  544
-rw-r--r--  arch/s390/appldata/appldata_mem.c     |  165
-rw-r--r--  arch/s390/appldata/appldata_net_sum.c |  167
-rw-r--r--  arch/s390/appldata/appldata_os.c      |  221
6 files changed, 1154 insertions, 0 deletions
diff --git a/arch/s390/appldata/Makefile b/arch/s390/appldata/Makefile
new file mode 100644
index 000000000..b06def4a4
--- /dev/null
+++ b/arch/s390/appldata/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux - z/VM Monitor Stream.
+#
+
+obj-$(CONFIG_APPLDATA_BASE) += appldata_base.o
+obj-$(CONFIG_APPLDATA_MEM) += appldata_mem.o
+obj-$(CONFIG_APPLDATA_OS) += appldata_os.o
+obj-$(CONFIG_APPLDATA_NET_SUM) += appldata_net_sum.o
diff --git a/arch/s390/appldata/appldata.h b/arch/s390/appldata/appldata.h
new file mode 100644
index 000000000..10346d2f3
--- /dev/null
+++ b/arch/s390/appldata/appldata.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions and interface for Linux - z/VM Monitor Stream.
+ *
+ * Copyright IBM Corp. 2003, 2008
+ *
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#define APPLDATA_MAX_REC_SIZE 4024 /* Maximum size of the */
+ /* data buffer */
+#define APPLDATA_MAX_PROCS 100
+
+#define APPLDATA_PROC_NAME_LENGTH 16 /* Max. length of /proc name */
+
+#define APPLDATA_RECORD_MEM_ID 0x01 /* IDs to identify the */
+#define APPLDATA_RECORD_OS_ID 0x02 /* individual records, */
+#define APPLDATA_RECORD_NET_SUM_ID 0x03 /* must be < 256 ! */
+#define APPLDATA_RECORD_PROC_ID 0x04
+
+#define CTL_APPLDATA_TIMER 2121 /* sysctl IDs, must be unique */
+#define CTL_APPLDATA_INTERVAL 2122
+#define CTL_APPLDATA_MEM 2123
+#define CTL_APPLDATA_OS 2124
+#define CTL_APPLDATA_NET_SUM 2125
+#define CTL_APPLDATA_PROC 2126
+
+struct appldata_ops {
+ struct list_head list;
+ struct ctl_table_header *sysctl_header;
+ struct ctl_table *ctl_table;
+ int active; /* monitoring status */
+
+ /* fill in from here */
+ char name[APPLDATA_PROC_NAME_LENGTH]; /* name of /proc fs node */
+ unsigned char record_nr; /* Record Nr. for Product ID */
+ void (*callback)(void *data); /* callback function */
+ void *data; /* record data */
+ unsigned int size; /* size of record */
+ struct module *owner; /* THIS_MODULE */
+ char mod_lvl[2]; /* modification level, EBCDIC */
+};
+
+extern int appldata_register_ops(struct appldata_ops *ops);
+extern void appldata_unregister_ops(struct appldata_ops *ops);
+extern int appldata_diag(char record_nr, u16 function, unsigned long buffer,
+ u16 length, char *mod_lvl);
+
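The header above is the complete interface between the base module and the data-gathering modules: a module describes itself in a struct appldata_ops and hands it to appldata_register_ops(). As a minimal sketch (not part of this patch), a hypothetical consumer could look like the code below; the "demo" name, the record layout and the reuse of APPLDATA_RECORD_PROC_ID are placeholders for illustration only, while the registration pattern mirrors appldata_mem.c further down in this diff.

/* Hypothetical appldata consumer -- illustration only, not part of this patch. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/timex.h>

#include "appldata.h"

struct appldata_demo_data {		/* made-up record layout */
	u64 timestamp;
	u32 sync_count_1;
	u32 sync_count_2;
	u64 sample;			/* whatever the module measures */
} __packed;

static void appldata_get_demo_data(void *data)
{
	struct appldata_demo_data *demo_data = data;

	demo_data->sync_count_1++;		/* record is being updated */
	demo_data->sample = 42;			/* gather the real data here */
	demo_data->timestamp = get_tod_clock();
	demo_data->sync_count_2++;		/* record is consistent again */
}

static struct appldata_ops ops = {
	.name		= "demo",		/* /proc/sys/appldata/demo */
	.record_nr	= APPLDATA_RECORD_PROC_ID, /* placeholder; a real module needs its own ID */
	.size		= sizeof(struct appldata_demo_data),
	.callback	= &appldata_get_demo_data,
	.owner		= THIS_MODULE,
	.mod_lvl	= {0xF0, 0xF0},		/* EBCDIC "00" */
};

static int __init appldata_demo_init(void)
{
	int ret;

	ops.data = kzalloc(sizeof(struct appldata_demo_data), GFP_KERNEL);
	if (!ops.data)
		return -ENOMEM;
	ret = appldata_register_ops(&ops);
	if (ret)
		kfree(ops.data);
	return ret;
}

static void __exit appldata_demo_exit(void)
{
	appldata_unregister_ops(&ops);
	kfree(ops.data);
}

module_init(appldata_demo_init);
module_exit(appldata_demo_exit);
MODULE_LICENSE("GPL");

The base module then does the rest: its work queue calls appldata_get_demo_data() on every timer tick while /proc/sys/appldata/demo is set to 1.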
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
new file mode 100644
index 000000000..9bf8489df
--- /dev/null
+++ b/arch/s390/appldata/appldata_base.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
+ * Exports appldata_register_ops() and appldata_unregister_ops() for the
+ * data gathering modules.
+ *
+ * Copyright IBM Corp. 2003, 2009
+ *
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "appldata"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/sched/stat.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+#include <linux/sysctl.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/workqueue.h>
+#include <linux/suspend.h>
+#include <linux/platform_device.h>
+#include <asm/appldata.h>
+#include <asm/vtimer.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+#include <asm/smp.h>
+
+#include "appldata.h"
+
+
+#define APPLDATA_CPU_INTERVAL 10000 /* default (CPU) time for
+ sampling interval in
+ milliseconds */
+
+#define TOD_MICRO 0x01000 /* nr. of TOD clock units
+ for 1 microsecond */
+
+static struct platform_device *appldata_pdev;
+
+/*
+ * /proc entries (sysctl)
+ */
+static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
+static int appldata_timer_handler(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+static int appldata_interval_handler(struct ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos);
+
+static struct ctl_table_header *appldata_sysctl_header;
+static struct ctl_table appldata_table[] = {
+ {
+ .procname = "timer",
+ .mode = S_IRUGO | S_IWUSR,
+ .proc_handler = appldata_timer_handler,
+ },
+ {
+ .procname = "interval",
+ .mode = S_IRUGO | S_IWUSR,
+ .proc_handler = appldata_interval_handler,
+ },
+ { },
+};
+
+static struct ctl_table appldata_dir_table[] = {
+ {
+ .procname = appldata_proc_name,
+ .maxlen = 0,
+ .mode = S_IRUGO | S_IXUGO,
+ .child = appldata_table,
+ },
+ { },
+};
+
+/*
+ * Timer
+ */
+static struct vtimer_list appldata_timer;
+
+static DEFINE_SPINLOCK(appldata_timer_lock);
+static int appldata_interval = APPLDATA_CPU_INTERVAL;
+static int appldata_timer_active;
+static int appldata_timer_suspended = 0;
+
+/*
+ * Work queue
+ */
+static struct workqueue_struct *appldata_wq;
+static void appldata_work_fn(struct work_struct *work);
+static DECLARE_WORK(appldata_work, appldata_work_fn);
+
+
+/*
+ * Ops list
+ */
+static DEFINE_MUTEX(appldata_ops_mutex);
+static LIST_HEAD(appldata_ops_list);
+
+
+/*************************** timer, work, DIAG *******************************/
+/*
+ * appldata_timer_function()
+ *
+ * schedule work and reschedule timer
+ */
+static void appldata_timer_function(unsigned long data)
+{
+ queue_work(appldata_wq, (struct work_struct *) data);
+}
+
+/*
+ * appldata_work_fn()
+ *
+ * call data gathering function for each (active) module
+ */
+static void appldata_work_fn(struct work_struct *work)
+{
+ struct list_head *lh;
+ struct appldata_ops *ops;
+
+ mutex_lock(&appldata_ops_mutex);
+ list_for_each(lh, &appldata_ops_list) {
+ ops = list_entry(lh, struct appldata_ops, list);
+ if (ops->active == 1) {
+ ops->callback(ops->data);
+ }
+ }
+ mutex_unlock(&appldata_ops_mutex);
+}
+
+/*
+ * appldata_diag()
+ *
+ * prepare parameter list, issue DIAG 0xDC
+ */
+int appldata_diag(char record_nr, u16 function, unsigned long buffer,
+ u16 length, char *mod_lvl)
+{
+ struct appldata_product_id id = {
+ .prod_nr = {0xD3, 0xC9, 0xD5, 0xE4,
+ 0xE7, 0xD2, 0xD9}, /* "LINUXKR" */
+ .prod_fn = 0xD5D3, /* "NL" */
+ .version_nr = 0xF2F6, /* "26" */
+ .release_nr = 0xF0F1, /* "01" */
+ };
+
+ id.record_nr = record_nr;
+ id.mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1];
+ return appldata_asm(&id, function, (void *) buffer, length);
+}
+/************************ timer, work, DIAG <END> ****************************/
+
+
+/****************************** /proc stuff **********************************/
+
+#define APPLDATA_ADD_TIMER 0
+#define APPLDATA_DEL_TIMER 1
+#define APPLDATA_MOD_TIMER 2
+
+/*
+ * __appldata_vtimer_setup()
+ *
+ * Add, delete or modify virtual timers on all online cpus.
+ * The caller needs to get the appldata_timer_lock spinlock.
+ */
+static void __appldata_vtimer_setup(int cmd)
+{
+ u64 timer_interval = (u64) appldata_interval * 1000 * TOD_MICRO;
+
+ switch (cmd) {
+ case APPLDATA_ADD_TIMER:
+ if (appldata_timer_active)
+ break;
+ appldata_timer.expires = timer_interval;
+ add_virt_timer_periodic(&appldata_timer);
+ appldata_timer_active = 1;
+ break;
+ case APPLDATA_DEL_TIMER:
+ del_virt_timer(&appldata_timer);
+ if (!appldata_timer_active)
+ break;
+ appldata_timer_active = 0;
+ break;
+ case APPLDATA_MOD_TIMER:
+ if (!appldata_timer_active)
+ break;
+ mod_virt_timer_periodic(&appldata_timer, timer_interval);
+ }
+}
+
+/*
+ * appldata_timer_handler()
+ *
+ * Start/Stop timer, show status of timer (0 = not active, 1 = active)
+ */
+static int
+appldata_timer_handler(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int timer_active = appldata_timer_active;
+ int zero = 0;
+ int one = 1;
+ int rc;
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &timer_active,
+ .maxlen = sizeof(int),
+ .extra1 = &zero,
+ .extra2 = &one,
+ };
+
+ rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
+
+ spin_lock(&appldata_timer_lock);
+ if (timer_active)
+ __appldata_vtimer_setup(APPLDATA_ADD_TIMER);
+ else
+ __appldata_vtimer_setup(APPLDATA_DEL_TIMER);
+ spin_unlock(&appldata_timer_lock);
+ return 0;
+}
+
+/*
+ * appldata_interval_handler()
+ *
+ * Set (CPU) timer interval for collection of data (in milliseconds), show
+ * current timer interval.
+ */
+static int
+appldata_interval_handler(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int interval = appldata_interval;
+ int one = 1;
+ int rc;
+ struct ctl_table ctl_entry = {
+ .procname = ctl->procname,
+ .data = &interval,
+ .maxlen = sizeof(int),
+ .extra1 = &one,
+ };
+
+ rc = proc_dointvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write)
+ return rc;
+
+ spin_lock(&appldata_timer_lock);
+ appldata_interval = interval;
+ __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
+ spin_unlock(&appldata_timer_lock);
+ return 0;
+}
+
+/*
+ * appldata_generic_handler()
+ *
+ * Generic start/stop monitoring and DIAG, show status of
+ * monitoring (0 = not in process, 1 = in process)
+ */
+static int
+appldata_generic_handler(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct appldata_ops *ops = NULL, *tmp_ops;
+ struct list_head *lh;
+ int rc, found;
+ int active;
+ int zero = 0;
+ int one = 1;
+ struct ctl_table ctl_entry = {
+ .data = &active,
+ .maxlen = sizeof(int),
+ .extra1 = &zero,
+ .extra2 = &one,
+ };
+
+ found = 0;
+ mutex_lock(&appldata_ops_mutex);
+ list_for_each(lh, &appldata_ops_list) {
+ tmp_ops = list_entry(lh, struct appldata_ops, list);
+ if (&tmp_ops->ctl_table[2] == ctl) {
+ found = 1;
+ }
+ }
+ if (!found) {
+ mutex_unlock(&appldata_ops_mutex);
+ return -ENODEV;
+ }
+ ops = ctl->data;
+ if (!try_module_get(ops->owner)) { // protect this function
+ mutex_unlock(&appldata_ops_mutex);
+ return -ENODEV;
+ }
+ mutex_unlock(&appldata_ops_mutex);
+
+ active = ops->active;
+ rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
+ if (rc < 0 || !write) {
+ module_put(ops->owner);
+ return rc;
+ }
+
+ mutex_lock(&appldata_ops_mutex);
+ if (active && (ops->active == 0)) {
+ // protect work queue callback
+ if (!try_module_get(ops->owner)) {
+ mutex_unlock(&appldata_ops_mutex);
+ module_put(ops->owner);
+ return -ENODEV;
+ }
+ ops->callback(ops->data); // init record
+ rc = appldata_diag(ops->record_nr,
+ APPLDATA_START_INTERVAL_REC,
+ (unsigned long) ops->data, ops->size,
+ ops->mod_lvl);
+ if (rc != 0) {
+ pr_err("Starting the data collection for %s "
+ "failed with rc=%d\n", ops->name, rc);
+ module_put(ops->owner);
+ } else
+ ops->active = 1;
+ } else if (!active && (ops->active == 1)) {
+ ops->active = 0;
+ rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
+ (unsigned long) ops->data, ops->size,
+ ops->mod_lvl);
+ if (rc != 0)
+ pr_err("Stopping the data collection for %s "
+ "failed with rc=%d\n", ops->name, rc);
+ module_put(ops->owner);
+ }
+ mutex_unlock(&appldata_ops_mutex);
+ module_put(ops->owner);
+ return 0;
+}
+
+/*************************** /proc stuff <END> *******************************/
+
+
+/************************* module-ops management *****************************/
+/*
+ * appldata_register_ops()
+ *
+ * update ops list, register /proc/sys entries
+ */
+int appldata_register_ops(struct appldata_ops *ops)
+{
+ if (ops->size > APPLDATA_MAX_REC_SIZE)
+ return -EINVAL;
+
+ ops->ctl_table = kcalloc(4, sizeof(struct ctl_table), GFP_KERNEL);
+ if (!ops->ctl_table)
+ return -ENOMEM;
+
+ mutex_lock(&appldata_ops_mutex);
+ list_add(&ops->list, &appldata_ops_list);
+ mutex_unlock(&appldata_ops_mutex);
+
+ ops->ctl_table[0].procname = appldata_proc_name;
+ ops->ctl_table[0].maxlen = 0;
+ ops->ctl_table[0].mode = S_IRUGO | S_IXUGO;
+ ops->ctl_table[0].child = &ops->ctl_table[2];
+
+ ops->ctl_table[2].procname = ops->name;
+ ops->ctl_table[2].mode = S_IRUGO | S_IWUSR;
+ ops->ctl_table[2].proc_handler = appldata_generic_handler;
+ ops->ctl_table[2].data = ops;
+
+ ops->sysctl_header = register_sysctl_table(ops->ctl_table);
+ if (!ops->sysctl_header)
+ goto out;
+ return 0;
+out:
+ mutex_lock(&appldata_ops_mutex);
+ list_del(&ops->list);
+ mutex_unlock(&appldata_ops_mutex);
+ kfree(ops->ctl_table);
+ return -ENOMEM;
+}
+
+/*
+ * appldata_unregister_ops()
+ *
+ * update ops list, unregister /proc entries, stop DIAG if necessary
+ */
+void appldata_unregister_ops(struct appldata_ops *ops)
+{
+ mutex_lock(&appldata_ops_mutex);
+ list_del(&ops->list);
+ mutex_unlock(&appldata_ops_mutex);
+ unregister_sysctl_table(ops->sysctl_header);
+ kfree(ops->ctl_table);
+}
+/********************** module-ops management <END> **************************/
+
+
+/**************************** suspend / resume *******************************/
+static int appldata_freeze(struct device *dev)
+{
+ struct appldata_ops *ops;
+ int rc;
+ struct list_head *lh;
+
+ spin_lock(&appldata_timer_lock);
+ if (appldata_timer_active) {
+ __appldata_vtimer_setup(APPLDATA_DEL_TIMER);
+ appldata_timer_suspended = 1;
+ }
+ spin_unlock(&appldata_timer_lock);
+
+ mutex_lock(&appldata_ops_mutex);
+ list_for_each(lh, &appldata_ops_list) {
+ ops = list_entry(lh, struct appldata_ops, list);
+ if (ops->active == 1) {
+ rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
+ (unsigned long) ops->data, ops->size,
+ ops->mod_lvl);
+ if (rc != 0)
+ pr_err("Stopping the data collection for %s "
+ "failed with rc=%d\n", ops->name, rc);
+ }
+ }
+ mutex_unlock(&appldata_ops_mutex);
+ return 0;
+}
+
+static int appldata_restore(struct device *dev)
+{
+ struct appldata_ops *ops;
+ int rc;
+ struct list_head *lh;
+
+ spin_lock(&appldata_timer_lock);
+ if (appldata_timer_suspended) {
+ __appldata_vtimer_setup(APPLDATA_ADD_TIMER);
+ appldata_timer_suspended = 0;
+ }
+ spin_unlock(&appldata_timer_lock);
+
+ mutex_lock(&appldata_ops_mutex);
+ list_for_each(lh, &appldata_ops_list) {
+ ops = list_entry(lh, struct appldata_ops, list);
+ if (ops->active == 1) {
+ ops->callback(ops->data); // init record
+ rc = appldata_diag(ops->record_nr,
+ APPLDATA_START_INTERVAL_REC,
+ (unsigned long) ops->data, ops->size,
+ ops->mod_lvl);
+ if (rc != 0) {
+ pr_err("Starting the data collection for %s "
+ "failed with rc=%d\n", ops->name, rc);
+ }
+ }
+ }
+ mutex_unlock(&appldata_ops_mutex);
+ return 0;
+}
+
+static int appldata_thaw(struct device *dev)
+{
+ return appldata_restore(dev);
+}
+
+static const struct dev_pm_ops appldata_pm_ops = {
+ .freeze = appldata_freeze,
+ .thaw = appldata_thaw,
+ .restore = appldata_restore,
+};
+
+static struct platform_driver appldata_pdrv = {
+ .driver = {
+ .name = "appldata",
+ .pm = &appldata_pm_ops,
+ },
+};
+/************************* suspend / resume <END> ****************************/
+
+
+/******************************* init / exit *********************************/
+
+/*
+ * appldata_init()
+ *
+ * init timer, register /proc entries
+ */
+static int __init appldata_init(void)
+{
+ int rc;
+
+ init_virt_timer(&appldata_timer);
+ appldata_timer.function = appldata_timer_function;
+ appldata_timer.data = (unsigned long) &appldata_work;
+
+ rc = platform_driver_register(&appldata_pdrv);
+ if (rc)
+ return rc;
+
+ appldata_pdev = platform_device_register_simple("appldata", -1, NULL,
+ 0);
+ if (IS_ERR(appldata_pdev)) {
+ rc = PTR_ERR(appldata_pdev);
+ goto out_driver;
+ }
+ appldata_wq = alloc_ordered_workqueue("appldata", 0);
+ if (!appldata_wq) {
+ rc = -ENOMEM;
+ goto out_device;
+ }
+
+ appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
+ return 0;
+
+out_device:
+ platform_device_unregister(appldata_pdev);
+out_driver:
+ platform_driver_unregister(&appldata_pdrv);
+ return rc;
+}
+
+__initcall(appldata_init);
+
+/**************************** init / exit <END> ******************************/
+
+EXPORT_SYMBOL_GPL(appldata_register_ops);
+EXPORT_SYMBOL_GPL(appldata_unregister_ops);
+EXPORT_SYMBOL_GPL(appldata_diag);
+
+#ifdef CONFIG_SWAP
+EXPORT_SYMBOL_GPL(si_swapinfo);
+#endif
+EXPORT_SYMBOL_GPL(nr_threads);
+EXPORT_SYMBOL_GPL(nr_running);
+EXPORT_SYMBOL_GPL(nr_iowait);
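Taken together, appldata_dir_table, appldata_table and the per-ops tables built in appldata_register_ops() expose /proc/sys/appldata/timer, /proc/sys/appldata/interval and one /proc/sys/appldata/<name> switch per registered module. A minimal user-space sketch (again not part of this patch, and assuming the usual /proc/sys mount) that enables sampling every 10 seconds:

/* Illustration only: drive the sysctl files created by appldata_base.c. */
#include <stdio.h>
#include <stdlib.h>

static int write_sysctl(const char *path, const char *value)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	if (fputs(value, f) == EOF) {
		fclose(f);
		return -1;
	}
	return fclose(f);
}

int main(void)
{
	/* sampling interval in milliseconds; the handler enforces >= 1 */
	if (write_sysctl("/proc/sys/appldata/interval", "10000"))
		return EXIT_FAILURE;
	/* start the virtual timer; writing "0" would stop it again */
	if (write_sysctl("/proc/sys/appldata/timer", "1"))
		return EXIT_FAILURE;
	/* per-module switches appear as /proc/sys/appldata/<name>, e.g. "mem" */
	return write_sysctl("/proc/sys/appldata/mem", "1") ? EXIT_FAILURE : 0;
}

The last write only succeeds once a module such as appldata_mem (added below) has registered its "mem" entry; appldata_generic_handler() then issues the DIAG 0xDC start call for that record.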
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c
new file mode 100644
index 000000000..e68136c3c
--- /dev/null
+++ b/arch/s390/appldata/appldata_mem.c
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Data gathering module for Linux-VM Monitor Stream, Stage 1.
+ * Collects data related to memory management.
+ *
+ * Copyright IBM Corp. 2003, 2006
+ *
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+#include "appldata.h"
+
+
+#define P2K(x) ((x) << (PAGE_SHIFT - 10)) /* Converts #Pages to KB */
+
+/*
+ * Memory data
+ *
+ * This is accessed as binary data by z/VM. If changes to it can't be avoided,
+ * the structure version (product ID, see appldata_base.c) needs to be changed
+ * as well and all documentation and z/VM applications using it must be
+ * updated.
+ *
+ * The record layout is documented in the Linux for zSeries Device Drivers
+ * book:
+ * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
+ */
+struct appldata_mem_data {
+ u64 timestamp;
+ u32 sync_count_1; /* after VM collected the record data, */
+ u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the
+ same. If not, the record has been updated on
+ the Linux side while VM was collecting the
+ (possibly corrupt) data */
+
+ u64 pgpgin; /* data read from disk */
+ u64 pgpgout; /* data written to disk */
+ u64 pswpin; /* pages swapped in */
+ u64 pswpout; /* pages swapped out */
+
+ u64 sharedram; /* sharedram is currently set to 0 */
+
+ u64 totalram; /* total main memory size */
+ u64 freeram; /* free main memory size */
+ u64 totalhigh; /* total high memory size */
+ u64 freehigh; /* free high memory size */
+
+ u64 bufferram; /* memory reserved for buffers, free cache */
+ u64 cached; /* size of (used) cache, w/o buffers */
+ u64 totalswap; /* total swap space size */
+ u64 freeswap; /* free swap space */
+
+// New in 2.6 -->
+ u64 pgalloc; /* page allocations */
+ u64 pgfault; /* page faults (major+minor) */
+ u64 pgmajfault; /* page faults (major only) */
+// <-- New in 2.6
+
+} __packed;
+
+
+/*
+ * appldata_get_mem_data()
+ *
+ * gather memory data
+ */
+static void appldata_get_mem_data(void *data)
+{
+ /*
+ * don't put large structures on the stack, we are
+ * serialized through the appldata_ops_mutex and can use static
+ */
+ static struct sysinfo val;
+ unsigned long ev[NR_VM_EVENT_ITEMS];
+ struct appldata_mem_data *mem_data;
+
+ mem_data = data;
+ mem_data->sync_count_1++;
+
+ all_vm_events(ev);
+ mem_data->pgpgin = ev[PGPGIN] >> 1;
+ mem_data->pgpgout = ev[PGPGOUT] >> 1;
+ mem_data->pswpin = ev[PSWPIN];
+ mem_data->pswpout = ev[PSWPOUT];
+ mem_data->pgalloc = ev[PGALLOC_NORMAL];
+ mem_data->pgalloc += ev[PGALLOC_DMA];
+ mem_data->pgfault = ev[PGFAULT];
+ mem_data->pgmajfault = ev[PGMAJFAULT];
+
+ si_meminfo(&val);
+ mem_data->sharedram = val.sharedram;
+ mem_data->totalram = P2K(val.totalram);
+ mem_data->freeram = P2K(val.freeram);
+ mem_data->totalhigh = P2K(val.totalhigh);
+ mem_data->freehigh = P2K(val.freehigh);
+ mem_data->bufferram = P2K(val.bufferram);
+ mem_data->cached = P2K(global_node_page_state(NR_FILE_PAGES)
+ - val.bufferram);
+
+ si_swapinfo(&val);
+ mem_data->totalswap = P2K(val.totalswap);
+ mem_data->freeswap = P2K(val.freeswap);
+
+ mem_data->timestamp = get_tod_clock();
+ mem_data->sync_count_2++;
+}
+
+
+static struct appldata_ops ops = {
+ .name = "mem",
+ .record_nr = APPLDATA_RECORD_MEM_ID,
+ .size = sizeof(struct appldata_mem_data),
+ .callback = &appldata_get_mem_data,
+ .owner = THIS_MODULE,
+ .mod_lvl = {0xF0, 0xF0}, /* EBCDIC "00" */
+};
+
+
+/*
+ * appldata_mem_init()
+ *
+ * init_data, register ops
+ */
+static int __init appldata_mem_init(void)
+{
+ int ret;
+
+ ops.data = kzalloc(sizeof(struct appldata_mem_data), GFP_KERNEL);
+ if (!ops.data)
+ return -ENOMEM;
+
+ ret = appldata_register_ops(&ops);
+ if (ret)
+ kfree(ops.data);
+
+ return ret;
+}
+
+/*
+ * appldata_mem_exit()
+ *
+ * unregister ops
+ */
+static void __exit appldata_mem_exit(void)
+{
+ appldata_unregister_ops(&ops);
+ kfree(ops.data);
+}
+
+
+module_init(appldata_mem_init);
+module_exit(appldata_mem_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Gerald Schaefer");
+MODULE_DESCRIPTION("Linux-VM Monitor Stream, MEMORY statistics");
diff --git a/arch/s390/appldata/appldata_net_sum.c b/arch/s390/appldata/appldata_net_sum.c
new file mode 100644
index 000000000..8bc14b0d1
--- /dev/null
+++ b/arch/s390/appldata/appldata_net_sum.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Data gathering module for Linux-VM Monitor Stream, Stage 1.
+ * Collects accumulated network statistics (Packets received/transmitted,
+ * dropped, errors, ...).
+ *
+ * Copyright IBM Corp. 2003, 2006
+ *
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/netdevice.h>
+#include <net/net_namespace.h>
+
+#include "appldata.h"
+
+
+/*
+ * Network data
+ *
+ * This is accessed as binary data by z/VM. If changes to it can't be avoided,
+ * the structure version (product ID, see appldata_base.c) needs to be changed
+ * as well and all documentation and z/VM applications using it must be updated.
+ *
+ * The record layout is documented in the Linux for zSeries Device Drivers
+ * book:
+ * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
+ */
+struct appldata_net_sum_data {
+ u64 timestamp;
+ u32 sync_count_1; /* after VM collected the record data, */
+ u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the
+ same. If not, the record has been updated on
+ the Linux side while VM was collecting the
+ (possibly corrupt) data */
+
+ u32 nr_interfaces; /* nr. of network interfaces being monitored */
+
+ u32 padding; /* next value is 64-bit aligned, so these */
+ /* 4 byte would be padded out by compiler */
+
+ u64 rx_packets; /* total packets received */
+ u64 tx_packets; /* total packets transmitted */
+ u64 rx_bytes; /* total bytes received */
+ u64 tx_bytes; /* total bytes transmitted */
+ u64 rx_errors; /* bad packets received */
+ u64 tx_errors; /* packet transmit problems */
+ u64 rx_dropped; /* no space in linux buffers */
+ u64 tx_dropped; /* no space available in linux */
+ u64 collisions; /* collisions while transmitting */
+} __packed;
+
+
+/*
+ * appldata_get_net_sum_data()
+ *
+ * gather accumulated network statistics
+ */
+static void appldata_get_net_sum_data(void *data)
+{
+ int i;
+ struct appldata_net_sum_data *net_data;
+ struct net_device *dev;
+ unsigned long rx_packets, tx_packets, rx_bytes, tx_bytes, rx_errors,
+ tx_errors, rx_dropped, tx_dropped, collisions;
+
+ net_data = data;
+ net_data->sync_count_1++;
+
+ i = 0;
+ rx_packets = 0;
+ tx_packets = 0;
+ rx_bytes = 0;
+ tx_bytes = 0;
+ rx_errors = 0;
+ tx_errors = 0;
+ rx_dropped = 0;
+ tx_dropped = 0;
+ collisions = 0;
+
+ rcu_read_lock();
+ for_each_netdev_rcu(&init_net, dev) {
+ const struct rtnl_link_stats64 *stats;
+ struct rtnl_link_stats64 temp;
+
+ stats = dev_get_stats(dev, &temp);
+ rx_packets += stats->rx_packets;
+ tx_packets += stats->tx_packets;
+ rx_bytes += stats->rx_bytes;
+ tx_bytes += stats->tx_bytes;
+ rx_errors += stats->rx_errors;
+ tx_errors += stats->tx_errors;
+ rx_dropped += stats->rx_dropped;
+ tx_dropped += stats->tx_dropped;
+ collisions += stats->collisions;
+ i++;
+ }
+ rcu_read_unlock();
+
+ net_data->nr_interfaces = i;
+ net_data->rx_packets = rx_packets;
+ net_data->tx_packets = tx_packets;
+ net_data->rx_bytes = rx_bytes;
+ net_data->tx_bytes = tx_bytes;
+ net_data->rx_errors = rx_errors;
+ net_data->tx_errors = tx_errors;
+ net_data->rx_dropped = rx_dropped;
+ net_data->tx_dropped = tx_dropped;
+ net_data->collisions = collisions;
+
+ net_data->timestamp = get_tod_clock();
+ net_data->sync_count_2++;
+}
+
+
+static struct appldata_ops ops = {
+ .name = "net_sum",
+ .record_nr = APPLDATA_RECORD_NET_SUM_ID,
+ .size = sizeof(struct appldata_net_sum_data),
+ .callback = &appldata_get_net_sum_data,
+ .owner = THIS_MODULE,
+ .mod_lvl = {0xF0, 0xF0}, /* EBCDIC "00" */
+};
+
+
+/*
+ * appldata_net_init()
+ *
+ * init data, register ops
+ */
+static int __init appldata_net_init(void)
+{
+ int ret;
+
+ ops.data = kzalloc(sizeof(struct appldata_net_sum_data), GFP_KERNEL);
+ if (!ops.data)
+ return -ENOMEM;
+
+ ret = appldata_register_ops(&ops);
+ if (ret)
+ kfree(ops.data);
+
+ return ret;
+}
+
+/*
+ * appldata_net_exit()
+ *
+ * unregister ops
+ */
+static void __exit appldata_net_exit(void)
+{
+ appldata_unregister_ops(&ops);
+ kfree(ops.data);
+}
+
+
+module_init(appldata_net_init);
+module_exit(appldata_net_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Gerald Schaefer");
+MODULE_DESCRIPTION("Linux-VM Monitor Stream, accumulated network statistics");
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
new file mode 100644
index 000000000..433a994b1
--- /dev/null
+++ b/arch/s390/appldata/appldata_os.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Data gathering module for Linux-VM Monitor Stream, Stage 1.
+ * Collects misc. OS related data (CPU utilization, running processes).
+ *
+ * Copyright IBM Corp. 2003, 2006
+ *
+ * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#define KMSG_COMPONENT "appldata"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/netdevice.h>
+#include <linux/sched.h>
+#include <linux/sched/loadavg.h>
+#include <linux/sched/stat.h>
+#include <asm/appldata.h>
+#include <asm/smp.h>
+
+#include "appldata.h"
+
+
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
+
+/*
+ * OS data
+ *
+ * This is accessed as binary data by z/VM. If changes to it can't be avoided,
+ * the structure version (product ID, see appldata_base.c) needs to be changed
+ * as well and all documentation and z/VM applications using it must be
+ * updated.
+ *
+ * The record layout is documented in the Linux for zSeries Device Drivers
+ * book:
+ * http://oss.software.ibm.com/developerworks/opensource/linux390/index.shtml
+ */
+struct appldata_os_per_cpu {
+ u32 per_cpu_user; /* timer ticks spent in user mode */
+ u32 per_cpu_nice; /* ... spent with modified priority */
+ u32 per_cpu_system; /* ... spent in kernel mode */
+ u32 per_cpu_idle; /* ... spent in idle mode */
+
+ /* New in 2.6 */
+ u32 per_cpu_irq; /* ... spent in interrupts */
+ u32 per_cpu_softirq; /* ... spent in softirqs */
+ u32 per_cpu_iowait; /* ... spent while waiting for I/O */
+
+ /* New in modification level 01 */
+ u32 per_cpu_steal; /* ... stolen by hypervisor */
+ u32 cpu_id; /* number of this CPU */
+} __attribute__((packed));
+
+struct appldata_os_data {
+ u64 timestamp;
+ u32 sync_count_1; /* after VM collected the record data, */
+ u32 sync_count_2; /* sync_count_1 and sync_count_2 should be the
+ same. If not, the record has been updated on
+ the Linux side while VM was collecting the
+ (possibly corrupt) data */
+
+ u32 nr_cpus; /* number of (virtual) CPUs */
+ u32 per_cpu_size; /* size of the per-cpu data struct */
+ u32 cpu_offset; /* offset of the first per-cpu data struct */
+
+ u32 nr_running; /* number of runnable threads */
+ u32 nr_threads; /* number of threads */
+ u32 avenrun[3]; /* average nr. of running processes during */
+ /* the last 1, 5 and 15 minutes */
+
+ /* New in 2.6 */
+ u32 nr_iowait; /* number of blocked threads
+ (waiting for I/O) */
+
+ /* per cpu data */
+ struct appldata_os_per_cpu os_cpu[0];
+} __attribute__((packed));
+
+static struct appldata_os_data *appldata_os_data;
+
+static struct appldata_ops ops = {
+ .name = "os",
+ .record_nr = APPLDATA_RECORD_OS_ID,
+ .owner = THIS_MODULE,
+ .mod_lvl = {0xF0, 0xF1}, /* EBCDIC "01" */
+};
+
+
+/*
+ * appldata_get_os_data()
+ *
+ * gather OS data
+ */
+static void appldata_get_os_data(void *data)
+{
+ int i, j, rc;
+ struct appldata_os_data *os_data;
+ unsigned int new_size;
+
+ os_data = data;
+ os_data->sync_count_1++;
+
+ os_data->nr_threads = nr_threads;
+ os_data->nr_running = nr_running();
+ os_data->nr_iowait = nr_iowait();
+ os_data->avenrun[0] = avenrun[0] + (FIXED_1/200);
+ os_data->avenrun[1] = avenrun[1] + (FIXED_1/200);
+ os_data->avenrun[2] = avenrun[2] + (FIXED_1/200);
+
+ j = 0;
+ for_each_online_cpu(i) {
+ os_data->os_cpu[j].per_cpu_user =
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
+ os_data->os_cpu[j].per_cpu_nice =
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
+ os_data->os_cpu[j].per_cpu_system =
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
+ os_data->os_cpu[j].per_cpu_idle =
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
+ os_data->os_cpu[j].per_cpu_irq =
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
+ os_data->os_cpu[j].per_cpu_softirq =
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
+ os_data->os_cpu[j].per_cpu_iowait =
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
+ os_data->os_cpu[j].per_cpu_steal =
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
+ os_data->os_cpu[j].cpu_id = i;
+ j++;
+ }
+
+ os_data->nr_cpus = j;
+
+ new_size = sizeof(struct appldata_os_data) +
+ (os_data->nr_cpus * sizeof(struct appldata_os_per_cpu));
+ if (ops.size != new_size) {
+ if (ops.active) {
+ rc = appldata_diag(APPLDATA_RECORD_OS_ID,
+ APPLDATA_START_INTERVAL_REC,
+ (unsigned long) ops.data, new_size,
+ ops.mod_lvl);
+ if (rc != 0)
+ pr_err("Starting a new OS data collection "
+ "failed with rc=%d\n", rc);
+
+ rc = appldata_diag(APPLDATA_RECORD_OS_ID,
+ APPLDATA_STOP_REC,
+ (unsigned long) ops.data, ops.size,
+ ops.mod_lvl);
+ if (rc != 0)
+ pr_err("Stopping a faulty OS data "
+ "collection failed with rc=%d\n", rc);
+ }
+ ops.size = new_size;
+ }
+ os_data->timestamp = get_tod_clock();
+ os_data->sync_count_2++;
+}
+
+
+/*
+ * appldata_os_init()
+ *
+ * init data, register ops
+ */
+static int __init appldata_os_init(void)
+{
+ int rc, max_size;
+
+ max_size = sizeof(struct appldata_os_data) +
+ (num_possible_cpus() * sizeof(struct appldata_os_per_cpu));
+ if (max_size > APPLDATA_MAX_REC_SIZE) {
+ pr_err("Maximum OS record size %i exceeds the maximum "
+ "record size %i\n", max_size, APPLDATA_MAX_REC_SIZE);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ appldata_os_data = kzalloc(max_size, GFP_KERNEL | GFP_DMA);
+ if (appldata_os_data == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ appldata_os_data->per_cpu_size = sizeof(struct appldata_os_per_cpu);
+ appldata_os_data->cpu_offset = offsetof(struct appldata_os_data,
+ os_cpu);
+
+ ops.data = appldata_os_data;
+ ops.callback = &appldata_get_os_data;
+ rc = appldata_register_ops(&ops);
+ if (rc != 0)
+ kfree(appldata_os_data);
+out:
+ return rc;
+}
+
+/*
+ * appldata_os_exit()
+ *
+ * unregister ops
+ */
+static void __exit appldata_os_exit(void)
+{
+ appldata_unregister_ops(&ops);
+ kfree(appldata_os_data);
+}
+
+
+module_init(appldata_os_init);
+module_exit(appldata_os_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Gerald Schaefer");
+MODULE_DESCRIPTION("Linux-VM Monitor Stream, OS statistics");