path: root/arch/s390/kernel/idle.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Idle functions for s390.
 *
 * Copyright IBM Corp. 2014
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <trace/events/power.h>
#include <asm/cpu_mf.h>
#include <asm/cputime.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include "entry.h"

static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);

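/*
 * Called when an interrupt ends an enabled wait (idle) period: account
 * the wall-clock time spent waiting as idle time and bring the lowcore
 * accounting fields up to date.
 */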
void account_idle_time_irq(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	unsigned long idle_time;
	u64 cycles_new[8];
	int i;

	/* With SMT, account the MT diagnostic cycle counter deltas since idle entry. */
	if (smp_cpu_mtid) {
		stcctm(MT_DIAG, smp_cpu_mtid, cycles_new);
		for (i = 0; i < smp_cpu_mtid; i++)
			this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
	}

	/* int_clock holds the TOD clock value saved at interrupt entry. */
	idle_time = S390_lowcore.int_clock - idle->clock_idle_enter;

	/* Unaccounted wall-clock time before idle entry counts toward steal time. */
	S390_lowcore.steal_timer += idle->clock_idle_enter - S390_lowcore.last_update_clock;
	S390_lowcore.last_update_clock = S390_lowcore.int_clock;

	/* CPU timer consumed before idle entry is accounted as system time. */
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - idle->timer_idle_enter;
	S390_lowcore.last_update_timer = S390_lowcore.sys_enter_timer;

	/* Account time spent with enabled wait psw loaded as idle time. */
	WRITE_ONCE(idle->idle_time, READ_ONCE(idle->idle_time) + idle_time);
	WRITE_ONCE(idle->idle_count, READ_ONCE(idle->idle_count) + 1);
	account_idle_time(cputime_to_nsecs(idle_time));
}

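/*
 * Called from the generic idle loop with interrupts disabled; noinstr
 * keeps instrumentation out of this low-level path.
 */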
void noinstr arch_cpu_idle(void)
{
	struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
	unsigned long psw_mask;

	/* Wait for external, I/O or machine check interrupt. */
	psw_mask = PSW_KERNEL_BITS | PSW_MASK_WAIT |
		   PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
	clear_cpu_flag(CIF_NOHZ_DELAY);
	set_cpu_flag(CIF_ENABLED_WAIT);
	/* Snapshot the MT diagnostic cycle counters at idle entry. */
	if (smp_cpu_mtid)
		stcctm(MT_DIAG, smp_cpu_mtid, (u64 *)&idle->mt_cycles_enter);
	idle->clock_idle_enter = get_tod_clock_fast();
	idle->timer_idle_enter = get_cpu_timer();
	/* Turn branch prediction back on before entering the wait. */
	bpon();
	/* Load an enabled wait PSW; the CPU stops here until an interrupt arrives. */
	__load_psw_mask(psw_mask);
}

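/* sysfs attributes exposing the per-CPU idle statistics. */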
static ssize_t show_idle_count(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);

	return sysfs_emit(buf, "%lu\n", READ_ONCE(idle->idle_count));
}
DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);

	/* TOD clock units: bit 51 equals one microsecond, so >> 12 converts to microseconds. */
	return sysfs_emit(buf, "%lu\n", READ_ONCE(idle->idle_time) >> 12);
}
DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);

/* Nothing to do on idle enter/exit on s390. */
void arch_cpu_idle_enter(void)
{
}

void arch_cpu_idle_exit(void)
{
}

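/* Called from the idle task when this CPU goes offline; never returns. */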
void __noreturn arch_cpu_idle_dead(void)
{
	cpu_die();
}