// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013 ARM/Linaro
 *
 * Authors: Daniel Lezcano <daniel.lezcano@linaro.org>
 *          Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *          Nicolas Pitre <nicolas.pitre@linaro.org>
 *
 * Maintainer: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 * Maintainer: Daniel Lezcano <daniel.lezcano@linaro.org>
 */
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpuidle.h>
#include <asm/mcpm.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>

#include "dt_idle_states.h"

static int bl_enter_powerdown(struct cpuidle_device *dev,
                              struct cpuidle_driver *drv, int idx);

/*
 * NB: Owing to current menu governor behaviour, big and LITTLE index 1
 * states have to define exit_latency and target_residency for the
 * cluster state since, when all CPUs in a cluster hit it, the cluster
 * can be shut down. This means that when a single CPU enters this state
 * the exit_latency and target_residency values are somewhat overkill.
 * There is no notion of cluster states in the menu governor, so CPUs
 * have to define CPU states in which the cluster may possibly be shut
 * down, depending on the state of the other CPUs. Idle state entry and
 * exit happen at random times; however, the cluster state provides
 * target_residency values as if all CPUs in a cluster entered the state
 * at once. This is somewhat optimistic and the behaviour should be fixed
 * either in the governor or in the MCPM back-ends.
 * To make this driver 100% generic, the number of states and the
 * exit_latency and target_residency values must be obtained from device
 * tree bindings (see the illustrative idle-state node sketched after
 * bl_idle_state_match below).
 *
 * exit_latency: refers to the TC2 vexpress test chip and depends on the
 * current cluster operating point. It is the time it takes to get the
 * CPU up and running when the CPU is powered up on cluster wake-up from
 * shutdown. The current values for the big and LITTLE clusters are given
 * for clusters running at their default operating points.
 *
 * target_residency: the minimum amount of time the cluster has to be
 * down to break even in terms of power consumption. Cluster shutdown has
 * inherent dynamic power costs (L2 writebacks to DRAM being the main
 * factor) that depend on the current operating points. The current values
 * for both clusters are given for a CPU half of whose L2 lines are dirty
 * and require cleaning to DRAM, and take into account the static leakage
 * power of the vexpress TC2 test chip.
 */
static struct cpuidle_driver bl_idle_little_driver = {
        .name = "little_idle",
        .owner = THIS_MODULE,
        .states[0] = ARM_CPUIDLE_WFI_STATE,
        .states[1] = {
                .enter = bl_enter_powerdown,
                .exit_latency = 700,
                .target_residency = 2500,
                .flags = CPUIDLE_FLAG_TIMER_STOP |
                         CPUIDLE_FLAG_RCU_IDLE,
                .name = "C1",
                .desc = "ARM little-cluster power down",
        },
        .state_count = 2,
};

static const struct of_device_id bl_idle_state_match[] __initconst = {
        { .compatible = "arm,idle-state",
          .data = bl_enter_powerdown },
        { },
};
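
/*
 * Illustrative sketch only, not taken from any shipping .dts: the kind of
 * device tree idle-state node that dt_init_idle_driver() parses through
 * the generic "arm,idle-state" binding to fill in the states above index 0.
 * The property names come from the standard idle-states binding; the node
 * name and the latency/residency numbers below are placeholders, and each
 * CPU node would reference the state through its "cpu-idle-states"
 * property.
 *
 *      cluster_sleep_little: cluster-sleep-little {
 *              compatible = "arm,idle-state";
 *              local-timer-stop;
 *              entry-latency-us = <1000>;
 *              exit-latency-us = <700>;
 *              min-residency-us = <2500>;
 *      };
 */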

static struct cpuidle_driver bl_idle_big_driver = {
        .name = "big_idle",
        .owner = THIS_MODULE,
        .states[0] = ARM_CPUIDLE_WFI_STATE,
        .states[1] = {
                .enter = bl_enter_powerdown,
                .exit_latency = 500,
                .target_residency = 2000,
                .flags = CPUIDLE_FLAG_TIMER_STOP |
                         CPUIDLE_FLAG_RCU_IDLE,
                .name = "C1",
                .desc = "ARM big-cluster power down",
        },
        .state_count = 2,
};

/*
 * notrace prevents trace shims from getting inserted where they
 * should not. Global jumps and ldrex/strex must not be inserted
 * in power down sequences where caches and MMU may be turned off.
 */
static int notrace bl_powerdown_finisher(unsigned long arg)
{
        /* MCPM works with HW CPU identifiers */
        unsigned int mpidr = read_cpuid_mpidr();
        unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

        mcpm_set_entry_vector(cpu, cluster, cpu_resume);
        mcpm_cpu_suspend();

        /* return value != 0 means failure */
        return 1;
}

/**
 * bl_enter_powerdown - Programs CPU to enter the specified state
 * @dev: cpuidle device
 * @drv: cpuidle driver containing the state table
 * @idx: index of the target state
 *
 * Called from the CPUidle framework to program the device to the
 * specified target state selected by the governor.
 */
static __cpuidle int bl_enter_powerdown(struct cpuidle_device *dev,
                                        struct cpuidle_driver *drv, int idx)
{
        cpu_pm_enter();
        ct_cpuidle_enter();

        cpu_suspend(0, bl_powerdown_finisher);

        /* signals the MCPM core that CPU is out of low power state */
        mcpm_cpu_powered_up();

        ct_cpuidle_exit();
        cpu_pm_exit();

        return idx;
}

static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id)
{
        struct cpumask *cpumask;
        int cpu;

        cpumask = kzalloc(cpumask_size(), GFP_KERNEL);
        if (!cpumask)
                return -ENOMEM;

        for_each_possible_cpu(cpu)
                if (smp_cpuid_part(cpu) == part_id)
                        cpumask_set_cpu(cpu, cpumask);

        drv->cpumask = cpumask;

        return 0;
}

static const struct of_device_id compatible_machine_match[] = {
        { .compatible = "arm,vexpress,v2p-ca15_a7" },
        { .compatible = "google,peach" },
        {},
};

static int __init bl_idle_init(void)
{
        int ret;
        struct device_node *root = of_find_node_by_path("/");
        const struct of_device_id *match_id;

        if (!root)
                return -ENODEV;

        /*
         * Initialize the driver just for a compliant set of machines
         */
        match_id = of_match_node(compatible_machine_match, root);

        of_node_put(root);

        if (!match_id)
                return -ENODEV;

        if (!mcpm_is_available())
                return -EUNATCH;

        /*
         * For now the differentiation between little and big cores
         * is based on the part number. A7 cores are considered little
         * cores, A15 are considered big cores. This distinction may
         * evolve in the future with a more generic matching approach.
         */
        ret = bl_idle_driver_init(&bl_idle_little_driver,
                                  ARM_CPU_PART_CORTEX_A7);
        if (ret)
                return ret;

        ret = bl_idle_driver_init(&bl_idle_big_driver, ARM_CPU_PART_CORTEX_A15);
        if (ret)
                goto out_uninit_little;

        /* Start at index 1, index 0 standard WFI */
        ret = dt_init_idle_driver(&bl_idle_big_driver, bl_idle_state_match, 1);
        if (ret < 0)
                goto out_uninit_big;

        /* Start at index 1, index 0 standard WFI */
        ret = dt_init_idle_driver(&bl_idle_little_driver,
                                  bl_idle_state_match, 1);
        if (ret < 0)
                goto out_uninit_big;

        ret = cpuidle_register(&bl_idle_little_driver, NULL);
        if (ret)
                goto out_uninit_big;

        ret = cpuidle_register(&bl_idle_big_driver, NULL);
        if (ret)
                goto out_unregister_little;

        return 0;

out_unregister_little:
        cpuidle_unregister(&bl_idle_little_driver);
out_uninit_big:
        kfree(bl_idle_big_driver.cpumask);
out_uninit_little:
        kfree(bl_idle_little_driver.cpumask);

        return ret;
}
device_initcall(bl_idle_init);