// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * Power policy API implementations
 */

#include <mali_kbase.h>
#include <gpu/mali_kbase_gpu_regmap.h>
#include <mali_kbase_pm.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <mali_kbase_reset_gpu.h>

#if MALI_USE_CSF && defined(CONFIG_MALI_DEBUG)
#include <csf/mali_kbase_csf_firmware.h>
#endif

#include <linux/of.h>
#include <linux/version.h>

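/*
 * Table of selectable power policies; kbase_pm_policy_init() uses the
 * first entry as the default policy.
 */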
static const struct kbase_pm_policy *const all_policy_list[] = {
#if IS_ENABLED(CONFIG_MALI_NO_MALI)
	&kbase_pm_always_on_policy_ops,
	&kbase_pm_coarse_demand_policy_ops,
#else /* CONFIG_MALI_NO_MALI */
	&kbase_pm_coarse_demand_policy_ops,
	&kbase_pm_always_on_policy_ops,
#endif /* CONFIG_MALI_NO_MALI */
};

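/*
 * Select and initialize the power policy used at boot: the first entry
 * of all_policy_list by default, a policy named by the "power_policy"
 * device tree property if one matches, or always_on when firmware
 * debugging is enabled on CSF debug builds.
 */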
void kbase_pm_policy_init(struct kbase_device *kbdev)
{
	const struct kbase_pm_policy *default_policy = all_policy_list[0];
	struct device_node *np = kbdev->dev->of_node;
	const char *power_policy_name;
	unsigned long flags;
	int i;

	if (of_property_read_string(np, "power_policy", &power_policy_name) == 0) {
		for (i = 0; i < ARRAY_SIZE(all_policy_list); i++)
			if (sysfs_streq(all_policy_list[i]->name, power_policy_name)) {
				default_policy = all_policy_list[i];
				break;
			}
	}

#if MALI_USE_CSF && defined(CONFIG_MALI_DEBUG)
	/* Use the always_on policy if the fw_debug module parameter is
	 * set, to aid firmware debugging.
	 */
	if (fw_debug)
		default_policy = &kbase_pm_always_on_policy_ops;
#endif

	default_policy->init(kbdev);

#if MALI_USE_CSF
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbdev->pm.backend.pm_current_policy = default_policy;
	kbdev->pm.backend.csf_pm_sched_flags = default_policy->pm_sched_flags;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
#else
	CSTD_UNUSED(flags);
	kbdev->pm.backend.pm_current_policy = default_policy;
#endif
}

void kbase_pm_policy_term(struct kbase_device *kbdev)
{
	kbdev->pm.backend.pm_current_policy->term(kbdev);
}

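/*
 * Ask the current policy whether the GPU should be powered. If so, either
 * flag that a power-on is required once the pending power-off completes,
 * or cancel the not-yet-queued power-off work and power on immediately;
 * otherwise request a power-off, which is only legal once no contexts
 * remain active.
 */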
void kbase_pm_update_active(struct kbase_device *kbdev)
{
	struct kbase_pm_device_data *pm = &kbdev->pm;
	struct kbase_pm_backend_data *backend = &pm->backend;
	unsigned long flags;
	bool active;

	lockdep_assert_held(&pm->lock);

	/* pm_current_policy will never be NULL while pm.lock is held */
	KBASE_DEBUG_ASSERT(backend->pm_current_policy);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	active = backend->pm_current_policy->get_core_active(kbdev);
	WARN((kbase_pm_is_active(kbdev) && !active),
		"GPU is active but policy '%s' is indicating that it can be powered off",
		kbdev->pm.backend.pm_current_policy->name);

	if (active) {
		/* Power on the GPU and any cores requested by the policy */
		if (!pm->backend.invoke_poweroff_wait_wq_when_l2_off &&
				pm->backend.poweroff_wait_in_progress) {
			KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_powered);
			pm->backend.poweron_required = true;
			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		} else {
			/* Cancel the invocation of
			 * kbase_pm_gpu_poweroff_wait_wq() from the L2 state
			 * machine. This is safe - if
			 * invoke_poweroff_wait_wq_when_l2_off is true, then
			 * the poweroff work hasn't even been queued yet,
			 * meaning we can go straight to powering on.
			 */
			pm->backend.invoke_poweroff_wait_wq_when_l2_off = false;
			pm->backend.poweroff_wait_in_progress = false;
			pm->backend.l2_desired = true;
#if MALI_USE_CSF
			pm->backend.mcu_desired = true;
#endif

			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
			kbase_pm_do_poweron(kbdev, false);
		}
	} else {
		/* It is an error for the power policy to power off the GPU
		 * when there are contexts active
		 */
		KBASE_DEBUG_ASSERT(pm->active_count == 0);

		pm->backend.poweron_required = false;

		/* Request power off */
		if (pm->backend.gpu_powered) {
			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

			/* Power off the GPU immediately */
			kbase_pm_do_poweroff(kbdev);
		} else {
			spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
		}
	}
}

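/*
 * Re-evaluate the dynamic shader core state. On CSF GPUs this kicks the
 * MCU state machine so the firmware is updated with the usable core mask;
 * on job manager GPUs it queries the policy's shaders_needed() hook and
 * runs the state machine when both shaders and the L2 are desired.
 */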
void kbase_pm_update_dynamic_cores_onoff(struct kbase_device *kbdev)
{
	bool shaders_desired;

	lockdep_assert_held(&kbdev->hwaccess_lock);
	lockdep_assert_held(&kbdev->pm.lock);

	if (kbdev->pm.backend.pm_current_policy == NULL)
		return;
	if (kbdev->pm.backend.poweroff_wait_in_progress)
		return;

#if MALI_USE_CSF
	CSTD_UNUSED(shaders_desired);
	/* Invoke the MCU state machine to send a request to FW for updating
	 * the mask of shader cores that can be used for allocation of
	 * endpoints requested by CSGs.
	 */
	if (kbase_pm_is_mcu_desired(kbdev))
		kbase_pm_update_state(kbdev);
#else
	/* During a protected mode transition, don't let an outside shader
	 * core request affect the transition; return directly.
	 */
	if (kbdev->pm.backend.protected_transition_override)
		return;

	shaders_desired = kbdev->pm.backend.pm_current_policy->shaders_needed(kbdev);

	if (shaders_desired && kbase_pm_is_l2_desired(kbdev))
		kbase_pm_update_state(kbdev);
#endif
}

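/*
 * Recompute whether shader cores are desired and, if the answer changed,
 * run the power state machine. The caller must hold the hwaccess_lock.
 */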
void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
{
	bool shaders_desired = false;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	if (kbdev->pm.backend.pm_current_policy == NULL)
		return;
	if (kbdev->pm.backend.poweroff_wait_in_progress)
		return;

#if !MALI_USE_CSF
	if (kbdev->pm.backend.protected_transition_override)
		/* We are trying to change in/out of protected mode - force all
		 * cores off so that the L2 powers down
		 */
		shaders_desired = false;
	else
		shaders_desired = kbdev->pm.backend.pm_current_policy->shaders_needed(kbdev);
#endif

	if (kbdev->pm.backend.shaders_desired != shaders_desired) {
		KBASE_KTRACE_ADD(kbdev, PM_CORES_CHANGE_DESIRED, NULL,
				 kbdev->pm.backend.shaders_desired);

		kbdev->pm.backend.shaders_desired = shaders_desired;
		kbase_pm_update_state(kbdev);
	}
}

void kbase_pm_update_cores_state(struct kbase_device *kbdev)
{
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	kbase_pm_update_cores_state_nolock(kbdev);

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}

int kbase_pm_list_policies(struct kbase_device *kbdev,
	const struct kbase_pm_policy * const **list)
{
	if (list)
		*list = all_policy_list;

	return ARRAY_SIZE(all_policy_list);
}

KBASE_EXPORT_TEST_API(kbase_pm_list_policies);

const struct kbase_pm_policy *kbase_pm_get_policy(struct kbase_device *kbdev)
{
	KBASE_DEBUG_ASSERT(kbdev != NULL);

	return kbdev->pm.backend.pm_current_policy;
}

KBASE_EXPORT_TEST_API(kbase_pm_get_policy);

#if MALI_USE_CSF
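/*
 * Wait (killably, on kernels that support it) for the L2 state machine to
 * reach the off state during a policy change. Returns 0 on success,
 * -ETIMEDOUT on timeout, or a negative error code if the wait was
 * interrupted.
 */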
static int policy_change_wait_for_L2_off(struct kbase_device *kbdev)
{
	long remaining;
	long timeout = kbase_csf_timeout_in_jiffies(kbase_get_timeout_ms(kbdev, CSF_PM_TIMEOUT));
	int err = 0;

	/* Wait for the L2 to turn off; the MCU is then also implicitly off,
	 * since the L2 state machine only starts its power-down sequence
	 * once the MCU is in the off state. L2 off is required because the
	 * tiler may need to be power cycled for MCU reconfiguration for
	 * host control of shader cores.
	 */
#if KERNEL_VERSION(4, 13, 1) <= LINUX_VERSION_CODE
	remaining = wait_event_killable_timeout(
		kbdev->pm.backend.gpu_in_desired_state_wait,
		kbdev->pm.backend.l2_state == KBASE_L2_OFF, timeout);
#else
	remaining = wait_event_timeout(
		kbdev->pm.backend.gpu_in_desired_state_wait,
		kbdev->pm.backend.l2_state == KBASE_L2_OFF, timeout);
#endif

	if (!remaining) {
		err = -ETIMEDOUT;
	} else if (remaining < 0) {
		dev_info(kbdev->dev, "Wait for L2_off got interrupted");
		err = (int)remaining;
	}

	dev_dbg(kbdev->dev, "%s: err=%d mcu_state=%d, L2_state=%d\n", __func__,
		err, kbdev->pm.backend.mcu_state, kbdev->pm.backend.l2_state);

	return err;
}
#endif

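/*
 * Switch to a new power policy at runtime. On CSF GPUs the scheduler may
 * first be suspended and the MCU/L2 clamped off so the firmware can be
 * reconfigured; the old policy is then terminated, the new one
 * initialized, and any power state changes deferred during the switch are
 * re-tried. A GPU reset is the fallback if the suspend or the L2-off wait
 * fails.
 */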
void kbase_pm_set_policy(struct kbase_device *kbdev,
				const struct kbase_pm_policy *new_policy)
{
	const struct kbase_pm_policy *old_policy;
	unsigned long flags;
#if MALI_USE_CSF
	unsigned int new_policy_csf_pm_sched_flags;
	bool sched_suspend;
	bool reset_gpu = false;
	bool reset_op_prevented = true;
	struct kbase_csf_scheduler *scheduler = NULL;
#endif

	KBASE_DEBUG_ASSERT(kbdev != NULL);
	KBASE_DEBUG_ASSERT(new_policy != NULL);

	KBASE_KTRACE_ADD(kbdev, PM_SET_POLICY, NULL, new_policy->id);

#if MALI_USE_CSF
	scheduler = &kbdev->csf.scheduler;
	KBASE_DEBUG_ASSERT(scheduler != NULL);

	/* Serialize calls on kbase_pm_set_policy() */
	mutex_lock(&kbdev->pm.backend.policy_change_lock);

	if (kbase_reset_gpu_prevent_and_wait(kbdev)) {
		dev_warn(kbdev->dev, "Set PM policy failing to prevent gpu reset");
		reset_op_prevented = false;
	}

	/* In the CSF case the scheduler may need to be suspended, and there
	 * is then a risk that the L2 has been turned back on by the time we
	 * check it here. Hold the scheduler lock so that other operations
	 * cannot interfere with the policy change, and vice versa.
	 */
	mutex_lock(&scheduler->lock);
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	/* policy_change_clamp_state_to_off, when needed, is set/cleared
	 * within this function, giving a very limited temporal scope that
	 * covers the change transition.
	 */
	WARN_ON(kbdev->pm.backend.policy_change_clamp_state_to_off);
	new_policy_csf_pm_sched_flags = new_policy->pm_sched_flags;

	/* The scheduler PM suspend operation is required when the change
	 * involves the always_on policy, as reflected by the
	 * CSF_DYNAMIC_PM_CORE_KEEP_ON flag bit.
	 */
	sched_suspend = reset_op_prevented &&
			(CSF_DYNAMIC_PM_CORE_KEEP_ON &
			 (new_policy_csf_pm_sched_flags | kbdev->pm.backend.csf_pm_sched_flags));

	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	if (sched_suspend) {
		/* Update the flag to reflect whether the suspend actually
		 * took place.
		 */
		sched_suspend = !kbase_csf_scheduler_pm_suspend_no_lock(kbdev);
		/* Set the reset recovery flag if the required suspend failed */
		reset_gpu = !sched_suspend;
	}

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

	kbdev->pm.backend.policy_change_clamp_state_to_off = sched_suspend;
	kbase_pm_update_state(kbdev);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	if (sched_suspend)
		reset_gpu = policy_change_wait_for_L2_off(kbdev);
#endif

	/* During a policy change we pretend the GPU is active. A suspend
	 * won't happen here, because we're in a syscall from a userspace
	 * thread.
	 */
	kbase_pm_context_active(kbdev);

	kbase_pm_lock(kbdev);

	/* Remove the policy to prevent IRQ handlers from working on it */
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	old_policy = kbdev->pm.backend.pm_current_policy;
	kbdev->pm.backend.pm_current_policy = NULL;
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	KBASE_KTRACE_ADD(kbdev, PM_CURRENT_POLICY_TERM, NULL, old_policy->id);
	if (old_policy->term)
		old_policy->term(kbdev);

	memset(&kbdev->pm.backend.pm_policy_data, 0,
	       sizeof(union kbase_pm_policy_data));

	KBASE_KTRACE_ADD(kbdev, PM_CURRENT_POLICY_INIT, NULL, new_policy->id);
	if (new_policy->init)
		new_policy->init(kbdev);

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbdev->pm.backend.pm_current_policy = new_policy;
#if MALI_USE_CSF
	kbdev->pm.backend.csf_pm_sched_flags = new_policy_csf_pm_sched_flags;
	/* New policy in place, release the clamping on mcu/L2 off state */
	kbdev->pm.backend.policy_change_clamp_state_to_off = false;
	kbase_pm_update_state(kbdev);
#endif
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	/* If any core power state changes were previously attempted, but
	 * couldn't be made because the policy was changing (current_policy was
	 * NULL), then re-try them here.
	 */
	kbase_pm_update_active(kbdev);
	kbase_pm_update_cores_state(kbdev);

	kbase_pm_unlock(kbdev);

	/* Now the policy change is finished, we release our fake context active
	 * reference
	 */
	kbase_pm_context_idle(kbdev);

#if MALI_USE_CSF
	/* Reverse the scheduler suspension done earlier */
	if (sched_suspend)
		kbase_csf_scheduler_pm_resume_no_lock(kbdev);
	mutex_unlock(&scheduler->lock);

	if (reset_op_prevented)
		kbase_reset_gpu_allow(kbdev);

	if (reset_gpu) {
		dev_warn(kbdev->dev, "Resorting to GPU reset for policy change\n");
		if (kbase_prepare_to_reset_gpu(kbdev, RESET_FLAGS_NONE))
			kbase_reset_gpu(kbdev);
		kbase_reset_gpu_wait(kbdev);
	}

	mutex_unlock(&kbdev->pm.backend.policy_change_lock);
#endif
}

KBASE_EXPORT_TEST_API(kbase_pm_set_policy);