2 * Permission is hereby granted, free of charge, to any person obtaining a
3 * copy of this software and associated documentation files (the "Software"),
4 * to deal in the Software without restriction, including without limitation
5 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
6 * and/or sell copies of the Software, and to permit persons to whom the
7 * Software is furnished to do so, subject to the following conditions:
9 * The above copyright notice and this permission notice shall be included in
10 * all copies or substantial portions of the Software.
12 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
13 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
15 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
16 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
17 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
18 * OTHER DEALINGS IN THE SOFTWARE.
20 * Authors: Rafał Miłecki <zajec5@gmail.com>
21 * Alex Deucher <alexdeucher@gmail.com>
27 #include <linux/power_supply.h>
28 #include <linux/hwmon.h>
29 #include <linux/hwmon-sysfs.h>
31 #define RADEON_IDLE_LOOP_MS 100
32 #define RADEON_RECLOCK_DELAY_MS 200
33 #define RADEON_WAIT_VBLANK_TIMEOUT 200
/* Printable names indexed by enum radeon_pm_state_type; consumed by
 * radeon_pm_print_states(). (Initializer entries not visible in this extract.)
 */
35 static const char *radeon_pm_state_type_name[5] = {
43 static void radeon_dynpm_idle_work_handler(struct work_struct *work);
44 static int radeon_debugfs_pm_init(struct radeon_device *rdev);
45 static bool radeon_pm_in_vbl(struct radeon_device *rdev);
46 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
47 static void radeon_pm_update_profile(struct radeon_device *rdev);
48 static void radeon_pm_set_clocks(struct radeon_device *rdev);
/*
 * Find the index into rdev->pm.power_state[] of the Nth ("instance")
 * power state of the requested type.  Falls back to the default power
 * state index when no matching state/instance exists.
 * NOTE(review): several lines (braces, instance counting, early return)
 * are missing from this extract — verify against the full source.
 */
50 int radeon_pm_get_type_index(struct radeon_device *rdev,
51 enum radeon_pm_state_type ps_type,
55 int found_instance = -1;
/* scan every advertised power state for type matches */
57 for (i = 0; i < rdev->pm.num_power_states; i++) {
58 if (rdev->pm.power_state[i].type == ps_type) {
60 if (found_instance == instance)
64 /* return default if no match */
65 return rdev->pm.default_power_state_index;
/*
 * ACPI event hook: when the power source changes (AC <-> battery) and the
 * driver is in profile mode with the AUTO profile selected, re-evaluate the
 * profile and reprogram the clocks.  All state changes happen under pm.mutex.
 */
68 void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
70 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
71 if (rdev->pm.profile == PM_PROFILE_AUTO) {
72 mutex_lock(&rdev->pm.mutex);
73 radeon_pm_update_profile(rdev);
74 radeon_pm_set_clocks(rdev);
75 mutex_unlock(&rdev->pm.mutex);
/*
 * Translate the user-selected profile (rdev->pm.profile) into a concrete
 * profile table index (rdev->pm.profile_index), then load the requested
 * power-state/clock-mode indices from that table entry.
 * The _MH/_SH index pairs are chosen by active_crtc_count > 1 — presumably
 * multi-head vs. single-head variants (TODO confirm).
 * NOTE(review): case labels, break statements, and else branches are missing
 * from this extract; the visible AUTO handling picks HIGH indices on AC power
 * (power_supply_is_system_supplied() > 0) and MID indices otherwise.
 */
80 static void radeon_pm_update_profile(struct radeon_device *rdev)
82 switch (rdev->pm.profile) {
83 case PM_PROFILE_DEFAULT:
84 rdev->pm.profile_index = PM_PROFILE_DEFAULT_IDX;
/* on mains power: run the HIGH profile variants */
87 if (power_supply_is_system_supplied() > 0) {
88 if (rdev->pm.active_crtc_count > 1)
89 rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
91 rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
93 if (rdev->pm.active_crtc_count > 1)
94 rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
96 rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
100 if (rdev->pm.active_crtc_count > 1)
101 rdev->pm.profile_index = PM_PROFILE_LOW_MH_IDX;
103 rdev->pm.profile_index = PM_PROFILE_LOW_SH_IDX;
106 if (rdev->pm.active_crtc_count > 1)
107 rdev->pm.profile_index = PM_PROFILE_MID_MH_IDX;
109 rdev->pm.profile_index = PM_PROFILE_MID_SH_IDX;
111 case PM_PROFILE_HIGH:
112 if (rdev->pm.active_crtc_count > 1)
113 rdev->pm.profile_index = PM_PROFILE_HIGH_MH_IDX;
115 rdev->pm.profile_index = PM_PROFILE_HIGH_SH_IDX;
/* with no active CRTCs use the dpms-off entry, otherwise the dpms-on entry */
119 if (rdev->pm.active_crtc_count == 0) {
120 rdev->pm.requested_power_state_index =
121 rdev->pm.profiles[rdev->pm.profile_index].dpms_off_ps_idx;
122 rdev->pm.requested_clock_mode_index =
123 rdev->pm.profiles[rdev->pm.profile_index].dpms_off_cm_idx;
125 rdev->pm.requested_power_state_index =
126 rdev->pm.profiles[rdev->pm.profile_index].dpms_on_ps_idx;
127 rdev->pm.requested_clock_mode_index =
128 rdev->pm.profiles[rdev->pm.profile_index].dpms_on_cm_idx;
/*
 * Drop the virtual (CPU) mappings of every GEM buffer object currently
 * placed in VRAM — presumably so no CPU access can race a reclock
 * (called from radeon_pm_set_clocks below); TODO confirm.
 */
132 static void radeon_unmap_vram_bos(struct radeon_device *rdev)
134 struct radeon_bo *bo, *n;
/* nothing to do without GEM objects */
136 if (list_empty(&rdev->gem.objects))
139 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
140 if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
141 ttm_bo_unmap_virtual(&bo->tbo);
/*
 * If any CRTC is active, clear pm.vblank_sync and wait (bounded by
 * RADEON_WAIT_VBLANK_TIMEOUT ms) for the vblank IRQ path to set it,
 * so the following reclock starts aligned with a vblank.
 */
145 static void radeon_sync_with_vblank(struct radeon_device *rdev)
147 if (rdev->pm.active_crtcs) {
148 rdev->pm.vblank_sync = false;
150 rdev->irq.vblank_queue, rdev->pm.vblank_sync,
151 msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
/*
 * Program the hardware to the requested power state / clock mode:
 * clamp the target engine (sclk) and memory (mclk) clocks to the defaults,
 * order voltage changes around the clock changes (upvolt before raising,
 * downvolt after lowering — see misc_after), optionally wait for vblank in
 * dynpm mode, then write the new clocks and record them as current.
 * Bails out (with a debug message) when the GUI engine is not idle.
 * NOTE(review): lines are missing from this extract (returns, else branches,
 * the misc_after assignment) — verify ordering against the full source.
 */
155 static void radeon_set_power_state(struct radeon_device *rdev)
158 bool misc_after = false;
/* no-op if the requested state is already current */
160 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
161 (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
164 if (radeon_gui_idle(rdev)) {
165 sclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
166 clock_info[rdev->pm.requested_clock_mode_index].sclk;
167 if (sclk > rdev->pm.default_sclk)
168 sclk = rdev->pm.default_sclk;
170 mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
171 clock_info[rdev->pm.requested_clock_mode_index].mclk;
172 if (mclk > rdev->pm.default_mclk)
173 mclk = rdev->pm.default_mclk;
175 /* upvolt before raising clocks, downvolt after lowering clocks */
176 if (sclk < rdev->pm.current_sclk)
179 radeon_sync_with_vblank(rdev);
/* dynpm only reclocks while inside the vertical blank */
181 if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
182 if (!radeon_pm_in_vbl(rdev))
186 radeon_pm_prepare(rdev);
189 /* voltage, pcie lanes, etc.*/
190 radeon_pm_misc(rdev);
192 /* set engine clock */
193 if (sclk != rdev->pm.current_sclk) {
194 radeon_pm_debug_check_in_vbl(rdev, false);
195 radeon_set_engine_clock(rdev, sclk);
196 radeon_pm_debug_check_in_vbl(rdev, true);
197 rdev->pm.current_sclk = sclk;
198 DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
201 /* set memory clock */
202 if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
203 radeon_pm_debug_check_in_vbl(rdev, false);
204 radeon_set_memory_clock(rdev, mclk);
205 radeon_pm_debug_check_in_vbl(rdev, true);
206 rdev->pm.current_mclk = mclk;
207 DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
211 /* voltage, pcie lanes, etc.*/
212 radeon_pm_misc(rdev);
214 radeon_pm_finish(rdev);
/* commit: the requested state is now the current one */
216 rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
217 rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
219 DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
/*
 * Top-level reclock path: take struct_mutex, the mclk rwsem and ring_lock,
 * drain all rings, unmap VRAM BOs, hold vblank references on the active
 * CRTCs around radeon_set_power_state(), then release the vblank refs and
 * refresh display watermarks for the new state.
 * Lock order here is struct_mutex -> mclk_lock -> ring_lock (released in
 * reverse).  Caller holds pm.mutex (see callers in this file).
 */
222 static void radeon_pm_set_clocks(struct radeon_device *rdev)
226 /* no need to take locks, etc. if nothing's going to change */
227 if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) &&
228 (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index))
231 mutex_lock(&rdev->ddev->struct_mutex);
232 down_write(&rdev->pm.mclk_lock);
233 mutex_lock(&rdev->ring_lock);
235 /* wait for the rings to drain */
236 for (i = 0; i < RADEON_NUM_RINGS; i++) {
237 struct radeon_ring *ring = &rdev->ring[i];
239 radeon_fence_wait_empty_locked(rdev, i);
242 radeon_unmap_vram_bos(rdev);
/* pin vblank interrupts on for every active CRTC during the switch */
244 if (rdev->irq.installed) {
245 for (i = 0; i < rdev->num_crtc; i++) {
246 if (rdev->pm.active_crtcs & (1 << i)) {
247 rdev->pm.req_vblank |= (1 << i);
248 drm_vblank_get(rdev->ddev, i);
253 radeon_set_power_state(rdev);
/* drop the vblank references taken above */
255 if (rdev->irq.installed) {
256 for (i = 0; i < rdev->num_crtc; i++) {
257 if (rdev->pm.req_vblank & (1 << i)) {
258 rdev->pm.req_vblank &= ~(1 << i);
259 drm_vblank_put(rdev->ddev, i);
264 /* update display watermarks based on new power state */
265 radeon_update_bandwidth_info(rdev);
266 if (rdev->pm.active_crtc_count)
267 radeon_bandwidth_update(rdev);
269 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
271 mutex_unlock(&rdev->ring_lock);
272 up_write(&rdev->pm.mclk_lock);
273 mutex_unlock(&rdev->ddev->struct_mutex);
/*
 * Debug dump of every parsed power state and its clock modes via
 * DRM_DEBUG_DRIVER.  Clocks are stored in 10 kHz units, hence the "* 10"
 * when printing.  IGP parts have no discrete memory clock/voltage to show.
 */
276 static void radeon_pm_print_states(struct radeon_device *rdev)
279 struct radeon_power_state *power_state;
280 struct radeon_pm_clock_info *clock_info;
282 DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
283 for (i = 0; i < rdev->pm.num_power_states; i++) {
284 power_state = &rdev->pm.power_state[i];
285 DRM_DEBUG_DRIVER("State %d: %s\n", i,
286 radeon_pm_state_type_name[power_state->type]);
287 if (i == rdev->pm.default_power_state_index)
288 DRM_DEBUG_DRIVER("\tDefault");
289 if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
290 DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
291 if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
292 DRM_DEBUG_DRIVER("\tSingle display only\n");
293 DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
294 for (j = 0; j < power_state->num_clock_modes; j++) {
295 clock_info = &(power_state->clock_info[j]);
296 if (rdev->flags & RADEON_IS_IGP)
297 DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
299 clock_info->sclk * 10);
301 DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
303 clock_info->sclk * 10,
304 clock_info->mclk * 10,
305 clock_info->voltage.voltage);
/*
 * sysfs show callback for "power_profile": prints the currently selected
 * profile as one of "auto"/"low"/"mid"/"high"/"default".
 */
310 static ssize_t radeon_get_pm_profile(struct device *dev,
311 struct device_attribute *attr,
314 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
315 struct radeon_device *rdev = ddev->dev_private;
316 int cp = rdev->pm.profile;
318 return snprintf(buf, PAGE_SIZE, "%s\n",
319 (cp == PM_PROFILE_AUTO) ? "auto" :
320 (cp == PM_PROFILE_LOW) ? "low" :
321 (cp == PM_PROFILE_MID) ? "mid" :
322 (cp == PM_PROFILE_HIGH) ? "high" : "default");
/*
 * sysfs store callback for "power_profile": parses the written string
 * (prefix match) into a PM_PROFILE_* value, then re-evaluates the profile
 * and reprograms clocks, all under pm.mutex.  Only honored while the
 * driver is in profile mode.
 * NOTE(review): the else/error paths between the visible lines are missing
 * from this extract.
 */
325 static ssize_t radeon_set_pm_profile(struct device *dev,
326 struct device_attribute *attr,
330 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
331 struct radeon_device *rdev = ddev->dev_private;
333 mutex_lock(&rdev->pm.mutex);
334 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
335 if (strncmp("default", buf, strlen("default")) == 0)
336 rdev->pm.profile = PM_PROFILE_DEFAULT;
337 else if (strncmp("auto", buf, strlen("auto")) == 0)
338 rdev->pm.profile = PM_PROFILE_AUTO;
339 else if (strncmp("low", buf, strlen("low")) == 0)
340 rdev->pm.profile = PM_PROFILE_LOW;
341 else if (strncmp("mid", buf, strlen("mid")) == 0)
342 rdev->pm.profile = PM_PROFILE_MID;
343 else if (strncmp("high", buf, strlen("high")) == 0)
344 rdev->pm.profile = PM_PROFILE_HIGH;
349 radeon_pm_update_profile(rdev);
350 radeon_pm_set_clocks(rdev);
355 mutex_unlock(&rdev->pm.mutex);
/*
 * sysfs show callback for "power_method": prints "dynpm" or "profile"
 * according to the active PM method.
 */
360 static ssize_t radeon_get_pm_method(struct device *dev,
361 struct device_attribute *attr,
364 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
365 struct radeon_device *rdev = ddev->dev_private;
366 int pm = rdev->pm.pm_method;
368 return snprintf(buf, PAGE_SIZE, "%s\n",
369 (pm == PM_METHOD_DYNPM) ? "dynpm" : "profile");
/*
 * sysfs store callback for "power_method": switches between dynpm and
 * profile mode under pm.mutex.  Switching to profile mode disables dynpm
 * state and cancels the idle worker *after* dropping the mutex (the worker
 * takes pm.mutex itself, so cancelling inside would deadlock); finally the
 * clocks are recomputed for the new method.
 */
372 static ssize_t radeon_set_pm_method(struct device *dev,
373 struct device_attribute *attr,
377 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
378 struct radeon_device *rdev = ddev->dev_private;
381 if (strncmp("dynpm", buf, strlen("dynpm")) == 0) {
382 mutex_lock(&rdev->pm.mutex);
383 rdev->pm.pm_method = PM_METHOD_DYNPM;
384 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
385 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
386 mutex_unlock(&rdev->pm.mutex);
387 } else if (strncmp("profile", buf, strlen("profile")) == 0) {
388 mutex_lock(&rdev->pm.mutex);
390 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
391 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
392 rdev->pm.pm_method = PM_METHOD_PROFILE;
393 mutex_unlock(&rdev->pm.mutex);
394 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
399 radeon_pm_compute_clocks(rdev);
/* sysfs attributes: user-visible knobs for profile selection and PM method */
404 static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
405 static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
/*
 * hwmon "temp1_input" show callback: reads the internal thermal sensor
 * via the ASIC-family-specific helper and prints it.  The helpers here
 * appear to return the value already in the hwmon convention; the unit
 * (millidegrees C) is not visible in this extract — TODO confirm.
 */
407 static ssize_t radeon_hwmon_show_temp(struct device *dev,
408 struct device_attribute *attr,
411 struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
412 struct radeon_device *rdev = ddev->dev_private;
415 switch (rdev->pm.int_thermal_type) {
416 case THERMAL_TYPE_RV6XX:
417 temp = rv6xx_get_temp(rdev);
419 case THERMAL_TYPE_RV770:
420 temp = rv770_get_temp(rdev);
422 case THERMAL_TYPE_EVERGREEN:
423 case THERMAL_TYPE_NI:
424 temp = evergreen_get_temp(rdev);
426 case THERMAL_TYPE_SUMO:
427 temp = sumo_get_temp(rdev);
429 case THERMAL_TYPE_SI:
430 temp = si_get_temp(rdev);
437 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
/* hwmon "name" show callback: fixed chip name string. */
440 static ssize_t radeon_hwmon_show_name(struct device *dev,
441 struct device_attribute *attr,
444 return sprintf(buf, "radeon\n");
/* hwmon sensor attributes and the group registered in radeon_hwmon_init() */
447 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
448 static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
450 static struct attribute *hwmon_attributes[] = {
451 &sensor_dev_attr_temp1_input.dev_attr.attr,
452 &sensor_dev_attr_name.dev_attr.attr,
456 static const struct attribute_group hwmon_attrgroup = {
457 .attrs = hwmon_attributes,
/*
 * Register a hwmon device exposing the internal thermal sensor for the
 * ASIC families that have one.  On sysfs-group creation failure the hwmon
 * device is unregistered again.  Returns 0 or a negative errno (err).
 * NOTE(review): the "break" / default-case / return lines are missing from
 * this extract — the visible L284 unregister call passes rdev->dev where
 * the registered handle is rdev->pm.int_hwmon_dev; verify against the full
 * source before assuming a bug.
 */
460 static int radeon_hwmon_init(struct radeon_device *rdev)
464 rdev->pm.int_hwmon_dev = NULL;
466 switch (rdev->pm.int_thermal_type) {
467 case THERMAL_TYPE_RV6XX:
468 case THERMAL_TYPE_RV770:
469 case THERMAL_TYPE_EVERGREEN:
470 case THERMAL_TYPE_NI:
471 case THERMAL_TYPE_SUMO:
472 case THERMAL_TYPE_SI:
473 /* No support for TN yet */
474 if (rdev->family == CHIP_ARUBA)
476 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
477 if (IS_ERR(rdev->pm.int_hwmon_dev)) {
478 err = PTR_ERR(rdev->pm.int_hwmon_dev);
480 "Unable to register hwmon device: %d\n", err);
483 dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
484 err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
488 "Unable to create hwmon sysfs file: %d\n", err);
489 hwmon_device_unregister(rdev->dev);
/* Tear down the hwmon sysfs group and device registered by radeon_hwmon_init(). */
499 static void radeon_hwmon_fini(struct radeon_device *rdev)
501 if (rdev->pm.int_hwmon_dev) {
502 sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
503 hwmon_device_unregister(rdev->pm.int_hwmon_dev);
/*
 * System-suspend hook: mark an active dynpm state machine as SUSPENDED
 * under pm.mutex, then cancel the idle worker outside the mutex (the
 * worker takes pm.mutex, so cancelling while holding it would deadlock).
 */
507 void radeon_pm_suspend(struct radeon_device *rdev)
509 mutex_lock(&rdev->pm.mutex);
510 if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
511 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE)
512 rdev->pm.dynpm_state = DYNPM_STATE_SUSPENDED;
514 mutex_unlock(&rdev->pm.mutex);
516 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
/*
 * System-resume hook: restore default voltages/clocks on BARTS..CAYMAN
 * (the condition presumably also checks that the MC ucode is loaded — that
 * line is missing from this extract), reset the cached "current" PM state
 * to the defaults (asic init resets hardware state), restart a previously
 * SUSPENDED dynpm worker, and finally recompute clocks.
 */
519 void radeon_pm_resume(struct radeon_device *rdev)
521 /* set up the default clocks if the MC ucode is loaded */
522 if ((rdev->family >= CHIP_BARTS) &&
523 (rdev->family <= CHIP_CAYMAN) &&
525 if (rdev->pm.default_vddc)
526 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
527 SET_VOLTAGE_TYPE_ASIC_VDDC);
528 if (rdev->pm.default_vddci)
529 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
530 SET_VOLTAGE_TYPE_ASIC_VDDCI);
531 if (rdev->pm.default_sclk)
532 radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
533 if (rdev->pm.default_mclk)
534 radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
536 /* asic init will reset the default power state */
537 mutex_lock(&rdev->pm.mutex);
538 rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
539 rdev->pm.current_clock_mode_index = 0;
540 rdev->pm.current_sclk = rdev->pm.default_sclk;
541 rdev->pm.current_mclk = rdev->pm.default_mclk;
542 rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
543 rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
544 if (rdev->pm.pm_method == PM_METHOD_DYNPM
545 && rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
546 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
547 schedule_delayed_work(&rdev->pm.dynpm_idle_work,
548 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
550 mutex_unlock(&rdev->pm.mutex);
551 radeon_pm_compute_clocks(rdev);
/*
 * Driver-load PM initialization: seed the pm state (profile method by
 * default, dynpm disabled), read power modes from ATOM or COMBIOS, build
 * the profile table, program default voltages/clocks on BARTS..CAYMAN
 * (MC-ucode check line missing from this extract — see the comment at
 * L343), set up the internal thermal sensor, the dynpm idle worker, the
 * sysfs attributes (only with >1 power state) and the debugfs file.
 * Returns 0 on success (error-return lines not visible here).
 */
554 int radeon_pm_init(struct radeon_device *rdev)
558 /* default to profile method */
559 rdev->pm.pm_method = PM_METHOD_PROFILE;
560 rdev->pm.profile = PM_PROFILE_DEFAULT;
561 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
562 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
563 rdev->pm.dynpm_can_upclock = true;
564 rdev->pm.dynpm_can_downclock = true;
565 rdev->pm.default_sclk = rdev->clock.default_sclk;
566 rdev->pm.default_mclk = rdev->clock.default_mclk;
567 rdev->pm.current_sclk = rdev->clock.default_sclk;
568 rdev->pm.current_mclk = rdev->clock.default_mclk;
569 rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
/* power-mode tables come from ATOM BIOS when present, else COMBIOS */
572 if (rdev->is_atom_bios)
573 radeon_atombios_get_power_modes(rdev);
575 radeon_combios_get_power_modes(rdev);
576 radeon_pm_print_states(rdev);
577 radeon_pm_init_profile(rdev);
578 /* set up the default clocks if the MC ucode is loaded */
579 if ((rdev->family >= CHIP_BARTS) &&
580 (rdev->family <= CHIP_CAYMAN) &&
582 if (rdev->pm.default_vddc)
583 radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
584 SET_VOLTAGE_TYPE_ASIC_VDDC);
585 if (rdev->pm.default_vddci)
586 radeon_atom_set_voltage(rdev, rdev->pm.default_vddci,
587 SET_VOLTAGE_TYPE_ASIC_VDDCI);
588 if (rdev->pm.default_sclk)
589 radeon_set_engine_clock(rdev, rdev->pm.default_sclk);
590 if (rdev->pm.default_mclk)
591 radeon_set_memory_clock(rdev, rdev->pm.default_mclk);
595 /* set up the internal thermal sensor if applicable */
596 ret = radeon_hwmon_init(rdev);
600 INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
602 if (rdev->pm.num_power_states > 1) {
603 /* where's the best place to put these? */
604 ret = device_create_file(rdev->dev, &dev_attr_power_profile);
606 DRM_ERROR("failed to create device file for power profile\n");
607 ret = device_create_file(rdev->dev, &dev_attr_power_method);
609 DRM_ERROR("failed to create device file for power method\n");
611 if (radeon_debugfs_pm_init(rdev)) {
612 DRM_ERROR("Failed to register debugfs file for PM!\n");
615 DRM_INFO("radeon: power management initialized\n");
/*
 * Driver-unload PM teardown: restore the default profile/clocks under
 * pm.mutex, cancel the dynpm worker outside it, remove the sysfs files,
 * free the power-state table and unregister the hwmon device.
 * (The "if (power_state)" guard before kfree is redundant — kfree(NULL)
 * is a no-op — but the code is left untouched here.)
 */
621 void radeon_pm_fini(struct radeon_device *rdev)
623 if (rdev->pm.num_power_states > 1) {
624 mutex_lock(&rdev->pm.mutex);
625 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
626 rdev->pm.profile = PM_PROFILE_DEFAULT;
627 radeon_pm_update_profile(rdev);
628 radeon_pm_set_clocks(rdev);
629 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
630 /* reset default clocks */
631 rdev->pm.dynpm_state = DYNPM_STATE_DISABLED;
632 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
633 radeon_pm_set_clocks(rdev);
635 mutex_unlock(&rdev->pm.mutex);
637 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
639 device_remove_file(rdev->dev, &dev_attr_power_profile);
640 device_remove_file(rdev->dev, &dev_attr_power_method);
643 if (rdev->pm.power_state)
644 kfree(rdev->pm.power_state);
646 radeon_hwmon_fini(rdev);
/*
 * Re-evaluate clocks after a display-configuration change: recount the
 * active CRTCs into pm.active_crtcs/active_crtc_count, then either apply
 * the profile (profile mode) or drive the dynpm state machine:
 *   >1 CRTC  -> pause dynpm at the default state (no reclock flicker
 *               handling for multi-head);
 *   ==1 CRTC -> (re)activate dynpm and kick the idle worker;
 *   ==0 CRTC -> drop to the MINIMUM state and stop the worker.
 * All under pm.mutex; no-op with fewer than two power states.
 */
649 void radeon_pm_compute_clocks(struct radeon_device *rdev)
651 struct drm_device *ddev = rdev->ddev;
652 struct drm_crtc *crtc;
653 struct radeon_crtc *radeon_crtc;
655 if (rdev->pm.num_power_states < 2)
658 mutex_lock(&rdev->pm.mutex);
660 rdev->pm.active_crtcs = 0;
661 rdev->pm.active_crtc_count = 0;
662 list_for_each_entry(crtc,
663 &ddev->mode_config.crtc_list, head) {
664 radeon_crtc = to_radeon_crtc(crtc);
665 if (radeon_crtc->enabled) {
666 rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
667 rdev->pm.active_crtc_count++;
671 if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
672 radeon_pm_update_profile(rdev);
673 radeon_pm_set_clocks(rdev);
674 } else if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
675 if (rdev->pm.dynpm_state != DYNPM_STATE_DISABLED) {
676 if (rdev->pm.active_crtc_count > 1) {
677 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
678 cancel_delayed_work(&rdev->pm.dynpm_idle_work);
680 rdev->pm.dynpm_state = DYNPM_STATE_PAUSED;
681 rdev->pm.dynpm_planned_action = DYNPM_ACTION_DEFAULT;
682 radeon_pm_get_dynpm_state(rdev);
683 radeon_pm_set_clocks(rdev);
685 DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
687 } else if (rdev->pm.active_crtc_count == 1) {
688 /* TODO: Increase clocks if needed for current mode */
690 if (rdev->pm.dynpm_state == DYNPM_STATE_MINIMUM) {
691 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
692 rdev->pm.dynpm_planned_action = DYNPM_ACTION_UPCLOCK;
693 radeon_pm_get_dynpm_state(rdev);
694 radeon_pm_set_clocks(rdev);
696 schedule_delayed_work(&rdev->pm.dynpm_idle_work,
697 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
698 } else if (rdev->pm.dynpm_state == DYNPM_STATE_PAUSED) {
699 rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
700 schedule_delayed_work(&rdev->pm.dynpm_idle_work,
701 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
702 DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
704 } else { /* count == 0 */
705 if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
706 cancel_delayed_work(&rdev->pm.dynpm_idle_work);
708 rdev->pm.dynpm_state = DYNPM_STATE_MINIMUM;
709 rdev->pm.dynpm_planned_action = DYNPM_ACTION_MINIMUM;
710 radeon_pm_get_dynpm_state(rdev);
711 radeon_pm_set_clocks(rdev);
717 mutex_unlock(&rdev->pm.mutex);
/*
 * Return true only if every active CRTC is currently inside its vertical
 * blank, as reported by radeon_get_crtc_scanoutpos().  A valid scanout
 * position outside vblank clears in_vbl (assignment line missing from
 * this extract) and stops the loop via the "&& in_vbl" condition.
 */
720 static bool radeon_pm_in_vbl(struct radeon_device *rdev)
722 int crtc, vpos, hpos, vbl_status;
725 /* Iterate over all active crtc's. All crtc's must be in vblank,
726 * otherwise return in_vbl == false.
728 for (crtc = 0; (crtc < rdev->num_crtc) && in_vbl; crtc++) {
729 if (rdev->pm.active_crtcs & (1 << crtc)) {
730 vbl_status = radeon_get_crtc_scanoutpos(rdev->ddev, crtc, &vpos, &hpos);
731 if ((vbl_status & DRM_SCANOUTPOS_VALID) &&
732 !(vbl_status & DRM_SCANOUTPOS_INVBL))
/*
 * Debug helper bracketing a clock change: logs when the GPU is not in
 * vblank at reclock entry ("entry") or exit ("exit"); returns the
 * in-vblank status.
 */
740 static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
743 bool in_vbl = radeon_pm_in_vbl(rdev);
746 DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
747 finish ? "exit" : "entry");
/*
 * dynpm idle worker (runs every RADEON_IDLE_LOOP_MS while ACTIVE): counts
 * outstanding fences across all rings as a load estimate.  >= 3 pending
 * fences plans an UPCLOCK; 0 pending plans a DOWNCLOCK; a planned action
 * in the opposite direction cancels out.  A planned action is applied only
 * after RADEON_RECLOCK_DELAY_MS has elapsed (dynpm_action_timeout), to
 * debounce reclocks.  TTM's delayed workqueue is paused around the whole
 * body and the worker reschedules itself.
 * NOTE(review): the per-ring "ring->ready" guard and re-arm conditions are
 * among the lines missing from this extract.
 */
751 static void radeon_dynpm_idle_work_handler(struct work_struct *work)
753 struct radeon_device *rdev;
755 rdev = container_of(work, struct radeon_device,
756 pm.dynpm_idle_work.work);
758 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
759 mutex_lock(&rdev->pm.mutex);
760 if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
761 int not_processed = 0;
/* estimate GPU load from emitted-but-unsignaled fences */
764 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
765 struct radeon_ring *ring = &rdev->ring[i];
768 not_processed += radeon_fence_count_emitted(rdev, i);
769 if (not_processed >= 3)
774 if (not_processed >= 3) { /* should upclock */
775 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
776 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
777 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
778 rdev->pm.dynpm_can_upclock) {
779 rdev->pm.dynpm_planned_action =
780 DYNPM_ACTION_UPCLOCK;
781 rdev->pm.dynpm_action_timeout = jiffies +
782 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
784 } else if (not_processed == 0) { /* should downclock */
785 if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_UPCLOCK) {
786 rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
787 } else if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_NONE &&
788 rdev->pm.dynpm_can_downclock) {
789 rdev->pm.dynpm_planned_action =
790 DYNPM_ACTION_DOWNCLOCK;
791 rdev->pm.dynpm_action_timeout = jiffies +
792 msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
796 /* Note, radeon_pm_set_clocks is called with static_switch set
797 * to false since we want to wait for vbl to avoid flicker.
799 if (rdev->pm.dynpm_planned_action != DYNPM_ACTION_NONE &&
800 jiffies > rdev->pm.dynpm_action_timeout) {
801 radeon_pm_get_dynpm_state(rdev);
802 radeon_pm_set_clocks(rdev);
805 schedule_delayed_work(&rdev->pm.dynpm_idle_work,
806 msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
808 mutex_unlock(&rdev->pm.mutex);
809 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
815 #if defined(CONFIG_DEBUG_FS)
/*
 * debugfs "radeon_pm_info" dump: default/current engine and memory clocks
 * (stored in 10 kHz units — the "%u0" format appends a literal 0 to print
 * kHz), plus voltage and PCIE lane count where the ASIC exposes them.
 */
817 static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
819 struct drm_info_node *node = (struct drm_info_node *) m->private;
820 struct drm_device *dev = node->minor->dev;
821 struct radeon_device *rdev = dev->dev_private;
823 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
824 seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
825 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
826 if (rdev->asic->pm.get_memory_clock)
827 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
828 if (rdev->pm.current_vddc)
829 seq_printf(m, "voltage: %u mV\n", rdev->pm.current_vddc);
830 if (rdev->asic->pm.get_pcie_lanes)
831 seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
/* table consumed by radeon_debugfs_add_files() */
836 static struct drm_info_list radeon_pm_info_list[] = {
837 {"radeon_pm_info", radeon_debugfs_pm_info, 0, NULL},
841 static int radeon_debugfs_pm_init(struct radeon_device *rdev)
843 #if defined(CONFIG_DEBUG_FS)
844 return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));