/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "radeon.h"
#include "r600d.h"
/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#define FIRMWARE_RV710		"radeon/RV710_uvd.bin"
#define FIRMWARE_CYPRESS	"radeon/CYPRESS_uvd.bin"
#define FIRMWARE_SUMO		"radeon/SUMO_uvd.bin"
#define FIRMWARE_TAHITI		"radeon/TAHITI_uvd.bin"
#define FIRMWARE_BONAIRE	"radeon/BONAIRE_uvd.bin"

MODULE_FIRMWARE(FIRMWARE_RV710);
MODULE_FIRMWARE(FIRMWARE_CYPRESS);
MODULE_FIRMWARE(FIRMWARE_SUMO);
MODULE_FIRMWARE(FIRMWARE_TAHITI);
MODULE_FIRMWARE(FIRMWARE_BONAIRE);

static void radeon_uvd_idle_work_handler(struct work_struct *work);
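
/**
 * radeon_uvd_init - UVD init
 *
 * @rdev: radeon_device pointer
 *
 * Load the UVD firmware for the detected ASIC family, then allocate,
 * pin and map the VCPU buffer object that holds the firmware image,
 * stack and heap, and clear the session handle bookkeeping.
 * Returns 0 on success, negative error code on failure.
 */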
int radeon_uvd_init(struct radeon_device *rdev)
{
	unsigned long bo_size;
	const char *fw_name;
	int i, r;

	INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);

	switch (rdev->family) {
	case CHIP_RV710:
	case CHIP_RV730:
	case CHIP_RV740:
		fw_name = FIRMWARE_RV710;
		break;

	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	case CHIP_REDWOOD:
	case CHIP_CEDAR:
		fw_name = FIRMWARE_CYPRESS;
		break;

	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_PALM:
	case CHIP_CAYMAN:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		fw_name = FIRMWARE_SUMO;
		break;

	case CHIP_TAHITI:
	case CHIP_VERDE:
	case CHIP_PITCAIRN:
	case CHIP_ARUBA:
		fw_name = FIRMWARE_TAHITI;
		break;

	case CHIP_BONAIRE:
	case CHIP_KABINI:
	case CHIP_KAVERI:
		fw_name = FIRMWARE_BONAIRE;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
	if (r) {
		dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (r) {
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->uvd.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
	if (r) {
		/* don't leak the pinned BO on a failed map */
		radeon_bo_unpin(rdev->uvd.vcpu_bo);
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	radeon_bo_unreserve(rdev->uvd.vcpu_bo);

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		atomic_set(&rdev->uvd.handles[i], 0);
		rdev->uvd.filp[i] = NULL;
	}

	return 0;
}
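
/**
 * radeon_uvd_fini - UVD teardown
 *
 * @rdev: radeon_device pointer
 *
 * Unmap, unpin and free the UVD VCPU buffer object and release the
 * firmware.
 */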
void radeon_uvd_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->uvd.vcpu_bo == NULL)
		return;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (!r) {
		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
		radeon_bo_unpin(rdev->uvd.vcpu_bo);
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	}

	radeon_bo_unref(&rdev->uvd.vcpu_bo);

	release_firmware(rdev->uvd_fw);
}
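
/**
 * radeon_uvd_suspend - save UVD state before suspend
 *
 * @rdev: radeon_device pointer
 *
 * If any session handles are still open, save the stack/heap part of
 * the VCPU buffer object (everything behind the firmware image) so it
 * can be restored on resume.
 * Returns 0 on success, negative error code on failure.
 */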
int radeon_uvd_suspend(struct radeon_device *rdev)
{
	unsigned size;
	void *ptr;
	int i;

	if (rdev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
		if (atomic_read(&rdev->uvd.handles[i]))
			break;

	if (i == RADEON_MAX_UVD_HANDLES)
		return 0;

	size = radeon_bo_size(rdev->uvd.vcpu_bo);
	size -= rdev->uvd_fw->size;

	ptr = rdev->uvd.cpu_addr;
	ptr += rdev->uvd_fw->size;

	rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
	if (!rdev->uvd.saved_bo)
		return -ENOMEM;
	memcpy(rdev->uvd.saved_bo, ptr, size);

	return 0;
}
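
/**
 * radeon_uvd_resume - restore UVD state on resume
 *
 * @rdev: radeon_device pointer
 *
 * Re-upload the firmware image into the VCPU buffer object and restore
 * the previously saved stack/heap contents, or clear them if nothing
 * was saved.
 * Returns 0 on success, -EINVAL if UVD was never initialized.
 */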
int radeon_uvd_resume(struct radeon_device *rdev)
{
	unsigned size;
	void *ptr;

	if (rdev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);

	size = radeon_bo_size(rdev->uvd.vcpu_bo);
	size -= rdev->uvd_fw->size;

	ptr = rdev->uvd.cpu_addr;
	ptr += rdev->uvd_fw->size;

	if (rdev->uvd.saved_bo != NULL) {
		memcpy(ptr, rdev->uvd.saved_bo, size);
		kfree(rdev->uvd.saved_bo);
		rdev->uvd.saved_bo = NULL;
	} else
		memset(ptr, 0, size);

	return 0;
}
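
/**
 * radeon_uvd_force_into_uvd_segment - limit BO placement
 *
 * @rbo: buffer object
 *
 * Restrict the placement to the first 256MB of VRAM, the only segment
 * the UVD VCPU can access.
 */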
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
{
	rbo->placement.fpfn = 0 >> PAGE_SHIFT;
	rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
}
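
/**
 * radeon_uvd_free_handles - free open UVD session handles
 *
 * @rdev: radeon_device pointer
 * @filp: DRM file the handles belong to
 *
 * Emit a destroy message for every session still owned by @filp and
 * wait for it to finish, so a closing client can't leave dangling
 * sessions on the VCPU.
 */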
void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
	int i, r;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
		if (handle != 0 && rdev->uvd.filp[i] == filp) {
			struct radeon_fence *fence;

			r = radeon_uvd_get_destroy_msg(rdev,
				R600_RING_TYPE_UVD_INDEX, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			radeon_fence_wait(fence, false);
			radeon_fence_unref(&fence);

			rdev->uvd.filp[i] = NULL;
			atomic_set(&rdev->uvd.handles[i], 0);
		}
	}
}
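
/**
 * radeon_uvd_cs_msg_decode - validate a decode message
 *
 * @msg: pointer to the mapped UVD message
 * @buf_sizes: minimum buffer sizes, filled in on success
 *
 * Calculate the minimum decode picture buffer (DPB) and target image
 * sizes for the given codec and resolution and check them against the
 * values supplied in the message.
 * Returns 0 on success, -EINVAL on a malformed message.
 */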
static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);

	unsigned image_size, tmp, min_dpb_size;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */

		/* reference picture buffer */
		min_dpb_size = image_size * 17;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * 17 * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	return 0;
}
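
/**
 * radeon_uvd_cs_msg - handle a UVD message during CS
 *
 * @p: parser context
 * @bo: buffer object containing the message
 * @offset: offset of the message inside @bo
 * @buf_sizes: minimum buffer sizes
 *
 * Peek into the UVD message: validate decode messages, free the
 * session handle on destroy messages and track the handle for create
 * and decode messages.
 * Returns 0 on success, negative error code otherwise.
 */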
static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
			     unsigned offset, unsigned buf_sizes[])
{
	int32_t *msg, msg_type, handle;
	void *ptr;
	int i, r;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	if (bo->tbo.sync_obj) {
		r = radeon_fence_wait(bo->tbo.sync_obj, false);
		if (r) {
			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
			return r;
		}
	}

	r = radeon_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		/* don't leak the mapping on the error path */
		radeon_bo_kunmap(bo);
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	if (msg_type == 1) {
		/* it's a decode msg, calc buffer sizes */
		r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
		radeon_bo_kunmap(bo);
		if (r)
			return r;

	} else if (msg_type == 2) {
		/* it's a destroy msg, free the handle */
		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
			atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
		radeon_bo_kunmap(bo);
		return 0;
	} else {
		radeon_bo_kunmap(bo);

		if (msg_type != 0) {
			DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
			return -EINVAL;
		}

		/* it's a create msg, no special handling needed */
	}

	/* create or decode, validate the handle */
	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
			return 0;
	}

	/* handle not found, try to alloc a new one */
	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
			p->rdev->uvd.filp[i] = p->filp;
			return 0;
		}
	}

	DRM_ERROR("No more free UVD handles!\n");
	return -EINVAL;
}
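
/**
 * radeon_uvd_cs_reloc - patch a UVD relocation
 *
 * @p: parser context
 * @data0: IB index of the UVD_GPCOM_VCPU_DATA0 value
 * @data1: IB index of the UVD_GPCOM_VCPU_DATA1 value
 * @buf_sizes: minimum buffer sizes
 * @has_msg_cmd: whether a message command was seen already
 *
 * Resolve the relocation, patch the 64bit buffer address into the IB
 * and check that the buffer is large enough and stays within the 256MB
 * segment the VCPU can access.
 * Returns 0 on success, negative error code otherwise.
 */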
static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
			       int data0, int data1,
			       unsigned buf_sizes[], bool *has_msg_cmd)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_reloc *reloc;
	unsigned idx, cmd, offset;
	uint64_t start, end;
	int r;

	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	offset = radeon_get_ib_value(p, data0);
	idx = radeon_get_ib_value(p, data1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d!\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}

	reloc = p->relocs_ptr[(idx / 4)];
	start = reloc->lobj.gpu_offset;
	end = start + radeon_bo_size(reloc->robj);
	start += offset;

	p->ib.ptr[data0] = start & 0xFFFFFFFF;
	p->ib.ptr[data1] = start >> 32;

	cmd = radeon_get_ib_value(p, p->idx) >> 1;

	if (cmd < 0x4) {
		if ((end - start) < buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start), buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd != 0x100) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if ((start >> 28) != (end >> 28)) {
		DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
			  start, end);
		return -EINVAL;
	}

	/* TODO: is this still necessary on NI+ ? */
	if ((cmd == 0 || cmd == 0x3) &&
	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
		DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
			  start, end);
		return -EINVAL;
	}

	if (cmd == 0) {
		if (*has_msg_cmd) {
			DRM_ERROR("More than one message in a UVD-IB!\n");
			return -EINVAL;
		}
		*has_msg_cmd = true;
		r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
		if (r)
			return r;
	} else if (!*has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}
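
/**
 * radeon_uvd_cs_reg - parse a type-0 register write packet
 *
 * @p: parser context
 * @pkt: packet to parse
 * @data0: IB index of the last UVD_GPCOM_VCPU_DATA0 write
 * @data1: IB index of the last UVD_GPCOM_VCPU_DATA1 write
 * @buf_sizes: minimum buffer sizes
 * @has_msg_cmd: whether a message command was seen already
 *
 * Only the few registers UVD command submission needs are allowed; a
 * UVD_GPCOM_VCPU_CMD write triggers relocation handling.
 * Returns 0 on success, negative error code otherwise.
 */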
static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int *data0, int *data1,
			     unsigned buf_sizes[],
			     bool *has_msg_cmd)
{
	int i, r;

	p->idx++;
	for (i = 0; i <= pkt->count; ++i) {
		switch (pkt->reg + i*4) {
		case UVD_GPCOM_VCPU_DATA0:
			*data0 = p->idx;
			break;
		case UVD_GPCOM_VCPU_DATA1:
			*data1 = p->idx;
			break;
		case UVD_GPCOM_VCPU_CMD:
			r = radeon_uvd_cs_reloc(p, *data0, *data1,
						buf_sizes, has_msg_cmd);
			if (r)
				return r;
			break;
		case UVD_ENGINE_CNTL:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n",
				  pkt->reg + i*4);
			return -EINVAL;
		}
		p->idx++;
	}

	return 0;
}
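
/**
 * radeon_uvd_cs_parse - parse and validate a UVD command stream
 *
 * @p: parser context
 *
 * Walk all packets of the IB, rejecting anything that isn't needed for
 * UVD command submission, and require exactly one message command.
 * Returns 0 on success, negative error code otherwise.
 */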
int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r, data0 = 0, data1 = 0;

	/* does the IB have a msg command */
	bool has_msg_cmd = false;

	/* minimum buffer sizes */
	unsigned buf_sizes[] = {
		[0x00000000]	=	2048,
		[0x00000001]	=	32 * 1024 * 1024,
		[0x00000002]	=	2048 * 1152 * 3,
		[0x00000003]	=	2048,
	};

	if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  p->chunks[p->chunk_ib_idx].length_dw);
		return -EINVAL;
	}

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk!\n");
		return -EINVAL;
	}

	p->idx = 0;
	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r)
			return r;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
					      buf_sizes, &has_msg_cmd);
			if (r)
				return r;
			break;
		case RADEON_PACKET_TYPE2:
			p->idx += pkt.count + 2;
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", pkt.type);
			return -EINVAL;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);

	if (!has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}
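
/**
 * radeon_uvd_send_msg - send a message BO to the UVD ring
 *
 * @rdev: radeon_device pointer
 * @ring: ring index to use
 * @bo: buffer object containing the message
 * @fence: optional fence for the message
 *
 * Validate the BO into the UVD segment, build a small IB pointing the
 * VCPU at the message and schedule it. The BO reference is dropped once
 * the message is on its way.
 * Returns 0 on success, negative error code otherwise.
 */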
static int radeon_uvd_send_msg(struct radeon_device *rdev,
			       int ring, struct radeon_bo *bo,
			       struct radeon_fence **fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct radeon_ib ib;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head);
	if (r)
		return r;

	radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
	radeon_uvd_force_into_uvd_segment(bo);

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto err;

	r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
	if (r)
		goto err;

	addr = radeon_bo_gpu_offset(bo);
	ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
	ib.ptr[1] = addr;
	ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
	ib.ptr[3] = addr >> 32;
	ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
	ib.ptr[5] = 0;
	for (i = 6; i < 16; ++i)
		ib.ptr[i] = PACKET2(0);
	ib.length_dw = 16;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r)
		goto err;
	ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);
	radeon_bo_unref(&bo);
	return 0;

err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}
/* multiple fence commands without any stream commands in between can
   crash the vcpu so just try to emit a dummy create/destroy msg to
   avoid this */
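/**
 * radeon_uvd_get_create_msg - emit a dummy create message
 *
 * @rdev: radeon_device pointer
 * @ring: ring index to use
 * @handle: session handle to create
 * @fence: optional resulting fence
 */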
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	struct radeon_bo *bo;
	uint32_t *msg;
	int r, i;

	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
	if (r)
		return r;

	r = radeon_bo_reserve(bo, false);
	if (r) {
		radeon_bo_unref(&bo);
		return r;
	}

	r = radeon_bo_kmap(bo, (void **)&msg);
	if (r) {
		radeon_bo_unreserve(bo);
		radeon_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	radeon_bo_kunmap(bo);
	radeon_bo_unreserve(bo);

	return radeon_uvd_send_msg(rdev, ring, bo, fence);
}
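
/**
 * radeon_uvd_get_destroy_msg - emit a dummy destroy message
 *
 * @rdev: radeon_device pointer
 * @ring: ring index to use
 * @handle: session handle to destroy
 * @fence: optional resulting fence
 */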
int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	struct radeon_bo *bo;
	uint32_t *msg;
	int r, i;

	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
	if (r)
		return r;

	r = radeon_bo_reserve(bo, false);
	if (r) {
		radeon_bo_unref(&bo);
		return r;
	}

	r = radeon_bo_kmap(bo, (void **)&msg);
	if (r) {
		radeon_bo_unreserve(bo);
		radeon_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	radeon_bo_kunmap(bo);
	radeon_bo_unreserve(bo);

	return radeon_uvd_send_msg(rdev, ring, bo, fence);
}
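
/**
 * radeon_uvd_idle_work_handler - power down UVD when idle
 *
 * @work: delayed work item
 *
 * If no UVD fences are outstanding, drop the UVD clocks (or let DPM
 * power the block down); otherwise re-arm the delayed work.
 */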
static void radeon_uvd_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device, uvd.idle_work.work);

	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			mutex_lock(&rdev->pm.mutex);
			rdev->pm.dpm.uvd_active = false;
			mutex_unlock(&rdev->pm.mutex);
			radeon_pm_compute_clocks(rdev);
		} else {
			radeon_set_uvd_clocks(rdev, 0, 0);
		}
	} else {
		schedule_delayed_work(&rdev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	}
}
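
/**
 * radeon_uvd_note_usage - bump UVD clocks on use
 *
 * @rdev: radeon_device pointer
 *
 * Called on UVD submission; (re)arms the idle handler and raises the
 * UVD clocks if the block was idle before.
 */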
void radeon_uvd_note_usage(struct radeon_device *rdev)
{
	bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* XXX pick SD/HD/MVC */
		radeon_dpm_enable_power_state(rdev, POWER_STATE_TYPE_INTERNAL_UVD);
	} else if (set_clocks) {
		radeon_set_uvd_clocks(rdev, 53300, 40000);
	}
}
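
/**
 * radeon_uvd_calc_upll_post_div - calc UPLL post divider
 *
 * @vco_freq: VCO frequency
 * @target_freq: wanted output frequency
 * @pd_min: post divider minimum
 * @pd_even: post divider must be even above this value
 *
 * Pick the smallest post divider that satisfies the constraints and
 * doesn't overclock the target frequency.
 */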
static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
					      unsigned target_freq,
					      unsigned pd_min,
					      unsigned pd_even)
{
	unsigned post_div = vco_freq / target_freq;

	/* adjust to post divider minimum value */
	if (post_div < pd_min)
		post_div = pd_min;

	/* we always need a frequency less than or equal to the target */
	if ((vco_freq / post_div) > target_freq)
		post_div += 1;

	/* post dividers above a certain value must be even */
	if (post_div > pd_even && post_div % 2)
		post_div += 1;

	return post_div;
}
/**
 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
 *
 * @rdev: radeon_device pointer
 * @vclk: wanted VCLK
 * @dclk: wanted DCLK
 * @vco_min: minimum VCO frequency
 * @vco_max: maximum VCO frequency
 * @fb_factor: factor to multiply vco freq with
 * @fb_mask: limit and bitmask for feedback divider
 * @pd_min: post divider minimum
 * @pd_max: post divider maximum
 * @pd_even: post divider must be even above this value
 * @optimal_fb_div: resulting feedback divider
 * @optimal_vclk_div: resulting vclk post divider
 * @optimal_dclk_div: resulting dclk post divider
 *
 * Calculate dividers for UVD's UPLL (R6xx-SI, except APUs).
 * Returns zero on success, -EINVAL on error.
 */
int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
				  unsigned vclk, unsigned dclk,
				  unsigned vco_min, unsigned vco_max,
				  unsigned fb_factor, unsigned fb_mask,
				  unsigned pd_min, unsigned pd_max,
				  unsigned pd_even,
				  unsigned *optimal_fb_div,
				  unsigned *optimal_vclk_div,
				  unsigned *optimal_dclk_div)
{
	unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;

	/* start off with something large */
	unsigned optimal_score = ~0;

	/* loop through vco from low to high */
	vco_min = max(max(vco_min, vclk), dclk);
	for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {

		uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
		unsigned vclk_div, dclk_div, score;

		do_div(fb_div, ref_freq);

		/* fb div out of range ? */
		if (fb_div > fb_mask)
			break; /* it can only get worse */

		fb_div &= fb_mask;

		/* calc vclk divider with current vco freq */
		vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
							 pd_min, pd_even);
		if (vclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc dclk divider with current vco freq */
		dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
							 pd_min, pd_even);
		if (dclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc score with current vco freq */
		score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);

		/* determine if this vco setting is better than current optimal settings */
		if (score < optimal_score) {
			*optimal_fb_div = fb_div;
			*optimal_vclk_div = vclk_div;
			*optimal_dclk_div = dclk_div;
			optimal_score = score;
			if (optimal_score == 0)
				break; /* it can't get better than this */
		}
	}

	/* did we find a valid setup? */
	if (optimal_score == ~0)
		return -EINVAL;

	return 0;
}
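
/**
 * radeon_uvd_send_upll_ctlreq - emit UPLL control request
 *
 * @rdev: radeon_device pointer
 * @cg_upll_func_cntl: CG_UPLL_FUNC_CNTL register offset
 *
 * Toggle UPLL_CTLREQ and wait for both acknowledge bits.
 * Returns 0 on success, -ETIMEDOUT if the PLL doesn't lock in time.
 */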
int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
				unsigned cg_upll_func_cntl)
{
	unsigned i;

	/* make sure UPLL_CTLREQ is deasserted */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	mdelay(10);

	/* assert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);

	/* wait for CTLACK and CTLACK2 to get asserted */
	for (i = 0; i < 100; ++i) {
		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
		if ((RREG32(cg_upll_func_cntl) & mask) == mask)
			break;
		mdelay(10);
	}

	/* deassert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	if (i == 100) {
		DRM_ERROR("Timeout setting UVD clocks!\n");
		return -ETIMEDOUT;
	}

	return 0;
}