CONFIG_GFX_RTPM=y
# CONFIG_SUPPORT_TOSHIBA_MIPI_DISPLAY is not set
# CONFIG_SUPPORT_TMD_MIPI_600X1024_DISPLAY is not set
-# CONFIG_MDFD_VIDEO_DECODE is not set
+CONFIG_MDFD_VIDEO_DECODE=y
# CONFIG_MDFD_VIDEO_ENCODE is not set
CONFIG_X86_PLATFORM_DEVICES=y
# CONFIG_SENSORS_HDAPS is not set
hdmi_state = 0;
-#ifdef CONFIG_MDFD_VIDEO_DECODER
+#ifdef CONFIG_MDFD_VIDEO_DECODE
ret = psb_ttm_global_init(dev_priv);
if (unlikely(ret != 0))
goto out_err;
man = &bdev->man[TTM_PL_TT];
/*spin_lock(&bdev->lru_lock);*///lru_lock is removed from upstream TTM
- clean = drm_mm_clean(&man->manager);
+ clean = drm_mm_clean((struct drm_mm *)man->priv);
/*spin_unlock(&bdev->lru_lock);*/
if (unlikely(!clean))
DRM_INFO("Warning: GATT was not clean after VT switch.\n");
*TTM Glue.
*/
#ifdef CONFIG_MDFD_VIDEO_DECODE
- struct ttm_global_reference mem_global_ref;
+ struct drm_global_reference mem_global_ref;
struct ttm_bo_global_ref bo_global_ref;
#endif
int has_global;
return 0;
}
+#if 0
int psb_validate_kernel_buffer(struct psb_context *context,
struct ttm_buffer_object *bo,
uint32_t fence_class,
spin_unlock(&bo->lock);
return ret;
}
+#endif
static int psb_validate_buffer_list(struct drm_file *file_priv,
item->ret = 0;
req = &item->req;
- spin_lock(&bo->lock);
+ spin_lock(&bo->bdev->fence_lock);
ret = psb_placement_fence_type(bo,
req->set_flags,
req->clear_flags,
placement.fpfn = 0;
placement.lpfn = 0;
- spin_unlock(&bo->lock);
+ spin_unlock(&bo->bdev->fence_lock);
ret = ttm_bo_validate(bo, &placement, 1, 0, 0);
- spin_lock(&bo->lock);
+ /* spin_lock(&bo->lock); */ /* mem and offset field of bo is protected by ::reserve, this function is called in reserve*/
if (unlikely(ret != 0))
goto out_err;
item->offset = bo->offset;
item->flags = bo->mem.placement;
- spin_unlock(&bo->lock);
+ /* spin_unlock(&bo->lock); */
ret =
psb_check_presumed(&item->req, bo, item->user_val_arg,
return 0;
out_err:
- spin_unlock(&bo->lock);
+ /* spin_unlock(&bo->lock); */
item->ret = ret;
return ret;
}
arg.ret = vbuf->ret;
if (!arg.ret) {
struct ttm_buffer_object *bo = entry->bo;
-			spin_lock(&bo->lock);
-			arg.d.rep.gpu_offset = bo->offset;
-			arg.d.rep.placement = bo->mem.placement;
-			arg.d.rep.fence_type_mask =
-				(uint32_t)(unsigned long)
-				entry->new_sync_obj_arg;
-			spin_unlock(&bo->lock);
+			/* bo->offset and bo->mem are protected by the reserve
+			 * lock now; bo->lock was removed from upstream TTM. */
+			ret = ttm_bo_reserve(bo, true, false, false, 0);
+			if (unlikely(ret != 0)) {
+				arg.ret = -EFAULT;
+			} else {
+				arg.d.rep.gpu_offset = bo->offset;
+				arg.d.rep.placement = bo->mem.placement;
+				arg.d.rep.fence_type_mask =
+					(uint32_t)(unsigned long)
+					entry->new_sync_obj_arg;
+				/* unreserve only after a successful reserve */
+				ttm_bo_unreserve(bo);
+			}
}
if (__copy_to_user(vbuf->user_val_arg,
if (unlikely(ret != 0))
goto out_err1;
- context->val_seq = atomic_add_return(1, &dev_priv->val_seq);
+ /* Not used in K3 */
+ /* context->val_seq = atomic_add_return(1, &dev_priv->val_seq); */
- ret = ttm_eu_reserve_buffers(&context->validate_list,
- context->val_seq);
+ ret = ttm_eu_reserve_buffers(&context->validate_list);
if (unlikely(ret != 0))
goto out_err2;
/* include headers */
/* #define DRM_DEBUG_CODE 2 */
#include <drm/drmP.h>
-#include <drm/drm_os_linux.h>
#include "psb_drv.h"
#include "psb_drm.h"
if (fence)
ttm_fence_object_unref(&fence);
- spin_lock(&cmd_buffer->lock);
+ spin_lock(&cmd_buffer->bdev->fence_lock);
if (cmd_buffer->sync_obj != NULL)
ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
- spin_unlock(&cmd_buffer->lock);
+ spin_unlock(&cmd_buffer->bdev->fence_lock);
return 0;
}
/* include headers */
/* #define DRM_DEBUG_CODE 2 */
#include <drm/drmP.h>
-#include <drm/drm_os_linux.h>
#include "psb_drv.h"
#include "psb_drm.h"
if (fence)
ttm_fence_object_unref(&fence);
- spin_lock(&cmd_buffer->lock);
+ spin_lock(&cmd_buffer->bdev->fence_lock);
if (cmd_buffer->sync_obj != NULL)
ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
- spin_unlock(&cmd_buffer->lock);
+ spin_unlock(&cmd_buffer->bdev->fence_lock);
PSB_DEBUG_GENERAL("TOPAZ exit %s\n", __func__);
return 0;
struct drm_psb_ttm_backend {
struct ttm_backend base;
struct page **pages;
+ dma_addr_t *dma_addrs;
unsigned int desired_tile_stride;
unsigned int hw_tile_stride;
int mem_type;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case DRM_PSB_MEM_MMU:
+ man->func = &ttm_bo_manager_func;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_CMA;
man->gpu_offset = PSB_MEM_MMU_START;
man->default_caching = TTM_PL_FLAG_WC;
break;
case TTM_PL_CI:
+ man->func = &ttm_bo_manager_func;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_FIXED;
man->gpu_offset = pg->mmu_gatt_start + (pg->ci_start);
man->default_caching = TTM_PL_FLAG_UNCACHED;
break;
case TTM_PL_RAR: /* Unmappable RAR memory */
+ man->func = &ttm_bo_manager_func;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_FIXED;
man->available_caching = TTM_PL_FLAG_UNCACHED;
man->gpu_offset = pg->mmu_gatt_start + (pg->rar_start);
break;
case TTM_PL_TT: /* Mappable GATT memory */
+ man->func = &ttm_bo_manager_func;
#ifdef PSB_WORKING_HOST_MMU_ACCESS
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
#else
static int drm_psb_tbe_populate(struct ttm_backend *backend,
unsigned long num_pages,
struct page **pages,
- struct page *dummy_read_page)
+ struct page *dummy_read_page,
+ dma_addr_t *dma_addrs)
{
struct drm_psb_ttm_backend *psb_be =
container_of(backend, struct drm_psb_ttm_backend, base);
psb_be->pages = pages;
+	psb_be->dma_addrs = dma_addrs; /* Not concretely implemented by TTM yet */
return 0;
}
psb_be->num_pages = bo_mem->num_pages;
psb_be->desired_tile_stride = 0;
psb_be->hw_tile_stride = 0;
- psb_be->offset = (bo_mem->mm_node->start << PAGE_SHIFT) +
+ psb_be->offset = (bo_mem->start << PAGE_SHIFT) +
man->gpu_offset;
type =
container_of(backend, struct drm_psb_ttm_backend, base);
psb_be->pages = NULL;
+ psb_be->dma_addrs = NULL;
return;
}
/* system memory */
return 0;
case TTM_PL_TT:
- mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = pg->gatt_start;
mem->bus.is_iomem = false; /* Don't know whether it is IO_MEM, this flag used in vm_fault handle */
break;
case DRM_PSB_MEM_MMU:
- mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = 0x00000000;
break;
case TTM_PL_CI:
- mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->ci_region_start;
mem->bus.is_iomem = true;
break;
case TTM_PL_RAR:
- mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+ mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->rar_region_start;
mem->bus.is_iomem = true;
break;
**************************************************************************/
#include <drm/drmP.h>
-#include <drm/drm_os_linux.h>
#include "psb_drm.h"
#include "psb_drv.h"
#include "psb_msvdx.h"
&fence);
ttm_fence_object_unref(&fence);
- spin_lock(&cmd_buffer->lock);
+ spin_lock(&cmd_buffer->bdev->fence_lock);
if (cmd_buffer->sync_obj != NULL)
ttm_fence_sync_obj_unref(&cmd_buffer->sync_obj);
- spin_unlock(&cmd_buffer->lock);
+ spin_unlock(&cmd_buffer->bdev->fence_lock);
return 0;
}
}
#ifdef CONFIG_MDFD_VIDEO_DECODE
-static int psb_ttm_mem_global_init(struct ttm_global_reference *ref)
+static int psb_ttm_mem_global_init(struct drm_global_reference *ref)
{
return ttm_mem_global_init(ref->object);
}
-static void psb_ttm_mem_global_release(struct ttm_global_reference *ref)
+static void psb_ttm_mem_global_release(struct drm_global_reference *ref)
{
ttm_mem_global_release(ref->object);
}
int psb_ttm_global_init(struct drm_psb_private *dev_priv)
{
- struct ttm_global_reference *global_ref;
- struct ttm_global_reference *global;
+ struct drm_global_reference *global_ref;
+ struct drm_global_reference *global;
int ret;
global_ref = &dev_priv->mem_global_ref;
- global_ref->global_type = TTM_GLOBAL_TTM_MEM;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
global_ref->size = sizeof(struct ttm_mem_global);
global_ref->init = &psb_ttm_mem_global_init;
global_ref->release = &psb_ttm_mem_global_release;
- ret = ttm_global_item_ref(global_ref);
+ ret = drm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed referencing a global TTM memory object.\n");
return ret;
dev_priv->bo_global_ref.mem_glob = dev_priv->mem_global_ref.object;
global = &dev_priv->bo_global_ref.ref;
- global->global_type = TTM_GLOBAL_TTM_BO;
+ global->global_type = DRM_GLOBAL_TTM_BO;
global->size = sizeof(struct ttm_bo_global);
global->init = &ttm_bo_global_init;
global->release = &ttm_bo_global_release;
- ret = ttm_global_item_ref((struct ttm_global_reference *)global);
+ ret = drm_global_item_ref((struct drm_global_reference *)global);
if (ret != 0) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- ttm_global_item_unref((struct ttm_global_reference *)global_ref);
+ drm_global_item_unref((struct drm_global_reference *)global_ref);
return ret;
}
void psb_ttm_global_release(struct drm_psb_private *dev_priv)
{
- ttm_global_item_unref(&dev_priv->mem_global_ref);
+ drm_global_item_unref(&dev_priv->mem_global_ref);
}
#endif
if (unlikely(ret != 0))
goto out_err;
- spin_lock(&bo->lock);
+ ret = ttm_bo_reserve(bo, true, false, false, 0);
+ if (unlikely(ret != 0))
+ goto out_err;
ttm_pl_fill_rep(bo, rep);
- spin_unlock(&bo->lock);
+ ttm_bo_unreserve(bo);
ttm_bo_unref(&bo);
out:
return 0;
if (unlikely(ret != 0))
goto out_err;
- spin_lock(&bo->lock);
+ ret = ttm_bo_reserve(bo, true, false, false, 0);
+ if (unlikely(ret != 0))
+ goto out_err;
ttm_pl_fill_rep(bo, rep);
- spin_unlock(&bo->lock);
+ ttm_bo_unreserve(bo);
ttm_bo_unref(&bo);
out:
return 0;
goto out;
}
- spin_lock(&bo->lock);
+ ret = ttm_bo_reserve(bo, true, false, false, 0);
+ if (unlikely(ret != 0))
+ goto out;
ttm_pl_fill_rep(bo, rep);
- spin_unlock(&bo->lock);
+ ttm_bo_unreserve(bo);
out:
base = &user_bo->base;
placement.num_placement = 2;
placement.placement = flags;
- spin_lock(&bo->lock);
+ /* spin_lock(&bo->lock); */ /* Already get reserve lock */
ret = psb_ttm_bo_check_placement(bo, &placement);
if (unlikely(ret != 0))
ttm_pl_fill_rep(bo, rep);
out_err2:
- spin_unlock(&bo->lock);
+ /* spin_unlock(&bo->lock); */
ttm_bo_unreserve(bo);
out_err1:
ttm_read_unlock(lock);
arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
if (unlikely(ret != 0))
goto out;
- spin_lock(&bo->lock);
+ spin_lock(&bo->bdev->fence_lock);
ret = ttm_bo_wait(bo,
arg->mode & TTM_PL_WAITIDLE_MODE_LAZY,
true, arg->mode & TTM_PL_WAITIDLE_MODE_NO_BLOCK);
- spin_unlock(&bo->lock);
+ spin_unlock(&bo->bdev->fence_lock);
psb_ttm_bo_unblock_reservation(bo);
out:
ttm_bo_unref(&bo);