return obj;
}

-static int vc4_grab_bin_bo(struct vc4_dev *vc4, struct vc4_file *vc4file)
-{
- int ret;
-
- if (!vc4->v3d)
- return -ENODEV;
-
- if (vc4file->bin_bo_used)
- return 0;
-
- ret = vc4_v3d_bin_bo_get(vc4, &vc4file->bin_bo_used);
- if (ret)
- return ret;
-
- return 0;
-}
-
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vc4_create_bo *args = data;
- struct vc4_file *vc4file = file_priv->driver_priv;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo = NULL;
int ret;

- ret = vc4_grab_bin_bo(vc4, vc4file);
- if (ret)
- return ret;
-
/*
* We can't allocate from the BO cache, because the BOs don't
* get zeroed, and that might leak data between users.
struct drm_file *file_priv)
{
struct drm_vc4_create_shader_bo *args = data;
- struct vc4_file *vc4file = file_priv->driver_priv;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo = NULL;
int ret;
return -EINVAL;
}

- ret = vc4_grab_bin_bo(vc4, vc4file);
- if (ret)
- return ret;
-
bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER);
if (IS_ERR(bo))
return PTR_ERR(bo);
static void vc4_close(struct drm_device *dev, struct drm_file *file)
{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file->driver_priv;

- if (vc4file->bin_bo_used)
- vc4_v3d_bin_bo_put(vc4);
-
vc4_perfmon_close_file(vc4file);
kfree(vc4file);
}
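
For reference, vc4_close() after this hunk, reconstructed from the lines above; file close no longer touches the binner BO at all:

static void vc4_close(struct drm_device *dev, struct drm_file *file)
{
        struct vc4_file *vc4file = file->driver_priv;

        vc4_perfmon_close_file(vc4file);
        kfree(vc4file);
}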
platform_set_drvdata(pdev, drm);
INIT_LIST_HEAD(&vc4->debugfs_list);

- mutex_init(&vc4->bin_bo_lock);
-
ret = vc4_bo_cache_init(drm);
if (ret)
return ret;
* the minor is available (after drm_dev_register()).
*/
struct list_head debugfs_list;
-
- /* Mutex for binner bo allocation. */
- struct mutex bin_bo_lock;
- /* Reference count for our binner bo. */
- struct kref bin_bo_kref;
};
static inline struct vc4_dev *
* NULL otherwise.
*/
struct vc4_perfmon *perfmon;
-
- /* Whether the exec has taken a reference to the binner BO, which should
- * happen with a VC4_PACKET_TILE_BINNING_MODE_CONFIG packet.
- */
- bool bin_bo_used;
};
/* Per-open file private data. Any driver-specific resource that has to be
struct idr idr;
struct mutex lock;
} perfmon;
-
- bool bin_bo_used;
};
static inline struct vc4_exec_info *
extern struct platform_driver vc4_v3d_driver;
extern const struct of_device_id vc4_v3d_dt_match[];
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
-int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used);
-void vc4_v3d_bin_bo_put(struct vc4_dev *vc4);
int vc4_v3d_pm_get(struct vc4_dev *vc4);
void vc4_v3d_pm_put(struct vc4_dev *vc4);
vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
{
struct drm_vc4_submit_cl *args = exec->args;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
void *temp = NULL;
void *bin;
int ret = 0;
if (ret)
goto fail;

- if (exec->found_tile_binning_mode_config_packet) {
- ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used);
- if (ret)
- goto fail;
- }
-
/* Block waiting on any previous rendering into the CS's VBO,
* IB, or textures, so that pixels are actually written by the
* time we try to read them.
vc4->bin_alloc_used &= ~exec->bin_slots;
spin_unlock_irqrestore(&vc4->job_lock, irqflags);

- /* Release the reference on the binner BO if needed. */
- if (exec->bin_bo_used)
- vc4_v3d_bin_bo_put(vc4);
-
/* Release the reference we had on the perf monitor. */
vc4_perfmon_put(exec->perfmon);
{
struct vc4_dev *vc4 =
container_of(work, struct vc4_dev, overflow_mem_work);
- struct vc4_bo *bo;
+ struct vc4_bo *bo = vc4->bin_bo;
int bin_bo_slot;
struct vc4_exec_info *exec;
unsigned long irqflags;

- mutex_lock(&vc4->bin_bo_lock);
-
- if (!vc4->bin_bo)
- goto complete;
-
- bo = vc4->bin_bo;
+ if (!bo)
+ return;

bin_bo_slot = vc4_v3d_get_bin_slot(vc4);
if (bin_bo_slot < 0) {
DRM_ERROR("Couldn't allocate binner overflow mem\n");
- goto complete;
+ return;
}
spin_lock_irqsave(&vc4->job_lock, irqflags);
V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
spin_unlock_irqrestore(&vc4->job_lock, irqflags);
-
-complete:
- mutex_unlock(&vc4->bin_bo_lock);
}
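
With bin_bo_lock and bin_bo_kref gone, this handler now reads vc4->bin_bo without any locking. A condensed reconstruction of the resulting function for review (the job-handoff body between the slot lookup and the register writes is unchanged and elided; that vc4_irq_uninstall() still cancels this work item before runtime suspend drops the BO is an assumption this relies on):

static void
vc4_overflow_mem_work(struct work_struct *work)
{
        struct vc4_dev *vc4 =
                container_of(work, struct vc4_dev, overflow_mem_work);
        struct vc4_bo *bo = vc4->bin_bo;
        int bin_bo_slot;
        struct vc4_exec_info *exec;
        unsigned long irqflags;

        /* The unlocked read is safe only because bin_bo now changes
         * exclusively across runtime PM transitions, while this IRQ
         * work cannot be queued or running.
         */
        if (!bo)
                return;

        bin_bo_slot = vc4_v3d_get_bin_slot(vc4);
        if (bin_bo_slot < 0) {
                DRM_ERROR("Couldn't allocate binner overflow mem\n");
                return;
        }

        spin_lock_irqsave(&vc4->job_lock, irqflags);

        /* ... hand the slot to the current bin job (unchanged) ... */

        V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
        V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
        spin_unlock_irqrestore(&vc4->job_lock, irqflags);
}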
static void
if (!vc4->v3d)
return 0;

- /* Enable the render done interrupts. The out-of-memory interrupt is
- * enabled as soon as we have a binner BO allocated.
- */
- V3D_WRITE(V3D_INTENA, V3D_INT_FLDONE | V3D_INT_FRDONE);
+ /* Enable both the render done and out of memory interrupts. */
+ V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);

return 0;
}
WARN_ON_ONCE(sizeof(vc4->bin_alloc_used) * 8 !=
bo->base.base.size / vc4->bin_alloc_size);

- kref_init(&vc4->bin_bo_kref);
-
- /* Enable the out-of-memory interrupt to set our
- * newly-allocated binner BO, potentially from an
- * already-pending-but-masked interrupt.
- */
- V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
-
break;
}
return ret;
}

-int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used)
-{
- int ret = 0;
-
- mutex_lock(&vc4->bin_bo_lock);
-
- if (used && *used)
- goto complete;
-
- if (vc4->bin_bo)
- kref_get(&vc4->bin_bo_kref);
- else
- ret = bin_bo_alloc(vc4);
-
- if (ret == 0 && used)
- *used = true;
-
-complete:
- mutex_unlock(&vc4->bin_bo_lock);
-
- return ret;
-}
-
-static void bin_bo_release(struct kref *ref)
-{
- struct vc4_dev *vc4 = container_of(ref, struct vc4_dev, bin_bo_kref);
-
- if (WARN_ON_ONCE(!vc4->bin_bo))
- return;
-
- drm_gem_object_put(&vc4->bin_bo->base.base);
- vc4->bin_bo = NULL;
-}
-
-void vc4_v3d_bin_bo_put(struct vc4_dev *vc4)
-{
- mutex_lock(&vc4->bin_bo_lock);
- kref_put(&vc4->bin_bo_kref, bin_bo_release);
- mutex_unlock(&vc4->bin_bo_lock);
-}
-
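
The two helpers deleted above implement a common idiom: a mutex-protected get-or-allocate with a kref counting the users, freeing the object when the last user drops it. For reviewers who want the idiom in isolation, here is a self-contained userspace sketch of the same pattern (illustrative names only, not driver code):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_buf;        /* plays the role of vc4->bin_bo */
static unsigned int refs;       /* plays the role of vc4->bin_bo_kref */

/* Mirrors vc4_v3d_bin_bo_get(): take a reference, allocating on
 * first use.
 */
static int buf_get(void)
{
        int ret = 0;

        pthread_mutex_lock(&lock);
        if (shared_buf) {
                refs++;                         /* kref_get() */
        } else {
                shared_buf = malloc(4096);      /* bin_bo_alloc() */
                if (shared_buf)
                        refs = 1;               /* kref_init() */
                else
                        ret = -1;
        }
        pthread_mutex_unlock(&lock);

        return ret;
}

/* Mirrors vc4_v3d_bin_bo_put(): drop a reference, freeing on the
 * last put.
 */
static void buf_put(void)
{
        pthread_mutex_lock(&lock);
        if (--refs == 0) {      /* kref_put() calling the release hook */
                free(shared_buf);
                shared_buf = NULL;
        }
        pthread_mutex_unlock(&lock);
}

The cost the revert removes is exactly this bookkeeping: every BO-creating ioctl and every bin job had to pair get and put correctly.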
#ifdef CONFIG_PM
static int vc4_v3d_runtime_suspend(struct device *dev)
{
vc4_irq_uninstall(&vc4->base);

+ drm_gem_object_put(&vc4->bin_bo->base.base);
+ vc4->bin_bo = NULL;
+
clk_disable_unprepare(v3d->clk);

return 0;
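
For reference, the resulting suspend path in full, reconstructed from this hunk (the v3d/vc4 declarations via dev_get_drvdata() are assumed from the unchanged function head):

static int vc4_v3d_runtime_suspend(struct device *dev)
{
        struct vc4_v3d *v3d = dev_get_drvdata(dev);
        struct vc4_dev *vc4 = v3d->vc4;

        vc4_irq_uninstall(&vc4->base);

        /* The binner BO now lives exactly as long as the V3D is
         * powered: drop it here, reallocate it in resume.
         */
        drm_gem_object_put(&vc4->bin_bo->base.base);
        vc4->bin_bo = NULL;

        clk_disable_unprepare(v3d->clk);

        return 0;
}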
struct vc4_dev *vc4 = v3d->vc4;
int ret;

+ ret = bin_bo_alloc(vc4);
+ if (ret)
+ return ret;
+
ret = clk_prepare_enable(v3d->clk);
if (ret != 0)
return ret;
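
And the matching resume path, reconstructed the same way (the declarations and the trailing return are assumed; the hardware re-init tail is unchanged and elided):

static int vc4_v3d_runtime_resume(struct device *dev)
{
        struct vc4_v3d *v3d = dev_get_drvdata(dev);
        struct vc4_dev *vc4 = v3d->vc4;
        int ret;

        /* Reallocate the binner BO dropped in suspend before the
         * engine powers back up, so the out-of-memory interrupt
         * always finds one once interrupts are re-enabled.
         */
        ret = bin_bo_alloc(vc4);
        if (ret)
                return ret;

        ret = clk_prepare_enable(v3d->clk);
        if (ret != 0)
                return ret;

        /* ... re-init the hardware, as in the unchanged tail ... */

        return 0;
}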
if (ret != 0)
return ret;

+ ret = bin_bo_alloc(vc4);
+ if (ret) {
+ clk_disable_unprepare(v3d->clk);
+ return ret;
+ }
+
/* Reset the binner overflow address/size at setup, to be sure
* we don't reuse an old one.
*/