drm_dev_unregister(drm);
}
-static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5)
+static struct vc4_dev *__mock_device(struct kunit *test, enum vc4_gen gen)
{
struct drm_device *drm;
- const struct drm_driver *drv = is_vc5 ? &vc5_drm_driver : &vc4_drm_driver;
- const struct vc4_mock_desc *desc = is_vc5 ? &vc5_mock : &vc4_mock;
+ const struct drm_driver *drv = (gen == VC4_GEN_5) ? &vc5_drm_driver : &vc4_drm_driver;
+ const struct vc4_mock_desc *desc = (gen == VC4_GEN_5) ? &vc5_mock : &vc4_mock;
struct vc4_dev *vc4;
struct device *dev;
int ret;
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
vc4->dev = dev;
- vc4->is_vc5 = is_vc5;
+ vc4->gen = gen;
vc4->hvs = __vc4_hvs_alloc(vc4, NULL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4->hvs);
struct vc4_dev *vc4_mock_device(struct kunit *test)
{
- return __mock_device(test, false);
+ return __mock_device(test, VC4_GEN_4);
}
struct vc4_dev *vc5_mock_device(struct kunit *test)
{
- return __mock_device(test, true);
+ return __mock_device(test, VC4_GEN_5);
}
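
The wrappers above now name the generation explicitly instead of passing an opaque bool. A minimal standalone sketch of that selection pattern, with mock_desc and pick_desc as hypothetical stand-ins for the driver's descriptor structures:

#include <stdio.h>

enum vc4_gen {
	VC4_GEN_4,
	VC4_GEN_5,
};

/* Hypothetical stand-ins for vc4_mock/vc5_mock and their descriptors. */
struct mock_desc { const char *name; };

static const struct mock_desc vc4_mock = { "vc4" };
static const struct mock_desc vc5_mock = { "vc5" };

static const struct mock_desc *pick_desc(enum vc4_gen gen)
{
	/* The explicit comparison replaces the old is_vc5 boolean test. */
	return (gen == VC4_GEN_5) ? &vc5_mock : &vc4_mock;
}

int main(void)
{
	printf("%s\n", pick_desc(VC4_GEN_4)->name);	/* prints "vc4" */
	printf("%s\n", pick_desc(VC4_GEN_5)->name);	/* prints "vc5" */
	return 0;
}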
{
struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
mutex_lock(&vc4->purgeable.lock);
{
struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
/* list_del_init() is used here because the caller might release
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_bo *bo;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return ERR_PTR(-ENODEV);
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
struct drm_gem_dma_object *dma_obj;
struct vc4_bo *bo;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return ERR_PTR(-ENODEV);
if (size == 0)
struct vc4_bo *bo = NULL;
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
ret = vc4_dumb_fixup_args(args);
struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
/* Fast path: if the BO is already retained by someone, no need to
{
struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
/* Fast path: if the BO is still retained by someone, no need to test
struct vc4_bo *bo = NULL;
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
ret = vc4_grab_bin_bo(vc4, vc4file);
struct drm_vc4_mmap_bo *args = data;
struct drm_gem_object *gem_obj;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
struct vc4_bo *bo = NULL;
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
if (args->size == 0)
struct vc4_bo *bo;
bool t_format;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
if (args->flags != 0)
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
if (args->flags != 0 || args->modifier != 0)
int ret;
int i;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
/* Create the initial set of BO labels that the kernel will
struct drm_gem_object *gem_obj;
int ret = 0, label;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
if (!args->len)
* Removing 1 from the FIFO full level however
* seems to completely remove that issue.
*/
- if (!vc4->is_vc5)
+ if (vc4->gen == VC4_GEN_4)
return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;
return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
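
A worked example of the full-level computation above, assuming the driver's HVS_FIFO_LATENCY_PIX value of 6 and a hypothetical 64-entry FIFO; only the gen4 path reserves the extra entry:

#include <stdio.h>

#define HVS_FIFO_LATENCY_PIX 6	/* value used by the driver */

enum vc4_gen { VC4_GEN_4, VC4_GEN_5 };

static unsigned int fifo_full_level(enum vc4_gen gen,
				    unsigned int fifo_len_bytes)
{
	/* Gen4 drops one more entry from the full level to avoid the
	 * FIFO underrun described in the comment above. */
	if (gen == VC4_GEN_4)
		return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;
	return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
}

int main(void)
{
	printf("%u\n", fifo_full_level(VC4_GEN_4, 64));	/* 64 - 18 - 1 = 45 */
	printf("%u\n", fifo_full_level(VC4_GEN_5, 64));	/* 64 - 18 = 46 */
	return 0;
}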
if (is_dsi)
CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
- if (vc4->is_vc5)
+ if (vc4->gen == VC4_GEN_5)
CRTC_WRITE(PV_MUX_CFG,
VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP,
PV_MUX_CFG_RGB_PIXEL_MUX_MODE));
struct dma_fence *fence;
int ret;
- if (!vc4->is_vc5) {
+ if (vc4->gen == VC4_GEN_4) {
struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
/*
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- if (vc4->is_vc5)
+ if (vc4->gen == VC4_GEN_5)
return vc5_async_page_flip(crtc, fb, event, flags);
else
return vc4_async_page_flip(crtc, fb, event, flags);
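
The page-flip hunk above is the same two-way selection, this time picking an implementation at runtime. A standalone sketch, with flip4() and flip5() as hypothetical stand-ins for the two page-flip paths:

#include <stdio.h>

enum vc4_gen { VC4_GEN_4, VC4_GEN_5 };

static int flip4(void) { puts("vc4 async page flip path"); return 0; }
static int flip5(void) { puts("vc5 async page flip path"); return 0; }

static int async_page_flip(enum vc4_gen gen)
{
	/* Explicit generation check, mirroring the hunk above. */
	if (gen == VC4_GEN_5)
		return flip5();
	return flip4();
}

int main(void)
{
	return async_page_flip(VC4_GEN_5);
}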
drm_crtc_helper_add(crtc, crtc_helper_funcs);
- if (!vc4->is_vc5) {
+ if (vc4->gen == VC4_GEN_4) {
drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
}
- if (!vc4->is_vc5) {
+ if (vc4->gen == VC4_GEN_4) {
/* We support CTM, but only for one CRTC at a time. It's therefore
* implemented as private driver state in vc4_kms, not here.
*/
if (args->pad != 0)
return -EINVAL;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
if (!vc4->v3d)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
vc4file = kzalloc(sizeof(*vc4file), GFP_KERNEL);
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file->driver_priv;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
if (vc4file->bin_bo_used)
struct vc4_dev *vc4;
struct device_node *node;
struct drm_crtc *crtc;
- bool is_vc5;
+ enum vc4_gen gen;
int ret = 0;
dev->coherent_dma_mask = DMA_BIT_MASK(32);
- is_vc5 = of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5");
- if (is_vc5)
+ if (of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5"))
+ gen = VC4_GEN_5;
+ else
+ gen = VC4_GEN_4;
+
+ if (gen == VC4_GEN_5)
driver = &vc5_drm_driver;
else
driver = &vc4_drm_driver;
vc4 = devm_drm_dev_alloc(dev, driver, struct vc4_dev, base);
if (IS_ERR(vc4))
return PTR_ERR(vc4);
- vc4->is_vc5 = is_vc5;
+ vc4->gen = gen;
vc4->dev = dev;
drm = &vc4->base;
platform_set_drvdata(pdev, drm);
- if (!is_vc5) {
+ if (gen == VC4_GEN_4) {
ret = drmm_mutex_init(drm, &vc4->bin_bo_lock);
if (ret)
return ret;
if (ret)
return ret;
- if (!is_vc5) {
+ if (gen == VC4_GEN_4) {
ret = vc4_gem_init(drm);
if (ret)
return ret;
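
The probe path above maps a device-tree compatible string to a generation once, then uses the enum everywhere else. A minimal user-space sketch of that mapping; match_compatible() is a hypothetical stand-in for of_device_is_compatible():

#include <assert.h>
#include <string.h>

enum vc4_gen { VC4_GEN_4, VC4_GEN_5 };

/* Hypothetical stand-in for of_device_is_compatible(). */
static int match_compatible(const char *node_compat, const char *compat)
{
	return strcmp(node_compat, compat) == 0;
}

static enum vc4_gen detect_gen(const char *node_compat)
{
	if (match_compatible(node_compat, "brcm,bcm2711-vc5"))
		return VC4_GEN_5;
	return VC4_GEN_4;
}

int main(void)
{
	assert(detect_gen("brcm,bcm2711-vc5") == VC4_GEN_5);
	assert(detect_gen("brcm,bcm2835-vc4") == VC4_GEN_4);
	return 0;
}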
u64 counters[];
};
+enum vc4_gen {
+ VC4_GEN_4,
+ VC4_GEN_5,
+};
+
struct vc4_dev {
struct drm_device base;
struct device *dev;
- bool is_vc5;
+ enum vc4_gen gen;
unsigned int irq;
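
The enum is what makes ordered checks such as the `vc4->gen >= VC4_GEN_5` test in the HDMI hunk below possible; a bool cannot express that. A short sketch, with a hypothetical VC4_GEN_6 entry to show how a future generation would slot in:

enum vc4_gen {
	VC4_GEN_4,
	VC4_GEN_5,
	VC4_GEN_6,	/* hypothetical future entry, for illustration only */
};

/* Hypothetical helper name; the driver does not define this. */
static int has_mai_threshold_regs(enum vc4_gen gen)
{
	/* An ordered comparison covers GEN_5 and anything newer. */
	return gen >= VC4_GEN_5;
}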
u32 i;
int ret = 0;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
if (!vc4->v3d) {
unsigned long timeout_expire;
DEFINE_WAIT(wait);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
if (vc4->finished_seqno >= seqno)
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_exec_info *exec;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
again:
if (!exec)
return;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
/* A previous RCL may have written to one of our textures, and
struct vc4_dev *vc4 = to_vc4_dev(dev);
bool was_empty = list_empty(&vc4->render_job_list);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
list_move_tail(&exec->head, &vc4->render_job_list);
unsigned long irqflags;
struct vc4_seqno_cb *cb, *cb_temp;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
spin_lock_irqsave(&vc4->job_lock, irqflags);
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned long irqflags;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
cb->func = func;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_wait_seqno *args = data;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
if (args->pad != 0)
args->shader_rec_size,
args->bo_handle_count);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
if (!vc4->v3d) {
struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
vc4->dma_fence_context = dma_fence_context_alloc(1);
struct vc4_bo *bo;
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
switch (args->madv) {
VC4_HDMI_AUDIO_PACKET_CEA_MASK);
/* Set the MAI threshold */
- if (vc4->is_vc5)
+ if (vc4->gen >= VC4_GEN_5)
HDMI_WRITE(HDMI_MAI_THR,
VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) |
VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) |
unsigned int channel)
{
struct vc4_dev *vc4 = hvs->vc4;
- u32 irq_mask = vc4->is_vc5 ?
+ u32 irq_mask = (vc4->gen == VC4_GEN_5) ?
SCALER5_DISPCTRL_DSPEIEOF(channel) :
SCALER_DISPCTRL_DSPEIEOF(channel);
unsigned int channel)
{
struct vc4_dev *vc4 = hvs->vc4;
- u32 irq_mask = vc4->is_vc5 ?
+ u32 irq_mask = (vc4->gen == VC4_GEN_5) ?
SCALER5_DISPCTRL_DSPEIEOF(channel) :
SCALER_DISPCTRL_DSPEIEOF(channel);
u32 reg;
int ret;
- if (!vc4->is_vc5)
+ if (vc4->gen == VC4_GEN_4)
return output;
/*
dispctrl = SCALER_DISPCTRLX_ENABLE;
dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));
- if (!vc4->is_vc5) {
+ if (vc4->gen == VC4_GEN_4) {
dispctrl |= VC4_SET_FIELD(mode->hdisplay,
SCALER_DISPCTRLX_WIDTH) |
VC4_SET_FIELD(mode->vdisplay,
/* Reload the LUT, since the SRAMs would have been disabled if
* all CRTCs had SCALER_DISPBKGND_GAMMA unset at once.
*/
- if (!vc4->is_vc5)
+ if (vc4->gen == VC4_GEN_4)
vc4_hvs_lut_load(hvs, vc4_crtc);
else
vc5_hvs_lut_load(hvs, vc4_crtc);
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- if (!vc4->is_vc5)
+ if (vc4->gen == VC4_GEN_4)
return 0;
if (!crtc_state->color_mgmt_changed)
u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(channel));
if (crtc->state->gamma_lut) {
- if (!vc4->is_vc5) {
+ if (vc4->gen == VC4_GEN_4) {
vc4_hvs_update_gamma_lut(hvs, vc4_crtc);
dispbkgndx |= SCALER_DISPBKGND_GAMMA;
} else {
* should already be disabling/enabling the pipeline
* when gamma changes.
*/
- if (!vc4->is_vc5)
+ if (vc4->gen == VC4_GEN_4)
dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
}
HVS_WRITE(SCALER_DISPBKGNDX(channel), dispbkgndx);
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
{
- struct drm_device *drm = &hvs->vc4->base;
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
u32 dispctrl;
int idx;
return;
dispctrl = HVS_READ(SCALER_DISPCTRL);
- dispctrl &= ~(hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
- SCALER_DISPCTRL_DSPEISLUR(channel));
+ dispctrl &= ~((vc4->gen == VC4_GEN_5) ?
+ SCALER5_DISPCTRL_DSPEISLUR(channel) :
+ SCALER_DISPCTRL_DSPEISLUR(channel));
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
{
- struct drm_device *drm = &hvs->vc4->base;
+ struct vc4_dev *vc4 = hvs->vc4;
+ struct drm_device *drm = &vc4->base;
u32 dispctrl;
int idx;
return;
dispctrl = HVS_READ(SCALER_DISPCTRL);
- dispctrl |= (hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
- SCALER_DISPCTRL_DSPEISLUR(channel));
+ dispctrl |= ((vc4->gen == VC4_GEN_5) ?
+ SCALER5_DISPCTRL_DSPEISLUR(channel) :
+ SCALER_DISPCTRL_DSPEISLUR(channel));
HVS_WRITE(SCALER_DISPSTAT,
SCALER_DISPSTAT_EUFLOW(channel));
control = HVS_READ(SCALER_DISPCTRL);
for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
- dspeislur = vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
- SCALER_DISPCTRL_DSPEISLUR(channel);
+ dspeislur = (vc4->gen == VC4_GEN_5) ?
+ SCALER5_DISPCTRL_DSPEISLUR(channel) :
+ SCALER_DISPCTRL_DSPEISLUR(channel);
+
/* Interrupt masking is not always honored, so check it here. */
if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
control & dspeislur) {
if (!vc4->hvs)
return -ENODEV;
- if (!vc4->is_vc5) {
+ if (vc4->gen == VC4_GEN_4) {
debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
minor->debugfs_root,
&vc4->load_tracker_enabled);
* between planes when they don't overlap on the screen, but
* for now we just allocate globally.
*/
- if (!vc4->is_vc5)
+ if (vc4->gen == VC4_GEN_4)
/* 48k words of 2x12-bit pixels */
drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
else
hvs->regset.regs = hvs_regs;
hvs->regset.nregs = ARRAY_SIZE(hvs_regs);
- if (vc4->is_vc5) {
+ if (vc4->gen == VC4_GEN_5) {
struct rpi_firmware *firmware;
struct device_node *node;
unsigned int max_rate;
}
}
- if (!vc4->is_vc5)
+ if (vc4->gen == VC4_GEN_4)
hvs->dlist = hvs->regs + SCALER_DLIST_START;
else
hvs->dlist = hvs->regs + SCALER5_DLIST_START;
SCALER_DISPCTRL_DISPEIRQ(1) |
SCALER_DISPCTRL_DISPEIRQ(2);
- if (!vc4->is_vc5)
+ if (vc4->gen == VC4_GEN_4)
dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
SCALER_DISPCTRL_SLVWREIRQ |
SCALER_DISPCTRL_SLVRDEIRQ |
/* Recompute Composite Output Buffer (COB) allocations for the displays
*/
- if (!vc4->is_vc5) {
+ if (vc4->gen == VC4_GEN_4) {
/* The COB is 20736 pixels, or just over 10 lines at 2048 wide.
* The bottom 2048 pixels are full 32bpp RGBA (intended for the
* TXP composing RGBA to memory), whilst the remainder are only
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
if (!vc4->v3d)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
if (!vc4->v3d)
struct vc4_dev *vc4 = to_vc4_dev(dev);
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
if (irq == IRQ_NOTCONNECTED)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
vc4_irq_disable(dev);
struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned long irqflags;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
/* Acknowledge any stale IRQs. */
old_hvs_state->fifo_state[channel].pending_commit = NULL;
}
- if (vc4->is_vc5 && !vc4->firmware_kms) {
+ if (vc4->gen == VC4_GEN_5 && !vc4->firmware_kms) {
unsigned long state_rate = max(old_hvs_state->core_clock_rate,
new_hvs_state->core_clock_rate);
unsigned long core_rate = clamp_t(unsigned long, state_rate,
vc4_ctm_commit(vc4, state);
if (!vc4->firmware_kms) {
- if (vc4->is_vc5)
+ if (vc4->gen == VC4_GEN_5)
vc5_hvs_pv_muxing_commit(vc4, state);
else
vc4_hvs_pv_muxing_commit(vc4, state);
drm_atomic_helper_cleanup_planes(dev, state);
- if (vc4->is_vc5 && !vc4->firmware_kms) {
+ if (vc4->gen == VC4_GEN_5 && !vc4->firmware_kms) {
unsigned long core_rate = min_t(unsigned long,
hvs->max_core_rate,
new_hvs_state->core_clock_rate);
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_mode_fb_cmd2 mode_cmd_local;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return ERR_PTR(-ENODEV);
/* If the user didn't specify a modifier, use the
* the BCM2711, but the load tracker computations are used for
* the core clock rate calculation.
*/
- if (!vc4->is_vc5) {
+ if (vc4->gen == VC4_GEN_4) {
/* Start with the load tracker enabled. Can be
* disabled through the debugfs load_tracker file.
*/
return ret;
}
- if (vc4->is_vc5) {
+ if (vc4->gen == VC4_GEN_5) {
dev->mode_config.max_width = 7680;
dev->mode_config.max_height = 7680;
} else {
dev->mode_config.max_height = 2048;
}
- dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs;
+ dev->mode_config.funcs = (vc4->gen > VC4_GEN_4) ? &vc5_mode_funcs : &vc4_mode_funcs;
dev->mode_config.helper_private = &vc4_mode_config_helpers;
dev->mode_config.preferred_depth = 24;
dev->mode_config.async_page_flip = true;
return;
vc4 = perfmon->dev;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
refcount_inc(&perfmon->refcnt);
return;
vc4 = perfmon->dev;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
if (refcount_dec_and_test(&perfmon->refcnt))
unsigned int i;
u32 mask;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon))
{
unsigned int i;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
if (WARN_ON_ONCE(!vc4->active_perfmon ||
struct vc4_dev *vc4 = vc4file->dev;
struct vc4_perfmon *perfmon;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return NULL;
mutex_lock(&vc4file->perfmon.lock);
{
struct vc4_dev *vc4 = vc4file->dev;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
mutex_init(&vc4file->perfmon.lock);
{
struct vc4_dev *vc4 = vc4file->dev;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
mutex_lock(&vc4file->perfmon.lock);
unsigned int i;
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
if (!vc4->v3d) {
struct drm_vc4_perfmon_destroy *req = data;
struct vc4_perfmon *perfmon;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
if (!vc4->v3d) {
struct vc4_perfmon *perfmon;
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
if (!vc4->v3d) {
}
/* Align it to 64 or 128 (hvs5) bytes */
- lbm = roundup(lbm, vc4->is_vc5 ? 128 : 64);
+ lbm = roundup(lbm, vc4->gen == VC4_GEN_5 ? 128 : 64);
/* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
- lbm /= vc4->is_vc5 ? 4 : 2;
+ lbm /= vc4->gen == VC4_GEN_5 ? 4 : 2;
return lbm;
}
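
A worked example of the LBM sizing above for a hypothetical raw size of 1000; roundup_to() mimics the kernel's roundup() for these inputs, and the driver's own comments give the 64/128-byte alignment and the 2/4-pixel word width:

#include <stdio.h>

enum vc4_gen { VC4_GEN_4, VC4_GEN_5 };

/* Behaves like the kernel's roundup() for positive integers. */
static unsigned int roundup_to(unsigned int x, unsigned int align)
{
	return ((x + align - 1) / align) * align;
}

static unsigned int lbm_words(enum vc4_gen gen, unsigned int lbm)
{
	/* Align to 64 or 128 (HVS5) bytes. */
	lbm = roundup_to(lbm, gen == VC4_GEN_5 ? 128 : 64);
	/* Each LBM word holds 2 or 4 (HVS5) pixels. */
	return lbm / (gen == VC4_GEN_5 ? 4 : 2);
}

int main(void)
{
	printf("%u\n", lbm_words(VC4_GEN_4, 1000));	/* 1024 / 2 = 512 */
	printf("%u\n", lbm_words(VC4_GEN_5, 1000));	/* 1024 / 4 = 256 */
	return 0;
}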
ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
&vc4_state->lbm,
lbm_size,
- vc4->is_vc5 ? 64 : 32,
+ vc4->gen == VC4_GEN_5 ? 64 : 32,
0, 0);
spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
fb->format->has_alpha;
- if (!vc4->is_vc5) {
+ if (vc4->gen == VC4_GEN_4) {
/* Control word */
vc4_dlist_write(vc4_state,
SCALER_CTL0_VALID |
};
for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
- if (!hvs_formats[i].hvs5_only || vc4->is_vc5) {
+ if (!hvs_formats[i].hvs5_only || vc4->gen == VC4_GEN_5) {
formats[num_formats] = hvs_formats[i].drm;
num_formats++;
}
return ERR_CAST(vc4_plane);
plane = &vc4_plane->base;
- if (vc4->is_vc5)
+ if (vc4->gen == VC4_GEN_5)
drm_plane_helper_add(plane, &vc5_plane_helper_funcs);
else
drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
bool has_bin = args->bin_cl_size != 0;
int ret;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
if (args->min_x_tile > args->max_x_tile ||
int
vc4_v3d_pm_get(struct vc4_dev *vc4)
{
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
mutex_lock(&vc4->power_lock);
void
vc4_v3d_pm_put(struct vc4_dev *vc4)
{
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
mutex_lock(&vc4->power_lock);
uint64_t seqno = 0;
struct vc4_exec_info *exec;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
try_again:
{
int ret = 0;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
mutex_lock(&vc4->bin_bo_lock);
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4)
{
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return;
mutex_lock(&vc4->bin_bo_lock);
struct drm_gem_dma_object *obj;
struct vc4_bo *bo;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return NULL;
if (hindex >= exec->bo_count) {
uint32_t utile_w = utile_width(cpp);
uint32_t utile_h = utile_height(cpp);
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return false;
/* The shaded vertex format stores signed 12.4 fixed point
uint32_t dst_offset = 0;
uint32_t src_offset = 0;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
while (src_offset < len) {
uint32_t i;
int ret = 0;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return -ENODEV;
for (i = 0; i < exec->shader_state_count; i++) {
struct vc4_validated_shader_info *validated_shader = NULL;
struct vc4_shader_validation_state validation_state;
- if (WARN_ON_ONCE(vc4->is_vc5))
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
return NULL;
memset(&validation_state, 0, sizeof(validation_state));