req.channel = chan->id;
req.handle = 0;
ret = drmCommandWriteRead(nouveau_device(dev)->fd,
- DRM_NOUVEAU_GEM_PUSHBUF_CALL,
+ DRM_NOUVEAU_GEM_PUSHBUF_CALL2,
&req, sizeof(req));
- if (ret)
- return;
+ if (ret) {
+ ret = drmCommandWriteRead(nouveau_device(dev)->fd,
+ DRM_NOUVEAU_GEM_PUSHBUF_CALL,
+ &req, sizeof(req));
+ if (ret)
+ return;
+
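+ /* kernel lacks CALL2: fall back to the legacy CALL ioctl and
+  * skip aperture-size updates at flush time */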
+ nvpb->no_aper_update = 1;
+ }
for (i = 0; i < CALPB_BUFFERS; i++) {
ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
nvpb->current_offset;
req.suffix0 = nvpb->cal_suffix0;
req.suffix1 = nvpb->cal_suffix1;
- ret = drmCommandWriteRead(nvdev->fd,
- DRM_NOUVEAU_GEM_PUSHBUF_CALL,
+ ret = drmCommandWriteRead(nvdev->fd, nvpb->no_aper_update ?
+ DRM_NOUVEAU_GEM_PUSHBUF_CALL :
+ DRM_NOUVEAU_GEM_PUSHBUF_CALL2,
&req, sizeof(req));
if (ret == -EAGAIN)
goto restart_cal;
nvpb->cal_suffix0 = req.suffix0;
nvpb->cal_suffix1 = req.suffix1;
assert(ret == 0);
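+ /* a CALL2 reply reports the currently available VRAM/GART
+  * aperture sizes; mirror them into the device */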
+ if (!nvpb->no_aper_update) {
+ nvdev->base.vm_vram_size = req.vram_available;
+ nvdev->base.vm_gart_size = req.gart_available;
+ }
} else {
struct drm_nouveau_gem_pushbuf req;
uint64_t relocs;
uint32_t suffix0;
uint32_t suffix1;
+ /* below only accessed for CALL2 */
+ uint64_t vram_available;
+ uint64_t gart_available;
};
struct drm_nouveau_gem_pin {
#define DRM_NOUVEAU_GEM_CPU_PREP 0x45
#define DRM_NOUVEAU_GEM_CPU_FINI 0x46
#define DRM_NOUVEAU_GEM_INFO 0x47
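+/* PUSHBUF_CALL variant whose reply also fills vram_available/gart_available */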
+#define DRM_NOUVEAU_GEM_PUSHBUF_CALL2 0x48
#endif /* __NOUVEAU_DRM_H__ */