close(fd);
}
+int fd;
+
int main(int argc, char **argv)
{
- int fd;
-
igt_subtest_init(argc, argv);
- fd = drm_open_any();
+ igt_fixture
+ fd = drm_open_any();
igt_subtest("bad-close")
test_bad_close(fd);
igt_subtest_init(argc, argv);
igt_skip_on_simulation();
- srandom(0xdeadbeef);
+ igt_fixture {
+ srandom(0xdeadbeef);
- fd = drm_open_any();
+ fd = drm_open_any();
- gem_require_caching(fd);
+ gem_require_caching(fd);
- devid = intel_get_drm_devid(fd);
- if (IS_GEN2(devid)) /* chipset only handles cached -> uncached */
- flags &= ~TEST_READ;
- if (IS_BROADWATER(devid) || IS_CRESTLINE(devid)) {
- /* chipset is completely fubar */
- printf("coherency broken on i965g/gm\n");
- flags = 0;
- }
+ devid = intel_get_drm_devid(fd);
+ if (IS_GEN2(devid)) /* chipset only handles cached -> uncached */
+ flags &= ~TEST_READ;
+ if (IS_BROADWATER(devid) || IS_CRESTLINE(devid)) {
+ /* chipset is completely fubar */
+ printf("coherency broken on i965g/gm\n");
+ flags = 0;
+ }
- bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
- batch = intel_batchbuffer_alloc(bufmgr, devid);
+ bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+ batch = intel_batchbuffer_alloc(bufmgr, devid);
- /* overallocate the buffers we're actually using because */
- scratch_bo = drm_intel_bo_alloc(bufmgr, "scratch bo", BO_SIZE, 4096);
- gem_set_caching(fd, scratch_bo->handle, 1);
+ /* overallocate the buffers we're actually using because */
+ scratch_bo = drm_intel_bo_alloc(bufmgr, "scratch bo", BO_SIZE, 4096);
+ gem_set_caching(fd, scratch_bo->handle, 1);
- staging_bo = drm_intel_bo_alloc(bufmgr, "staging bo", BO_SIZE, 4096);
+ staging_bo = drm_intel_bo_alloc(bufmgr, "staging bo", BO_SIZE, 4096);
- igt_init_aperture_trashers(bufmgr);
- mappable_gtt_limit = gem_mappable_aperture_size();
+ igt_init_aperture_trashers(bufmgr);
+ mappable_gtt_limit = gem_mappable_aperture_size();
+ }
igt_subtest("reads") {
if (!(flags & TEST_READ))
}
}
- igt_cleanup_aperture_trashers();
- drm_intel_bufmgr_destroy(bufmgr);
+ igt_fixture {
+ igt_cleanup_aperture_trashers();
+ drm_intel_bufmgr_destroy(bufmgr);
- close(fd);
+ close(fd);
+ }
igt_exit();
}
}
+int fd;
+
int main(int argc, char **argv)
{
- int fd;
igt_subtest_init(argc, argv);
igt_skip_on_simulation();
- fd = drm_open_any();
+ igt_fixture {
+ fd = drm_open_any();
- if (!igt_only_list_subtests()) {
/* This test is very sensitive to residual gtt_mm noise from previous
* tests. Try to quiet thing down first. */
gem_quiescent_gpu(fd);
igt_subtest("vebox")
run_on_ring(fd, LOCAL_I915_EXEC_VEBOX, "vebox");
- close(fd);
+ igt_fixture
+ close(fd);
igt_exit();
}
return ret;
}
+uint32_t handle;
+uint32_t batch[2] = {MI_BATCH_BUFFER_END};
+uint32_t ctx_id;
+int fd;
+
int main(int argc, char *argv[])
{
- uint32_t handle;
- uint32_t batch[2] = {MI_BATCH_BUFFER_END};
- uint32_t ctx_id;
- int fd;
-
igt_skip_on_simulation();
igt_subtest_init(argc, argv);
- fd = drm_open_any();
+ igt_fixture {
+ fd = drm_open_any();
- ctx_id = context_create(fd);
+ ctx_id = context_create(fd);
+
+ handle = gem_create(fd, 4096);
+ gem_write(fd, handle, 0, batch, sizeof(batch));
+ }
- handle = gem_create(fd, 4096);
- gem_write(fd, handle, 0, batch, sizeof(batch));
igt_subtest("render")
igt_assert(exec(fd, handle, I915_EXEC_RENDER, ctx_id) == 0);
igt_subtest("bsd")
}
}
+int fd;
+int devid;
+int num_rings;
+
int main(int argc, char **argv)
{
- int fd;
- int devid;
- int num_rings;
igt_subtest_init(argc, argv);
igt_skip_on_simulation();
- fd = drm_open_any();
- devid = intel_get_drm_devid(fd);
- num_rings = gem_get_num_rings(fd);
- if (!HAS_BLT_RING(devid)) {
- fprintf(stderr, "not (yet) implemented for pre-snb\n");
- igt_skip();
- }
+ igt_fixture {
+ fd = drm_open_any();
+ devid = intel_get_drm_devid(fd);
+ num_rings = gem_get_num_rings(fd);
+ /* Not yet implemented on pre-snb. */
+	igt_require(HAS_BLT_RING(devid));
- bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
- if (!bufmgr) {
- fprintf(stderr, "failed to init libdrm\n");
- igt_fail(-1);
- }
- drm_intel_bufmgr_gem_enable_reuse(bufmgr);
+ bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+ igt_assert(bufmgr);
+ drm_intel_bufmgr_gem_enable_reuse(bufmgr);
- batch = intel_batchbuffer_alloc(bufmgr, devid);
- if (!batch) {
- fprintf(stderr, "failed to create batch buffer\n");
- igt_fail(-1);
- }
+ batch = intel_batchbuffer_alloc(bufmgr, devid);
+ igt_assert(batch);
- target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
- if (!target_buffer) {
- fprintf(stderr, "failed to alloc target buffer\n");
- igt_fail(-1);
+ target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
+ igt_assert(target_buffer);
}
igt_subtest("render") {
}
}
- drm_intel_bo_unreference(target_buffer);
- intel_batchbuffer_free(batch);
- drm_intel_bufmgr_destroy(bufmgr);
+ igt_fixture {
+ drm_intel_bo_unreference(target_buffer);
+ intel_batchbuffer_free(batch);
+ drm_intel_bufmgr_destroy(bufmgr);
- close(fd);
+ close(fd);
+ }
igt_exit();
}
}
}
+int fd;
+drm_intel_bo *tmp;
+
int main(int argc, char **argv)
{
- int fd, ret;
- drm_intel_bo *tmp;
-
igt_subtest_init(argc, argv);
- fd = drm_open_any();
+ igt_fixture {
+ fd = drm_open_any();
- bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
- drm_intel_bufmgr_gem_enable_reuse(bufmgr);
- batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
+ bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+ drm_intel_bufmgr_gem_enable_reuse(bufmgr);
+ batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
- tmp = drm_intel_bo_alloc(bufmgr, "tmp", 128 * 128, 4096);
+ tmp = drm_intel_bo_alloc(bufmgr, "tmp", 128 * 128, 4096);
+ }
igt_subtest("cpu-domain") {
BEGIN_BATCH(2);
OUT_BATCH(0);
OUT_RELOC(tmp, I915_GEM_DOMAIN_CPU, 0, 0);
ADVANCE_BATCH();
- ret = run_batch();
- if (ret != -EINVAL) {
- fprintf(stderr, "(cpu, 0) reloc not rejected\n");
- igt_fail(1);
- }
+ igt_assert(run_batch() == -EINVAL);
BEGIN_BATCH(2);
OUT_BATCH(0);
OUT_RELOC(tmp, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU, 0);
ADVANCE_BATCH();
- ret = run_batch();
- if (ret != -EINVAL) {
- fprintf(stderr, "(cpu, cpu) reloc not rejected\n");
- igt_fail(1);
- }
+ igt_assert(run_batch() == -EINVAL);
}
igt_subtest("gtt-domain") {
OUT_BATCH(0);
OUT_RELOC(tmp, I915_GEM_DOMAIN_GTT, 0, 0);
ADVANCE_BATCH();
- ret = run_batch();
- if (ret != -EINVAL) {
- fprintf(stderr, "(gtt, 0) reloc not rejected\n");
- igt_fail(1);
- }
+ igt_assert(run_batch() == -EINVAL);
BEGIN_BATCH(2);
OUT_BATCH(0);
OUT_RELOC(tmp, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT, 0);
ADVANCE_BATCH();
- ret = run_batch();
- if (ret != -EINVAL) {
- fprintf(stderr, "(gtt, gtt) reloc not rejected\n");
- igt_fail(1);
- }
+ igt_assert(run_batch() == -EINVAL);
}
#if 0 /* kernel checks have been eased, doesn't reject conflicting write domains
OUT_RELOC(tmp, I915_GEM_DOMAIN_INSTRUCTION,
I915_GEM_DOMAIN_INSTRUCTION, 0);
ADVANCE_BATCH();
- ret = run_batch();
- if (ret != -EINVAL) {
- fprintf(stderr, "conflicting write domains not rejected\n");
- igt_fail(1);
- }
+ igt_assert(run_batch() == -EINVAL);
}
#endif
OUT_RELOC(tmp, ~(I915_GEM_GPU_DOMAINS | I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU),
0, 0);
ADVANCE_BATCH();
- ret = run_batch();
- if (ret != -EINVAL) {
- fprintf(stderr, "invalid gpu read domains not rejected\n");
- igt_fail(1);
- }
+ igt_assert(run_batch() == -EINVAL);
BEGIN_BATCH(2);
OUT_BATCH(0);
OUT_RELOC(tmp, I915_GEM_DOMAIN_GTT << 1,
I915_GEM_DOMAIN_GTT << 1, 0);
ADVANCE_BATCH();
- ret = run_batch();
- if (ret != -EINVAL) {
- fprintf(stderr, "invalid gpu domain not rejected\n");
- igt_fail(1);
- }
+ igt_assert(run_batch() == -EINVAL);
}
- intel_batchbuffer_free(batch);
- drm_intel_bufmgr_destroy(bufmgr);
+ igt_fixture {
+ intel_batchbuffer_free(batch);
+ drm_intel_bufmgr_destroy(bufmgr);
- close(fd);
+ close(fd);
+ }
igt_exit();
}
}
}
+uint32_t batch[2] = {MI_BATCH_BUFFER_END};
+uint32_t handle;
+int fd;
+
int main(int argc, char **argv)
{
- uint32_t batch[2] = {MI_BATCH_BUFFER_END};
- uint32_t handle;
- int fd;
-
igt_subtest_init(argc, argv);
- fd = drm_open_any();
+ igt_fixture {
+ fd = drm_open_any();
- handle = gem_create(fd, 4096);
- gem_write(fd, handle, 0, batch, sizeof(batch));
+ handle = gem_create(fd, 4096);
+ gem_write(fd, handle, 0, batch, sizeof(batch));
+ }
igt_subtest("render")
loop(fd, handle, I915_EXEC_RENDER, "render");
igt_subtest("vebox")
loop(fd, handle, LOCAL_I915_EXEC_VEBOX, "vebox");
- gem_close(fd, handle);
+ igt_fixture {
+ gem_close(fd, handle);
- close(fd);
+ close(fd);
+ }
igt_exit();
}
igt_assert(open_struct.handle != 0);
}
+int fd;
+
int main(int argc, char **argv)
{
- int fd;
-
igt_subtest_init(argc, argv);
- fd = drm_open_any();
+ igt_fixture
+ fd = drm_open_any();
igt_subtest("basic")
test_flink(fd);
int main(int argc, char **argv)
{
- int fd, count = 0;
+ int fd = 0, count = 0;
igt_skip_on_simulation();
igt_subtest_init(argc, argv);
- fd = drm_open_any();
+ igt_fixture {
+ fd = drm_open_any();
- if (!igt_only_list_subtests()) {
if (argc > 1)
count = atoi(argv[1]);
if (count == 0)
igt_enable_prefault();
}
+int fd;
+
int main(int argc, char **argv)
{
- int fd;
-
if (igt_run_in_simulation())
OBJECT_SIZE = 1 * 1024 * 1024;
igt_subtest_init(argc, argv);
- fd = drm_open_any();
+ igt_fixture
+ fd = drm_open_any();
igt_subtest("copy")
test_copy(fd);
igt_subtest("write-gtt-no-prefault")
run_without_prefault(fd, test_write_gtt);
- close(fd);
+ igt_fixture
+ close(fd);
igt_exit();
}
static void do_tests(int cache_level, const char *suffix)
{
- if (cache_level != -1)
- gem_set_caching(fd, scratch_bo->handle, cache_level);
+ igt_fixture {
+ if (cache_level != -1)
+ gem_set_caching(fd, scratch_bo->handle, cache_level);
+ }
igt_subtest_f("reads%s", suffix)
test_partial_reads();
igt_subtest_init(argc, argv);
igt_skip_on_simulation();
- fd = drm_open_any();
+ igt_fixture {
+ fd = drm_open_any();
- bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
- //drm_intel_bufmgr_gem_enable_reuse(bufmgr);
- devid = intel_get_drm_devid(fd);
- batch = intel_batchbuffer_alloc(bufmgr, devid);
+ bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+ //drm_intel_bufmgr_gem_enable_reuse(bufmgr);
+ devid = intel_get_drm_devid(fd);
+ batch = intel_batchbuffer_alloc(bufmgr, devid);
- /* overallocate the buffers we're actually using because */
- scratch_bo = drm_intel_bo_alloc(bufmgr, "scratch bo", BO_SIZE, 4096);
- staging_bo = drm_intel_bo_alloc(bufmgr, "staging bo", BO_SIZE, 4096);
+ /* overallocate the buffers we're actually using because */
+ scratch_bo = drm_intel_bo_alloc(bufmgr, "scratch bo", BO_SIZE, 4096);
+ staging_bo = drm_intel_bo_alloc(bufmgr, "staging bo", BO_SIZE, 4096);
- igt_init_aperture_trashers(bufmgr);
- mappable_gtt_limit = gem_mappable_aperture_size();
+ igt_init_aperture_trashers(bufmgr);
+ mappable_gtt_limit = gem_mappable_aperture_size();
+ }
do_tests(-1, "");
do_tests(1, "-snoop");
do_tests(2, "-display");
- igt_cleanup_aperture_trashers();
- drm_intel_bufmgr_destroy(bufmgr);
+ igt_fixture {
+ igt_cleanup_aperture_trashers();
+ drm_intel_bufmgr_destroy(bufmgr);
- close(fd);
+ close(fd);
+ }
igt_exit();
}
}
+uint32_t *src, dst;
+int fd, count;
+
int main(int argc, char **argv)
{
int object_size = 0;
uint32_t buf[20];
- uint32_t *src, dst;
- int fd, count;
const struct {
int level;
const char *name;
object_size = OBJECT_SIZE;
object_size = (object_size + 3) & -4;
- fd = drm_open_any();
+ igt_fixture {
+ fd = drm_open_any();
- dst = gem_create(fd, object_size);
- src = malloc(object_size);
+ dst = gem_create(fd, object_size);
+ src = malloc(object_size);
+ }
igt_subtest("normal") {
for (count = 1; count <= 1<<17; count <<= 1) {
}
}
- free(src);
- gem_close(fd, dst);
+ igt_fixture {
+ free(src);
+ gem_close(fd, dst);
- close(fd);
+ close(fd);
+ }
igt_exit();
}
} while (--loop);
}
+drm_intel_bo *src[2], *dst[2];
+int fd;
+
int
main(int argc, char **argv)
{
const uint32_t start[2] = {0, 1024 * 1024 / 4};
- drm_intel_bo *src[2], *dst[2];
- int fd;
igt_subtest_init(argc, argv);
igt_skip_on_simulation();
- fd = drm_open_any();
+ igt_fixture {
+ fd = drm_open_any();
- bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
- drm_intel_bufmgr_gem_enable_reuse(bufmgr);
- batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
+ bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+ drm_intel_bufmgr_gem_enable_reuse(bufmgr);
+ batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
- src[0] = create_bo(start[0]);
- src[1] = create_bo(start[1]);
+ src[0] = create_bo(start[0]);
+ src[1] = create_bo(start[1]);
- dst[0] = drm_intel_bo_alloc(bufmgr, "dst bo", size, 4096);
- dst[1] = drm_intel_bo_alloc(bufmgr, "dst bo", size, 4096);
+ dst[0] = drm_intel_bo_alloc(bufmgr, "dst bo", size, 4096);
+ dst[1] = drm_intel_bo_alloc(bufmgr, "dst bo", size, 4096);
+ }
igt_subtest("normal")
do_test(fd, -1, src, start, dst, 1);
do_test(fd, 2, src, start, dst, 100);
igt_stop_signal_helper();
}
- drm_intel_bo_unreference(src[0]);
- drm_intel_bo_unreference(src[1]);
- drm_intel_bo_unreference(dst[0]);
- drm_intel_bo_unreference(dst[1]);
- intel_batchbuffer_free(batch);
- drm_intel_bufmgr_destroy(bufmgr);
+ igt_fixture {
+ drm_intel_bo_unreference(src[0]);
+ drm_intel_bo_unreference(src[1]);
+ drm_intel_bo_unreference(dst[0]);
+ drm_intel_bo_unreference(dst[1]);
+
+ intel_batchbuffer_free(batch);
+ drm_intel_bufmgr_destroy(bufmgr);
+ }
close(fd);
}
+uint32_t *src, dst;
+int fd;
+
int main(int argc, char **argv)
{
int object_size = 0;
uint32_t buf[20];
- uint32_t *src, dst;
- int fd, count;
+ int count;
const struct {
int level;
const char *name;
igt_skip_on_simulation();
igt_subtest_init(argc, argv);
- igt_skip_on_simulation();
if (argc > 1 && atoi(argv[1]))
object_size = atoi(argv[1]);
object_size = OBJECT_SIZE;
object_size = (object_size + 3) & -4;
- fd = drm_open_any();
+ igt_fixture {
+ fd = drm_open_any();
- dst = gem_create(fd, object_size);
- src = malloc(object_size);
+ dst = gem_create(fd, object_size);
+ src = malloc(object_size);
+ }
igt_subtest("normal") {
for (count = 1; count <= 1<<17; count <<= 1) {
}
}
- free(src);
- gem_close(fd, dst);
+ igt_fixture {
+ free(src);
+ gem_close(fd, dst);
- close(fd);
+ close(fd);
+ }
igt_exit();
}
return buf;
}
+uint32_t *tmp, src, dst;
+int fd;
+
int main(int argc, char **argv)
{
int object_size = 0;
uint32_t buf[20];
- uint32_t *tmp, src, dst;
- int fd, count;
+ int count;
igt_subtest_init(argc, argv);
igt_skip_on_simulation();
object_size = OBJECT_SIZE;
object_size = (object_size + 3) & -4;
- fd = drm_open_any();
+ igt_fixture {
+ fd = drm_open_any();
- dst = gem_create(fd, object_size);
- src = gem_create(fd, object_size);
- tmp = malloc(object_size);
+ dst = gem_create(fd, object_size);
+ src = gem_create(fd, object_size);
+ tmp = malloc(object_size);
- gem_set_caching(fd, src, 0);
- gem_set_caching(fd, dst, 0);
+ gem_set_caching(fd, src, 0);
+ gem_set_caching(fd, dst, 0);
+ }
igt_subtest("uncached-copy-correctness")
test_copy(fd, src, dst, tmp, object_size);
}
}
- gem_set_caching(fd, src, 1);
- gem_set_caching(fd, dst, 1);
+ igt_fixture {
+ gem_set_caching(fd, src, 1);
+ gem_set_caching(fd, dst, 1);
+ }
igt_subtest("snooped-copy-correctness")
test_copy(fd, src, dst, tmp, object_size);
}
}
- gem_set_caching(fd, src, 2);
- gem_set_caching(fd, dst, 2);
+ igt_fixture {
+ gem_set_caching(fd, src, 2);
+ gem_set_caching(fd, dst, 2);
+ }
igt_subtest("display-copy-correctness")
test_copy(fd, src, dst, tmp, object_size);
}
}
- free(tmp);
- gem_close(fd, src);
- gem_close(fd, dst);
+ igt_fixture {
+ free(tmp);
+ gem_close(fd, src);
+ gem_close(fd, dst);
- close(fd);
+ close(fd);
+ }
igt_exit();
}
return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &gem_pwrite);
}
+int fd;
+uint32_t handle;
+
int main(int argc, char **argv)
{
- int fd;
uint8_t expected[OBJECT_SIZE];
uint8_t buf[OBJECT_SIZE];
int ret;
- int handle;
igt_skip_on_simulation();
igt_subtest_init(argc, argv);
- fd = drm_open_any();
+ igt_fixture {
+ fd = drm_open_any();
- handle = gem_create(fd, OBJECT_SIZE);
+ handle = gem_create(fd, OBJECT_SIZE);
+ }
igt_subtest("new-obj") {
printf("Testing contents of newly created object.\n");
igt_assert(ret == -1 && errno == ENOENT);
}
- close(fd);
+ igt_fixture
+ close(fd);
igt_exit();
}
intel_batchbuffer_flush(batch);
}
+drm_intel_bufmgr *bufmgr;
+struct intel_batchbuffer *batch;
+render_copyfunc_t copy;
+int fd;
+
int main(int argc, char **argv)
{
- drm_intel_bufmgr *bufmgr;
- struct intel_batchbuffer *batch;
- render_copyfunc_t copy;
- int fd;
igt_subtest_init(argc, argv);
igt_skip_on_simulation();
- fd = drm_open_any();
+ igt_fixture {
+ fd = drm_open_any();
- bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
- drm_intel_bufmgr_gem_enable_reuse(bufmgr);
- batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
+ bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+ drm_intel_bufmgr_gem_enable_reuse(bufmgr);
+ batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
+ }
igt_subtest("blitter")
check_ring(bufmgr, batch, "blt", blt_copy);
- /* Strictly only required on architectures with a separate BLT ring,
- * but lets stress everybody.
- */
- copy = get_render_copyfunc(batch->devid);
- igt_require(copy);
+ igt_fixture {
+ /* Strictly only required on architectures with a separate BLT ring,
+ * but lets stress everybody.
+ */
+ copy = get_render_copyfunc(batch->devid);
+ igt_require(copy);
+ }
igt_subtest("render")
check_ring(bufmgr, batch, "render", copy);
- intel_batchbuffer_free(batch);
- drm_intel_bufmgr_destroy(bufmgr);
+ igt_fixture {
+ intel_batchbuffer_free(batch);
+ drm_intel_bufmgr_destroy(bufmgr);
- close(fd);
+ close(fd);
+ }
igt_exit();
}
printf("done\n");
}
+int fd;
+
int main(int argc, char **argv)
{
- int i, fd;
+ int i;
uint32_t tiling, tiling_after;
igt_subtest_init(argc, argv);
igt_skip_on_simulation();
- for (i = 0; i < 1024*256; i++)
- data[i] = i;
+ igt_fixture {
+ for (i = 0; i < 1024*256; i++)
+ data[i] = i;
- fd = drm_open_any();
+ fd = drm_open_any();
- bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
- drm_intel_bufmgr_gem_enable_reuse(bufmgr);
- devid = intel_get_drm_devid(fd);
- batch = intel_batchbuffer_alloc(bufmgr, devid);
+ bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+ drm_intel_bufmgr_gem_enable_reuse(bufmgr);
+ devid = intel_get_drm_devid(fd);
+ batch = intel_batchbuffer_alloc(bufmgr, devid);
+ }
igt_subtest("untiled-to-tiled") {
printf("testing untiled->tiled transisition:\n");
munmap(ptr_tiled, OBJECT_SIZE);
}
+int fd;
+
int main(int argc, char **argv)
{
- int fd;
-
igt_subtest_init(argc, argv);
- fd = drm_open_any();
+ igt_fixture
+ fd = drm_open_any();
igt_subtest("fence-restore-tiled2untiled")
test_fence_restore(fd, true);
igt_subtest("fence-restore-untiled")
test_fence_restore(fd, false);
- close(fd);
+ igt_fixture
+ close(fd);
igt_exit();
}
free(bo);
}
+int fd;
+
int main(int argc, char **argv)
{
- int fd, count = 0;
+ int count = 0;
igt_subtest_init(argc, argv);
- fd = drm_open_any();
+ igt_fixture {
+ fd = drm_open_any();
- if (!igt_only_list_subtests()) {
if (igt_run_in_simulation())
count = 2;
if (argc > 1)
}
printf("Using %d 1MiB buffers\n", count);
- }
- bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
- drm_intel_bufmgr_gem_enable_reuse(bufmgr);
- drm_intel_bufmgr_gem_set_vma_cache_size(bufmgr, 32);
- batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
+ bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+ drm_intel_bufmgr_gem_enable_reuse(bufmgr);
+ drm_intel_bufmgr_gem_set_vma_cache_size(bufmgr, 32);
+ batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
+ }
igt_subtest("normal")
run_test(count);
igt_stop_signal_helper();
}
- intel_batchbuffer_free(batch);
- drm_intel_bufmgr_destroy(bufmgr);
+ igt_fixture {
+ intel_batchbuffer_free(batch);
+ drm_intel_bufmgr_destroy(bufmgr);
- close(fd);
+ close(fd);
+ }
igt_exit();
}
srandom(0xdeadbeef);
- fd = drm_open_any();
-
- bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
- //drm_intel_bufmgr_gem_enable_reuse(bufmgr);
- devid = intel_get_drm_devid(fd);
- batch = intel_batchbuffer_alloc(bufmgr, devid);
-
- /* overallocate the buffers we're actually using because */
- scratch_bo = drm_intel_bo_alloc_tiled(bufmgr, "scratch bo", 1024,
- BO_SIZE/4096, 4,
- &tiling_mode, &scratch_pitch, 0);
- igt_assert(tiling_mode == I915_TILING_X);
- igt_assert(scratch_pitch == 4096);
- staging_bo = drm_intel_bo_alloc(bufmgr, "staging bo", BO_SIZE, 4096);
- tiled_staging_bo = drm_intel_bo_alloc_tiled(bufmgr, "scratch bo", 1024,
- BO_SIZE/4096, 4,
- &tiling_mode,
- &scratch_pitch, 0);
-
- igt_init_aperture_trashers(bufmgr);
- mappable_gtt_limit = gem_mappable_aperture_size();
+ igt_fixture {
+ fd = drm_open_any();
+
+ bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+ //drm_intel_bufmgr_gem_enable_reuse(bufmgr);
+ devid = intel_get_drm_devid(fd);
+ batch = intel_batchbuffer_alloc(bufmgr, devid);
+
+ /* overallocate the buffers we're actually using because */
+ scratch_bo = drm_intel_bo_alloc_tiled(bufmgr, "scratch bo", 1024,
+ BO_SIZE/4096, 4,
+ &tiling_mode, &scratch_pitch, 0);
+ igt_assert(tiling_mode == I915_TILING_X);
+ igt_assert(scratch_pitch == 4096);
+ staging_bo = drm_intel_bo_alloc(bufmgr, "staging bo", BO_SIZE, 4096);
+ tiled_staging_bo = drm_intel_bo_alloc_tiled(bufmgr, "scratch bo", 1024,
+ BO_SIZE/4096, 4,
+ &tiling_mode,
+ &scratch_pitch, 0);
+
+ igt_init_aperture_trashers(bufmgr);
+ mappable_gtt_limit = gem_mappable_aperture_size();
+ }
igt_subtest("reads")
test_partial_reads();
igt_subtest("writes-after-reads")
test_partial_read_writes();
- igt_cleanup_aperture_trashers();
- drm_intel_bufmgr_destroy(bufmgr);
+ igt_fixture {
+ igt_cleanup_aperture_trashers();
+ drm_intel_bufmgr_destroy(bufmgr);
- close(fd);
+ close(fd);
+ }
igt_exit();
}
igt_subtest_init(argc, argv);
igt_skip_on_simulation();
- fd = drm_open_any();
+ igt_fixture {
+ fd = drm_open_any();
- /* Test requires MI_FLUSH_DW and MI_COND_BATCH_BUFFER_END */
- if (intel_gen(intel_get_drm_devid(fd)) < 6)
- return 77;
+ /* Test requires MI_FLUSH_DW and MI_COND_BATCH_BUFFER_END */
+ igt_require(intel_gen(intel_get_drm_devid(fd)) >= 6);
- bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
- if (!bufmgr) {
- fprintf(stderr, "failed to init libdrm\n");
- igt_fail(-1);
- }
- /* don't enable buffer reuse!! */
- //drm_intel_bufmgr_gem_enable_reuse(bufmgr);
+ bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+ igt_assert(bufmgr);
+ /* don't enable buffer reuse!! */
+ //drm_intel_bufmgr_gem_enable_reuse(bufmgr);
- batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
- igt_assert(batch);
+ batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
+ igt_assert(batch);
- dummy_bo = drm_intel_bo_alloc(bufmgr, "dummy bo", 4096, 4096);
- if (!dummy_bo) {
- fprintf(stderr, "failed to alloc dummy buffer\n");
- igt_fail(-1);
- }
+ dummy_bo = drm_intel_bo_alloc(bufmgr, "dummy bo", 4096, 4096);
+ igt_assert(dummy_bo);
- load_bo = drm_intel_bo_alloc(bufmgr, "load bo", 1024*4096, 4096);
- if (!load_bo) {
- fprintf(stderr, "failed to alloc load buffer\n");
- igt_fail(-1);
+ load_bo = drm_intel_bo_alloc(bufmgr, "load bo", 1024*4096, 4096);
+ igt_assert(load_bo);
}
for (i = 0; i < ARRAY_SIZE(tests); i++) {
}
igt_stop_signal_helper();
- drm_intel_bufmgr_destroy(bufmgr);
+ igt_fixture {
+ drm_intel_bufmgr_destroy(bufmgr);
- close(fd);
+ close(fd);
+ }
igt_exit();
}
igt_subtest_init(argc, argv);
igt_skip_on_simulation();
- drm_fd = drm_open_any();
+ igt_fixture {
+ drm_fd = drm_open_any();
- if (!igt_only_list_subtests()) {
do_or_die(igt_set_vt_graphics_mode());
do_or_die(igt_install_exit_handler(kms_flip_exit_handler));
get_timestamp_format();
- }
- bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 4096);
- devid = intel_get_drm_devid(drm_fd);
- batch = intel_batchbuffer_alloc(bufmgr, devid);
+ bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 4096);
+ devid = intel_get_drm_devid(drm_fd);
+ batch = intel_batchbuffer_alloc(bufmgr, devid);
+ }
for (i = 0; i < sizeof(tests) / sizeof (tests[0]); i++) {
igt_subtest(tests[i].name) {
- printf("running testcase: %s\n", tests[i].name);
run_test(tests[i].duration, tests[i].flags, tests[i].name);
}
}
}
igt_stop_signal_helper();
- close(drm_fd);
+ igt_fixture
+ close(drm_fd);
igt_exit();
}
igt_subtest_init(argc, argv);
igt_skip_on_simulation();
- if (!igt_only_list_subtests()) {
+ igt_fixture {
drm_fd = drm_open_any();
bufmgr = drm_intel_bufmgr_gem_init(drm_fd, 4096);
run_test(tests[i].name, tests[i].flags);
}
- if (!igt_only_list_subtests())
+ igt_fixture
close(drm_fd);
igt_exit();
int main(int argc, char **argv)
{
- int ret;
-
igt_subtest_init(argc, argv);
- ret = find_and_open_devices();
- if (ret < 0)
- return ret;
+ igt_fixture {
+ igt_assert(find_and_open_devices() == 0);
- if (nouveau_fd == -1 || intel_fd == -1 || nouveau_fd2 == -1 || intel_fd2 == -1) {
- fprintf(stderr,"failed to find intel and nouveau GPU\n");
- if (!igt_only_list_subtests())
- return 77;
- }
+ igt_require(nouveau_fd != -1);
+ igt_require(nouveau_fd2 != -1);
+ igt_require(intel_fd != -1);
+ igt_require(intel_fd2 != -1);
- /* set up intel bufmgr */
- bufmgr = drm_intel_bufmgr_gem_init(intel_fd, 4096);
- if (!bufmgr)
- return -1;
- /* Do not enable reuse, we share (almost) all buffers. */
- //drm_intel_bufmgr_gem_enable_reuse(bufmgr);
+ /* set up intel bufmgr */
+ bufmgr = drm_intel_bufmgr_gem_init(intel_fd, 4096);
+ igt_assert(bufmgr);
+ /* Do not enable reuse, we share (almost) all buffers. */
+ //drm_intel_bufmgr_gem_enable_reuse(bufmgr);
- bufmgr2 = drm_intel_bufmgr_gem_init(intel_fd2, 4096);
- if (!bufmgr2)
- return -1;
- drm_intel_bufmgr_gem_enable_reuse(bufmgr2);
+ bufmgr2 = drm_intel_bufmgr_gem_init(intel_fd2, 4096);
+	igt_assert(bufmgr2);
+ drm_intel_bufmgr_gem_enable_reuse(bufmgr2);
- /* set up nouveau bufmgr */
- ret = nouveau_device_wrap(nouveau_fd, 0, &ndev);
- if (ret < 0) {
- fprintf(stderr,"failed to wrap nouveau device\n");
- return -1;
- }
+ /* set up nouveau bufmgr */
+ igt_assert(nouveau_device_wrap(nouveau_fd, 0, &ndev) >= 0);
+ igt_assert(nouveau_client_new(ndev, &nclient) >= 0);
- ret = nouveau_client_new(ndev, &nclient);
- if (ret < 0) {
- fprintf(stderr,"failed to setup nouveau client\n");
- return -1;
- }
+ /* set up nouveau bufmgr */
+ igt_assert(nouveau_device_wrap(nouveau_fd2, 0, &ndev2) >= 0);
- /* set up nouveau bufmgr */
- ret = nouveau_device_wrap(nouveau_fd2, 0, &ndev2);
- if (ret < 0) {
- fprintf(stderr,"failed to wrap nouveau device\n");
- return -1;
- }
+	igt_assert(nouveau_client_new(ndev2, &nclient2) >= 0);
- ret = nouveau_client_new(ndev2, &nclient2);
- if (ret < 0) {
- fprintf(stderr,"failed to setup nouveau client\n");
- return -1;
+ /* set up an intel batch buffer */
+ devid = intel_get_drm_devid(intel_fd);
+ intel_batch = intel_batchbuffer_alloc(bufmgr, devid);
+ igt_assert(intel_batch);
}
- /* set up an intel batch buffer */
- devid = intel_get_drm_devid(intel_fd);
- intel_batch = intel_batchbuffer_alloc(bufmgr, devid);
-
#define xtest(name) \
igt_subtest(#name) \
if (test_##name()) \
xtest(i915_self_import_to_different_fd);
xtest(nv_self_import_to_different_fd);
- intel_batchbuffer_free(intel_batch);
+ igt_fixture {
+ intel_batchbuffer_free(intel_batch);
- nouveau_device_del(&ndev);
- drm_intel_bufmgr_destroy(bufmgr);
+ nouveau_device_del(&ndev);
+ drm_intel_bufmgr_destroy(bufmgr);
- close(intel_fd);
- close(nouveau_fd);
+ close(intel_fd);
+ close(nouveau_fd);
+ }
igt_exit();
}
int main(int argc, char **argv)
{
- int ret;
-
igt_subtest_init(argc, argv);
- ret = find_and_open_devices();
- if (ret < 0)
- return ret;
+ igt_fixture {
+ igt_assert(find_and_open_devices() == 0);
- if (nouveau_fd == -1 || intel_fd == -1) {
- fprintf(stderr,"failed to find intel and nouveau GPU\n");
- if (!igt_only_list_subtests())
- return 77;
- }
+ igt_require(nouveau_fd != -1);
+ igt_require(intel_fd != -1);
- /* set up intel bufmgr */
- bufmgr = drm_intel_bufmgr_gem_init(intel_fd, 4096);
- if (!bufmgr)
- return -1;
- /* Do not enable reuse, we share (almost) all buffers. */
- //drm_intel_bufmgr_gem_enable_reuse(bufmgr);
+ /* set up intel bufmgr */
+ bufmgr = drm_intel_bufmgr_gem_init(intel_fd, 4096);
+ igt_assert(bufmgr);
+ /* Do not enable reuse, we share (almost) all buffers. */
+ //drm_intel_bufmgr_gem_enable_reuse(bufmgr);
- /* set up nouveau bufmgr */
- ret = init_nouveau();
- if (ret < 0)
- return 77;
+ /* set up nouveau bufmgr */
+ igt_require(init_nouveau() >= 0);
- /* set up an intel batch buffer */
- devid = intel_get_drm_devid(intel_fd);
- batch = intel_batchbuffer_alloc(bufmgr, devid);
+ /* set up an intel batch buffer */
+ devid = intel_get_drm_devid(intel_fd);
+ batch = intel_batchbuffer_alloc(bufmgr, devid);
+ }
#define xtest(x, args...) \
igt_subtest( #x ) \
xtest(test3_5);
xtest(test_semaphore);
- nouveau_bo_ref(NULL, &query_bo);
- nouveau_object_del(&pcopy);
- nouveau_bufctx_del(&nbufctx);
- nouveau_pushbuf_del(&npush);
- nouveau_object_del(&nchannel);
+ igt_fixture {
+ nouveau_bo_ref(NULL, &query_bo);
+ nouveau_object_del(&pcopy);
+ nouveau_bufctx_del(&nbufctx);
+ nouveau_pushbuf_del(&npush);
+ nouveau_object_del(&nchannel);
- intel_batchbuffer_free(batch);
+ intel_batchbuffer_free(batch);
- nouveau_client_del(&nclient);
- nouveau_device_del(&ndev);
- drm_intel_bufmgr_destroy(bufmgr);
+ nouveau_client_del(&nclient);
+ nouveau_device_del(&ndev);
+ drm_intel_bufmgr_destroy(bufmgr);
- close(intel_fd);
- close(nouveau_fd);
+ close(intel_fd);
+ close(nouveau_fd);
+ }
igt_exit();
}
int main(int argc, char **argv)
{
- int ret = 0;
-
- ret = find_and_open_devices();
- if (ret < 0)
- return ret;
-
igt_subtest_init(argc, argv);
- if (nouveau_fd == -1 || intel_fd == -1) {
- fprintf(stderr,"failed to find intel and nouveau GPU\n");
- if (!igt_only_list_subtests())
- return 77;
- }
+ igt_fixture {
+ igt_assert(find_and_open_devices() == 0);
- /* set up intel bufmgr */
- bufmgr = drm_intel_bufmgr_gem_init(intel_fd, 4096);
- if (!bufmgr)
- return -1;
- /* Do not enable reuse, we share (almost) all buffers. */
- //drm_intel_bufmgr_gem_enable_reuse(bufmgr);
+ igt_require(nouveau_fd != -1);
+ igt_require(intel_fd != -1);
- /* set up nouveau bufmgr */
- ret = nouveau_device_wrap(nouveau_fd, 0, &ndev);
- if (ret < 0) {
- fprintf(stderr,"failed to wrap nouveau device\n");
- return 77;
- }
+ /* set up intel bufmgr */
+ bufmgr = drm_intel_bufmgr_gem_init(intel_fd, 4096);
+ igt_assert(bufmgr);
+ /* Do not enable reuse, we share (almost) all buffers. */
+ //drm_intel_bufmgr_gem_enable_reuse(bufmgr);
- ret = nouveau_client_new(ndev, &nclient);
- if (ret < 0) {
- fprintf(stderr,"failed to setup nouveau client\n");
- return -1;
- }
+ /* set up nouveau bufmgr */
+ igt_assert(nouveau_device_wrap(nouveau_fd, 0, &ndev) >= 0);
+ igt_assert(nouveau_client_new(ndev, &nclient) >= 0);
- /* set up an intel batch buffer */
- devid = intel_get_drm_devid(intel_fd);
- intel_batch = intel_batchbuffer_alloc(bufmgr, devid);
+ /* set up an intel batch buffer */
+ devid = intel_get_drm_devid(intel_fd);
+ intel_batch = intel_batchbuffer_alloc(bufmgr, devid);
+ }
#define xtest(name) \
igt_subtest(#name) \
xtest(i915_import_pread_pwrite);
xtest(i915_blt_fill_nv_read);
- intel_batchbuffer_free(intel_batch);
+ igt_fixture {
+ intel_batchbuffer_free(intel_batch);
- nouveau_device_del(&ndev);
- drm_intel_bufmgr_destroy(bufmgr);
+ nouveau_device_del(&ndev);
+ drm_intel_bufmgr_destroy(bufmgr);
- close(intel_fd);
- close(nouveau_fd);
+ close(intel_fd);
+ close(nouveau_fd);
+ }
igt_exit();
}