/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 */
/** @file gem_tiled_pread_pwrite.c
 *
 * This is a test of pread's behavior on tiled objects with respect to the
 * reported swizzling value.
 *
 * The goal is to exercise the slow_bit17_copy path for reading on bit17
 * machines, but will also be useful for catching swizzling value bugs on
 * other platforms.
 *
 * Testcase: Exercise swizzle code for swapping
 *
 * The swizzle checks in the swapin path are at a different place than the ones
 * for pread/pwrite, so we need to check them separately.
 *
 * This test obviously needs swap present (and exits if none is detected).
 */
#include <errno.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include "ioctl_wrappers.h"
/* Size in bytes of one test buffer: WIDTH x HEIGHT 32-bit dwords.
 * (WIDTH and HEIGHT are defined earlier in the file, outside this chunk.) */
#define LINEAR_DWORDS (4 * WIDTH * HEIGHT)

/* Tiling mode applied to every buffer; set once in the main entry point
 * before any buffer is created. */
static uint32_t current_tiling_mode;

#define PAGE_SIZE 4096
73 create_bo_and_fill(int fd)
79 handle = gem_create(fd, LINEAR_DWORDS);
80 gem_set_tiling(fd, handle, current_tiling_mode, WIDTH * sizeof(uint32_t));
82 /* Fill the BO with dwords starting at start_val */
83 data = gem_mmap(fd, handle, LINEAR_DWORDS, PROT_READ | PROT_WRITE);
84 if (data == NULL && errno == ENOSPC)
87 for (i = 0; i < WIDTH*HEIGHT; i++)
89 munmap(data, LINEAR_DWORDS);
103 check_bo(int fd, uint32_t handle)
108 /* Check the target bo's contents. */
109 data = gem_mmap(fd, handle, LINEAR_DWORDS, PROT_READ | PROT_WRITE);
110 j = rand() % (WIDTH * HEIGHT);
111 igt_assert_f(data[j] == j, "mismatch at %i: %i\n", j, data[j]);
112 munmap(data, LINEAR_DWORDS);
115 static void *thread_run(void *data)
117 struct thread *t = data;
120 for (i = 0; i < t->count; i++)
121 check_bo(t->fd, bo_handles[t->idx_arr[i]]);
126 static void thread_init(struct thread *t, int fd, int count)
132 t->idx_arr = calloc(count, sizeof(int));
133 igt_assert(t->idx_arr);
135 for (i = 0; i < count; i++)
138 igt_permute_array(t->idx_arr, count, igt_exchange_int);
141 static void thread_fini(struct thread *t)
/* NOTE(review): the lines below are the interior of the main entry point;
 * its opening and closing (and several statements, including the frees and
 * closing braces) fall outside this chunk. The leading number on each line
 * is an extraction artifact, left untouched here. */
148 struct thread *threads;
149 int fd, n, count, num_threads;
/* X-tiling so the swap-in path has to apply swizzling. */
151 current_tiling_mode = I915_TILING_X;
153 igt_skip_on_simulation();
154 intel_purge_vm_caches();
157 /* need slightly more than available memory */
158 count = intel_get_total_ram_mb() + intel_get_total_swap_mb() / 4;
159 bo_handles = calloc(count, sizeof(uint32_t));
160 igt_assert(bo_handles);
/* One checking thread per fence register; presumably fd is opened in a
 * line not visible here — confirm against the full file. */
162 num_threads = gem_available_fences(fd);
163 threads = calloc(num_threads, sizeof(struct thread));
166 igt_log(IGT_LOG_INFO,
167 "Using %d 1MiB objects (available RAM: %ld/%ld, swap: %ld)\n",
169 (long)intel_get_avail_ram_mb(),
170 (long)intel_get_total_ram_mb(),
171 (long)intel_get_total_swap_mb());
/* Skip rather than OOM-kill the box when RAM+swap cannot hold the set. */
172 igt_require(intel_check_memory(count, 1024*1024, CHECK_RAM | CHECK_SWAP));
174 for (n = 0; n < count; n++) {
175 bo_handles[n] = create_bo_and_fill(fd);
176 /* Not enough mmap address space possible. */
177 igt_require(bo_handles[n]);
/* First pass: single-threaded check of every buffer. */
180 thread_init(&threads[0], fd, count);
181 thread_run(&threads[0]);
182 thread_fini(&threads[0]);
184 /* Once more with threads */
185 igt_subtest("threaded") {
186 for (n = 0; n < num_threads; n++) {
187 thread_init(&threads[n], fd, count);
188 pthread_create(&threads[n].thread, NULL, thread_run, &threads[n]);
190 for (n = 0; n < num_threads; n++) {
191 pthread_join(threads[n].thread, NULL);
192 thread_fini(&threads[n]);