amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
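+/*
+ * Slab cache for amd_sched_fence objects, shared by every scheduler
+ * instance; sched_fence_slab_ref counts the instances using it.
+ */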
+struct kmem_cache *sched_fence_slab;
+atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
+
/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
atomic_set(&sched->hw_rq_count, 0);
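+ /* The first scheduler instance to initialize creates the shared slab */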
+ if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
+   sched_fence_slab = kmem_cache_create(
+     "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+     SLAB_HWCACHE_ALIGN, NULL);
+   if (!sched_fence_slab) {
+     /* Drop the reference taken above so a later init can retry */
+     atomic_dec(&sched_fence_slab_ref);
+     return -ENOMEM;
+   }
+ }
/* Each scheduler will run on a separate kernel thread */
sched->thread = kthread_run(amd_sched_main, sched, sched->name);
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
if (sched->thread)
kthread_stop(sched->thread);
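+ /* The last scheduler instance to be torn down destroys the shared slab */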
+ if (atomic_dec_and_test(&sched_fence_slab_ref))
+ kmem_cache_destroy(sched_fence_slab);
}
struct amd_gpu_scheduler;
struct amd_sched_rq;
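+/* Fence slab shared by all schedulers (defined in the scheduler core above) */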
+extern struct kmem_cache *sched_fence_slab;
+extern atomic_t sched_fence_slab_ref;
+
/**
* A scheduler entity is a wrapper around a job queue or a group
* of other entities. Entities take turns emitting jobs from their
struct amd_sched_fence *fence = NULL;
unsigned seq;
- fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL);
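+ /* Allocate from the dedicated slab instead of the generic heap */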
+ fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
if (fence == NULL)
return NULL;
fence->owner = owner;
return true;
}
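+/*
+ * .release callback, invoked once the fence's last reference is dropped;
+ * it returns the fence memory to the shared slab.
+ */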
+static void amd_sched_fence_release(struct fence *f)
+{
+ struct amd_sched_fence *fence = to_amd_sched_fence(f);
+ kmem_cache_free(sched_fence_slab, fence);
+}
+
const struct fence_ops amd_sched_fence_ops = {
.get_driver_name = amd_sched_fence_get_driver_name,
.get_timeline_name = amd_sched_fence_get_timeline_name,
.enable_signaling = amd_sched_fence_enable_signaling,
.signaled = NULL,
.wait = fence_default_wait,
- .release = NULL,
+ .release = amd_sched_fence_release,
};