nvkm_subdev.mutex is going away.  Give the comptag allocator its own lock
instead: fb->tags now bundles the tag mm with a private mutex that also
protects nvkm_memory::tags.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
Reviewed-by: Lyude Paul <lyude@redhat.com>
struct nvkm_blob vpr_scrubber;
struct nvkm_ram *ram;
- struct nvkm_mm tags;
+
+ struct {
+ struct mutex mutex; /* protects mm and nvkm_memory::tags */
+ struct nvkm_mm mm;
+ } tags;
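Everything below follows from this structural change: each path that used to serialize comptag-mm access with the subdev-wide lock now takes the allocator's own mutex. A minimal sketch of the resulting pattern, assuming a hypothetical helper name (only the fb->tags fields and nvkm_mm_head() come from the patch; the rest is illustrative):

  /* Illustrative sketch, not part of the patch. */
  static int
  example_grab_comptags(struct nvkm_fb *fb, u32 nr, struct nvkm_mm_node **pmn)
  {
  	int ret;
  	mutex_lock(&fb->tags.mutex);	/* was &fb->subdev.mutex */
  	ret = nvkm_mm_head(&fb->tags.mm, 0, 1, nr, nr, 1, pmn);
  	mutex_unlock(&fb->tags.mutex);
  	return ret;
  }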
struct {
struct nvkm_fb_tile region[16];
struct nvkm_fb *fb = device->fb;
struct nvkm_tags *tags = *ptags;
if (tags) {
- mutex_lock(&fb->subdev.mutex);
+ mutex_lock(&fb->tags.mutex);
if (refcount_dec_and_test(&tags->refcount)) {
- nvkm_mm_free(&fb->tags, &tags->mn);
+ nvkm_mm_free(&fb->tags.mm, &tags->mn);
kfree(memory->tags);
memory->tags = NULL;
}
- mutex_unlock(&fb->subdev.mutex);
+ mutex_unlock(&fb->tags.mutex);
*ptags = NULL;
}
}
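The private lock also covers the nvkm_memory::tags back-pointer, so the free on the final put cannot race a concurrent lookup. A sketch of the interleaving the mutex serializes (the CPU labels and the tags_put()/tags_get() shorthand are illustrative, not names taken from this patch):

  /*
   * CPU0: final put                       CPU1: concurrent get
   * mutex_lock(&fb->tags.mutex)
   * refcount_dec_and_test() -> true
   * nvkm_mm_free(&fb->tags.mm, &tags->mn)
   * kfree(memory->tags)
   * memory->tags = NULL
   * mutex_unlock(&fb->tags.mutex)
   *                                       mutex_lock(&fb->tags.mutex)
   *                                       memory->tags == NULL -> fresh allocation
   *                                       mutex_unlock(&fb->tags.mutex)
   */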
struct nvkm_fb *fb = device->fb;
struct nvkm_tags *tags;
- mutex_lock(&fb->subdev.mutex);
+ mutex_lock(&fb->tags.mutex);
if ((tags = memory->tags)) {
/* If comptags exist for the memory, but a different amount
* than requested, the buffer is being mapped with settings
* that are incompatible with existing mappings.
*/
if (tags->mn && tags->mn->length != nr) {
- mutex_unlock(&fb->subdev.mutex);
+ mutex_unlock(&fb->tags.mutex);
return -EINVAL;
}
refcount_inc(&tags->refcount);
- mutex_unlock(&fb->subdev.mutex);
+ mutex_unlock(&fb->tags.mutex);
*ptags = tags;
return 0;
}
if (!(tags = kmalloc(sizeof(*tags), GFP_KERNEL))) {
- mutex_unlock(&fb->subdev.mutex);
+ mutex_unlock(&fb->tags.mutex);
return -ENOMEM;
}
- if (!nvkm_mm_head(&fb->tags, 0, 1, nr, nr, 1, &tags->mn)) {
+ if (!nvkm_mm_head(&fb->tags.mm, 0, 1, nr, nr, 1, &tags->mn)) {
if (clr)
clr(device, tags->mn->offset, tags->mn->length);
} else {
refcount_set(&tags->refcount, 1);
*ptags = memory->tags = tags;
- mutex_unlock(&fb->subdev.mutex);
+ mutex_unlock(&fb->tags.mutex);
return 0;
}
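From a caller's point of view nothing changes; the locking stays internal. A usage sketch, assuming these hunks are nouveau's nvkm_memory_tags_get()/nvkm_memory_tags_put() helpers (the function names themselves aren't visible in the hunks, so treat them as an assumption) and that, as in current nouveau, failing to find free comptags is not an error but yields an entry with a NULL mn:

  /* Hypothetical caller, for illustration only. */
  struct nvkm_tags *tags = NULL;
  int ret = nvkm_memory_tags_get(memory, device, nr, NULL, &tags);
  if (ret)
  	return ret;	/* -EINVAL on a size conflict, -ENOMEM on OOM */
  if (tags->mn) {
  	/* program [tags->mn->offset, +tags->mn->length) into the HW */
  } else {
  	/* no comptags left: fall back to an uncompressed mapping */
  }
  /* ... and when the mapping is torn down ... */
  nvkm_memory_tags_put(memory, device, &tags);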
nvkm_debug(subdev, "%d comptags\n", tags);
}
- return nvkm_mm_init(&fb->tags, 0, 0, tags, 1);
+ return nvkm_mm_init(&fb->tags.mm, 0, 0, tags, 1);
}
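The default pool is created exactly once here, sized by the tag count reported just above (the %d in the debug message). For reference, the nvkm_mm_init() arguments map as: heap 0, first tag offset 0, length = number of comptag lines, block size 1, so later nvkm_mm_head() calls can carve out any contiguous run of lines. With an illustrative count this is:

  nvkm_mm_init(&fb->tags.mm, 0, 0, 0x10000 /* illustrative count */, 1);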
static int
for (i = 0; i < fb->tile.regions; i++)
fb->func->tile.fini(fb, i, &fb->tile.region[i]);
- nvkm_mm_fini(&fb->tags);
+ nvkm_mm_fini(&fb->tags.mm);
+ mutex_destroy(&fb->tags.mutex);
+
nvkm_ram_del(&fb->ram);
nvkm_blob_dtor(&fb->vpr_scrubber);
nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev);
fb->func = func;
fb->tile.regions = fb->func->tile.regions;
- fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage",
- fb->func->default_bigpage);
+ fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", fb->func->default_bigpage);
+ mutex_init(&fb->tags.mutex);
}
int
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (!(flags & 2)) tile->zcomp = 0x00000000; /* Z16 */
else tile->zcomp = 0x04000000; /* Z24S8 */
tile->zcomp |= tile->tag->offset;
tile->limit = 0;
tile->pitch = 0;
tile->zcomp = 0;
- nvkm_mm_free(&fb->tags, &tile->tag);
+ nvkm_mm_free(&fb->tags.mm, &tile->tag);
}
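The pre-Fermi tile paths keep their sizing math and only switch which structure the mm lives in. A worked example with illustrative numbers (not taken from the patch): a 16MiB (0x1000000 byte) tile region on a board with fb->ram->parts == 2 gives

  tiles = DIV_ROUND_UP(0x1000000, 0x40)  = 0x40000
  tags  = round_up(0x40000 / 2, 0x40)    = 0x20000

so 0x20000 contiguous tag lines are requested from fb->tags.mm and the base offset of that range is folded into tile->zcomp.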
void
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (!(flags & 2)) tile->zcomp = 0x00100000; /* Z16 */
else tile->zcomp = 0x00200000; /* Z24S8 */
tile->zcomp |= tile->tag->offset;
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x01000000; /* Z16 */
else tile->zcomp |= 0x02000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x04000000; /* Z16 */
else tile->zcomp |= 0x08000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
{
u32 tiles = DIV_ROUND_UP(size, 0x40);
u32 tags = round_up(tiles / fb->ram->parts, 0x40);
- if (!nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ if (!nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
if (flags & 2) tile->zcomp |= 0x10000000; /* Z16 */
else tile->zcomp |= 0x20000000; /* Z24S8 */
tile->zcomp |= ((tile->tag->offset ) >> 6);
u32 tiles = DIV_ROUND_UP(size, 0x80);
u32 tags = round_up(tiles / fb->ram->parts, 0x100);
if ( (flags & 2) &&
- !nvkm_mm_head(&fb->tags, 0, 1, tags, tags, 1, &tile->tag)) {
+ !nvkm_mm_head(&fb->tags.mm, 0, 1, tags, tags, 1, &tile->tag)) {
tile->zcomp = 0x28000000; /* Z24S8_SPLIT_GRAD */
tile->zcomp |= ((tile->tag->offset ) >> 8);
tile->zcomp |= ((tile->tag->offset + tags - 1) >> 8) << 13;
}
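This variant uses the coarser 0x80-byte tile granularity, rounds the per-partition tag count up to 0x100, and only attempts an allocation for the Z24S8_SPLIT_GRAD case (flags & 2). Same illustrative region as above (16MiB, 2 partitions):

  tiles = DIV_ROUND_UP(0x1000000, 0x80)  = 0x20000
  tags  = round_up(0x20000 / 2, 0x100)   = 0x10000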
mm_init:
- nvkm_mm_fini(&fb->tags);
- return nvkm_mm_init(&fb->tags, 0, 0, ltc->num_tags, 1);
+ nvkm_mm_fini(&fb->tags.mm);
+ return nvkm_mm_init(&fb->tags.mm, 0, 0, ltc->num_tags, 1);
}
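This is presumably the LTC oneinit path, given ltc->num_tags: the default pool built at fb oneinit time is discarded and rebuilt to the tag count the LTC reports, so on those boards the comptag get path above allocates from the LTC-sized pool. With an arbitrary illustrative count this is equivalent to:

  nvkm_mm_fini(&fb->tags.mm);
  nvkm_mm_init(&fb->tags.mm, 0, 0, 0x20000 /* ltc->num_tags, illustrative */, 1);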
int