diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:49:45 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:49:45 +0000 |
commit | 2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch) | |
tree | 848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/gpu/drm/nouveau/nvkm/core | |
parent | Initial commit. (diff) | |
download | linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip |
Adding upstream version 6.1.76.upstream/6.1.76
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/core')
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/Kbuild | 16 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/client.c | 307 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/engine.c | 204 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/enum.c | 56 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/event.c | 100 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/firmware.c | 109 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c | 278 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/ioctl.c | 459 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/memory.c | 154 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/mm.c | 307 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/notify.c | 163 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/object.c | 336 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/oproxy.c | 209 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/option.c | 139 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/ramht.c | 162 | ||||
-rw-r--r-- | drivers/gpu/drm/nouveau/nvkm/core/subdev.c | 194 |
16 files changed, 3193 insertions, 0 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/Kbuild b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild new file mode 100644 index 000000000..2b471ab58 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/core/Kbuild @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: MIT +nvkm-y := nvkm/core/client.o +nvkm-y += nvkm/core/engine.o +nvkm-y += nvkm/core/enum.o +nvkm-y += nvkm/core/event.o +nvkm-y += nvkm/core/firmware.o +nvkm-y += nvkm/core/gpuobj.o +nvkm-y += nvkm/core/ioctl.o +nvkm-y += nvkm/core/memory.o +nvkm-y += nvkm/core/mm.o +nvkm-y += nvkm/core/notify.o +nvkm-y += nvkm/core/object.o +nvkm-y += nvkm/core/oproxy.o +nvkm-y += nvkm/core/option.o +nvkm-y += nvkm/core/ramht.o +nvkm-y += nvkm/core/subdev.o diff --git a/drivers/gpu/drm/nouveau/nvkm/core/client.c b/drivers/gpu/drm/nouveau/nvkm/core/client.c new file mode 100644 index 000000000..0c8c55c73 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/core/client.c @@ -0,0 +1,307 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#include <core/client.h> +#include <core/device.h> +#include <core/notify.h> +#include <core/option.h> + +#include <nvif/class.h> +#include <nvif/event.h> +#include <nvif/if0000.h> +#include <nvif/unpack.h> + +static int +nvkm_uclient_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, + struct nvkm_object **pobject) +{ + union { + struct nvif_client_v0 v0; + } *args = argv; + struct nvkm_client *client; + int ret = -ENOSYS; + + if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))){ + args->v0.name[sizeof(args->v0.name) - 1] = 0; + ret = nvkm_client_new(args->v0.name, args->v0.device, NULL, + NULL, oclass->client->ntfy, &client); + if (ret) + return ret; + } else + return ret; + + client->object.client = oclass->client; + client->object.handle = oclass->handle; + client->object.route = oclass->route; + client->object.token = oclass->token; + client->object.object = oclass->object; + client->debug = oclass->client->debug; + *pobject = &client->object; + return 0; +} + +static const struct nvkm_sclass +nvkm_uclient_sclass = { + .oclass = NVIF_CLASS_CLIENT, + .minver = 0, + .maxver = 0, + .ctor = nvkm_uclient_new, +}; + +struct nvkm_client_notify { + struct nvkm_client *client; + struct nvkm_notify n; + u8 version; + u8 size; + union { + struct nvif_notify_rep_v0 v0; + } rep; +}; + +static int +nvkm_client_notify(struct nvkm_notify *n) +{ + struct nvkm_client_notify *notify = container_of(n, typeof(*notify), n); + struct nvkm_client *client = notify->client; + return client->ntfy(¬ify->rep, notify->size, n->data, n->size); +} + +int +nvkm_client_notify_put(struct nvkm_client *client, int index) +{ + if (index < ARRAY_SIZE(client->notify)) 
{ + if (client->notify[index]) { + nvkm_notify_put(&client->notify[index]->n); + return 0; + } + } + return -ENOENT; +} + +int +nvkm_client_notify_get(struct nvkm_client *client, int index) +{ + if (index < ARRAY_SIZE(client->notify)) { + if (client->notify[index]) { + nvkm_notify_get(&client->notify[index]->n); + return 0; + } + } + return -ENOENT; +} + +int +nvkm_client_notify_del(struct nvkm_client *client, int index) +{ + if (index < ARRAY_SIZE(client->notify)) { + if (client->notify[index]) { + nvkm_notify_fini(&client->notify[index]->n); + kfree(client->notify[index]); + client->notify[index] = NULL; + return 0; + } + } + return -ENOENT; +} + +int +nvkm_client_notify_new(struct nvkm_object *object, + struct nvkm_event *event, void *data, u32 size) +{ + struct nvkm_client *client = object->client; + struct nvkm_client_notify *notify; + union { + struct nvif_notify_req_v0 v0; + } *req = data; + u8 index, reply; + int ret = -ENOSYS; + + for (index = 0; index < ARRAY_SIZE(client->notify); index++) { + if (!client->notify[index]) + break; + } + + if (index == ARRAY_SIZE(client->notify)) + return -ENOSPC; + + notify = kzalloc(sizeof(*notify), GFP_KERNEL); + if (!notify) + return -ENOMEM; + + nvif_ioctl(object, "notify new size %d\n", size); + if (!(ret = nvif_unpack(ret, &data, &size, req->v0, 0, 0, true))) { + nvif_ioctl(object, "notify new vers %d reply %d route %02x " + "token %llx\n", req->v0.version, + req->v0.reply, req->v0.route, req->v0.token); + notify->version = req->v0.version; + notify->size = sizeof(notify->rep.v0); + notify->rep.v0.version = req->v0.version; + notify->rep.v0.route = req->v0.route; + notify->rep.v0.token = req->v0.token; + reply = req->v0.reply; + } + + if (ret == 0) { + ret = nvkm_notify_init(object, event, nvkm_client_notify, + false, data, size, reply, ¬ify->n); + if (ret == 0) { + client->notify[index] = notify; + notify->client = client; + return index; + } + } + + kfree(notify); + return ret; +} + +static const struct 
nvkm_object_func nvkm_client; +struct nvkm_client * +nvkm_client_search(struct nvkm_client *client, u64 handle) +{ + struct nvkm_object *object; + + object = nvkm_object_search(client, handle, &nvkm_client); + if (IS_ERR(object)) + return (void *)object; + + return nvkm_client(object); +} + +static int +nvkm_client_mthd_devlist(struct nvkm_client *client, void *data, u32 size) +{ + union { + struct nvif_client_devlist_v0 v0; + } *args = data; + int ret = -ENOSYS; + + nvif_ioctl(&client->object, "client devlist size %d\n", size); + if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) { + nvif_ioctl(&client->object, "client devlist vers %d count %d\n", + args->v0.version, args->v0.count); + if (size == sizeof(args->v0.device[0]) * args->v0.count) { + ret = nvkm_device_list(args->v0.device, args->v0.count); + if (ret >= 0) { + args->v0.count = ret; + ret = 0; + } + } else { + ret = -EINVAL; + } + } + + return ret; +} + +static int +nvkm_client_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) +{ + struct nvkm_client *client = nvkm_client(object); + switch (mthd) { + case NVIF_CLIENT_V0_DEVLIST: + return nvkm_client_mthd_devlist(client, data, size); + default: + break; + } + return -EINVAL; +} + +static int +nvkm_client_child_new(const struct nvkm_oclass *oclass, + void *data, u32 size, struct nvkm_object **pobject) +{ + return oclass->base.ctor(oclass, data, size, pobject); +} + +static int +nvkm_client_child_get(struct nvkm_object *object, int index, + struct nvkm_oclass *oclass) +{ + const struct nvkm_sclass *sclass; + + switch (index) { + case 0: sclass = &nvkm_uclient_sclass; break; + case 1: sclass = &nvkm_udevice_sclass; break; + default: + return -EINVAL; + } + + oclass->ctor = nvkm_client_child_new; + oclass->base = *sclass; + return 0; +} + +static int +nvkm_client_fini(struct nvkm_object *object, bool suspend) +{ + struct nvkm_client *client = nvkm_client(object); + const char *name[2] = { "fini", "suspend" }; + int i; + 
nvif_debug(object, "%s notify\n", name[suspend]); + for (i = 0; i < ARRAY_SIZE(client->notify); i++) + nvkm_client_notify_put(client, i); + return 0; +} + +static void * +nvkm_client_dtor(struct nvkm_object *object) +{ + struct nvkm_client *client = nvkm_client(object); + int i; + for (i = 0; i < ARRAY_SIZE(client->notify); i++) + nvkm_client_notify_del(client, i); + return client; +} + +static const struct nvkm_object_func +nvkm_client = { + .dtor = nvkm_client_dtor, + .fini = nvkm_client_fini, + .mthd = nvkm_client_mthd, + .sclass = nvkm_client_child_get, +}; + +int +nvkm_client_new(const char *name, u64 device, const char *cfg, + const char *dbg, + int (*ntfy)(const void *, u32, const void *, u32), + struct nvkm_client **pclient) +{ + struct nvkm_oclass oclass = { .base = nvkm_uclient_sclass }; + struct nvkm_client *client; + + if (!(client = *pclient = kzalloc(sizeof(*client), GFP_KERNEL))) + return -ENOMEM; + oclass.client = client; + + nvkm_object_ctor(&nvkm_client, &oclass, &client->object); + snprintf(client->name, sizeof(client->name), "%s", name); + client->device = device; + client->debug = nvkm_dbgopt(dbg, "CLIENT"); + client->objroot = RB_ROOT; + client->ntfy = ntfy; + INIT_LIST_HEAD(&client->umem); + spin_lock_init(&client->lock); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/core/engine.c b/drivers/gpu/drm/nouveau/nvkm/core/engine.c new file mode 100644 index 000000000..e41a39ae1 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/core/engine.c @@ -0,0 +1,204 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ +#include <core/engine.h> +#include <core/device.h> +#include <core/option.h> + +#include <subdev/fb.h> + +bool +nvkm_engine_chsw_load(struct nvkm_engine *engine) +{ + if (engine->func->chsw_load) + return engine->func->chsw_load(engine); + return false; +} + +void +nvkm_engine_unref(struct nvkm_engine **pengine) +{ + struct nvkm_engine *engine = *pengine; + if (engine) { + if (refcount_dec_and_mutex_lock(&engine->use.refcount, &engine->use.mutex)) { + nvkm_subdev_fini(&engine->subdev, false); + engine->use.enabled = false; + mutex_unlock(&engine->use.mutex); + } + *pengine = NULL; + } +} + +struct nvkm_engine * +nvkm_engine_ref(struct nvkm_engine *engine) +{ + int ret; + if (engine) { + if (!refcount_inc_not_zero(&engine->use.refcount)) { + mutex_lock(&engine->use.mutex); + if (!refcount_inc_not_zero(&engine->use.refcount)) { + engine->use.enabled = true; + if ((ret = nvkm_subdev_init(&engine->subdev))) { + engine->use.enabled = false; + mutex_unlock(&engine->use.mutex); + return ERR_PTR(ret); + } + refcount_set(&engine->use.refcount, 1); + } + mutex_unlock(&engine->use.mutex); + } + } + return engine; +} + +void +nvkm_engine_tile(struct nvkm_engine *engine, int region) +{ + struct nvkm_fb *fb = engine->subdev.device->fb; + if (engine->func->tile) + engine->func->tile(engine, region, &fb->tile.region[region]); +} + +static void +nvkm_engine_intr(struct nvkm_subdev *subdev) +{ + struct nvkm_engine *engine = nvkm_engine(subdev); + if (engine->func->intr) + engine->func->intr(engine); +} + +static int +nvkm_engine_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data) +{ + struct nvkm_engine *engine = nvkm_engine(subdev); + if (engine->func->info) { + if (!IS_ERR((engine = nvkm_engine_ref(engine)))) { + int ret = engine->func->info(engine, mthd, data); + nvkm_engine_unref(&engine); + return ret; + } + return PTR_ERR(engine); + } + return -ENOSYS; +} + +static int +nvkm_engine_fini(struct nvkm_subdev *subdev, bool suspend) +{ + struct 
nvkm_engine *engine = nvkm_engine(subdev); + if (engine->func->fini) + return engine->func->fini(engine, suspend); + return 0; +} + +static int +nvkm_engine_init(struct nvkm_subdev *subdev) +{ + struct nvkm_engine *engine = nvkm_engine(subdev); + struct nvkm_fb *fb = subdev->device->fb; + int ret = 0, i; + s64 time; + + if (!engine->use.enabled) { + nvkm_trace(subdev, "init skipped, engine has no users\n"); + return ret; + } + + if (engine->func->oneinit && !engine->subdev.oneinit) { + nvkm_trace(subdev, "one-time init running...\n"); + time = ktime_to_us(ktime_get()); + ret = engine->func->oneinit(engine); + if (ret) { + nvkm_trace(subdev, "one-time init failed, %d\n", ret); + return ret; + } + + engine->subdev.oneinit = true; + time = ktime_to_us(ktime_get()) - time; + nvkm_trace(subdev, "one-time init completed in %lldus\n", time); + } + + if (engine->func->init) + ret = engine->func->init(engine); + + for (i = 0; fb && i < fb->tile.regions; i++) + nvkm_engine_tile(engine, i); + return ret; +} + +static int +nvkm_engine_preinit(struct nvkm_subdev *subdev) +{ + struct nvkm_engine *engine = nvkm_engine(subdev); + if (engine->func->preinit) + engine->func->preinit(engine); + return 0; +} + +static void * +nvkm_engine_dtor(struct nvkm_subdev *subdev) +{ + struct nvkm_engine *engine = nvkm_engine(subdev); + if (engine->func->dtor) + return engine->func->dtor(engine); + mutex_destroy(&engine->use.mutex); + return engine; +} + +const struct nvkm_subdev_func +nvkm_engine = { + .dtor = nvkm_engine_dtor, + .preinit = nvkm_engine_preinit, + .init = nvkm_engine_init, + .fini = nvkm_engine_fini, + .info = nvkm_engine_info, + .intr = nvkm_engine_intr, +}; + +int +nvkm_engine_ctor(const struct nvkm_engine_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, bool enable, struct nvkm_engine *engine) +{ + nvkm_subdev_ctor(&nvkm_engine, device, type, inst, &engine->subdev); + engine->func = func; + refcount_set(&engine->use.refcount, 0); + 
mutex_init(&engine->use.mutex); + + if (!nvkm_boolopt(device->cfgopt, engine->subdev.name, enable)) { + nvkm_debug(&engine->subdev, "disabled\n"); + return -ENODEV; + } + + spin_lock_init(&engine->lock); + return 0; +} + +int +nvkm_engine_new_(const struct nvkm_engine_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, bool enable, + struct nvkm_engine **pengine) +{ + if (!(*pengine = kzalloc(sizeof(**pengine), GFP_KERNEL))) + return -ENOMEM; + return nvkm_engine_ctor(func, device, type, inst, enable, *pengine); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/core/enum.c b/drivers/gpu/drm/nouveau/nvkm/core/enum.c new file mode 100644 index 000000000..b9581feb2 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/core/enum.c @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2010 Nouveau Project + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include <core/enum.h> + +const struct nvkm_enum * +nvkm_enum_find(const struct nvkm_enum *en, u32 value) +{ + while (en->name) { + if (en->value == value) + return en; + en++; + } + + return NULL; +} + +void +nvkm_snprintbf(char *data, int size, const struct nvkm_bitfield *bf, u32 value) +{ + bool space = false; + while (size >= 1 && bf->name) { + if (value & bf->mask) { + int this = snprintf(data, size, "%s%s", + space ? " " : "", bf->name); + size -= this; + data += this; + space = true; + } + bf++; + } + data[0] = '\0'; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/core/event.c b/drivers/gpu/drm/nouveau/nvkm/core/event.c new file mode 100644 index 000000000..006618d77 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/core/event.c @@ -0,0 +1,100 @@ +/* + * Copyright 2013-2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include <core/event.h> +#include <core/notify.h> + +void +nvkm_event_put(struct nvkm_event *event, u32 types, int index) +{ + assert_spin_locked(&event->refs_lock); + while (types) { + int type = __ffs(types); types &= ~(1 << type); + if (--event->refs[index * event->types_nr + type] == 0) { + if (event->func->fini) + event->func->fini(event, 1 << type, index); + } + } +} + +void +nvkm_event_get(struct nvkm_event *event, u32 types, int index) +{ + assert_spin_locked(&event->refs_lock); + while (types) { + int type = __ffs(types); types &= ~(1 << type); + if (++event->refs[index * event->types_nr + type] == 1) { + if (event->func->init) + event->func->init(event, 1 << type, index); + } + } +} + +void +nvkm_event_send(struct nvkm_event *event, u32 types, int index, + void *data, u32 size) +{ + struct nvkm_notify *notify; + unsigned long flags; + + if (!event->refs || WARN_ON(index >= event->index_nr)) + return; + + spin_lock_irqsave(&event->list_lock, flags); + list_for_each_entry(notify, &event->list, head) { + if (notify->index == index && (notify->types & types)) { + if (event->func->send) { + event->func->send(data, size, notify); + continue; + } + nvkm_notify_send(notify, data, size); + } + } + spin_unlock_irqrestore(&event->list_lock, flags); +} + +void +nvkm_event_fini(struct nvkm_event *event) +{ + if (event->refs) { + kfree(event->refs); + event->refs = NULL; + } +} + +int +nvkm_event_init(const struct nvkm_event_func *func, int types_nr, int index_nr, + struct nvkm_event *event) +{ + event->refs = kzalloc(array3_size(index_nr, types_nr, + sizeof(*event->refs)), + GFP_KERNEL); + if (!event->refs) + return -ENOMEM; + + event->func = func; + event->types_nr = types_nr; + event->index_nr = index_nr; + spin_lock_init(&event->refs_lock); + spin_lock_init(&event->list_lock); + INIT_LIST_HEAD(&event->list); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c new file mode 100644 index 
000000000..ca1f8463c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ */ +#include <core/device.h> +#include <core/firmware.h> + +int +nvkm_firmware_load_name(const struct nvkm_subdev *subdev, const char *base, + const char *name, int ver, const struct firmware **pfw) +{ + char path[64]; + int ret; + + snprintf(path, sizeof(path), "%s%s", base, name); + ret = nvkm_firmware_get(subdev, path, ver, pfw); + if (ret < 0) + return ret; + + return 0; +} + +int +nvkm_firmware_load_blob(const struct nvkm_subdev *subdev, const char *base, + const char *name, int ver, struct nvkm_blob *blob) +{ + const struct firmware *fw; + int ret; + + ret = nvkm_firmware_load_name(subdev, base, name, ver, &fw); + if (ret == 0) { + blob->data = kmemdup(fw->data, fw->size, GFP_KERNEL); + blob->size = fw->size; + nvkm_firmware_put(fw); + if (!blob->data) + return -ENOMEM; + } + + return ret; +} + +/** + * nvkm_firmware_get - load firmware from the official nvidia/chip/ directory + * @subdev: subdevice that will use that firmware + * @fwname: name of firmware file to load + * @ver: firmware version to load + * @fw: firmware structure to load to + * + * Use this function to load firmware files in the form nvidia/chip/fwname.bin. + * Firmware files released by NVIDIA will always follow this format. 
+ */ +int +nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname, int ver, + const struct firmware **fw) +{ + struct nvkm_device *device = subdev->device; + char f[64]; + char cname[16]; + int i; + + /* Convert device name to lowercase */ + strncpy(cname, device->chip->name, sizeof(cname)); + cname[sizeof(cname) - 1] = '\0'; + i = strlen(cname); + while (i) { + --i; + cname[i] = tolower(cname[i]); + } + + if (ver != 0) + snprintf(f, sizeof(f), "nvidia/%s/%s-%d.bin", cname, fwname, ver); + else + snprintf(f, sizeof(f), "nvidia/%s/%s.bin", cname, fwname); + + if (!firmware_request_nowarn(fw, f, device->dev)) { + nvkm_debug(subdev, "firmware \"%s\" loaded - %zu byte(s)\n", + f, (*fw)->size); + return 0; + } + + nvkm_debug(subdev, "firmware \"%s\" unavailable\n", f); + return -ENOENT; +} + +/* + * nvkm_firmware_put - release firmware loaded with nvkm_firmware_get + */ +void +nvkm_firmware_put(const struct firmware *fw) +{ + release_firmware(fw); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c new file mode 100644 index 000000000..d6de2b3ed --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/core/gpuobj.c @@ -0,0 +1,278 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#include <core/gpuobj.h> +#include <core/engine.h> + +#include <subdev/instmem.h> +#include <subdev/bar.h> +#include <subdev/mmu.h> + +/* fast-path, where backend is able to provide direct pointer to memory */ +static u32 +nvkm_gpuobj_rd32_fast(struct nvkm_gpuobj *gpuobj, u32 offset) +{ + return ioread32_native(gpuobj->map + offset); +} + +static void +nvkm_gpuobj_wr32_fast(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data) +{ + iowrite32_native(data, gpuobj->map + offset); +} + +/* accessor functions for gpuobjs allocated directly from instmem */ +static int +nvkm_gpuobj_heap_map(struct nvkm_gpuobj *gpuobj, u64 offset, + struct nvkm_vmm *vmm, struct nvkm_vma *vma, + void *argv, u32 argc) +{ + return nvkm_memory_map(gpuobj->memory, offset, vmm, vma, argv, argc); +} + +static u32 +nvkm_gpuobj_heap_rd32(struct nvkm_gpuobj *gpuobj, u32 offset) +{ + return nvkm_ro32(gpuobj->memory, offset); +} + +static void +nvkm_gpuobj_heap_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data) +{ + nvkm_wo32(gpuobj->memory, offset, data); +} + +static const struct nvkm_gpuobj_func nvkm_gpuobj_heap; +static void +nvkm_gpuobj_heap_release(struct nvkm_gpuobj *gpuobj) +{ + gpuobj->func = &nvkm_gpuobj_heap; + nvkm_done(gpuobj->memory); +} + +static const struct nvkm_gpuobj_func +nvkm_gpuobj_heap_fast = { + .release = nvkm_gpuobj_heap_release, + .rd32 = nvkm_gpuobj_rd32_fast, + .wr32 = nvkm_gpuobj_wr32_fast, + .map = nvkm_gpuobj_heap_map, +}; + +static const struct 
nvkm_gpuobj_func +nvkm_gpuobj_heap_slow = { + .release = nvkm_gpuobj_heap_release, + .rd32 = nvkm_gpuobj_heap_rd32, + .wr32 = nvkm_gpuobj_heap_wr32, + .map = nvkm_gpuobj_heap_map, +}; + +static void * +nvkm_gpuobj_heap_acquire(struct nvkm_gpuobj *gpuobj) +{ + gpuobj->map = nvkm_kmap(gpuobj->memory); + if (likely(gpuobj->map)) + gpuobj->func = &nvkm_gpuobj_heap_fast; + else + gpuobj->func = &nvkm_gpuobj_heap_slow; + return gpuobj->map; +} + +static const struct nvkm_gpuobj_func +nvkm_gpuobj_heap = { + .acquire = nvkm_gpuobj_heap_acquire, + .map = nvkm_gpuobj_heap_map, +}; + +/* accessor functions for gpuobjs sub-allocated from a parent gpuobj */ +static int +nvkm_gpuobj_map(struct nvkm_gpuobj *gpuobj, u64 offset, + struct nvkm_vmm *vmm, struct nvkm_vma *vma, + void *argv, u32 argc) +{ + return nvkm_memory_map(gpuobj->parent, gpuobj->node->offset + offset, + vmm, vma, argv, argc); +} + +static u32 +nvkm_gpuobj_rd32(struct nvkm_gpuobj *gpuobj, u32 offset) +{ + return nvkm_ro32(gpuobj->parent, gpuobj->node->offset + offset); +} + +static void +nvkm_gpuobj_wr32(struct nvkm_gpuobj *gpuobj, u32 offset, u32 data) +{ + nvkm_wo32(gpuobj->parent, gpuobj->node->offset + offset, data); +} + +static const struct nvkm_gpuobj_func nvkm_gpuobj_func; +static void +nvkm_gpuobj_release(struct nvkm_gpuobj *gpuobj) +{ + gpuobj->func = &nvkm_gpuobj_func; + nvkm_done(gpuobj->parent); +} + +static const struct nvkm_gpuobj_func +nvkm_gpuobj_fast = { + .release = nvkm_gpuobj_release, + .rd32 = nvkm_gpuobj_rd32_fast, + .wr32 = nvkm_gpuobj_wr32_fast, + .map = nvkm_gpuobj_map, +}; + +static const struct nvkm_gpuobj_func +nvkm_gpuobj_slow = { + .release = nvkm_gpuobj_release, + .rd32 = nvkm_gpuobj_rd32, + .wr32 = nvkm_gpuobj_wr32, + .map = nvkm_gpuobj_map, +}; + +static void * +nvkm_gpuobj_acquire(struct nvkm_gpuobj *gpuobj) +{ + gpuobj->map = nvkm_kmap(gpuobj->parent); + if (likely(gpuobj->map)) { + gpuobj->map = (u8 *)gpuobj->map + gpuobj->node->offset; + gpuobj->func = &nvkm_gpuobj_fast; + } 
else { + gpuobj->func = &nvkm_gpuobj_slow; + } + return gpuobj->map; +} + +static const struct nvkm_gpuobj_func +nvkm_gpuobj_func = { + .acquire = nvkm_gpuobj_acquire, + .map = nvkm_gpuobj_map, +}; + +static int +nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero, + struct nvkm_gpuobj *parent, struct nvkm_gpuobj *gpuobj) +{ + u32 offset; + int ret; + + if (parent) { + if (align >= 0) { + ret = nvkm_mm_head(&parent->heap, 0, 1, size, size, + max(align, 1), &gpuobj->node); + } else { + ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size, + -align, &gpuobj->node); + } + if (ret) + return ret; + + gpuobj->parent = parent; + gpuobj->func = &nvkm_gpuobj_func; + gpuobj->addr = parent->addr + gpuobj->node->offset; + gpuobj->size = gpuobj->node->length; + + if (zero) { + nvkm_kmap(gpuobj); + for (offset = 0; offset < gpuobj->size; offset += 4) + nvkm_wo32(gpuobj, offset, 0x00000000); + nvkm_done(gpuobj); + } + } else { + ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size, + abs(align), zero, &gpuobj->memory); + if (ret) + return ret; + + gpuobj->func = &nvkm_gpuobj_heap; + gpuobj->addr = nvkm_memory_addr(gpuobj->memory); + gpuobj->size = nvkm_memory_size(gpuobj->memory); + } + + return nvkm_mm_init(&gpuobj->heap, 0, 0, gpuobj->size, 1); +} + +void +nvkm_gpuobj_del(struct nvkm_gpuobj **pgpuobj) +{ + struct nvkm_gpuobj *gpuobj = *pgpuobj; + if (gpuobj) { + if (gpuobj->parent) + nvkm_mm_free(&gpuobj->parent->heap, &gpuobj->node); + nvkm_mm_fini(&gpuobj->heap); + nvkm_memory_unref(&gpuobj->memory); + kfree(*pgpuobj); + *pgpuobj = NULL; + } +} + +int +nvkm_gpuobj_new(struct nvkm_device *device, u32 size, int align, bool zero, + struct nvkm_gpuobj *parent, struct nvkm_gpuobj **pgpuobj) +{ + struct nvkm_gpuobj *gpuobj; + int ret; + + if (!(gpuobj = *pgpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL))) + return -ENOMEM; + + ret = nvkm_gpuobj_ctor(device, size, align, zero, parent, gpuobj); + if (ret) + nvkm_gpuobj_del(pgpuobj); + return ret; +} + +/* the 
below is basically only here to support sharing the paged dma object + * for PCI(E)GART on <=nv4x chipsets, and should *not* be expected to work + * anywhere else. + */ + +int +nvkm_gpuobj_wrap(struct nvkm_memory *memory, struct nvkm_gpuobj **pgpuobj) +{ + if (!(*pgpuobj = kzalloc(sizeof(**pgpuobj), GFP_KERNEL))) + return -ENOMEM; + + (*pgpuobj)->addr = nvkm_memory_addr(memory); + (*pgpuobj)->size = nvkm_memory_size(memory); + return 0; +} + +/* Copy 'length' bytes (32-bit words) from a CPU buffer into a gpuobj. */ +void +nvkm_gpuobj_memcpy_to(struct nvkm_gpuobj *dst, u32 dstoffset, void *src, + u32 length) +{ + int i; + + for (i = 0; i < length; i += 4) + nvkm_wo32(dst, dstoffset + i, *(u32 *)(src + i)); +} + +/* Copy 'length' bytes (32-bit words) from a gpuobj into a CPU buffer. */ +void +nvkm_gpuobj_memcpy_from(void *dst, struct nvkm_gpuobj *src, u32 srcoffset, + u32 length) +{ + int i; + + for (i = 0; i < length; i += 4) + ((u32 *)dst)[i / 4] = nvkm_ro32(src, srcoffset + i); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c new file mode 100644 index 000000000..45f920da8 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/core/ioctl.c @@ -0,0 +1,459 @@ +/* + * Copyright 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include <core/ioctl.h> +#include <core/client.h> +#include <core/engine.h> +#include <core/event.h> + +#include <nvif/unpack.h> +#include <nvif/ioctl.h> + +static int +nvkm_ioctl_nop(struct nvkm_client *client, + struct nvkm_object *object, void *data, u32 size) +{ + union { + struct nvif_ioctl_nop_v0 v0; + } *args = data; + int ret = -ENOSYS; + + nvif_ioctl(object, "nop size %d\n", size); + if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { + nvif_ioctl(object, "nop vers %lld\n", args->v0.version); + args->v0.version = NVIF_VERSION_LATEST; + } + + return ret; +} + +static int +nvkm_ioctl_sclass(struct nvkm_client *client, + struct nvkm_object *object, void *data, u32 size) +{ + union { + struct nvif_ioctl_sclass_v0 v0; + } *args = data; + struct nvkm_oclass oclass = { .client = client }; + int ret = -ENOSYS, i = 0; + + nvif_ioctl(object, "sclass size %d\n", size); + if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) { + nvif_ioctl(object, "sclass vers %d count %d\n", + args->v0.version, args->v0.count); + if (size != args->v0.count * sizeof(args->v0.oclass[0])) + return -EINVAL; + + while (object->func->sclass && + object->func->sclass(object, i, &oclass) >= 0) { + if (i < args->v0.count) { + args->v0.oclass[i].oclass = oclass.base.oclass; + args->v0.oclass[i].minver = oclass.base.minver; + args->v0.oclass[i].maxver = oclass.base.maxver; + } + i++; + } + + args->v0.count = i; + } + + return ret; +} + +static int +nvkm_ioctl_new(struct nvkm_client *client, + struct nvkm_object *parent, void *data, u32 size) +{ + union { + struct nvif_ioctl_new_v0 v0; + } *args = data; + struct nvkm_object *object = 
NULL; + struct nvkm_oclass oclass; + int ret = -ENOSYS, i = 0; + + nvif_ioctl(parent, "new size %d\n", size); + if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) { + nvif_ioctl(parent, "new vers %d handle %08x class %08x " + "route %02x token %llx object %016llx\n", + args->v0.version, args->v0.handle, args->v0.oclass, + args->v0.route, args->v0.token, args->v0.object); + } else + return ret; + + if (!parent->func->sclass) { + nvif_ioctl(parent, "cannot have children\n"); + return -EINVAL; + } + + do { + memset(&oclass, 0x00, sizeof(oclass)); + oclass.handle = args->v0.handle; + oclass.route = args->v0.route; + oclass.token = args->v0.token; + oclass.object = args->v0.object; + oclass.client = client; + oclass.parent = parent; + ret = parent->func->sclass(parent, i++, &oclass); + if (ret) + return ret; + } while (oclass.base.oclass != args->v0.oclass); + + if (oclass.engine) { + oclass.engine = nvkm_engine_ref(oclass.engine); + if (IS_ERR(oclass.engine)) + return PTR_ERR(oclass.engine); + } + + ret = oclass.ctor(&oclass, data, size, &object); + nvkm_engine_unref(&oclass.engine); + if (ret == 0) { + ret = nvkm_object_init(object); + if (ret == 0) { + list_add_tail(&object->head, &parent->tree); + if (nvkm_object_insert(object)) { + client->data = object; + return 0; + } + ret = -EEXIST; + } + nvkm_object_fini(object, false); + } + + nvkm_object_del(&object); + return ret; +} + +static int +nvkm_ioctl_del(struct nvkm_client *client, + struct nvkm_object *object, void *data, u32 size) +{ + union { + struct nvif_ioctl_del none; + } *args = data; + int ret = -ENOSYS; + + nvif_ioctl(object, "delete size %d\n", size); + if (!(ret = nvif_unvers(ret, &data, &size, args->none))) { + nvif_ioctl(object, "delete\n"); + nvkm_object_fini(object, false); + nvkm_object_del(&object); + } + + return ret ? 
ret : 1; +} + +static int +nvkm_ioctl_mthd(struct nvkm_client *client, + struct nvkm_object *object, void *data, u32 size) +{ + union { + struct nvif_ioctl_mthd_v0 v0; + } *args = data; + int ret = -ENOSYS; + + nvif_ioctl(object, "mthd size %d\n", size); + if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) { + nvif_ioctl(object, "mthd vers %d mthd %02x\n", + args->v0.version, args->v0.method); + ret = nvkm_object_mthd(object, args->v0.method, data, size); + } + + return ret; +} + + +static int +nvkm_ioctl_rd(struct nvkm_client *client, + struct nvkm_object *object, void *data, u32 size) +{ + union { + struct nvif_ioctl_rd_v0 v0; + } *args = data; + union { + u8 b08; + u16 b16; + u32 b32; + } v; + int ret = -ENOSYS; + + nvif_ioctl(object, "rd size %d\n", size); + if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { + nvif_ioctl(object, "rd vers %d size %d addr %016llx\n", + args->v0.version, args->v0.size, args->v0.addr); + switch (args->v0.size) { + case 1: + ret = nvkm_object_rd08(object, args->v0.addr, &v.b08); + args->v0.data = v.b08; + break; + case 2: + ret = nvkm_object_rd16(object, args->v0.addr, &v.b16); + args->v0.data = v.b16; + break; + case 4: + ret = nvkm_object_rd32(object, args->v0.addr, &v.b32); + args->v0.data = v.b32; + break; + default: + ret = -EINVAL; + break; + } + } + + return ret; +} + +static int +nvkm_ioctl_wr(struct nvkm_client *client, + struct nvkm_object *object, void *data, u32 size) +{ + union { + struct nvif_ioctl_wr_v0 v0; + } *args = data; + int ret = -ENOSYS; + + nvif_ioctl(object, "wr size %d\n", size); + if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { + nvif_ioctl(object, + "wr vers %d size %d addr %016llx data %08x\n", + args->v0.version, args->v0.size, args->v0.addr, + args->v0.data); + } else + return ret; + + switch (args->v0.size) { + case 1: return nvkm_object_wr08(object, args->v0.addr, args->v0.data); + case 2: return nvkm_object_wr16(object, args->v0.addr, 
args->v0.data); + case 4: return nvkm_object_wr32(object, args->v0.addr, args->v0.data); + default: + break; + } + + return -EINVAL; +} + +static int +nvkm_ioctl_map(struct nvkm_client *client, + struct nvkm_object *object, void *data, u32 size) +{ + union { + struct nvif_ioctl_map_v0 v0; + } *args = data; + enum nvkm_object_map type; + int ret = -ENOSYS; + + nvif_ioctl(object, "map size %d\n", size); + if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) { + nvif_ioctl(object, "map vers %d\n", args->v0.version); + ret = nvkm_object_map(object, data, size, &type, + &args->v0.handle, + &args->v0.length); + if (type == NVKM_OBJECT_MAP_IO) + args->v0.type = NVIF_IOCTL_MAP_V0_IO; + else + args->v0.type = NVIF_IOCTL_MAP_V0_VA; + } + + return ret; +} + +static int +nvkm_ioctl_unmap(struct nvkm_client *client, + struct nvkm_object *object, void *data, u32 size) +{ + union { + struct nvif_ioctl_unmap none; + } *args = data; + int ret = -ENOSYS; + + nvif_ioctl(object, "unmap size %d\n", size); + if (!(ret = nvif_unvers(ret, &data, &size, args->none))) { + nvif_ioctl(object, "unmap\n"); + ret = nvkm_object_unmap(object); + } + + return ret; +} + +static int +nvkm_ioctl_ntfy_new(struct nvkm_client *client, + struct nvkm_object *object, void *data, u32 size) +{ + union { + struct nvif_ioctl_ntfy_new_v0 v0; + } *args = data; + struct nvkm_event *event; + int ret = -ENOSYS; + + nvif_ioctl(object, "ntfy new size %d\n", size); + if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) { + nvif_ioctl(object, "ntfy new vers %d event %02x\n", + args->v0.version, args->v0.event); + ret = nvkm_object_ntfy(object, args->v0.event, &event); + if (ret == 0) { + ret = nvkm_client_notify_new(object, event, data, size); + if (ret >= 0) { + args->v0.index = ret; + ret = 0; + } + } + } + + return ret; +} + +static int +nvkm_ioctl_ntfy_del(struct nvkm_client *client, + struct nvkm_object *object, void *data, u32 size) +{ + union { + struct nvif_ioctl_ntfy_del_v0 v0; + } 
*args = data; + int ret = -ENOSYS; + + nvif_ioctl(object, "ntfy del size %d\n", size); + if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { + nvif_ioctl(object, "ntfy del vers %d index %d\n", + args->v0.version, args->v0.index); + ret = nvkm_client_notify_del(client, args->v0.index); + } + + return ret; +} + +static int +nvkm_ioctl_ntfy_get(struct nvkm_client *client, + struct nvkm_object *object, void *data, u32 size) +{ + union { + struct nvif_ioctl_ntfy_get_v0 v0; + } *args = data; + int ret = -ENOSYS; + + nvif_ioctl(object, "ntfy get size %d\n", size); + if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { + nvif_ioctl(object, "ntfy get vers %d index %d\n", + args->v0.version, args->v0.index); + ret = nvkm_client_notify_get(client, args->v0.index); + } + + return ret; +} + +static int +nvkm_ioctl_ntfy_put(struct nvkm_client *client, + struct nvkm_object *object, void *data, u32 size) +{ + union { + struct nvif_ioctl_ntfy_put_v0 v0; + } *args = data; + int ret = -ENOSYS; + + nvif_ioctl(object, "ntfy put size %d\n", size); + if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) { + nvif_ioctl(object, "ntfy put vers %d index %d\n", + args->v0.version, args->v0.index); + ret = nvkm_client_notify_put(client, args->v0.index); + } + + return ret; +} + +static struct { + int version; + int (*func)(struct nvkm_client *, struct nvkm_object *, void *, u32); +} +nvkm_ioctl_v0[] = { + { 0x00, nvkm_ioctl_nop }, + { 0x00, nvkm_ioctl_sclass }, + { 0x00, nvkm_ioctl_new }, + { 0x00, nvkm_ioctl_del }, + { 0x00, nvkm_ioctl_mthd }, + { 0x00, nvkm_ioctl_rd }, + { 0x00, nvkm_ioctl_wr }, + { 0x00, nvkm_ioctl_map }, + { 0x00, nvkm_ioctl_unmap }, + { 0x00, nvkm_ioctl_ntfy_new }, + { 0x00, nvkm_ioctl_ntfy_del }, + { 0x00, nvkm_ioctl_ntfy_get }, + { 0x00, nvkm_ioctl_ntfy_put }, +}; + +static int +nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type, + void *data, u32 size, u8 owner, u8 *route, u64 *token) +{ + struct 
nvkm_object *object; + int ret; + + object = nvkm_object_search(client, handle, NULL); + if (IS_ERR(object)) { + nvif_ioctl(&client->object, "object not found\n"); + return PTR_ERR(object); + } + + if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != object->route) { + nvif_ioctl(&client->object, "route != owner\n"); + return -EACCES; + } + *route = object->route; + *token = object->token; + + if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) { + if (nvkm_ioctl_v0[type].version == 0) + ret = nvkm_ioctl_v0[type].func(client, object, data, size); + } + + return ret; +} + +int +nvkm_ioctl(struct nvkm_client *client, void *data, u32 size, void **hack) +{ + struct nvkm_object *object = &client->object; + union { + struct nvif_ioctl_v0 v0; + } *args = data; + int ret = -ENOSYS; + + nvif_ioctl(object, "size %d\n", size); + + if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) { + nvif_ioctl(object, + "vers %d type %02x object %016llx owner %02x\n", + args->v0.version, args->v0.type, args->v0.object, + args->v0.owner); + ret = nvkm_ioctl_path(client, args->v0.object, args->v0.type, + data, size, args->v0.owner, + &args->v0.route, &args->v0.token); + } + + if (ret != 1) { + nvif_ioctl(object, "return %d\n", ret); + if (hack) { + *hack = client->data; + client->data = NULL; + } + } + + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c new file mode 100644 index 000000000..c69daac9b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c @@ -0,0 +1,154 @@ +/* + * Copyright 2015 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include <core/memory.h> +#include <core/mm.h> +#include <subdev/fb.h> +#include <subdev/instmem.h> + +void +nvkm_memory_tags_put(struct nvkm_memory *memory, struct nvkm_device *device, + struct nvkm_tags **ptags) +{ + struct nvkm_fb *fb = device->fb; + struct nvkm_tags *tags = *ptags; + if (tags) { + mutex_lock(&fb->tags.mutex); + if (refcount_dec_and_test(&tags->refcount)) { + nvkm_mm_free(&fb->tags.mm, &tags->mn); + kfree(memory->tags); + memory->tags = NULL; + } + mutex_unlock(&fb->tags.mutex); + *ptags = NULL; + } +} + +int +nvkm_memory_tags_get(struct nvkm_memory *memory, struct nvkm_device *device, + u32 nr, void (*clr)(struct nvkm_device *, u32, u32), + struct nvkm_tags **ptags) +{ + struct nvkm_fb *fb = device->fb; + struct nvkm_tags *tags; + + mutex_lock(&fb->tags.mutex); + if ((tags = memory->tags)) { + /* If comptags exist for the memory, but a different amount + * than requested, the buffer is being mapped with settings + * that are incompatible with existing mappings. + */ + if (tags->mn && tags->mn->length != nr) { + mutex_unlock(&fb->tags.mutex); + return -EINVAL; + } + + refcount_inc(&tags->refcount); + mutex_unlock(&fb->tags.mutex); + *ptags = tags; + return 0; + } + + if (!(tags = kmalloc(sizeof(*tags), GFP_KERNEL))) { + mutex_unlock(&fb->tags.mutex); + return -ENOMEM; + } + + if (!nvkm_mm_head(&fb->tags.mm, 0, 1, nr, nr, 1, &tags->mn)) { + if (clr) + clr(device, tags->mn->offset, tags->mn->length); + } else { + /* Failure to allocate HW comptags is not an error, the + * caller should fall back to an uncompressed map. + * + * As memory can be mapped in multiple places, we still + * need to track the allocation failure and ensure that + * any additional mappings remain uncompressed. + * + * This is handled by returning an empty nvkm_tags. 
+ */ + tags->mn = NULL; + } + + refcount_set(&tags->refcount, 1); + *ptags = memory->tags = tags; + mutex_unlock(&fb->tags.mutex); + return 0; +} + +void +nvkm_memory_ctor(const struct nvkm_memory_func *func, + struct nvkm_memory *memory) +{ + memory->func = func; + kref_init(&memory->kref); +} + +static void +nvkm_memory_del(struct kref *kref) +{ + struct nvkm_memory *memory = container_of(kref, typeof(*memory), kref); + if (!WARN_ON(!memory->func)) { + if (memory->func->dtor) + memory = memory->func->dtor(memory); + kfree(memory); + } +} + +void +nvkm_memory_unref(struct nvkm_memory **pmemory) +{ + struct nvkm_memory *memory = *pmemory; + if (memory) { + kref_put(&memory->kref, nvkm_memory_del); + *pmemory = NULL; + } +} + +struct nvkm_memory * +nvkm_memory_ref(struct nvkm_memory *memory) +{ + if (memory) + kref_get(&memory->kref); + return memory; +} + +int +nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target, + u64 size, u32 align, bool zero, + struct nvkm_memory **pmemory) +{ + struct nvkm_instmem *imem = device->imem; + struct nvkm_memory *memory; + int ret; + + if (unlikely(target != NVKM_MEM_TARGET_INST || !imem)) + return -ENOSYS; + + ret = nvkm_instobj_new(imem, size, align, zero, &memory); + if (ret) + return ret; + + *pmemory = memory; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/core/mm.c b/drivers/gpu/drm/nouveau/nvkm/core/mm.c new file mode 100644 index 000000000..f78a06a6b --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/core/mm.c @@ -0,0 +1,307 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#include <core/mm.h> + +#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? 
NULL : \ + list_entry((root)->nl_entry.dir, struct nvkm_mm_node, nl_entry) + +void +nvkm_mm_dump(struct nvkm_mm *mm, const char *header) +{ + struct nvkm_mm_node *node; + + pr_err("nvkm: %s\n", header); + pr_err("nvkm: node list:\n"); + list_for_each_entry(node, &mm->nodes, nl_entry) { + pr_err("nvkm: \t%08x %08x %d\n", + node->offset, node->length, node->type); + } + pr_err("nvkm: free list:\n"); + list_for_each_entry(node, &mm->free, fl_entry) { + pr_err("nvkm: \t%08x %08x %d\n", + node->offset, node->length, node->type); + } +} + +void +nvkm_mm_free(struct nvkm_mm *mm, struct nvkm_mm_node **pthis) +{ + struct nvkm_mm_node *this = *pthis; + + if (this) { + struct nvkm_mm_node *prev = node(this, prev); + struct nvkm_mm_node *next = node(this, next); + + if (prev && prev->type == NVKM_MM_TYPE_NONE) { + prev->length += this->length; + list_del(&this->nl_entry); + kfree(this); this = prev; + } + + if (next && next->type == NVKM_MM_TYPE_NONE) { + next->offset = this->offset; + next->length += this->length; + if (this->type == NVKM_MM_TYPE_NONE) + list_del(&this->fl_entry); + list_del(&this->nl_entry); + kfree(this); this = NULL; + } + + if (this && this->type != NVKM_MM_TYPE_NONE) { + list_for_each_entry(prev, &mm->free, fl_entry) { + if (this->offset < prev->offset) + break; + } + + list_add_tail(&this->fl_entry, &prev->fl_entry); + this->type = NVKM_MM_TYPE_NONE; + } + } + + *pthis = NULL; +} + +static struct nvkm_mm_node * +region_head(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size) +{ + struct nvkm_mm_node *b; + + if (a->length == size) + return a; + + b = kmalloc(sizeof(*b), GFP_KERNEL); + if (unlikely(b == NULL)) + return NULL; + + b->offset = a->offset; + b->length = size; + b->heap = a->heap; + b->type = a->type; + a->offset += size; + a->length -= size; + list_add_tail(&b->nl_entry, &a->nl_entry); + if (b->type == NVKM_MM_TYPE_NONE) + list_add_tail(&b->fl_entry, &a->fl_entry); + + return b; +} + +int +nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, 
u32 size_max, u32 size_min, + u32 align, struct nvkm_mm_node **pnode) +{ + struct nvkm_mm_node *prev, *this, *next; + u32 mask = align - 1; + u32 splitoff; + u32 s, e; + + BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE); + + list_for_each_entry(this, &mm->free, fl_entry) { + if (unlikely(heap != NVKM_MM_HEAP_ANY)) { + if (this->heap != heap) + continue; + } + e = this->offset + this->length; + s = this->offset; + + prev = node(this, prev); + if (prev && prev->type != type) + s = roundup(s, mm->block_size); + + next = node(this, next); + if (next && next->type != type) + e = rounddown(e, mm->block_size); + + s = (s + mask) & ~mask; + e &= ~mask; + if (s > e || e - s < size_min) + continue; + + splitoff = s - this->offset; + if (splitoff && !region_head(mm, this, splitoff)) + return -ENOMEM; + + this = region_head(mm, this, min(size_max, e - s)); + if (!this) + return -ENOMEM; + + this->next = NULL; + this->type = type; + list_del(&this->fl_entry); + *pnode = this; + return 0; + } + + return -ENOSPC; +} + +static struct nvkm_mm_node * +region_tail(struct nvkm_mm *mm, struct nvkm_mm_node *a, u32 size) +{ + struct nvkm_mm_node *b; + + if (a->length == size) + return a; + + b = kmalloc(sizeof(*b), GFP_KERNEL); + if (unlikely(b == NULL)) + return NULL; + + a->length -= size; + b->offset = a->offset + a->length; + b->length = size; + b->heap = a->heap; + b->type = a->type; + + list_add(&b->nl_entry, &a->nl_entry); + if (b->type == NVKM_MM_TYPE_NONE) + list_add(&b->fl_entry, &a->fl_entry); + + return b; +} + +int +nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min, + u32 align, struct nvkm_mm_node **pnode) +{ + struct nvkm_mm_node *prev, *this, *next; + u32 mask = align - 1; + + BUG_ON(type == NVKM_MM_TYPE_NONE || type == NVKM_MM_TYPE_HOLE); + + list_for_each_entry_reverse(this, &mm->free, fl_entry) { + u32 e = this->offset + this->length; + u32 s = this->offset; + u32 c = 0, a; + if (unlikely(heap != NVKM_MM_HEAP_ANY)) { + if 
(this->heap != heap) + continue; + } + + prev = node(this, prev); + if (prev && prev->type != type) + s = roundup(s, mm->block_size); + + next = node(this, next); + if (next && next->type != type) { + e = rounddown(e, mm->block_size); + c = next->offset - e; + } + + s = (s + mask) & ~mask; + a = e - s; + if (s > e || a < size_min) + continue; + + a = min(a, size_max); + s = (e - a) & ~mask; + c += (e - s) - a; + + if (c && !region_tail(mm, this, c)) + return -ENOMEM; + + this = region_tail(mm, this, a); + if (!this) + return -ENOMEM; + + this->next = NULL; + this->type = type; + list_del(&this->fl_entry); + *pnode = this; + return 0; + } + + return -ENOSPC; +} + +int +nvkm_mm_init(struct nvkm_mm *mm, u8 heap, u32 offset, u32 length, u32 block) +{ + struct nvkm_mm_node *node, *prev; + u32 next; + + if (nvkm_mm_initialised(mm)) { + prev = list_last_entry(&mm->nodes, typeof(*node), nl_entry); + next = prev->offset + prev->length; + if (next != offset) { + BUG_ON(next > offset); + if (!(node = kzalloc(sizeof(*node), GFP_KERNEL))) + return -ENOMEM; + node->type = NVKM_MM_TYPE_HOLE; + node->offset = next; + node->length = offset - next; + list_add_tail(&node->nl_entry, &mm->nodes); + } + BUG_ON(block != mm->block_size); + } else { + INIT_LIST_HEAD(&mm->nodes); + INIT_LIST_HEAD(&mm->free); + mm->block_size = block; + mm->heap_nodes = 0; + } + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + if (length) { + node->offset = roundup(offset, mm->block_size); + node->length = rounddown(offset + length, mm->block_size); + node->length -= node->offset; + } + + list_add_tail(&node->nl_entry, &mm->nodes); + list_add_tail(&node->fl_entry, &mm->free); + node->heap = heap; + mm->heap_nodes++; + return 0; +} + +int +nvkm_mm_fini(struct nvkm_mm *mm) +{ + struct nvkm_mm_node *node, *temp; + int nodes = 0; + + if (!nvkm_mm_initialised(mm)) + return 0; + + list_for_each_entry(node, &mm->nodes, nl_entry) { + if (node->type != NVKM_MM_TYPE_HOLE) { + if (++nodes 
> mm->heap_nodes) { + nvkm_mm_dump(mm, "mm not clean!"); + return -EBUSY; + } + } + } + + list_for_each_entry_safe(node, temp, &mm->nodes, nl_entry) { + list_del(&node->nl_entry); + kfree(node); + } + + mm->heap_nodes = 0; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/core/notify.c b/drivers/gpu/drm/nouveau/nvkm/core/notify.c new file mode 100644 index 000000000..023610d01 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/core/notify.c @@ -0,0 +1,163 @@ +/* + * Copyright 2014 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include <core/notify.h> +#include <core/event.h> + +static inline void +nvkm_notify_put_locked(struct nvkm_notify *notify) +{ + if (notify->block++ == 0) + nvkm_event_put(notify->event, notify->types, notify->index); +} + +void +nvkm_notify_put(struct nvkm_notify *notify) +{ + struct nvkm_event *event = notify->event; + unsigned long flags; + if (likely(event) && + test_and_clear_bit(NVKM_NOTIFY_USER, ¬ify->flags)) { + spin_lock_irqsave(&event->refs_lock, flags); + nvkm_notify_put_locked(notify); + spin_unlock_irqrestore(&event->refs_lock, flags); + if (test_bit(NVKM_NOTIFY_WORK, ¬ify->flags)) + flush_work(¬ify->work); + } +} + +static inline void +nvkm_notify_get_locked(struct nvkm_notify *notify) +{ + if (--notify->block == 0) + nvkm_event_get(notify->event, notify->types, notify->index); +} + +void +nvkm_notify_get(struct nvkm_notify *notify) +{ + struct nvkm_event *event = notify->event; + unsigned long flags; + if (likely(event) && + !test_and_set_bit(NVKM_NOTIFY_USER, ¬ify->flags)) { + spin_lock_irqsave(&event->refs_lock, flags); + nvkm_notify_get_locked(notify); + spin_unlock_irqrestore(&event->refs_lock, flags); + } +} + +static inline void +nvkm_notify_func(struct nvkm_notify *notify) +{ + struct nvkm_event *event = notify->event; + int ret = notify->func(notify); + unsigned long flags; + if ((ret == NVKM_NOTIFY_KEEP) || + !test_and_clear_bit(NVKM_NOTIFY_USER, ¬ify->flags)) { + spin_lock_irqsave(&event->refs_lock, flags); + nvkm_notify_get_locked(notify); + spin_unlock_irqrestore(&event->refs_lock, flags); + } +} + +static void +nvkm_notify_work(struct work_struct *work) +{ + struct nvkm_notify *notify = container_of(work, typeof(*notify), work); + nvkm_notify_func(notify); +} + +void +nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size) +{ + struct nvkm_event *event = notify->event; + unsigned long flags; + + assert_spin_locked(&event->list_lock); + BUG_ON(size != notify->size); + + 
spin_lock_irqsave(&event->refs_lock, flags); + if (notify->block) { + spin_unlock_irqrestore(&event->refs_lock, flags); + return; + } + nvkm_notify_put_locked(notify); + spin_unlock_irqrestore(&event->refs_lock, flags); + + if (test_bit(NVKM_NOTIFY_WORK, ¬ify->flags)) { + memcpy((void *)notify->data, data, size); + schedule_work(¬ify->work); + } else { + notify->data = data; + nvkm_notify_func(notify); + notify->data = NULL; + } +} + +void +nvkm_notify_fini(struct nvkm_notify *notify) +{ + unsigned long flags; + if (notify->event) { + nvkm_notify_put(notify); + spin_lock_irqsave(¬ify->event->list_lock, flags); + list_del(¬ify->head); + spin_unlock_irqrestore(¬ify->event->list_lock, flags); + kfree((void *)notify->data); + notify->event = NULL; + } +} + +int +nvkm_notify_init(struct nvkm_object *object, struct nvkm_event *event, + int (*func)(struct nvkm_notify *), bool work, + void *data, u32 size, u32 reply, + struct nvkm_notify *notify) +{ + unsigned long flags; + int ret = -ENODEV; + if ((notify->event = event), event->refs) { + ret = event->func->ctor(object, data, size, notify); + if (ret == 0 && (ret = -EINVAL, notify->size == reply)) { + notify->flags = 0; + notify->block = 1; + notify->func = func; + notify->data = NULL; + if (ret = 0, work) { + INIT_WORK(¬ify->work, nvkm_notify_work); + set_bit(NVKM_NOTIFY_WORK, ¬ify->flags); + notify->data = kmalloc(reply, GFP_KERNEL); + if (!notify->data) + ret = -ENOMEM; + } + } + if (ret == 0) { + spin_lock_irqsave(&event->list_lock, flags); + list_add_tail(¬ify->head, &event->list); + spin_unlock_irqrestore(&event->list_lock, flags); + } + } + if (ret) + notify->event = NULL; + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/core/object.c b/drivers/gpu/drm/nouveau/nvkm/core/object.c new file mode 100644 index 000000000..301a5e5b5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/core/object.c @@ -0,0 +1,336 @@ +/* + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ +#include <core/object.h> +#include <core/client.h> +#include <core/engine.h> + +struct nvkm_object * +nvkm_object_search(struct nvkm_client *client, u64 handle, + const struct nvkm_object_func *func) +{ + struct nvkm_object *object; + + if (handle) { + struct rb_node *node = client->objroot.rb_node; + while (node) { + object = rb_entry(node, typeof(*object), node); + if (handle < object->object) + node = node->rb_left; + else + if (handle > object->object) + node = node->rb_right; + else + goto done; + } + return ERR_PTR(-ENOENT); + } else { + object = &client->object; + } + +done: + if (unlikely(func && object->func != func)) + return ERR_PTR(-EINVAL); + return object; +} + +void +nvkm_object_remove(struct nvkm_object *object) +{ + if (!RB_EMPTY_NODE(&object->node)) + rb_erase(&object->node, &object->client->objroot); +} + +bool +nvkm_object_insert(struct nvkm_object *object) +{ + struct rb_node **ptr = &object->client->objroot.rb_node; + struct rb_node *parent = NULL; + + while (*ptr) { + struct nvkm_object *this = rb_entry(*ptr, typeof(*this), node); + parent = *ptr; + if (object->object < this->object) + ptr = &parent->rb_left; + else + if (object->object > this->object) + ptr = &parent->rb_right; + else + return false; + } + + rb_link_node(&object->node, parent, ptr); + rb_insert_color(&object->node, &object->client->objroot); + return true; +} + +int +nvkm_object_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) +{ + if (likely(object->func->mthd)) + return object->func->mthd(object, mthd, data, size); + return -ENODEV; +} + +int +nvkm_object_ntfy(struct nvkm_object *object, u32 mthd, + struct nvkm_event **pevent) +{ + if (likely(object->func->ntfy)) + return object->func->ntfy(object, mthd, pevent); + return -ENODEV; +} + +int +nvkm_object_map(struct nvkm_object *object, void *argv, u32 argc, + enum nvkm_object_map *type, u64 *addr, u64 *size) +{ + if (likely(object->func->map)) + return 
object->func->map(object, argv, argc, type, addr, size); + return -ENODEV; +} + +int +nvkm_object_unmap(struct nvkm_object *object) +{ + if (likely(object->func->unmap)) + return object->func->unmap(object); + return -ENODEV; +} + +int +nvkm_object_rd08(struct nvkm_object *object, u64 addr, u8 *data) +{ + if (likely(object->func->rd08)) + return object->func->rd08(object, addr, data); + return -ENODEV; +} + +int +nvkm_object_rd16(struct nvkm_object *object, u64 addr, u16 *data) +{ + if (likely(object->func->rd16)) + return object->func->rd16(object, addr, data); + return -ENODEV; +} + +int +nvkm_object_rd32(struct nvkm_object *object, u64 addr, u32 *data) +{ + if (likely(object->func->rd32)) + return object->func->rd32(object, addr, data); + return -ENODEV; +} + +int +nvkm_object_wr08(struct nvkm_object *object, u64 addr, u8 data) +{ + if (likely(object->func->wr08)) + return object->func->wr08(object, addr, data); + return -ENODEV; +} + +int +nvkm_object_wr16(struct nvkm_object *object, u64 addr, u16 data) +{ + if (likely(object->func->wr16)) + return object->func->wr16(object, addr, data); + return -ENODEV; +} + +int +nvkm_object_wr32(struct nvkm_object *object, u64 addr, u32 data) +{ + if (likely(object->func->wr32)) + return object->func->wr32(object, addr, data); + return -ENODEV; +} + +int +nvkm_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *gpuobj, + int align, struct nvkm_gpuobj **pgpuobj) +{ + if (object->func->bind) + return object->func->bind(object, gpuobj, align, pgpuobj); + return -ENODEV; +} + +int +nvkm_object_fini(struct nvkm_object *object, bool suspend) +{ + const char *action = suspend ? 
"suspend" : "fini"; + struct nvkm_object *child; + s64 time; + int ret; + + nvif_debug(object, "%s children...\n", action); + time = ktime_to_us(ktime_get()); + list_for_each_entry(child, &object->tree, head) { + ret = nvkm_object_fini(child, suspend); + if (ret && suspend) + goto fail_child; + } + + nvif_debug(object, "%s running...\n", action); + if (object->func->fini) { + ret = object->func->fini(object, suspend); + if (ret) { + nvif_error(object, "%s failed with %d\n", action, ret); + if (suspend) + goto fail; + } + } + + time = ktime_to_us(ktime_get()) - time; + nvif_debug(object, "%s completed in %lldus\n", action, time); + return 0; + +fail: + if (object->func->init) { + int rret = object->func->init(object); + if (rret) + nvif_fatal(object, "failed to restart, %d\n", rret); + } +fail_child: + list_for_each_entry_continue_reverse(child, &object->tree, head) { + nvkm_object_init(child); + } + return ret; +} + +int +nvkm_object_init(struct nvkm_object *object) +{ + struct nvkm_object *child; + s64 time; + int ret; + + nvif_debug(object, "init running...\n"); + time = ktime_to_us(ktime_get()); + if (object->func->init) { + ret = object->func->init(object); + if (ret) + goto fail; + } + + nvif_debug(object, "init children...\n"); + list_for_each_entry(child, &object->tree, head) { + ret = nvkm_object_init(child); + if (ret) + goto fail_child; + } + + time = ktime_to_us(ktime_get()) - time; + nvif_debug(object, "init completed in %lldus\n", time); + return 0; + +fail_child: + list_for_each_entry_continue_reverse(child, &object->tree, head) + nvkm_object_fini(child, false); +fail: + nvif_error(object, "init failed with %d\n", ret); + if (object->func->fini) + object->func->fini(object, false); + return ret; +} + +void * +nvkm_object_dtor(struct nvkm_object *object) +{ + struct nvkm_object *child, *ctemp; + void *data = object; + s64 time; + + nvif_debug(object, "destroy children...\n"); + time = ktime_to_us(ktime_get()); + list_for_each_entry_safe(child, ctemp, 
&object->tree, head) { + nvkm_object_del(&child); + } + + nvif_debug(object, "destroy running...\n"); + nvkm_object_unmap(object); + if (object->func->dtor) + data = object->func->dtor(object); + nvkm_engine_unref(&object->engine); + time = ktime_to_us(ktime_get()) - time; + nvif_debug(object, "destroy completed in %lldus...\n", time); + return data; +} + +void +nvkm_object_del(struct nvkm_object **pobject) +{ + struct nvkm_object *object = *pobject; + if (object && !WARN_ON(!object->func)) { + *pobject = nvkm_object_dtor(object); + nvkm_object_remove(object); + list_del(&object->head); + kfree(*pobject); + *pobject = NULL; + } +} + +void +nvkm_object_ctor(const struct nvkm_object_func *func, + const struct nvkm_oclass *oclass, struct nvkm_object *object) +{ + object->func = func; + object->client = oclass->client; + object->engine = nvkm_engine_ref(oclass->engine); + object->oclass = oclass->base.oclass; + object->handle = oclass->handle; + object->route = oclass->route; + object->token = oclass->token; + object->object = oclass->object; + INIT_LIST_HEAD(&object->head); + INIT_LIST_HEAD(&object->tree); + RB_CLEAR_NODE(&object->node); + WARN_ON(IS_ERR(object->engine)); +} + +int +nvkm_object_new_(const struct nvkm_object_func *func, + const struct nvkm_oclass *oclass, void *data, u32 size, + struct nvkm_object **pobject) +{ + if (size == 0) { + if (!(*pobject = kzalloc(sizeof(**pobject), GFP_KERNEL))) + return -ENOMEM; + nvkm_object_ctor(func, oclass, *pobject); + return 0; + } + return -ENOSYS; +} + +static const struct nvkm_object_func +nvkm_object_func = { +}; + +int +nvkm_object_new(const struct nvkm_oclass *oclass, void *data, u32 size, + struct nvkm_object **pobject) +{ + const struct nvkm_object_func *func = + oclass->base.func ? 
oclass->base.func : &nvkm_object_func; + return nvkm_object_new_(func, oclass, data, size, pobject); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c new file mode 100644 index 000000000..16299837a --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/core/oproxy.c @@ -0,0 +1,209 @@ +/* + * Copyright 2015 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include <core/oproxy.h> + +static int +nvkm_oproxy_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size) +{ + return nvkm_object_mthd(nvkm_oproxy(object)->object, mthd, data, size); +} + +static int +nvkm_oproxy_ntfy(struct nvkm_object *object, u32 mthd, + struct nvkm_event **pevent) +{ + return nvkm_object_ntfy(nvkm_oproxy(object)->object, mthd, pevent); +} + +static int +nvkm_oproxy_map(struct nvkm_object *object, void *argv, u32 argc, + enum nvkm_object_map *type, u64 *addr, u64 *size) +{ + struct nvkm_oproxy *oproxy = nvkm_oproxy(object); + return nvkm_object_map(oproxy->object, argv, argc, type, addr, size); +} + +static int +nvkm_oproxy_unmap(struct nvkm_object *object) +{ + return nvkm_object_unmap(nvkm_oproxy(object)->object); +} + +static int +nvkm_oproxy_rd08(struct nvkm_object *object, u64 addr, u8 *data) +{ + return nvkm_object_rd08(nvkm_oproxy(object)->object, addr, data); +} + +static int +nvkm_oproxy_rd16(struct nvkm_object *object, u64 addr, u16 *data) +{ + return nvkm_object_rd16(nvkm_oproxy(object)->object, addr, data); +} + +static int +nvkm_oproxy_rd32(struct nvkm_object *object, u64 addr, u32 *data) +{ + return nvkm_object_rd32(nvkm_oproxy(object)->object, addr, data); +} + +static int +nvkm_oproxy_wr08(struct nvkm_object *object, u64 addr, u8 data) +{ + return nvkm_object_wr08(nvkm_oproxy(object)->object, addr, data); +} + +static int +nvkm_oproxy_wr16(struct nvkm_object *object, u64 addr, u16 data) +{ + return nvkm_object_wr16(nvkm_oproxy(object)->object, addr, data); +} + +static int +nvkm_oproxy_wr32(struct nvkm_object *object, u64 addr, u32 data) +{ + return nvkm_object_wr32(nvkm_oproxy(object)->object, addr, data); +} + +static int +nvkm_oproxy_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent, + int align, struct nvkm_gpuobj **pgpuobj) +{ + return nvkm_object_bind(nvkm_oproxy(object)->object, + parent, align, pgpuobj); +} + +static int +nvkm_oproxy_sclass(struct 
nvkm_object *object, int index, + struct nvkm_oclass *oclass) +{ + struct nvkm_oproxy *oproxy = nvkm_oproxy(object); + oclass->parent = oproxy->object; + if (!oproxy->object->func->sclass) + return -ENODEV; + return oproxy->object->func->sclass(oproxy->object, index, oclass); +} + +static int +nvkm_oproxy_fini(struct nvkm_object *object, bool suspend) +{ + struct nvkm_oproxy *oproxy = nvkm_oproxy(object); + int ret; + + if (oproxy->func->fini[0]) { + ret = oproxy->func->fini[0](oproxy, suspend); + if (ret && suspend) + return ret; + } + + if (oproxy->object->func->fini) { + ret = oproxy->object->func->fini(oproxy->object, suspend); + if (ret && suspend) + return ret; + } + + if (oproxy->func->fini[1]) { + ret = oproxy->func->fini[1](oproxy, suspend); + if (ret && suspend) + return ret; + } + + return 0; +} + +static int +nvkm_oproxy_init(struct nvkm_object *object) +{ + struct nvkm_oproxy *oproxy = nvkm_oproxy(object); + int ret; + + if (oproxy->func->init[0]) { + ret = oproxy->func->init[0](oproxy); + if (ret) + return ret; + } + + if (oproxy->object->func->init) { + ret = oproxy->object->func->init(oproxy->object); + if (ret) + return ret; + } + + if (oproxy->func->init[1]) { + ret = oproxy->func->init[1](oproxy); + if (ret) + return ret; + } + + return 0; +} + +static void * +nvkm_oproxy_dtor(struct nvkm_object *object) +{ + struct nvkm_oproxy *oproxy = nvkm_oproxy(object); + if (oproxy->func->dtor[0]) + oproxy->func->dtor[0](oproxy); + nvkm_object_del(&oproxy->object); + if (oproxy->func->dtor[1]) + oproxy->func->dtor[1](oproxy); + return oproxy; +} + +static const struct nvkm_object_func +nvkm_oproxy_func = { + .dtor = nvkm_oproxy_dtor, + .init = nvkm_oproxy_init, + .fini = nvkm_oproxy_fini, + .mthd = nvkm_oproxy_mthd, + .ntfy = nvkm_oproxy_ntfy, + .map = nvkm_oproxy_map, + .unmap = nvkm_oproxy_unmap, + .rd08 = nvkm_oproxy_rd08, + .rd16 = nvkm_oproxy_rd16, + .rd32 = nvkm_oproxy_rd32, + .wr08 = nvkm_oproxy_wr08, + .wr16 = nvkm_oproxy_wr16, + .wr32 = 
nvkm_oproxy_wr32, + .bind = nvkm_oproxy_bind, + .sclass = nvkm_oproxy_sclass, +}; + +void +nvkm_oproxy_ctor(const struct nvkm_oproxy_func *func, + const struct nvkm_oclass *oclass, struct nvkm_oproxy *oproxy) +{ + nvkm_object_ctor(&nvkm_oproxy_func, oclass, &oproxy->base); + oproxy->func = func; +} + +int +nvkm_oproxy_new_(const struct nvkm_oproxy_func *func, + const struct nvkm_oclass *oclass, struct nvkm_oproxy **poproxy) +{ + if (!(*poproxy = kzalloc(sizeof(**poproxy), GFP_KERNEL))) + return -ENOMEM; + nvkm_oproxy_ctor(func, oclass, *poproxy); + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/core/option.c b/drivers/gpu/drm/nouveau/nvkm/core/option.c new file mode 100644 index 000000000..3e62cf8cd --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/core/option.c @@ -0,0 +1,139 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ +#include <core/option.h> +#include <core/debug.h> + +const char * +nvkm_stropt(const char *optstr, const char *opt, int *arglen) +{ + while (optstr && *optstr != '\0') { + int len = strcspn(optstr, ",="); + switch (optstr[len]) { + case '=': + if (!strncasecmpz(optstr, opt, len)) { + optstr += len + 1; + *arglen = strcspn(optstr, ",="); + return *arglen ? optstr : NULL; + } + optstr++; + break; + case ',': + optstr++; + break; + default: + break; + } + optstr += len; + } + + return NULL; +} + +bool +nvkm_boolopt(const char *optstr, const char *opt, bool value) +{ + int arglen; + + optstr = nvkm_stropt(optstr, opt, &arglen); + if (optstr) { + if (!strncasecmpz(optstr, "0", arglen) || + !strncasecmpz(optstr, "no", arglen) || + !strncasecmpz(optstr, "off", arglen) || + !strncasecmpz(optstr, "false", arglen)) + value = false; + else + if (!strncasecmpz(optstr, "1", arglen) || + !strncasecmpz(optstr, "yes", arglen) || + !strncasecmpz(optstr, "on", arglen) || + !strncasecmpz(optstr, "true", arglen)) + value = true; + } + + return value; +} + +long +nvkm_longopt(const char *optstr, const char *opt, long value) +{ + long result = value; + int arglen; + char *s; + + optstr = nvkm_stropt(optstr, opt, &arglen); + if (optstr && (s = kstrndup(optstr, arglen, GFP_KERNEL))) { + int ret = kstrtol(s, 0, &value); + if (ret == 0) + result = value; + kfree(s); + } + + return result; +} + +int +nvkm_dbgopt(const char *optstr, const char *sub) +{ + int mode = 1, level = CONFIG_NOUVEAU_DEBUG_DEFAULT; + + while (optstr) { + int len = strcspn(optstr, ",="); + switch (optstr[len]) { + case '=': + if (strncasecmpz(optstr, sub, len)) + mode = 0; + optstr++; + break; + default: + if (mode) { + if (!strncasecmpz(optstr, "fatal", len)) + level = NV_DBG_FATAL; + else if (!strncasecmpz(optstr, "error", len)) + level = NV_DBG_ERROR; + else if (!strncasecmpz(optstr, "warn", len)) + level = NV_DBG_WARN; + else if (!strncasecmpz(optstr, "info", len)) + level = 
NV_DBG_INFO; + else if (!strncasecmpz(optstr, "debug", len)) + level = NV_DBG_DEBUG; + else if (!strncasecmpz(optstr, "trace", len)) + level = NV_DBG_TRACE; + else if (!strncasecmpz(optstr, "paranoia", len)) + level = NV_DBG_PARANOIA; + else if (!strncasecmpz(optstr, "spam", len)) + level = NV_DBG_SPAM; + } + + if (optstr[len] != '\0') { + optstr++; + mode = 1; + break; + } + + return level; + } + optstr += len; + } + + return level; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c new file mode 100644 index 000000000..8162e3d23 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c @@ -0,0 +1,162 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include <core/ramht.h> +#include <core/engine.h> +#include <core/object.h> + +static u32 +nvkm_ramht_hash(struct nvkm_ramht *ramht, int chid, u32 handle) +{ + u32 hash = 0; + + while (handle) { + hash ^= (handle & ((1 << ramht->bits) - 1)); + handle >>= ramht->bits; + } + + hash ^= chid << (ramht->bits - 4); + return hash; +} + +struct nvkm_gpuobj * +nvkm_ramht_search(struct nvkm_ramht *ramht, int chid, u32 handle) +{ + u32 co, ho; + + co = ho = nvkm_ramht_hash(ramht, chid, handle); + do { + if (ramht->data[co].chid == chid) { + if (ramht->data[co].handle == handle) + return ramht->data[co].inst; + } + + if (++co >= ramht->size) + co = 0; + } while (co != ho); + + return NULL; +} + +static int +nvkm_ramht_update(struct nvkm_ramht *ramht, int co, struct nvkm_object *object, + int chid, int addr, u32 handle, u32 context) +{ + struct nvkm_ramht_data *data = &ramht->data[co]; + u64 inst = 0x00000040; /* just non-zero for <=g8x fifo ramht */ + int ret; + + nvkm_gpuobj_del(&data->inst); + data->chid = chid; + data->handle = handle; + + if (object) { + ret = nvkm_object_bind(object, ramht->parent, 16, &data->inst); + if (ret) { + if (ret != -ENODEV) { + data->chid = -1; + return ret; + } + data->inst = NULL; + } + + if (data->inst) { + if (ramht->device->card_type >= NV_50) + inst = data->inst->node->offset; + else + inst = data->inst->addr; + } + + if (addr < 0) context |= inst << -addr; + else context |= inst >> addr; + } + + nvkm_kmap(ramht->gpuobj); + nvkm_wo32(ramht->gpuobj, (co << 3) + 0, handle); + nvkm_wo32(ramht->gpuobj, (co << 3) + 4, context); + nvkm_done(ramht->gpuobj); + return co + 1; +} + +void +nvkm_ramht_remove(struct nvkm_ramht *ramht, int cookie) +{ + if (--cookie >= 0) + nvkm_ramht_update(ramht, cookie, NULL, -1, 0, 0, 0); +} + +int +nvkm_ramht_insert(struct nvkm_ramht *ramht, struct nvkm_object *object, + int chid, int addr, u32 handle, u32 context) +{ + u32 co, ho; + + if (nvkm_ramht_search(ramht, chid, handle)) + return -EEXIST; + + co = ho = 
nvkm_ramht_hash(ramht, chid, handle); + do { + if (ramht->data[co].chid < 0) { + return nvkm_ramht_update(ramht, co, object, chid, + addr, handle, context); + } + + if (++co >= ramht->size) + co = 0; + } while (co != ho); + + return -ENOSPC; +} + +void +nvkm_ramht_del(struct nvkm_ramht **pramht) +{ + struct nvkm_ramht *ramht = *pramht; + if (ramht) { + nvkm_gpuobj_del(&ramht->gpuobj); + vfree(*pramht); + *pramht = NULL; + } +} + +int +nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align, + struct nvkm_gpuobj *parent, struct nvkm_ramht **pramht) +{ + struct nvkm_ramht *ramht; + int ret, i; + + if (!(ramht = *pramht = vzalloc(struct_size(ramht, data, (size >> 3))))) + return -ENOMEM; + + ramht->device = device; + ramht->parent = parent; + ramht->size = size >> 3; + ramht->bits = order_base_2(ramht->size); + for (i = 0; i < ramht->size; i++) + ramht->data[i].chid = -1; + + ret = nvkm_gpuobj_new(ramht->device, size, align, true, + ramht->parent, &ramht->gpuobj); + if (ret) + nvkm_ramht_del(pramht); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c new file mode 100644 index 000000000..a74b7acb6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c @@ -0,0 +1,194 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#include <core/subdev.h> +#include <core/device.h> +#include <core/option.h> +#include <subdev/mc.h> + +const char * +nvkm_subdev_type[NVKM_SUBDEV_NR] = { +#define NVKM_LAYOUT_ONCE(type,data,ptr,...) [type] = #ptr, +#define NVKM_LAYOUT_INST(A...) NVKM_LAYOUT_ONCE(A) +#include <core/layout.h> +#undef NVKM_LAYOUT_ONCE +#undef NVKM_LAYOUT_INST +}; + +void +nvkm_subdev_intr(struct nvkm_subdev *subdev) +{ + if (subdev->func->intr) + subdev->func->intr(subdev); +} + +int +nvkm_subdev_info(struct nvkm_subdev *subdev, u64 mthd, u64 *data) +{ + if (subdev->func->info) + return subdev->func->info(subdev, mthd, data); + return -ENOSYS; +} + +int +nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend) +{ + struct nvkm_device *device = subdev->device; + const char *action = suspend ? 
"suspend" : "fini"; + s64 time; + + nvkm_trace(subdev, "%s running...\n", action); + time = ktime_to_us(ktime_get()); + + if (subdev->func->fini) { + int ret = subdev->func->fini(subdev, suspend); + if (ret) { + nvkm_error(subdev, "%s failed, %d\n", action, ret); + if (suspend) + return ret; + } + } + + nvkm_mc_reset(device, subdev->type, subdev->inst); + + time = ktime_to_us(ktime_get()) - time; + nvkm_trace(subdev, "%s completed in %lldus\n", action, time); + return 0; +} + +int +nvkm_subdev_preinit(struct nvkm_subdev *subdev) +{ + s64 time; + + nvkm_trace(subdev, "preinit running...\n"); + time = ktime_to_us(ktime_get()); + + if (subdev->func->preinit) { + int ret = subdev->func->preinit(subdev); + if (ret) { + nvkm_error(subdev, "preinit failed, %d\n", ret); + return ret; + } + } + + time = ktime_to_us(ktime_get()) - time; + nvkm_trace(subdev, "preinit completed in %lldus\n", time); + return 0; +} + +int +nvkm_subdev_init(struct nvkm_subdev *subdev) +{ + s64 time; + int ret; + + nvkm_trace(subdev, "init running...\n"); + time = ktime_to_us(ktime_get()); + + if (subdev->func->oneinit && !subdev->oneinit) { + s64 time; + nvkm_trace(subdev, "one-time init running...\n"); + time = ktime_to_us(ktime_get()); + ret = subdev->func->oneinit(subdev); + if (ret) { + nvkm_error(subdev, "one-time init failed, %d\n", ret); + return ret; + } + + subdev->oneinit = true; + time = ktime_to_us(ktime_get()) - time; + nvkm_trace(subdev, "one-time init completed in %lldus\n", time); + } + + if (subdev->func->init) { + ret = subdev->func->init(subdev); + if (ret) { + nvkm_error(subdev, "init failed, %d\n", ret); + return ret; + } + } + + time = ktime_to_us(ktime_get()) - time; + nvkm_trace(subdev, "init completed in %lldus\n", time); + return 0; +} + +void +nvkm_subdev_del(struct nvkm_subdev **psubdev) +{ + struct nvkm_subdev *subdev = *psubdev; + s64 time; + + if (subdev && !WARN_ON(!subdev->func)) { + nvkm_trace(subdev, "destroy running...\n"); + time = ktime_to_us(ktime_get()); + 
list_del(&subdev->head); + if (subdev->func->dtor) + *psubdev = subdev->func->dtor(subdev); + time = ktime_to_us(ktime_get()) - time; + nvkm_trace(subdev, "destroy completed in %lldus\n", time); + kfree(*psubdev); + *psubdev = NULL; + } +} + +void +nvkm_subdev_disable(struct nvkm_device *device, enum nvkm_subdev_type type, int inst) +{ + struct nvkm_subdev *subdev; + list_for_each_entry(subdev, &device->subdev, head) { + if (subdev->type == type && subdev->inst == inst) { + *subdev->pself = NULL; + nvkm_subdev_del(&subdev); + break; + } + } +} + +void +nvkm_subdev_ctor(const struct nvkm_subdev_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_subdev *subdev) +{ + subdev->func = func; + subdev->device = device; + subdev->type = type; + subdev->inst = inst < 0 ? 0 : inst; + + if (inst >= 0) + snprintf(subdev->name, sizeof(subdev->name), "%s%d", nvkm_subdev_type[type], inst); + else + strscpy(subdev->name, nvkm_subdev_type[type], sizeof(subdev->name)); + subdev->debug = nvkm_dbgopt(device->dbgopt, subdev->name); + list_add_tail(&subdev->head, &device->subdev); +} + +int +nvkm_subdev_new_(const struct nvkm_subdev_func *func, struct nvkm_device *device, + enum nvkm_subdev_type type, int inst, struct nvkm_subdev **psubdev) +{ + if (!(*psubdev = kzalloc(sizeof(**psubdev), GFP_KERNEL))) + return -ENOMEM; + nvkm_subdev_ctor(func, device, type, inst, *psubdev); + return 0; +} |