author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 09:22:22 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-28 09:22:22 +0000
commit    3f13df4d019cfcbef3f9909e3b993896d3c934e6 (patch)
tree      6c515810bee8549d81e68c548a26a63909f8e716 /radeon
parent    Initial commit. (diff)
Adding upstream version 2.4.114. (upstream/2.4.114, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'radeon')
-rw-r--r--  radeon/Android.mk             14
-rw-r--r--  radeon/Makefile.sources       21
-rw-r--r--  radeon/bof.c                 477
-rw-r--r--  radeon/bof.h                  90
-rw-r--r--  radeon/libdrm_radeon.pc.in    11
-rw-r--r--  radeon/meson.build            70
-rw-r--r--  radeon/r600_pci_ids.h        487
-rw-r--r--  radeon/radeon-symbols.txt     44
-rw-r--r--  radeon/radeon_bo.c           142
-rw-r--r--  radeon/radeon_bo.h            73
-rw-r--r--  radeon/radeon_bo_gem.c       399
-rw-r--r--  radeon/radeon_bo_gem.h        48
-rw-r--r--  radeon/radeon_bo_int.h        45
-rw-r--r--  radeon/radeon_cs.c            95
-rw-r--r--  radeon/radeon_cs.h           141
-rw-r--r--  radeon/radeon_cs_gem.c       556
-rw-r--r--  radeon/radeon_cs_gem.h        41
-rw-r--r--  radeon/radeon_cs_int.h        67
-rw-r--r--  radeon/radeon_cs_space.c     248
-rw-r--r--  radeon/radeon_surface.c     2566
-rw-r--r--  radeon/radeon_surface.h      149
21 files changed, 5784 insertions, 0 deletions
diff --git a/radeon/Android.mk b/radeon/Android.mk
new file mode 100644
index 0000000..71040da
--- /dev/null
+++ b/radeon/Android.mk
@@ -0,0 +1,14 @@
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+# Import variables LIBDRM_RADEON_FILES, LIBDRM_RADEON_H_FILES
+include $(LOCAL_PATH)/Makefile.sources
+
+LOCAL_MODULE := libdrm_radeon
+
+LOCAL_SHARED_LIBRARIES := libdrm
+
+LOCAL_SRC_FILES := $(LIBDRM_RADEON_FILES)
+
+include $(LIBDRM_COMMON_MK)
+include $(BUILD_SHARED_LIBRARY)
diff --git a/radeon/Makefile.sources b/radeon/Makefile.sources
new file mode 100644
index 0000000..1cf482a
--- /dev/null
+++ b/radeon/Makefile.sources
@@ -0,0 +1,21 @@
+LIBDRM_RADEON_FILES := \
+ radeon_bo_gem.c \
+ radeon_cs_gem.c \
+ radeon_cs_space.c \
+ radeon_bo.c \
+ radeon_cs.c \
+ radeon_surface.c
+
+LIBDRM_RADEON_H_FILES := \
+ radeon_bo.h \
+ radeon_cs.h \
+ radeon_surface.h \
+ radeon_bo_gem.h \
+ radeon_cs_gem.h \
+ radeon_bo_int.h \
+ radeon_cs_int.h \
+ r600_pci_ids.h
+
+LIBDRM_RADEON_BOF_FILES := \
+ bof.c \
+ bof.h
diff --git a/radeon/bof.c b/radeon/bof.c
new file mode 100644
index 0000000..0598cc6
--- /dev/null
+++ b/radeon/bof.c
@@ -0,0 +1,477 @@
+/*
+ * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jerome Glisse
+ */
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include "bof.h"
+
+/*
+ * helpers
+ */
+static int bof_entry_grow(bof_t *bof)
+{
+ bof_t **array;
+
+ if (bof->array_size < bof->nentry)
+ return 0;
+ array = realloc(bof->array, (bof->nentry + 16) * sizeof(void*));
+ if (array == NULL)
+ return -ENOMEM;
+ bof->array = array;
+ bof->nentry += 16;
+ return 0;
+}
+
+/*
+ * object
+ */
+bof_t *bof_object(void)
+{
+ bof_t *object;
+
+ object = calloc(1, sizeof(bof_t));
+ if (object == NULL)
+ return NULL;
+ object->refcount = 1;
+ object->type = BOF_TYPE_OBJECT;
+ object->size = 12;
+ return object;
+}
+
+bof_t *bof_object_get(bof_t *object, const char *keyname)
+{
+ unsigned i;
+
+ for (i = 0; i < object->array_size; i += 2) {
+ if (!strcmp(object->array[i]->value, keyname)) {
+ return object->array[i + 1];
+ }
+ }
+ return NULL;
+}
+
+int bof_object_set(bof_t *object, const char *keyname, bof_t *value)
+{
+ bof_t *key;
+ int r;
+
+ if (object->type != BOF_TYPE_OBJECT)
+ return -EINVAL;
+ r = bof_entry_grow(object);
+ if (r)
+ return r;
+ key = bof_string(keyname);
+ if (key == NULL)
+ return -ENOMEM;
+ object->array[object->array_size++] = key;
+ object->array[object->array_size++] = value;
+ object->size += value->size;
+ object->size += key->size;
+ bof_incref(value);
+ return 0;
+}
+
+/*
+ * array
+ */
+bof_t *bof_array(void)
+{
+ bof_t *array = bof_object();
+
+ if (array == NULL)
+ return NULL;
+ array->type = BOF_TYPE_ARRAY;
+ array->size = 12;
+ return array;
+}
+
+int bof_array_append(bof_t *array, bof_t *value)
+{
+ int r;
+ if (array->type != BOF_TYPE_ARRAY)
+ return -EINVAL;
+ r = bof_entry_grow(array);
+ if (r)
+ return r;
+ array->array[array->array_size++] = value;
+ array->size += value->size;
+ bof_incref(value);
+ return 0;
+}
+
+bof_t *bof_array_get(bof_t *bof, unsigned i)
+{
+ if (!bof_is_array(bof) || i >= bof->array_size)
+ return NULL;
+ return bof->array[i];
+}
+
+unsigned bof_array_size(bof_t *bof)
+{
+ if (!bof_is_array(bof))
+ return 0;
+ return bof->array_size;
+}
+
+/*
+ * blob
+ */
+bof_t *bof_blob(unsigned size, void *value)
+{
+ bof_t *blob = bof_object();
+
+ if (blob == NULL)
+ return NULL;
+ blob->type = BOF_TYPE_BLOB;
+ blob->value = calloc(1, size);
+ if (blob->value == NULL) {
+ bof_decref(blob);
+ return NULL;
+ }
+ blob->size = size;
+ memcpy(blob->value, value, size);
+ blob->size += 12;
+ return blob;
+}
+
+unsigned bof_blob_size(bof_t *bof)
+{
+ if (!bof_is_blob(bof))
+ return 0;
+ return bof->size - 12;
+}
+
+void *bof_blob_value(bof_t *bof)
+{
+ if (!bof_is_blob(bof))
+ return NULL;
+ return bof->value;
+}
+
+/*
+ * string
+ */
+bof_t *bof_string(const char *value)
+{
+ bof_t *string = bof_object();
+
+ if (string == NULL)
+ return NULL;
+ string->type = BOF_TYPE_STRING;
+ string->size = strlen(value) + 1;
+ string->value = calloc(1, string->size);
+ if (string->value == NULL) {
+ bof_decref(string);
+ return NULL;
+ }
+ strcpy(string->value, value);
+ string->size += 12;
+ return string;
+}
+
+/*
+ * int32
+ */
+bof_t *bof_int32(int32_t value)
+{
+ bof_t *int32 = bof_object();
+
+ if (int32 == NULL)
+ return NULL;
+ int32->type = BOF_TYPE_INT32;
+ int32->size = 4;
+ int32->value = calloc(1, int32->size);
+ if (int32->value == NULL) {
+ bof_decref(int32);
+ return NULL;
+ }
+ memcpy(int32->value, &value, 4);
+ int32->size += 12;
+ return int32;
+}
+
+int32_t bof_int32_value(bof_t *bof)
+{
+ return *((uint32_t*)bof->value);
+}
+
+/*
+ * common
+ */
+static void bof_indent(int level)
+{
+ int i;
+
+ for (i = 0; i < level; i++)
+ fprintf(stderr, " ");
+}
+
+static void bof_print_bof(bof_t *bof, int level, int entry)
+{
+ bof_indent(level);
+ if (bof == NULL) {
+ fprintf(stderr, "--NULL-- for entry %d\n", entry);
+ return;
+ }
+ switch (bof->type) {
+ case BOF_TYPE_STRING:
+ fprintf(stderr, "%p string [%s %d]\n", bof, (char*)bof->value, bof->size);
+ break;
+ case BOF_TYPE_INT32:
+ fprintf(stderr, "%p int32 [%d %d]\n", bof, *(int*)bof->value, bof->size);
+ break;
+ case BOF_TYPE_BLOB:
+ fprintf(stderr, "%p blob [%d]\n", bof, bof->size);
+ break;
+ case BOF_TYPE_NULL:
+ fprintf(stderr, "%p null [%d]\n", bof, bof->size);
+ break;
+ case BOF_TYPE_OBJECT:
+ fprintf(stderr, "%p object [%d %d]\n", bof, bof->array_size / 2, bof->size);
+ break;
+ case BOF_TYPE_ARRAY:
+ fprintf(stderr, "%p array [%d %d]\n", bof, bof->array_size, bof->size);
+ break;
+ default:
+ fprintf(stderr, "%p unknown [%d]\n", bof, bof->type);
+ return;
+ }
+}
+
+static void bof_print_rec(bof_t *bof, int level, int entry)
+{
+ unsigned i;
+
+ bof_print_bof(bof, level, entry);
+ for (i = 0; i < bof->array_size; i++) {
+ bof_print_rec(bof->array[i], level + 2, i);
+ }
+}
+
+void bof_print(bof_t *bof)
+{
+ bof_print_rec(bof, 0, 0);
+}
+
+static int bof_read(bof_t *root, FILE *file, long end, int level)
+{
+ bof_t *bof = NULL;
+ int r;
+
+ if (ftell(file) >= end) {
+ return 0;
+ }
+ r = bof_entry_grow(root);
+ if (r)
+ return r;
+ bof = bof_object();
+ if (bof == NULL)
+ return -ENOMEM;
+ bof->offset = ftell(file);
+ r = fread(&bof->type, 4, 1, file);
+ if (r != 1)
+ goto out_err;
+ r = fread(&bof->size, 4, 1, file);
+ if (r != 1)
+ goto out_err;
+ r = fread(&bof->array_size, 4, 1, file);
+ if (r != 1)
+ goto out_err;
+ switch (bof->type) {
+ case BOF_TYPE_STRING:
+ case BOF_TYPE_INT32:
+ case BOF_TYPE_BLOB:
+ bof->value = calloc(1, bof->size - 12);
+ if (bof->value == NULL) {
+ goto out_err;
+ }
+ r = fread(bof->value, bof->size - 12, 1, file);
+ if (r != 1) {
+ fprintf(stderr, "error reading %d\n", bof->size - 12);
+ goto out_err;
+ }
+ break;
+ case BOF_TYPE_NULL:
+ return 0;
+ case BOF_TYPE_OBJECT:
+ case BOF_TYPE_ARRAY:
+ r = bof_read(bof, file, bof->offset + bof->size, level + 2);
+ if (r)
+ goto out_err;
+ break;
+ default:
+ fprintf(stderr, "invalid type %d\n", bof->type);
+ goto out_err;
+ }
+ root->array[root->centry++] = bof;
+ return bof_read(root, file, end, level);
+out_err:
+ bof_decref(bof);
+ return -EINVAL;
+}
+
+bof_t *bof_load_file(const char *filename)
+{
+ bof_t *root = bof_object();
+ int r;
+
+ if (root == NULL) {
+ fprintf(stderr, "%s failed to create root object\n", __func__);
+ return NULL;
+ }
+ root->file = fopen(filename, "r");
+ if (root->file == NULL)
+ goto out_err;
+ r = fseek(root->file, 0L, SEEK_SET);
+ if (r) {
+ fprintf(stderr, "%s failed to seek into file %s\n", __func__, filename);
+ goto out_err;
+ }
+ root->offset = ftell(root->file);
+ r = fread(&root->type, 4, 1, root->file);
+ if (r != 1)
+ goto out_err;
+ r = fread(&root->size, 4, 1, root->file);
+ if (r != 1)
+ goto out_err;
+ r = fread(&root->array_size, 4, 1, root->file);
+ if (r != 1)
+ goto out_err;
+ r = bof_read(root, root->file, root->offset + root->size, 2);
+ if (r)
+ goto out_err;
+ return root;
+out_err:
+ bof_decref(root);
+ return NULL;
+}
+
+void bof_incref(bof_t *bof)
+{
+ bof->refcount++;
+}
+
+void bof_decref(bof_t *bof)
+{
+ unsigned i;
+
+ if (bof == NULL)
+ return;
+ if (--bof->refcount > 0)
+ return;
+ for (i = 0; i < bof->array_size; i++) {
+ bof_decref(bof->array[i]);
+ bof->array[i] = NULL;
+ }
+ bof->array_size = 0;
+ if (bof->file) {
+ fclose(bof->file);
+ bof->file = NULL;
+ }
+ free(bof->array);
+ free(bof->value);
+ free(bof);
+}
+
+static int bof_file_write(bof_t *bof, FILE *file)
+{
+ unsigned i;
+ int r;
+
+ r = fwrite(&bof->type, 4, 1, file);
+ if (r != 1)
+ return -EINVAL;
+ r = fwrite(&bof->size, 4, 1, file);
+ if (r != 1)
+ return -EINVAL;
+ r = fwrite(&bof->array_size, 4, 1, file);
+ if (r != 1)
+ return -EINVAL;
+ switch (bof->type) {
+ case BOF_TYPE_NULL:
+ if (bof->size)
+ return -EINVAL;
+ break;
+ case BOF_TYPE_STRING:
+ case BOF_TYPE_INT32:
+ case BOF_TYPE_BLOB:
+ r = fwrite(bof->value, bof->size - 12, 1, file);
+ if (r != 1)
+ return -EINVAL;
+ break;
+ case BOF_TYPE_OBJECT:
+ case BOF_TYPE_ARRAY:
+ for (i = 0; i < bof->array_size; i++) {
+ r = bof_file_write(bof->array[i], file);
+ if (r)
+ return r;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int bof_dump_file(bof_t *bof, const char *filename)
+{
+ unsigned i;
+ int r = 0;
+
+ if (bof->file) {
+ fclose(bof->file);
+ bof->file = NULL;
+ }
+ bof->file = fopen(filename, "w");
+ if (bof->file == NULL) {
+ fprintf(stderr, "%s failed to open file %s\n", __func__, filename);
+ r = -EINVAL;
+ goto out_err;
+ }
+ r = fseek(bof->file, 0L, SEEK_SET);
+ if (r) {
+ fprintf(stderr, "%s failed to seek into file %s\n", __func__, filename);
+ goto out_err;
+ }
+ r = fwrite(&bof->type, 4, 1, bof->file);
+ if (r != 1)
+ goto out_err;
+ r = fwrite(&bof->size, 4, 1, bof->file);
+ if (r != 1)
+ goto out_err;
+ r = fwrite(&bof->array_size, 4, 1, bof->file);
+ if (r != 1)
+ goto out_err;
+ for (i = 0; i < bof->array_size; i++) {
+ r = bof_file_write(bof->array[i], bof->file);
+ if (r)
+ return r;
+ }
+out_err:
+ fclose(bof->file);
+ bof->file = NULL;
+ return r;
+}
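
bof_file_write() above serializes every node as a fixed 12-byte header followed either by raw payload (string/int32/blob) or by the node's serialized children (object/array). A rough sketch of that on-disk record layout, inferred from the writer and purely illustrative (the library never declares such a struct itself):

struct bof_disk_record {
    uint32_t type;        /* one of the BOF_TYPE_* values */
    uint32_t size;        /* 12-byte header plus payload/children */
    uint32_t array_size;  /* child entry count; 0 for leaf types */
    /* followed by (size - 12) bytes of payload for leaf types, or by
     * array_size serialized child records for objects and arrays */
};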
diff --git a/radeon/bof.h b/radeon/bof.h
new file mode 100644
index 0000000..014affb
--- /dev/null
+++ b/radeon/bof.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jerome Glisse
+ */
+#ifndef BOF_H
+#define BOF_H
+
+#include <stdio.h>
+#include <stdint.h>
+
+#define BOF_TYPE_STRING 0
+#define BOF_TYPE_NULL 1
+#define BOF_TYPE_BLOB 2
+#define BOF_TYPE_OBJECT 3
+#define BOF_TYPE_ARRAY 4
+#define BOF_TYPE_INT32 5
+
+struct bof;
+
+typedef struct bof {
+ struct bof **array;
+ unsigned centry;
+ unsigned nentry;
+ unsigned refcount;
+ FILE *file;
+ uint32_t type;
+ uint32_t size;
+ uint32_t array_size;
+ void *value;
+ long offset;
+} bof_t;
+
+extern int bof_file_flush(bof_t *root);
+extern bof_t *bof_file_new(const char *filename);
+extern int bof_object_dump(bof_t *object, const char *filename);
+
+/* object */
+extern bof_t *bof_object(void);
+extern bof_t *bof_object_get(bof_t *object, const char *keyname);
+extern int bof_object_set(bof_t *object, const char *keyname, bof_t *value);
+/* array */
+extern bof_t *bof_array(void);
+extern int bof_array_append(bof_t *array, bof_t *value);
+extern bof_t *bof_array_get(bof_t *bof, unsigned i);
+extern unsigned bof_array_size(bof_t *bof);
+/* blob */
+extern bof_t *bof_blob(unsigned size, void *value);
+extern unsigned bof_blob_size(bof_t *bof);
+extern void *bof_blob_value(bof_t *bof);
+/* string */
+extern bof_t *bof_string(const char *value);
+/* int32 */
+extern bof_t *bof_int32(int32_t value);
+extern int32_t bof_int32_value(bof_t *bof);
+/* common functions */
+extern void bof_decref(bof_t *bof);
+extern void bof_incref(bof_t *bof);
+extern bof_t *bof_load_file(const char *filename);
+extern int bof_dump_file(bof_t *bof, const char *filename);
+extern void bof_print(bof_t *bof);
+
+static inline int bof_is_object(bof_t *bof){return (bof->type == BOF_TYPE_OBJECT);}
+static inline int bof_is_blob(bof_t *bof){return (bof->type == BOF_TYPE_BLOB);}
+static inline int bof_is_null(bof_t *bof){return (bof->type == BOF_TYPE_NULL);}
+static inline int bof_is_int32(bof_t *bof){return (bof->type == BOF_TYPE_INT32);}
+static inline int bof_is_array(bof_t *bof){return (bof->type == BOF_TYPE_ARRAY);}
+static inline int bof_is_string(bof_t *bof){return (bof->type == BOF_TYPE_STRING);}
+
+#endif
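
A minimal usage sketch of the API declared above, following the convention visible in bof.c (bof_object_set() takes its own reference on the value, so the caller drops its reference afterwards); error handling is trimmed for brevity:

#include "bof.h"

static int example_write_bof(const char *filename)
{
    bof_t *root = bof_object();
    bof_t *value = bof_int32(42);

    if (root == NULL || value == NULL)
        return -1;
    bof_object_set(root, "answer", value);
    bof_decref(value);                /* root keeps its own reference */
    bof_dump_file(root, filename);
    bof_decref(root);
    return 0;
}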
diff --git a/radeon/libdrm_radeon.pc.in b/radeon/libdrm_radeon.pc.in
new file mode 100644
index 0000000..432993a
--- /dev/null
+++ b/radeon/libdrm_radeon.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: libdrm_radeon
+Description: Userspace interface to kernel DRM services for radeon
+Version: @PACKAGE_VERSION@
+Libs: -L${libdir} -ldrm_radeon
+Cflags: -I${includedir} -I${includedir}/libdrm
+Requires.private: libdrm
diff --git a/radeon/meson.build b/radeon/meson.build
new file mode 100644
index 0000000..4c1c71e
--- /dev/null
+++ b/radeon/meson.build
@@ -0,0 +1,70 @@
+# Copyright © 2017-2018 Intel Corporation
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+
+libdrm_radeon = library(
+ 'drm_radeon',
+ [
+ files(
+ 'radeon_bo_gem.c', 'radeon_cs_gem.c', 'radeon_cs_space.c', 'radeon_bo.c',
+ 'radeon_cs.c', 'radeon_surface.c',
+ ),
+ config_file,
+ ],
+ c_args : libdrm_c_args,
+ gnu_symbol_visibility : 'hidden',
+ include_directories : [inc_root, inc_drm],
+ link_with : libdrm,
+ dependencies : [dep_pthread_stubs, dep_atomic_ops],
+ version : '1.0.1',
+ install : true,
+)
+
+ext_libdrm_radeon = declare_dependency(
+ link_with : [libdrm, libdrm_radeon],
+ include_directories : [inc_drm, include_directories('.')],
+)
+
+if meson.version().version_compare('>= 0.54.0')
+ meson.override_dependency('libdrm_radeon', ext_libdrm_radeon)
+endif
+
+install_headers(
+ 'radeon_bo.h', 'radeon_cs.h', 'radeon_surface.h', 'radeon_bo_gem.h',
+ 'radeon_cs_gem.h', 'radeon_bo_int.h', 'radeon_cs_int.h', 'r600_pci_ids.h',
+ subdir : 'libdrm'
+)
+
+pkg.generate(
+ libdrm_radeon,
+ name : 'libdrm_radeon',
+ subdirs : ['.', 'libdrm'],
+ description : 'Userspace interface to kernel DRM services for radeon',
+)
+
+test(
+ 'radeon-symbols-check',
+ symbols_check,
+ args : [
+ '--lib', libdrm_radeon,
+ '--symbols-file', files('radeon-symbols.txt'),
+ '--nm', prog_nm.path(),
+ ],
+)
diff --git a/radeon/r600_pci_ids.h b/radeon/r600_pci_ids.h
new file mode 100644
index 0000000..a3b2eac
--- /dev/null
+++ b/radeon/r600_pci_ids.h
@@ -0,0 +1,487 @@
+CHIPSET(0x9400, R600_9400, R600)
+CHIPSET(0x9401, R600_9401, R600)
+CHIPSET(0x9402, R600_9402, R600)
+CHIPSET(0x9403, R600_9403, R600)
+CHIPSET(0x9405, R600_9405, R600)
+CHIPSET(0x940A, R600_940A, R600)
+CHIPSET(0x940B, R600_940B, R600)
+CHIPSET(0x940F, R600_940F, R600)
+
+CHIPSET(0x94C0, RV610_94C0, RV610)
+CHIPSET(0x94C1, RV610_94C1, RV610)
+CHIPSET(0x94C3, RV610_94C3, RV610)
+CHIPSET(0x94C4, RV610_94C4, RV610)
+CHIPSET(0x94C5, RV610_94C5, RV610)
+CHIPSET(0x94C6, RV610_94C6, RV610)
+CHIPSET(0x94C7, RV610_94C7, RV610)
+CHIPSET(0x94C8, RV610_94C8, RV610)
+CHIPSET(0x94C9, RV610_94C9, RV610)
+CHIPSET(0x94CB, RV610_94CB, RV610)
+CHIPSET(0x94CC, RV610_94CC, RV610)
+CHIPSET(0x94CD, RV610_94CD, RV610)
+
+CHIPSET(0x9580, RV630_9580, RV630)
+CHIPSET(0x9581, RV630_9581, RV630)
+CHIPSET(0x9583, RV630_9583, RV630)
+CHIPSET(0x9586, RV630_9586, RV630)
+CHIPSET(0x9587, RV630_9587, RV630)
+CHIPSET(0x9588, RV630_9588, RV630)
+CHIPSET(0x9589, RV630_9589, RV630)
+CHIPSET(0x958A, RV630_958A, RV630)
+CHIPSET(0x958B, RV630_958B, RV630)
+CHIPSET(0x958C, RV630_958C, RV630)
+CHIPSET(0x958D, RV630_958D, RV630)
+CHIPSET(0x958E, RV630_958E, RV630)
+CHIPSET(0x958F, RV630_958F, RV630)
+
+CHIPSET(0x9500, RV670_9500, RV670)
+CHIPSET(0x9501, RV670_9501, RV670)
+CHIPSET(0x9504, RV670_9504, RV670)
+CHIPSET(0x9505, RV670_9505, RV670)
+CHIPSET(0x9506, RV670_9506, RV670)
+CHIPSET(0x9507, RV670_9507, RV670)
+CHIPSET(0x9508, RV670_9508, RV670)
+CHIPSET(0x9509, RV670_9509, RV670)
+CHIPSET(0x950F, RV670_950F, RV670)
+CHIPSET(0x9511, RV670_9511, RV670)
+CHIPSET(0x9515, RV670_9515, RV670)
+CHIPSET(0x9517, RV670_9517, RV670)
+CHIPSET(0x9519, RV670_9519, RV670)
+
+CHIPSET(0x95C0, RV620_95C0, RV620)
+CHIPSET(0x95C2, RV620_95C2, RV620)
+CHIPSET(0x95C4, RV620_95C4, RV620)
+CHIPSET(0x95C5, RV620_95C5, RV620)
+CHIPSET(0x95C6, RV620_95C6, RV620)
+CHIPSET(0x95C7, RV620_95C7, RV620)
+CHIPSET(0x95C9, RV620_95C9, RV620)
+CHIPSET(0x95CC, RV620_95CC, RV620)
+CHIPSET(0x95CD, RV620_95CD, RV620)
+CHIPSET(0x95CE, RV620_95CE, RV620)
+CHIPSET(0x95CF, RV620_95CF, RV620)
+
+CHIPSET(0x9590, RV635_9590, RV635)
+CHIPSET(0x9591, RV635_9591, RV635)
+CHIPSET(0x9593, RV635_9593, RV635)
+CHIPSET(0x9595, RV635_9595, RV635)
+CHIPSET(0x9596, RV635_9596, RV635)
+CHIPSET(0x9597, RV635_9597, RV635)
+CHIPSET(0x9598, RV635_9598, RV635)
+CHIPSET(0x9599, RV635_9599, RV635)
+CHIPSET(0x959B, RV635_959B, RV635)
+
+CHIPSET(0x9610, RS780_9610, RS780)
+CHIPSET(0x9611, RS780_9611, RS780)
+CHIPSET(0x9612, RS780_9612, RS780)
+CHIPSET(0x9613, RS780_9613, RS780)
+CHIPSET(0x9614, RS780_9614, RS780)
+CHIPSET(0x9615, RS780_9615, RS780)
+CHIPSET(0x9616, RS780_9616, RS780)
+
+CHIPSET(0x9710, RS880_9710, RS880)
+CHIPSET(0x9711, RS880_9711, RS880)
+CHIPSET(0x9712, RS880_9712, RS880)
+CHIPSET(0x9713, RS880_9713, RS880)
+CHIPSET(0x9714, RS880_9714, RS880)
+CHIPSET(0x9715, RS880_9715, RS880)
+
+CHIPSET(0x9440, RV770_9440, RV770)
+CHIPSET(0x9441, RV770_9441, RV770)
+CHIPSET(0x9442, RV770_9442, RV770)
+CHIPSET(0x9443, RV770_9443, RV770)
+CHIPSET(0x9444, RV770_9444, RV770)
+CHIPSET(0x9446, RV770_9446, RV770)
+CHIPSET(0x944A, RV770_944A, RV770)
+CHIPSET(0x944B, RV770_944B, RV770)
+CHIPSET(0x944C, RV770_944C, RV770)
+CHIPSET(0x944E, RV770_944E, RV770)
+CHIPSET(0x9450, RV770_9450, RV770)
+CHIPSET(0x9452, RV770_9452, RV770)
+CHIPSET(0x9456, RV770_9456, RV770)
+CHIPSET(0x945A, RV770_945A, RV770)
+CHIPSET(0x945B, RV770_945B, RV770)
+CHIPSET(0x945E, RV770_945E, RV770)
+CHIPSET(0x9460, RV790_9460, RV770)
+CHIPSET(0x9462, RV790_9462, RV770)
+CHIPSET(0x946A, RV770_946A, RV770)
+CHIPSET(0x946B, RV770_946B, RV770)
+CHIPSET(0x947A, RV770_947A, RV770)
+CHIPSET(0x947B, RV770_947B, RV770)
+
+CHIPSET(0x9480, RV730_9480, RV730)
+CHIPSET(0x9487, RV730_9487, RV730)
+CHIPSET(0x9488, RV730_9488, RV730)
+CHIPSET(0x9489, RV730_9489, RV730)
+CHIPSET(0x948A, RV730_948A, RV730)
+CHIPSET(0x948F, RV730_948F, RV730)
+CHIPSET(0x9490, RV730_9490, RV730)
+CHIPSET(0x9491, RV730_9491, RV730)
+CHIPSET(0x9495, RV730_9495, RV730)
+CHIPSET(0x9498, RV730_9498, RV730)
+CHIPSET(0x949C, RV730_949C, RV730)
+CHIPSET(0x949E, RV730_949E, RV730)
+CHIPSET(0x949F, RV730_949F, RV730)
+
+CHIPSET(0x9540, RV710_9540, RV710)
+CHIPSET(0x9541, RV710_9541, RV710)
+CHIPSET(0x9542, RV710_9542, RV710)
+CHIPSET(0x954E, RV710_954E, RV710)
+CHIPSET(0x954F, RV710_954F, RV710)
+CHIPSET(0x9552, RV710_9552, RV710)
+CHIPSET(0x9553, RV710_9553, RV710)
+CHIPSET(0x9555, RV710_9555, RV710)
+CHIPSET(0x9557, RV710_9557, RV710)
+CHIPSET(0x955F, RV710_955F, RV710)
+
+CHIPSET(0x94A0, RV740_94A0, RV740)
+CHIPSET(0x94A1, RV740_94A1, RV740)
+CHIPSET(0x94A3, RV740_94A3, RV740)
+CHIPSET(0x94B1, RV740_94B1, RV740)
+CHIPSET(0x94B3, RV740_94B3, RV740)
+CHIPSET(0x94B4, RV740_94B4, RV740)
+CHIPSET(0x94B5, RV740_94B5, RV740)
+CHIPSET(0x94B9, RV740_94B9, RV740)
+
+CHIPSET(0x68E0, CEDAR_68E0, CEDAR)
+CHIPSET(0x68E1, CEDAR_68E1, CEDAR)
+CHIPSET(0x68E4, CEDAR_68E4, CEDAR)
+CHIPSET(0x68E5, CEDAR_68E5, CEDAR)
+CHIPSET(0x68E8, CEDAR_68E8, CEDAR)
+CHIPSET(0x68E9, CEDAR_68E9, CEDAR)
+CHIPSET(0x68F1, CEDAR_68F1, CEDAR)
+CHIPSET(0x68F2, CEDAR_68F2, CEDAR)
+CHIPSET(0x68F8, CEDAR_68F8, CEDAR)
+CHIPSET(0x68F9, CEDAR_68F9, CEDAR)
+CHIPSET(0x68FA, CEDAR_68FA, CEDAR)
+CHIPSET(0x68FE, CEDAR_68FE, CEDAR)
+
+CHIPSET(0x68C0, REDWOOD_68C0, REDWOOD)
+CHIPSET(0x68C1, REDWOOD_68C1, REDWOOD)
+CHIPSET(0x68C7, REDWOOD_68C7, REDWOOD)
+CHIPSET(0x68C8, REDWOOD_68C8, REDWOOD)
+CHIPSET(0x68C9, REDWOOD_68C9, REDWOOD)
+CHIPSET(0x68D8, REDWOOD_68D8, REDWOOD)
+CHIPSET(0x68D9, REDWOOD_68D9, REDWOOD)
+CHIPSET(0x68DA, REDWOOD_68DA, REDWOOD)
+CHIPSET(0x68DE, REDWOOD_68DE, REDWOOD)
+
+CHIPSET(0x68A0, JUNIPER_68A0, JUNIPER)
+CHIPSET(0x68A1, JUNIPER_68A1, JUNIPER)
+CHIPSET(0x68A8, JUNIPER_68A8, JUNIPER)
+CHIPSET(0x68A9, JUNIPER_68A9, JUNIPER)
+CHIPSET(0x68B0, JUNIPER_68B0, JUNIPER)
+CHIPSET(0x68B8, JUNIPER_68B8, JUNIPER)
+CHIPSET(0x68B9, JUNIPER_68B9, JUNIPER)
+CHIPSET(0x68BA, JUNIPER_68BA, JUNIPER)
+CHIPSET(0x68BE, JUNIPER_68BE, JUNIPER)
+CHIPSET(0x68BF, JUNIPER_68BF, JUNIPER)
+
+CHIPSET(0x6880, CYPRESS_6880, CYPRESS)
+CHIPSET(0x6888, CYPRESS_6888, CYPRESS)
+CHIPSET(0x6889, CYPRESS_6889, CYPRESS)
+CHIPSET(0x688A, CYPRESS_688A, CYPRESS)
+CHIPSET(0x688C, CYPRESS_688C, CYPRESS)
+CHIPSET(0x688D, CYPRESS_688D, CYPRESS)
+CHIPSET(0x6898, CYPRESS_6898, CYPRESS)
+CHIPSET(0x6899, CYPRESS_6899, CYPRESS)
+CHIPSET(0x689B, CYPRESS_689B, CYPRESS)
+CHIPSET(0x689E, CYPRESS_689E, CYPRESS)
+
+CHIPSET(0x689C, HEMLOCK_689C, HEMLOCK)
+CHIPSET(0x689D, HEMLOCK_689D, HEMLOCK)
+
+CHIPSET(0x9802, PALM_9802, PALM)
+CHIPSET(0x9803, PALM_9803, PALM)
+CHIPSET(0x9804, PALM_9804, PALM)
+CHIPSET(0x9805, PALM_9805, PALM)
+CHIPSET(0x9806, PALM_9806, PALM)
+CHIPSET(0x9807, PALM_9807, PALM)
+CHIPSET(0x9808, PALM_9808, PALM)
+CHIPSET(0x9809, PALM_9809, PALM)
+CHIPSET(0x980A, PALM_980A, PALM)
+
+CHIPSET(0x9640, SUMO_9640, SUMO)
+CHIPSET(0x9641, SUMO_9641, SUMO)
+CHIPSET(0x9642, SUMO2_9642, SUMO2)
+CHIPSET(0x9643, SUMO2_9643, SUMO2)
+CHIPSET(0x9644, SUMO2_9644, SUMO2)
+CHIPSET(0x9645, SUMO2_9645, SUMO2)
+CHIPSET(0x9647, SUMO_9647, SUMO)
+CHIPSET(0x9648, SUMO_9648, SUMO)
+CHIPSET(0x9649, SUMO2_9649, SUMO2)
+CHIPSET(0x964a, SUMO_964A, SUMO)
+CHIPSET(0x964b, SUMO_964B, SUMO)
+CHIPSET(0x964c, SUMO_964C, SUMO)
+CHIPSET(0x964e, SUMO_964E, SUMO)
+CHIPSET(0x964f, SUMO_964F, SUMO)
+
+CHIPSET(0x6700, CAYMAN_6700, CAYMAN)
+CHIPSET(0x6701, CAYMAN_6701, CAYMAN)
+CHIPSET(0x6702, CAYMAN_6702, CAYMAN)
+CHIPSET(0x6703, CAYMAN_6703, CAYMAN)
+CHIPSET(0x6704, CAYMAN_6704, CAYMAN)
+CHIPSET(0x6705, CAYMAN_6705, CAYMAN)
+CHIPSET(0x6706, CAYMAN_6706, CAYMAN)
+CHIPSET(0x6707, CAYMAN_6707, CAYMAN)
+CHIPSET(0x6708, CAYMAN_6708, CAYMAN)
+CHIPSET(0x6709, CAYMAN_6709, CAYMAN)
+CHIPSET(0x6718, CAYMAN_6718, CAYMAN)
+CHIPSET(0x6719, CAYMAN_6719, CAYMAN)
+CHIPSET(0x671C, CAYMAN_671C, CAYMAN)
+CHIPSET(0x671D, CAYMAN_671D, CAYMAN)
+CHIPSET(0x671F, CAYMAN_671F, CAYMAN)
+
+CHIPSET(0x6720, BARTS_6720, BARTS)
+CHIPSET(0x6721, BARTS_6721, BARTS)
+CHIPSET(0x6722, BARTS_6722, BARTS)
+CHIPSET(0x6723, BARTS_6723, BARTS)
+CHIPSET(0x6724, BARTS_6724, BARTS)
+CHIPSET(0x6725, BARTS_6725, BARTS)
+CHIPSET(0x6726, BARTS_6726, BARTS)
+CHIPSET(0x6727, BARTS_6727, BARTS)
+CHIPSET(0x6728, BARTS_6728, BARTS)
+CHIPSET(0x6729, BARTS_6729, BARTS)
+CHIPSET(0x6738, BARTS_6738, BARTS)
+CHIPSET(0x6739, BARTS_6739, BARTS)
+CHIPSET(0x673E, BARTS_673E, BARTS)
+
+CHIPSET(0x6740, TURKS_6740, TURKS)
+CHIPSET(0x6741, TURKS_6741, TURKS)
+CHIPSET(0x6742, TURKS_6742, TURKS)
+CHIPSET(0x6743, TURKS_6743, TURKS)
+CHIPSET(0x6744, TURKS_6744, TURKS)
+CHIPSET(0x6745, TURKS_6745, TURKS)
+CHIPSET(0x6746, TURKS_6746, TURKS)
+CHIPSET(0x6747, TURKS_6747, TURKS)
+CHIPSET(0x6748, TURKS_6748, TURKS)
+CHIPSET(0x6749, TURKS_6749, TURKS)
+CHIPSET(0x674A, TURKS_674A, TURKS)
+CHIPSET(0x6750, TURKS_6750, TURKS)
+CHIPSET(0x6751, TURKS_6751, TURKS)
+CHIPSET(0x6758, TURKS_6758, TURKS)
+CHIPSET(0x6759, TURKS_6759, TURKS)
+CHIPSET(0x675B, TURKS_675B, TURKS)
+CHIPSET(0x675D, TURKS_675D, TURKS)
+CHIPSET(0x675F, TURKS_675F, TURKS)
+CHIPSET(0x6840, TURKS_6840, TURKS)
+CHIPSET(0x6841, TURKS_6841, TURKS)
+CHIPSET(0x6842, TURKS_6842, TURKS)
+CHIPSET(0x6843, TURKS_6843, TURKS)
+CHIPSET(0x6849, TURKS_6849, TURKS)
+CHIPSET(0x6850, TURKS_6850, TURKS)
+CHIPSET(0x6858, TURKS_6858, TURKS)
+CHIPSET(0x6859, TURKS_6859, TURKS)
+
+CHIPSET(0x6760, CAICOS_6760, CAICOS)
+CHIPSET(0x6761, CAICOS_6761, CAICOS)
+CHIPSET(0x6762, CAICOS_6762, CAICOS)
+CHIPSET(0x6763, CAICOS_6763, CAICOS)
+CHIPSET(0x6764, CAICOS_6764, CAICOS)
+CHIPSET(0x6765, CAICOS_6765, CAICOS)
+CHIPSET(0x6766, CAICOS_6766, CAICOS)
+CHIPSET(0x6767, CAICOS_6767, CAICOS)
+CHIPSET(0x6768, CAICOS_6768, CAICOS)
+CHIPSET(0x6770, CAICOS_6770, CAICOS)
+CHIPSET(0x6771, CAICOS_6771, CAICOS)
+CHIPSET(0x6772, CAICOS_6772, CAICOS)
+CHIPSET(0x6778, CAICOS_6778, CAICOS)
+CHIPSET(0x6779, CAICOS_6779, CAICOS)
+CHIPSET(0x677B, CAICOS_677B, CAICOS)
+
+CHIPSET(0x9900, ARUBA_9900, ARUBA)
+CHIPSET(0x9901, ARUBA_9901, ARUBA)
+CHIPSET(0x9903, ARUBA_9903, ARUBA)
+CHIPSET(0x9904, ARUBA_9904, ARUBA)
+CHIPSET(0x9905, ARUBA_9905, ARUBA)
+CHIPSET(0x9906, ARUBA_9906, ARUBA)
+CHIPSET(0x9907, ARUBA_9907, ARUBA)
+CHIPSET(0x9908, ARUBA_9908, ARUBA)
+CHIPSET(0x9909, ARUBA_9909, ARUBA)
+CHIPSET(0x990A, ARUBA_990A, ARUBA)
+CHIPSET(0x990B, ARUBA_990B, ARUBA)
+CHIPSET(0x990C, ARUBA_990C, ARUBA)
+CHIPSET(0x990D, ARUBA_990D, ARUBA)
+CHIPSET(0x990E, ARUBA_990E, ARUBA)
+CHIPSET(0x990F, ARUBA_990F, ARUBA)
+CHIPSET(0x9910, ARUBA_9910, ARUBA)
+CHIPSET(0x9913, ARUBA_9913, ARUBA)
+CHIPSET(0x9917, ARUBA_9917, ARUBA)
+CHIPSET(0x9918, ARUBA_9918, ARUBA)
+CHIPSET(0x9919, ARUBA_9919, ARUBA)
+CHIPSET(0x9990, ARUBA_9990, ARUBA)
+CHIPSET(0x9991, ARUBA_9991, ARUBA)
+CHIPSET(0x9992, ARUBA_9992, ARUBA)
+CHIPSET(0x9993, ARUBA_9993, ARUBA)
+CHIPSET(0x9994, ARUBA_9994, ARUBA)
+CHIPSET(0x9995, ARUBA_9995, ARUBA)
+CHIPSET(0x9996, ARUBA_9996, ARUBA)
+CHIPSET(0x9997, ARUBA_9997, ARUBA)
+CHIPSET(0x9998, ARUBA_9998, ARUBA)
+CHIPSET(0x9999, ARUBA_9999, ARUBA)
+CHIPSET(0x999A, ARUBA_999A, ARUBA)
+CHIPSET(0x999B, ARUBA_999B, ARUBA)
+CHIPSET(0x999C, ARUBA_999C, ARUBA)
+CHIPSET(0x999D, ARUBA_999D, ARUBA)
+CHIPSET(0x99A0, ARUBA_99A0, ARUBA)
+CHIPSET(0x99A2, ARUBA_99A2, ARUBA)
+CHIPSET(0x99A4, ARUBA_99A4, ARUBA)
+
+CHIPSET(0x6780, TAHITI_6780, TAHITI)
+CHIPSET(0x6784, TAHITI_6784, TAHITI)
+CHIPSET(0x6788, TAHITI_6788, TAHITI)
+CHIPSET(0x678A, TAHITI_678A, TAHITI)
+CHIPSET(0x6790, TAHITI_6790, TAHITI)
+CHIPSET(0x6791, TAHITI_6791, TAHITI)
+CHIPSET(0x6792, TAHITI_6792, TAHITI)
+CHIPSET(0x6798, TAHITI_6798, TAHITI)
+CHIPSET(0x6799, TAHITI_6799, TAHITI)
+CHIPSET(0x679A, TAHITI_679A, TAHITI)
+CHIPSET(0x679B, TAHITI_679B, TAHITI)
+CHIPSET(0x679E, TAHITI_679E, TAHITI)
+CHIPSET(0x679F, TAHITI_679F, TAHITI)
+
+CHIPSET(0x6800, PITCAIRN_6800, PITCAIRN)
+CHIPSET(0x6801, PITCAIRN_6801, PITCAIRN)
+CHIPSET(0x6802, PITCAIRN_6802, PITCAIRN)
+CHIPSET(0x6806, PITCAIRN_6806, PITCAIRN)
+CHIPSET(0x6808, PITCAIRN_6808, PITCAIRN)
+CHIPSET(0x6809, PITCAIRN_6809, PITCAIRN)
+CHIPSET(0x6810, PITCAIRN_6810, PITCAIRN)
+CHIPSET(0x6811, PITCAIRN_6811, PITCAIRN)
+CHIPSET(0x6816, PITCAIRN_6816, PITCAIRN)
+CHIPSET(0x6817, PITCAIRN_6817, PITCAIRN)
+CHIPSET(0x6818, PITCAIRN_6818, PITCAIRN)
+CHIPSET(0x6819, PITCAIRN_6819, PITCAIRN)
+CHIPSET(0x684C, PITCAIRN_684C, PITCAIRN)
+
+CHIPSET(0x6820, VERDE_6820, VERDE)
+CHIPSET(0x6821, VERDE_6821, VERDE)
+CHIPSET(0x6822, VERDE_6822, VERDE)
+CHIPSET(0x6823, VERDE_6823, VERDE)
+CHIPSET(0x6824, VERDE_6824, VERDE)
+CHIPSET(0x6825, VERDE_6825, VERDE)
+CHIPSET(0x6826, VERDE_6826, VERDE)
+CHIPSET(0x6827, VERDE_6827, VERDE)
+CHIPSET(0x6828, VERDE_6828, VERDE)
+CHIPSET(0x6829, VERDE_6829, VERDE)
+CHIPSET(0x682A, VERDE_682A, VERDE)
+CHIPSET(0x682B, VERDE_682B, VERDE)
+CHIPSET(0x682C, VERDE_682C, VERDE)
+CHIPSET(0x682D, VERDE_682D, VERDE)
+CHIPSET(0x682F, VERDE_682F, VERDE)
+CHIPSET(0x6830, VERDE_6830, VERDE)
+CHIPSET(0x6831, VERDE_6831, VERDE)
+CHIPSET(0x6835, VERDE_6835, VERDE)
+CHIPSET(0x6837, VERDE_6837, VERDE)
+CHIPSET(0x6838, VERDE_6838, VERDE)
+CHIPSET(0x6839, VERDE_6839, VERDE)
+CHIPSET(0x683B, VERDE_683B, VERDE)
+CHIPSET(0x683D, VERDE_683D, VERDE)
+CHIPSET(0x683F, VERDE_683F, VERDE)
+
+CHIPSET(0x6600, OLAND_6600, OLAND)
+CHIPSET(0x6601, OLAND_6601, OLAND)
+CHIPSET(0x6602, OLAND_6602, OLAND)
+CHIPSET(0x6603, OLAND_6603, OLAND)
+CHIPSET(0x6604, OLAND_6604, OLAND)
+CHIPSET(0x6605, OLAND_6605, OLAND)
+CHIPSET(0x6606, OLAND_6606, OLAND)
+CHIPSET(0x6607, OLAND_6607, OLAND)
+CHIPSET(0x6608, OLAND_6608, OLAND)
+CHIPSET(0x6610, OLAND_6610, OLAND)
+CHIPSET(0x6611, OLAND_6611, OLAND)
+CHIPSET(0x6613, OLAND_6613, OLAND)
+CHIPSET(0x6617, OLAND_6617, OLAND)
+CHIPSET(0x6620, OLAND_6620, OLAND)
+CHIPSET(0x6621, OLAND_6621, OLAND)
+CHIPSET(0x6623, OLAND_6623, OLAND)
+CHIPSET(0x6631, OLAND_6631, OLAND)
+
+CHIPSET(0x6660, HAINAN_6660, HAINAN)
+CHIPSET(0x6663, HAINAN_6663, HAINAN)
+CHIPSET(0x6664, HAINAN_6664, HAINAN)
+CHIPSET(0x6665, HAINAN_6665, HAINAN)
+CHIPSET(0x6667, HAINAN_6667, HAINAN)
+CHIPSET(0x666F, HAINAN_666F, HAINAN)
+
+CHIPSET(0x6640, BONAIRE_6640, BONAIRE)
+CHIPSET(0x6641, BONAIRE_6641, BONAIRE)
+CHIPSET(0x6646, BONAIRE_6646, BONAIRE)
+CHIPSET(0x6647, BONAIRE_6647, BONAIRE)
+CHIPSET(0x6649, BONAIRE_6649, BONAIRE)
+CHIPSET(0x6650, BONAIRE_6650, BONAIRE)
+CHIPSET(0x6651, BONAIRE_6651, BONAIRE)
+CHIPSET(0x6658, BONAIRE_6658, BONAIRE)
+CHIPSET(0x665C, BONAIRE_665C, BONAIRE)
+CHIPSET(0x665D, BONAIRE_665D, BONAIRE)
+CHIPSET(0x665F, BONAIRE_665F, BONAIRE)
+
+CHIPSET(0x9830, KABINI_9830, KABINI)
+CHIPSET(0x9831, KABINI_9831, KABINI)
+CHIPSET(0x9832, KABINI_9832, KABINI)
+CHIPSET(0x9833, KABINI_9833, KABINI)
+CHIPSET(0x9834, KABINI_9834, KABINI)
+CHIPSET(0x9835, KABINI_9835, KABINI)
+CHIPSET(0x9836, KABINI_9836, KABINI)
+CHIPSET(0x9837, KABINI_9837, KABINI)
+CHIPSET(0x9838, KABINI_9838, KABINI)
+CHIPSET(0x9839, KABINI_9839, KABINI)
+CHIPSET(0x983A, KABINI_983A, KABINI)
+CHIPSET(0x983B, KABINI_983B, KABINI)
+CHIPSET(0x983C, KABINI_983C, KABINI)
+CHIPSET(0x983D, KABINI_983D, KABINI)
+CHIPSET(0x983E, KABINI_983E, KABINI)
+CHIPSET(0x983F, KABINI_983F, KABINI)
+
+CHIPSET(0x9850, MULLINS_9850, MULLINS)
+CHIPSET(0x9851, MULLINS_9851, MULLINS)
+CHIPSET(0x9852, MULLINS_9852, MULLINS)
+CHIPSET(0x9853, MULLINS_9853, MULLINS)
+CHIPSET(0x9854, MULLINS_9854, MULLINS)
+CHIPSET(0x9855, MULLINS_9855, MULLINS)
+CHIPSET(0x9856, MULLINS_9856, MULLINS)
+CHIPSET(0x9857, MULLINS_9857, MULLINS)
+CHIPSET(0x9858, MULLINS_9858, MULLINS)
+CHIPSET(0x9859, MULLINS_9859, MULLINS)
+CHIPSET(0x985A, MULLINS_985A, MULLINS)
+CHIPSET(0x985B, MULLINS_985B, MULLINS)
+CHIPSET(0x985C, MULLINS_985C, MULLINS)
+CHIPSET(0x985D, MULLINS_985D, MULLINS)
+CHIPSET(0x985E, MULLINS_985E, MULLINS)
+CHIPSET(0x985F, MULLINS_985F, MULLINS)
+
+CHIPSET(0x1304, KAVERI_1304, KAVERI)
+CHIPSET(0x1305, KAVERI_1305, KAVERI)
+CHIPSET(0x1306, KAVERI_1306, KAVERI)
+CHIPSET(0x1307, KAVERI_1307, KAVERI)
+CHIPSET(0x1309, KAVERI_1309, KAVERI)
+CHIPSET(0x130A, KAVERI_130A, KAVERI)
+CHIPSET(0x130B, KAVERI_130B, KAVERI)
+CHIPSET(0x130C, KAVERI_130C, KAVERI)
+CHIPSET(0x130D, KAVERI_130D, KAVERI)
+CHIPSET(0x130E, KAVERI_130E, KAVERI)
+CHIPSET(0x130F, KAVERI_130F, KAVERI)
+CHIPSET(0x1310, KAVERI_1310, KAVERI)
+CHIPSET(0x1311, KAVERI_1311, KAVERI)
+CHIPSET(0x1312, KAVERI_1312, KAVERI)
+CHIPSET(0x1313, KAVERI_1313, KAVERI)
+CHIPSET(0x1315, KAVERI_1315, KAVERI)
+CHIPSET(0x1316, KAVERI_1316, KAVERI)
+CHIPSET(0x1317, KAVERI_1317, KAVERI)
+CHIPSET(0x1318, KAVERI_1318, KAVERI)
+CHIPSET(0x131B, KAVERI_131B, KAVERI)
+CHIPSET(0x131C, KAVERI_131C, KAVERI)
+CHIPSET(0x131D, KAVERI_131D, KAVERI)
+
+CHIPSET(0x67A0, HAWAII_67A0, HAWAII)
+CHIPSET(0x67A1, HAWAII_67A1, HAWAII)
+CHIPSET(0x67A2, HAWAII_67A2, HAWAII)
+CHIPSET(0x67A8, HAWAII_67A8, HAWAII)
+CHIPSET(0x67A9, HAWAII_67A9, HAWAII)
+CHIPSET(0x67AA, HAWAII_67AA, HAWAII)
+CHIPSET(0x67B0, HAWAII_67B0, HAWAII)
+CHIPSET(0x67B1, HAWAII_67B1, HAWAII)
+CHIPSET(0x67B8, HAWAII_67B8, HAWAII)
+CHIPSET(0x67B9, HAWAII_67B9, HAWAII)
+CHIPSET(0x67BA, HAWAII_67BA, HAWAII)
+CHIPSET(0x67BE, HAWAII_67BE, HAWAII)
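
r600_pci_ids.h is an X-macro list: it expands to nothing by itself, and each consumer defines CHIPSET() to the record it needs before including the header. A hypothetical consumer mapping PCI IDs to a family enum might look like the sketch below (the CHIP_* enum names are an assumption for illustration, not something this header provides):

struct r600_pci_id {
    unsigned pci_id;
    unsigned family;       /* assumed CHIP_* enum value */
};

static const struct r600_pci_id r600_pci_ids[] = {
#define CHIPSET(pci_id, name, family) { pci_id, CHIP_##family },
#include "r600_pci_ids.h"
#undef CHIPSET
};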
diff --git a/radeon/radeon-symbols.txt b/radeon/radeon-symbols.txt
new file mode 100644
index 0000000..5a532d8
--- /dev/null
+++ b/radeon/radeon-symbols.txt
@@ -0,0 +1,44 @@
+radeon_bo_debug
+radeon_bo_get_handle
+radeon_bo_get_src_domain
+radeon_bo_get_tiling
+radeon_bo_is_busy
+radeon_bo_is_referenced_by_cs
+radeon_bo_is_static
+radeon_bo_manager_gem_ctor
+radeon_bo_manager_gem_dtor
+radeon_bo_map
+radeon_bo_open
+radeon_bo_ref
+radeon_bo_set_tiling
+radeon_bo_unmap
+radeon_bo_unref
+radeon_bo_wait
+radeon_cs_begin
+radeon_cs_create
+radeon_cs_destroy
+radeon_cs_emit
+radeon_cs_end
+radeon_cs_erase
+radeon_cs_get_id
+radeon_cs_manager_gem_ctor
+radeon_cs_manager_gem_dtor
+radeon_cs_need_flush
+radeon_cs_print
+radeon_cs_set_limit
+radeon_cs_space_add_persistent_bo
+radeon_cs_space_check
+radeon_cs_space_check_with_bo
+radeon_cs_space_reset_bos
+radeon_cs_space_set_flush
+radeon_cs_write_reloc
+radeon_gem_bo_open_prime
+radeon_gem_get_kernel_name
+radeon_gem_get_reloc_in_cs
+radeon_gem_name_bo
+radeon_gem_prime_share_bo
+radeon_gem_set_domain
+radeon_surface_best
+radeon_surface_init
+radeon_surface_manager_free
+radeon_surface_manager_new
diff --git a/radeon/radeon_bo.c b/radeon/radeon_bo.c
new file mode 100644
index 0000000..9192953
--- /dev/null
+++ b/radeon/radeon_bo.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright © 2008 Dave Airlie
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Dave Airlie
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#include <libdrm_macros.h>
+#include <radeon_bo.h>
+#include <radeon_bo_int.h>
+
+drm_public void radeon_bo_debug(struct radeon_bo *bo, const char *op)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+
+ fprintf(stderr, "%s %p 0x%08X 0x%08X 0x%08X\n",
+ op, bo, bo->handle, boi->size, boi->cref);
+}
+
+drm_public struct radeon_bo *
+radeon_bo_open(struct radeon_bo_manager *bom, uint32_t handle, uint32_t size,
+ uint32_t alignment, uint32_t domains, uint32_t flags)
+{
+ struct radeon_bo *bo;
+ bo = bom->funcs->bo_open(bom, handle, size, alignment, domains, flags);
+ return bo;
+}
+
+drm_public void radeon_bo_ref(struct radeon_bo *bo)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ boi->cref++;
+ boi->bom->funcs->bo_ref(boi);
+}
+
+drm_public struct radeon_bo *radeon_bo_unref(struct radeon_bo *bo)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ if (bo == NULL)
+ return NULL;
+
+ boi->cref--;
+ return boi->bom->funcs->bo_unref(boi);
+}
+
+drm_public int radeon_bo_map(struct radeon_bo *bo, int write)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ return boi->bom->funcs->bo_map(boi, write);
+}
+
+drm_public int radeon_bo_unmap(struct radeon_bo *bo)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ return boi->bom->funcs->bo_unmap(boi);
+}
+
+drm_public int radeon_bo_wait(struct radeon_bo *bo)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ if (!boi->bom->funcs->bo_wait)
+ return 0;
+ return boi->bom->funcs->bo_wait(boi);
+}
+
+drm_public int radeon_bo_is_busy(struct radeon_bo *bo, uint32_t *domain)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ return boi->bom->funcs->bo_is_busy(boi, domain);
+}
+
+drm_public int
+radeon_bo_set_tiling(struct radeon_bo *bo,
+ uint32_t tiling_flags, uint32_t pitch)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ return boi->bom->funcs->bo_set_tiling(boi, tiling_flags, pitch);
+}
+
+drm_public int
+radeon_bo_get_tiling(struct radeon_bo *bo,
+ uint32_t *tiling_flags, uint32_t *pitch)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ return boi->bom->funcs->bo_get_tiling(boi, tiling_flags, pitch);
+}
+
+drm_public int radeon_bo_is_static(struct radeon_bo *bo)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ if (boi->bom->funcs->bo_is_static)
+ return boi->bom->funcs->bo_is_static(boi);
+ return 0;
+}
+
+drm_public int
+radeon_bo_is_referenced_by_cs(struct radeon_bo *bo, struct radeon_cs *cs)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ return boi->cref > 1;
+}
+
+drm_public uint32_t radeon_bo_get_handle(struct radeon_bo *bo)
+{
+ return bo->handle;
+}
+
+drm_public uint32_t radeon_bo_get_src_domain(struct radeon_bo *bo)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ uint32_t src_domain;
+
+ src_domain = boi->space_accounted & 0xffff;
+ if (!src_domain)
+ src_domain = boi->space_accounted >> 16;
+
+ return src_domain;
+}
diff --git a/radeon/radeon_bo.h b/radeon/radeon_bo.h
new file mode 100644
index 0000000..6e20c6c
--- /dev/null
+++ b/radeon/radeon_bo.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifndef RADEON_BO_H
+#define RADEON_BO_H
+
+#include <stdio.h>
+#include <stdint.h>
+
+/* bo object */
+#define RADEON_BO_FLAGS_MACRO_TILE 1
+#define RADEON_BO_FLAGS_MICRO_TILE 2
+#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20
+
+struct radeon_bo_manager;
+struct radeon_cs;
+
+struct radeon_bo {
+ void *ptr;
+ uint32_t flags;
+ uint32_t handle;
+ uint32_t size;
+};
+
+
+void radeon_bo_debug(struct radeon_bo *bo, const char *op);
+
+struct radeon_bo *radeon_bo_open(struct radeon_bo_manager *bom,
+ uint32_t handle,
+ uint32_t size,
+ uint32_t alignment,
+ uint32_t domains,
+ uint32_t flags);
+
+void radeon_bo_ref(struct radeon_bo *bo);
+struct radeon_bo *radeon_bo_unref(struct radeon_bo *bo);
+int radeon_bo_map(struct radeon_bo *bo, int write);
+int radeon_bo_unmap(struct radeon_bo *bo);
+int radeon_bo_wait(struct radeon_bo *bo);
+int radeon_bo_is_busy(struct radeon_bo *bo, uint32_t *domain);
+int radeon_bo_set_tiling(struct radeon_bo *bo, uint32_t tiling_flags, uint32_t pitch);
+int radeon_bo_get_tiling(struct radeon_bo *bo, uint32_t *tiling_flags, uint32_t *pitch);
+int radeon_bo_is_static(struct radeon_bo *bo);
+int radeon_bo_is_referenced_by_cs(struct radeon_bo *bo, struct radeon_cs *cs);
+uint32_t radeon_bo_get_handle(struct radeon_bo *bo);
+uint32_t radeon_bo_get_src_domain(struct radeon_bo *bo);
+#endif
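
An illustrative round trip through this buffer-object API, assuming a manager created with radeon_bo_manager_gem_ctor() from radeon_bo_gem.h (added further down in this commit) and an already-open DRM file descriptor; error handling is trimmed:

#include <string.h>
#include "radeon_bo.h"
#include "radeon_bo_gem.h"
#include "radeon_drm.h"

static void example_upload(int fd, const void *data, uint32_t size)
{
    struct radeon_bo_manager *bom = radeon_bo_manager_gem_ctor(fd);
    struct radeon_bo *bo = radeon_bo_open(bom, 0, size, 4096,
                                          RADEON_GEM_DOMAIN_GTT, 0);

    radeon_bo_map(bo, 1);             /* map for writing */
    memcpy(bo->ptr, data, size);
    radeon_bo_unmap(bo);
    radeon_bo_unref(bo);
    radeon_bo_manager_gem_dtor(bom);
}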
diff --git a/radeon/radeon_bo_gem.c b/radeon/radeon_bo_gem.c
new file mode 100644
index 0000000..bbe72ce
--- /dev/null
+++ b/radeon/radeon_bo_gem.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright © 2008 Dave Airlie
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Dave Airlie
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include "libdrm_macros.h"
+#include "xf86drm.h"
+#include "xf86atomic.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_bo.h"
+#include "radeon_bo_int.h"
+#include "radeon_bo_gem.h"
+#include <fcntl.h>
+struct radeon_bo_gem {
+ struct radeon_bo_int base;
+ uint32_t name;
+ int map_count;
+ atomic_t reloc_in_cs;
+ void *priv_ptr;
+};
+
+struct bo_manager_gem {
+ struct radeon_bo_manager base;
+};
+
+static int bo_wait(struct radeon_bo_int *boi);
+
+static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
+ uint32_t handle,
+ uint32_t size,
+ uint32_t alignment,
+ uint32_t domains,
+ uint32_t flags)
+{
+ struct radeon_bo_gem *bo;
+ int r;
+
+ bo = (struct radeon_bo_gem*)calloc(1, sizeof(struct radeon_bo_gem));
+ if (bo == NULL) {
+ return NULL;
+ }
+
+ bo->base.bom = bom;
+ bo->base.handle = 0;
+ bo->base.size = size;
+ bo->base.alignment = alignment;
+ bo->base.domains = domains;
+ bo->base.flags = flags;
+ bo->base.ptr = NULL;
+ atomic_set(&bo->reloc_in_cs, 0);
+ bo->map_count = 0;
+ if (handle) {
+ struct drm_gem_open open_arg;
+
+ memset(&open_arg, 0, sizeof(open_arg));
+ open_arg.name = handle;
+ r = drmIoctl(bom->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
+ if (r != 0) {
+ free(bo);
+ return NULL;
+ }
+ bo->base.handle = open_arg.handle;
+ bo->base.size = open_arg.size;
+ bo->name = handle;
+ } else {
+ struct drm_radeon_gem_create args;
+
+ args.size = size;
+ args.alignment = alignment;
+ args.initial_domain = bo->base.domains;
+ args.flags = flags;
+ args.handle = 0;
+ r = drmCommandWriteRead(bom->fd, DRM_RADEON_GEM_CREATE,
+ &args, sizeof(args));
+ bo->base.handle = args.handle;
+ if (r) {
+ fprintf(stderr, "Failed to allocate :\n");
+ fprintf(stderr, " size : %d bytes\n", size);
+ fprintf(stderr, " alignment : %d bytes\n", alignment);
+ fprintf(stderr, " domains : %d\n", bo->base.domains);
+ free(bo);
+ return NULL;
+ }
+ }
+ radeon_bo_ref((struct radeon_bo*)bo);
+ return (struct radeon_bo*)bo;
+}
+
+static void bo_ref(struct radeon_bo_int *boi)
+{
+}
+
+static struct radeon_bo *bo_unref(struct radeon_bo_int *boi)
+{
+ struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)boi;
+
+ if (boi->cref) {
+ return (struct radeon_bo *)boi;
+ }
+ if (bo_gem->priv_ptr) {
+ drm_munmap(bo_gem->priv_ptr, boi->size);
+ }
+
+ /* close object */
+ drmCloseBufferHandle(boi->bom->fd, boi->handle);
+ memset(bo_gem, 0, sizeof(struct radeon_bo_gem));
+ free(bo_gem);
+ return NULL;
+}
+
+static int bo_map(struct radeon_bo_int *boi, int write)
+{
+ struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)boi;
+ struct drm_radeon_gem_mmap args;
+ int r;
+ void *ptr;
+
+ if (bo_gem->map_count++ != 0) {
+ return 0;
+ }
+ if (bo_gem->priv_ptr) {
+ goto wait;
+ }
+
+ boi->ptr = NULL;
+
+ /* Zero out args to make valgrind happy */
+ memset(&args, 0, sizeof(args));
+ args.handle = boi->handle;
+ args.offset = 0;
+ args.size = (uint64_t)boi->size;
+ r = drmCommandWriteRead(boi->bom->fd,
+ DRM_RADEON_GEM_MMAP,
+ &args,
+ sizeof(args));
+ if (r) {
+ fprintf(stderr, "error mapping %p 0x%08X (error = %d)\n",
+ boi, boi->handle, r);
+ return r;
+ }
+ ptr = drm_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED, boi->bom->fd, args.addr_ptr);
+ if (ptr == MAP_FAILED)
+ return -errno;
+ bo_gem->priv_ptr = ptr;
+wait:
+ boi->ptr = bo_gem->priv_ptr;
+ r = bo_wait(boi);
+ if (r)
+ return r;
+ return 0;
+}
+
+static int bo_unmap(struct radeon_bo_int *boi)
+{
+ struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)boi;
+
+ if (--bo_gem->map_count > 0) {
+ return 0;
+ }
+ //drm_munmap(bo->ptr, bo->size);
+ boi->ptr = NULL;
+ return 0;
+}
+
+static int bo_wait(struct radeon_bo_int *boi)
+{
+ struct drm_radeon_gem_wait_idle args;
+ int ret;
+
+ /* Zero out args to make valgrind happy */
+ memset(&args, 0, sizeof(args));
+ args.handle = boi->handle;
+ do {
+ ret = drmCommandWrite(boi->bom->fd, DRM_RADEON_GEM_WAIT_IDLE,
+ &args, sizeof(args));
+ } while (ret == -EBUSY);
+ return ret;
+}
+
+static int bo_is_busy(struct radeon_bo_int *boi, uint32_t *domain)
+{
+ struct drm_radeon_gem_busy args;
+ int ret;
+
+ args.handle = boi->handle;
+ args.domain = 0;
+
+ ret = drmCommandWriteRead(boi->bom->fd, DRM_RADEON_GEM_BUSY,
+ &args, sizeof(args));
+
+ *domain = args.domain;
+ return ret;
+}
+
+static int bo_set_tiling(struct radeon_bo_int *boi, uint32_t tiling_flags,
+ uint32_t pitch)
+{
+ struct drm_radeon_gem_set_tiling args;
+ int r;
+
+ args.handle = boi->handle;
+ args.tiling_flags = tiling_flags;
+ args.pitch = pitch;
+
+ r = drmCommandWriteRead(boi->bom->fd,
+ DRM_RADEON_GEM_SET_TILING,
+ &args,
+ sizeof(args));
+ return r;
+}
+
+static int bo_get_tiling(struct radeon_bo_int *boi, uint32_t *tiling_flags,
+ uint32_t *pitch)
+{
+ struct drm_radeon_gem_set_tiling args = {};
+ int r;
+
+ args.handle = boi->handle;
+
+ r = drmCommandWriteRead(boi->bom->fd,
+ DRM_RADEON_GEM_GET_TILING,
+ &args,
+ sizeof(args));
+
+ if (r)
+ return r;
+
+ *tiling_flags = args.tiling_flags;
+ *pitch = args.pitch;
+ return r;
+}
+
+static const struct radeon_bo_funcs bo_gem_funcs = {
+ .bo_open = bo_open,
+ .bo_ref = bo_ref,
+ .bo_unref = bo_unref,
+ .bo_map = bo_map,
+ .bo_unmap = bo_unmap,
+ .bo_wait = bo_wait,
+ .bo_is_static = NULL,
+ .bo_set_tiling = bo_set_tiling,
+ .bo_get_tiling = bo_get_tiling,
+ .bo_is_busy = bo_is_busy,
+ .bo_is_referenced_by_cs = NULL,
+};
+
+drm_public struct radeon_bo_manager *radeon_bo_manager_gem_ctor(int fd)
+{
+ struct bo_manager_gem *bomg;
+
+ bomg = (struct bo_manager_gem*)calloc(1, sizeof(struct bo_manager_gem));
+ if (bomg == NULL) {
+ return NULL;
+ }
+ bomg->base.funcs = &bo_gem_funcs;
+ bomg->base.fd = fd;
+ return (struct radeon_bo_manager*)bomg;
+}
+
+drm_public void radeon_bo_manager_gem_dtor(struct radeon_bo_manager *bom)
+{
+ struct bo_manager_gem *bomg = (struct bo_manager_gem*)bom;
+
+ if (bom == NULL) {
+ return;
+ }
+ free(bomg);
+}
+
+drm_public uint32_t
+radeon_gem_name_bo(struct radeon_bo *bo)
+{
+ struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
+ return bo_gem->name;
+}
+
+drm_public void *
+radeon_gem_get_reloc_in_cs(struct radeon_bo *bo)
+{
+ struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
+ return &bo_gem->reloc_in_cs;
+}
+
+drm_public int
+radeon_gem_get_kernel_name(struct radeon_bo *bo, uint32_t *name)
+{
+ struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ struct drm_gem_flink flink;
+ int r;
+
+ if (bo_gem->name) {
+ *name = bo_gem->name;
+ return 0;
+ }
+ flink.handle = bo->handle;
+ r = drmIoctl(boi->bom->fd, DRM_IOCTL_GEM_FLINK, &flink);
+ if (r) {
+ return r;
+ }
+ bo_gem->name = flink.name;
+ *name = flink.name;
+ return 0;
+}
+
+drm_public int
+radeon_gem_set_domain(struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ struct drm_radeon_gem_set_domain args;
+ int r;
+
+ args.handle = bo->handle;
+ args.read_domains = read_domains;
+ args.write_domain = write_domain;
+
+ r = drmCommandWriteRead(boi->bom->fd,
+ DRM_RADEON_GEM_SET_DOMAIN,
+ &args,
+ sizeof(args));
+ return r;
+}
+
+drm_public int radeon_gem_prime_share_bo(struct radeon_bo *bo, int *handle)
+{
+ struct radeon_bo_gem *bo_gem = (struct radeon_bo_gem*)bo;
+ int ret;
+
+ ret = drmPrimeHandleToFD(bo_gem->base.bom->fd, bo->handle, DRM_CLOEXEC, handle);
+ return ret;
+}
+
+drm_public struct radeon_bo *
+radeon_gem_bo_open_prime(struct radeon_bo_manager *bom, int fd_handle, uint32_t size)
+{
+ struct radeon_bo_gem *bo;
+ int r;
+ uint32_t handle;
+
+ bo = (struct radeon_bo_gem*)calloc(1, sizeof(struct radeon_bo_gem));
+ if (bo == NULL) {
+ return NULL;
+ }
+
+ bo->base.bom = bom;
+ bo->base.handle = 0;
+ bo->base.size = size;
+ bo->base.alignment = 0;
+ bo->base.domains = RADEON_GEM_DOMAIN_GTT;
+ bo->base.flags = 0;
+ bo->base.ptr = NULL;
+ atomic_set(&bo->reloc_in_cs, 0);
+ bo->map_count = 0;
+
+ r = drmPrimeFDToHandle(bom->fd, fd_handle, &handle);
+ if (r != 0) {
+ free(bo);
+ return NULL;
+ }
+
+ bo->base.handle = handle;
+ bo->name = handle;
+
+ radeon_bo_ref((struct radeon_bo *)bo);
+ return (struct radeon_bo *)bo;
+
+}
diff --git a/radeon/radeon_bo_gem.h b/radeon/radeon_bo_gem.h
new file mode 100644
index 0000000..08965f3
--- /dev/null
+++ b/radeon/radeon_bo_gem.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright © 2008 Dave Airlie
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Dave Airlie
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifndef RADEON_BO_GEM_H
+#define RADEON_BO_GEM_H
+
+#include "radeon_bo.h"
+
+struct radeon_bo_manager *radeon_bo_manager_gem_ctor(int fd);
+void radeon_bo_manager_gem_dtor(struct radeon_bo_manager *bom);
+
+uint32_t radeon_gem_name_bo(struct radeon_bo *bo);
+void *radeon_gem_get_reloc_in_cs(struct radeon_bo *bo);
+int radeon_gem_set_domain(struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain);
+int radeon_gem_get_kernel_name(struct radeon_bo *bo, uint32_t *name);
+int radeon_gem_prime_share_bo(struct radeon_bo *bo, int *handle);
+struct radeon_bo *radeon_gem_bo_open_prime(struct radeon_bo_manager *bom,
+ int fd_handle,
+ uint32_t size);
+#endif
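
The prime helpers declared above suggest the following sharing pattern between an exporting and an importing process (a sketch only; the dma-buf file descriptor is assumed to travel between processes over some out-of-band channel such as a Unix socket):

/* Exporter: turn a bo into a dma-buf file descriptor. */
static int example_export(struct radeon_bo *bo, int *dmabuf_fd)
{
    return radeon_gem_prime_share_bo(bo, dmabuf_fd);
}

/* Importer: wrap a received dma-buf fd in a radeon_bo of known size. */
static struct radeon_bo *example_import(struct radeon_bo_manager *bom,
                                        int dmabuf_fd, uint32_t size)
{
    return radeon_gem_bo_open_prime(bom, dmabuf_fd, size);
}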
diff --git a/radeon/radeon_bo_int.h b/radeon/radeon_bo_int.h
new file mode 100644
index 0000000..de981b0
--- /dev/null
+++ b/radeon/radeon_bo_int.h
@@ -0,0 +1,45 @@
+#ifndef RADEON_BO_INT
+#define RADEON_BO_INT
+
+struct radeon_bo_manager {
+ const struct radeon_bo_funcs *funcs;
+ int fd;
+};
+
+struct radeon_bo_int {
+ void *ptr;
+ uint32_t flags;
+ uint32_t handle;
+ uint32_t size;
+ /* private members */
+ uint32_t alignment;
+ uint32_t domains;
+ unsigned cref;
+ struct radeon_bo_manager *bom;
+ uint32_t space_accounted;
+ uint32_t referenced_in_cs;
+};
+
+/* bo functions */
+struct radeon_bo_funcs {
+ struct radeon_bo *(*bo_open)(struct radeon_bo_manager *bom,
+ uint32_t handle,
+ uint32_t size,
+ uint32_t alignment,
+ uint32_t domains,
+ uint32_t flags);
+ void (*bo_ref)(struct radeon_bo_int *bo);
+ struct radeon_bo *(*bo_unref)(struct radeon_bo_int *bo);
+ int (*bo_map)(struct radeon_bo_int *bo, int write);
+ int (*bo_unmap)(struct radeon_bo_int *bo);
+ int (*bo_wait)(struct radeon_bo_int *bo);
+ int (*bo_is_static)(struct radeon_bo_int *bo);
+ int (*bo_set_tiling)(struct radeon_bo_int *bo, uint32_t tiling_flags,
+ uint32_t pitch);
+ int (*bo_get_tiling)(struct radeon_bo_int *bo, uint32_t *tiling_flags,
+ uint32_t *pitch);
+ int (*bo_is_busy)(struct radeon_bo_int *bo, uint32_t *domain);
+ int (*bo_is_referenced_by_cs)(struct radeon_bo_int *bo, struct radeon_cs *cs);
+};
+
+#endif
diff --git a/radeon/radeon_cs.c b/radeon/radeon_cs.c
new file mode 100644
index 0000000..1132d06
--- /dev/null
+++ b/radeon/radeon_cs.c
@@ -0,0 +1,95 @@
+#include "libdrm_macros.h"
+#include <stdio.h>
+#include "radeon_cs.h"
+#include "radeon_cs_int.h"
+
+drm_public struct radeon_cs *
+radeon_cs_create(struct radeon_cs_manager *csm, uint32_t ndw)
+{
+ struct radeon_cs_int *csi = csm->funcs->cs_create(csm, ndw);
+ return (struct radeon_cs *)csi;
+}
+
+drm_public int
+radeon_cs_write_reloc(struct radeon_cs *cs, struct radeon_bo *bo,
+ uint32_t read_domain, uint32_t write_domain,
+ uint32_t flags)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+
+ return csi->csm->funcs->cs_write_reloc(csi,
+ bo,
+ read_domain,
+ write_domain,
+ flags);
+}
+
+drm_public int
+radeon_cs_begin(struct radeon_cs *cs, uint32_t ndw,
+ const char *file, const char *func, int line)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ return csi->csm->funcs->cs_begin(csi, ndw, file, func, line);
+}
+
+drm_public int
+radeon_cs_end(struct radeon_cs *cs,
+ const char *file, const char *func, int line)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ return csi->csm->funcs->cs_end(csi, file, func, line);
+}
+
+drm_public int radeon_cs_emit(struct radeon_cs *cs)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ return csi->csm->funcs->cs_emit(csi);
+}
+
+drm_public int radeon_cs_destroy(struct radeon_cs *cs)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ return csi->csm->funcs->cs_destroy(csi);
+}
+
+drm_public int radeon_cs_erase(struct radeon_cs *cs)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ return csi->csm->funcs->cs_erase(csi);
+}
+
+drm_public int radeon_cs_need_flush(struct radeon_cs *cs)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ return csi->csm->funcs->cs_need_flush(csi);
+}
+
+drm_public void radeon_cs_print(struct radeon_cs *cs, FILE *file)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ csi->csm->funcs->cs_print(csi, file);
+}
+
+drm_public void
+radeon_cs_set_limit(struct radeon_cs *cs, uint32_t domain, uint32_t limit)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ if (domain == RADEON_GEM_DOMAIN_VRAM)
+ csi->csm->vram_limit = limit;
+ else
+ csi->csm->gart_limit = limit;
+}
+
+drm_public void radeon_cs_space_set_flush(struct radeon_cs *cs,
+ void (*fn)(void *), void *data)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ csi->space_flush_fn = fn;
+ csi->space_flush_data = data;
+}
+
+drm_public uint32_t radeon_cs_get_id(struct radeon_cs *cs)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ return csi->id;
+}
diff --git a/radeon/radeon_cs.h b/radeon/radeon_cs.h
new file mode 100644
index 0000000..f68a624
--- /dev/null
+++ b/radeon/radeon_cs.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright © 2008 Nicolai Haehnle
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Aapo Tahkola <aet@rasterburn.org>
+ * Nicolai Haehnle <prefect_@gmx.net>
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifndef RADEON_CS_H
+#define RADEON_CS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_bo.h"
+
+struct radeon_cs_reloc {
+ struct radeon_bo *bo;
+ uint32_t read_domain;
+ uint32_t write_domain;
+ uint32_t flags;
+};
+
+
+#define RADEON_CS_SPACE_OK 0
+#define RADEON_CS_SPACE_OP_TO_BIG 1
+#define RADEON_CS_SPACE_FLUSH 2
+
+struct radeon_cs {
+ uint32_t *packets;
+ unsigned cdw;
+ unsigned ndw;
+ unsigned section_ndw;
+ unsigned section_cdw;
+};
+
+#define MAX_SPACE_BOS (32)
+
+struct radeon_cs_manager;
+
+extern struct radeon_cs *radeon_cs_create(struct radeon_cs_manager *csm,
+ uint32_t ndw);
+
+extern int radeon_cs_begin(struct radeon_cs *cs,
+ uint32_t ndw,
+ const char *file,
+ const char *func, int line);
+extern int radeon_cs_end(struct radeon_cs *cs,
+ const char *file,
+ const char *func,
+ int line);
+extern int radeon_cs_emit(struct radeon_cs *cs);
+extern int radeon_cs_destroy(struct radeon_cs *cs);
+extern int radeon_cs_erase(struct radeon_cs *cs);
+extern int radeon_cs_need_flush(struct radeon_cs *cs);
+extern void radeon_cs_print(struct radeon_cs *cs, FILE *file);
+extern void radeon_cs_set_limit(struct radeon_cs *cs, uint32_t domain, uint32_t limit);
+extern void radeon_cs_space_set_flush(struct radeon_cs *cs, void (*fn)(void *), void *data);
+extern int radeon_cs_write_reloc(struct radeon_cs *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domain,
+ uint32_t write_domain,
+ uint32_t flags);
+extern uint32_t radeon_cs_get_id(struct radeon_cs *cs);
+/*
+ * Add a persistent BO to the list.
+ * A persistent BO is one that will be referenced across flushes, e.g. a
+ * colorbuffer or a texture.
+ * Persistent BOs are reset when a new "operation" happens, where an operation
+ * is a state emission (color/texture setup etc.) followed by a batch of
+ * vertices; see the usage sketch after the declarations below.
+ */
+void radeon_cs_space_add_persistent_bo(struct radeon_cs *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domains,
+ uint32_t write_domain);
+
+/* reset the persistent BO list */
+void radeon_cs_space_reset_bos(struct radeon_cs *cs);
+
+/* do a space check with the current persistent BO list */
+int radeon_cs_space_check(struct radeon_cs *cs);
+
+/* do a space check with the current persistent BO list and a temporary BO
+ * a temporary BO is like a DMA buffer, which gets flushed with the
+ * command buffer */
+int radeon_cs_space_check_with_bo(struct radeon_cs *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domains,
+ uint32_t write_domain);
+
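+/* Illustrative sketch, not part of the upstream header: typical use of the
+ * space-checking API for one operation. color_bo and tex_bo are hypothetical
+ * buffers created elsewhere, and a flush callback is assumed to have been
+ * registered with radeon_cs_space_set_flush() so a FLUSH result can be
+ * serviced. Returns 0 on success, -1 if the operation can never fit. */
+static inline int radeon_cs_example_space_check(struct radeon_cs *cs,
+                                                struct radeon_bo *color_bo,
+                                                struct radeon_bo *tex_bo)
+{
+    radeon_cs_space_reset_bos(cs);
+    radeon_cs_space_add_persistent_bo(cs, color_bo, 0, RADEON_GEM_DOMAIN_VRAM);
+    radeon_cs_space_add_persistent_bo(cs, tex_bo,
+                                      RADEON_GEM_DOMAIN_VRAM |
+                                      RADEON_GEM_DOMAIN_GTT, 0);
+    return radeon_cs_space_check(cs);
+}
+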
+static inline void radeon_cs_write_dword(struct radeon_cs *cs, uint32_t dword)
+{
+ cs->packets[cs->cdw++] = dword;
+ if (cs->section_ndw) {
+ cs->section_cdw++;
+ }
+}
+
+static inline void radeon_cs_write_qword(struct radeon_cs *cs, uint64_t qword)
+{
+ memcpy(cs->packets + cs->cdw, &qword, sizeof(uint64_t));
+ cs->cdw += 2;
+ if (cs->section_ndw) {
+ cs->section_cdw += 2;
+ }
+}
+
+static inline void radeon_cs_write_table(struct radeon_cs *cs,
+ const void *data, uint32_t size)
+{
+ memcpy(cs->packets + cs->cdw, data, size * 4);
+ cs->cdw += size;
+ if (cs->section_ndw) {
+ cs->section_cdw += size;
+ }
+}
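+
+/* Illustrative sketch, not part of the upstream header: emitting two payload
+ * dwords plus one relocation inside a checked section. The 0xdeadbeef payload
+ * and the bo argument are hypothetical; the BO must already have passed a
+ * space check so that its space accounting is set up. */
+static inline int radeon_cs_example_packet(struct radeon_cs *cs,
+                                           struct radeon_bo *bo)
+{
+    int r;
+
+    /* 2 payload dwords + 2 dwords written by radeon_cs_write_reloc() */
+    r = radeon_cs_begin(cs, 4, __FILE__, __func__, __LINE__);
+    if (r)
+        return r;
+    radeon_cs_write_dword(cs, 0xdeadbeef);
+    radeon_cs_write_dword(cs, 0xdeadbeef);
+    r = radeon_cs_write_reloc(cs, bo, RADEON_GEM_DOMAIN_VRAM, 0, 0);
+    if (r)
+        return r;
+    return radeon_cs_end(cs, __FILE__, __func__, __LINE__);
+}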
+#endif
diff --git a/radeon/radeon_cs_gem.c b/radeon/radeon_cs_gem.c
new file mode 100644
index 0000000..ef070c6
--- /dev/null
+++ b/radeon/radeon_cs_gem.c
@@ -0,0 +1,556 @@
+/*
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Aapo Tahkola <aet@rasterburn.org>
+ * Nicolai Haehnle <prefect_@gmx.net>
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+#include <sys/ioctl.h>
+#include "radeon_cs.h"
+#include "radeon_cs_int.h"
+#include "radeon_bo_int.h"
+#include "radeon_cs_gem.h"
+#include "radeon_bo_gem.h"
+#include "drm.h"
+#include "libdrm_macros.h"
+#include "xf86drm.h"
+#include "xf86atomic.h"
+#include "radeon_drm.h"
+
+/* Add LIBDRM_RADEON_BOF_FILES to libdrm_radeon_la_SOURCES when building with BOF_DUMP */
+#define CS_BOF_DUMP 0
+#if CS_BOF_DUMP
+#include "bof.h"
+#endif
+
+struct radeon_cs_manager_gem {
+ struct radeon_cs_manager base;
+ uint32_t device_id;
+ unsigned nbof;
+};
+
+#pragma pack(1)
+struct cs_reloc_gem {
+ uint32_t handle;
+ uint32_t read_domain;
+ uint32_t write_domain;
+ uint32_t flags;
+};
+
+#pragma pack()
+#define RELOC_SIZE (sizeof(struct cs_reloc_gem) / sizeof(uint32_t))
+
+struct cs_gem {
+ struct radeon_cs_int base;
+ struct drm_radeon_cs cs;
+ struct drm_radeon_cs_chunk chunks[2];
+ unsigned nrelocs;
+ uint32_t *relocs;
+ struct radeon_bo_int **relocs_bo;
+};
+
+static pthread_mutex_t id_mutex = PTHREAD_MUTEX_INITIALIZER;
+static uint32_t cs_id_source = 0;
+
+/**
+ * result is undefined if called with ~0
+ */
+static uint32_t get_first_zero(const uint32_t n)
+{
+ /* __builtin_ctz returns number of trailing zeros. */
+ return 1 << __builtin_ctz(~n);
+}
+
+/**
+ * Returns a free id for cs.
+ * If there is no free id we return zero
+ **/
+static uint32_t generate_id(void)
+{
+ uint32_t r = 0;
+ pthread_mutex_lock( &id_mutex );
+ /* check for free ids */
+ if (cs_id_source != ~r) {
+ /* find first zero bit */
+ r = get_first_zero(cs_id_source);
+
+ /* set id as reserved */
+ cs_id_source |= r;
+ }
+ pthread_mutex_unlock( &id_mutex );
+ return r;
+}
+
+/**
+ * Free the id for later reuse
+ **/
+static void free_id(uint32_t id)
+{
+ pthread_mutex_lock( &id_mutex );
+
+ cs_id_source &= ~id;
+
+ pthread_mutex_unlock( &id_mutex );
+}
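+
+/* Illustrative note, not part of the upstream file: ids are one-hot bits, so at
+ * most 32 command streams can hold an id at the same time. For example, with
+ * cs_id_source == 0x7 (ids 0x1, 0x2 and 0x4 in use), get_first_zero(0x7)
+ * returns 0x8, generate_id() hands that bit out, and free_id(0x8) clears it
+ * again. */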
+
+static struct radeon_cs_int *cs_gem_create(struct radeon_cs_manager *csm,
+ uint32_t ndw)
+{
+ struct cs_gem *csg;
+
+    /* the maximum command buffer size is 64KB */
+ if (ndw > (64 * 1024 / 4)) {
+ return NULL;
+ }
+ csg = (struct cs_gem*)calloc(1, sizeof(struct cs_gem));
+ if (csg == NULL) {
+ return NULL;
+ }
+ csg->base.csm = csm;
+ csg->base.ndw = 64 * 1024 / 4;
+ csg->base.packets = (uint32_t*)calloc(1, 64 * 1024);
+ if (csg->base.packets == NULL) {
+ free(csg);
+ return NULL;
+ }
+ csg->base.relocs_total_size = 0;
+ csg->base.crelocs = 0;
+ csg->base.id = generate_id();
+    csg->nrelocs = 4096 / (4 * 4);
+ csg->relocs_bo = (struct radeon_bo_int**)calloc(1,
+ csg->nrelocs*sizeof(void*));
+ if (csg->relocs_bo == NULL) {
+ free(csg->base.packets);
+ free(csg);
+ return NULL;
+ }
+ csg->base.relocs = csg->relocs = (uint32_t*)calloc(1, 4096);
+ if (csg->relocs == NULL) {
+ free(csg->relocs_bo);
+ free(csg->base.packets);
+ free(csg);
+ return NULL;
+ }
+ csg->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
+ csg->chunks[0].length_dw = 0;
+ csg->chunks[0].chunk_data = (uint64_t)(uintptr_t)csg->base.packets;
+ csg->chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
+ csg->chunks[1].length_dw = 0;
+ csg->chunks[1].chunk_data = (uint64_t)(uintptr_t)csg->relocs;
+ return (struct radeon_cs_int*)csg;
+}
+
+static int cs_gem_write_reloc(struct radeon_cs_int *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domain,
+ uint32_t write_domain,
+ uint32_t flags)
+{
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ struct cs_gem *csg = (struct cs_gem*)cs;
+ struct cs_reloc_gem *reloc;
+ uint32_t idx;
+ unsigned i;
+
+ assert(boi->space_accounted);
+
+ /* check domains */
+ if ((read_domain && write_domain) || (!read_domain && !write_domain)) {
+        /* within one CS a BO can only be in the read domain or the write
+         * domain, not in both at the same time
+         */
+ return -EINVAL;
+ }
+ if (read_domain == RADEON_GEM_DOMAIN_CPU) {
+ return -EINVAL;
+ }
+ if (write_domain == RADEON_GEM_DOMAIN_CPU) {
+ return -EINVAL;
+ }
+    /* use the bit-field hash to quickly rule out BOs that are
+       definitely not referenced by this cs. */
+ if ((atomic_read((atomic_t *)radeon_gem_get_reloc_in_cs(bo)) & cs->id)) {
+        /* the BO may already be referenced; scan from the end to the
+         * beginning, which saves cycles with mesa because it often
+         * relocates the same shared dma BO again. */
+ for(i = cs->crelocs; i != 0;) {
+ --i;
+ idx = i * RELOC_SIZE;
+ reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
+ if (reloc->handle == bo->handle) {
+                /* The domains must stay consistent. We already verified above
+                 * that exactly one of the read or write domains is set for
+                 * this request, so here we only need to check that it is
+                 * compatible with the domains recorded in the previous
+                 * relocation of this BO.
+                 */
+                /* the DDX expects to read from and write to the same pixmap */
+ if (write_domain && (reloc->read_domain & write_domain)) {
+ reloc->read_domain = 0;
+ reloc->write_domain = write_domain;
+ } else if (read_domain & reloc->write_domain) {
+ reloc->read_domain = 0;
+ } else {
+ if (write_domain != reloc->write_domain)
+ return -EINVAL;
+ if (read_domain != reloc->read_domain)
+ return -EINVAL;
+ }
+
+ reloc->read_domain |= read_domain;
+ reloc->write_domain |= write_domain;
+ /* update flags */
+ reloc->flags |= (flags & reloc->flags);
+ /* write relocation packet */
+ radeon_cs_write_dword((struct radeon_cs *)cs, 0xc0001000);
+ radeon_cs_write_dword((struct radeon_cs *)cs, idx);
+ return 0;
+ }
+ }
+ }
+ /* new relocation */
+ if (csg->base.crelocs >= csg->nrelocs) {
+ /* allocate more memory (TODO: should use a slab allocator maybe) */
+ uint32_t *tmp, size;
+ size = ((csg->nrelocs + 1) * sizeof(struct radeon_bo*));
+ tmp = (uint32_t*)realloc(csg->relocs_bo, size);
+ if (tmp == NULL) {
+ return -ENOMEM;
+ }
+ csg->relocs_bo = (struct radeon_bo_int **)tmp;
+ size = ((csg->nrelocs + 1) * RELOC_SIZE * 4);
+ tmp = (uint32_t*)realloc(csg->relocs, size);
+ if (tmp == NULL) {
+ return -ENOMEM;
+ }
+ cs->relocs = csg->relocs = tmp;
+ csg->nrelocs += 1;
+ csg->chunks[1].chunk_data = (uint64_t)(uintptr_t)csg->relocs;
+ }
+ csg->relocs_bo[csg->base.crelocs] = boi;
+ idx = (csg->base.crelocs++) * RELOC_SIZE;
+ reloc = (struct cs_reloc_gem*)&csg->relocs[idx];
+ reloc->handle = bo->handle;
+ reloc->read_domain = read_domain;
+ reloc->write_domain = write_domain;
+ reloc->flags = flags;
+ csg->chunks[1].length_dw += RELOC_SIZE;
+ radeon_bo_ref(bo);
+ /* bo might be referenced from another context so have to use atomic operations */
+ atomic_add((atomic_t *)radeon_gem_get_reloc_in_cs(bo), cs->id);
+ cs->relocs_total_size += boi->size;
+ radeon_cs_write_dword((struct radeon_cs *)cs, 0xc0001000);
+ radeon_cs_write_dword((struct radeon_cs *)cs, idx);
+ return 0;
+}
+
+static int cs_gem_begin(struct radeon_cs_int *cs,
+ uint32_t ndw,
+ const char *file,
+ const char *func,
+ int line)
+{
+
+ if (cs->section_ndw) {
+ fprintf(stderr, "CS already in a section(%s,%s,%d)\n",
+ cs->section_file, cs->section_func, cs->section_line);
+ fprintf(stderr, "CS can't start section(%s,%s,%d)\n",
+ file, func, line);
+ return -EPIPE;
+ }
+ cs->section_ndw = ndw;
+ cs->section_cdw = 0;
+ cs->section_file = file;
+ cs->section_func = func;
+ cs->section_line = line;
+
+ if (cs->cdw + ndw > cs->ndw) {
+ uint32_t tmp, *ptr;
+
+ /* round up the required size to a multiple of 1024 */
+ tmp = (cs->cdw + ndw + 0x3FF) & (~0x3FF);
+ ptr = (uint32_t*)realloc(cs->packets, 4 * tmp);
+ if (ptr == NULL) {
+ return -ENOMEM;
+ }
+ cs->packets = ptr;
+ cs->ndw = tmp;
+ }
+ return 0;
+}
+
+static int cs_gem_end(struct radeon_cs_int *cs,
+ const char *file,
+ const char *func,
+ int line)
+
+{
+ if (!cs->section_ndw) {
+ fprintf(stderr, "CS no section to end at (%s,%s,%d)\n",
+ file, func, line);
+ return -EPIPE;
+ }
+ if (cs->section_ndw != cs->section_cdw) {
+ fprintf(stderr, "CS section size mismatch start at (%s,%s,%d) %d vs %d\n",
+ cs->section_file, cs->section_func, cs->section_line, cs->section_ndw, cs->section_cdw);
+ fprintf(stderr, "CS section end at (%s,%s,%d)\n",
+ file, func, line);
+
+        /* We must reset the section even when there is an error. */
+ cs->section_ndw = 0;
+ return -EPIPE;
+ }
+ cs->section_ndw = 0;
+ return 0;
+}
+
+#if CS_BOF_DUMP
+static void cs_gem_dump_bof(struct radeon_cs_int *cs)
+{
+ struct cs_gem *csg = (struct cs_gem*)cs;
+ struct radeon_cs_manager_gem *csm;
+ bof_t *bcs, *blob, *array, *bo, *size, *handle, *device_id, *root;
+ char tmp[256];
+ unsigned i;
+
+ csm = (struct radeon_cs_manager_gem *)cs->csm;
+ root = device_id = bcs = blob = array = bo = size = handle = NULL;
+ root = bof_object();
+ if (root == NULL)
+ goto out_err;
+ device_id = bof_int32(csm->device_id);
+ if (device_id == NULL)
+ return;
+ if (bof_object_set(root, "device_id", device_id))
+ goto out_err;
+ bof_decref(device_id);
+ device_id = NULL;
+ /* dump relocs */
+ blob = bof_blob(csg->nrelocs * 16, csg->relocs);
+ if (blob == NULL)
+ goto out_err;
+ if (bof_object_set(root, "reloc", blob))
+ goto out_err;
+ bof_decref(blob);
+ blob = NULL;
+ /* dump cs */
+ blob = bof_blob(cs->cdw * 4, cs->packets);
+ if (blob == NULL)
+ goto out_err;
+ if (bof_object_set(root, "pm4", blob))
+ goto out_err;
+ bof_decref(blob);
+ blob = NULL;
+ /* dump bo */
+ array = bof_array();
+ if (array == NULL)
+ goto out_err;
+ for (i = 0; i < csg->base.crelocs; i++) {
+ bo = bof_object();
+ if (bo == NULL)
+ goto out_err;
+ size = bof_int32(csg->relocs_bo[i]->size);
+ if (size == NULL)
+ goto out_err;
+ if (bof_object_set(bo, "size", size))
+ goto out_err;
+ bof_decref(size);
+ size = NULL;
+ handle = bof_int32(csg->relocs_bo[i]->handle);
+ if (handle == NULL)
+ goto out_err;
+ if (bof_object_set(bo, "handle", handle))
+ goto out_err;
+ bof_decref(handle);
+ handle = NULL;
+ radeon_bo_map((struct radeon_bo*)csg->relocs_bo[i], 0);
+ blob = bof_blob(csg->relocs_bo[i]->size, csg->relocs_bo[i]->ptr);
+ radeon_bo_unmap((struct radeon_bo*)csg->relocs_bo[i]);
+ if (blob == NULL)
+ goto out_err;
+ if (bof_object_set(bo, "data", blob))
+ goto out_err;
+ bof_decref(blob);
+ blob = NULL;
+ if (bof_array_append(array, bo))
+ goto out_err;
+ bof_decref(bo);
+ bo = NULL;
+ }
+ if (bof_object_set(root, "bo", array))
+ goto out_err;
+ sprintf(tmp, "d-0x%04X-%08d.bof", csm->device_id, csm->nbof++);
+ bof_dump_file(root, tmp);
+out_err:
+ bof_decref(blob);
+ bof_decref(array);
+ bof_decref(bo);
+ bof_decref(size);
+ bof_decref(handle);
+ bof_decref(device_id);
+ bof_decref(root);
+}
+#endif
+
+static int cs_gem_emit(struct radeon_cs_int *cs)
+{
+ struct cs_gem *csg = (struct cs_gem*)cs;
+ uint64_t chunk_array[2];
+ unsigned i;
+ int r;
+
+ while (cs->cdw & 7)
+ radeon_cs_write_dword((struct radeon_cs *)cs, 0x80000000);
+
+#if CS_BOF_DUMP
+ cs_gem_dump_bof(cs);
+#endif
+ csg->chunks[0].length_dw = cs->cdw;
+
+ chunk_array[0] = (uint64_t)(uintptr_t)&csg->chunks[0];
+ chunk_array[1] = (uint64_t)(uintptr_t)&csg->chunks[1];
+
+ csg->cs.num_chunks = 2;
+ csg->cs.chunks = (uint64_t)(uintptr_t)chunk_array;
+
+ r = drmCommandWriteRead(cs->csm->fd, DRM_RADEON_CS,
+ &csg->cs, sizeof(struct drm_radeon_cs));
+ for (i = 0; i < csg->base.crelocs; i++) {
+ csg->relocs_bo[i]->space_accounted = 0;
+ /* bo might be referenced from another context so have to use atomic operations */
+ atomic_dec((atomic_t *)radeon_gem_get_reloc_in_cs((struct radeon_bo*)csg->relocs_bo[i]), cs->id);
+ radeon_bo_unref((struct radeon_bo *)csg->relocs_bo[i]);
+ csg->relocs_bo[i] = NULL;
+ }
+
+ cs->csm->read_used = 0;
+ cs->csm->vram_write_used = 0;
+ cs->csm->gart_write_used = 0;
+ return r;
+}
+
+static int cs_gem_destroy(struct radeon_cs_int *cs)
+{
+ struct cs_gem *csg = (struct cs_gem*)cs;
+
+ free_id(cs->id);
+ free(csg->relocs_bo);
+ free(cs->relocs);
+ free(cs->packets);
+ free(cs);
+ return 0;
+}
+
+static int cs_gem_erase(struct radeon_cs_int *cs)
+{
+ struct cs_gem *csg = (struct cs_gem*)cs;
+ unsigned i;
+
+ if (csg->relocs_bo) {
+ for (i = 0; i < csg->base.crelocs; i++) {
+ if (csg->relocs_bo[i]) {
+ /* bo might be referenced from another context so have to use atomic operations */
+ atomic_dec((atomic_t *)radeon_gem_get_reloc_in_cs((struct radeon_bo*)csg->relocs_bo[i]), cs->id);
+ radeon_bo_unref((struct radeon_bo *)csg->relocs_bo[i]);
+ csg->relocs_bo[i] = NULL;
+ }
+ }
+ }
+ cs->relocs_total_size = 0;
+ cs->cdw = 0;
+ cs->section_ndw = 0;
+ cs->crelocs = 0;
+ csg->chunks[0].length_dw = 0;
+ csg->chunks[1].length_dw = 0;
+ return 0;
+}
+
+static int cs_gem_need_flush(struct radeon_cs_int *cs)
+{
+ return 0; //(cs->relocs_total_size > (32*1024*1024));
+}
+
+static void cs_gem_print(struct radeon_cs_int *cs, FILE *file)
+{
+ struct radeon_cs_manager_gem *csm;
+ unsigned int i;
+
+ csm = (struct radeon_cs_manager_gem *)cs->csm;
+ fprintf(file, "VENDORID:DEVICEID 0x%04X:0x%04X\n", 0x1002, csm->device_id);
+ for (i = 0; i < cs->cdw; i++) {
+ fprintf(file, "0x%08X\n", cs->packets[i]);
+ }
+}
+
+static const struct radeon_cs_funcs radeon_cs_gem_funcs = {
+ .cs_create = cs_gem_create,
+ .cs_write_reloc = cs_gem_write_reloc,
+ .cs_begin = cs_gem_begin,
+ .cs_end = cs_gem_end,
+ .cs_emit = cs_gem_emit,
+ .cs_destroy = cs_gem_destroy,
+ .cs_erase = cs_gem_erase,
+ .cs_need_flush = cs_gem_need_flush,
+ .cs_print = cs_gem_print,
+};
+
+static int radeon_get_device_id(int fd, uint32_t *device_id)
+{
+ struct drm_radeon_info info = {};
+ int r;
+
+ *device_id = 0;
+ info.request = RADEON_INFO_DEVICE_ID;
+ info.value = (uintptr_t)device_id;
+ r = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info,
+ sizeof(struct drm_radeon_info));
+ return r;
+}
+
+drm_public struct radeon_cs_manager *radeon_cs_manager_gem_ctor(int fd)
+{
+ struct radeon_cs_manager_gem *csm;
+
+ csm = calloc(1, sizeof(struct radeon_cs_manager_gem));
+ if (csm == NULL) {
+ return NULL;
+ }
+ csm->base.funcs = &radeon_cs_gem_funcs;
+ csm->base.fd = fd;
+ radeon_get_device_id(fd, &csm->device_id);
+ return &csm->base;
+}
+
+drm_public void radeon_cs_manager_gem_dtor(struct radeon_cs_manager *csm)
+{
+ free(csm);
+}
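+
+/* Illustrative sketch, not part of the upstream file: minimal lifecycle of a
+ * GEM CS manager on an already-open radeon DRM file descriptor. The fd is
+ * assumed to come from drmOpen() or open() elsewhere; error handling is
+ * abbreviated. */
+static int radeon_cs_gem_example(int fd)
+{
+    struct radeon_cs_manager *csm;
+    struct radeon_cs *cs;
+
+    csm = radeon_cs_manager_gem_ctor(fd);
+    if (csm == NULL)
+        return -ENOMEM;
+    /* 64KB / 4 = 16384 dwords is the maximum accepted by cs_gem_create() */
+    cs = radeon_cs_create(csm, 64 * 1024 / 4);
+    if (cs == NULL) {
+        radeon_cs_manager_gem_dtor(csm);
+        return -ENOMEM;
+    }
+    /* ... fill the CS with radeon_cs_begin()/radeon_cs_write_dword()/
+     * radeon_cs_end() and submit it with radeon_cs_emit() ... */
+    radeon_cs_destroy(cs);
+    radeon_cs_manager_gem_dtor(csm);
+    return 0;
+}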
diff --git a/radeon/radeon_cs_gem.h b/radeon/radeon_cs_gem.h
new file mode 100644
index 0000000..5dea38a
--- /dev/null
+++ b/radeon/radeon_cs_gem.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright © 2008 Nicolai Haehnle
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Aapo Tahkola <aet@rasterburn.org>
+ * Nicolai Haehnle <prefect_@gmx.net>
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifndef RADEON_CS_GEM_H
+#define RADEON_CS_GEM_H
+
+#include "radeon_cs.h"
+
+struct radeon_cs_manager *radeon_cs_manager_gem_ctor(int fd);
+void radeon_cs_manager_gem_dtor(struct radeon_cs_manager *csm);
+
+#endif
diff --git a/radeon/radeon_cs_int.h b/radeon/radeon_cs_int.h
new file mode 100644
index 0000000..d906ad4
--- /dev/null
+++ b/radeon/radeon_cs_int.h
@@ -0,0 +1,67 @@
+
+#ifndef _RADEON_CS_INT_H_
+#define _RADEON_CS_INT_H_
+
+struct radeon_cs_space_check {
+ struct radeon_bo_int *bo;
+ uint32_t read_domains;
+ uint32_t write_domain;
+ uint32_t new_accounted;
+};
+
+struct radeon_cs_int {
+ /* keep first two in same place */
+ uint32_t *packets;
+ unsigned cdw;
+ unsigned ndw;
+ unsigned section_ndw;
+ unsigned section_cdw;
+ /* private members */
+ struct radeon_cs_manager *csm;
+ void *relocs;
+ unsigned crelocs;
+ unsigned relocs_total_size;
+ const char *section_file;
+ const char *section_func;
+ int section_line;
+ struct radeon_cs_space_check bos[MAX_SPACE_BOS];
+ int bo_count;
+ void (*space_flush_fn)(void *);
+ void *space_flush_data;
+ uint32_t id;
+};
+
+/* cs functions */
+struct radeon_cs_funcs {
+ struct radeon_cs_int *(*cs_create)(struct radeon_cs_manager *csm,
+ uint32_t ndw);
+ int (*cs_write_reloc)(struct radeon_cs_int *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domain,
+ uint32_t write_domain,
+ uint32_t flags);
+ int (*cs_begin)(struct radeon_cs_int *cs,
+ uint32_t ndw,
+ const char *file,
+ const char *func,
+ int line);
+ int (*cs_end)(struct radeon_cs_int *cs,
+ const char *file, const char *func,
+ int line);
+
+
+ int (*cs_emit)(struct radeon_cs_int *cs);
+ int (*cs_destroy)(struct radeon_cs_int *cs);
+ int (*cs_erase)(struct radeon_cs_int *cs);
+ int (*cs_need_flush)(struct radeon_cs_int *cs);
+ void (*cs_print)(struct radeon_cs_int *cs, FILE *file);
+};
+
+struct radeon_cs_manager {
+ const struct radeon_cs_funcs *funcs;
+ int fd;
+ int32_t vram_limit, gart_limit;
+ int32_t vram_write_used, gart_write_used;
+ int32_t read_used;
+};
+#endif
diff --git a/radeon/radeon_cs_space.c b/radeon/radeon_cs_space.c
new file mode 100644
index 0000000..039b041
--- /dev/null
+++ b/radeon/radeon_cs_space.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright © 2009 Red Hat Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ */
+#include <assert.h>
+#include <errno.h>
+#include <stdlib.h>
+#include "libdrm_macros.h"
+#include "radeon_cs.h"
+#include "radeon_bo_int.h"
+#include "radeon_cs_int.h"
+
+struct rad_sizes {
+ int32_t op_read;
+ int32_t op_gart_write;
+ int32_t op_vram_write;
+};
+
+static inline int radeon_cs_setup_bo(struct radeon_cs_space_check *sc, struct rad_sizes *sizes)
+{
+ uint32_t read_domains, write_domain;
+ struct radeon_bo_int *bo;
+
+ bo = sc->bo;
+ sc->new_accounted = 0;
+ read_domains = sc->read_domains;
+ write_domain = sc->write_domain;
+
+ /* legacy needs a static check */
+ if (radeon_bo_is_static((struct radeon_bo *)sc->bo)) {
+ bo->space_accounted = sc->new_accounted = (read_domains << 16) | write_domain;
+ return 0;
+ }
+
+ /* already accounted this bo */
+ if (write_domain && (write_domain == bo->space_accounted)) {
+ sc->new_accounted = bo->space_accounted;
+ return 0;
+ }
+ if (read_domains && ((read_domains << 16) == bo->space_accounted)) {
+ sc->new_accounted = bo->space_accounted;
+ return 0;
+ }
+
+ if (bo->space_accounted == 0) {
+ if (write_domain) {
+ if (write_domain == RADEON_GEM_DOMAIN_VRAM)
+ sizes->op_vram_write += bo->size;
+ else if (write_domain == RADEON_GEM_DOMAIN_GTT)
+ sizes->op_gart_write += bo->size;
+ sc->new_accounted = write_domain;
+ } else {
+ sizes->op_read += bo->size;
+ sc->new_accounted = read_domains << 16;
+ }
+ } else {
+ uint16_t old_read, old_write;
+
+ old_read = bo->space_accounted >> 16;
+ old_write = bo->space_accounted & 0xffff;
+
+ if (write_domain && (old_read & write_domain)) {
+ sc->new_accounted = write_domain;
+ /* moving from read to a write domain */
+ if (write_domain == RADEON_GEM_DOMAIN_VRAM) {
+ sizes->op_read -= bo->size;
+ sizes->op_vram_write += bo->size;
+ } else if (write_domain == RADEON_GEM_DOMAIN_GTT) {
+ sizes->op_read -= bo->size;
+ sizes->op_gart_write += bo->size;
+ }
+ } else if (read_domains & old_write) {
+ sc->new_accounted = bo->space_accounted & 0xffff;
+ } else {
+ /* rewrite the domains */
+ if (write_domain != old_write)
+ fprintf(stderr,"WRITE DOMAIN RELOC FAILURE 0x%x %d %d\n", bo->handle, write_domain, old_write);
+ if (read_domains != old_read)
+ fprintf(stderr,"READ DOMAIN RELOC FAILURE 0x%x %d %d\n", bo->handle, read_domains, old_read);
+ return RADEON_CS_SPACE_FLUSH;
+ }
+ }
+ return 0;
+}
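+
+/* Illustrative note, not part of the upstream file: space_accounted packs the
+ * read domains into the upper 16 bits and the write domain into the lower 16
+ * bits, so a BO only read from VRAM is accounted as
+ * (RADEON_GEM_DOMAIN_VRAM << 16) and a BO written to GTT as
+ * RADEON_GEM_DOMAIN_GTT. */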
+
+static int radeon_cs_do_space_check(struct radeon_cs_int *cs, struct radeon_cs_space_check *new_tmp)
+{
+ struct radeon_cs_manager *csm = cs->csm;
+ int i;
+ struct radeon_bo_int *bo;
+ struct rad_sizes sizes;
+ int ret;
+
+ /* check the totals for this operation */
+
+ if (cs->bo_count == 0 && !new_tmp)
+ return 0;
+
+ memset(&sizes, 0, sizeof(struct rad_sizes));
+
+ /* prepare */
+ for (i = 0; i < cs->bo_count; i++) {
+ ret = radeon_cs_setup_bo(&cs->bos[i], &sizes);
+ if (ret)
+ return ret;
+ }
+
+ if (new_tmp) {
+ ret = radeon_cs_setup_bo(new_tmp, &sizes);
+ if (ret)
+ return ret;
+ }
+
+ if (sizes.op_read < 0)
+ sizes.op_read = 0;
+
+ /* check sizes - operation first */
+ if ((sizes.op_read + sizes.op_gart_write > csm->gart_limit) ||
+ (sizes.op_vram_write > csm->vram_limit)) {
+ return RADEON_CS_SPACE_OP_TO_BIG;
+ }
+
+ if (((csm->vram_write_used + sizes.op_vram_write) > csm->vram_limit) ||
+ ((csm->read_used + csm->gart_write_used + sizes.op_gart_write + sizes.op_read) > csm->gart_limit)) {
+ return RADEON_CS_SPACE_FLUSH;
+ }
+
+ csm->gart_write_used += sizes.op_gart_write;
+ csm->vram_write_used += sizes.op_vram_write;
+ csm->read_used += sizes.op_read;
+ /* commit */
+ for (i = 0; i < cs->bo_count; i++) {
+ bo = cs->bos[i].bo;
+ bo->space_accounted = cs->bos[i].new_accounted;
+ }
+ if (new_tmp)
+ new_tmp->bo->space_accounted = new_tmp->new_accounted;
+
+ return RADEON_CS_SPACE_OK;
+}
+
+drm_public void
+radeon_cs_space_add_persistent_bo(struct radeon_cs *cs, struct radeon_bo *bo,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ int i;
+ for (i = 0; i < csi->bo_count; i++) {
+ if (csi->bos[i].bo == boi &&
+ csi->bos[i].read_domains == read_domains &&
+ csi->bos[i].write_domain == write_domain)
+ return;
+ }
+ radeon_bo_ref(bo);
+ i = csi->bo_count;
+ csi->bos[i].bo = boi;
+ csi->bos[i].read_domains = read_domains;
+ csi->bos[i].write_domain = write_domain;
+ csi->bos[i].new_accounted = 0;
+ csi->bo_count++;
+
+ assert(csi->bo_count < MAX_SPACE_BOS);
+}
+
+static int radeon_cs_check_space_internal(struct radeon_cs_int *cs,
+ struct radeon_cs_space_check *tmp_bo)
+{
+ int ret;
+ int flushed = 0;
+
+again:
+ ret = radeon_cs_do_space_check(cs, tmp_bo);
+ if (ret == RADEON_CS_SPACE_OP_TO_BIG)
+ return -1;
+ if (ret == RADEON_CS_SPACE_FLUSH) {
+ (*cs->space_flush_fn)(cs->space_flush_data);
+ if (flushed)
+ return -1;
+ flushed = 1;
+ goto again;
+ }
+ return 0;
+}
+
+drm_public int
+radeon_cs_space_check_with_bo(struct radeon_cs *cs, struct radeon_bo *bo,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ struct radeon_bo_int *boi = (struct radeon_bo_int *)bo;
+ struct radeon_cs_space_check temp_bo;
+
+ int ret = 0;
+
+ if (bo) {
+ temp_bo.bo = boi;
+ temp_bo.read_domains = read_domains;
+ temp_bo.write_domain = write_domain;
+ temp_bo.new_accounted = 0;
+ }
+
+ ret = radeon_cs_check_space_internal(csi, bo ? &temp_bo : NULL);
+ return ret;
+}
+
+drm_public int radeon_cs_space_check(struct radeon_cs *cs)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ return radeon_cs_check_space_internal(csi, NULL);
+}
+
+drm_public void radeon_cs_space_reset_bos(struct radeon_cs *cs)
+{
+ struct radeon_cs_int *csi = (struct radeon_cs_int *)cs;
+ int i;
+ for (i = 0; i < csi->bo_count; i++) {
+ radeon_bo_unref((struct radeon_bo *)csi->bos[i].bo);
+ csi->bos[i].bo = NULL;
+ csi->bos[i].read_domains = 0;
+ csi->bos[i].write_domain = 0;
+ csi->bos[i].new_accounted = 0;
+ }
+ csi->bo_count = 0;
+}
diff --git a/radeon/radeon_surface.c b/radeon/radeon_surface.c
new file mode 100644
index 0000000..ea0a27a
--- /dev/null
+++ b/radeon/radeon_surface.c
@@ -0,0 +1,2566 @@
+/*
+ * Copyright © 2011 Red Hat All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Jérôme Glisse <jglisse@redhat.com>
+ */
+#include <stdbool.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include "drm.h"
+#include "libdrm_macros.h"
+#include "xf86drm.h"
+#include "radeon_drm.h"
+#include "radeon_surface.h"
+
+#define CIK_TILE_MODE_COLOR_2D 14
+#define CIK_TILE_MODE_COLOR_2D_SCANOUT 10
+#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_64 0
+#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_128 1
+#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_256 2
+#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_512 3
+#define CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_ROW_SIZE 4
+
+#define ALIGN(value, alignment) (((value) + alignment - 1) & ~(alignment - 1))
+#define MAX2(A, B) ((A) > (B) ? (A) : (B))
+#define MIN2(A, B) ((A) < (B) ? (A) : (B))
+
+/* keep this private */
+enum radeon_family {
+ CHIP_UNKNOWN,
+ CHIP_R600,
+ CHIP_RV610,
+ CHIP_RV630,
+ CHIP_RV670,
+ CHIP_RV620,
+ CHIP_RV635,
+ CHIP_RS780,
+ CHIP_RS880,
+ CHIP_RV770,
+ CHIP_RV730,
+ CHIP_RV710,
+ CHIP_RV740,
+ CHIP_CEDAR,
+ CHIP_REDWOOD,
+ CHIP_JUNIPER,
+ CHIP_CYPRESS,
+ CHIP_HEMLOCK,
+ CHIP_PALM,
+ CHIP_SUMO,
+ CHIP_SUMO2,
+ CHIP_BARTS,
+ CHIP_TURKS,
+ CHIP_CAICOS,
+ CHIP_CAYMAN,
+ CHIP_ARUBA,
+ CHIP_TAHITI,
+ CHIP_PITCAIRN,
+ CHIP_VERDE,
+ CHIP_OLAND,
+ CHIP_HAINAN,
+ CHIP_BONAIRE,
+ CHIP_KAVERI,
+ CHIP_KABINI,
+ CHIP_HAWAII,
+ CHIP_MULLINS,
+ CHIP_LAST,
+};
+
+typedef int (*hw_init_surface_t)(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf);
+typedef int (*hw_best_surface_t)(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf);
+
+struct radeon_hw_info {
+ /* apply to r6, eg */
+ uint32_t group_bytes;
+ uint32_t num_banks;
+ uint32_t num_pipes;
+ /* apply to eg */
+ uint32_t row_size;
+ unsigned allow_2d;
+ /* apply to si */
+ uint32_t tile_mode_array[32];
+ /* apply to cik */
+ uint32_t macrotile_mode_array[16];
+};
+
+struct radeon_surface_manager {
+ int fd;
+ uint32_t device_id;
+ struct radeon_hw_info hw_info;
+ unsigned family;
+ hw_init_surface_t surface_init;
+ hw_best_surface_t surface_best;
+};
+
+/* helper */
+static int radeon_get_value(int fd, unsigned req, uint32_t *value)
+{
+ struct drm_radeon_info info = {};
+ int r;
+
+ *value = 0;
+ info.request = req;
+ info.value = (uintptr_t)value;
+ r = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info,
+ sizeof(struct drm_radeon_info));
+ return r;
+}
+
+static int radeon_get_family(struct radeon_surface_manager *surf_man)
+{
+ switch (surf_man->device_id) {
+#define CHIPSET(pci_id, name, fam) case pci_id: surf_man->family = CHIP_##fam; break;
+#include "r600_pci_ids.h"
+#undef CHIPSET
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static unsigned next_power_of_two(unsigned x)
+{
+ if (x <= 1)
+ return 1;
+
+ return (1 << ((sizeof(unsigned) * 8) - __builtin_clz(x - 1)));
+}
+
+static unsigned mip_minify(unsigned size, unsigned level)
+{
+ unsigned val;
+
+ val = MAX2(1, size >> level);
+ if (level > 0)
+ val = next_power_of_two(val);
+ return val;
+}
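+
+/* Illustrative note, not part of the upstream file: non-base levels are rounded
+ * up to a power of two, e.g. mip_minify(13, 0) == 13 while mip_minify(13, 1)
+ * first halves to 6 and then rounds up to 8. */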
+
+static void surf_minify(struct radeon_surface *surf,
+ struct radeon_surface_level *surflevel,
+ unsigned bpe, unsigned level,
+ uint32_t xalign, uint32_t yalign, uint32_t zalign,
+ uint64_t offset)
+{
+ surflevel->npix_x = mip_minify(surf->npix_x, level);
+ surflevel->npix_y = mip_minify(surf->npix_y, level);
+ surflevel->npix_z = mip_minify(surf->npix_z, level);
+ surflevel->nblk_x = (surflevel->npix_x + surf->blk_w - 1) / surf->blk_w;
+ surflevel->nblk_y = (surflevel->npix_y + surf->blk_h - 1) / surf->blk_h;
+ surflevel->nblk_z = (surflevel->npix_z + surf->blk_d - 1) / surf->blk_d;
+ if (surf->nsamples == 1 && surflevel->mode == RADEON_SURF_MODE_2D &&
+ !(surf->flags & RADEON_SURF_FMASK)) {
+ if (surflevel->nblk_x < xalign || surflevel->nblk_y < yalign) {
+ surflevel->mode = RADEON_SURF_MODE_1D;
+ return;
+ }
+ }
+ surflevel->nblk_x = ALIGN(surflevel->nblk_x, xalign);
+ surflevel->nblk_y = ALIGN(surflevel->nblk_y, yalign);
+ surflevel->nblk_z = ALIGN(surflevel->nblk_z, zalign);
+
+ surflevel->offset = offset;
+ surflevel->pitch_bytes = surflevel->nblk_x * bpe * surf->nsamples;
+ surflevel->slice_size = (uint64_t)surflevel->pitch_bytes * surflevel->nblk_y;
+
+ surf->bo_size = offset + surflevel->slice_size * surflevel->nblk_z * surf->array_size;
+}
+
+/* ===========================================================================
+ * r600/r700 family
+ */
+static int r6_init_hw_info(struct radeon_surface_manager *surf_man)
+{
+ uint32_t tiling_config;
+ drmVersionPtr version;
+ int r;
+
+ r = radeon_get_value(surf_man->fd, RADEON_INFO_TILING_CONFIG,
+ &tiling_config);
+ if (r) {
+ return r;
+ }
+
+ surf_man->hw_info.allow_2d = 0;
+ version = drmGetVersion(surf_man->fd);
+ if (version && version->version_minor >= 14) {
+ surf_man->hw_info.allow_2d = 1;
+ }
+ drmFreeVersion(version);
+
+ switch ((tiling_config & 0xe) >> 1) {
+ case 0:
+ surf_man->hw_info.num_pipes = 1;
+ break;
+ case 1:
+ surf_man->hw_info.num_pipes = 2;
+ break;
+ case 2:
+ surf_man->hw_info.num_pipes = 4;
+ break;
+ case 3:
+ surf_man->hw_info.num_pipes = 8;
+ break;
+ default:
+ surf_man->hw_info.num_pipes = 8;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0x30) >> 4) {
+ case 0:
+ surf_man->hw_info.num_banks = 4;
+ break;
+ case 1:
+ surf_man->hw_info.num_banks = 8;
+ break;
+ default:
+ surf_man->hw_info.num_banks = 8;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xc0) >> 6) {
+ case 0:
+ surf_man->hw_info.group_bytes = 256;
+ break;
+ case 1:
+ surf_man->hw_info.group_bytes = 512;
+ break;
+ default:
+ surf_man->hw_info.group_bytes = 256;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+ return 0;
+}
+
+static int r6_surface_init_linear(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign;
+ unsigned i;
+
+ /* compute alignment */
+ if (!start_level) {
+ surf->bo_alignment = MAX2(256, surf_man->hw_info.group_bytes);
+ }
+    /* the 32-pixel alignment is for scanout, CB or DB surfaces, but to allow
+     * a texture to be easily bound as such we force this alignment on all
+     * surfaces
+     */
+ xalign = MAX2(1, surf_man->hw_info.group_bytes / surf->bpe);
+ yalign = 1;
+ zalign = 1;
+ if (surf->flags & RADEON_SURF_SCANOUT) {
+ xalign = MAX2((surf->bpe == 1) ? 64 : 32, xalign);
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ surf->level[i].mode = RADEON_SURF_MODE_LINEAR;
+ surf_minify(surf, surf->level+i, surf->bpe, i, xalign, yalign, zalign, offset);
+ /* level0 and first mipmap need to have alignment */
+ offset = surf->bo_size;
+ if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ }
+ return 0;
+}
+
+static int r6_surface_init_linear_aligned(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign;
+ unsigned i;
+
+ /* compute alignment */
+ if (!start_level) {
+ surf->bo_alignment = MAX2(256, surf_man->hw_info.group_bytes);
+ }
+ xalign = MAX2(64, surf_man->hw_info.group_bytes / surf->bpe);
+ yalign = 1;
+ zalign = 1;
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ surf->level[i].mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
+ surf_minify(surf, surf->level+i, surf->bpe, i, xalign, yalign, zalign, offset);
+ /* level0 and first mipmap need to have alignment */
+ offset = surf->bo_size;
+ if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ }
+ return 0;
+}
+
+static int r6_surface_init_1d(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign, tilew;
+ unsigned i;
+
+ /* compute alignment */
+ tilew = 8;
+ xalign = surf_man->hw_info.group_bytes / (tilew * surf->bpe * surf->nsamples);
+ xalign = MAX2(tilew, xalign);
+ yalign = tilew;
+ zalign = 1;
+ if (surf->flags & RADEON_SURF_SCANOUT) {
+ xalign = MAX2((surf->bpe == 1) ? 64 : 32, xalign);
+ }
+ if (!start_level) {
+ surf->bo_alignment = MAX2(256, surf_man->hw_info.group_bytes);
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ surf->level[i].mode = RADEON_SURF_MODE_1D;
+ surf_minify(surf, surf->level+i, surf->bpe, i, xalign, yalign, zalign, offset);
+ /* level0 and first mipmap need to have alignment */
+ offset = surf->bo_size;
+ if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ }
+ return 0;
+}
+
+static int r6_surface_init_2d(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign, tilew;
+ unsigned i;
+
+ /* compute alignment */
+ tilew = 8;
+ zalign = 1;
+ xalign = (surf_man->hw_info.group_bytes * surf_man->hw_info.num_banks) /
+ (tilew * surf->bpe * surf->nsamples);
+ xalign = MAX2(tilew * surf_man->hw_info.num_banks, xalign);
+ if (surf->flags & RADEON_SURF_FMASK)
+ xalign = MAX2(128, xalign);
+ yalign = tilew * surf_man->hw_info.num_pipes;
+ if (surf->flags & RADEON_SURF_SCANOUT) {
+ xalign = MAX2((surf->bpe == 1) ? 64 : 32, xalign);
+ }
+ if (!start_level) {
+ surf->bo_alignment =
+ MAX2(surf_man->hw_info.num_pipes *
+ surf_man->hw_info.num_banks *
+ surf->nsamples * surf->bpe * 64,
+ xalign * yalign * surf->nsamples * surf->bpe);
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ surf->level[i].mode = RADEON_SURF_MODE_2D;
+ surf_minify(surf, surf->level+i, surf->bpe, i, xalign, yalign, zalign, offset);
+ if (surf->level[i].mode == RADEON_SURF_MODE_1D) {
+ return r6_surface_init_1d(surf_man, surf, offset, i);
+ }
+ /* level0 and first mipmap need to have alignment */
+ offset = surf->bo_size;
+ if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ }
+ return 0;
+}
+
+static int r6_surface_init(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode;
+ int r;
+
+ /* MSAA surfaces support the 2D mode only. */
+ if (surf->nsamples > 1) {
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
+ }
+
+ /* tiling mode */
+ mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
+
+ if (surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)) {
+        /* zbuffers only support 1D or 2D tiled surfaces */
+ switch (mode) {
+ case RADEON_SURF_MODE_1D:
+ case RADEON_SURF_MODE_2D:
+ break;
+ default:
+ mode = RADEON_SURF_MODE_1D;
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE);
+ break;
+ }
+ }
+
+ /* force 1d on kernel that can't do 2d */
+ if (!surf_man->hw_info.allow_2d && mode > RADEON_SURF_MODE_1D) {
+ if (surf->nsamples > 1) {
+ fprintf(stderr, "radeon: Cannot use 2D tiling for an MSAA surface (%i).\n", __LINE__);
+ return -EFAULT;
+ }
+ mode = RADEON_SURF_MODE_1D;
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(mode, MODE);
+ }
+
+ /* check surface dimension */
+ if (surf->npix_x > 8192 || surf->npix_y > 8192 || surf->npix_z > 8192) {
+ return -EINVAL;
+ }
+
+ /* check mipmap last_level */
+ if (surf->last_level > 14) {
+ return -EINVAL;
+ }
+
+ /* check tiling mode */
+ switch (mode) {
+ case RADEON_SURF_MODE_LINEAR:
+ r = r6_surface_init_linear(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_LINEAR_ALIGNED:
+ r = r6_surface_init_linear_aligned(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_1D:
+ r = r6_surface_init_1d(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_2D:
+ r = r6_surface_init_2d(surf_man, surf, 0, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return r;
+}
+
+static int r6_surface_best(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ /* no value to optimize for r6xx/r7xx */
+ return 0;
+}
+
+
+/* ===========================================================================
+ * evergreen family
+ */
+static int eg_init_hw_info(struct radeon_surface_manager *surf_man)
+{
+ uint32_t tiling_config;
+ drmVersionPtr version;
+ int r;
+
+ r = radeon_get_value(surf_man->fd, RADEON_INFO_TILING_CONFIG,
+ &tiling_config);
+ if (r) {
+ return r;
+ }
+
+ surf_man->hw_info.allow_2d = 0;
+ version = drmGetVersion(surf_man->fd);
+ if (version && version->version_minor >= 16) {
+ surf_man->hw_info.allow_2d = 1;
+ }
+ drmFreeVersion(version);
+
+ switch (tiling_config & 0xf) {
+ case 0:
+ surf_man->hw_info.num_pipes = 1;
+ break;
+ case 1:
+ surf_man->hw_info.num_pipes = 2;
+ break;
+ case 2:
+ surf_man->hw_info.num_pipes = 4;
+ break;
+ case 3:
+ surf_man->hw_info.num_pipes = 8;
+ break;
+ default:
+ surf_man->hw_info.num_pipes = 8;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xf0) >> 4) {
+ case 0:
+ surf_man->hw_info.num_banks = 4;
+ break;
+ case 1:
+ surf_man->hw_info.num_banks = 8;
+ break;
+ case 2:
+ surf_man->hw_info.num_banks = 16;
+ break;
+ default:
+ surf_man->hw_info.num_banks = 8;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xf00) >> 8) {
+ case 0:
+ surf_man->hw_info.group_bytes = 256;
+ break;
+ case 1:
+ surf_man->hw_info.group_bytes = 512;
+ break;
+ default:
+ surf_man->hw_info.group_bytes = 256;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xf000) >> 12) {
+ case 0:
+ surf_man->hw_info.row_size = 1024;
+ break;
+ case 1:
+ surf_man->hw_info.row_size = 2048;
+ break;
+ case 2:
+ surf_man->hw_info.row_size = 4096;
+ break;
+ default:
+ surf_man->hw_info.row_size = 4096;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+ return 0;
+}
+
+static void eg_surf_minify(struct radeon_surface *surf,
+ struct radeon_surface_level *surflevel,
+ unsigned bpe,
+ unsigned level,
+ unsigned slice_pt,
+ unsigned mtilew,
+ unsigned mtileh,
+ unsigned mtileb,
+ uint64_t offset)
+{
+ unsigned mtile_pr, mtile_ps;
+
+ surflevel->npix_x = mip_minify(surf->npix_x, level);
+ surflevel->npix_y = mip_minify(surf->npix_y, level);
+ surflevel->npix_z = mip_minify(surf->npix_z, level);
+ surflevel->nblk_x = (surflevel->npix_x + surf->blk_w - 1) / surf->blk_w;
+ surflevel->nblk_y = (surflevel->npix_y + surf->blk_h - 1) / surf->blk_h;
+ surflevel->nblk_z = (surflevel->npix_z + surf->blk_d - 1) / surf->blk_d;
+ if (surf->nsamples == 1 && surflevel->mode == RADEON_SURF_MODE_2D &&
+ !(surf->flags & RADEON_SURF_FMASK)) {
+ if (surflevel->nblk_x < mtilew || surflevel->nblk_y < mtileh) {
+ surflevel->mode = RADEON_SURF_MODE_1D;
+ return;
+ }
+ }
+ surflevel->nblk_x = ALIGN(surflevel->nblk_x, mtilew);
+ surflevel->nblk_y = ALIGN(surflevel->nblk_y, mtileh);
+ surflevel->nblk_z = ALIGN(surflevel->nblk_z, 1);
+
+ /* macro tile per row */
+ mtile_pr = surflevel->nblk_x / mtilew;
+ /* macro tile per slice */
+ mtile_ps = (mtile_pr * surflevel->nblk_y) / mtileh;
+
+ surflevel->offset = offset;
+ surflevel->pitch_bytes = surflevel->nblk_x * bpe * surf->nsamples;
+ surflevel->slice_size = (uint64_t)mtile_ps * mtileb * slice_pt;
+
+ surf->bo_size = offset + surflevel->slice_size * surflevel->nblk_z * surf->array_size;
+}
+
+static int eg_surface_init_1d(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ struct radeon_surface_level *level,
+ unsigned bpe,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign, tilew;
+ unsigned i;
+
+ /* compute alignment */
+ tilew = 8;
+ xalign = surf_man->hw_info.group_bytes / (tilew * bpe * surf->nsamples);
+ xalign = MAX2(tilew, xalign);
+ yalign = tilew;
+ zalign = 1;
+ if (surf->flags & RADEON_SURF_SCANOUT) {
+ xalign = MAX2((bpe == 1) ? 64 : 32, xalign);
+ }
+
+ if (!start_level) {
+ unsigned alignment = MAX2(256, surf_man->hw_info.group_bytes);
+ surf->bo_alignment = MAX2(surf->bo_alignment, alignment);
+
+ if (offset) {
+ offset = ALIGN(offset, alignment);
+ }
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ level[i].mode = RADEON_SURF_MODE_1D;
+ surf_minify(surf, level+i, bpe, i, xalign, yalign, zalign, offset);
+ /* level0 and first mipmap need to have alignment */
+ offset = surf->bo_size;
+ if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ }
+ return 0;
+}
+
+static int eg_surface_init_2d(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ struct radeon_surface_level *level,
+ unsigned bpe, unsigned tile_split,
+ uint64_t offset, unsigned start_level)
+{
+ unsigned tilew, tileh, tileb;
+ unsigned mtilew, mtileh, mtileb;
+ unsigned slice_pt;
+ unsigned i;
+
+ /* compute tile values */
+ tilew = 8;
+ tileh = 8;
+ tileb = tilew * tileh * bpe * surf->nsamples;
+ /* slices per tile */
+ slice_pt = 1;
+ if (tileb > tile_split && tile_split) {
+ slice_pt = tileb / tile_split;
+ }
+ tileb = tileb / slice_pt;
+
+ /* macro tile width & height */
+ mtilew = (tilew * surf->bankw * surf_man->hw_info.num_pipes) * surf->mtilea;
+ mtileh = (tileh * surf->bankh * surf_man->hw_info.num_banks) / surf->mtilea;
+ /* macro tile bytes */
+ mtileb = (mtilew / tilew) * (mtileh / tileh) * tileb;
+
+ if (!start_level) {
+ unsigned alignment = MAX2(256, mtileb);
+ surf->bo_alignment = MAX2(surf->bo_alignment, alignment);
+
+ if (offset) {
+ offset = ALIGN(offset, alignment);
+ }
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ level[i].mode = RADEON_SURF_MODE_2D;
+ eg_surf_minify(surf, level+i, bpe, i, slice_pt, mtilew, mtileh, mtileb, offset);
+ if (level[i].mode == RADEON_SURF_MODE_1D) {
+ return eg_surface_init_1d(surf_man, surf, level, bpe, offset, i);
+ }
+ /* level0 and first mipmap need to have alignment */
+ offset = surf->bo_size;
+ if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ }
+ return 0;
+}
+
+static int eg_surface_sanity(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ unsigned mode)
+{
+ unsigned tileb;
+
+ /* check surface dimension */
+ if (surf->npix_x > 16384 || surf->npix_y > 16384 || surf->npix_z > 16384) {
+ return -EINVAL;
+ }
+
+ /* check mipmap last_level */
+ if (surf->last_level > 15) {
+ return -EINVAL;
+ }
+
+ /* force 1d on kernel that can't do 2d */
+ if (!surf_man->hw_info.allow_2d && mode > RADEON_SURF_MODE_1D) {
+ if (surf->nsamples > 1) {
+ fprintf(stderr, "radeon: Cannot use 2D tiling for an MSAA surface (%i).\n", __LINE__);
+ return -EFAULT;
+ }
+ mode = RADEON_SURF_MODE_1D;
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(mode, MODE);
+ }
+
+ /* check tile split */
+ if (mode == RADEON_SURF_MODE_2D) {
+ switch (surf->tile_split) {
+ case 64:
+ case 128:
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ case 4096:
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (surf->mtilea) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* check aspect ratio */
+ if (surf_man->hw_info.num_banks < surf->mtilea) {
+ return -EINVAL;
+ }
+ /* check bank width */
+ switch (surf->bankw) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* check bank height */
+ switch (surf->bankh) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ tileb = MIN2(surf->tile_split, 64 * surf->bpe * surf->nsamples);
+ if ((tileb * surf->bankh * surf->bankw) < surf_man->hw_info.group_bytes) {
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int eg_surface_init_1d_miptrees(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned zs_flags = RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER;
+ int r, is_depth_stencil = (surf->flags & zs_flags) == zs_flags;
+    /* Old libdrm headers didn't have stencil_level in them. This prevents crashes. */
+ struct radeon_surface_level tmp[RADEON_SURF_MAX_LEVEL];
+ struct radeon_surface_level *stencil_level =
+ (surf->flags & RADEON_SURF_HAS_SBUFFER_MIPTREE) ? surf->stencil_level : tmp;
+
+ r = eg_surface_init_1d(surf_man, surf, surf->level, surf->bpe, 0, 0);
+ if (r)
+ return r;
+
+ if (is_depth_stencil) {
+ r = eg_surface_init_1d(surf_man, surf, stencil_level, 1,
+ surf->bo_size, 0);
+ surf->stencil_offset = stencil_level[0].offset;
+ }
+ return r;
+}
+
+static int eg_surface_init_2d_miptrees(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned zs_flags = RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER;
+ int r, is_depth_stencil = (surf->flags & zs_flags) == zs_flags;
+    /* Old libdrm headers didn't have stencil_level in them. This prevents crashes. */
+ struct radeon_surface_level tmp[RADEON_SURF_MAX_LEVEL];
+ struct radeon_surface_level *stencil_level =
+ (surf->flags & RADEON_SURF_HAS_SBUFFER_MIPTREE) ? surf->stencil_level : tmp;
+
+ r = eg_surface_init_2d(surf_man, surf, surf->level, surf->bpe,
+ surf->tile_split, 0, 0);
+ if (r)
+ return r;
+
+ if (is_depth_stencil) {
+ r = eg_surface_init_2d(surf_man, surf, stencil_level, 1,
+ surf->stencil_tile_split, surf->bo_size, 0);
+ surf->stencil_offset = stencil_level[0].offset;
+ }
+ return r;
+}
+
+static int eg_surface_init(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode;
+ int r;
+
+ /* MSAA surfaces support the 2D mode only. */
+ if (surf->nsamples > 1) {
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
+ }
+
+ /* tiling mode */
+ mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
+
+ if (surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)) {
+ /* zbuffer only support 1D or 2D tiled surface */
+ switch (mode) {
+ case RADEON_SURF_MODE_1D:
+ case RADEON_SURF_MODE_2D:
+ break;
+ default:
+ mode = RADEON_SURF_MODE_1D;
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE);
+ break;
+ }
+ }
+
+ r = eg_surface_sanity(surf_man, surf, mode);
+ if (r) {
+ return r;
+ }
+
+ surf->stencil_offset = 0;
+ surf->bo_alignment = 0;
+
+ /* check tiling mode */
+ switch (mode) {
+ case RADEON_SURF_MODE_LINEAR:
+ r = r6_surface_init_linear(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_LINEAR_ALIGNED:
+ r = r6_surface_init_linear_aligned(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_1D:
+ r = eg_surface_init_1d_miptrees(surf_man, surf);
+ break;
+ case RADEON_SURF_MODE_2D:
+ r = eg_surface_init_2d_miptrees(surf_man, surf);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return r;
+}
+
+static unsigned log2_int(unsigned x)
+{
+ unsigned l;
+
+ if (x < 2) {
+ return 0;
+ }
+ for (l = 2; ; l++) {
+ if ((unsigned)(1 << l) > x) {
+ return l - 1;
+ }
+ }
+ return 0;
+}
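+
+/* Quick sanity examples for log2_int() above (it returns floor(log2(x)) for
+ * x >= 1, and 0 for x < 2): log2_int(1) == 0, log2_int(4) == 2,
+ * log2_int(9) == 3.
+ */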
+
+/* compute best tile_split, bankw, bankh, mtilea
+ * depending on surface
+ */
+static int eg_surface_best(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode, tileb, h_over_w;
+ int r;
+
+ /* tiling mode */
+ mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
+
+ /* set some default value to avoid sanity check choking on them */
+ surf->tile_split = 1024;
+ surf->bankw = 1;
+ surf->bankh = 1;
+ surf->mtilea = surf_man->hw_info.num_banks;
+ tileb = MIN2(surf->tile_split, 64 * surf->bpe * surf->nsamples);
+ for (; surf->bankh <= 8; surf->bankh *= 2) {
+ if ((tileb * surf->bankh * surf->bankw) >= surf_man->hw_info.group_bytes) {
+ break;
+ }
+ }
+ if (surf->mtilea > 8) {
+ surf->mtilea = 8;
+ }
+
+ r = eg_surface_sanity(surf_man, surf, mode);
+ if (r) {
+ return r;
+ }
+
+ if (mode != RADEON_SURF_MODE_2D) {
+ /* nothing to do for non 2D tiled surface */
+ return 0;
+ }
+
+ /* Tweak TILE_SPLIT for performance here. */
+ if (surf->nsamples > 1) {
+ if (surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)) {
+ switch (surf->nsamples) {
+ case 2:
+ surf->tile_split = 128;
+ break;
+ case 4:
+ surf->tile_split = 128;
+ break;
+ case 8:
+ surf->tile_split = 256;
+ break;
+ case 16: /* cayman only */
+ surf->tile_split = 512;
+ break;
+ default:
+ fprintf(stderr, "radeon: Wrong number of samples %i (%i)\n",
+ surf->nsamples, __LINE__);
+ return -EINVAL;
+ }
+ surf->stencil_tile_split = 64;
+ } else {
+ /* tile split must be >= 256 for colorbuffer surfaces,
+ * SAMPLE_SPLIT = tile_split / (bpe * 64), the optimal value is 2
+ */
+ surf->tile_split = MAX2(2 * surf->bpe * 64, 256);
+ if (surf->tile_split > 4096)
+ surf->tile_split = 4096;
+ }
+ } else {
+ /* set tile split to row size */
+ surf->tile_split = surf_man->hw_info.row_size;
+ surf->stencil_tile_split = surf_man->hw_info.row_size / 2;
+ }
+
+ /* bankw or bankh greater than 1 increases the alignment requirement; not
+ * sure if it's worth using smaller bankw & bankh to stick with 2D
+ * tiling on small surfaces rather than falling back to 1D tiling.
+ * Use the recommended value based on tile size for now.
+ *
+ * The fmask buffer has different optimal values; figure them out once we
+ * use it.
+ */
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ /* assume 1 byte for stencil; we optimize for stencil since stencil
+ * and depth share surface values
+ */
+ tileb = MIN2(surf->tile_split, 64 * surf->nsamples);
+ } else {
+ tileb = MIN2(surf->tile_split, 64 * surf->bpe * surf->nsamples);
+ }
+
+ /* use bankw of 1 to minimize width alignment, might be interesting to
+ * increase it for large surface
+ */
+ surf->bankw = 1;
+ switch (tileb) {
+ case 64:
+ surf->bankh = 4;
+ break;
+ case 128:
+ case 256:
+ surf->bankh = 2;
+ break;
+ default:
+ surf->bankh = 1;
+ break;
+ }
+ /* double check the constraint */
+ for (; surf->bankh <= 8; surf->bankh *= 2) {
+ if ((tileb * surf->bankh * surf->bankw) >= surf_man->hw_info.group_bytes) {
+ break;
+ }
+ }
+
+ h_over_w = (((surf->bankh * surf_man->hw_info.num_banks) << 16) /
+ (surf->bankw * surf_man->hw_info.num_pipes)) >> 16;
+ surf->mtilea = 1 << (log2_int(h_over_w) >> 1);
+
+ return 0;
+}
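+
+/* Worked example for eg_surface_best() above (illustrative only, with
+ * assumed hardware values: num_banks = 8, num_pipes = 4, group_bytes = 256,
+ * row_size = 4096) for a 2D-tiled, single-sample 32bpp color surface
+ * (bpe = 4): tile_split = row_size = 4096; tileb = MIN2(4096, 64 * 4 * 1)
+ * = 256, so bankw = 1 and bankh = 2; h_over_w = (2 * 8) / (1 * 4) = 4,
+ * giving mtilea = 1 << (log2_int(4) >> 1) = 2.
+ */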
+
+
+/* ===========================================================================
+ * Southern Islands family
+ */
+#define SI__GB_TILE_MODE__PIPE_CONFIG(x) (((x) >> 6) & 0x1f)
+#define SI__PIPE_CONFIG__ADDR_SURF_P2 0
+#define SI__PIPE_CONFIG__ADDR_SURF_P4_8x16 4
+#define SI__PIPE_CONFIG__ADDR_SURF_P4_16x16 5
+#define SI__PIPE_CONFIG__ADDR_SURF_P4_16x32 6
+#define SI__PIPE_CONFIG__ADDR_SURF_P4_32x32 7
+#define SI__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16 8
+#define SI__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16 9
+#define SI__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16 10
+#define SI__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16 11
+#define SI__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16 12
+#define SI__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32 13
+#define SI__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32 14
+#define SI__GB_TILE_MODE__TILE_SPLIT(x) (((x) >> 11) & 0x7)
+#define SI__TILE_SPLIT__64B 0
+#define SI__TILE_SPLIT__128B 1
+#define SI__TILE_SPLIT__256B 2
+#define SI__TILE_SPLIT__512B 3
+#define SI__TILE_SPLIT__1024B 4
+#define SI__TILE_SPLIT__2048B 5
+#define SI__TILE_SPLIT__4096B 6
+#define SI__GB_TILE_MODE__BANK_WIDTH(x) (((x) >> 14) & 0x3)
+#define SI__BANK_WIDTH__1 0
+#define SI__BANK_WIDTH__2 1
+#define SI__BANK_WIDTH__4 2
+#define SI__BANK_WIDTH__8 3
+#define SI__GB_TILE_MODE__BANK_HEIGHT(x) (((x) >> 16) & 0x3)
+#define SI__BANK_HEIGHT__1 0
+#define SI__BANK_HEIGHT__2 1
+#define SI__BANK_HEIGHT__4 2
+#define SI__BANK_HEIGHT__8 3
+#define SI__GB_TILE_MODE__MACRO_TILE_ASPECT(x) (((x) >> 18) & 0x3)
+#define SI__MACRO_TILE_ASPECT__1 0
+#define SI__MACRO_TILE_ASPECT__2 1
+#define SI__MACRO_TILE_ASPECT__4 2
+#define SI__MACRO_TILE_ASPECT__8 3
+#define SI__GB_TILE_MODE__NUM_BANKS(x) (((x) >> 20) & 0x3)
+#define SI__NUM_BANKS__2_BANK 0
+#define SI__NUM_BANKS__4_BANK 1
+#define SI__NUM_BANKS__8_BANK 2
+#define SI__NUM_BANKS__16_BANK 3
+
+
+static void si_gb_tile_mode(uint32_t gb_tile_mode,
+ unsigned *num_pipes,
+ unsigned *num_banks,
+ uint32_t *macro_tile_aspect,
+ uint32_t *bank_w,
+ uint32_t *bank_h,
+ uint32_t *tile_split)
+{
+ if (num_pipes) {
+ switch (SI__GB_TILE_MODE__PIPE_CONFIG(gb_tile_mode)) {
+ case SI__PIPE_CONFIG__ADDR_SURF_P2:
+ default:
+ *num_pipes = 2;
+ break;
+ case SI__PIPE_CONFIG__ADDR_SURF_P4_8x16:
+ case SI__PIPE_CONFIG__ADDR_SURF_P4_16x16:
+ case SI__PIPE_CONFIG__ADDR_SURF_P4_16x32:
+ case SI__PIPE_CONFIG__ADDR_SURF_P4_32x32:
+ *num_pipes = 4;
+ break;
+ case SI__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16:
+ case SI__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16:
+ case SI__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16:
+ case SI__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16:
+ case SI__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16:
+ case SI__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32:
+ case SI__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32:
+ *num_pipes = 8;
+ break;
+ }
+ }
+ if (num_banks) {
+ switch (SI__GB_TILE_MODE__NUM_BANKS(gb_tile_mode)) {
+ default:
+ case SI__NUM_BANKS__2_BANK:
+ *num_banks = 2;
+ break;
+ case SI__NUM_BANKS__4_BANK:
+ *num_banks = 4;
+ break;
+ case SI__NUM_BANKS__8_BANK:
+ *num_banks = 8;
+ break;
+ case SI__NUM_BANKS__16_BANK:
+ *num_banks = 16;
+ break;
+ }
+ }
+ if (macro_tile_aspect) {
+ switch (SI__GB_TILE_MODE__MACRO_TILE_ASPECT(gb_tile_mode)) {
+ default:
+ case SI__MACRO_TILE_ASPECT__1:
+ *macro_tile_aspect = 1;
+ break;
+ case SI__MACRO_TILE_ASPECT__2:
+ *macro_tile_aspect = 2;
+ break;
+ case SI__MACRO_TILE_ASPECT__4:
+ *macro_tile_aspect = 4;
+ break;
+ case SI__MACRO_TILE_ASPECT__8:
+ *macro_tile_aspect = 8;
+ break;
+ }
+ }
+ if (bank_w) {
+ switch (SI__GB_TILE_MODE__BANK_WIDTH(gb_tile_mode)) {
+ default:
+ case SI__BANK_WIDTH__1:
+ *bank_w = 1;
+ break;
+ case SI__BANK_WIDTH__2:
+ *bank_w = 2;
+ break;
+ case SI__BANK_WIDTH__4:
+ *bank_w = 4;
+ break;
+ case SI__BANK_WIDTH__8:
+ *bank_w = 8;
+ break;
+ }
+ }
+ if (bank_h) {
+ switch (SI__GB_TILE_MODE__BANK_HEIGHT(gb_tile_mode)) {
+ default:
+ case SI__BANK_HEIGHT__1:
+ *bank_h = 1;
+ break;
+ case SI__BANK_HEIGHT__2:
+ *bank_h = 2;
+ break;
+ case SI__BANK_HEIGHT__4:
+ *bank_h = 4;
+ break;
+ case SI__BANK_HEIGHT__8:
+ *bank_h = 8;
+ break;
+ }
+ }
+ if (tile_split) {
+ switch (SI__GB_TILE_MODE__TILE_SPLIT(gb_tile_mode)) {
+ default:
+ case SI__TILE_SPLIT__64B:
+ *tile_split = 64;
+ break;
+ case SI__TILE_SPLIT__128B:
+ *tile_split = 128;
+ break;
+ case SI__TILE_SPLIT__256B:
+ *tile_split = 256;
+ break;
+ case SI__TILE_SPLIT__512B:
+ *tile_split = 512;
+ break;
+ case SI__TILE_SPLIT__1024B:
+ *tile_split = 1024;
+ break;
+ case SI__TILE_SPLIT__2048B:
+ *tile_split = 2048;
+ break;
+ case SI__TILE_SPLIT__4096B:
+ *tile_split = 4096;
+ break;
+ }
+ }
+}
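+
+/* Decode example for si_gb_tile_mode() above (illustrative, hypothetical
+ * register word):
+ *   gb_tile_mode = (10 << 6) | (4 << 11) | (1 << 14) | (2 << 16) | (2 << 20)
+ * i.e. PIPE_CONFIG = P8_32x32_8x16, TILE_SPLIT = 1024B, BANK_WIDTH = 2,
+ * BANK_HEIGHT = 4, MACRO_TILE_ASPECT = 1, NUM_BANKS = 8_BANK; the helper
+ * then reports num_pipes = 8, tile_split = 1024, bank_w = 2, bank_h = 4,
+ * macro_tile_aspect = 1 and num_banks = 8.
+ */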
+
+static int si_init_hw_info(struct radeon_surface_manager *surf_man)
+{
+ uint32_t tiling_config;
+ drmVersionPtr version;
+ int r;
+
+ r = radeon_get_value(surf_man->fd, RADEON_INFO_TILING_CONFIG,
+ &tiling_config);
+ if (r) {
+ return r;
+ }
+
+ surf_man->hw_info.allow_2d = 0;
+ version = drmGetVersion(surf_man->fd);
+ if (version && version->version_minor >= 33) {
+ if (!radeon_get_value(surf_man->fd, RADEON_INFO_SI_TILE_MODE_ARRAY, surf_man->hw_info.tile_mode_array)) {
+ surf_man->hw_info.allow_2d = 1;
+ }
+ }
+ drmFreeVersion(version);
+
+ switch (tiling_config & 0xf) {
+ case 0:
+ surf_man->hw_info.num_pipes = 1;
+ break;
+ case 1:
+ surf_man->hw_info.num_pipes = 2;
+ break;
+ case 2:
+ surf_man->hw_info.num_pipes = 4;
+ break;
+ case 3:
+ surf_man->hw_info.num_pipes = 8;
+ break;
+ default:
+ surf_man->hw_info.num_pipes = 8;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xf0) >> 4) {
+ case 0:
+ surf_man->hw_info.num_banks = 4;
+ break;
+ case 1:
+ surf_man->hw_info.num_banks = 8;
+ break;
+ case 2:
+ surf_man->hw_info.num_banks = 16;
+ break;
+ default:
+ surf_man->hw_info.num_banks = 8;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xf00) >> 8) {
+ case 0:
+ surf_man->hw_info.group_bytes = 256;
+ break;
+ case 1:
+ surf_man->hw_info.group_bytes = 512;
+ break;
+ default:
+ surf_man->hw_info.group_bytes = 256;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xf000) >> 12) {
+ case 0:
+ surf_man->hw_info.row_size = 1024;
+ break;
+ case 1:
+ surf_man->hw_info.row_size = 2048;
+ break;
+ case 2:
+ surf_man->hw_info.row_size = 4096;
+ break;
+ default:
+ surf_man->hw_info.row_size = 4096;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+ return 0;
+}
+
+static int si_surface_sanity(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ unsigned mode, unsigned *tile_mode, unsigned *stencil_tile_mode)
+{
+ uint32_t gb_tile_mode;
+
+ /* check surface dimension */
+ if (surf->npix_x > 16384 || surf->npix_y > 16384 || surf->npix_z > 16384) {
+ return -EINVAL;
+ }
+
+ /* check mipmap last_level */
+ if (surf->last_level > 15) {
+ return -EINVAL;
+ }
+
+ /* force 1d on kernel that can't do 2d */
+ if (mode > RADEON_SURF_MODE_1D &&
+ (!surf_man->hw_info.allow_2d || !(surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX))) {
+ if (surf->nsamples > 1) {
+ fprintf(stderr, "radeon: Cannot use 1D tiling for an MSAA surface (%i).\n", __LINE__);
+ return -EFAULT;
+ }
+ mode = RADEON_SURF_MODE_1D;
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(mode, MODE);
+ }
+
+ if (surf->nsamples > 1 && mode != RADEON_SURF_MODE_2D) {
+ return -EINVAL;
+ }
+
+ if (!surf->tile_split) {
+ /* default value */
+ surf->mtilea = 1;
+ surf->bankw = 1;
+ surf->bankh = 1;
+ surf->tile_split = 64;
+ surf->stencil_tile_split = 64;
+ }
+
+ switch (mode) {
+ case RADEON_SURF_MODE_2D:
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ switch (surf->nsamples) {
+ case 1:
+ *stencil_tile_mode = SI_TILE_MODE_DEPTH_STENCIL_2D;
+ break;
+ case 2:
+ *stencil_tile_mode = SI_TILE_MODE_DEPTH_STENCIL_2D_2AA;
+ break;
+ case 4:
+ *stencil_tile_mode = SI_TILE_MODE_DEPTH_STENCIL_2D_4AA;
+ break;
+ case 8:
+ *stencil_tile_mode = SI_TILE_MODE_DEPTH_STENCIL_2D_8AA;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* retrieve tiling mode value */
+ gb_tile_mode = surf_man->hw_info.tile_mode_array[*stencil_tile_mode];
+ si_gb_tile_mode(gb_tile_mode, NULL, NULL, NULL, NULL, NULL, &surf->stencil_tile_split);
+ }
+ if (surf->flags & RADEON_SURF_ZBUFFER) {
+ switch (surf->nsamples) {
+ case 1:
+ *tile_mode = SI_TILE_MODE_DEPTH_STENCIL_2D;
+ break;
+ case 2:
+ *tile_mode = SI_TILE_MODE_DEPTH_STENCIL_2D_2AA;
+ break;
+ case 4:
+ *tile_mode = SI_TILE_MODE_DEPTH_STENCIL_2D_4AA;
+ break;
+ case 8:
+ *tile_mode = SI_TILE_MODE_DEPTH_STENCIL_2D_8AA;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (surf->flags & RADEON_SURF_SCANOUT) {
+ switch (surf->bpe) {
+ case 2:
+ *tile_mode = SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP;
+ break;
+ case 4:
+ *tile_mode = SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (surf->bpe) {
+ case 1:
+ *tile_mode = SI_TILE_MODE_COLOR_2D_8BPP;
+ break;
+ case 2:
+ *tile_mode = SI_TILE_MODE_COLOR_2D_16BPP;
+ break;
+ case 4:
+ *tile_mode = SI_TILE_MODE_COLOR_2D_32BPP;
+ break;
+ case 8:
+ case 16:
+ *tile_mode = SI_TILE_MODE_COLOR_2D_64BPP;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ /* retrieve tiling mode value */
+ gb_tile_mode = surf_man->hw_info.tile_mode_array[*tile_mode];
+ si_gb_tile_mode(gb_tile_mode, NULL, NULL, &surf->mtilea, &surf->bankw, &surf->bankh, &surf->tile_split);
+ break;
+ case RADEON_SURF_MODE_1D:
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ *stencil_tile_mode = SI_TILE_MODE_DEPTH_STENCIL_1D;
+ }
+ if (surf->flags & RADEON_SURF_ZBUFFER) {
+ *tile_mode = SI_TILE_MODE_DEPTH_STENCIL_1D;
+ } else if (surf->flags & RADEON_SURF_SCANOUT) {
+ *tile_mode = SI_TILE_MODE_COLOR_1D_SCANOUT;
+ } else {
+ *tile_mode = SI_TILE_MODE_COLOR_1D;
+ }
+ break;
+ case RADEON_SURF_MODE_LINEAR_ALIGNED:
+ default:
+ *tile_mode = SI_TILE_MODE_COLOR_LINEAR_ALIGNED;
+ }
+
+ return 0;
+}
+
+static void si_surf_minify(struct radeon_surface *surf,
+ struct radeon_surface_level *surflevel,
+ unsigned bpe, unsigned level,
+ uint32_t xalign, uint32_t yalign, uint32_t zalign,
+ uint32_t slice_align, uint64_t offset)
+{
+ if (level == 0) {
+ surflevel->npix_x = surf->npix_x;
+ } else {
+ surflevel->npix_x = mip_minify(next_power_of_two(surf->npix_x), level);
+ }
+ surflevel->npix_y = mip_minify(surf->npix_y, level);
+ surflevel->npix_z = mip_minify(surf->npix_z, level);
+
+ if (level == 0 && surf->last_level > 0) {
+ surflevel->nblk_x = (next_power_of_two(surflevel->npix_x) + surf->blk_w - 1) / surf->blk_w;
+ surflevel->nblk_y = (next_power_of_two(surflevel->npix_y) + surf->blk_h - 1) / surf->blk_h;
+ surflevel->nblk_z = (next_power_of_two(surflevel->npix_z) + surf->blk_d - 1) / surf->blk_d;
+ } else {
+ surflevel->nblk_x = (surflevel->npix_x + surf->blk_w - 1) / surf->blk_w;
+ surflevel->nblk_y = (surflevel->npix_y + surf->blk_h - 1) / surf->blk_h;
+ surflevel->nblk_z = (surflevel->npix_z + surf->blk_d - 1) / surf->blk_d;
+ }
+
+ surflevel->nblk_y = ALIGN(surflevel->nblk_y, yalign);
+
+ /* XXX: Texture sampling uses unexpectedly large pitches in some cases,
+ * these are just guesses for the rules behind those
+ */
+ if (level == 0 && surf->last_level == 0)
+ /* Non-mipmap pitch padded to slice alignment */
+ /* Using just bpe here breaks stencil blitting; surf->bpe works. */
+ xalign = MAX2(xalign, slice_align / surf->bpe);
+ else if (surflevel->mode == RADEON_SURF_MODE_LINEAR_ALIGNED)
+ /* Small rows evenly distributed across slice */
+ xalign = MAX2(xalign, slice_align / bpe / surflevel->nblk_y);
+
+ surflevel->nblk_x = ALIGN(surflevel->nblk_x, xalign);
+ surflevel->nblk_z = ALIGN(surflevel->nblk_z, zalign);
+
+ surflevel->offset = offset;
+ surflevel->pitch_bytes = surflevel->nblk_x * bpe * surf->nsamples;
+ surflevel->slice_size = ALIGN((uint64_t)surflevel->pitch_bytes * surflevel->nblk_y,
+ (uint64_t)slice_align);
+
+ surf->bo_size = offset + surflevel->slice_size * surflevel->nblk_z * surf->array_size;
+}
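+
+/* Size example for si_surf_minify() above (illustrative): with bpe = 4,
+ * nsamples = 1, slice_align = 256 and a level whose aligned block counts are
+ * nblk_x = 256, nblk_y = 256, nblk_z = 1, the level gets
+ * pitch_bytes = 256 * 4 = 1024 and slice_size = ALIGN(1024 * 256, 256),
+ * i.e. 256 KiB.
+ */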
+
+static void si_surf_minify_2d(struct radeon_surface *surf,
+ struct radeon_surface_level *surflevel,
+ unsigned bpe, unsigned level, unsigned slice_pt,
+ uint32_t xalign, uint32_t yalign, uint32_t zalign,
+ unsigned mtileb, uint64_t offset)
+{
+ unsigned mtile_pr, mtile_ps;
+
+ if (level == 0) {
+ surflevel->npix_x = surf->npix_x;
+ } else {
+ surflevel->npix_x = mip_minify(next_power_of_two(surf->npix_x), level);
+ }
+ surflevel->npix_y = mip_minify(surf->npix_y, level);
+ surflevel->npix_z = mip_minify(surf->npix_z, level);
+
+ if (level == 0 && surf->last_level > 0) {
+ surflevel->nblk_x = (next_power_of_two(surflevel->npix_x) + surf->blk_w - 1) / surf->blk_w;
+ surflevel->nblk_y = (next_power_of_two(surflevel->npix_y) + surf->blk_h - 1) / surf->blk_h;
+ surflevel->nblk_z = (next_power_of_two(surflevel->npix_z) + surf->blk_d - 1) / surf->blk_d;
+ } else {
+ surflevel->nblk_x = (surflevel->npix_x + surf->blk_w - 1) / surf->blk_w;
+ surflevel->nblk_y = (surflevel->npix_y + surf->blk_h - 1) / surf->blk_h;
+ surflevel->nblk_z = (surflevel->npix_z + surf->blk_d - 1) / surf->blk_d;
+ }
+
+ if (surf->nsamples == 1 && surflevel->mode == RADEON_SURF_MODE_2D &&
+ !(surf->flags & RADEON_SURF_FMASK)) {
+ if (surflevel->nblk_x < xalign || surflevel->nblk_y < yalign) {
+ surflevel->mode = RADEON_SURF_MODE_1D;
+ return;
+ }
+ }
+ surflevel->nblk_x = ALIGN(surflevel->nblk_x, xalign);
+ surflevel->nblk_y = ALIGN(surflevel->nblk_y, yalign);
+ surflevel->nblk_z = ALIGN(surflevel->nblk_z, zalign);
+
+ /* macro tile per row */
+ mtile_pr = surflevel->nblk_x / xalign;
+ /* macro tile per slice */
+ mtile_ps = (mtile_pr * surflevel->nblk_y) / yalign;
+ surflevel->offset = offset;
+ surflevel->pitch_bytes = surflevel->nblk_x * bpe * surf->nsamples;
+ surflevel->slice_size = (uint64_t)mtile_ps * mtileb * slice_pt;
+
+ surf->bo_size = offset + surflevel->slice_size * surflevel->nblk_z * surf->array_size;
+}
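+
+/* Macro-tile example for si_surf_minify_2d() above (illustrative): with
+ * xalign = mtilew = 64, yalign = mtileh = 64, mtileb = 16384, slice_pt = 1
+ * and aligned block counts nblk_x = 256, nblk_y = 128, nblk_z = 1, this
+ * gives mtile_pr = 256 / 64 = 4, mtile_ps = (4 * 128) / 64 = 8 and
+ * slice_size = 8 * 16384 * 1 = 128 KiB.
+ */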
+
+static int si_surface_init_linear_aligned(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ unsigned tile_mode,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign, slice_align;
+ unsigned i;
+
+ /* compute alignment */
+ if (!start_level) {
+ surf->bo_alignment = MAX2(256, surf_man->hw_info.group_bytes);
+ }
+ xalign = MAX2(8, 64 / surf->bpe);
+ yalign = 1;
+ zalign = 1;
+ slice_align = MAX2(64 * surf->bpe, surf_man->hw_info.group_bytes);
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ surf->level[i].mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
+ si_surf_minify(surf, surf->level+i, surf->bpe, i, xalign, yalign, zalign, slice_align, offset);
+ /* level0 and first mipmap need to have alignment */
+ offset = surf->bo_size;
+ if (i == 0) {
+ offset = ALIGN(offset, surf->bo_alignment);
+ }
+ if (surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX) {
+ surf->tiling_index[i] = tile_mode;
+ }
+ }
+ return 0;
+}
+
+static int si_surface_init_1d(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ struct radeon_surface_level *level,
+ unsigned bpe, unsigned tile_mode,
+ uint64_t offset, unsigned start_level)
+{
+ uint32_t xalign, yalign, zalign, slice_align;
+ unsigned alignment = MAX2(256, surf_man->hw_info.group_bytes);
+ unsigned i;
+
+ /* compute alignment */
+ xalign = 8;
+ yalign = 8;
+ zalign = 1;
+ slice_align = surf_man->hw_info.group_bytes;
+ if (surf->flags & RADEON_SURF_SCANOUT) {
+ xalign = MAX2((bpe == 1) ? 64 : 32, xalign);
+ }
+
+ if (start_level <= 1) {
+ surf->bo_alignment = MAX2(surf->bo_alignment, alignment);
+
+ if (offset) {
+ offset = ALIGN(offset, alignment);
+ }
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ level[i].mode = RADEON_SURF_MODE_1D;
+ si_surf_minify(surf, level+i, bpe, i, xalign, yalign, zalign, slice_align, offset);
+ /* level0 and first mipmap need to have alignment */
+ offset = surf->bo_size;
+ if (i == 0) {
+ offset = ALIGN(offset, alignment);
+ }
+ if (surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX) {
+ if (surf->level == level) {
+ surf->tiling_index[i] = tile_mode;
+ /* it's ok because stencil is done after */
+ surf->stencil_tiling_index[i] = tile_mode;
+ } else {
+ surf->stencil_tiling_index[i] = tile_mode;
+ }
+ }
+ }
+ return 0;
+}
+
+static int si_surface_init_1d_miptrees(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ unsigned tile_mode, unsigned stencil_tile_mode)
+{
+ int r;
+
+ r = si_surface_init_1d(surf_man, surf, surf->level, surf->bpe, tile_mode, 0, 0);
+ if (r) {
+ return r;
+ }
+
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ r = si_surface_init_1d(surf_man, surf, surf->stencil_level, 1, stencil_tile_mode, surf->bo_size, 0);
+ surf->stencil_offset = surf->stencil_level[0].offset;
+ }
+ return r;
+}
+
+static int si_surface_init_2d(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ struct radeon_surface_level *level,
+ unsigned bpe, unsigned tile_mode,
+ unsigned num_pipes, unsigned num_banks,
+ unsigned tile_split,
+ uint64_t offset,
+ unsigned start_level)
+{
+ uint64_t aligned_offset = offset;
+ unsigned tilew, tileh, tileb;
+ unsigned mtilew, mtileh, mtileb;
+ unsigned slice_pt;
+ unsigned i;
+
+ /* compute tile values */
+ tilew = 8;
+ tileh = 8;
+ tileb = tilew * tileh * bpe * surf->nsamples;
+ /* slices per tile */
+ slice_pt = 1;
+ if (tileb > tile_split && tile_split) {
+ slice_pt = tileb / tile_split;
+ }
+ tileb = tileb / slice_pt;
+
+ /* macro tile width & height */
+ mtilew = (tilew * surf->bankw * num_pipes) * surf->mtilea;
+ mtileh = (tileh * surf->bankh * num_banks) / surf->mtilea;
+
+ /* macro tile bytes */
+ mtileb = (mtilew / tilew) * (mtileh / tileh) * tileb;
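+ /* Example (illustrative, assuming bpe = 4, nsamples = 1, bankw = 1,
+ * bankh = 2, mtilea = 2, num_pipes = 4, num_banks = 8, tile_split >= 256):
+ * tileb = 8 * 8 * 4 = 256, slice_pt = 1, mtilew = (8 * 1 * 4) * 2 = 64,
+ * mtileh = (8 * 2 * 8) / 2 = 64 and mtileb = 8 * 8 * 256 = 16 KiB.
+ */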
+
+ if (start_level <= 1) {
+ unsigned alignment = MAX2(256, mtileb);
+ surf->bo_alignment = MAX2(surf->bo_alignment, alignment);
+
+ if (aligned_offset) {
+ aligned_offset = ALIGN(aligned_offset, alignment);
+ }
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ level[i].mode = RADEON_SURF_MODE_2D;
+ si_surf_minify_2d(surf, level+i, bpe, i, slice_pt, mtilew, mtileh, 1, mtileb, aligned_offset);
+ if (level[i].mode == RADEON_SURF_MODE_1D) {
+ switch (tile_mode) {
+ case SI_TILE_MODE_COLOR_2D_8BPP:
+ case SI_TILE_MODE_COLOR_2D_16BPP:
+ case SI_TILE_MODE_COLOR_2D_32BPP:
+ case SI_TILE_MODE_COLOR_2D_64BPP:
+ tile_mode = SI_TILE_MODE_COLOR_1D;
+ break;
+ case SI_TILE_MODE_COLOR_2D_SCANOUT_16BPP:
+ case SI_TILE_MODE_COLOR_2D_SCANOUT_32BPP:
+ tile_mode = SI_TILE_MODE_COLOR_1D_SCANOUT;
+ break;
+ case SI_TILE_MODE_DEPTH_STENCIL_2D:
+ tile_mode = SI_TILE_MODE_DEPTH_STENCIL_1D;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return si_surface_init_1d(surf_man, surf, level, bpe, tile_mode, offset, i);
+ }
+ /* level0 and first mipmap need to have alignment */
+ aligned_offset = offset = surf->bo_size;
+ if (i == 0) {
+ aligned_offset = ALIGN(aligned_offset, surf->bo_alignment);
+ }
+ if (surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX) {
+ if (surf->level == level) {
+ surf->tiling_index[i] = tile_mode;
+ /* it's ok because stencil is done after */
+ surf->stencil_tiling_index[i] = tile_mode;
+ } else {
+ surf->stencil_tiling_index[i] = tile_mode;
+ }
+ }
+ }
+ return 0;
+}
+
+static int si_surface_init_2d_miptrees(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ unsigned tile_mode, unsigned stencil_tile_mode)
+{
+ unsigned num_pipes, num_banks;
+ uint32_t gb_tile_mode;
+ int r;
+
+ /* retrieve tiling mode value */
+ gb_tile_mode = surf_man->hw_info.tile_mode_array[tile_mode];
+ si_gb_tile_mode(gb_tile_mode, &num_pipes, &num_banks, NULL, NULL, NULL, NULL);
+
+ r = si_surface_init_2d(surf_man, surf, surf->level, surf->bpe, tile_mode, num_pipes, num_banks, surf->tile_split, 0, 0);
+ if (r) {
+ return r;
+ }
+
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ r = si_surface_init_2d(surf_man, surf, surf->stencil_level, 1, stencil_tile_mode, num_pipes, num_banks, surf->stencil_tile_split, surf->bo_size, 0);
+ surf->stencil_offset = surf->stencil_level[0].offset;
+ }
+ return r;
+}
+
+static int si_surface_init(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode, tile_mode, stencil_tile_mode;
+ int r;
+
+ /* MSAA surfaces support the 2D mode only. */
+ if (surf->nsamples > 1) {
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
+ }
+
+ /* tiling mode */
+ mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
+
+ if (surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)) {
+ /* zbuffer only support 1D or 2D tiled surface */
+ switch (mode) {
+ case RADEON_SURF_MODE_1D:
+ case RADEON_SURF_MODE_2D:
+ break;
+ default:
+ mode = RADEON_SURF_MODE_1D;
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE);
+ break;
+ }
+ }
+
+ r = si_surface_sanity(surf_man, surf, mode, &tile_mode, &stencil_tile_mode);
+ if (r) {
+ return r;
+ }
+
+ surf->stencil_offset = 0;
+ surf->bo_alignment = 0;
+
+ /* check tiling mode */
+ switch (mode) {
+ case RADEON_SURF_MODE_LINEAR:
+ r = r6_surface_init_linear(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_LINEAR_ALIGNED:
+ r = si_surface_init_linear_aligned(surf_man, surf, tile_mode, 0, 0);
+ break;
+ case RADEON_SURF_MODE_1D:
+ r = si_surface_init_1d_miptrees(surf_man, surf, tile_mode, stencil_tile_mode);
+ break;
+ case RADEON_SURF_MODE_2D:
+ r = si_surface_init_2d_miptrees(surf_man, surf, tile_mode, stencil_tile_mode);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return r;
+}
+
+/* compute the best tiling parameters (tile mode, tile_split, bankw, bankh,
+ * mtilea) depending on surface
+ */
+static int si_surface_best(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode, tile_mode, stencil_tile_mode;
+
+ /* tiling mode */
+ mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
+
+ if (surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER) &&
+ !(surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX)) {
+ /* depth/stencil force 1d tiling for old mesa */
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE);
+ }
+
+ return si_surface_sanity(surf_man, surf, mode, &tile_mode, &stencil_tile_mode);
+}
+
+
+/* ===========================================================================
+ * Sea Islands family
+ */
+#define CIK__GB_TILE_MODE__PIPE_CONFIG(x) (((x) >> 6) & 0x1f)
+#define CIK__PIPE_CONFIG__ADDR_SURF_P2 0
+#define CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16 4
+#define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16 5
+#define CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32 6
+#define CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32 7
+#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16 8
+#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16 9
+#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16 10
+#define CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16 11
+#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16 12
+#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32 13
+#define CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32 14
+#define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16 16
+#define CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16 17
+#define CIK__GB_TILE_MODE__TILE_SPLIT(x) (((x) >> 11) & 0x7)
+#define CIK__TILE_SPLIT__64B 0
+#define CIK__TILE_SPLIT__128B 1
+#define CIK__TILE_SPLIT__256B 2
+#define CIK__TILE_SPLIT__512B 3
+#define CIK__TILE_SPLIT__1024B 4
+#define CIK__TILE_SPLIT__2048B 5
+#define CIK__TILE_SPLIT__4096B 6
+#define CIK__GB_TILE_MODE__SAMPLE_SPLIT(x) (((x) >> 25) & 0x3)
+#define CIK__SAMPLE_SPLIT__1 0
+#define CIK__SAMPLE_SPLIT__2 1
+#define CIK__SAMPLE_SPLIT__4 2
+#define CIK__SAMPLE_SPLIT__8 3
+#define CIK__GB_MACROTILE_MODE__BANK_WIDTH(x) ((x) & 0x3)
+#define CIK__BANK_WIDTH__1 0
+#define CIK__BANK_WIDTH__2 1
+#define CIK__BANK_WIDTH__4 2
+#define CIK__BANK_WIDTH__8 3
+#define CIK__GB_MACROTILE_MODE__BANK_HEIGHT(x) (((x) >> 2) & 0x3)
+#define CIK__BANK_HEIGHT__1 0
+#define CIK__BANK_HEIGHT__2 1
+#define CIK__BANK_HEIGHT__4 2
+#define CIK__BANK_HEIGHT__8 3
+#define CIK__GB_MACROTILE_MODE__MACRO_TILE_ASPECT(x) (((x) >> 4) & 0x3)
+#define CIK__MACRO_TILE_ASPECT__1 0
+#define CIK__MACRO_TILE_ASPECT__2 1
+#define CIK__MACRO_TILE_ASPECT__4 2
+#define CIK__MACRO_TILE_ASPECT__8 3
+#define CIK__GB_MACROTILE_MODE__NUM_BANKS(x) (((x) >> 6) & 0x3)
+#define CIK__NUM_BANKS__2_BANK 0
+#define CIK__NUM_BANKS__4_BANK 1
+#define CIK__NUM_BANKS__8_BANK 2
+#define CIK__NUM_BANKS__16_BANK 3
+
+
+static void cik_get_2d_params(struct radeon_surface_manager *surf_man,
+ unsigned bpe, unsigned nsamples, bool is_color,
+ unsigned tile_mode,
+ uint32_t *num_pipes,
+ uint32_t *tile_split_ptr,
+ uint32_t *num_banks,
+ uint32_t *macro_tile_aspect,
+ uint32_t *bank_w,
+ uint32_t *bank_h)
+{
+ uint32_t gb_tile_mode = surf_man->hw_info.tile_mode_array[tile_mode];
+ unsigned tileb_1x, tileb;
+ unsigned gb_macrotile_mode;
+ unsigned macrotile_index;
+ unsigned tile_split, sample_split;
+
+ if (num_pipes) {
+ switch (CIK__GB_TILE_MODE__PIPE_CONFIG(gb_tile_mode)) {
+ case CIK__PIPE_CONFIG__ADDR_SURF_P2:
+ default:
+ *num_pipes = 2;
+ break;
+ case CIK__PIPE_CONFIG__ADDR_SURF_P4_8x16:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x16:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P4_16x32:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P4_32x32:
+ *num_pipes = 4;
+ break;
+ case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x16_8x16:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_8x16:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_8x16:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P8_16x32_16x16:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x16:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x32_16x32:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P8_32x64_32x32:
+ *num_pipes = 8;
+ break;
+ case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_8X16:
+ case CIK__PIPE_CONFIG__ADDR_SURF_P16_32X32_16X16:
+ *num_pipes = 16;
+ break;
+ }
+ }
+ switch (CIK__GB_TILE_MODE__TILE_SPLIT(gb_tile_mode)) {
+ default:
+ case CIK__TILE_SPLIT__64B:
+ tile_split = 64;
+ break;
+ case CIK__TILE_SPLIT__128B:
+ tile_split = 128;
+ break;
+ case CIK__TILE_SPLIT__256B:
+ tile_split = 256;
+ break;
+ case CIK__TILE_SPLIT__512B:
+ tile_split = 512;
+ break;
+ case CIK__TILE_SPLIT__1024B:
+ tile_split = 1024;
+ break;
+ case CIK__TILE_SPLIT__2048B:
+ tile_split = 2048;
+ break;
+ case CIK__TILE_SPLIT__4096B:
+ tile_split = 4096;
+ break;
+ }
+ switch (CIK__GB_TILE_MODE__SAMPLE_SPLIT(gb_tile_mode)) {
+ default:
+ case CIK__SAMPLE_SPLIT__1:
+ sample_split = 1;
+ break;
+ case CIK__SAMPLE_SPLIT__2:
+ sample_split = 2;
+ break;
+ case CIK__SAMPLE_SPLIT__4:
+ sample_split = 4;
+ break;
+ case CIK__SAMPLE_SPLIT__8:
+ sample_split = 8;
+ break;
+ }
+
+ /* Adjust the tile split. */
+ tileb_1x = 8 * 8 * bpe;
+ if (is_color) {
+ tile_split = MAX2(256, sample_split * tileb_1x);
+ }
+ tile_split = MIN2(surf_man->hw_info.row_size, tile_split);
+
+ /* Determine the macrotile index. */
+ tileb = MIN2(tile_split, nsamples * tileb_1x);
+
+ for (macrotile_index = 0; tileb > 64; macrotile_index++) {
+ tileb >>= 1;
+ }
+ gb_macrotile_mode = surf_man->hw_info.macrotile_mode_array[macrotile_index];
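+ /* Example (illustrative): for a single-sample 32bpp color surface
+ * (bpe = 4, nsamples = 1) with sample_split = 2 and row_size = 4096:
+ * tileb_1x = 8 * 8 * 4 = 256, tile_split = MIN2(4096, MAX2(256, 2 * 256))
+ * = 512, tileb = MIN2(512, 1 * 256) = 256, so macrotile_index ends up as 2.
+ */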
+
+ if (tile_split_ptr) {
+ *tile_split_ptr = tile_split;
+ }
+ if (num_banks) {
+ switch (CIK__GB_MACROTILE_MODE__NUM_BANKS(gb_macrotile_mode)) {
+ default:
+ case CIK__NUM_BANKS__2_BANK:
+ *num_banks = 2;
+ break;
+ case CIK__NUM_BANKS__4_BANK:
+ *num_banks = 4;
+ break;
+ case CIK__NUM_BANKS__8_BANK:
+ *num_banks = 8;
+ break;
+ case CIK__NUM_BANKS__16_BANK:
+ *num_banks = 16;
+ break;
+ }
+ }
+ if (macro_tile_aspect) {
+ switch (CIK__GB_MACROTILE_MODE__MACRO_TILE_ASPECT(gb_macrotile_mode)) {
+ default:
+ case CIK__MACRO_TILE_ASPECT__1:
+ *macro_tile_aspect = 1;
+ break;
+ case CIK__MACRO_TILE_ASPECT__2:
+ *macro_tile_aspect = 2;
+ break;
+ case CIK__MACRO_TILE_ASPECT__4:
+ *macro_tile_aspect = 4;
+ break;
+ case CIK__MACRO_TILE_ASPECT__8:
+ *macro_tile_aspect = 8;
+ break;
+ }
+ }
+ if (bank_w) {
+ switch (CIK__GB_MACROTILE_MODE__BANK_WIDTH(gb_macrotile_mode)) {
+ default:
+ case CIK__BANK_WIDTH__1:
+ *bank_w = 1;
+ break;
+ case CIK__BANK_WIDTH__2:
+ *bank_w = 2;
+ break;
+ case CIK__BANK_WIDTH__4:
+ *bank_w = 4;
+ break;
+ case CIK__BANK_WIDTH__8:
+ *bank_w = 8;
+ break;
+ }
+ }
+ if (bank_h) {
+ switch (CIK__GB_MACROTILE_MODE__BANK_HEIGHT(gb_macrotile_mode)) {
+ default:
+ case CIK__BANK_HEIGHT__1:
+ *bank_h = 1;
+ break;
+ case CIK__BANK_HEIGHT__2:
+ *bank_h = 2;
+ break;
+ case CIK__BANK_HEIGHT__4:
+ *bank_h = 4;
+ break;
+ case CIK__BANK_HEIGHT__8:
+ *bank_h = 8;
+ break;
+ }
+ }
+}
+
+static int cik_init_hw_info(struct radeon_surface_manager *surf_man)
+{
+ uint32_t tiling_config;
+ drmVersionPtr version;
+ int r;
+
+ r = radeon_get_value(surf_man->fd, RADEON_INFO_TILING_CONFIG,
+ &tiling_config);
+ if (r) {
+ return r;
+ }
+
+ surf_man->hw_info.allow_2d = 0;
+ version = drmGetVersion(surf_man->fd);
+ if (version && version->version_minor >= 35) {
+ if (!radeon_get_value(surf_man->fd, RADEON_INFO_SI_TILE_MODE_ARRAY, surf_man->hw_info.tile_mode_array) &&
+ !radeon_get_value(surf_man->fd, RADEON_INFO_CIK_MACROTILE_MODE_ARRAY, surf_man->hw_info.macrotile_mode_array)) {
+ surf_man->hw_info.allow_2d = 1;
+ }
+ }
+ drmFreeVersion(version);
+
+ switch (tiling_config & 0xf) {
+ case 0:
+ surf_man->hw_info.num_pipes = 1;
+ break;
+ case 1:
+ surf_man->hw_info.num_pipes = 2;
+ break;
+ case 2:
+ surf_man->hw_info.num_pipes = 4;
+ break;
+ case 3:
+ surf_man->hw_info.num_pipes = 8;
+ break;
+ default:
+ surf_man->hw_info.num_pipes = 8;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xf0) >> 4) {
+ case 0:
+ surf_man->hw_info.num_banks = 4;
+ break;
+ case 1:
+ surf_man->hw_info.num_banks = 8;
+ break;
+ case 2:
+ surf_man->hw_info.num_banks = 16;
+ break;
+ default:
+ surf_man->hw_info.num_banks = 8;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xf00) >> 8) {
+ case 0:
+ surf_man->hw_info.group_bytes = 256;
+ break;
+ case 1:
+ surf_man->hw_info.group_bytes = 512;
+ break;
+ default:
+ surf_man->hw_info.group_bytes = 256;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+
+ switch ((tiling_config & 0xf000) >> 12) {
+ case 0:
+ surf_man->hw_info.row_size = 1024;
+ break;
+ case 1:
+ surf_man->hw_info.row_size = 2048;
+ break;
+ case 2:
+ surf_man->hw_info.row_size = 4096;
+ break;
+ default:
+ surf_man->hw_info.row_size = 4096;
+ surf_man->hw_info.allow_2d = 0;
+ break;
+ }
+ return 0;
+}
+
+static int cik_surface_sanity(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ unsigned mode, unsigned *tile_mode, unsigned *stencil_tile_mode)
+{
+ /* check surface dimension */
+ if (surf->npix_x > 16384 || surf->npix_y > 16384 || surf->npix_z > 16384) {
+ return -EINVAL;
+ }
+
+ /* check mipmap last_level */
+ if (surf->last_level > 15) {
+ return -EINVAL;
+ }
+
+ /* force 1d on kernel that can't do 2d */
+ if (mode > RADEON_SURF_MODE_1D &&
+ (!surf_man->hw_info.allow_2d || !(surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX))) {
+ if (surf->nsamples > 1) {
+ fprintf(stderr, "radeon: Cannot use 1D tiling for an MSAA surface (%i).\n", __LINE__);
+ return -EFAULT;
+ }
+ mode = RADEON_SURF_MODE_1D;
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(mode, MODE);
+ }
+
+ if (surf->nsamples > 1 && mode != RADEON_SURF_MODE_2D) {
+ return -EINVAL;
+ }
+
+ if (!surf->tile_split) {
+ /* default value */
+ surf->mtilea = 1;
+ surf->bankw = 1;
+ surf->bankh = 1;
+ surf->tile_split = 64;
+ surf->stencil_tile_split = 64;
+ }
+
+ switch (mode) {
+ case RADEON_SURF_MODE_2D: {
+ if (surf->flags & RADEON_SURF_Z_OR_SBUFFER) {
+ switch (surf->nsamples) {
+ case 1:
+ *tile_mode = CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_64;
+ break;
+ case 2:
+ case 4:
+ *tile_mode = CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_128;
+ break;
+ case 8:
+ *tile_mode = CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ *stencil_tile_mode = *tile_mode;
+
+ cik_get_2d_params(surf_man, 1, surf->nsamples, false,
+ *stencil_tile_mode, NULL,
+ &surf->stencil_tile_split,
+ NULL, NULL, NULL, NULL);
+ }
+ } else if (surf->flags & RADEON_SURF_SCANOUT) {
+ *tile_mode = CIK_TILE_MODE_COLOR_2D_SCANOUT;
+ } else {
+ *tile_mode = CIK_TILE_MODE_COLOR_2D;
+ }
+
+ /* retrieve tiling mode values */
+ cik_get_2d_params(surf_man, surf->bpe, surf->nsamples,
+ !(surf->flags & RADEON_SURF_Z_OR_SBUFFER), *tile_mode,
+ NULL, &surf->tile_split, NULL, &surf->mtilea,
+ &surf->bankw, &surf->bankh);
+ break;
+ }
+ case RADEON_SURF_MODE_1D:
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ *stencil_tile_mode = CIK_TILE_MODE_DEPTH_STENCIL_1D;
+ }
+ if (surf->flags & RADEON_SURF_ZBUFFER) {
+ *tile_mode = CIK_TILE_MODE_DEPTH_STENCIL_1D;
+ } else if (surf->flags & RADEON_SURF_SCANOUT) {
+ *tile_mode = SI_TILE_MODE_COLOR_1D_SCANOUT;
+ } else {
+ *tile_mode = SI_TILE_MODE_COLOR_1D;
+ }
+ break;
+ case RADEON_SURF_MODE_LINEAR_ALIGNED:
+ default:
+ *tile_mode = SI_TILE_MODE_COLOR_LINEAR_ALIGNED;
+ }
+
+ return 0;
+}
+
+static int cik_surface_init_2d(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ struct radeon_surface_level *level,
+ unsigned bpe, unsigned tile_mode,
+ unsigned tile_split,
+ unsigned num_pipes, unsigned num_banks,
+ uint64_t offset,
+ unsigned start_level)
+{
+ uint64_t aligned_offset = offset;
+ unsigned tilew, tileh, tileb_1x, tileb;
+ unsigned mtilew, mtileh, mtileb;
+ unsigned slice_pt;
+ unsigned i;
+
+ /* compute tile values */
+ tilew = 8;
+ tileh = 8;
+ tileb_1x = tilew * tileh * bpe;
+
+ tile_split = MIN2(surf_man->hw_info.row_size, tile_split);
+
+ tileb = surf->nsamples * tileb_1x;
+
+ /* slices per tile */
+ slice_pt = 1;
+ if (tileb > tile_split && tile_split) {
+ slice_pt = tileb / tile_split;
+ tileb = tileb / slice_pt;
+ }
+
+ /* macro tile width & height */
+ mtilew = (tilew * surf->bankw * num_pipes) * surf->mtilea;
+ mtileh = (tileh * surf->bankh * num_banks) / surf->mtilea;
+
+ /* macro tile bytes */
+ mtileb = (mtilew / tilew) * (mtileh / tileh) * tileb;
+
+ if (start_level <= 1) {
+ unsigned alignment = MAX2(256, mtileb);
+ surf->bo_alignment = MAX2(surf->bo_alignment, alignment);
+
+ if (aligned_offset) {
+ aligned_offset = ALIGN(aligned_offset, alignment);
+ }
+ }
+
+ /* build mipmap tree */
+ for (i = start_level; i <= surf->last_level; i++) {
+ level[i].mode = RADEON_SURF_MODE_2D;
+ si_surf_minify_2d(surf, level+i, bpe, i, slice_pt, mtilew, mtileh, 1, mtileb, aligned_offset);
+ if (level[i].mode == RADEON_SURF_MODE_1D) {
+ switch (tile_mode) {
+ case CIK_TILE_MODE_COLOR_2D:
+ tile_mode = SI_TILE_MODE_COLOR_1D;
+ break;
+ case CIK_TILE_MODE_COLOR_2D_SCANOUT:
+ tile_mode = SI_TILE_MODE_COLOR_1D_SCANOUT;
+ break;
+ case CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_64:
+ case CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_128:
+ case CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_256:
+ case CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_512:
+ case CIK_TILE_MODE_DEPTH_STENCIL_2D_TILESPLIT_ROW_SIZE:
+ tile_mode = CIK_TILE_MODE_DEPTH_STENCIL_1D;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return si_surface_init_1d(surf_man, surf, level, bpe, tile_mode, offset, i);
+ }
+ /* level0 and first mipmap need to have alignment */
+ aligned_offset = offset = surf->bo_size;
+ if (i == 0) {
+ aligned_offset = ALIGN(aligned_offset, surf->bo_alignment);
+ }
+ if (surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX) {
+ if (surf->level == level) {
+ surf->tiling_index[i] = tile_mode;
+ /* it's ok because stencil is done after */
+ surf->stencil_tiling_index[i] = tile_mode;
+ } else {
+ surf->stencil_tiling_index[i] = tile_mode;
+ }
+ }
+ }
+ return 0;
+}
+
+static int cik_surface_init_2d_miptrees(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ unsigned tile_mode, unsigned stencil_tile_mode)
+{
+ int r;
+ uint32_t num_pipes, num_banks;
+
+ cik_get_2d_params(surf_man, surf->bpe, surf->nsamples,
+ !(surf->flags & RADEON_SURF_Z_OR_SBUFFER), tile_mode,
+ &num_pipes, NULL, &num_banks, NULL, NULL, NULL);
+
+ r = cik_surface_init_2d(surf_man, surf, surf->level, surf->bpe, tile_mode,
+ surf->tile_split, num_pipes, num_banks, 0, 0);
+ if (r) {
+ return r;
+ }
+
+ if (surf->flags & RADEON_SURF_SBUFFER) {
+ r = cik_surface_init_2d(surf_man, surf, surf->stencil_level, 1, stencil_tile_mode,
+ surf->stencil_tile_split, num_pipes, num_banks,
+ surf->bo_size, 0);
+ surf->stencil_offset = surf->stencil_level[0].offset;
+ }
+ return r;
+}
+
+static int cik_surface_init(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode, tile_mode, stencil_tile_mode;
+ int r;
+
+ /* MSAA surfaces support the 2D mode only. */
+ if (surf->nsamples > 1) {
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
+ }
+
+ /* tiling mode */
+ mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
+
+ if (surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)) {
+ /* zbuffer only support 1D or 2D tiled surface */
+ switch (mode) {
+ case RADEON_SURF_MODE_1D:
+ case RADEON_SURF_MODE_2D:
+ break;
+ default:
+ mode = RADEON_SURF_MODE_1D;
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE);
+ break;
+ }
+ }
+
+ r = cik_surface_sanity(surf_man, surf, mode, &tile_mode, &stencil_tile_mode);
+ if (r) {
+ return r;
+ }
+
+ surf->stencil_offset = 0;
+ surf->bo_alignment = 0;
+
+ /* check tiling mode */
+ switch (mode) {
+ case RADEON_SURF_MODE_LINEAR:
+ r = r6_surface_init_linear(surf_man, surf, 0, 0);
+ break;
+ case RADEON_SURF_MODE_LINEAR_ALIGNED:
+ r = si_surface_init_linear_aligned(surf_man, surf, tile_mode, 0, 0);
+ break;
+ case RADEON_SURF_MODE_1D:
+ r = si_surface_init_1d_miptrees(surf_man, surf, tile_mode, stencil_tile_mode);
+ break;
+ case RADEON_SURF_MODE_2D:
+ r = cik_surface_init_2d_miptrees(surf_man, surf, tile_mode, stencil_tile_mode);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return r;
+}
+
+/* compute the best tiling parameters (tile mode, tile_split, bankw, bankh,
+ * mtilea) depending on surface
+ */
+static int cik_surface_best(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode, tile_mode, stencil_tile_mode;
+
+ /* tiling mode */
+ mode = (surf->flags >> RADEON_SURF_MODE_SHIFT) & RADEON_SURF_MODE_MASK;
+
+ if (surf->flags & (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER) &&
+ !(surf->flags & RADEON_SURF_HAS_TILE_MODE_INDEX)) {
+ /* depth/stencil force 1d tiling for old mesa */
+ surf->flags = RADEON_SURF_CLR(surf->flags, MODE);
+ surf->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE);
+ }
+
+ return cik_surface_sanity(surf_man, surf, mode, &tile_mode, &stencil_tile_mode);
+}
+
+
+/* ===========================================================================
+ * public API
+ */
+drm_public struct radeon_surface_manager *
+radeon_surface_manager_new(int fd)
+{
+ struct radeon_surface_manager *surf_man;
+
+ surf_man = calloc(1, sizeof(struct radeon_surface_manager));
+ if (surf_man == NULL) {
+ return NULL;
+ }
+ surf_man->fd = fd;
+ if (radeon_get_value(fd, RADEON_INFO_DEVICE_ID, &surf_man->device_id)) {
+ goto out_err;
+ }
+ if (radeon_get_family(surf_man)) {
+ goto out_err;
+ }
+
+ if (surf_man->family <= CHIP_RV740) {
+ if (r6_init_hw_info(surf_man)) {
+ goto out_err;
+ }
+ surf_man->surface_init = &r6_surface_init;
+ surf_man->surface_best = &r6_surface_best;
+ } else if (surf_man->family <= CHIP_ARUBA) {
+ if (eg_init_hw_info(surf_man)) {
+ goto out_err;
+ }
+ surf_man->surface_init = &eg_surface_init;
+ surf_man->surface_best = &eg_surface_best;
+ } else if (surf_man->family < CHIP_BONAIRE) {
+ if (si_init_hw_info(surf_man)) {
+ goto out_err;
+ }
+ surf_man->surface_init = &si_surface_init;
+ surf_man->surface_best = &si_surface_best;
+ } else {
+ if (cik_init_hw_info(surf_man)) {
+ goto out_err;
+ }
+ surf_man->surface_init = &cik_surface_init;
+ surf_man->surface_best = &cik_surface_best;
+ }
+
+ return surf_man;
+out_err:
+ free(surf_man);
+ return NULL;
+}
+
+drm_public void
+radeon_surface_manager_free(struct radeon_surface_manager *surf_man)
+{
+ free(surf_man);
+}
+
+static int radeon_surface_sanity(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf,
+ unsigned type,
+ unsigned mode)
+{
+ if (surf_man == NULL || surf_man->surface_init == NULL || surf == NULL) {
+ return -EINVAL;
+ }
+
+ /* all dimensions must be at least 1! */
+ if (!surf->npix_x || !surf->npix_y || !surf->npix_z) {
+ return -EINVAL;
+ }
+ if (!surf->blk_w || !surf->blk_h || !surf->blk_d) {
+ return -EINVAL;
+ }
+ if (!surf->array_size) {
+ return -EINVAL;
+ }
+ /* array size must be a power of 2 */
+ surf->array_size = next_power_of_two(surf->array_size);
+
+ switch (surf->nsamples) {
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* check type */
+ switch (type) {
+ case RADEON_SURF_TYPE_1D:
+ if (surf->npix_y > 1) {
+ return -EINVAL;
+ }
+ /* fallthrough */
+ case RADEON_SURF_TYPE_2D:
+ if (surf->npix_z > 1) {
+ return -EINVAL;
+ }
+ break;
+ case RADEON_SURF_TYPE_CUBEMAP:
+ if (surf->npix_z > 1) {
+ return -EINVAL;
+ }
+ /* treat cubemaps as if they were texture arrays */
+ if (surf_man->family >= CHIP_RV770) {
+ surf->array_size = 8;
+ } else {
+ surf->array_size = 6;
+ }
+ break;
+ case RADEON_SURF_TYPE_3D:
+ break;
+ case RADEON_SURF_TYPE_1D_ARRAY:
+ if (surf->npix_y > 1) {
+ return -EINVAL;
+ }
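+ /* fallthrough */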
+ case RADEON_SURF_TYPE_2D_ARRAY:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+drm_public int
+radeon_surface_init(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode, type;
+ int r;
+
+ type = RADEON_SURF_GET(surf->flags, TYPE);
+ mode = RADEON_SURF_GET(surf->flags, MODE);
+
+ r = radeon_surface_sanity(surf_man, surf, type, mode);
+ if (r) {
+ return r;
+ }
+ return surf_man->surface_init(surf_man, surf);
+}
+
+drm_public int
+radeon_surface_best(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf)
+{
+ unsigned mode, type;
+ int r;
+
+ type = RADEON_SURF_GET(surf->flags, TYPE);
+ mode = RADEON_SURF_GET(surf->flags, MODE);
+
+ r = radeon_surface_sanity(surf_man, surf, type, mode);
+ if (r) {
+ return r;
+ }
+ return surf_man->surface_best(surf_man, surf);
+}
diff --git a/radeon/radeon_surface.h b/radeon/radeon_surface.h
new file mode 100644
index 0000000..af7cab6
--- /dev/null
+++ b/radeon/radeon_surface.h
@@ -0,0 +1,149 @@
+/*
+ * Copyright © 2011 Red Hat All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Jérôme Glisse <jglisse@redhat.com>
+ */
+#ifndef RADEON_SURFACE_H
+#define RADEON_SURFACE_H
+
+/* Note :
+ *
+ * For texture arrays, the n layers are stored one after the other within
+ * each mipmap level. A value of 0 for any field that can be a hint is
+ * always valid.
+ */
+
+#define RADEON_SURF_MAX_LEVEL 32
+
+#define RADEON_SURF_TYPE_MASK 0xFF
+#define RADEON_SURF_TYPE_SHIFT 0
+#define RADEON_SURF_TYPE_1D 0
+#define RADEON_SURF_TYPE_2D 1
+#define RADEON_SURF_TYPE_3D 2
+#define RADEON_SURF_TYPE_CUBEMAP 3
+#define RADEON_SURF_TYPE_1D_ARRAY 4
+#define RADEON_SURF_TYPE_2D_ARRAY 5
+#define RADEON_SURF_MODE_MASK 0xFF
+#define RADEON_SURF_MODE_SHIFT 8
+#define RADEON_SURF_MODE_LINEAR 0
+#define RADEON_SURF_MODE_LINEAR_ALIGNED 1
+#define RADEON_SURF_MODE_1D 2
+#define RADEON_SURF_MODE_2D 3
+#define RADEON_SURF_SCANOUT (1 << 16)
+#define RADEON_SURF_ZBUFFER (1 << 17)
+#define RADEON_SURF_SBUFFER (1 << 18)
+#define RADEON_SURF_Z_OR_SBUFFER (RADEON_SURF_ZBUFFER | RADEON_SURF_SBUFFER)
+#define RADEON_SURF_HAS_SBUFFER_MIPTREE (1 << 19)
+#define RADEON_SURF_HAS_TILE_MODE_INDEX (1 << 20)
+#define RADEON_SURF_FMASK (1 << 21)
+
+#define RADEON_SURF_GET(v, field) (((v) >> RADEON_SURF_ ## field ## _SHIFT) & RADEON_SURF_ ## field ## _MASK)
+#define RADEON_SURF_SET(v, field) (((v) & RADEON_SURF_ ## field ## _MASK) << RADEON_SURF_ ## field ## _SHIFT)
+#define RADEON_SURF_CLR(v, field) ((v) & ~(RADEON_SURF_ ## field ## _MASK << RADEON_SURF_ ## field ## _SHIFT))
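+
+/* For example, RADEON_SURF_GET(flags, MODE) expands to ((flags >> 8) & 0xFF)
+ * and RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE) evaluates to (3 << 8).
+ */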
+
+/* The first fields up to mode need to match the r6 struct so that we can
+ * reuse the same function for linear & linear aligned.
+ */
+struct radeon_surface_level {
+ uint64_t offset;
+ uint64_t slice_size;
+ uint32_t npix_x;
+ uint32_t npix_y;
+ uint32_t npix_z;
+ uint32_t nblk_x;
+ uint32_t nblk_y;
+ uint32_t nblk_z;
+ uint32_t pitch_bytes;
+ uint32_t mode;
+};
+
+enum si_tiling_mode {
+ SI_TILING_AUTO = 0,
+
+ SI_TILING_COLOR_1D,
+ SI_TILING_COLOR_1D_SCANOUT,
+ SI_TILING_COLOR_2D_8BPP,
+ SI_TILING_COLOR_2D_16BPP,
+ SI_TILING_COLOR_2D_32BPP,
+ SI_TILING_COLOR_2D_64BPP,
+ SI_TILING_COLOR_2D_SCANOUT_16BPP,
+ SI_TILING_COLOR_2D_SCANOUT_32BPP,
+ SI_TILING_COLOR_LINEAR,
+
+ SI_TILING_STENCIL_1D,
+ SI_TILING_STENCIL_2D,
+ SI_TILING_STENCIL_2D_2AA,
+ SI_TILING_STENCIL_2D_4AA,
+ SI_TILING_STENCIL_2D_8AA,
+
+ SI_TILING_DEPTH_1D,
+ SI_TILING_DEPTH_2D,
+ SI_TILING_DEPTH_2D_2AA,
+ SI_TILING_DEPTH_2D_4AA,
+ SI_TILING_DEPTH_2D_8AA,
+
+ SI_TILING_LAST_MODE,
+};
+
+struct radeon_surface {
+ uint32_t npix_x;
+ uint32_t npix_y;
+ uint32_t npix_z;
+ uint32_t blk_w;
+ uint32_t blk_h;
+ uint32_t blk_d;
+ uint32_t array_size;
+ uint32_t last_level;
+ uint32_t bpe;
+ uint32_t nsamples;
+ uint32_t flags;
+ /* The following is updated/filled by the allocator. Callers are allowed
+ * to set some of the values, but they are used as hints and can be
+ * overridden (things like bankw/bankh on evergreen, for
+ * instance).
+ */
+ uint64_t bo_size;
+ uint64_t bo_alignment;
+ /* apply to eg */
+ uint32_t bankw;
+ uint32_t bankh;
+ uint32_t mtilea;
+ uint32_t tile_split;
+ uint32_t stencil_tile_split;
+ uint64_t stencil_offset;
+ struct radeon_surface_level level[RADEON_SURF_MAX_LEVEL];
+ struct radeon_surface_level stencil_level[RADEON_SURF_MAX_LEVEL];
+ uint32_t tiling_index[RADEON_SURF_MAX_LEVEL];
+ uint32_t stencil_tiling_index[RADEON_SURF_MAX_LEVEL];
+};
+
+struct radeon_surface_manager *radeon_surface_manager_new(int fd);
+void radeon_surface_manager_free(struct radeon_surface_manager *surf_man);
+int radeon_surface_init(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf);
+int radeon_surface_best(struct radeon_surface_manager *surf_man,
+ struct radeon_surface *surf);
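+
+/* Minimal usage sketch (illustrative only; the field values and drm_fd are
+ * placeholders, drm_fd being an already-open DRM file descriptor):
+ *
+ *   struct radeon_surface_manager *sm = radeon_surface_manager_new(drm_fd);
+ *   struct radeon_surface surf;
+ *
+ *   memset(&surf, 0, sizeof(surf));
+ *   surf.npix_x = 1920; surf.npix_y = 1080; surf.npix_z = 1;
+ *   surf.blk_w = 1; surf.blk_h = 1; surf.blk_d = 1;
+ *   surf.array_size = 1; surf.last_level = 0;
+ *   surf.bpe = 4; surf.nsamples = 1;
+ *   surf.flags = RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE) |
+ *                RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
+ *   if (sm && !radeon_surface_best(sm, &surf) &&
+ *       !radeon_surface_init(sm, &surf)) {
+ *       on success surf.bo_size, surf.bo_alignment and surf.level[] are filled
+ *   }
+ *   radeon_surface_manager_free(sm);
+ */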
+
+#endif