summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/xe/xe_map.h
blob: f62e0c8b67aba8e55a0dd0480e6a0e65aabf67c9 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_MAP_H_
#define _XE_MAP_H_

#include <linux/iosys-map.h>

#include <xe_device.h>

/**
 * DOC: Map layer
 *
 * All access to any memory shared with a device (both sysmem and vram) in the
 * XE driver should go through this layer (xe_map). This layer is built on top
 * of :ref:`driver-api/device-io:Generalizing Access to System and I/O Memory`
 * and with extra hooks into the XE driver that allows adding asserts to memory
 * accesses (e.g. for blocking runtime_pm D3Cold on Discrete Graphics).
 */

/**
 * xe_map_memcpy_to() - Copy a kernel buffer into a device-shared mapping
 * @xe: xe device owning the mapping (used for the memory-access assert)
 * @dst: destination mapping (sysmem or vram)
 * @dst_offset: byte offset into @dst at which to start writing
 * @src: source buffer in regular kernel memory
 * @len: number of bytes to copy
 *
 * Thin wrapper around iosys_map_memcpy_to() that first runs
 * xe_device_assert_mem_access() so driver-level access checks (see the
 * "Map layer" DOC section) apply to every copy.
 */
static inline void xe_map_memcpy_to(struct xe_device *xe, struct iosys_map *dst,
				    size_t dst_offset, const void *src,
				    size_t len)
{
	xe_device_assert_mem_access(xe);
	iosys_map_memcpy_to(dst, dst_offset, src, len);
}

/**
 * xe_map_memcpy_from() - Copy from a device-shared mapping into a kernel buffer
 * @xe: xe device owning the mapping (used for the memory-access assert)
 * @dst: destination buffer in regular kernel memory
 * @src: source mapping (sysmem or vram)
 * @src_offset: byte offset into @src at which to start reading
 * @len: number of bytes to copy
 *
 * Thin wrapper around iosys_map_memcpy_from() that first runs
 * xe_device_assert_mem_access() so driver-level access checks (see the
 * "Map layer" DOC section) apply to every copy.
 */
static inline void xe_map_memcpy_from(struct xe_device *xe, void *dst,
				      const struct iosys_map *src,
				      size_t src_offset, size_t len)
{
	xe_device_assert_mem_access(xe);
	iosys_map_memcpy_from(dst, src, src_offset, len);
}

/**
 * xe_map_memset() - Fill a range of a device-shared mapping with a byte value
 * @xe: xe device owning the mapping (used for the memory-access assert)
 * @dst: destination mapping (sysmem or vram)
 * @offset: byte offset into @dst at which to start filling
 * @value: byte value to write (as with memset())
 * @len: number of bytes to fill
 *
 * Thin wrapper around iosys_map_memset() that first runs
 * xe_device_assert_mem_access() so driver-level access checks (see the
 * "Map layer" DOC section) apply to every fill.
 */
static inline void xe_map_memset(struct xe_device *xe,
				 struct iosys_map *dst, size_t offset,
				 int value, size_t len)
{
	xe_device_assert_mem_access(xe);
	iosys_map_memset(dst, offset, value, len);
}

/* FIXME: We likely should kill these two functions sooner or later */
/**
 * xe_map_read32() - Read a u32 from the start of a device-shared mapping
 * @xe: xe device owning the mapping (used for the memory-access assert)
 * @map: mapping to read from (iomem or system memory)
 *
 * Uses readl() for iomem mappings and a READ_ONCE() load for system-memory
 * mappings, after asserting memory access is currently allowed.
 *
 * Return: the 32-bit value at offset 0 of @map.
 */
static inline u32 xe_map_read32(struct xe_device *xe, struct iosys_map *map)
{
	xe_device_assert_mem_access(xe);

	if (!map->is_iomem)
		return READ_ONCE(*(u32 *)map->vaddr);

	return readl(map->vaddr_iomem);
}

/**
 * xe_map_write32() - Write a u32 to the start of a device-shared mapping
 * @xe: xe device owning the mapping (used for the memory-access assert)
 * @val: the 32-bit value to write at offset 0 of @map
 * @map: mapping to write to (iomem or system memory)
 *
 * Uses writel() for iomem mappings and a WRITE_ONCE() store for system-memory
 * mappings, after asserting memory access is currently allowed. WRITE_ONCE()
 * keeps this symmetric with the READ_ONCE() in xe_map_read32() and prevents
 * the compiler from tearing the store.
 */
static inline void xe_map_write32(struct xe_device *xe, struct iosys_map *map,
				  u32 val)
{
	xe_device_assert_mem_access(xe);

	if (map->is_iomem)
		writel(val, map->vaddr_iomem);
	else
		WRITE_ONCE(*(u32 *)map->vaddr, val);
}

/*
 * xe_map_rd() - Typed read from a device-shared mapping; evaluates to the
 * type__ value at byte offset offset__ in map__ via iosys_map_rd().
 * xe__ is captured into a local so it is evaluated exactly once, even if the
 * assert expands its argument multiple times.
 */
#define xe_map_rd(xe__, map__, offset__, type__) ({			\
	struct xe_device *__xe = xe__;					\
	xe_device_assert_mem_access(__xe);				\
	iosys_map_rd(map__, offset__, type__);				\
})

/*
 * xe_map_wr() - Typed write to a device-shared mapping; stores val__ as
 * type__ at byte offset offset__ in map__ via iosys_map_wr().
 * xe__ is captured into a local so it is evaluated exactly once, even if the
 * assert expands its argument multiple times.
 */
#define xe_map_wr(xe__, map__, offset__, type__, val__) ({		\
	struct xe_device *__xe = xe__;					\
	xe_device_assert_mem_access(__xe);				\
	iosys_map_wr(map__, offset__, type__, val__);			\
})

/*
 * xe_map_rd_field() - Read one struct member from a device-shared mapping:
 * reads field__ of a struct_type__ located at struct_offset__ in map__ via
 * iosys_map_rd_field(). xe__ is captured into a local so it is evaluated
 * exactly once, even if the assert expands its argument multiple times.
 */
#define xe_map_rd_field(xe__, map__, struct_offset__, struct_type__, field__) ({	\
	struct xe_device *__xe = xe__;					\
	xe_device_assert_mem_access(__xe);				\
	iosys_map_rd_field(map__, struct_offset__, struct_type__, field__);		\
})

/*
 * xe_map_wr_field() - Write one struct member in a device-shared mapping:
 * stores val__ into field__ of a struct_type__ located at struct_offset__ in
 * map__ via iosys_map_wr_field(). xe__ is captured into a local so it is
 * evaluated exactly once, even if the assert expands its argument
 * multiple times.
 */
#define xe_map_wr_field(xe__, map__, struct_offset__, struct_type__, field__, val__) ({	\
	struct xe_device *__xe = xe__;					\
	xe_device_assert_mem_access(__xe);				\
	iosys_map_wr_field(map__, struct_offset__, struct_type__, field__, val__);	\
})

#endif