/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021-2023 Intel Corporation
 */

#ifndef _XE_MMIO_H_
#define _XE_MMIO_H_

#include <linux/delay.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "regs/xe_reg_defs.h"
#include "xe_device_types.h"
#include "xe_gt_printk.h"
#include "xe_gt_types.h"

struct drm_device;
struct drm_file;
struct xe_device;

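/* PCI BAR index of the device's local memory (VRAM) aperture */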
#define LMEM_BAR		2

int xe_mmio_init(struct xe_device *xe);
int xe_mmio_root_tile_init(struct xe_device *xe);
void xe_mmio_probe_tiles(struct xe_device *xe);

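/*
 * All MMIO accessors below share the same address fixup: register offsets
 * below gt->mmio.adj_limit are shifted by gt->mmio.adj_offset, which remaps
 * them into the correct per-GT range (e.g. for a standalone media GT).
 * Registers flagged with reg.ext are accessed through the tile's extended
 * MMIO region (tile->mmio_ext) instead of the regular one.
 */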
static inline u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
{
	struct xe_tile *tile = gt_to_tile(gt);

	if (reg.addr < gt->mmio.adj_limit)
		reg.addr += gt->mmio.adj_offset;

	return readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
}

static inline u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
{
	struct xe_tile *tile = gt_to_tile(gt);

	if (reg.addr < gt->mmio.adj_limit)
		reg.addr += gt->mmio.adj_offset;

	return readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
}

static inline void xe_mmio_write32(struct xe_gt *gt,
				   struct xe_reg reg, u32 val)
{
	struct xe_tile *tile = gt_to_tile(gt);

	if (reg.addr < gt->mmio.adj_limit)
		reg.addr += gt->mmio.adj_offset;

	writel(val, (reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
}

static inline u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
{
	struct xe_tile *tile = gt_to_tile(gt);

	if (reg.addr < gt->mmio.adj_limit)
		reg.addr += gt->mmio.adj_offset;

	return readl((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + reg.addr);
}

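/*
 * Read-modify-write helper: clears the bits in @clr, sets the bits in @set,
 * and returns the register's previous value.
 */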
static inline u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr,
				u32 set)
{
	u32 old, reg_val;

	old = xe_mmio_read32(gt, reg);
	reg_val = (old & ~clr) | set;
	xe_mmio_write32(gt, reg, reg_val);

	return old;
}

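/*
 * Writes @val, then reads the register back; returns 0 if the masked
 * readback equals @eval, -EINVAL otherwise.
 */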
static inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
					     struct xe_reg reg, u32 val,
					     u32 mask, u32 eval)
{
	u32 reg_val;

	xe_mmio_write32(gt, reg, val);
	reg_val = xe_mmio_read32(gt, reg);

	return (reg_val & mask) != eval ? -EINVAL : 0;
}

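/*
 * Returns true if @reg, after the same offset adjustment applied by the
 * accessors above, falls within @range (a NULL @range never matches).
 */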
static inline bool xe_mmio_in_range(const struct xe_gt *gt,
				    const struct xe_mmio_range *range,
				    struct xe_reg reg)
{
	if (reg.addr < gt->mmio.adj_limit)
		reg.addr += gt->mmio.adj_offset;

	return range && reg.addr >= range->start && reg.addr <= range->end;
}

int xe_mmio_probe_vram(struct xe_device *xe);
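/*
 * Reads a 64-bit register as two 32-bit reads; the implementation in
 * xe_mmio.c re-reads until the upper dword is stable, so a carry between
 * the two halves cannot produce a torn value.
 */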
u64 xe_mmio_read64_2x32(struct xe_gt *gt, struct xe_reg reg);
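/*
 * Polls @reg until (value & @mask) == @val or @timeout_us expires (see
 * xe_mmio.c); the last value read is stored in @out_val when non-NULL, and
 * @atomic selects a busy-wait usable from atomic context instead of a
 * sleeping wait. Hypothetical usage, with SOME_REG/STATUS_* standing in
 * for real register defines:
 *
 *	err = xe_mmio_wait32(gt, SOME_REG, STATUS_MASK, STATUS_READY,
 *			     1000, NULL, false);
 */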
int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 mask, u32 val, u32 timeout_us,
		   u32 *out_val, bool atomic);

#endif