summaryrefslogtreecommitdiffstats
path: root/drivers/accel/ivpu/ivpu_ipc.h
blob: 40ca3cc4e61f738d6f45075cd3b9e5ad7b903fad (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#ifndef __IVPU_IPC_H__
#define __IVPU_IPC_H__

#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "vpu_jsm_api.h"

struct ivpu_bo;

/* VPU FW boot notification */
#define IVPU_IPC_CHAN_BOOT_MSG		0x3ff
#define IVPU_IPC_BOOT_MSG_DATA_ADDR	0x424f4f54

/* The alignment to be used for IPC Buffers and IPC Data. */
#define IVPU_IPC_ALIGNMENT	   64

#define IVPU_IPC_HDR_FREE	   0
#define IVPU_IPC_HDR_ALLOCATED	   1

/**
 * struct ivpu_ipc_hdr - The IPC message header structure, exchanged
 * with the VPU device firmware.
 * @data_addr: The VPU address of the payload (JSM message)
 * @data_size: The size of the payload.
 * @channel: The channel used.
 * @src_node: The Node ID of the sender.
 * @dst_node: The Node ID of the intended receiver.
 * @status: IPC buffer usage status: IVPU_IPC_HDR_FREE or IVPU_IPC_HDR_ALLOCATED
 *
 * NOTE(review): this layout is an ABI shared with the VPU firmware — do not
 * reorder or resize fields. The struct is packed and aligned to
 * IVPU_IPC_ALIGNMENT (64 bytes), presumably to match the device-side buffer
 * stride — confirm against the firmware interface spec.
 */
struct ivpu_ipc_hdr {
	u32 data_addr;
	u32 data_size;
	u16 channel;
	u8 src_node;
	u8 dst_node;
	u8 status;
} __packed __aligned(IVPU_IPC_ALIGNMENT);

/*
 * Per-consumer RX callback type. When set on a consumer (see
 * ivpu_ipc_consumer.rx_callback / ivpu_ipc_consumer_add()), it is invoked
 * with the received IPC header and the decoded JSM message.
 * NOTE(review): the calling context (IRQ thread vs. process context) is not
 * visible from this header — check ivpu_ipc.c before sleeping in a callback.
 */
typedef void (*ivpu_ipc_rx_callback_t)(struct ivpu_device *vdev,
				       struct ivpu_ipc_hdr *ipc_hdr,
				       struct vpu_jsm_msg *jsm_msg);

/*
 * A single received IPC message, queued either on a consumer's rx_msg_list
 * or on ivpu_ipc_info.cb_msg_list (for callback-based delivery).
 */
struct ivpu_ipc_rx_msg {
	struct list_head link;         /* Entry in rx_msg_list or cb_msg_list */
	struct ivpu_ipc_hdr *ipc_hdr;  /* Raw IPC header as received from FW */
	struct vpu_jsm_msg *jsm_msg;   /* Decoded JSM payload, if any */
	ivpu_ipc_rx_callback_t callback; /* Consumer callback to deliver this message to */
};

/*
 * A registered receiver of IPC messages on a given channel. Consumers are
 * linked on ivpu_ipc_info.cons_list (see ivpu_ipc_consumer_add/del).
 * Received messages are either queued on rx_msg_list for synchronous pickup
 * via ivpu_ipc_receive(), or handed to rx_callback when one is set.
 */
struct ivpu_ipc_consumer {
	struct list_head link;   /* Entry in ivpu_ipc_info.cons_list (protected by cons_lock) */
	u32 channel;             /* IPC channel this consumer listens on */
	u32 tx_vpu_addr;         /* VPU address of this consumer's TX buffer */
	u32 request_id;          /* ID of the in-flight request, used to match responses */
	bool aborted;            /* Set when the consumer is torn down; wakes waiters with an error */
	ivpu_ipc_rx_callback_t rx_callback; /* Optional async delivery; NULL means queue to rx_msg_list */

	spinlock_t rx_lock; /* Protects rx_msg_list and aborted */
	struct list_head rx_msg_list; /* Pending received messages for this consumer */
	wait_queue_head_t rx_msg_wq;  /* Waiters blocked in ivpu_ipc_receive() */
};

/*
 * Device-wide IPC state, one instance per ivpu_device.
 */
struct ivpu_ipc_info {
	struct gen_pool *mm_tx;  /* Allocator carving TX message slots out of mem_tx */
	struct ivpu_bo *mem_tx;  /* Buffer object backing host->VPU (TX) messages */
	struct ivpu_bo *mem_rx;  /* Buffer object backing VPU->host (RX) messages */

	atomic_t rx_msg_count;   /* Number of RX messages currently allocated/queued */

	spinlock_t cons_lock; /* Protects cons_list and cb_msg_list */
	struct list_head cons_list;   /* All registered ivpu_ipc_consumer instances */
	struct list_head cb_msg_list; /* RX messages awaiting callback delivery */

	atomic_t request_id;  /* Monotonic counter for generating request IDs */
	struct mutex lock; /* Lock on status */
	bool on;              /* True while IPC is enabled (see ivpu_ipc_enable/disable) */
};

/* Lifecycle: allocate/free the device's IPC state (buffers, pools, lists). */
int ivpu_ipc_init(struct ivpu_device *vdev);
void ivpu_ipc_fini(struct ivpu_device *vdev);

/* Runtime control: toggle the 'on' flag and reset IPC state (e.g. around FW boot). */
void ivpu_ipc_enable(struct ivpu_device *vdev);
void ivpu_ipc_disable(struct ivpu_device *vdev);
void ivpu_ipc_reset(struct ivpu_device *vdev);

/*
 * Interrupt handling: the hard-IRQ handler drains the HW and sets *wake_thread
 * when the threaded handler needs to run; the thread handler delivers queued
 * callback messages. NOTE(review): exact split of work is in ivpu_ipc.c.
 */
void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread);
irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev);

/* Register/unregister a consumer for @channel; @callback may be NULL for
 * synchronous reception via ivpu_ipc_receive(). */
void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
			   u32 channel, ivpu_ipc_rx_callback_t callback);
void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons);

/*
 * Block until a message arrives for @cons or @timeout_ms expires. On success
 * the IPC header is copied to @ipc_buf and the JSM payload to @jsm_msg (either
 * may be of interest depending on the channel). Returns 0 or a negative errno.
 */
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		     struct ivpu_ipc_hdr *ipc_buf, struct vpu_jsm_msg *jsm_msg,
		     unsigned long timeout_ms);

/*
 * Send @req on @channel and wait up to @timeout_ms for a response of type
 * @expected_resp, stored in @resp. The "_active" variant presumably assumes
 * the device is already powered/resumed, while the plain variant handles
 * runtime PM itself — confirm against the definitions in ivpu_ipc.c.
 */
int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
				 enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
				 u32 channel, unsigned long timeout_ms);
int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
			  enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
			  u32 channel, unsigned long timeout_ms);

#endif /* __IVPU_IPC_H__ */