summaryrefslogtreecommitdiffstats
path: root/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
blob: 32142c7d9a04382259e6760f1b046721477f0916 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Most ISHTP provider device and ISHTP logic declarations
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 */

#ifndef _ISHTP_DEV_H_
#define _ISHTP_DEV_H_

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/intel-ish-client-if.h>
#include "bus.h"
#include "hbm.h"

/* Size in bytes of a single IPC message payload */
#define	IPC_PAYLOAD_SIZE	128
/* One ISR->BH read buffer holds exactly one IPC payload */
#define ISHTP_RD_MSG_BUF_SIZE	IPC_PAYLOAD_SIZE
/* Payload plus 4 extra bytes (presumably the IPC header dword — confirm) */
#define	IPC_FULL_MSG_SIZE	132

/* Number of messages to be held in ISR->BH FIFO */
#define	RD_INT_FIFO_SIZE	64

/*
 * Number of IPC messages to be held in Tx FIFO, to be sent by ISR -
 * Tx complete interrupt or RX_COMPLETE handler
 */
#define	IPC_TX_FIFO_SIZE	512

/*
 * Number of Maximum ISHTP Clients
 */
#define ISHTP_CLIENTS_MAX 256

/*
 * Number of File descriptors/handles
 * that can be opened to the driver.
 *
 * Limit to 255: 256 Total Clients
 * minus internal client for ISHTP Bus Messages
 */
#define ISHTP_MAX_OPEN_HANDLE_COUNT (ISHTP_CLIENTS_MAX - 1)

/* Internal Clients Number */
/* Wildcard: let the core pick any free host client id */
#define ISHTP_HOST_CLIENT_ID_ANY		(-1)
/* Host client id 0 is reserved for HBM (bus) messages */
#define ISHTP_HBM_HOST_CLIENT_ID		0

/* Max DMA delay; units not stated here (presumably ms) — TODO confirm at use site */
#define	MAX_DMA_DELAY	20

/* ISHTP device states (lifecycle of the ISH transport device) */
enum ishtp_dev_state {
	ISHTP_DEV_INITIALIZING = 0,
	ISHTP_DEV_INIT_CLIENTS,
	ISHTP_DEV_ENABLED,
	ISHTP_DEV_RESETTING,
	ISHTP_DEV_DISABLED,
	ISHTP_DEV_POWER_DOWN,
	ISHTP_DEV_POWER_UP
};
/* Debug/log helper: human-readable name for an ishtp_dev_state value */
const char *ishtp_dev_state_str(int state);

struct ishtp_cl;

/**
 * struct ishtp_fw_client - representation of fw client
 *
 * @props:	client properties
 * @client_id:	fw client id
 */
struct ishtp_fw_client {
	struct ishtp_client_properties props;
	uint8_t client_id;
};

/*
 * Control info for IPC messages ISHTP/IPC sending FIFO -
 * list node with inline data buffer.
 * This structure will be filled with parameters submitted
 * by the caller glue layer.
 *
 * NOTE(review): an earlier version of this comment referenced 'buf' and
 * 'offset' members which no longer exist in this struct; the message
 * bytes now live entirely in 'inline_data'.
 *
 * 'ipc_send_compl' is intended for use by clients that send fragmented
 * messages. When a fragment is sent down to IPC msg regs,
 * it will be called.
 * If it has more fragments to send, it will do it. With last fragment
 * it will send appropriate ISHTP "message-complete" flag.
 * It will remove the outstanding message
 * (mark outstanding buffer as available).
 * If counting flow control is in work and there are more flow control
 * credits, it can put the next client message queued in cl.
 * structure for IPC processing.
 *
 */
struct wr_msg_ctl_info {
	/* Will be called with 'ipc_send_compl_prm' as parameter */
	void (*ipc_send_compl)(void *);

	void *ipc_send_compl_prm;	/* opaque argument for ipc_send_compl */
	size_t length;			/* message length — presumably bytes valid in inline_data; confirm */
	struct list_head	link;	/* entry in wr_processing_list / wr_free_list */
	unsigned char	inline_data[IPC_FULL_MSG_SIZE];	/* inline message bytes */
};

/*
 * The ISHTP layer talks to hardware IPC message using the following
 * callbacks, supplied by the HW glue layer (e.g. the PCI driver).
 */
struct ishtp_hw_ops {
	/* Full hardware reset */
	int	(*hw_reset)(struct ishtp_device *dev);
	/* Reset the IPC layer only — confirm exact scope with implementation */
	int	(*ipc_reset)(struct ishtp_device *dev);
	/* Build the IPC header word for a message of 'length' bytes */
	uint32_t (*ipc_get_header)(struct ishtp_device *dev, int length,
				   int busy);
	/* Send 'msg'; 'ipc_send_compl' is invoked with 'ipc_send_compl_prm'
	 * when the write completes (see struct wr_msg_ctl_info)
	 */
	int	(*write)(struct ishtp_device *dev,
		void (*ipc_send_compl)(void *), void *ipc_send_compl_prm,
		unsigned char *msg, int length);
	/* Read the header of the pending inbound ISHTP message */
	uint32_t	(*ishtp_read_hdr)(const struct ishtp_device *dev);
	/* Read up to 'buffer_length' bytes of the pending message into 'buffer' */
	int	(*ishtp_read)(struct ishtp_device *dev, unsigned char *buffer,
			unsigned long buffer_length);
	/* Fetch the firmware status word */
	uint32_t	(*get_fw_status)(struct ishtp_device *dev);
	/* Synchronize clock with the firmware */
	void	(*sync_fw_clock)(struct ishtp_device *dev);
	/* True if DMA transfers bypass cache snooping on this HW */
	bool	(*dma_no_cache_snooping)(struct ishtp_device *dev);
};

/**
 * struct ishtp_device - ISHTP private device struct
 *
 * Central state for one ISH transport instance: IPC Rx/Tx FIFOs, client
 * bookkeeping, DMA transfer buffers and the HW callback table.
 * Members are documented inline below.
 */
struct ishtp_device {
	struct device *devc;	/* pointer to lowest device */
	struct pci_dev *pdev;	/* PCI device to get device ids */

	/* waitq for waiting for suspend response */
	wait_queue_head_t suspend_wait;
	bool suspend_flag;	/* Suspend is active */

	/* waitq for waiting for resume response */
	wait_queue_head_t resume_wait;
	bool resume_flag;	/* Resume is active */

	/*
	 * lock for the device, for everything that doesn't have
	 * a dedicated spinlock
	 */
	spinlock_t device_lock;

	bool recvd_hw_ready;	/* HW-ready indication was received */
	struct hbm_version version;	/* HBM protocol version (see hbm.h) */
	int transfer_path; /* Choice of transfer path: IPC or DMA */

	/* ishtp device states */
	enum ishtp_dev_state dev_state;
	enum ishtp_hbm_state hbm_state;

	/* driver read queue */
	struct ishtp_cl_rb read_list;
	spinlock_t read_list_spinlock;

	/* list of ishtp_cl's */
	struct list_head cl_list;
	spinlock_t cl_list_lock;
	long open_handle_count;	/* number of open client handles */

	/* List of bus devices */
	struct list_head device_list;
	spinlock_t device_list_lock;

	/* waiting queues for receive message from FW */
	wait_queue_head_t wait_hw_ready;
	wait_queue_head_t wait_hbm_recvd_msg;

	/* FIFO for input messages for BH processing */
	unsigned char rd_msg_fifo[RD_INT_FIFO_SIZE * IPC_PAYLOAD_SIZE];
	unsigned int rd_msg_fifo_head, rd_msg_fifo_tail;
	spinlock_t rd_msg_spinlock;
	struct work_struct bh_hbm_work;

	/* IPC write queue */
	struct list_head wr_processing_list, wr_free_list;
	/* For both processing list  and free list */
	spinlock_t wr_processing_spinlock;

	struct ishtp_fw_client *fw_clients; /*Note:memory has to be allocated*/
	DECLARE_BITMAP(fw_clients_map, ISHTP_CLIENTS_MAX);
	DECLARE_BITMAP(host_clients_map, ISHTP_CLIENTS_MAX);
	uint8_t fw_clients_num;	/* total FW clients enumerated */
	uint8_t fw_client_presentation_num;	/* clients presented so far — confirm */
	uint8_t fw_client_index;	/* index used during enumeration — confirm */
	spinlock_t fw_clients_lock;

	/* TX DMA buffers and slots */
	int ishtp_host_dma_enabled;
	void *ishtp_host_dma_tx_buf;
	unsigned int ishtp_host_dma_tx_buf_size;
	uint64_t ishtp_host_dma_tx_buf_phys;
	int ishtp_dma_num_slots;

	/* map of 4k blocks in Tx dma buf: 0-free, 1-used */
	uint8_t *ishtp_dma_tx_map;
	spinlock_t ishtp_dma_tx_lock;

	/* RX DMA buffers and slots */
	void *ishtp_host_dma_rx_buf;
	unsigned int ishtp_host_dma_rx_buf_size;
	uint64_t ishtp_host_dma_rx_buf_phys;

	/* Dump to trace buffers if enabled*/
	ishtp_print_log print_log;

	/* Debug stats */
	unsigned int	ipc_rx_cnt;
	unsigned long long	ipc_rx_bytes_cnt;
	unsigned int	ipc_tx_cnt;
	unsigned long long	ipc_tx_bytes_cnt;

	const struct ishtp_hw_ops *ops;	/* HW glue callbacks */
	size_t	mtu;	/* max ISHTP message size — confirm units/semantics */
	uint32_t	ishtp_msg_hdr;	/* cached ISHTP message header — confirm */
	/* HW-layer private data, allocated contiguously past this struct */
	char hw[] __aligned(sizeof(void *));
};

/* Convert a duration in whole seconds to jiffies */
static inline unsigned long ishtp_secs_to_jiffies(unsigned long sec)
{
	unsigned long msecs = sec * MSEC_PER_SEC;

	return msecs_to_jiffies(msecs);
}

/*
 * Register Access Function
 */
static inline int ish_ipc_reset(struct ishtp_device *dev)
{
	return dev->ops->ipc_reset(dev);
}

/* Exported functions */
/* Initialize the ishtp_device structure (locks, lists, waitqueues) before use */
void	ishtp_device_init(struct ishtp_device *dev);
/* Start the ISHTP device; returns 0 on success — confirm with implementation */
int	ishtp_start(struct ishtp_device *dev);

#endif /*_ISHTP_DEV_H_*/